VOL-1395: Common shared libraries needed for Python-based device adapters.
This is an initial check-in of code from the master branch. Additional work
is expected on a few items so they work with the new go-core; that work will
be covered by separate JIRAs and commits.
Change-Id: I0856ec6b79b8d3e49082c609eb9c7eedd75b1708
diff --git a/python/adapters/extensions/omci/__init__.py b/python/adapters/extensions/omci/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/adapters/extensions/omci/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/extensions/omci/database/__init__.py b/python/adapters/extensions/omci/database/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/adapters/extensions/omci/database/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/extensions/omci/database/alarm_db_ext.py b/python/adapters/extensions/omci/database/alarm_db_ext.py
new file mode 100644
index 0000000..2af6923
--- /dev/null
+++ b/python/adapters/extensions/omci/database/alarm_db_ext.py
@@ -0,0 +1,698 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from mib_db_api import *
+from voltha.protos.omci_alarm_db_pb2 import AlarmInstanceData, AlarmClassData, \
+ AlarmDeviceData, AlarmAttributeData
+
+
+class AlarmDbExternal(MibDbApi):
+ """
+ A persistent external OpenOMCI Alarm Database
+ """
+ CURRENT_VERSION = 1 # VOLTHA v1.3.0 release
+ ALARM_BITMAP_KEY = 'alarm_bit_map'
+
+ _TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
+
+ # Paths from root proxy
+ ALARM_PATH = '/omci_alarms'
+ DEVICE_PATH = ALARM_PATH + '/{}' # .format(device_id)
+
+ # Classes, Instances, and Attributes as lists from root proxy
+ CLASSES_PATH = DEVICE_PATH + '/classes' # .format(device_id)
+ INSTANCES_PATH = DEVICE_PATH + '/classes/{}/instances' # .format(device_id, class_id)
+ ATTRIBUTES_PATH = DEVICE_PATH + '/classes/{}/instances/{}/attributes' # .format(device_id, class_id, instance_id)
+
+ # Single Class, Instance, and Attribute as objects from device proxy
+ CLASS_PATH = '/classes/{}' # .format(class_id)
+ INSTANCE_PATH = '/classes/{}/instances/{}' # .format(class_id, instance_id)
+ ATTRIBUTE_PATH = '/classes/{}/instances/{}/attributes/{}' # .format(class_id, instance_id
+ # attribute_name)
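+    # For illustration only (hypothetical device id and ME identifiers), the templates
+    # above expand to paths such as:
+    #   DEVICE_PATH.format('onu-123')   -> '/omci_alarms/onu-123'
+    #   INSTANCE_PATH.format(6, 1)      -> '/classes/6/instances/1'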
+
+ def __init__(self, omci_agent):
+ """
+ Class initializer
+ :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+ """
+ super(AlarmDbExternal, self).__init__(omci_agent)
+ self._core = omci_agent.core
+
+ def start(self):
+ """
+ Start up/restore the database
+ """
+ self.log.debug('start')
+
+ if not self._started:
+ super(AlarmDbExternal, self).start()
+ root_proxy = self._core.get_proxy('/')
+
+ try:
+ base = root_proxy.get(AlarmDbExternal.ALARM_PATH)
+ self.log.info('db-exists', num_devices=len(base))
+
+ except Exception as e:
+ self.log.exception('start-failure', e=e)
+ raise
+
+ def stop(self):
+ """
+        Shut down the database
+ """
+ self.log.debug('stop')
+
+ if self._started:
+ super(AlarmDbExternal, self).stop()
+ # TODO: Delete this method if nothing else is done except calling the base class
+
+ def _time_to_string(self, time):
+ return time.strftime(AlarmDbExternal._TIME_FORMAT) if time is not None else ''
+
+ def _string_to_time(self, time):
+ return datetime.strptime(time, AlarmDbExternal._TIME_FORMAT) if len(time) else None
+
+ def _attribute_to_string(self, value):
+ """
+ Convert an ME's attribute value to string representation
+
+ :param value: (long) Alarm bitmaps are always a Long
+ :return: (str) String representation of the value
+ """
+ return str(value)
+
+ def _string_to_attribute(self, str_value):
+ """
+        Convert an ME's attribute value-string back to its native value
+
+        :param str_value: (str) Attribute value in string form
+
+        :return: (long) Decoded alarm bitmap value
+ """
+ # Alarms are always a bitmap which is a long
+ return long(str_value) if len(str_value) else 0L
+
+ def add(self, device_id, overwrite=False):
+ """
+ Add a new ONU to database
+
+ :param device_id: (str) Device ID of ONU to add
+ :param overwrite: (bool) Overwrite existing entry if found.
+
+ :raises KeyError: If device already exists and 'overwrite' is False
+ """
+ self.log.debug('add-device', device_id=device_id, overwrite=overwrite)
+
+ now = datetime.utcnow()
+ found = False
+ root_proxy = self._core.get_proxy('/')
+
+ data = AlarmDeviceData(device_id=device_id,
+ created=self._time_to_string(now),
+ version=AlarmDbExternal.CURRENT_VERSION,
+ last_alarm_sequence=0)
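+        # Probe for an existing device entry first: a KeyError from the device proxy
+        # lookup means the ONU is not yet in the database, while the 'found' flag lets
+        # the handler below re-raise the deliberate duplicate-device error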
+ try:
+ dev_proxy = self._device_proxy(device_id)
+ found = True
+
+ if not overwrite:
+ # Device already exists
+ raise KeyError('Device with ID {} already exists in Alarm database'.
+ format(device_id))
+
+ # Overwrite with new data
+ data = dev_proxy.get('/', depth=0)
+ self._root_proxy.update(AlarmDbExternal.DEVICE_PATH.format(device_id), data)
+ self._modified = now
+
+ except KeyError:
+ if found:
+ raise
+ # Did not exist, add it now
+ root_proxy.add(AlarmDbExternal.ALARM_PATH, data)
+ self._created = now
+ self._modified = now
+
+ def remove(self, device_id):
+ """
+ Remove an ONU from the database
+
+ :param device_id: (str) Device ID of ONU to remove from database
+ """
+ self.log.debug('remove-device', device_id=device_id)
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ try:
+ # self._root_proxy.get(AlarmDbExternal.DEVICE_PATH.format(device_id))
+ self._root_proxy.remove(AlarmDbExternal.DEVICE_PATH.format(device_id))
+ self._modified = datetime.utcnow()
+
+ except KeyError:
+            # Did not exist, which is not a failure
+ pass
+
+ except Exception as e:
+ self.log.exception('remove-exception', device_id=device_id, e=e)
+ raise
+
+ @property
+ def _root_proxy(self):
+ return self._core.get_proxy('/')
+
+ def _device_proxy(self, device_id):
+ """
+ Return a config proxy to the OMCI Alarm_DB leaf for a given device
+
+ :param device_id: (str) ONU Device ID
+ :return: (ConfigProxy) Configuration proxy rooted at OMCI Alarm DB
+ :raises KeyError: If the device does not exist in the database
+ """
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ return self._core.get_proxy(AlarmDbExternal.DEVICE_PATH.format(device_id))
+
+ def _class_proxy(self, device_id, class_id, create=False):
+ """
+ Get a config proxy to a specific managed entity class
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Class ID
+ :param create: (bool) If true, create default instance (and class)
+ :return: (ConfigProxy) Class configuration proxy
+
+ :raises DatabaseStateError: If database is not started
+ :raises KeyError: If Instance does not exist and 'create' is False
+ """
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError('class-id is 0..0xFFFF')
+
+ fmt = AlarmDbExternal.DEVICE_PATH + AlarmDbExternal.CLASS_PATH
+ path = fmt.format(device_id, class_id)
+
+ try:
+ return self._core.get_proxy(path)
+
+ except KeyError:
+ if not create:
+ self.log.error('class-proxy-does-not-exist', device_id=device_id,
+ class_id=class_id)
+ raise
+
+ # Create class
+ data = AlarmClassData(class_id=class_id)
+ root_path = AlarmDbExternal.CLASSES_PATH.format(device_id)
+ self._root_proxy.add(root_path, data)
+
+ return self._core.get_proxy(path)
+
+ def _instance_proxy(self, device_id, class_id, instance_id, create=False):
+ """
+ Get a config proxy to a specific managed entity instance
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Class ID
+ :param instance_id: (int) Instance ID
+ :param create: (bool) If true, create default instance (and class)
+ :return: (ConfigProxy) Instance configuration proxy
+
+ :raises DatabaseStateError: If database is not started
+ :raises KeyError: If Instance does not exist and 'create' is False
+ """
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError('class-id is 0..0xFFFF')
+
+ if not 0 <= instance_id <= 0xFFFF:
+ raise ValueError('instance-id is 0..0xFFFF')
+
+ fmt = AlarmDbExternal.DEVICE_PATH + AlarmDbExternal.INSTANCE_PATH
+ path = fmt.format(device_id, class_id, instance_id)
+
+ try:
+ return self._core.get_proxy(path)
+
+ except KeyError:
+ if not create:
+ self.log.error('instance-proxy-does-not-exist', device_id=device_id,
+ class_id=class_id, instance_id=instance_id)
+ raise
+
+ # Create instance, first make sure class exists
+ self._class_proxy(device_id, class_id, create=True)
+
+ now = self._time_to_string(datetime.utcnow())
+ data = AlarmInstanceData(instance_id=instance_id, created=now, modified=now)
+ root_path = AlarmDbExternal.INSTANCES_PATH.format(device_id, class_id)
+ self._root_proxy.add(root_path, data)
+
+ return self._core.get_proxy(path)
+
+ def save_last_sync_time(self, device_id, value):
+ """
+ Save the Last Sync time to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (DateTime) Value to save
+ """
+ self.log.debug('save-last-sync-time', device_id=device_id, time=str(value))
+
+ try:
+ if not isinstance(value, datetime):
+ raise TypeError('Expected a datetime object, got {}'.
+                                format(type(value)))
+
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+
+ now = datetime.utcnow()
+ data.last_sync_time = self._time_to_string(value)
+
+ # Update
+ self._root_proxy.update(AlarmDbExternal.DEVICE_PATH.format(device_id),
+ data)
+ self._modified = now
+ self.log.debug('save-sync-time-complete', device_id=device_id)
+
+ except Exception as e:
+ self.log.exception('save-last-sync-exception', device_id=device_id, e=e)
+ raise
+
+ def get_last_sync_time(self, device_id):
+ """
+ Get the Last Sync Time saved to the database for a device
+
+ :param device_id: (str) ONU Device ID
+        :return: (datetime) The value or None if not found
+ """
+ self.log.debug('get-last-sync-time', device_id=device_id)
+
+ try:
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+ return self._string_to_time(data.last_sync_time)
+
+ except KeyError:
+ return None # OMCI MIB_DB entry has not yet been created
+
+ except Exception as e:
+ self.log.exception('get-last-sync-time-exception', e=e)
+ raise
+
+ def save_alarm_last_sync(self, device_id, value):
+ """
+ Save the Last Alarm Sequence value to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (int) Value to save
+ """
+ self.log.debug('save-last-sync', device_id=device_id, seq=str(value))
+
+ try:
+ if not isinstance(value, int):
+                raise TypeError('Expected an integer, got {}'.format(type(value)))
+
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+
+ now = datetime.utcnow()
+ data.last_alarm_sequence = int(value)
+
+ # Update
+ self._root_proxy.update(AlarmDbExternal.DEVICE_PATH.format(device_id),
+ data)
+ self._modified = now
+ self.log.debug('save-sequence-complete', device_id=device_id)
+
+ except Exception as e:
+ self.log.exception('save-last-sync-exception', device_id=device_id, e=e)
+ raise
+
+ def get_alarm_last_sync(self, device_id):
+ """
+        Get the Last Alarm Sequence value saved to the database for a device
+
+ :param device_id: (str) ONU Device ID
+ :return: (int) The Value or None if not found
+ """
+ self.log.debug('get-last-sync', device_id=device_id)
+
+ try:
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+ return int(data.last_alarm_sequence)
+
+ except KeyError:
+ return None # OMCI ALARM_DB entry has not yet been created
+
+ except Exception as e:
+ self.log.exception('get-last-alarm-exception', e=e)
+ raise
+
+ def _add_new_class(self, device_id, class_id, instance_id, attributes):
+ """
+ Create an entry for a new class in the external database
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+ """
+ self.log.debug('add', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+
+ now = self._time_to_string(datetime.utcnow())
+ attrs = [AlarmAttributeData(name=k,
+ value=self._attribute_to_string(v)) for k, v in attributes.items()]
+ class_data = AlarmClassData(class_id=class_id,
+ instances=[AlarmInstanceData(instance_id=instance_id,
+ created=now,
+ modified=now,
+ attributes=attrs)])
+
+ self._root_proxy.add(AlarmDbExternal.CLASSES_PATH.format(device_id), class_data)
+ self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+ entity_id=instance_id, attributes=attributes)
+ return True
+
+ def _add_new_instance(self, device_id, class_id, instance_id, attributes):
+ """
+        Create an entry for an instance of an existing class in the external database
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+ """
+ self.log.debug('add', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+
+ now = self._time_to_string(datetime.utcnow())
+ attrs = [AlarmAttributeData(name=k,
+ value=self._attribute_to_string(v)) for k, v in attributes.items()]
+ instance_data = AlarmInstanceData(instance_id=instance_id,
+ created=now,
+ modified=now,
+ attributes=attrs)
+
+ self._root_proxy.add(AlarmDbExternal.INSTANCES_PATH.format(device_id, class_id),
+ instance_data)
+
+ self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+ entity_id=instance_id, attributes=attributes)
+ return True
+
+ def set(self, device_id, class_id, instance_id, attributes):
+ """
+ Set a database value. This should only be called by the Alarm synchronizer
+ and its related tasks
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('set', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+ try:
+ if not isinstance(device_id, basestring):
+ raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError("Invalid Class ID: {}, should be 0..65535".format(class_id))
+
+ if not 0 <= instance_id <= 0xFFFF:
+ raise ValueError("Invalid Instance ID: {}, should be 0..65535".format(instance_id))
+
+ if not isinstance(attributes, dict):
+ raise TypeError("Attributes should be a dictionary")
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ # Determine the best strategy to add the information
+ dev_proxy = self._device_proxy(device_id)
+
+ try:
+ class_data = dev_proxy.get(AlarmDbExternal.CLASS_PATH.format(class_id), deep=True)
+
+ inst_data = next((inst for inst in class_data.instances
+ if inst.instance_id == instance_id), None)
+
+ if inst_data is None:
+ return self._add_new_instance(device_id, class_id, instance_id, attributes)
+
+ # Possibly adding to or updating an existing instance
+ # Get instance proxy, creating it if needed
+
+ exist_attr_indexes = dict()
+ attr_len = len(inst_data.attributes)
+
+ for index in xrange(0, attr_len):
+ exist_attr_indexes[inst_data.attributes[index].name] = index
+
+ modified = False
+ str_value = ''
+ new_attributes = []
+
+ for k, v in attributes.items():
+ try:
+ str_value = self._attribute_to_string(v)
+ new_attributes.append(AlarmAttributeData(name=k, value=str_value))
+
+ except Exception as e:
+ self.log.exception('save-error', e=e, class_id=class_id,
+ attr=k, value_type=type(v))
+
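+                    # Flag the instance as modified if this attribute is new or its
+                    # stored string value differs from the newly encoded one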
+ if k not in exist_attr_indexes or \
+ inst_data.attributes[exist_attr_indexes[k]].value != str_value:
+ modified = True
+
+ if modified:
+ now = datetime.utcnow()
+ new_data = AlarmInstanceData(instance_id=instance_id,
+ created=inst_data.created,
+ modified=self._time_to_string(now),
+ attributes=new_attributes)
+ dev_proxy.remove(AlarmDbExternal.INSTANCE_PATH.format(class_id, instance_id))
+ self._root_proxy.add(AlarmDbExternal.INSTANCES_PATH.format(device_id,
+ class_id), new_data)
+
+ self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+ entity_id=instance_id, attributes=attributes, modified=modified)
+ return modified
+
+ except KeyError:
+ # Here if the class-id does not yet exist in the database
+ return self._add_new_class(device_id, class_id, instance_id,
+ attributes)
+ except Exception as e:
+ self.log.exception('set-exception', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes, e=e)
+ raise
+
+ def delete(self, device_id, class_id, entity_id):
+ """
+ Delete an entity from the database if it exists. If all instances
+ of a class are deleted, the class is deleted as well.
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME Entity ID
+
+ :returns: (bool) True if the instance was found and deleted. False
+ if it did not exist.
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('delete', device_id=device_id, class_id=class_id,
+ entity_id=entity_id)
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError('class-id is 0..0xFFFF')
+
+ if not 0 <= entity_id <= 0xFFFF:
+ raise ValueError('instance-id is 0..0xFFFF')
+
+ try:
+ # Remove instance
+ self._instance_proxy(device_id, class_id, entity_id).remove('/')
+ now = datetime.utcnow()
+
+ # If resulting class has no instance, remove it as well
+ class_proxy = self._class_proxy(device_id, class_id)
+ class_data = class_proxy.get('/', depth=1)
+
+ if len(class_data.instances) == 0:
+ class_proxy.remove('/')
+
+ self._modified = now
+ return True
+
+ except KeyError:
+ return False # Not found
+
+ except Exception as e:
+            self.log.exception('delete-exception', device_id=device_id, e=e)
+ raise
+
+ def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+ """
+ Get database information.
+
+ This method can be used to request information from the database to the detailed
+ level requested
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+ :param attributes: (list/set or str) Managed Entity instance's attributes
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises KeyError: If the requested device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('query', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+ try:
+ if class_id is None:
+ # Get full device info
+ dev_data = self._device_proxy(device_id).get('/', depth=-1)
+ data = self._device_to_dict(dev_data)
+
+ elif instance_id is None:
+ # Get all instances of the class
+ try:
+ cls_data = self._class_proxy(device_id, class_id).get('/', depth=-1)
+ data = self._class_to_dict(cls_data)
+
+ except KeyError:
+ data = dict()
+
+ else:
+ # Get all attributes of a specific ME
+ try:
+ inst_data = self._instance_proxy(device_id, class_id, instance_id).\
+ get('/', depth=-1)
+
+ if attributes is None:
+ # All Attributes
+ data = self._instance_to_dict(inst_data)
+
+ else:
+ # Specific attribute(s)
+ if isinstance(attributes, basestring):
+ attributes = {attributes}
+
+ data = {
+ attr.name: self._string_to_attribute(attr.value)
+ for attr in inst_data.attributes if attr.name in attributes}
+
+ except KeyError:
+ data = dict()
+
+ return data
+
+ except KeyError:
+ self.log.warn('query-no-device', device_id=device_id)
+ raise
+
+ except Exception as e:
+            self.log.exception('query-exception', device_id=device_id, e=e)
+ raise
+
+ def _instance_to_dict(self, instance):
+ if not isinstance(instance, AlarmInstanceData):
+ raise TypeError('{} is not of type AlarmInstanceData'.format(type(instance)))
+
+ data = {
+ INSTANCE_ID_KEY: instance.instance_id,
+ CREATED_KEY: self._string_to_time(instance.created),
+ MODIFIED_KEY: self._string_to_time(instance.modified),
+ ATTRIBUTES_KEY: dict()
+ }
+ for attribute in instance.attributes:
+ data[ATTRIBUTES_KEY][attribute.name] = self._string_to_attribute(attribute.value)
+ return data
+
+ def _class_to_dict(self, val):
+ if not isinstance(val, AlarmClassData):
+ raise TypeError('{} is not of type AlarmClassData'.format(type(val)))
+
+ data = {
+ CLASS_ID_KEY: val.class_id,
+ }
+ for instance in val.instances:
+ data[instance.instance_id] = self._instance_to_dict(instance)
+ return data
+
+ def _device_to_dict(self, val):
+ if not isinstance(val, AlarmDeviceData):
+ raise TypeError('{} is not of type AlarmDeviceData'.format(type(val)))
+
+ data = {
+ DEVICE_ID_KEY: val.device_id,
+ CREATED_KEY: self._string_to_time(val.created),
+ VERSION_KEY: val.version,
+ ME_KEY: dict(),
+ MSG_TYPE_KEY: set()
+ }
+ for class_data in val.classes:
+ data[class_data.class_id] = self._class_to_dict(class_data)
+ for managed_entity in val.managed_entities:
+ data[ME_KEY][managed_entity.class_id] = managed_entity.name
+
+ for msg_type in val.message_types:
+ data[MSG_TYPE_KEY].add(msg_type.message_type)
+
+ return data
diff --git a/python/adapters/extensions/omci/database/mib_db_api.py b/python/adapters/extensions/omci/database/mib_db_api.py
new file mode 100644
index 0000000..eb93323
--- /dev/null
+++ b/python/adapters/extensions/omci/database/mib_db_api.py
@@ -0,0 +1,245 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+OpenOMCI MIB Database API
+"""
+
+import structlog
+from datetime import datetime
+
+CREATED_KEY = 'created'
+MODIFIED_KEY = 'modified'
+MDS_KEY = 'mib_data_sync'
+LAST_SYNC_KEY = 'last_mib_sync'
+VERSION_KEY = 'version'
+DEVICE_ID_KEY = 'device_id'
+CLASS_ID_KEY = 'class_id'
+INSTANCE_ID_KEY = 'instance_id'
+ATTRIBUTES_KEY = 'attributes'
+ME_KEY = 'managed_entities'
+MSG_TYPE_KEY = 'message_types'
+
+
+class DatabaseStateError(Exception):
+ def __init__(self, *args):
+ Exception.__init__(self, *args)
+
+
+class MibDbApi(object):
+ """
+ MIB Database API Base Class
+
+ Derive the ME MIB Database implementation from this API. For an example
+    implementation, see MibDbVolatileDict in mib_db_dict.py
+ """
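+    # A minimal derived class (sketch only) overrides the NotImplementedError methods
+    # below, for example:
+    #     class MyMibDb(MibDbApi):
+    #         def add(self, device_id, overwrite=False): ...
+    #         def remove(self, device_id): ...
+    # See MibDbVolatileDict in mib_db_dict.py for a complete example.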
+ def __init__(self, omci_agent):
+ """
+ Class initializer
+ :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+ """
+ self.log = structlog.get_logger()
+ self._omci_agent = omci_agent
+ self._started = False
+
+ now = datetime.utcnow()
+ self._created = now
+ self._modified = now
+
+ def start(self):
+ """
+ Start up/restore the database. For in-memory, will be a nop. For external
+ DB, may need to create the DB and fetch create/modified values
+ """
+ if not self._started:
+ self._started = True
+ # For a derived class that is a persistent DB, Restore DB (connect,
+ # get created/modified times, ....) or something along those lines.
+ # Minimal restore could just be getting ONU device IDs' so they are cached
+        # Minimal restore could just be getting ONU device IDs so they are cached
+ # for fast 'GET' request support.
+ # Remember to restore the '_created' and '_modified' times (above) as well
+ # from the database
+
+ def stop(self):
+ """
+        Shut down the database. For in-memory, this will be a nop. For an external
+        DB, any required cleanup or disconnect can be done here
+ """
+ if self._started:
+ self._started = False
+
+ @property
+ def active(self):
+ """
+ Is the database active
+ :return: (bool) True if active
+ """
+ return self._started
+
+ @property
+ def created(self):
+ """
+ Date (UTC) that the database was created
+ :return: (datetime) creation date
+ """
+ return self._created
+
+ @property
+ def modified(self):
+ """
+ Date (UTC) that the database last added or removed a device
+ or updated a device's ME information
+ :return: (datetime) last modification date
+ """
+ return self._modified
+
+ def add(self, device_id, overwrite=False):
+ """
+ Add a new ONU to database
+
+ :param device_id: (str) Device ID of ONU to add
+ :param overwrite: (bool) Overwrite existing entry if found.
+
+ :raises KeyError: If device already exists and 'overwrite' is False
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def remove(self, device_id):
+ """
+ Remove an ONU from the database
+
+ :param device_id: (str) Device ID of ONU to remove from database
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def set(self, device_id, class_id, entity_id, attributes):
+ """
+ Set/Create a database value. This should only be called by the MIB synchronizer
+ and its related tasks
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def delete(self, device_id, class_id, entity_id):
+ """
+ Delete an entity from the database if it exists
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME Entity ID
+
+ :returns: (bool) True if the instance was found and deleted. False
+ if it did not exist.
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+ """
+ Get database information.
+
+ This method can be used to request information from the database to the detailed
+ level requested
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+ :param attributes: (list/set or str) Managed Entity instance's attributes
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises KeyError: If the requested device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def on_mib_reset(self, device_id):
+ """
+ Reset/clear the database for a specific Device
+
+ :param device_id: (str) ONU Device ID
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ # Your derived class should clear out all MIB data and update the
+ # modified stats appropriately
+        raise NotImplementedError('Implement this in your derived class')
+
+ def save_mib_data_sync(self, device_id, value):
+ """
+ Save the MIB Data Sync to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (int) Value to save
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def get_mib_data_sync(self, device_id):
+ """
+ Get the MIB Data Sync value last saved to the database for a device
+
+ :param device_id: (str) ONU Device ID
+ :return: (int) The Value or None if not found
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def save_last_sync(self, device_id, value):
+ """
+ Save the Last Sync time to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (DateTime) Value to save
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def get_last_sync(self, device_id):
+ """
+        Get the Last Sync Time saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (datetime) The value or None if not found
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def update_supported_managed_entities(self, device_id, managed_entities):
+ """
+ Update the supported OMCI Managed Entities for this device
+
+ :param device_id: (str) ONU Device ID
+ :param managed_entities: (set) Managed Entity class IDs
+ """
+        raise NotImplementedError('Implement this in your derived class')
+
+ def update_supported_message_types(self, device_id, msg_types):
+ """
+        Update the supported OMCI message types for this device
+
+ :param device_id: (str) ONU Device ID
+ :param msg_types: (set) Message Type values (ints)
+ """
+        raise NotImplementedError('Implement this in your derived class')
diff --git a/python/adapters/extensions/omci/database/mib_db_dict.py b/python/adapters/extensions/omci/database/mib_db_dict.py
new file mode 100644
index 0000000..6a7de8f
--- /dev/null
+++ b/python/adapters/extensions/omci/database/mib_db_dict.py
@@ -0,0 +1,524 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import copy
+from mib_db_api import *
+import json
+
+
+class MibDbVolatileDict(MibDbApi):
+ """
+ A very simple in-memory database for ME storage. Data is not persistent
+ across reboots.
+
+ In Phase 2, this DB will be instantiated on a per-ONU basis but act as if
+    it is shared for all ONUs. This class will be updated with an external
+ key-value store (or other appropriate database) in Voltha 1.3 Sprint 3
+
+ This class can be used for unit tests
+ """
+ CURRENT_VERSION = 1
+
+ def __init__(self, omci_agent):
+ """
+ Class initializer
+ :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+ """
+ super(MibDbVolatileDict, self).__init__(omci_agent)
+ self._data = dict() # device_id -> ME ID -> Inst ID -> Attr Name -> Values
+
+ def start(self):
+ """
+ Start up/restore the database. For in-memory, will be a nop. For external
+ DB, may need to create the DB and fetch create/modified values
+ """
+ super(MibDbVolatileDict, self).start()
+ # TODO: Delete this method if nothing else is done except calling the base class
+
+ def stop(self):
+ """
+        Shut down the database. For this in-memory implementation it is a nop; an
+        external DB would do any required cleanup or disconnect here
+ """
+ super(MibDbVolatileDict, self).stop()
+ # TODO: Delete this method if nothing else is done except calling the base class
+
+ def add(self, device_id, overwrite=False):
+ """
+ Add a new ONU to database
+
+ :param device_id: (str) Device ID of ONU to add
+ :param overwrite: (bool) Overwrite existing entry if found.
+
+        :raises KeyError: If device already exists and 'overwrite' is False
+ """
+ self.log.debug('add-device', device_id=device_id, overwrite=overwrite)
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not overwrite and device_id in self._data:
+ raise KeyError('Device {} already exists in the database'
+ .format(device_id))
+
+ now = datetime.utcnow()
+ self._data[device_id] = {
+ DEVICE_ID_KEY: device_id,
+ CREATED_KEY: now,
+ LAST_SYNC_KEY: None,
+ MDS_KEY: 0,
+ VERSION_KEY: MibDbVolatileDict.CURRENT_VERSION,
+ ME_KEY: dict(),
+ MSG_TYPE_KEY: set()
+ }
+
+ def remove(self, device_id):
+ """
+ Remove an ONU from the database
+
+ :param device_id: (str) Device ID of ONU to remove from database
+ """
+ self.log.debug('remove-device', device_id=device_id)
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if device_id in self._data:
+ del self._data[device_id]
+ self._modified = datetime.utcnow()
+
+ def on_mib_reset(self, device_id):
+ """
+ Reset/clear the database for a specific Device
+
+ :param device_id: (str) ONU Device ID
+ :raises DatabaseStateError: If the database is not enabled
+ :raises KeyError: If the device does not exist in the database
+ """
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ device_db = self._data[device_id]
+ self._modified = datetime.utcnow()
+
+ self._data[device_id] = {
+ DEVICE_ID_KEY: device_id,
+ CREATED_KEY: device_db[CREATED_KEY],
+ LAST_SYNC_KEY: device_db[LAST_SYNC_KEY],
+ MDS_KEY: 0,
+ VERSION_KEY: MibDbVolatileDict.CURRENT_VERSION,
+ ME_KEY: device_db[ME_KEY],
+ MSG_TYPE_KEY: device_db[MSG_TYPE_KEY]
+ }
+
+ def save_mib_data_sync(self, device_id, value):
+ """
+ Save the MIB Data Sync to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (int) Value to save
+ """
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not isinstance(value, int):
+            raise TypeError('MIB Data Sync should be an integer')
+
+ if not 0 <= value <= 255:
+ raise ValueError('Invalid MIB-data-sync value {}. Must be 0..255'.
+ format(value))
+
+ self._data[device_id][MDS_KEY] = value
+ self._modified = datetime.utcnow()
+
+ def get_mib_data_sync(self, device_id):
+ """
+ Get the MIB Data Sync value last saved to the database for a device
+
+ :param device_id: (str) ONU Device ID
+ :return: (int) The Value or None if not found
+ """
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if device_id not in self._data:
+ return None
+
+ return self._data[device_id].get(MDS_KEY)
+
+ def save_last_sync(self, device_id, value):
+ """
+ Save the Last Sync time to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (DateTime) Value to save
+ """
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not isinstance(value, datetime):
+ raise TypeError('Expected a datetime object, got {}'.
+                            format(type(value)))
+
+ self._data[device_id][LAST_SYNC_KEY] = value
+ self._modified = datetime.utcnow()
+
+ def get_last_sync(self, device_id):
+ """
+        Get the Last Sync Time saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (datetime) The value or None if not found
+ """
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if device_id not in self._data:
+ return None
+
+ return self._data[device_id].get(LAST_SYNC_KEY)
+
+ def set(self, device_id, class_id, instance_id, attributes):
+ """
+ Set a database value. This should only be called by the MIB synchronizer
+ and its related tasks
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ if not isinstance(device_id, basestring):
+ raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError("Invalid Class ID: {}, should be 0..65535".format(class_id))
+
+ if not 0 <= instance_id <= 0xFFFF:
+ raise ValueError("Invalid Instance ID: {}, should be 0..65535".format(instance_id))
+
+ if not isinstance(attributes, dict):
+ raise TypeError("Attributes should be a dictionary")
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ now = datetime.utcnow()
+ try:
+ device_db = self._data[device_id]
+ class_db = device_db.get(class_id)
+ created = False
+
+ if class_db is None:
+ device_db[class_id] = {CLASS_ID_KEY: class_id}
+
+ class_db = device_db[class_id]
+ self._modified = now
+ created = True
+
+ instance_db = class_db.get(instance_id)
+ if instance_db is None:
+ class_db[instance_id] = {
+ INSTANCE_ID_KEY: instance_id,
+ CREATED_KEY: now,
+ MODIFIED_KEY: now,
+ ATTRIBUTES_KEY: dict()
+ }
+ instance_db = class_db[instance_id]
+ self._modified = now
+ created = True
+
+ changed = False
+
+ me_map = self._omci_agent.get_device(device_id).me_map
+ entity = me_map.get(class_id)
+
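+            # Normalize complex attribute values (packet, list and dict types) to their
+            # JSON form before storing, so comparison with the previously stored value
+            # is meaningful; simple scalar values are stored as-is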
+ for attribute, value in attributes.items():
+ assert isinstance(attribute, basestring)
+ assert value is not None, "Attribute '{}' value cannot be 'None'".\
+ format(attribute)
+
+ db_value = instance_db[ATTRIBUTES_KEY].get(attribute) \
+ if ATTRIBUTES_KEY in instance_db else None
+
+ if entity is not None and isinstance(value, basestring):
+ from scapy.fields import StrFixedLenField
+ attr_index = entity.attribute_name_to_index_map[attribute]
+ eca = entity.attributes[attr_index]
+ field = eca.field
+
+ if isinstance(field, StrFixedLenField):
+ from scapy.base_classes import Packet_metaclass
+ if isinstance(field.default, Packet_metaclass) \
+ and hasattr(field.default, 'json_from_value'):
+ # Value/hex of Packet Class to string
+ value = field.default.json_from_value(value)
+
+ if entity is not None and attribute in entity.attribute_name_to_index_map:
+ attr_index = entity.attribute_name_to_index_map[attribute]
+ eca = entity.attributes[attr_index]
+ field = eca.field
+
+ if hasattr(field, 'to_json'):
+ value = field.to_json(value, db_value)
+
+                # Complex packet types may have an attribute encoded as an object; this
+                # can be checked by seeing whether a to_json() conversion callable is
+                # defined
+ if hasattr(value, 'to_json'):
+ value = value.to_json()
+
+ # Other complex packet types may be a repeated list field (FieldListField)
+ elif isinstance(value, (list, dict)):
+ value = json.dumps(value, separators=(',', ':'))
+
+ assert db_value is None or isinstance(value, type(db_value)), \
+ "New value type for attribute '{}' type is changing from '{}' to '{}'".\
+ format(attribute, type(db_value), type(value))
+
+ if db_value is None or db_value != value:
+ instance_db[ATTRIBUTES_KEY][attribute] = value
+ changed = True
+
+ if changed:
+ instance_db[MODIFIED_KEY] = now
+ self._modified = now
+
+ return changed or created
+
+ except Exception as e:
+ self.log.error('set-failure', e=e, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+ raise
+
+ def delete(self, device_id, class_id, instance_id):
+ """
+ Delete an entity from the database if it exists. If all instances
+ of a class are deleted, the class is deleted as well.
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+
+ :returns: (bool) True if the instance was found and deleted. False
+ if it did not exist.
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError('class-id is 0..0xFFFF')
+
+ if not 0 <= instance_id <= 0xFFFF:
+ raise ValueError('instance-id is 0..0xFFFF')
+
+ try:
+ device_db = self._data[device_id]
+ class_db = device_db.get(class_id)
+
+ if class_db is None:
+ return False
+
+ instance_db = class_db.get(instance_id)
+ if instance_db is None:
+ return False
+
+ now = datetime.utcnow()
+ del class_db[instance_id]
+
+ if len(class_db) == 1: # Is only 'CLASS_ID_KEY' remaining
+ del device_db[class_id]
+
+ self._modified = now
+ return True
+
+ except Exception as e:
+ self.log.error('delete-failure', e=e)
+ raise
+
+ def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+ """
+ Get database information.
+
+ This method can be used to request information from the database to the detailed
+ level requested
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+ :param attributes: (list/set or str) Managed Entity instance's attributes
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises KeyError: If the requested device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('query', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
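+        # Narrow the result progressively: device -> class -> instance -> attribute(s),
+        # returning a decoded copy at whatever level the caller stopped specifying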
+ device_db = self._data[device_id]
+ if class_id is None:
+ return self._fix_dev_json_attributes(copy.copy(device_db), device_id)
+
+ if not isinstance(class_id, int):
+            raise TypeError('Class ID should be an integer')
+
+ me_map = self._omci_agent.get_device(device_id).me_map
+ entity = me_map.get(class_id)
+
+ class_db = device_db.get(class_id, dict())
+ if instance_id is None or len(class_db) == 0:
+ return self._fix_cls_json_attributes(copy.copy(class_db), entity)
+
+ if not isinstance(instance_id, int):
+            raise TypeError('Instance ID should be an integer')
+
+ instance_db = class_db.get(instance_id, dict())
+ if attributes is None or len(instance_db) == 0:
+ return self._fix_inst_json_attributes(copy.copy(instance_db), entity)
+
+ if not isinstance(attributes, (basestring, list, set)):
+ raise TypeError('Attributes should be a string or list/set of strings')
+
+ if not isinstance(attributes, (list, set)):
+ attributes = [attributes]
+
+ results = {attr: val for attr, val in instance_db[ATTRIBUTES_KEY].iteritems()
+ if attr in attributes}
+
+ for attr, attr_data in results.items():
+ attr_index = entity.attribute_name_to_index_map[attr]
+ eca = entity.attributes[attr_index]
+ results[attr] = self._fix_attr_json_attribute(copy.copy(attr_data), eca)
+
+ return results
+
+ #########################################################################
+ # Following routines are used to fix-up JSON encoded complex data. A
+ # nice side effect is that the values returned will be a deep-copy of
+ # the class/instance/attribute data of what is in the database. Note
+ # That other database values (created, modified, ...) will still reference
+    # that other database values (created, modified, ...) will still reference
+
+ def _fix_dev_json_attributes(self, dev_data, device_id):
+ for cls_id, cls_data in dev_data.items():
+ if isinstance(cls_id, int):
+ me_map = self._omci_agent.get_device(device_id).me_map
+ entity = me_map.get(cls_id)
+ dev_data[cls_id] = self._fix_cls_json_attributes(copy.copy(cls_data), entity)
+ return dev_data
+
+ def _fix_cls_json_attributes(self, cls_data, entity):
+ for inst_id, inst_data in cls_data.items():
+ if isinstance(inst_id, int):
+ cls_data[inst_id] = self._fix_inst_json_attributes(copy.copy(inst_data), entity)
+ return cls_data
+
+ def _fix_inst_json_attributes(self, inst_data, entity):
+ if ATTRIBUTES_KEY in inst_data:
+ for attr, attr_data in inst_data[ATTRIBUTES_KEY].items():
+ attr_index = entity.attribute_name_to_index_map[attr] \
+ if entity is not None and attr in entity.attribute_name_to_index_map else None
+ eca = entity.attributes[attr_index] if attr_index is not None else None
+ inst_data[ATTRIBUTES_KEY][attr] = self._fix_attr_json_attribute(copy.copy(attr_data), eca)
+ return inst_data
+
+ def _fix_attr_json_attribute(self, attr_data, eca):
+
+ try:
+ if eca is not None:
+ field = eca.field
+ if hasattr(field, 'load_json'):
+ value = field.load_json(attr_data)
+ return value
+
+ return json.loads(attr_data) if isinstance(attr_data, basestring) else attr_data
+
+ except ValueError:
+ return attr_data
+
+        except Exception:
+            # Unexpected decode failure; fall back to the raw stored value
+            return attr_data
+
+ def update_supported_managed_entities(self, device_id, managed_entities):
+ """
+ Update the supported OMCI Managed Entities for this device
+
+ :param device_id: (str) ONU Device ID
+ :param managed_entities: (set) Managed Entity class IDs
+ """
+ now = datetime.utcnow()
+ try:
+ device_db = self._data[device_id]
+
+ entities = {class_id: self._managed_entity_to_name(device_id, class_id)
+ for class_id in managed_entities}
+
+ device_db[ME_KEY] = entities
+ self._modified = now
+
+ except Exception as e:
+ self.log.error('set-me-failure', e=e)
+ raise
+
+ def _managed_entity_to_name(self, device_id, class_id):
+ me_map = self._omci_agent.get_device(device_id).me_map
+ entity = me_map.get(class_id)
+
+ return entity.__name__ if entity is not None else 'UnknownManagedEntity'
+
+ def update_supported_message_types(self, device_id, msg_types):
+ """
+        Update the supported OMCI message types for this device
+
+ :param device_id: (str) ONU Device ID
+ :param msg_types: (set) Message Type values (ints)
+ """
+ now = datetime.utcnow()
+ try:
+ msg_type_set = {msg_type.value for msg_type in msg_types}
+ self._data[device_id][MSG_TYPE_KEY] = msg_type_set
+ self._modified = now
+
+ except Exception as e:
+            self.log.error('set-msg-types-failure', e=e)
+ raise
diff --git a/python/adapters/extensions/omci/database/mib_db_ext.py b/python/adapters/extensions/omci/database/mib_db_ext.py
new file mode 100644
index 0000000..d06a7fa
--- /dev/null
+++ b/python/adapters/extensions/omci/database/mib_db_ext.py
@@ -0,0 +1,1061 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from mib_db_api import *
+from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, \
+ MibDeviceData, MibAttributeData, MessageType, ManagedEntity
+from voltha.extensions.omci.omci_entities import *
+from voltha.extensions.omci.omci_fields import *
+from scapy.fields import StrField, FieldListField, PacketField
+import json
+
+
+class MibDbStatistic(object):
+ """
+ For debug/tuning purposes.
+
+    With etcd around the Nov 8 time frame (took out some created/modified settings), seeing the following:
+
+ o Creates: Avg: 141.4 mS, Min: 47 mS, Max: 323 mS (148 samples)
+ o Sets: Avg: 206.4 mS, Min: 85 mS, Max: 781 mS (142 samples)
+
+ With etcd around the Nov 7 time frame, seeing the following:
+
+ o Creates: Avg: 124.4 mS, Min: 48 mS, Max: 531 mS (148 samples)
+ o Sets: Avg: 210.7 mS, Min: 82 mS, Max: 944 mS (140 samples)
+ o Gets: Avg: 10.9 mS, Min: 0 mS, Max: 384 mS ( 99 samples)
+ o Deletes: No samples
+
+ With etcd around the v1.5 time frame, seeing the following:
+
+ o Creates: Avg: 57.1 mS, Min: 76 mS, Max: 511 mS (146 samples)
+ o Sets: Avg: 303.9 mS, Min: 126 mS, Max: 689 mS (103 samples)
+ o Gets: Avg: 3.3 mS, Min: 0 mS, Max: 8 mS ( 9 samples)
+ o Deletes: No samples
+ """
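+    # Usage sketch (hypothetical numbers): MibDbExternal below keeps one MibDbStatistic
+    # per operation and a caller records the elapsed milliseconds, e.g.
+    #     stat = MibDbStatistic('get')
+    #     stat.increment(12)
+    #     stat.get_statistics()   # {'name': 'get', 'count': 1, 'avg_time': 12, ...}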
+ def __init__(self, name):
+ self._name = name
+ self._count = 0
+ self._total_time = 0 # Total milliseconds
+ self._min_time = 99999999
+ self._max_time = 0
+
+ def get_statistics(self):
+ return {
+ 'name': self._name,
+ 'count': self._count,
+ 'total_time': self._total_time,
+ 'min_time': self._min_time,
+ 'max_time': self._max_time,
+ 'avg_time': self._total_time / self._count if self._count > 0 else 0
+ }
+
+ def clear_statistics(self):
+ self._count = 0
+ self._total_time = 0 # Total milliseconds
+ self._min_time = 99999999
+ self._max_time = 0
+
+ def increment(self, time):
+ self._count += 1
+ self._total_time += time # Total milliseconds
+ if self._min_time > time:
+ self._min_time = time
+ if self._max_time < time:
+ self._max_time = time
+
+
+class MibDbExternal(MibDbApi):
+ """
+ A persistent external OpenOMCI MIB Database
+ """
+ CURRENT_VERSION = 1 # VOLTHA v1.3.0 release
+
+ _TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
+
+ # Paths from root proxy
+ MIB_PATH = '/omci_mibs'
+ DEVICE_PATH = MIB_PATH + '/{}' # .format(device_id)
+
+ # Classes, Instances, and Attributes as lists from root proxy
+ CLASSES_PATH = DEVICE_PATH + '/classes' # .format(device_id)
+ INSTANCES_PATH = DEVICE_PATH + '/classes/{}/instances' # .format(device_id, class_id)
+ ATTRIBUTES_PATH = DEVICE_PATH + '/classes/{}/instances/{}/attributes' # .format(device_id, class_id, instance_id)
+
+ # Single Class, Instance, and Attribute as objects from device proxy
+ CLASS_PATH = '/classes/{}' # .format(class_id)
+ INSTANCE_PATH = '/classes/{}/instances/{}' # .format(class_id, instance_id)
+ ATTRIBUTE_PATH = '/classes/{}/instances/{}/attributes/{}' # .format(class_id, instance_id
+ # attribute_name)
+
+ def __init__(self, omci_agent):
+ """
+ Class initializer
+ :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+ """
+ super(MibDbExternal, self).__init__(omci_agent)
+ self._core = omci_agent.core
+ # Some statistics to help with debug/tuning/...
+ self._statistics = {
+ 'get': MibDbStatistic('get'),
+ 'set': MibDbStatistic('set'),
+ 'create': MibDbStatistic('create'),
+ 'delete': MibDbStatistic('delete')
+ }
+
+ def start(self):
+ """
+ Start up/restore the database
+ """
+ self.log.debug('start')
+
+ if not self._started:
+ super(MibDbExternal, self).start()
+ root_proxy = self._core.get_proxy('/')
+
+ try:
+ base = root_proxy.get(MibDbExternal.MIB_PATH)
+ self.log.info('db-exists', num_devices=len(base))
+
+ except Exception as e:
+ self.log.exception('start-failure', e=e)
+ raise
+
+ def stop(self):
+ """
+        Shut down the database
+ """
+ self.log.debug('stop')
+
+ if self._started:
+ super(MibDbExternal, self).stop()
+ # TODO: Delete this method if nothing else is done except calling the base class
+
+ def _time_to_string(self, time):
+ return time.strftime(MibDbExternal._TIME_FORMAT) if time is not None else ''
+
+ def _string_to_time(self, time):
+ return datetime.strptime(time, MibDbExternal._TIME_FORMAT) if len(time) else None
+
+    def _attribute_to_string(self, device_id, class_id, attr_name, value, old_value=None):
+ """
+ Convert an ME's attribute value to string representation
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Class ID
+ :param attr_name: (str) Attribute Name (see EntityClasses)
+        :param value: (various) Attribute Value
+        :param old_value: (various) Optional previously stored value (passed to a field's to_json encoder)
+
+ :return: (str) String representation of the value
+ :raises KeyError: Device, Class ID, or Attribute does not exist
+ """
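+        # Encoding strategy: look up the Scapy field that models this attribute and pick
+        # a string form that _string_to_attribute() can later reverse: JSON for
+        # packet-like and list fields, plain str() for scalar fields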
+ try:
+ me_map = self._omci_agent.get_device(device_id).me_map
+
+ if class_id in me_map:
+ entity = me_map[class_id]
+ attr_index = entity.attribute_name_to_index_map[attr_name]
+ eca = entity.attributes[attr_index]
+ field = eca.field
+ else:
+ # Here for auto-defined MEs (ones not defined in ME Map)
+ from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
+ field = StrFixedLenField(UNKNOWN_CLASS_ATTRIBUTE_KEY, None, 24)
+
+ if isinstance(field, StrFixedLenField):
+ from scapy.base_classes import Packet_metaclass
+ if hasattr(value, 'to_json') and not isinstance(value, basestring):
+ # Packet Class to string
+ str_value = value.to_json()
+ elif isinstance(field.default, Packet_metaclass) \
+ and hasattr(field.default, 'json_from_value'):
+ #and not isinstance(value, basestring):
+ # Value/hex of Packet Class to string
+ str_value = field.default.json_from_value(value)
+ else:
+ str_value = str(value)
+
+ elif isinstance(field, (StrField, MACField, IPField)):
+ # For StrField, value is an str already
+ # For MACField, value is a string in ':' delimited form
+ # For IPField, value is a string in '.' delimited form
+ str_value = str(value)
+
+ elif isinstance(field, (ByteField, ShortField, IntField, LongField)):
+ # For ByteField, ShortField, IntField, and LongField value is an int
+ str_value = str(value)
+
+ elif isinstance(field, BitField):
+ # For BitField, value is a long
+ #
+ str_value = str(value)
+
+ elif hasattr(field, 'to_json'):
+ str_value = field.to_json(value, old_value)
+
+ elif isinstance(field, FieldListField):
+ str_value = json.dumps(value, separators=(',', ':'))
+
+ else:
+ self.log.warning('default-conversion', type=type(field),
+ class_id=class_id, attribute=attr_name, value=str(value))
+ str_value = str(value)
+
+ return str_value
+
+ except Exception as e:
+ self.log.exception('attr-to-string', device_id=device_id,
+ class_id=class_id, attr=attr_name,
+ value=value, e=e)
+ raise
+
+ def _string_to_attribute(self, device_id, class_id, attr_name, str_value):
+ """
+ Convert an ME's attribute value-string to its Scapy decode equivalent
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Class ID
+ :param attr_name: (str) Attribute Name (see EntityClasses)
+ :param str_value: (str) Attribute Value in string form
+
+        :return: (various) Attribute value in its Scapy decode form
+ :raises KeyError: Device, Class ID, or Attribute does not exist
+ """
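+        # Decoding strategy: mirror of _attribute_to_string(): look up the Scapy field
+        # for this attribute and convert the stored string/JSON back to the field's
+        # native Python type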
+ try:
+ me_map = self._omci_agent.get_device(device_id).me_map
+
+ if class_id in me_map:
+ entity = me_map[class_id]
+ attr_index = entity.attribute_name_to_index_map[attr_name]
+ eca = entity.attributes[attr_index]
+ field = eca.field
+ else:
+ # Here for auto-defined MEs (ones not defined in ME Map)
+ from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
+ field = StrFixedLenField(UNKNOWN_CLASS_ATTRIBUTE_KEY, None, 24)
+
+ if isinstance(field, StrFixedLenField):
+ from scapy.base_classes import Packet_metaclass
+ default = field.default
+ if isinstance(default, Packet_metaclass) and \
+ hasattr(default, 'to_json'):
+ value = json.loads(str_value)
+ else:
+ value = str_value
+
+ elif isinstance(field, MACField):
+ value = str_value
+
+ elif isinstance(field, IPField):
+ value = str_value
+
+ elif isinstance(field, (ByteField, ShortField, IntField, LongField)):
+ if str_value.lower() in ('true', 'false'):
+ str_value = '1' if str_value.lower() == 'true' else '0'
+ value = int(str_value)
+
+ elif isinstance(field, BitField):
+ value = long(str_value)
+
+ elif hasattr(field, 'load_json'):
+ value = field.load_json(str_value)
+
+ elif isinstance(field, FieldListField):
+ value = json.loads(str_value)
+
+ else:
+ self.log.warning('default-conversion', type=type(field),
+ class_id=class_id, attribute=attr_name, value=str_value)
+ value = None
+
+ return value
+
+ except Exception as e:
+ self.log.exception('string-to-attr', device_id=device_id,
+ class_id=class_id, attr=attr_name,
+ value=str_value, e=e)
+ raise
+
+ def add(self, device_id, overwrite=False):
+ """
+ Add a new ONU to database
+
+ :param device_id: (str) Device ID of ONU to add
+ :param overwrite: (bool) Overwrite existing entry if found.
+
+ :raises KeyError: If device already exists and 'overwrite' is False
+ """
+ self.log.debug('add-device', device_id=device_id, overwrite=overwrite)
+
+ now = datetime.utcnow()
+ found = False
+ root_proxy = self._core.get_proxy('/')
+
+ data = MibDeviceData(device_id=device_id,
+ created=self._time_to_string(now),
+ last_sync_time='',
+ mib_data_sync=0,
+ version=MibDbExternal.CURRENT_VERSION)
+ try:
+ dev_proxy = self._device_proxy(device_id)
+ found = True
+
+ if not overwrite:
+ # Device already exists
+ raise KeyError('Device with ID {} already exists in MIB database'.
+ format(device_id))
+
+ # Overwrite with new data
+ data = dev_proxy.get('/', depth=0)
+ self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id), data)
+ self._modified = now
+
+ except KeyError:
+ if found:
+ raise
+ # Did not exist, add it now
+ root_proxy.add(MibDbExternal.MIB_PATH, data)
+ self._created = now
+ self._modified = now
+
+ def remove(self, device_id):
+ """
+ Remove an ONU from the database
+
+ :param device_id: (str) Device ID of ONU to remove from database
+ """
+ self.log.debug('remove-device', device_id=device_id)
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+ raise TypeError('Device ID should be a string')
+
+ try:
+ # self._root_proxy.get(MibDbExternal.DEVICE_PATH.format(device_id))
+ self._root_proxy.remove(MibDbExternal.DEVICE_PATH.format(device_id))
+ self._modified = datetime.utcnow()
+
+ except KeyError:
+ # Did not exist, which is not a failure
+ pass
+
+ except Exception as e:
+ self.log.exception('remove-exception', device_id=device_id, e=e)
+ raise
+
+ @property
+ def _root_proxy(self):
+ return self._core.get_proxy('/')
+
+ def _device_proxy(self, device_id):
+ """
+ Return a config proxy to the OMCI MIB_DB leaf for a given device
+
+ :param device_id: (str) ONU Device ID
+ :return: (ConfigProxy) Configuration proxy rooted at OMCI MIB DB
+ :raises KeyError: If the device does not exist in the database
+ """
+ if not isinstance(device_id, basestring):
+ raise TypeError('Device ID should be a string')
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ return self._core.get_proxy(MibDbExternal.DEVICE_PATH.format(device_id))
+
+ def _class_proxy(self, device_id, class_id, create=False):
+ """
+ Get a config proxy to a specific managed entity class
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Class ID
+ :param create: (bool) If true, create default instance (and class)
+ :return: (ConfigProxy) Class configuration proxy
+
+ :raises DatabaseStateError: If database is not started
+ :raises KeyError: If Instance does not exist and 'create' is False
+ """
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError('class-id is 0..0xFFFF')
+
+ fmt = MibDbExternal.DEVICE_PATH + MibDbExternal.CLASS_PATH
+ path = fmt.format(device_id, class_id)
+
+ try:
+ return self._core.get_proxy(path)
+
+ except KeyError:
+ if not create:
+ # This can occur right after a MIB Reset if the ONU publishes AVCs right away
+ # and during the MIB audit resync for ONU created MEs in response to an OLT
+ # created ME. Fail here since these lookups occur during a verification
+ # 'query' and not during ME creation on resync. Calling code should handle
+ # the exception if it is expected to occur on occasion.
+ self.log.debug('class-proxy-does-not-exist', device_id=device_id,
+ class_id=class_id)
+ raise
+
+ # Create class
+ data = MibClassData(class_id=class_id)
+ root_path = MibDbExternal.CLASSES_PATH.format(device_id)
+ self._root_proxy.add(root_path, data)
+
+ return self._core.get_proxy(path)
+
+ def _instance_proxy(self, device_id, class_id, instance_id, create=False):
+ """
+ Get a config proxy to a specific managed entity instance
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Class ID
+ :param instance_id: (int) Instance ID
+ :param create: (bool) If true, create default instance (and class)
+ :return: (ConfigProxy) Instance configuration proxy
+
+ :raises DatabaseStateError: If database is not started
+ :raises KeyError: If Instance does not exist and 'create' is False
+ """
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+ raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError('class-id is 0..0xFFFF')
+
+ if not 0 <= instance_id <= 0xFFFF:
+ raise ValueError('instance-id is 0..0xFFFF')
+
+ fmt = MibDbExternal.DEVICE_PATH + MibDbExternal.INSTANCE_PATH
+ path = fmt.format(device_id, class_id, instance_id)
+
+ try:
+ return self._core.get_proxy(path)
+
+ except KeyError:
+ if not create:
+ # This can occur right after a MIB Reset if the ONU publishes AVCs right away
+ # and during the MIB audit resync for ONU created MEs in response to an OLT
+ # created ME. Fail here since these lookups occur during a verification
+ # 'query' and not during ME creation on resync. Calling code should handle
+ # the exception if it is expected to occur on occasion.
+ self.log.info('instance-proxy-does-not-exist', device_id=device_id,
+ class_id=class_id, instance_id=instance_id)
+ raise
+
+ # Create instance, first make sure class exists
+ self._class_proxy(device_id, class_id, create=True)
+
+ now = self._time_to_string(datetime.utcnow())
+ data = MibInstanceData(instance_id=instance_id, created=now, modified=now)
+ root_path = MibDbExternal.INSTANCES_PATH.format(device_id, class_id)
+ self._root_proxy.add(root_path, data)
+
+ return self._core.get_proxy(path)
+
+ def on_mib_reset(self, device_id):
+ """
+ Reset/clear the database for a specific Device
+
+ :param device_id: (str) ONU Device ID
+ :raises DatabaseStateError: If the database is not enabled
+ :raises KeyError: If the device does not exist in the database
+ """
+ self.log.debug('on-mib-reset', device_id=device_id)
+
+ try:
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=2)
+
+ # Wipe out any existing class IDs
+ class_ids = [c.class_id for c in data.classes]
+
+ if len(class_ids):
+ for class_id in class_ids:
+ device_proxy.remove(MibDbExternal.CLASS_PATH.format(class_id))
+
+ # Reset MIB Data Sync to zero
+ now = datetime.utcnow()
+ data = MibDeviceData(device_id=device_id,
+ created=data.created,
+ last_sync_time=data.last_sync_time,
+ mib_data_sync=0,
+ version=MibDbExternal.CURRENT_VERSION)
+ # Update
+ self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+ data)
+ self._modified = now
+ self.log.debug('mib-reset-complete', device_id=device_id)
+
+ except Exception as e:
+ self.log.exception('mib-reset-exception', device_id=device_id, e=e)
+ raise
+
+ def save_mib_data_sync(self, device_id, value):
+ """
+ Save the MIB Data Sync to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (int) Value to save
+ """
+ self.log.debug('save-mds', device_id=device_id, value=value)
+
+ try:
+ if not isinstance(value, int):
+ raise TypeError('MIB Data Sync is an integer')
+
+ if not 0 <= value <= 255:
+ raise ValueError('Invalid MIB-data-sync value {}. Must be 0..255'.
+ format(value))
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+
+ now = datetime.utcnow()
+ data.mib_data_sync = value
+
+ # Update
+ self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+ data)
+ self._modified = now
+ self.log.debug('save-mds-complete', device_id=device_id)
+
+ except Exception as e:
+ self.log.exception('save-mds-exception', device_id=device_id, e=e)
+ raise
+
+ def get_mib_data_sync(self, device_id):
+ """
+ Get the MIB Data Sync value last saved to the database for a device
+
+ :param device_id: (str) ONU Device ID
+ :return: (int) The Value or None if not found
+ """
+ self.log.debug('get-mds', device_id=device_id)
+
+ try:
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+ return int(data.mib_data_sync)
+
+ except KeyError:
+ return None # OMCI MIB_DB entry has not yet been created
+
+ except Exception as e:
+ self.log.exception('get-mds-exception', device_id=device_id, e=e)
+ raise
+
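+ # Round-trip example (illustrative device ID): after save_mib_data_sync('0001deadbeef', 42),
+ # get_mib_data_sync('0001deadbeef') returns 42; a device with no MIB_DB entry returns None.
+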
+ def save_last_sync(self, device_id, value):
+ """
+ Save the Last Sync time to the database in an easy location to access
+
+ :param device_id: (str) ONU Device ID
+ :param value: (DateTime) Value to save
+ """
+ self.log.debug('save-last-sync', device_id=device_id, time=str(value))
+
+ try:
+ if not isinstance(value, datetime):
+ raise TypeError('Expected a datetime object, got {}'.
+ format(type(value)))
+
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+
+ now = datetime.utcnow()
+ data.last_sync_time = self._time_to_string(value)
+
+ # Update
+ self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+ data)
+ self._modified = now
+ self.log.debug('save-last-sync-complete', device_id=device_id)
+
+ except Exception as e:
+ self.log.exception('save-last-sync-exception', device_id=device_id, e=e)
+ raise
+
+ def get_last_sync(self, device_id):
+ """
+ Get the Last Sync Time saved to the database for a device
+
+ :param device_id: (str) ONU Device ID
+ :return: (datetime) The value or None if not found
+ """
+ self.log.debug('get-last-sync', device_id=device_id)
+
+ try:
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+ return self._string_to_time(data.last_sync_time)
+
+ except KeyError:
+ return None # OMCI MIB_DB entry has not yet been created
+
+ except Exception as e:
+ self.log.exception('get-last-sync-exception', e=e)
+ raise
+
+ def _add_new_class(self, device_id, class_id, instance_id, attributes):
+ """
+ Create an entry for a new class in the external database
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+ """
+ self.log.debug('add', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+
+ now = self._time_to_string(datetime.utcnow())
+ attrs = []
+ for k, v in attributes.items():
+ if k == 'serial_number':
+ vendor_id = str(v[0:4])
+ vendor_specific = v[4:]
+ vendor_specific = str(vendor_specific.encode('hex'))
+ str_value = vendor_id + vendor_specific
+ attrs.append(MibAttributeData(name=k, value=str_value))
+ else:
+ str_value = self._attribute_to_string(device_id, class_id, k, v)
+ attrs.append(MibAttributeData(name=k, value=str_value))
+
+ class_data = MibClassData(class_id=class_id,
+ instances=[MibInstanceData(instance_id=instance_id,
+ created=now,
+ modified=now,
+ attributes=attrs)])
+
+ self._root_proxy.add(MibDbExternal.CLASSES_PATH.format(device_id), class_data)
+ self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+ entity_id=instance_id, attributes=attributes)
+ return True
+
+ def _add_new_instance(self, device_id, class_id, instance_id, attributes):
+ """
+ Create an entry for an instance of an existing class in the external database
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+ """
+ self.log.debug('add', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+
+ now = self._time_to_string(datetime.utcnow())
+ attrs = []
+ for k, v in attributes.items():
+ if k == 'serial_number':
+ vendor_id = str(v[0:4])
+ vendor_specific = v[4:]
+ vendor_specific = str(vendor_specific.encode('hex'))
+ str_value = vendor_id+vendor_specific
+ attrs.append(MibAttributeData(name=k, value=str_value))
+ else:
+ str_value = self._attribute_to_string(device_id, class_id, k, v)
+ attrs.append(MibAttributeData(name=k, value=str_value))
+
+ instance_data = MibInstanceData(instance_id=instance_id,
+ created=now,
+ modified=now,
+ attributes=attrs)
+
+ self._root_proxy.add(MibDbExternal.INSTANCES_PATH.format(device_id, class_id),
+ instance_data)
+
+ self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+ entity_id=instance_id, attributes=attributes)
+ return True
+
+ def set(self, device_id, class_id, instance_id, attributes):
+ """
+ Set a database value. This should only be called by the MIB synchronizer
+ and its related tasks
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) ME Entity ID
+ :param attributes: (dict) Attribute dictionary
+
+ :returns: (bool) True if the value was saved to the database. False if the
+ value was identical to the current instance
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('set', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+ try:
+ if not isinstance(device_id, basestring):
+ raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError("Invalid Class ID: {}, should be 0..65535".format(class_id))
+
+ if not 0 <= instance_id <= 0xFFFF:
+ raise ValueError("Invalid Instance ID: {}, should be 0..65535".format(instance_id))
+
+ if not isinstance(attributes, dict):
+ raise TypeError("Attributes should be a dictionary")
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ # Determine the best strategy to add the information
+ dev_proxy = self._device_proxy(device_id)
+
+ operation = 'set'
+ start_time = None
+ try:
+ class_data = dev_proxy.get(MibDbExternal.CLASS_PATH.format(class_id), deep=True)
+
+ inst_data = next((inst for inst in class_data.instances
+ if inst.instance_id == instance_id), None)
+
+ if inst_data is None:
+ operation = 'create'
+ start_time = datetime.utcnow()
+ return self._add_new_instance(device_id, class_id, instance_id, attributes)
+
+ # Possibly adding to or updating an existing instance
+ # Get instance proxy, creating it if needed
+
+ modified = False
+ new_attributes = []
+ exist_attr_indexes = dict()
+ attr_len = len(inst_data.attributes)
+
+ for index in xrange(0, attr_len):
+ name = inst_data.attributes[index].name
+ value = inst_data.attributes[index].value
+ exist_attr_indexes[name] = index
+ new_attributes.append(MibAttributeData(name=name, value=value))
+
+ for k, v in attributes.items():
+ try:
+ old_value = None if k not in exist_attr_indexes \
+ else new_attributes[exist_attr_indexes[k]].value
+
+ str_value = self._attribute_to_string(device_id, class_id, k, v, old_value)
+
+ if k not in exist_attr_indexes:
+ new_attributes.append(MibAttributeData(name=k, value=str_value))
+ modified = True
+
+ elif new_attributes[exist_attr_indexes[k]].value != str_value:
+ new_attributes[exist_attr_indexes[k]].value = str_value
+ modified = True
+
+ except Exception as e:
+ self.log.exception('save-error', e=e, class_id=class_id,
+ attr=k, value_type=type(v))
+
+ if modified:
+ now = datetime.utcnow()
+ start_time = now
+ new_data = MibInstanceData(instance_id=instance_id,
+ created=inst_data.created,
+ modified=self._time_to_string(now),
+ attributes=new_attributes)
+ dev_proxy.remove(MibDbExternal.INSTANCE_PATH.format(class_id, instance_id))
+ self._root_proxy.add(MibDbExternal.INSTANCES_PATH.format(device_id,
+ class_id), new_data)
+ return modified
+
+ except KeyError:
+ # Here if the class-id does not yet exist in the database
+ self.log.debug("adding-key-not-found", class_id=class_id)
+ return self._add_new_class(device_id, class_id, instance_id,
+ attributes)
+ finally:
+ if start_time is not None:
+ diff = datetime.utcnow() - start_time
+ # NOTE: Change to 'debug' when checked in, manually change to 'info'
+ # for development testing.
+ self.log.debug('db-{}-time'.format(operation), milliseconds=diff.microseconds/1000)
+ self._statistics[operation].increment(diff.microseconds/1000)
+
+ except Exception as e:
+ self.log.exception('set-exception', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes, e=e)
+ raise
+
+ def delete(self, device_id, class_id, entity_id):
+ """
+ Delete an entity from the database if it exists. If all instances
+ of a class are deleted, the class is deleted as well.
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME Entity ID
+
+ :returns: (bool) True if the instance was found and deleted. False
+ if it did not exist.
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('delete', device_id=device_id, class_id=class_id,
+ entity_id=entity_id)
+
+ if not self._started:
+ raise DatabaseStateError('The Database is not currently active')
+
+ if not isinstance(device_id, basestring):
+ raise TypeError('Device ID should be a string')
+
+ if not 0 <= class_id <= 0xFFFF:
+ raise ValueError('class-id is 0..0xFFFF')
+
+ if not 0 <= entity_id <= 0xFFFF:
+ raise ValueError('instance-id is 0..0xFFFF')
+
+ start_time = datetime.utcnow()
+ try:
+ # Remove instance
+ self._instance_proxy(device_id, class_id, entity_id).remove('/')
+ now = datetime.utcnow()
+
+ # If resulting class has no instance, remove it as well
+ class_proxy = self._class_proxy(device_id, class_id)
+ class_data = class_proxy.get('/', depth=1)
+
+ if len(class_data.instances) == 0:
+ class_proxy.remove('/')
+
+ self._modified = now
+ return True
+
+ except KeyError:
+ return False # Not found
+
+ except Exception as e:
+ self.log.exception('delete-exception', device_id=device_id, e=e)
+ raise
+
+ finally:
+ diff = datetime.utcnow() - start_time
+ # NOTE: Change to 'debug' when checked in, manually change to 'info'
+ # for development testing.
+ self.log.debug('db-delete-time', milliseconds=diff.microseconds/1000)
+ self._statistics['delete'].increment(diff.microseconds/1000)
+
+ def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+ """
+ Get database information.
+
+ This method can be used to request information from the database at the
+ level of detail requested
+
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+ :param attributes: (list/set or str) Managed Entity instance's attributes
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises KeyError: If the requested device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('query', device_id=device_id, class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+
+ start_time = datetime.utcnow()
+ end_time = None
+ try:
+ if class_id is None:
+ # Get full device info
+ dev_data = self._device_proxy(device_id).get('/', depth=-1)
+ end_time = datetime.utcnow()
+ data = self._device_to_dict(dev_data)
+
+ elif instance_id is None:
+ # Get all instances of the class
+ try:
+ cls_data = self._class_proxy(device_id, class_id).get('/', depth=-1)
+ end_time = datetime.utcnow()
+ data = self._class_to_dict(device_id, cls_data)
+
+ except KeyError:
+ data = dict()
+
+ else:
+ # Get all attributes of a specific ME
+ try:
+ inst_data = self._instance_proxy(device_id, class_id, instance_id).\
+ get('/', depth=-1)
+ end_time = datetime.utcnow()
+
+ if attributes is None:
+ # All Attributes
+ data = self._instance_to_dict(device_id, class_id, inst_data)
+
+ else:
+ # Specific attribute(s)
+ if isinstance(attributes, basestring):
+ attributes = {attributes}
+
+ data = {
+ attr.name: self._string_to_attribute(device_id,
+ class_id,
+ attr.name,
+ attr.value)
+ for attr in inst_data.attributes if attr.name in attributes}
+
+ except KeyError:
+ data = dict()
+
+ return data
+
+ except KeyError:
+ self.log.warn('query-no-device', device_id=device_id)
+ raise
+
+ except Exception as e:
+ self.log.exception('query-exception', device_id=device_id, e=e)
+ raise
+
+ finally:
+ if end_time is not None:
+ diff = end_time - start_time
+ # NOTE: Change to 'debug' when checked in, manually change to 'info'
+ # for development testing.
+ self.log.debug('db-get-time', milliseconds=diff.microseconds/1000, class_id=class_id,
+ instance_id=instance_id)
+ self._statistics['get'].increment(diff.microseconds/1000)
+
+ def _instance_to_dict(self, device_id, class_id, instance):
+ if not isinstance(instance, MibInstanceData):
+ raise TypeError('{} is not of type MibInstanceData'.format(type(instance)))
+
+ data = {
+ INSTANCE_ID_KEY: instance.instance_id,
+ CREATED_KEY: self._string_to_time(instance.created),
+ MODIFIED_KEY: self._string_to_time(instance.modified),
+ ATTRIBUTES_KEY: dict()
+ }
+ for attribute in instance.attributes:
+ data[ATTRIBUTES_KEY][attribute.name] = self._string_to_attribute(device_id,
+ class_id,
+ attribute.name,
+ attribute.value)
+ return data
+
+ def _class_to_dict(self, device_id, val):
+ if not isinstance(val, MibClassData):
+ raise TypeError('{} is not of type MibClassData'.format(type(val)))
+
+ data = {
+ CLASS_ID_KEY: val.class_id,
+ }
+ for instance in val.instances:
+ data[instance.instance_id] = self._instance_to_dict(device_id,
+ val.class_id,
+ instance)
+ return data
+
+ def _device_to_dict(self, val):
+ if not isinstance(val, MibDeviceData):
+ raise TypeError('{} is not of type MibDeviceData'.format(type(val)))
+
+ data = {
+ DEVICE_ID_KEY: val.device_id,
+ CREATED_KEY: self._string_to_time(val.created),
+ LAST_SYNC_KEY: self._string_to_time(val.last_sync_time),
+ MDS_KEY: val.mib_data_sync,
+ VERSION_KEY: val.version,
+ ME_KEY: dict(),
+ MSG_TYPE_KEY: set()
+ }
+ for class_data in val.classes:
+ data[class_data.class_id] = self._class_to_dict(val.device_id,
+ class_data)
+ for managed_entity in val.managed_entities:
+ data[ME_KEY][managed_entity.class_id] = managed_entity.name
+
+ for msg_type in val.message_types:
+ data[MSG_TYPE_KEY].add(msg_type.message_type)
+
+ return data
+
+ def _managed_entity_to_name(self, device_id, class_id):
+ me_map = self._omci_agent.get_device(device_id).me_map
+ entity = me_map.get(class_id)
+
+ return entity.__name__ if entity is not None else 'UnknownManagedEntity'
+
+ def update_supported_managed_entities(self, device_id, managed_entities):
+ """
+ Update the supported OMCI Managed Entities for this device
+ :param device_id: (str) ONU Device ID
+ :param managed_entities: (set) Managed Entity class IDs
+ """
+ try:
+ me_list = [ManagedEntity(class_id=class_id,
+ name=self._managed_entity_to_name(device_id,
+ class_id))
+ for class_id in managed_entities]
+
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+
+ now = datetime.utcnow()
+ data.managed_entities.extend(me_list)
+
+ # Update
+ self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+ data)
+ self._modified = now
+ self.log.debug('save-me-list-complete', device_id=device_id)
+
+ except Exception as e:
+ self.log.exception('add-me-failure', e=e, me_list=managed_entities)
+ raise
+
+ def update_supported_message_types(self, device_id, msg_types):
+ """
+ Update the supported OMCI Message Types for this device
+ :param device_id: (str) ONU Device ID
+ :param msg_types: (set) Supported Message Type enumerations (their .value is stored)
+ """
+ try:
+ msg_type_list = [MessageType(message_type=msg_type.value)
+ for msg_type in msg_types]
+
+ device_proxy = self._device_proxy(device_id)
+ data = device_proxy.get(depth=0)
+
+ now = datetime.utcnow()
+ data.message_types.extend(msg_type_list)
+
+ # Update
+ self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+ data)
+ self._modified = now
+ self.log.debug('save-msg-types-complete', device_id=device_id)
+
+ except Exception as e:
+ self.log.exception('add-msg-types-failure', e=e, msg_types=msg_types)
+ raise
diff --git a/python/adapters/extensions/omci/me_frame.py b/python/adapters/extensions/omci/me_frame.py
new file mode 100644
index 0000000..cf1cf3e
--- /dev/null
+++ b/python/adapters/extensions/omci/me_frame.py
@@ -0,0 +1,475 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+OMCI Managed Entity Message support base class
+"""
+from voltha.extensions.omci.omci import *
+
+# abbreviations
+OP = EntityOperations
+AA = AttributeAccess
+
+
+class MEFrame(object):
+ """Base class to help simplify Frame Creation"""
+ def __init__(self, entity_class, entity_id, data):
+ assert issubclass(entity_class, EntityClass), \
+ "'{}' must be a subclass of MEFrame".format(entity_class)
+ self.check_type(entity_id, int)
+
+ if not 0 <= entity_id <= 0xFFFF:
+ raise ValueError('entity_id should be 0..65535')
+
+ self.log = structlog.get_logger()
+ self._class = entity_class
+ self._entity_id = entity_id
+ self.data = data
+
+ def __str__(self):
+ return '{}: Entity_ID: {}, Data: {}'.\
+ format(self.entity_class_name, self._entity_id, self.data)
+
+ @property
+ def entity_class(self):
+ """
+ The Entity Class for this ME
+ :return: (EntityClass) Entity class
+ """
+ return self._class
+
+ @property
+ def entity_class_name(self):
+ return self._class.__name__
+
+ @property
+ def entity_id(self):
+ """
+ The Entity ID for this ME frame
+ :return: (int) Entity ID (0..0xFFFF)
+ """
+ return self._entity_id
+
+ @staticmethod
+ def check_type(param, types):
+ if not isinstance(param, types):
+ raise TypeError("Parameter '{}' should be a {}".format(param, types))
+
+ def _check_operation(self, operation):
+ allowed = self.entity_class.mandatory_operations | self.entity_class.optional_operations
+ assert operation in allowed, "{} not allowed for '{}'".format(operation.name,
+ self.entity_class_name)
+
+ def _check_attributes(self, attributes, access):
+ keys = attributes.keys() if isinstance(attributes, dict) else attributes
+ for attr_name in keys:
+ # Bad attribute name (invalid or spelling error)?
+ index = self.entity_class.attribute_name_to_index_map.get(attr_name)
+ if index is None:
+ raise KeyError("Attribute '{}' is not valid for '{}'".
+ format(attr_name, self.entity_class_name))
+ # Invalid access?
+ assert access in self.entity_class.attributes[index].access, \
+ "Access '{}' for attribute '{}' is not valid for '{}'".format(access.name,
+ attr_name,
+ self.entity_class_name)
+
+ if access.value in [AA.W.value, AA.SBC.value] and isinstance(attributes, dict):
+ for attr_name, value in attributes.iteritems():
+ index = self.entity_class.attribute_name_to_index_map.get(attr_name)
+ attribute = self.entity_class.attributes[index]
+ if not attribute.valid(value):
+ raise ValueError("Invalid value '{}' for attribute '{}' of '{}".
+ format(value, attr_name, self.entity_class_name))
+
+ @staticmethod
+ def _attr_to_data(attributes):
+ """
+ Convert an object into the 'data' set or dictionary for get/set/create/delete
+ requests.
+
+ This method takes a 'string', 'list', or 'set' for get requests and
+ converts it to a 'set' of attributes.
+
+ For create/set requests a dictionary of attribute/value pairs is required
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, set, or dict can be provided. For create/set
+ operations, a dictionary should be provided. For delete
+ the attributes may be None since they are ignored.
+
+ :return: (set, dict) set for get/deletes, dict for create/set
+ """
+ if isinstance(attributes, basestring):
+ # data = [str(attributes)]
+ data = set()
+ data.add(str(attributes))
+
+ elif isinstance(attributes, list):
+ assert all(isinstance(attr, basestring) for attr in attributes),\
+ 'attribute list must be strings'
+ data = {str(attr) for attr in attributes}
+ assert len(data) == len(attributes), 'Attributes were not unique'
+
+ elif isinstance(attributes, set):
+ assert all(isinstance(attr, basestring) for attr in attributes),\
+ 'attribute set must be strings'
+ data = {str(attr) for attr in attributes}
+
+ elif isinstance(attributes, (dict, type(None))):
+ data = attributes
+
+ else:
+ raise TypeError("Unsupported attributes type '{}'".format(type(attributes)))
+
+ return data
+
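+ # Illustrative results of _attr_to_data() (attribute names are arbitrary):
+ #   'mib_data_sync' -> {'mib_data_sync'}
+ #   ['gem_port_id', 'encryption_state'] -> {'gem_port_id', 'encryption_state'}
+ #   {'administrative_state': 0} -> {'administrative_state': 0}  (dicts pass through)
+ #   None -> None  (attributes are ignored for deletes)
+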
+ def create(self):
+ """
+ Create a Create request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert hasattr(self.entity_class, 'class_id'), 'class_id required for Create actions'
+ assert hasattr(self, 'entity_id'), 'entity_id required for Create actions'
+ assert hasattr(self, 'data'), 'data required for Create actions'
+
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, dict)
+ assert len(data) > 0, 'No attributes supplied'
+
+ self._check_operation(OP.Create)
+ self._check_attributes(data, AA.Writable)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ data=data
+ ))
+
+ def delete(self):
+ """
+ Create a Delete request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ self._check_operation(OP.Delete)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciDelete.message_id,
+ omci_message=OmciDelete(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id')
+ ))
+
+ def set(self):
+ """
+ Create a Set request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert hasattr(self, 'data'), 'data required for Set actions'
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, dict)
+ assert len(data) > 0, 'No attributes supplied'
+
+ self._check_operation(OP.Set)
+ self._check_attributes(data, AA.Writable)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ attributes_mask=self.entity_class.mask_for(*data.keys()),
+ data=data
+ ))
+
+ def get(self):
+ """
+ Create a Get request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert hasattr(self, 'data'), 'data required for Get actions'
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, (list, set, dict))
+ assert len(data) > 0, 'No attributes supplied'
+
+ mask_set = data.keys() if isinstance(data, dict) else data
+
+ self._check_operation(OP.Get)
+ self._check_attributes(mask_set, AA.Readable)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciGet.message_id,
+ omci_message=OmciGet(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ attributes_mask=self.entity_class.mask_for(*mask_set)
+ ))
+
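+ # Illustrative usage (the ME frame subclass and attribute name are hypothetical;
+ # concrete subclasses live in omci_me.py):
+ #   frame = SomeMeFrame(entity_id=0x8001, attributes={'administrative_state'}).get()
+ #   # 'frame' is an OmciFrame ready to be handed to the OMCI communications channel
+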
+ def reboot(self, reboot_code=0):
+ """
+ Create a Reboot request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ self._check_operation(OP.Reboot)
+ assert 0 <= reboot_code <= 2, 'Reboot code must be 0..2'
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciReboot.message_id,
+ omci_message=OmciReboot(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ reboot_code=reboot_code
+ ))
+
+ def mib_reset(self):
+ """
+ Create a MIB Reset request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ self._check_operation(OP.MibReset)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciMibReset.message_id,
+ omci_message=OmciMibReset(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id')
+ ))
+
+ def mib_upload(self):
+ """
+ Create a MIB Upload request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ self._check_operation(OP.MibUpload)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciMibUpload.message_id,
+ omci_message=OmciMibUpload(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id')
+ ))
+
+ def mib_upload_next(self):
+ """
+ Create a MIB Upload Next request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert hasattr(self, 'data'), 'data required for MIB Upload Next actions'
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, dict)
+ assert len(data) > 0, 'No attributes supplied'
+ assert 'mib_data_sync' in data, "'mib_data_sync' not in attributes list"
+
+ self._check_operation(OP.MibUploadNext)
+ self._check_attributes(data, AA.Writable)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciMibUploadNext.message_id,
+ omci_message=OmciMibUploadNext(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ command_sequence_number=data['mib_data_sync']
+ ))
+
+ def get_next(self):
+ """
+ Create a Get Next request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert hasattr(self, 'data'), 'data required for Get Next actions'
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, dict)
+ assert len(data) == 1, 'Only one attribute should be specified'
+
+ mask_set = data.keys() if isinstance(data, dict) else data
+
+ self._check_operation(OP.GetNext)
+ self._check_attributes(mask_set, AA.Readable)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciGetNext.message_id,
+ omci_message=OmciGetNext(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ attributes_mask=self.entity_class.mask_for(*mask_set),
+ command_sequence_number=data.values()[0]
+ ))
+
+ def synchronize_time(self, time=None):
+ """
+ Create a Synchronize Time request frame for this ME
+ :param time: (DateTime) Time to set to. If none, use UTC
+ :return: (OmciFrame) OMCI Frame
+ """
+ from datetime import datetime
+ self._check_operation(OP.SynchronizeTime)
+ dt = time or datetime.utcnow()
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciSynchronizeTime.message_id,
+ omci_message=OmciSynchronizeTime(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ year=dt.year,
+ month=dt.month,
+ day=dt.day,
+ hour=dt.hour,
+ minute=dt.minute,
+ second=dt.second,
+ ))
+
+ def get_all_alarm(self, alarm_retrieval_mode):
+ """
+ Create a Get All Alarms request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ self._check_operation(OP.GetAllAlarms)
+ assert 0 <= alarm_retrieval_mode <= 1, 'Alarm retrieval mode must be 0..1'
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciGetAllAlarms.message_id,
+ omci_message=OmciGetAllAlarms(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ alarm_retrieval_mode=alarm_retrieval_mode
+ ))
+
+ def get_all_alarm_next(self, command_sequence_number):
+ """
+ Create a Get All Alarms Next request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ self._check_operation(OP.GetAllAlarmsNext)
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciGetAllAlarmsNext.message_id,
+ omci_message=OmciGetAllAlarmsNext(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ command_sequence_number=command_sequence_number
+ ))
+
+ def start_software_download(self, image_size, window_size):
+ """
+ Create Start Software Download message
+ :return: (OmciFrame) OMCI Frame
+ """
+ self.log.debug("--> start_software_download")
+ self._check_operation(OP.StartSoftwareDownload)
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciStartSoftwareDownload.message_id,
+ omci_message=OmciStartSoftwareDownload(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ window_size=window_size,
+ image_size=image_size,
+ instance_id=getattr(self, 'entity_id')
+ ))
+
+ def end_software_download(self, crc32, image_size):
+ """
+ Create End Software Download message
+ :return: (OmciFrame) OMCI Frame
+ """
+ self._check_operation(OP.EndSoftwareDownload)
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciEndSoftwareDownload.message_id,
+ omci_message=OmciEndSoftwareDownload(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ crc32=crc32,
+ image_size=image_size,
+ instance_id=getattr(self, 'entity_id')
+ ))
+
+ def download_section(self, is_last_section, section_number, data):
+ """
+ Create Download Section message
+ :param is_last_section: (bool) indicates the last section in the window
+ :param section_number: (int) current section number
+ :param data: (bytes) data to be sent in the section
+ :return: (OmciFrame) OMCI Frame
+ """
+ self.log.debug("--> download_section: ", section_number=section_number)
+
+ self._check_operation(OP.DownloadSection)
+ if is_last_section:
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciDownloadSectionLast.message_id,
+ omci_message=OmciDownloadSectionLast(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ section_number=section_number,
+ data=data
+ ))
+ else:
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciDownloadSection.message_id,
+ omci_message=OmciDownloadSection(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ section_number=section_number,
+ data=data
+ ))
+
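+ # Note (descriptive sketch only): callers typically slice the image into 31-byte
+ # baseline sections (see DEFAULT_OMCI_DOWNLOAD_SECTION_SIZE in omci_cc.py) and set
+ # is_last_section=True for the final section of each download window.
+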
+ def activate_image(self, activate_flag=0):
+ """
+ Activate Image message
+ :param activate_flag: 00 = Activate image unconditionally
+ 01 = Activate image only if no POTS/VoIP calls are in progress
+ 10 = Activate image only if no emergency call is in progress
+ :return: (OmciFrame) OMCI Frame
+ """
+ self.log.debug("--> activate_image", entity=self.entity_id, flag=activate_flag)
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciActivateImage.message_id,
+ omci_message=OmciActivateImage(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ activate_flag=activate_flag
+ ))
+
+ def commit_image(self):
+ """
+ Commit Image message
+ :return: (OmciFrame) OMCI Frame
+ """
+ self.log.debug("--> commit_image", entity=self.entity_id)
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciCommitImage.message_id,
+ omci_message=OmciCommitImage(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ ))
+
diff --git a/python/adapters/extensions/omci/omci.py b/python/adapters/extensions/omci/omci.py
new file mode 100644
index 0000000..5a94146
--- /dev/null
+++ b/python/adapters/extensions/omci/omci.py
@@ -0,0 +1,23 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Omci message generator and parser implementation using scapy
+"""
+
+from omci_frame import OmciFrame
+from omci_messages import *
+from omci_entities import *
diff --git a/python/adapters/extensions/omci/omci_cc.py b/python/adapters/extensions/omci/omci_cc.py
new file mode 100644
index 0000000..e1c6019
--- /dev/null
+++ b/python/adapters/extensions/omci/omci_cc.py
@@ -0,0 +1,1000 @@
+#
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+OMCI Message support
+"""
+
+import sys
+import arrow
+from twisted.internet import reactor, defer
+from twisted.internet.defer import TimeoutError, CancelledError, failure, fail, succeed, inlineCallbacks
+from common.frameio.frameio import hexify
+from voltha.extensions.omci.omci import *
+from voltha.extensions.omci.omci_me import OntGFrame, OntDataFrame, SoftwareImageFrame
+from voltha.extensions.omci.me_frame import MEFrame
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes
+from common.event_bus import EventBusClient
+from enum import IntEnum
+from binascii import hexlify
+
+
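+# NOTE: this local definition shadows the hexify imported from
+# common.frameio.frameio above.
+# Example (illustrative): hexify('\x00\x01\x2e') -> '00012e'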
+def hexify(buffer):
+ """Return a hexadecimal string encoding of input buffer"""
+ return ''.join('%02x' % ord(c) for c in buffer)
+
+
+DEFAULT_OMCI_TIMEOUT = 10 # 3 # Seconds
+MAX_OMCI_REQUEST_AGE = 60 # Seconds
+DEFAULT_OMCI_DOWNLOAD_SECTION_SIZE = 31 # Bytes
+MAX_TABLE_ROW_COUNT = 512 # Keep get-next logic reasonable
+
+CONNECTED_KEY = 'connected'
+TX_REQUEST_KEY = 'tx-request'
+RX_RESPONSE_KEY = 'rx-response'
+UNKNOWN_CLASS_ATTRIBUTE_KEY = 'voltha-unknown-blob'
+
+
+class OmciCCRxEvents(IntEnum):
+ AVC_Notification = 0
+ MIB_Upload = 1
+ MIB_Upload_Next = 2
+ Create = 3
+ Delete = 4
+ Set = 5
+ Alarm_Notification = 6
+ Test_Result = 7
+ MIB_Reset = 8
+ Connectivity = 9
+ Get_ALARM_Get = 10
+ Get_ALARM_Get_Next = 11
+
+
+# abbreviations
+OP = EntityOperations
+RxEvent = OmciCCRxEvents
+
+
+class OMCI_CC(object):
+ """ Handle OMCI Communication Channel specifics for Adtran ONUs"""
+
+ MIN_OMCI_TX_ID_LOW_PRIORITY = 0x0001 # 2 Octets max
+ MAX_OMCI_TX_ID_LOW_PRIORITY = 0x7FFF # 2 Octets max
+ MIN_OMCI_TX_ID_HIGH_PRIORITY = 0x8000 # 2 Octets max
+ MAX_OMCI_TX_ID_HIGH_PRIORITY = 0xFFFF # 2 Octets max
+ LOW_PRIORITY = 0
+ HIGH_PRIORITY = 1
+
+ # Offset into some tuples for pending lists and tx in progress
+ PENDING_DEFERRED = 0
+ PENDING_FRAME = 1
+ PENDING_TIMEOUT = 2
+ PENDING_RETRY = 3
+
+ REQUEST_TIMESTAMP = 0
+ REQUEST_DEFERRED = 1
+ REQUEST_FRAME = 2
+ REQUEST_TIMEOUT = 3
+ REQUEST_RETRY = 4
+ REQUEST_DELAYED_CALL = 5
+
+ _frame_to_event_type = {
+ OmciMibResetResponse.message_id: RxEvent.MIB_Reset,
+ OmciMibUploadResponse.message_id: RxEvent.MIB_Upload,
+ OmciMibUploadNextResponse.message_id: RxEvent.MIB_Upload_Next,
+ OmciCreateResponse.message_id: RxEvent.Create,
+ OmciDeleteResponse.message_id: RxEvent.Delete,
+ OmciSetResponse.message_id: RxEvent.Set,
+ OmciGetAllAlarmsResponse.message_id: RxEvent.Get_ALARM_Get,
+ OmciGetAllAlarmsNextResponse.message_id: RxEvent.Get_ALARM_Get_Next
+ }
+
+ def __init__(self, adapter_agent, device_id, me_map=None,
+ clock=None):
+ self.log = structlog.get_logger(device_id=device_id)
+ self._adapter_agent = adapter_agent
+ self._device_id = device_id
+ self._proxy_address = None
+ self._enabled = False
+ self._extended_messaging = False
+ self._me_map = me_map
+ if clock is None:
+ self.reactor = reactor
+ else:
+ self.reactor = clock
+
+ # Support 2 levels of priority since only baseline message set supported
+ self._tx_tid = [OMCI_CC.MIN_OMCI_TX_ID_LOW_PRIORITY, OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY]
+ self._tx_request = [None, None] # Tx in progress (timestamp, defer, frame, timeout, retry, delayedCall)
+ self._pending = [list(), list()] # pending queue (deferred, tx_frame, timeout, retry)
+ self._rx_response = [None, None]
+
+ # Statistics
+ self._tx_frames = 0
+ self._rx_frames = 0
+ self._rx_unknown_tid = 0 # Rx OMCI with no Tx TID match
+ self._rx_onu_frames = 0 # Autonomously generated ONU frames
+ self._rx_onu_discards = 0 # Autonomously generated ONU unknown message types
+ self._rx_timeouts = 0
+ self._rx_late = 0 # Frame response received after timeout on Tx
+ self._rx_unknown_me = 0 # Number of managed entities Rx without a decode definition
+ self._tx_errors = 0 # Exceptions during tx request
+ self._consecutive_errors = 0 # Rx & Tx errors in a row, a good RX resets this to 0
+ self._reply_min = sys.maxint # Fastest successful tx -> rx
+ self._reply_max = 0 # Longest successful tx -> rx
+ self._reply_sum = 0.0 # Total seconds for successful tx->rx (float for average)
+ self._max_hp_tx_queue = 0 # Maximum size of high priority tx pending queue
+ self._max_lp_tx_queue = 0 # Maximum size of low priority tx pending queue
+
+ self.event_bus = EventBusClient()
+
+ # If a list of custom ME Entities classes were provided, insert them into
+ # main class_id to entity map.
+ # TODO: If this class becomes hidden from the ONU DA, move this to the OMCI State Machine runner
+
+ def __str__(self):
+ return "OMCISupport: {}".format(self._device_id)
+
+ def _get_priority_index(self, high_priority):
+ """ Centralized logic to help make extended message support easier in the future"""
+ return OMCI_CC.HIGH_PRIORITY if high_priority and not self._extended_messaging \
+ else OMCI_CC.LOW_PRIORITY
+
+ def _tid_is_high_priority(self, tid):
+ """ Centralized logic to help make extended message support easier in the future"""
+
+ return not self._extended_messaging and \
+ OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY <= tid <= OMCI_CC.MAX_OMCI_TX_ID_HIGH_PRIORITY
+
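+ # Illustrative TID partitioning with the baseline message set:
+ #   TIDs 0x0001..0x7FFF map to the low-priority queue and 0x8000..0xFFFF to the
+ #   high-priority queue; with extended messaging everything uses low priority.
+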
+ @staticmethod
+ def event_bus_topic(device_id, event):
+ """
+ Get the topic name for a given event Frame Type
+ :param device_id: (str) ONU Device ID
+ :param event: (OmciCCRxEvents) Type of event
+ :return: (str) Topic string
+ """
+ assert event in OmciCCRxEvents, \
+ 'Event {} is not an OMCI-CC Rx Event'.format(event.name)
+
+ return 'omci-rx:{}:{}'.format(device_id, event.name)
+
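+ # Example (illustrative device ID): event_bus_topic('0001deadbeef', RxEvent.AVC_Notification)
+ # returns 'omci-rx:0001deadbeef:AVC_Notification'.
+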
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @enabled.setter
+ def enabled(self, value):
+ """
+ Enable/disable the OMCI Communications Channel
+
+ :param value: (boolean) True to enable, False to disable
+ """
+ assert isinstance(value, bool), 'enabled is a boolean'
+
+ if self._enabled != value:
+ self._enabled = value
+ if self._enabled:
+ self._start()
+ else:
+ self._stop()
+
+ @property
+ def tx_frames(self):
+ return self._tx_frames
+
+ @property
+ def rx_frames(self):
+ return self._rx_frames
+
+ @property
+ def rx_unknown_tid(self):
+ return self._rx_unknown_tid # Tx TID not found
+
+ @property
+ def rx_unknown_me(self):
+ return self._rx_unknown_me
+
+ @property
+ def rx_onu_frames(self):
+ return self._rx_onu_frames
+
+ @property
+ def rx_onu_discards(self):
+ return self._rx_onu_discards # Attribute Value change autonomous overflows
+
+ @property
+ def rx_timeouts(self):
+ return self._rx_timeouts
+
+ @property
+ def rx_late(self):
+ return self._rx_late
+
+ @property
+ def tx_errors(self):
+ return self._tx_errors
+
+ @property
+ def consecutive_errors(self):
+ return self._consecutive_errors
+
+ @property
+ def reply_min(self):
+ return int(round(self._reply_min * 1000.0)) # Milliseconds
+
+ @property
+ def reply_max(self):
+ return int(round(self._reply_max * 1000.0)) # Milliseconds
+
+ @property
+ def reply_average(self):
+ avg = self._reply_sum / self._rx_frames if self._rx_frames > 0 else 0.0
+ return int(round(avg * 1000.0)) # Milliseconds
+
+ @property
+ def hp_tx_queue_len(self):
+ return len(self._pending[OMCI_CC.HIGH_PRIORITY])
+
+ @property
+ def lp_tx_queue_len(self):
+ return len(self._pending[OMCI_CC.LOW_PRIORITY])
+
+ @property
+ def max_hp_tx_queue(self):
+ return self._max_hp_tx_queue
+
+ @property
+ def max_lp_tx_queue(self):
+ return self._max_lp_tx_queue
+
+ def _start(self):
+ """
+ Start the OMCI Communications Channel
+ """
+ assert self._enabled, 'Start should only be called if enabled'
+ self.flush()
+
+ device = self._adapter_agent.get_device(self._device_id)
+ self._proxy_address = device.proxy_address
+
+ def _stop(self):
+ """
+ Stop the OMCI Communications Channel
+ """
+ assert not self._enabled, 'Stop should only be called if disabled'
+ self.flush()
+ self._proxy_address = None
+
+ def _receive_onu_message(self, rx_frame):
+ """ Autonomously generated ONU frame Rx handler"""
+ self.log.debug('rx-onu-frame', frame_type=type(rx_frame),
+ frame=hexify(str(rx_frame)))
+
+ msg_type = rx_frame.fields['message_type']
+ self._rx_onu_frames += 1
+
+ msg = {TX_REQUEST_KEY: None,
+ RX_RESPONSE_KEY: rx_frame}
+
+ if msg_type == EntityOperations.AlarmNotification.value:
+ topic = OMCI_CC.event_bus_topic(self._device_id, RxEvent.Alarm_Notification)
+ self.reactor.callLater(0, self.event_bus.publish, topic, msg)
+
+ elif msg_type == EntityOperations.AttributeValueChange.value:
+ topic = OMCI_CC.event_bus_topic(self._device_id, RxEvent.AVC_Notification)
+ self.reactor.callLater(0, self.event_bus.publish, topic, msg)
+
+ elif msg_type == EntityOperations.TestResult.value:
+ topic = OMCI_CC.event_bus_topic(self._device_id, RxEvent.Test_Result)
+ self.reactor.callLater(0, self.event_bus.publish, topic, msg)
+
+ else:
+ self.log.warn('onu-unsupported-autonomous-message', type=msg_type)
+ self._rx_onu_discards += 1
+
+ def _update_rx_tx_stats(self, now, ts):
+ ts_diff = now - arrow.Arrow.utcfromtimestamp(ts)
+ secs = ts_diff.total_seconds()
+ self._reply_sum += secs
+ if secs < self._reply_min:
+ self._reply_min = secs
+ if secs > self._reply_max:
+ self._reply_max = secs
+ return secs
+
+ def receive_message(self, msg):
+ """
+ Receive an OMCI message from the proxy channel to the OLT.
+
+ Call this from your ONU Adapter on a new OMCI Rx on the proxy channel
+ :param msg: (str) OMCI binary message (used as input to Scapy packet decoder)
+ """
+ if not self.enabled:
+ return
+
+ try:
+ now = arrow.utcnow()
+ d = None
+
+ # NOTE: Since we may need to do an independent ME map on a per-ONU basis
+ # save the current value of the entity_id_to_class_map, then
+ # replace it with our custom one before decode, and then finally
+ # restore it later. Tried other ways but really made the code messy.
+ saved_me_map = omci_entities.entity_id_to_class_map
+ omci_entities.entity_id_to_class_map = self._me_map
+
+ try:
+ rx_frame = msg if isinstance(msg, OmciFrame) else OmciFrame(msg)
+ rx_tid = rx_frame.fields['transaction_id']
+
+ if rx_tid == 0:
+ return self._receive_onu_message(rx_frame)
+
+ # Publish a connectivity event if this is the very first Rx or if we
+ # are recovering from a run of consecutive errors
+ if self._rx_frames == 0 or self._consecutive_errors != 0:
+ self.reactor.callLater(0, self._publish_connectivity_event, True)
+
+ self._rx_frames += 1
+ self._consecutive_errors = 0
+
+ except KeyError as e:
+ # Unknown, Unsupported, or vendor-specific ME. Key is the unknown classID
+ self.log.debug('frame-decode-key-error', msg=hexlify(msg), e=e)
+ rx_frame = self._decode_unknown_me(msg)
+ self._rx_unknown_me += 1
+ rx_tid = rx_frame.fields.get('transaction_id')
+
+ except Exception as e:
+ self.log.exception('frame-decode', msg=hexlify(msg), e=e)
+ return
+
+ finally:
+ omci_entities.entity_id_to_class_map = saved_me_map # Always restore it.
+
+ try:
+ high_priority = self._tid_is_high_priority(rx_tid)
+ index = self._get_priority_index(high_priority)
+
+ # (timestamp, defer, frame, timeout, retry, delayedCall)
+ last_tx_tuple = self._tx_request[index]
+
+ if last_tx_tuple is None or \
+ last_tx_tuple[OMCI_CC.REQUEST_FRAME].fields.get('transaction_id') != rx_tid:
+ # Possible late Rx on a message that timed-out
+ self._rx_unknown_tid += 1
+ self.log.warn('tx-message-missing', rx_id=rx_tid, msg=hexlify(msg))
+ return
+
+ ts, d, tx_frame, timeout, retry, dc = last_tx_tuple
+ if dc is not None and not dc.cancelled and not dc.called:
+ dc.cancel()
+ self.log.debug("cancel-timeout-called")
+
+ secs = self._update_rx_tx_stats(now, ts)
+
+ # Late arrival?
+ if d.called:
+ self._rx_late += 1
+ return
+
+ except Exception as e:
+ self.log.exception('frame-match', msg=hexlify(msg), e=e)
+ if d is not None:
+ return d.errback(failure.Failure(e))
+ return
+
+ # Extended processing needed. Note 'data' field will be None on some error
+ # status returns
+ omci_msg = rx_frame.fields['omci_message']
+
+ if isinstance(omci_msg, OmciGetResponse) and \
+ omci_msg.fields.get('data') is not None and \
+ 'table_attribute_mask' in omci_msg.fields['data']:
+ # Yes, run in a separate generator
+ reactor.callLater(0, self._process_get_rx_frame, timeout, secs,
+ rx_frame, d, tx_frame, high_priority)
+ else:
+ # Publish Rx event to listeners in a different task
+ reactor.callLater(0, self._publish_rx_frame, tx_frame, rx_frame)
+
+ # begin success callback chain (will cancel timeout and queue next Tx message)
+ from copy import copy
+ original_callbacks = copy(d.callbacks)
+ self._rx_response[index] = rx_frame
+ d.callback(rx_frame)
+
+ except Exception as e:
+ self.log.exception('rx-msg', e=e)
+
+ @inlineCallbacks
+ def _process_get_rx_frame(self, timeout, secs, rx_frame, d, tx_frame, high_priority):
+ """
+ Special handling for Get Requests that may require additional 'get_next' operations
+ if a table attribute was requested.
+ """
+ omci_msg = rx_frame.fields['omci_message']
+ if isinstance(omci_msg, OmciGetResponse) and 'table_attribute_mask' in omci_msg.fields['data']:
+ try:
+ entity_class = omci_msg.fields['entity_class']
+ entity_id = omci_msg.fields['entity_id']
+ table_attributes = omci_msg.fields['data']['table_attribute_mask']
+
+ # Table attribute mask is encoded opposite of managed entity mask.
+ if entity_class in self._me_map:
+ ec = self._me_map[entity_class]
+ for index in xrange(16):
+ attr_mask = 1 << index
+
+ if attr_mask & table_attributes:
+ eca = ec.attributes[15-index]
+ self.log.debug('omcc-get-table-attribute', table_name=eca.field.name)
+
+ seq_no = 0
+ data_buffer = ''
+ count = omci_msg.fields['data'][eca.field.name + '_size']
+
+ if count > MAX_TABLE_ROW_COUNT:
+ self.log.error('omcc-get-table-huge', count=count, name=eca.field.name)
+ raise ValueError('Huge Table Size: {}'.format(count))
+
+ # Original timeout must be chopped up into each individual get-next request
+ # in order for total transaction to complete within the timeframe of the
+ # original get() timeout.
+ number_transactions = 1 + (count + OmciTableField.PDU_SIZE - 1) / OmciTableField.PDU_SIZE
+ timeout /= (1 + number_transactions)
+
+ # Start the loop
+ vals = []
+ for offset in xrange(0, count, OmciTableField.PDU_SIZE):
+ frame = MEFrame(ec, entity_id, {eca.field.name: seq_no}).get_next()
+ seq_no += 1
+
+ max_retries = 3
+ results = yield self.send(frame, min(timeout / max_retries, secs * 3), max_retries)
+
+ omci_getnext_msg = results.fields['omci_message']
+ status = omci_getnext_msg.fields['success_code']
+
+ if status != ReasonCodes.Success.value:
+ raise Exception('get-next-failure table=' + eca.field.name +
+ ' entity_id=' + str(entity_id) +
+ ' sqn=' + str(seq_no) + ' omci-status ' + str(status))
+
+ # Extract the data
+ num_octets = count - offset
+ if num_octets > OmciTableField.PDU_SIZE:
+ num_octets = OmciTableField.PDU_SIZE
+
+ data = omci_getnext_msg.fields['data'][eca.field.name]
+ data_buffer += data[:num_octets]
+
+ while data_buffer:
+ data_buffer, val = eca.field.getfield(None, data_buffer)
+ vals.append(val)
+
+ omci_msg.fields['data'][eca.field.name] = vals
+ del omci_msg.fields['data'][eca.field.name + '_size']
+ self.log.debug('omcc-got-table-attribute-rows', table_name=eca.field.name,
+ row_count=len(vals))
+ del omci_msg.fields['data']['table_attribute_mask']
+
+ except Exception as e:
+ self.log.exception('get-next-error', e=e)
+ d.errback(failure.Failure(e))
+ return
+
+ # Notify sender of completed request
+ reactor.callLater(0, d.callback, rx_frame)
+
+ # Publish Rx event to listeners in a different task except for internally-consumed get-next-response
+ if not isinstance(omci_msg, OmciGetNextResponse):
+ reactor.callLater(0, self._publish_rx_frame, tx_frame, rx_frame)
+
+ def _decode_unknown_me(self, msg):
+ """
+ Decode an ME for an unsupported class ID. This should only occur for a subset
+ of message types (Get, Set, MIB Upload Next, ...) and they should only be
+ responses as well.
+
+ There are some items below that are commented out. For VOLTHA 2.0, it is
+ expected that any get, set, create, or delete for unique (often vendor) MEs
+ will be coded by the ONU adapter that uses them and supplied to OpenOMCI as
+ vendor-specific MEs during device initialization.
+
+ :param msg: (str) Binary data
+ :return: (OmciFrame) resulting frame
+ """
+ from struct import unpack
+
+ (tid, msg_type, framing) = unpack('!HBB', msg[0:4])
+
+ assert framing == 0xa, 'Only basic OMCI framing supported at this time'
+ msg = msg[4:]
+
+ # TODO: Commented out items below are future work (not expected for VOLTHA v2.0)
+ (msg_class, kwargs) = {
+ # OmciCreateResponse.message_id: (OmciCreateResponse, None),
+ # OmciDeleteResponse.message_id: (OmciDeleteResponse, None),
+ # OmciSetResponse.message_id: (OmciSetResponse, None),
+ # OmciGetResponse.message_id: (OmciGetResponse, None),
+ # OmciGetAllAlarmsNextResponse.message_id: (OmciGetAllAlarmsNextResponse, None),
+ OmciMibUploadNextResponse.message_id: (OmciMibUploadNextResponse,
+ {
+ 'entity_class': unpack('!H', msg[0:2])[0],
+ 'entity_id': unpack('!H', msg[2:4])[0],
+ 'object_entity_class': unpack('!H', msg[4:6])[0],
+ 'object_entity_id': unpack('!H', msg[6:8])[0],
+ 'object_attributes_mask': unpack('!H', msg[8:10])[0],
+ 'object_data': {
+ UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[10:-4])
+ },
+ }),
+ # OmciAlarmNotification.message_id: (OmciAlarmNotification, None),
+ OmciAttributeValueChange.message_id: (OmciAttributeValueChange,
+ {
+ 'entity_class': unpack('!H', msg[0:2])[0],
+ 'entity_id': unpack('!H', msg[2:4])[0],
+ 'data': {
+ UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[4:-8])
+ },
+ }),
+ # OmciTestResult.message_id: (OmciTestResult, None),
+ }.get(msg_type, None)
+
+ if msg_class is None:
+ raise TypeError('Unsupported Message Type for Unknown Decode: {}'.format(msg_type))
+
+ return OmciFrame(transaction_id=tid, message_type=msg_type,
+ omci_message=msg_class(**kwargs))
+
+ def _publish_rx_frame(self, tx_frame, rx_frame):
+ """
+ Notify listeners of successful response frame
+ :param tx_frame: (OmciFrame) Original request frame
+ :param rx_frame: (OmciFrame) Response frame
+ """
+ if self._enabled and isinstance(rx_frame, OmciFrame):
+ frame_type = rx_frame.fields['omci_message'].message_id
+ event_type = OMCI_CC._frame_to_event_type.get(frame_type)
+
+ if event_type is not None:
+ topic = OMCI_CC.event_bus_topic(self._device_id, event_type)
+ msg = {TX_REQUEST_KEY: tx_frame,
+ RX_RESPONSE_KEY: rx_frame}
+
+ self.event_bus.publish(topic=topic, msg=msg)
+
+ def _publish_connectivity_event(self, connected):
+ """
+ Notify listeners of Rx/Tx connectivity over OMCI
+ :param connected: (bool) True if connectivity transitioned from unreachable
+ to reachable
+ """
+ if self._enabled:
+ topic = OMCI_CC.event_bus_topic(self._device_id,
+ RxEvent.Connectivity)
+ msg = {CONNECTED_KEY: connected}
+ self.event_bus.publish(topic=topic, msg=msg)
+
+ def flush(self):
+ """Flush/cancel in active or pending Tx requests"""
+ requests = []
+
+ for priority in {OMCI_CC.HIGH_PRIORITY, OMCI_CC.LOW_PRIORITY}:
+ next_frame, self._tx_request[priority] = self._tx_request[priority], None
+ if next_frame is not None:
+ requests.append((next_frame[OMCI_CC.REQUEST_DEFERRED], next_frame[OMCI_CC.REQUEST_DELAYED_CALL]))
+
+ requests += [(next_frame[OMCI_CC.PENDING_DEFERRED], None)
+ for next_frame in self._pending[priority]]
+ self._pending[priority] = list()
+
+ # Cancel them...
+ def cleanup_unhandled_error(_):
+ pass # So the cancel below does not flag an unhandled error
+
+ for d, dc in requests:
+ if d is not None and not d.called:
+ d.addErrback(cleanup_unhandled_error)
+ d.cancel()
+
+ if dc is not None and not dc.called and not dc.cancelled:
+ dc.cancel()
+
+ def _get_tx_tid(self, high_priority=False):
+ """
+ Get the next Transaction ID for a tx. Note TID=0 is reserved
+ for autonomously generated messages from an ONU
+
+ :return: (int) TID
+ """
+ if self._extended_messaging or not high_priority:
+ index = OMCI_CC.LOW_PRIORITY
+ min_tid = OMCI_CC.MIN_OMCI_TX_ID_LOW_PRIORITY
+ max_tid = OMCI_CC.MAX_OMCI_TX_ID_LOW_PRIORITY
+ else:
+ index = OMCI_CC.HIGH_PRIORITY
+ min_tid = OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY
+ max_tid = OMCI_CC.MAX_OMCI_TX_ID_HIGH_PRIORITY
+
+ tx_tid, self._tx_tid[index] = self._tx_tid[index], self._tx_tid[index] + 1
+
+ if self._tx_tid[index] > max_tid:
+ self._tx_tid[index] = min_tid
+
+ return tx_tid
+
+ def _request_failure(self, value, tx_tid, high_priority):
+ """
+ Handle a transmit failure. Rx Timeouts are handled on the 'dc' deferred and
+ will call a different method that may retry if requested. This routine
+ will be called after the final (if any) timeout or other error
+
+ :param value: (Failure) Twisted failure
+ :param tx_tid: (int) Associated Tx TID
+ """
+ index = self._get_priority_index(high_priority)
+
+ if self._tx_request[index] is not None:
+ tx_frame = self._tx_request[index][OMCI_CC.REQUEST_FRAME]
+ tx_frame_tid = tx_frame.fields['transaction_id']
+
+ if tx_frame_tid == tx_tid:
+ timeout = self._tx_request[index][OMCI_CC.REQUEST_TIMEOUT]
+ dc = self._tx_request[index][OMCI_CC.REQUEST_DELAYED_CALL]
+ self._tx_request[index] = None
+
+ if dc is not None and not dc.called and not dc.cancelled:
+ dc.cancel()
+
+ if isinstance(value, failure.Failure):
+ value.trap(CancelledError)
+ self._rx_timeouts += 1
+ self._consecutive_errors += 1
+ if self._consecutive_errors == 1:
+ reactor.callLater(0, self._publish_connectivity_event, False)
+
+ self.log.debug('timeout', tx_id=tx_tid, timeout=timeout)
+ value = failure.Failure(TimeoutError(timeout, "Deferred"))
+ else:
+ # Search pending queue. This may be a cancel coming in from the original
+ # task that requested the Tx. If found, remove it from the pending queue
+ for pos, request in enumerate(self._pending[index]):
+ frame = request[OMCI_CC.PENDING_FRAME]
+ if frame is not None and frame.fields['transaction_id'] == tx_tid:
+ self._pending[index].pop(pos)
+ break
+
+ self._send_next_request(high_priority)
+ return value
+
+ def _request_success(self, rx_frame, high_priority):
+ """
+ Handle transmit success (a matching Rx was received)
+
+ :param rx_frame: (OmciFrame) OMCI response frame with matching TID
+ :return: (OmciFrame) OMCI response frame with matching TID
+ """
+ index = self._get_priority_index(high_priority)
+
+ if rx_frame is None:
+ rx_frame = self._rx_response[index]
+
+ rx_tid = rx_frame.fields.get('transaction_id')
+
+ if rx_tid is not None:
+ if self._tx_request[index] is not None:
+ tx_frame = self._tx_request[index][OMCI_CC.REQUEST_FRAME]
+ tx_tid = tx_frame.fields['transaction_id']
+
+ if rx_tid == tx_tid:
+ # Remove this request. Next callback in chain initiates next Tx
+ self._tx_request[index] = None
+ else:
+ self._rx_late += 1
+ else:
+ self._rx_late += 1
+
+ self._send_next_request(high_priority)
+
+ # Return rx_frame (to next item in callback list)
+ return rx_frame
+
+ def _request_timeout(self, tx_tid, high_priority):
+ """
+ Tx Request timed out. Resend immediately if the retry count is non-zero. A
+ separate delayed call (dc) is armed for each actual Tx; it is not the deferred
+ (d) that is returned to the caller of the 'send()' method.
+
+ :param tx_tid: (int) TID of frame
+ :param high_priority: (bool) True if high-priority queue
+ """
+ self.log.debug("_request_timeout", tx_tid=tx_tid)
+ index = self._get_priority_index(high_priority)
+
+ if self._tx_request[index] is not None:
+ # (0: timestamp, 1: defer, 2: frame, 3: timeout, 4: retry, 5: delayedCall)
+ ts, d, frame, timeout, retry, _dc = self._tx_request[index]
+
+ if frame.fields.get('transaction_id', 0) == tx_tid:
+ self._tx_request[index] = None
+
+ if retry > 0:
+ # Push on front of TX pending queue so that it transmits next with the
+ # original TID
+ self._queue_frame(d, frame, timeout, retry - 1, high_priority, front=True)
+ else:
+ d.errback(failure.Failure(TimeoutError(timeout, "Send OMCI TID {}".format(tx_tid))))
+
+ self._send_next_request(high_priority)
+
+ def _queue_frame(self, d, frame, timeout, retry, high_priority, front=False):
+ index = self._get_priority_index(high_priority)
+ tx_tuple = (d, frame, timeout, retry) # Pending -> (deferred, tx_frame, timeout, retry)
+
+ if front:
+ self._pending[index].insert(0, tx_tuple)
+ else:
+ self._pending[index].append(tx_tuple)
+
+ # Monitor queue stats
+ qlen = len(self._pending[index])
+
+ if high_priority:
+ if self._max_hp_tx_queue < qlen:
+ self._max_hp_tx_queue = qlen
+
+ elif self._max_lp_tx_queue < qlen:
+ self._max_lp_tx_queue = qlen
+
+ def send(self, frame, timeout=DEFAULT_OMCI_TIMEOUT, retry=0, high_priority=False):
+ """
+ Queue the OMCI Frame for a transmit to the ONU via the proxy_channel
+
+ :param frame: (OMCIFrame) Message to send
+ :param timeout: (int) Rx Timeout. 0=No response needed
+ :param retry: (int) Additional retry attempts on channel failure, default=0
+ :param high_priority: (bool) High Priority requests
+ :return: (deferred) A deferred that fires when the response frame is received
+ or if an error/timeout occurs
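+
+ Example (illustrative sketch only; 'omci_cc' is an instance of this class
+ and the two callback names are caller-supplied placeholders):
+
+ d = omci_cc.send(OntDataFrame().mib_reset(), timeout=10)
+ d.addCallbacks(handle_response, handle_error)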
+ """
+ if not self.enabled or self._proxy_address is None:
+ # TODO custom exceptions throughout this code would be helpful
+ self._tx_errors += 1
+ return fail(result=failure.Failure(Exception('OMCI is not enabled')))
+
+ timeout = float(timeout)
+ if timeout > float(MAX_OMCI_REQUEST_AGE):
+ self._tx_errors += 1
+ msg = 'Maximum timeout is {} seconds'.format(MAX_OMCI_REQUEST_AGE)
+ return fail(result=failure.Failure(Exception(msg)))
+
+ if not isinstance(frame, OmciFrame):
+ self._tx_errors += 1
+ msg = "Invalid frame class '{}'".format(type(frame))
+ return fail(result=failure.Failure(Exception(msg)))
+ try:
+ index = self._get_priority_index(high_priority)
+ tx_tid = frame.fields['transaction_id']
+
+ if tx_tid is None:
+ tx_tid = self._get_tx_tid(high_priority=high_priority)
+ frame.fields['transaction_id'] = tx_tid
+
+ assert not any(p[OMCI_CC.PENDING_FRAME].fields.get('transaction_id') == tx_tid for p in self._pending[index]), 'Tx TID {} already exists'.format(tx_tid)
+ assert tx_tid > 0, 'Invalid Tx TID: {}'.format(tx_tid)
+
+ # Queue it and request next Tx if tx channel is free
+ d = defer.Deferred()
+
+ self._queue_frame(d, frame, timeout, retry, high_priority, front=False)
+ self._send_next_request(high_priority)
+
+ if timeout == 0:
+ self.log.debug("send-timeout-zero", tx_tid=tx_tid)
+ self.reactor.callLater(0, d.callback, 'queued')
+
+ return d
+
+ except Exception as e:
+ self._tx_errors += 1
+ self._consecutive_errors += 1
+
+ if self._consecutive_errors == 1:
+ self.reactor.callLater(0, self._publish_connectivity_event, False)
+
+ self.log.exception('send-omci', e=e)
+ return fail(result=failure.Failure(e))
+
+ def _ok_to_send(self, tx_request, high_priority):
+ """
+ G.988 specifies not to issue a MIB upload or a Software download request
+ when a similar action is in progress on the other channel. To keep the
+ logic here simple, a new upload/download will not be allowed if an
+ upload/download is already in progress on the other queue.
+
+ :param tx_request: (OmciFrame) Frame to send
+ :param high_priority: (bool) for queue selection
+ :return: True if okay to dequeue and send frame
+ """
+ other = self._get_priority_index(not high_priority)
+
+ if self._tx_request[other] is None:
+ return True
+
+ this_msg_type = tx_request.fields['message_type'] & 0x1f
+ not_allowed = {OP.MibUpload.value,
+ OP.MibUploadNext.value,
+ OP.StartSoftwareDownload.value,
+ OP.DownloadSection.value,
+ OP.EndSoftwareDownload.value}
+
+ if this_msg_type not in not_allowed:
+ return True
+
+ other_msg_type = self._tx_request[other][OMCI_CC.REQUEST_FRAME].fields['message_type'] & 0x1f
+ return other_msg_type not in not_allowed
+
+ def _send_next_request(self, high_priority):
+ """
+ Pull next tx request and send it
+
+ :param high_priority: (bool) True if this was a high priority request
+ :return: results, so callback chain continues if needed
+ """
+ index = self._get_priority_index(high_priority)
+
+ if self._tx_request[index] is None: # TODO or self._tx_request[index][OMCI_CC.REQUEST_DEFERRED].called:
+ d = None
+ try:
+ if len(self._pending[index]) and \
+ not self._ok_to_send(self._pending[index][0][OMCI_CC.PENDING_FRAME],
+ high_priority):
+ reactor.callLater(0.05, self._send_next_request, high_priority)
+ return
+
+ next_frame = self._pending[index].pop(0)
+
+ d = next_frame[OMCI_CC.PENDING_DEFERRED]
+ frame = next_frame[OMCI_CC.PENDING_FRAME]
+ timeout = next_frame[OMCI_CC.PENDING_TIMEOUT]
+ retry = next_frame[OMCI_CC.PENDING_RETRY]
+
+ tx_tid = frame.fields['transaction_id']
+
+ # NOTE: Since we may need to do an independent ME map on a per-ONU basis
+ # save the current value of the entity_id_to_class_map, then
+ # replace it with our custom one before decode, and then finally
+ # restore it later. Tried other ways but really made the code messy.
+ saved_me_map = omci_entities.entity_id_to_class_map
+ omci_entities.entity_id_to_class_map = self._me_map
+
+ ts = arrow.utcnow().float_timestamp
+ try:
+ self._rx_response[index] = None
+ self._adapter_agent.send_proxied_message(self._proxy_address,
+ hexify(str(frame)))
+ finally:
+ omci_entities.entity_id_to_class_map = saved_me_map
+
+ self._tx_frames += 1
+
+ if timeout > 0:
+ # Timeout on internal deferred to support internal retries if requested
+ dc = self.reactor.callLater(timeout, self._request_timeout, tx_tid, high_priority)
+
+ # (timestamp, defer, frame, timeout, retry, delayedCall)
+ self._tx_request[index] = (ts, d, frame, timeout, retry, dc)
+ d.addCallbacks(self._request_success, self._request_failure,
+ callbackArgs=(high_priority,),
+ errbackArgs=(tx_tid, high_priority))
+
+ except IndexError:
+ pass # Nothing pending in this queue
+
+ except Exception as e:
+ self.log.exception('send-proxy-exception', e=e)
+ self._tx_request[index] = None
+ self.reactor.callLater(0, self._send_next_request, high_priority)
+
+ if d is not None:
+ d.errback(failure.Failure(e))
+
+ ###################################################################################
+ # MIB Action shortcuts
+
+ def send_mib_reset(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ """
+ Perform a MIB Reset
+ """
+ self.log.debug('send-mib-reset')
+
+ frame = OntDataFrame().mib_reset()
+ return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+ def send_mib_upload(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ self.log.debug('send-mib-upload')
+
+ frame = OntDataFrame().mib_upload()
+ return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+ def send_mib_upload_next(self, seq_no, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ self.log.debug('send-mib-upload-next')
+
+ frame = OntDataFrame(sequence_number=seq_no).mib_upload_next()
+ return self.send(frame, timeout=timeout, high_priority=high_priority)
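+ # Illustrative sketch only (not part of the original check-in): a typical
+ # MIB upload sequence built from the shortcuts in this section, assuming an
+ # @inlineCallbacks caller and that the MIB upload response carries a
+ # 'number_of_commands' field as in the OMCI message definitions:
+ #
+ # yield self.send_mib_reset()
+ # results = yield self.send_mib_upload()
+ # count = results.fields['omci_message'].fields['number_of_commands']
+ # for seq_no in xrange(count):
+ # response = yield self.send_mib_upload_next(seq_no)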
+
+ def send_reboot(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ """
+ Send an ONU Device reboot request (ONU-G ME).
+
+ NOTICE: This method is being deprecated and replaced with a task that performs this function
+ """
+ self.log.debug('send-mib-reboot')
+
+ frame = OntGFrame().reboot()
+ return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+ def send_get_all_alarm(self, alarm_retrieval_mode=0, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ self.log.debug('send_get_alarm')
+
+ frame = OntDataFrame().get_all_alarm(alarm_retrieval_mode)
+ return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+ def send_get_all_alarm_next(self, seq_no, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ self.log.debug('send_get_alarm_next')
+
+ frame = OntDataFrame().get_all_alarm_next(seq_no)
+ return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+ def send_start_software_download(self, image_inst_id, image_size, window_size, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ frame = SoftwareImageFrame(image_inst_id).start_software_download(image_size, window_size-1)
+ return self.send(frame, timeout, 3, high_priority=high_priority)
+
+ def send_download_section(self, image_inst_id, section_num, data, size=DEFAULT_OMCI_DOWNLOAD_SECTION_SIZE, timeout=0, high_priority=False):
+ """
+ # timeout=0 indicates no response is needed
+ """
+ # self.log.debug("send_download_section", instance_id=image_inst_id, section=section_num, timeout=timeout)
+ if timeout > 0:
+ frame = SoftwareImageFrame(image_inst_id).download_section(True, section_num, data)
+ else:
+ frame = SoftwareImageFrame(image_inst_id).download_section(False, section_num, data)
+ return self.send(frame, timeout, high_priority=high_priority)
+
+ # if timeout > 0:
+ # self.reactor.callLater(0, self.sim_receive_download_section_resp,
+ # frame.fields["transaction_id"],
+ # frame.fields["omci_message"].fields["section_number"])
+ # return d
+
+ def send_end_software_download(self, image_inst_id, crc32, image_size, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ frame = SoftwareImageFrame(image_inst_id).end_software_download(crc32, image_size)
+ return self.send(frame, timeout, high_priority=high_priority)
+ # self.reactor.callLater(0, self.sim_receive_end_software_download_resp, frame.fields["transaction_id"])
+ # return d
+
+ def send_active_image(self, image_inst_id, flag=0, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ frame = SoftwareImageFrame(image_inst_id).activate_image(flag)
+ return self.send(frame, timeout, high_priority=high_priority)
+
+ def send_commit_image(self, image_inst_id, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+ frame = SoftwareImageFrame(image_inst_id).commit_image()
+ return self.send(frame, timeout, high_priority=high_priority)
+
diff --git a/python/adapters/extensions/omci/omci_defs.py b/python/adapters/extensions/omci/omci_defs.py
new file mode 100644
index 0000000..64fefc5
--- /dev/null
+++ b/python/adapters/extensions/omci/omci_defs.py
@@ -0,0 +1,100 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from enum import Enum, IntEnum
+
+class OmciUninitializedFieldError(Exception):
+ pass
+
+
+class OmciInvalidTypeError(Exception):
+ pass
+
+def bitpos_from_mask(mask, lsb_pos=0, increment=1):
+ """
+ Turn a decimal value (bitmask) into a list of indices where each
+ index value corresponds to the bit position of a bit that was set (1)
+ in the mask. What numbers are assigned to the bit positions is controlled
+ by lsb_pos and increment, as explained below.
+ :param mask: a decimal value used as a bit mask
+ :param lsb_pos: The decimal value associated with the LSB bit
+ :param increment: If this is +i, then the bit next to LSB will take
+ the decimal value of lsb_pos + i.
+ :return: List of bit positions where the bit was set in mask
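+
+ Example (illustrative): bitpos_from_mask(0x05) returns [0, 2], while
+ bitpos_from_mask(0x05, lsb_pos=16, increment=-1) returns [14, 16]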
+ """
+ out = []
+ while mask:
+ if mask & 0x01:
+ out.append(lsb_pos)
+ lsb_pos += increment
+ mask >>= 1
+ return sorted(out)
+
+
+class AttributeAccess(Enum):
+ Readable = 1
+ R = 1
+ Writable = 2
+ W = 2
+ SetByCreate = 3
+ SBC = 3
+
+
+OmciNullPointer = 0xffff
+OmciSectionDataSize = 31
+
+class EntityOperations(Enum):
+ # keep these numbers in sync with the msg_type field per the OMCI spec
+ Create = 4
+ CreateComplete = 5
+ Delete = 6
+ Set = 8
+ Get = 9
+ GetComplete = 10
+ GetAllAlarms = 11
+ GetAllAlarmsNext = 12
+ MibUpload = 13
+ MibUploadNext = 14
+ MibReset = 15
+ AlarmNotification = 16
+ AttributeValueChange = 17
+ Test = 18
+ StartSoftwareDownload = 19
+ DownloadSection = 20
+ EndSoftwareDownload = 21
+ ActivateSoftware = 22
+ CommitSoftware = 23
+ SynchronizeTime = 24
+ Reboot = 25
+ GetNext = 26
+ TestResult = 27
+ GetCurrentData = 28
+ SetTable = 29 # Defined in Extended Message Set Only
+
+
+class ReasonCodes(IntEnum):
+ # OMCI Result and reason codes
+ Success = 0, # Command processed successfully
+ ProcessingError = 1, # Command processing error
+ NotSupported = 2, # Command not supported
+ ParameterError = 3, # Parameter error
+ UnknownEntity = 4, # Unknown managed entity
+ UnknownInstance = 5, # Unknown managed entity instance
+ DeviceBusy = 6, # Device busy
+ InstanceExists = 7, # Instance Exists
+ AttributeFailure = 9, # Attribute(s) failed or unknown
+
+ OperationCancelled = 255 # Proprietary defined for internal use
+
diff --git a/python/adapters/extensions/omci/omci_entities.py b/python/adapters/extensions/omci/omci_entities.py
new file mode 100644
index 0000000..95e6581
--- /dev/null
+++ b/python/adapters/extensions/omci/omci_entities.py
@@ -0,0 +1,1564 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import inspect
+
+import sys
+from binascii import hexlify
+from bitstring import BitArray
+import json
+from scapy.fields import ByteField, ShortField, MACField, BitField, IPField
+from scapy.fields import IntField, StrFixedLenField, LongField, FieldListField, PacketLenField
+from scapy.packet import Packet
+
+from voltha.extensions.omci.omci_defs import OmciUninitializedFieldError, \
+ AttributeAccess, OmciNullPointer, EntityOperations, OmciInvalidTypeError
+from voltha.extensions.omci.omci_fields import OmciSerialNumberField, OmciTableField
+from voltha.extensions.omci.omci_defs import bitpos_from_mask
+
+
+class EntityClassAttribute(object):
+
+ def __init__(self, fld, access=set(), optional=False, range_check=None,
+ avc=False, tca=False, counter=False, deprecated=False):
+ """
+ Initialize an Attribute for a Managed Entity Class
+
+ :param fld: (Field) Scapy field type
+ :param access: (AttributeAccess) Allowed access
+ :param optional: (boolean) If true, attribute is optional, else mandatory
+ :param range_check: (callable) None, Lambda, or Function to validate value
+ :param avc: (boolean) If true, an AVC notification can occur for the attribute
+ :param tca: (boolean) If true, a threshold crossing alert alarm notification can occur
+ for the attribute
+ :param counter: (boolean) If true, this attribute is a PM counter
+ :param deprecated: (boolean) If true, this attribute is deprecated and
+ only 'read' operations (if any) are performed.
+ """
+ self._fld = fld
+ self._access = access
+ self._optional = optional
+ self._range_check = range_check
+ self._avc = avc
+ self._tca = tca
+ self._counter = counter
+ self._deprecated = deprecated
+
+ @property
+ def field(self):
+ return self._fld
+
+ @property
+ def access(self):
+ return self._access
+
+ @property
+ def optional(self):
+ return self._optional
+
+ @property
+ def is_counter(self):
+ return self._counter
+
+ @property
+ def range_check(self):
+ return self._range_check
+
+ @property
+ def avc_allowed(self):
+ return self._avc
+
+ @property
+ def deprecated(self):
+ return self._deprecated
+
+ _type_checker_map = {
+ 'ByteField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFF,
+ 'ShortField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFFFF,
+ 'IntField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFFFFFFFF,
+ 'LongField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFFFFFFFFFFFFFFFF,
+ 'StrFixedLenField': lambda val: isinstance(val, basestring),
+ 'MACField': lambda val: True, # TODO: Add a constraint for this field type
+ 'BitField': lambda val: True, # TODO: Add a constraint for this field type
+ 'IPField': lambda val: True, # TODO: Add a constraint for this field type
+ 'OmciTableField': lambda val: True,
+
+ # TODO: As additional Scapy field types are used, add constraints
+ }
+
+ def valid(self, value):
+ def _isa_lambda_function(v):
+ import inspect
+ return callable(v) and len(inspect.getargspec(v).args) == 1
+
+ field_type = self.field.__class__.__name__
+ type_check = EntityClassAttribute._type_checker_map.get(field_type,
+ lambda val: True)
+
+ # TODO: Currently StrFixedLenField is used heavily for both bit fields as
+ # and other 'byte/octet' related strings that are NOT textual. Until
+ # all of these are corrected, 'StrFixedLenField' cannot test the type
+ # of the value provided
+
+ if field_type != 'StrFixedLenField' and not type_check(value):
+ return False
+
+ if _isa_lambda_function(self.range_check):
+ return self.range_check(value)
+ return True
+
+
+class EntityClassMeta(type):
+ """
+ Metaclass for EntityClass to generate secondary class attributes
+ for class attributes of the derived classes.
+ """
+ def __init__(cls, name, bases, dct):
+ super(EntityClassMeta, cls).__init__(name, bases, dct)
+
+ # initialize attribute_name_to_index_map
+ cls.attribute_name_to_index_map = dict(
+ (a._fld.name, idx) for idx, a in enumerate(cls.attributes))
+
+
+class EntityClass(object):
+
+ class_id = 'to be filled by subclass'
+ attributes = []
+ mandatory_operations = set()
+ optional_operations = set()
+ notifications = set()
+ alarms = dict() # Alarm Number -> Alarm Name
+ hidden = False # If true, this ME is not reported by a MIB upload.
+ # This flag is needed to be able to properly perform
+ # MIB audits.
+
+ # will be map of attr_name -> index in attributes, initialized by metaclass
+ attribute_name_to_index_map = None
+ __metaclass__ = EntityClassMeta
+
+ def __init__(self, **kw):
+ assert(isinstance(kw, dict))
+ for k, v in kw.iteritems():
+ assert(k in self.attribute_name_to_index_map)
+ self._data = kw
+
+ def serialize(self, mask=None, operation=None):
+ octets = ''
+
+ # generate ordered list of attribute indices needed to be processed
+ # if mask is provided, we use that explicitly
+ # if mask is not provided, we determine attributes from the self._data
+ # content also taking into account the type of operation in hand
+ if mask is not None:
+ attribute_indices = EntityClass.attribute_indices_from_mask(mask)
+ else:
+ attribute_indices = self.attribute_indices_from_data()
+
+ # Serialize each indexed field (ignoring entity id)
+ for index in attribute_indices:
+ eca = self.attributes[index]
+ field = eca.field
+ try:
+ value = self._data[field.name]
+
+ if not eca.valid(value):
+ raise OmciInvalidTypeError(
+ 'Value "{}" for Entity field "{}" is not valid'.format(value,
+ field.name))
+ except KeyError:
+ raise OmciUninitializedFieldError(
+ 'Entity field "{}" not set'.format(field.name))
+
+ octets = field.addfield(None, octets, value)
+
+ return octets
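+ # Illustrative note (not part of the original code): with no mask,
+ # OntData(mib_data_sync=5).serialize() emits just that one attribute,
+ # i.e. the two octets '\x00\x05'.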
+
+ def attribute_indices_from_data(self):
+ return sorted(
+ self.attribute_name_to_index_map[attr_name]
+ for attr_name in self._data.iterkeys())
+
+ byte1_mask_to_attr_indices = dict(
+ (m, bitpos_from_mask(m, 8, -1)) for m in range(256))
+ byte2_mask_to_attr_indices = dict(
+ (m, bitpos_from_mask(m, 16, -1)) for m in range(256))
+
+ @classmethod
+ def attribute_indices_from_mask(cls, mask):
+ # each bit in the 2-byte field denotes an attribute index; we use a
+ # lookup table to make lookup a bit faster
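+ # (illustrative note: a mask of 0x8000 maps back to attribute index [1],
+ # i.e. the inverse of mask_for() below)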
+ return \
+ cls.byte1_mask_to_attr_indices[(mask >> 8) & 0xff] + \
+ cls.byte2_mask_to_attr_indices[(mask & 0xff)]
+
+ @classmethod
+ def mask_for(cls, *attr_names):
+ """
+ Return the mask value corresponding to the given attribute names
+ :param attr_names: Attribute names
+ :return: integer mask value
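+
+ Example (illustrative): for the OntData class below,
+ mask_for('mib_data_sync') returns 0x8000, since that attribute has index 1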
+ """
+ mask = 0
+ for attr_name in attr_names:
+ index = cls.attribute_name_to_index_map[attr_name]
+ mask |= (1 << (16 - index))
+ return mask
+
+
+# abbreviations
+ECA = EntityClassAttribute
+AA = AttributeAccess
+OP = EntityOperations
+
+
+class OntData(EntityClass):
+ class_id = 2
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: x == 0),
+ # Only 1 octet used if GET/SET operation
+ ECA(ShortField("mib_data_sync", 0), {AA.R, AA.W})
+ ]
+ mandatory_operations = {OP.Get, OP.Set,
+ OP.GetAllAlarms, OP.GetAllAlarmsNext,
+ OP.MibReset, OP.MibUpload, OP.MibUploadNext}
+
+
+class Cardholder(EntityClass):
+ class_id = 5
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: 0 <= x < 255 or 256 <= x < 511,
+ avc=True),
+ ECA(ByteField("actual_plugin_unit_type", None), {AA.R}),
+ ECA(ByteField("expected_plugin_unit_type", None), {AA.R, AA.W}),
+ ECA(ByteField("expected_port_count", None), {AA.R, AA.W},
+ optional=True),
+ ECA(StrFixedLenField("expected_equipment_id", None, 20), {AA.R, AA.W},
+ optional=True, avc=True),
+ ECA(StrFixedLenField("actual_equipment_id", None, 20), {AA.R},
+ optional=True),
+ ECA(ByteField("protection_profile_pointer", None), {AA.R},
+ optional=True),
+ ECA(ByteField("invoke_protection_switch", None), {AA.R, AA.W},
+ optional=True, range_check=lambda x: 0 <= x <= 3),
+ ECA(ByteField("alarm_reporting_control", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1, optional=True, avc=True),
+ ECA(ByteField("arc_interval", 0), {AA.R, AA.W}, optional=True),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 0: 'Plug-in circuit pack missing',
+ 1: 'Plug-in type mismatch alarm',
+ 2: 'Improper card removal',
+ 3: 'Plug-in equipment ID mismatch alarm',
+ 4: 'Protection switch',
+ }
+
+
+class CircuitPack(EntityClass):
+ class_id = 6
+ attributes = [
+ ECA(StrFixedLenField("managed_entity_id", None, 22), {AA.R, AA.SBC},
+ range_check=lambda x: 0 <= x < 255 or 256 <= x < 511),
+ ECA(ByteField("type", None), {AA.R, AA.SBC}),
+ ECA(ByteField("number_of_ports", None), {AA.R}, optional=True),
+ ECA(OmciSerialNumberField("serial_number"), {AA.R}),
+ ECA(StrFixedLenField("version", None, 14), {AA.R}),
+ ECA(StrFixedLenField("vendor_id", None, 4), {AA.R}),
+ ECA(ByteField("administrative_state", None), {AA.R, AA.W}),
+ ECA(ByteField("operational_state", None), {AA.R}, optional=True, avc=True),
+ ECA(ByteField("bridged_or_ip_ind", None), {AA.R, AA.W}, optional=True,
+ range_check=lambda x: 0 <= x <= 2),
+ ECA(StrFixedLenField("equipment_id", None, 20), {AA.R}, optional=True),
+ ECA(ByteField("card_configuration", None), {AA.R, AA.W, AA.SBC},
+ optional=True), # not really mandatory, see spec ITU-T G.988, 9.1.6
+ ECA(ByteField("total_tcont_buffer_number", None), {AA.R},
+ optional=True), # not really mandatory, see spec ITU-T G.988, 9.1.6
+ ECA(ByteField("total_priority_queue_number", None), {AA.R},
+ optional=True), # not really mandatory, see spec ITU-T G.988, 9.1.6
+ ECA(ByteField("total_traffic_scheduler_number", None), {AA.R},
+ optional=True), # not really mandatory, see spec ITU-T G.988, 9.1.6
+ ECA(IntField("power_shed_override", None), {AA.R, AA.W},
+ optional=True)
+ ]
+ mandatory_operations = {OP.Get, OP.Set, OP.Reboot}
+ optional_operations = {OP.Create, OP.Delete, OP.Test}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 0: 'Equipment alarm',
+ 1: 'Powering alarm',
+ 2: 'Self-test failure',
+ 3: 'Laser end of life',
+ 4: 'Temperature yellow',
+ 5: 'Temperature red',
+ }
+
+class SoftwareImage(EntityClass):
+ class_id = 7
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: 0 <= x/256 <= 254 or 0 <= x % 256 <= 1),
+ ECA(StrFixedLenField("version", None, 14), {AA.R}, avc=True),
+ ECA(ByteField("is_committed", None), {AA.R}, avc=True,
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("is_active", None), {AA.R}, avc=True,
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("is_valid", None), {AA.R}, avc=True,
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(StrFixedLenField("product_code", None, 25), {AA.R}, optional=True, avc=True),
+ ECA(StrFixedLenField("image_hash", None, 16), {AA.R}, optional=True, avc=True),
+ ]
+ mandatory_operations = {OP.Get, OP.StartSoftwareDownload, OP.DownloadSection,
+ OP.EndSoftwareDownload, OP.ActivateSoftware,
+ OP.CommitSoftware}
+ notifications = {OP.AttributeValueChange}
+
+
+class PptpEthernetUni(EntityClass):
+ class_id = 11
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(ByteField("expected_type", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 254),
+ ECA(ByteField("sensed_type", 0), {AA.R}, optional=True, avc=True),
+ # TODO: For sensed_type AVC, see note in AT&T OMCI Specification, V3.0, page 123
+ ECA(ByteField("auto_detection_configuration", 0), {AA.R, AA.W},
+ range_check=lambda x: x in [0, 1, 2, 3, 4, 5,
+ 0x10, 0x11, 0x12, 0x13, 0x14,
+ 0x20, 0x30], optional=True), # See ITU-T G.988
+ ECA(ByteField("ethernet_loopback_configuration", 0), {AA.R, AA.W},
+ range_check=lambda x: x in [0, 3]),
+ ECA(ByteField("administrative_state", 1), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("operational_state", 1), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1, optional=True, avc=True),
+ ECA(ByteField("configuration_ind", 0), {AA.R},
+ range_check=lambda x: x in [0, 1, 2, 3, 4, 0x11, 0x12, 0x13]),
+ ECA(ShortField("max_frame_size", 1518), {AA.R, AA.W}, optional=True),
+ ECA(ByteField("dte_or_dce_ind", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 2),
+ ECA(ShortField("pause_time", 0), {AA.R, AA.W}, optional=True),
+ ECA(ByteField("bridged_or_ip_ind", 2), {AA.R, AA.W},
+ optional=True, range_check=lambda x: 0 <= x <= 2),
+ ECA(ByteField("arc", 0), {AA.R, AA.W}, optional=True,
+ range_check=lambda x: 0 <= x <= 1, avc=True),
+ ECA(ByteField("arc_interval", 0), {AA.R, AA.W}, optional=True),
+ ECA(ByteField("pppoe_filter", 0), {AA.R, AA.W}, optional=True),
+ ECA(ByteField("power_control", 0), {AA.R, AA.W}, optional=True),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 0: 'LAN Loss Of Signal',
+ }
+
+
+class MacBridgeServiceProfile(EntityClass):
+ class_id = 45
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("spanning_tree_ind", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("learning_ind", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("port_bridging_ind", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("priority", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("max_age", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0x0600 <= x <= 0x2800),
+ ECA(ShortField("hello_time", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0x0100 <= x <= 0x0A00),
+ ECA(ShortField("forward_delay", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0x0400 <= x <= 0x1E00),
+ ECA(ByteField("unknown_mac_address_discard", None),
+ {AA.R, AA.W, AA.SBC}, range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("mac_learning_depth", None),
+ {AA.R, AA.W, AA.SBC}, optional=True),
+ ECA(ByteField("dynamic_filtering_ageing_time", None),
+ {AA.R, AA.W, AA.SBC}, optional=True),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class MacBridgePortConfigurationData(EntityClass):
+ class_id = 47
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ShortField("bridge_id_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("port_num", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("tp_type", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 1 <= x <= 12),
+ ECA(ShortField("tp_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("port_priority", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("port_path_cost", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("port_spanning_tree_in", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("encapsulation_methods", None), {AA.R, AA.W, AA.SBC},
+ optional=True, deprecated=True),
+ ECA(ByteField("lan_fcs_ind", None), {AA.R, AA.W, AA.SBC},
+ optional=True, deprecated=True),
+ ECA(MACField("port_mac_address", None), {AA.R}, optional=True),
+ ECA(ShortField("outbound_td_pointer", None), {AA.R, AA.W},
+ optional=True),
+ ECA(ShortField("inbound_td_pointer", None), {AA.R, AA.W},
+ optional=True),
+ # TODO:
+ ECA(ByteField("mac_learning_depth", 0), {AA.R, AA.W, AA.SBC},
+ optional=True),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Port blocking',
+ }
+
+
+class MacBridgePortFilterPreAssignTable(EntityClass):
+ class_id = 79
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ShortField("ipv4_multicast", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("ipv6_multicast", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("ipv4_broadcast", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("rarp", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("ipx", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("netbeui", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("appletalk", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("bridge_management_information", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("arp", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("pppoe_broadcast", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1)
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+
+
+class VlanTaggingFilterData(EntityClass):
+ class_id = 84
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(FieldListField("vlan_filter_list", None,
+ ShortField('', 0), count_from=lambda _: 12),
+ {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("forward_operation", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0x00 <= x <= 0x21),
+ ECA(ByteField("number_of_entries", None), {AA.R, AA.W, AA.SBC})
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class Ieee8021pMapperServiceProfile(EntityClass):
+ class_id = 130
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ShortField("tp_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_0",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_1",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_2",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_3",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_4",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_5",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_6",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_7",
+ OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("unmarked_frame_option", None),
+ {AA.R, AA.W, AA.SBC}, range_check=lambda x: 0 <= x <= 1),
+ ECA(StrFixedLenField("dscp_to_p_bit_mapping", None, length=24),
+ {AA.R, AA.W}), # TODO: Would a custom 3-bit group bitfield work better?
+ ECA(ByteField("default_p_bit_marking", None),
+ {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("tp_type", None), {AA.R, AA.W, AA.SBC},
+ optional=True, range_check=lambda x: 0 <= x <= 8)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class OltG(EntityClass):
+ class_id = 131
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: x == 0),
+ ECA(StrFixedLenField("olt_vendor_id", None, 4), {AA.R, AA.W}),
+ ECA(StrFixedLenField("equipment_id", None, 20), {AA.R, AA.W}),
+ ECA(StrFixedLenField("version", None, 14), {AA.R, AA.W}),
+ ECA(StrFixedLenField("time_of_day_information", None, 14), {AA.R, AA.W})
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+
+
+class OntPowerShedding(EntityClass):
+ class_id = 133
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: x == 0),
+ ECA(ShortField("restore_power_timer_reset_interval", 0),
+ {AA.R, AA.W}),
+ ECA(ShortField("data_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("voice_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("video_overlay_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("video_return_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("dsl_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("atm_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("ces_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("frame_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("sonet_class_shedding_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("shedding_status", None), {AA.R, AA.W}, optional=True,
+ avc=True),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+ notifications = {OP.AttributeValueChange}
+
+
+class IpHostConfigData(EntityClass):
+ class_id = 134
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(BitField("ip_options", 0, size=8), {AA.R, AA.W}),
+ ECA(MACField("mac_address", None), {AA.R}),
+ ECA(StrFixedLenField("onu_identifier", None, 25), {AA.R, AA.W}),
+ ECA(IPField("ip_address", None), {AA.R, AA.W}),
+ ECA(IPField("mask", None), {AA.R, AA.W}),
+ ECA(IPField("gateway", None), {AA.R, AA.W}),
+ ECA(IPField("primary_dns", None), {AA.R, AA.W}),
+ ECA(IPField("secondary_dns", None), {AA.R, AA.W}),
+ ECA(IPField("current_address", None), {AA.R}, avc=True),
+ ECA(IPField("current_mask", None), {AA.R}, avc=True),
+ ECA(IPField("current_gateway", None), {AA.R}, avc=True),
+ ECA(IPField("current_primary_dns", None), {AA.R}, avc=True),
+ ECA(IPField("current_secondary_dns", None), {AA.R}, avc=True),
+ ECA(StrFixedLenField("domain_name", None, 25), {AA.R}, avc=True),
+ ECA(StrFixedLenField("host_name", None, 25), {AA.R}, avc=True),
+ ECA(ShortField("relay_agent_options", None), {AA.R, AA.W},
+ optional=True),
+ ]
+ mandatory_operations = {OP.Get, OP.Set, OP.Test}
+ notifications = {OP.AttributeValueChange}
+
+
+class VlanTaggingOperation(Packet):
+ name = "VlanTaggingOperation"
+ fields_desc = [
+ BitField("filter_outer_priority", 0, 4),
+ BitField("filter_outer_vid", 0, 13),
+ BitField("filter_outer_tpid_de", 0, 3),
+ BitField("pad1", 0, 12),
+
+ BitField("filter_inner_priority", 0, 4),
+ BitField("filter_inner_vid", 0, 13),
+ BitField("filter_inner_tpid_de", 0, 3),
+ BitField("pad2", 0, 8),
+ BitField("filter_ether_type", 0, 4),
+
+ BitField("treatment_tags_to_remove", 0, 2),
+ BitField("pad3", 0, 10),
+ BitField("treatment_outer_priority", 0, 4),
+ BitField("treatment_outer_vid", 0, 13),
+ BitField("treatment_outer_tpid_de", 0, 3),
+
+ BitField("pad4", 0, 12),
+ BitField("treatment_inner_priority", 0, 4),
+ BitField("treatment_inner_vid", 0, 13),
+ BitField("treatment_inner_tpid_de", 0, 3),
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+ @staticmethod
+ def json_from_value(value):
+ bits = BitArray(hex=hexlify(value))
+ temp = VlanTaggingOperation(
+ filter_outer_priority=bits[0:4].uint, # 4 <-size
+ filter_outer_vid=bits[4:17].uint, # 13
+ filter_outer_tpid_de=bits[17:20].uint, # 3
+ # pad 12
+ filter_inner_priority=bits[32:36].uint, # 4
+ filter_inner_vid=bits[36:49].uint, # 13
+ filter_inner_tpid_de=bits[49:52].uint, # 3
+ # pad 8
+ filter_ether_type=bits[60:64].uint, # 4
+ treatment_tags_to_remove=bits[64:66].uint, # 2
+ # pad 10
+ treatment_outer_priority=bits[76:80].uint, # 4
+ treatment_outer_vid=bits[80:93].uint, # 13
+ treatment_outer_tpid_de=bits[93:96].uint, # 3
+ # pad 12
+ treatment_inner_priority=bits[108:112].uint, # 4
+ treatment_inner_vid=bits[112:125].uint, # 13
+ treatment_inner_tpid_de=bits[125:128].uint, # 3
+ )
+ return json.dumps(temp.fields, separators=(',', ':'))
+
+ def index(self):
+ return '{:02}'.format(self.fields.get('filter_outer_priority',0)) + \
+ '{:03}'.format(self.fields.get('filter_outer_vid',0)) + \
+ '{:01}'.format(self.fields.get('filter_outer_tpid_de',0)) + \
+ '{:03}'.format(self.fields.get('filter_inner_priority',0)) + \
+ '{:04}'.format(self.fields.get('filter_inner_vid',0)) + \
+ '{:01}'.format(self.fields.get('filter_inner_tpid_de',0)) + \
+ '{:02}'.format(self.fields.get('filter_ether_type',0))
+
+ def is_delete(self):
+ return self.fields.get('treatment_tags_to_remove',0) == 0x3 and \
+ self.fields.get('pad3',0) == 0x3ff and \
+ self.fields.get('treatment_outer_priority',0) == 0xf and \
+ self.fields.get('treatment_outer_vid',0) == 0x1fff and \
+ self.fields.get('treatment_outer_tpid_de',0) == 0x7 and \
+ self.fields.get('pad4',0) == 0xfff and \
+ self.fields.get('treatment_inner_priority',0) == 0xf and \
+ self.fields.get('treatment_inner_vid',0) == 0x1fff and \
+ self.fields.get('treatment_inner_tpid_de',0) == 0x7
+
+ def delete(self):
+ self.fields['treatment_tags_to_remove'] = 0x3
+ self.fields['pad3'] = 0x3ff
+ self.fields['treatment_outer_priority'] = 0xf
+ self.fields['treatment_outer_vid'] = 0x1fff
+ self.fields['treatment_outer_tpid_de'] = 0x7
+ self.fields['pad4'] = 0xfff
+ self.fields['treatment_inner_priority'] = 0xf
+ self.fields['treatment_inner_vid'] = 0x1fff
+ self.fields['treatment_inner_tpid_de'] = 0x7
+ return self
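+ # Illustrative sketch only (not part of the original check-in): a row can be
+ # flagged as a delete entry and serialized for storage, e.g.
+ #
+ # row = VlanTaggingOperation(filter_outer_vid=1024, filter_inner_vid=1025).delete()
+ # assert row.is_delete()
+ # blob = row.to_json()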
+
+
+class ExtendedVlanTaggingOperationConfigurationData(EntityClass):
+ class_id = 171
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("association_type", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 11),
+ ECA(ShortField("received_vlan_tagging_operation_table_max_size", None),
+ {AA.R}),
+ ECA(ShortField("input_tpid", None), {AA.R, AA.W}),
+ ECA(ShortField("output_tpid", None), {AA.R, AA.W}),
+ ECA(ByteField("downstream_mode", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 8),
+ ECA(OmciTableField(
+ PacketLenField("received_frame_vlan_tagging_operation_table", None,
+ VlanTaggingOperation, length_from=lambda pkt: 16)), {AA.R, AA.W}),
+ ECA(ShortField("associated_me_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(FieldListField("dscp_to_p_bit_mapping", None,
+ BitField('', 0, size=3), count_from=lambda _: 64),
+ {AA.R, AA.W}),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Set, OP.Get, OP.GetNext}
+ optional_operations = {OP.SetTable}
+
+
+class OntG(EntityClass):
+ class_id = 256
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: x == 0),
+ ECA(StrFixedLenField("vendor_id", None, 4), {AA.R}),
+ ECA(StrFixedLenField("version", None, 14), {AA.R}),
+ ECA(OmciSerialNumberField("serial_number"), {AA.R}),
+ ECA(ByteField("traffic_management_option", None), {AA.R},
+ range_check=lambda x: 0 <= x <= 2),
+ ECA(ByteField("deprecated", 0), {AA.R},
+ optional=True, deprecated=True),
+ ECA(ByteField("battery_backup", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("administrative_state", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("operational_state", None), {AA.R}, optional=True,
+ range_check=lambda x: 0 <= x <= 1, avc=True),
+ ECA(ByteField("ont_survival_time", None), {AA.R}, optional=True),
+ ECA(StrFixedLenField("logical_onu_id", None, 24), {AA.R},
+ optional=True, avc=True),
+ ECA(StrFixedLenField("logical_password", None, 12), {AA.R},
+ optional=True, avc=True),
+ ECA(ByteField("credentials_status", None), {AA.R, AA.W},
+ optional=True, range_check=lambda x: 0 <= x <= 4),
+ ECA(BitField("extended_tc_layer_options", None, size=16), {AA.R},
+ optional=True),
+ ]
+ mandatory_operations = {
+ OP.Get, OP.Set, OP.Reboot, OP.Test, OP.SynchronizeTime}
+ notifications = {OP.TestResult, OP.AttributeValueChange,
+ OP.AlarmNotification}
+ alarms = {
+ 0: 'Equipment alarm',
+ 1: 'Powering alarm',
+ 2: 'Battery missing',
+ 3: 'Battery failure',
+ 4: 'Battery low',
+ 5: 'Physical intrusion',
+ 6: 'Self-test failure',
+ 7: 'Dying gasp',
+ 8: 'Temperature yellow',
+ 9: 'Temperature red',
+ 10: 'Voltage yellow',
+ 11: 'Voltage red',
+ 12: 'ONU manual power off',
+ 13: 'Invalid image',
+ 14: 'PSE overload yellow',
+ 15: 'PSE overload red',
+ }
+
+
+class Ont2G(EntityClass):
+ class_id = 257
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: x == 0),
+ ECA(StrFixedLenField("equipment_id", None, 20), {AA.R}),
+ ECA(ByteField("omcc_version", None), {AA.R}, avc=True),
+ ECA(ShortField("vendor_product_code", None), {AA.R}),
+ ECA(ByteField("security_capability", None), {AA.R},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("security_mode", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("total_priority_queue_number", None), {AA.R}),
+ ECA(ByteField("total_traffic_scheduler_number", None), {AA.R}),
+ ECA(ByteField("deprecated", None), {AA.R}, deprecated=True),
+ ECA(ShortField("total_gem_port_id_number", None), {AA.R}),
+ ECA(IntField("sys_uptime", None), {AA.R}),
+ ECA(BitField("connectivity_capability", None, size=16), {AA.R}),
+ ECA(ByteField("current_connectivity_mode", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 7),
+ ECA(BitField("qos_configuration_flexibility", None, size=16),
+ {AA.R}, optional=True),
+ ECA(ShortField("priority_queue_scale_factor", None), {AA.R, AA.W},
+ optional=True),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+ notifications = {OP.AttributeValueChange}
+
+
+class Tcont(EntityClass):
+ class_id = 262
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(ShortField("alloc_id", None), {AA.R, AA.W}),
+ ECA(ByteField("deprecated", 1), {AA.R}, deprecated=True),
+ ECA(ByteField("policy", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 2),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+
+
+class AniG(EntityClass):
+ class_id = 263
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(ByteField("sr_indication", None), {AA.R}),
+ ECA(ShortField("total_tcont_number", None), {AA.R}),
+ ECA(ShortField("gem_block_length", None), {AA.R, AA.W}),
+ ECA(ByteField("piggyback_dba_reporting", None), {AA.R},
+ range_check=lambda x: 0 <= x <= 4),
+ ECA(ByteField("deprecated", None), {AA.R}, deprecated=True),
+ ECA(ByteField("sf_threshold", 5), {AA.R, AA.W}),
+ ECA(ByteField("sd_threshold", 9), {AA.R, AA.W}),
+ ECA(ByteField("arc", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1, avc=True),
+ ECA(ByteField("arc_interval", 0), {AA.R, AA.W}),
+ ECA(ShortField("optical_signal_level", None), {AA.R}),
+ ECA(ByteField("lower_optical_threshold", 0xFF), {AA.R, AA.W}),
+ ECA(ByteField("upper_optical_threshold", 0xFF), {AA.R, AA.W}),
+ ECA(ShortField("onu_response_time", None), {AA.R}),
+ ECA(ShortField("transmit_optical_level", None), {AA.R}),
+ ECA(ByteField("lower_transmit_power_threshold", 0x81), {AA.R, AA.W}),
+ ECA(ByteField("upper_transmit_power_threshold", 0x81), {AA.R, AA.W}),
+ ]
+ mandatory_operations = {OP.Get, OP.Set, OP.Test}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 0: 'Low received optical power',
+ 1: 'High received optical power',
+ 2: 'Signal fail',
+ 3: 'Signal degrade',
+ 4: 'Low transmit optical power',
+ 5: 'High transmit optical power',
+ 6: 'Laser bias current',
+ }
+
+
+class UniG(EntityClass):
+ class_id = 264
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(ShortField("deprecated", None), {AA.R, AA.W}, deprecated=True),
+ ECA(ByteField("administrative_state", None), {AA.R, AA.W}),
+ ECA(ByteField("management_capability", None), {AA.R},
+ range_check=lambda x: 0 <= x <= 2),
+ ECA(ShortField("non_omci_management_identifier", None), {AA.R, AA.W}),
+ ECA(ShortField("relay_agent_options", None), {AA.R, AA.W},
+ optional=True),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+
+
+class GemInterworkingTp(EntityClass):
+ class_id = 266
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ShortField("gem_port_network_ctp_pointer", None),
+ {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("interworking_option", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 7),
+ ECA(ShortField("service_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interworking_tp_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("pptp_counter", None), {AA.R}, optional=True),
+ ECA(ByteField("operational_state", None), {AA.R}, optional=True,
+ range_check=lambda x: 0 <= x <= 1, avc=True),
+ ECA(ShortField("gal_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("gal_loopback_configuration", 0),
+ {AA.R, AA.W}, range_check=lambda x: 0 <= x <= 1),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 6: 'Operational state change',
+ }
+
+
+class GemPortNetworkCtp(EntityClass):
+ class_id = 268
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ShortField("port_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("tcont_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("direction", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 1 <= x <= 3),
+ ECA(ShortField("traffic_management_pointer_upstream", None),
+ {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("traffic_descriptor_profile_pointer", None),
+ {AA.R, AA.W, AA.SBC}, optional=True),
+ ECA(ByteField("uni_counter", None), {AA.R}, optional=True),
+ ECA(ShortField("priority_queue_pointer_downstream", None),
+ {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("encryption_state", None), {AA.R}, optional=True),
+ ECA(ShortField("traffic_desc_profile_pointer_downstream", None),
+ {AA.R, AA.W, AA.SBC}, optional=True),
+ ECA(ShortField("encryption_key_ring", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 3)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 5: 'End-to-end loss of continuity',
+ }
+
+
+class GalEthernetProfile(EntityClass):
+ class_id = 272
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ShortField("max_gem_payload_size", None), {AA.R, AA.W, AA.SBC}),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class PriorityQueueG(EntityClass):
+ class_id = 277
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(ByteField("queue_configuration_option", None), {AA.R},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("maximum_queue_size", None), {AA.R}),
+ ECA(ShortField("allocated_queue_size", None), {AA.R, AA.W}),
+ ECA(ShortField("discard_block_counter_reset_interval", None), {AA.R, AA.W}),
+ ECA(ShortField("threshold_value_for_discarded_blocks", None), {AA.R, AA.W}),
+ ECA(IntField("related_port", None), {AA.R}),
+ ECA(ShortField("traffic_scheduler_pointer", 0), {AA.R, AA.W}),
+ ECA(ByteField("weight", 1), {AA.R, AA.W}),
+ ECA(ShortField("back_pressure_operation", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(IntField("back_pressure_time", 0), {AA.R, AA.W}),
+ ECA(ShortField("back_pressure_occur_queue_threshold", None), {AA.R, AA.W}),
+ ECA(ShortField("back_pressure_clear_queue_threshold", None), {AA.R, AA.W}),
+ # TODO: Custom field of 4 2-byte values would help below
+ ECA(LongField("packet_drop_queue_thresholds", None), {AA.R, AA.W},
+ optional=True),
+ ECA(ShortField("packet_drop_max_p", 0xFFFF), {AA.R, AA.W}, optional=True),
+ ECA(ByteField("queue_drop_w_q", 9), {AA.R, AA.W}, optional=True),
+ ECA(ByteField("drop_precedence_colour_marking", 0), {AA.R, AA.W},
+ optional=True, range_check=lambda x: 0 <= x <= 7),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Block loss',
+ }
+
+
+class TrafficSchedulerG(EntityClass):
+ class_id = 278
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(ShortField("tcont_pointer", None), {AA.R}),
+ ECA(ShortField("traffic_scheduler_pointer", None), {AA.R}),
+ ECA(ByteField("policy", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 2),
+ ECA(ByteField("priority_weight", 0), {AA.R, AA.W}),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+
+
+class MulticastGemInterworkingTp(EntityClass):
+ class_id = 281
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC},
+ range_check=lambda x: x != OmciNullPointer),
+ ECA(ShortField("gem_port_network_ctp_pointer", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interworking_option", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: x in [0, 1, 3, 5]),
+ ECA(ShortField("service_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("interworking_tp_pointer", 0), {AA.R, AA.W, AA.SBC},
+ deprecated=True),
+ ECA(ByteField("pptp_counter", None), {AA.R}),
+ ECA(ByteField("operational_state", None), {AA.R}, avc=True,
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("gal_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("gal_loopback_configuration", None), {AA.R, AA.W, AA.SBC},
+ deprecated=True),
+ # TODO add multicast_address_table here (page 85 of spec.)
+ # ECA(...("multicast_address_table", None), {AA.R, AA.W})
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.GetNext, OP.Set}
+ optional_operations = {OP.SetTable}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 0: 'Deprecated',
+ }
+
+
+class AccessControlRow0(Packet):
+ name = "AccessControlRow0"
+ fields_desc = [
+ BitField("set_ctrl", 0, 2),
+ BitField("row_part_id", 0, 3),
+ BitField("test", 0, 1),
+ BitField("row_key", 0, 10),
+
+ ShortField("gem_port_id", None),
+ ShortField("vlan_id", None),
+ IPField("src_ip", None),
+ IPField("dst_ip_start", None),
+ IPField("dst_ip_end", None),
+ IntField("ipm_group_bw", None),
+ ShortField("reserved0", 0)
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AccessControlRow1(Packet):
+ name = "AccessControlRow1"
+ fields_desc = [
+ BitField("set_ctrl", 0, 2),
+ BitField("row_part_id", 0, 3),
+ BitField("test", 0, 1),
+ BitField("row_key", 0, 10),
+
+ StrFixedLenField("ipv6_src_addr_start_bytes", None, 12),
+ ShortField("preview_length", None),
+ ShortField("preview_repeat_time", None),
+ ShortField("preview_repeat_count", None),
+ ShortField("preview_reset_time", None),
+ ShortField("reserved1", 0)
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AccessControlRow2(Packet):
+ name = "AccessControlRow2"
+ fields_desc = [
+ BitField("set_ctrl", 0, 2),
+ BitField("row_part_id", 0, 3),
+ BitField("test", 0, 1),
+ BitField("row_key", 0, 10),
+
+ StrFixedLenField("ipv6_dst_addr_start_bytes", None, 12),
+ StrFixedLenField("reserved2", None, 10)
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+
+class DownstreamIgmpMulticastTci(Packet):
+ name = "DownstreamIgmpMulticastTci"
+ fields_desc = [
+ ByteField("ctrl_type", None),
+ ShortField("tci", None)
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+
+class MulticastOperationsProfile(EntityClass):
+ class_id = 309
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC},
+ range_check=lambda x: x != 0 and x != OmciNullPointer),
+ ECA(ByteField("igmp_version", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: x in [1, 2, 3, 16, 17]),
+ ECA(ByteField("igmp_function", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 2),
+ ECA(ByteField("immediate_leave", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("us_igmp_tci", None), {AA.R, AA.W, AA.SBC}, optional=True),
+ ECA(ByteField("us_igmp_tag_ctrl", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 3, optional=True),
+ ECA(IntField("us_igmp_rate", None), {AA.R, AA.W, AA.SBC}, optional=True),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "dynamic_access_control_list_table", None, 24), {AA.R, AA.W}),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "static_access_control_list_table", None, 24), {AA.R, AA.W}),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField("lost_groups_list_table", None, 10), {AA.R}),
+ ECA(ByteField("robustness", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("querier_ip", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("query_interval", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("querier_max_response_time", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("last_member_response_time", 10), {AA.R, AA.W}),
+ ECA(ByteField("unauthorized_join_behaviour", None), {AA.R, AA.W}),
+ ECA(StrFixedLenField("ds_igmp_mcast_tci", None, 3), {AA.R, AA.W, AA.SBC}, optional=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Set, OP.Get, OP.GetNext}
+ optional_operations = {OP.SetTable}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Lost multicast group',
+ }
+
+
+class MulticastServicePackage(Packet):
+ name = "MulticastServicePackage"
+ fields_desc = [
+ BitField("set_ctrl", 0, 2),
+ BitField("reserved0", 0, 4),
+ BitField("row_key", 0, 10),
+
+ ShortField("vid_uni", None),
+ ShortField("max_simultaneous_groups", None),
+ IntField("max_multicast_bw", None),
+ ShortField("mcast_operations_profile_pointer", None),
+ StrFixedLenField("reserved1", None, 8)
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AllowedPreviewGroupsRow0(Packet):
+ name = "AllowedPreviewGroupsRow0"
+ fields_desc = [
+ BitField("set_ctrl", 0, 2),
+ BitField("row_part_id", 0, 3),
+ BitField("reserved0", 0, 1),
+ BitField("row_key", 0, 10),
+
+ StrFixedLenField("ipv6_pad", 0, 12),
+ IPField("src_ip", None),
+ ShortField("vlan_id_ani", None),
+ ShortField("vlan_id_uni", None)
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AllowedPreviewGroupsRow1(Packet):
+ name = "AllowedPreviewGroupsRow1"
+ fields_desc = [
+ BitField("set_ctrl", 0, 2),
+ BitField("row_part_id", 0, 3),
+ BitField("reserved0", 0, 1),
+ BitField("row_key", 0, 10),
+
+ StrFixedLenField("ipv6_pad", 0, 12),
+ IPField("dst_ip", None),
+ ShortField("duration", None),
+ ShortField("time_left", None)
+ ]
+
+ def to_json(self):
+ return json.dumps(self.fields, separators=(',', ':'))
+
+
+class MulticastSubscriberConfigInfo(EntityClass):
+ class_id = 310
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("me_type", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ShortField("mcast_operations_profile_pointer", None),
+ {AA.R, AA.W, AA.SBC}),
+ ECA(ShortField("max_simultaneous_groups", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("max_multicast_bandwidth", None), {AA.R, AA.W, AA.SBC}),
+ ECA(ByteField("bandwidth_enforcement", None), {AA.R, AA.W, AA.SBC},
+ range_check=lambda x: 0 <= x <= 1),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "multicast_service_package_table", None, 20), {AA.R, AA.W}),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "allowed_preview_groups_table", None, 22), {AA.R, AA.W}),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Set, OP.Get, OP.GetNext,
+ OP.SetTable}
+
+
+class VirtualEthernetInterfacePt(EntityClass):
+ class_id = 329
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: x != 0 and x != OmciNullPointer),
+ ECA(ByteField("administrative_state", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("operational_state", None), {AA.R}, avc=True,
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(StrFixedLenField(
+ "interdomain_name", None, 25), {AA.R, AA.W}, optional=True),
+ ECA(ShortField("tcp_udp_pointer", None), {AA.R, AA.W}, optional=True),
+ ECA(ShortField("iana_assigned_port", None), {AA.R}),
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 0: 'Connecting function fail',
+ }
+
+
+class Omci(EntityClass):
+ class_id = 287
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R},
+ range_check=lambda x: x == 0),
+
+ # TODO: Can this be expressed better in scapy? Probably not.
+ # On the initial Get request for either the me_type or message_type
+ # attributes, you will receive a 4 octet value (big endian) that is
+ # the number of octets to 'get-next' to fully load the desired
+ # attribute. For a basic OMCI formatted message, that will be 29
+ # octets per get-request.
+ #
+ # For the me_type_table, these are 16-bit values (ME Class IDs)
+ #
+ # For the message_type_table, these are 8-bit values (Actions)
+
+ ECA(FieldListField("me_type_table", None, ByteField('', 0),
+ count_from=lambda _: 29), {AA.R}),
+ ECA(FieldListField("message_type_table", None, ByteField('', 0),
+ count_from=lambda _: 29), {AA.R}),
+ ]
+ mandatory_operations = {OP.Get, OP.GetNext}
+
+
+class EnhSecurityControl(EntityClass):
+ class_id = 332
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(BitField("olt_crypto_capabilities", None, 16*8), {AA.W}),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "olt_random_challenge_table", None, 17), {AA.R, AA.W}),
+ ECA(ByteField("olt_challenge_status", 0), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("onu_selected_crypto_capabilities", None), {AA.R}),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "onu_random_challenge_table", None, 16), {AA.R}, avc=True),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "onu_authentication_result_table", None, 16), {AA.R}, avc=True),
+ # TODO: need to make table and add column data
+ ECA(StrFixedLenField(
+ "olt_authentication_result_table", None, 17), {AA.W}),
+ ECA(ByteField("olt_result_status", None), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("onu_authentication_status", None), {AA.R}, avc=True,
+ range_check=lambda x: 0 <= x <= 5),
+ ECA(StrFixedLenField(
+ "master_session_key_name", None, 16), {AA.R}),
+ ECA(StrFixedLenField(
+ "broadcast_key_table", None, 18), {AA.R, AA.W}),
+ ECA(ShortField("effective_key_length", None), {AA.R}),
+ ]
+ mandatory_operations = {OP.Set, OP.Get, OP.GetNext}
+ notifications = {OP.AttributeValueChange}
+
+
+class EthernetPMMonitoringHistoryData(EntityClass):
+ class_id = 24
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("fcs_errors", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("excessive_collision_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("late_collision_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("frames_too_long", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("buffer_overflows_on_rx", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("buffer_overflows_on_tx", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("single_collision_frame_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("multiple_collisions_frame_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("sqe_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("deferred_tx_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("internal_mac_tx_error_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("carrier_sense_error_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("alignment_error_counter", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("internal_mac_rx_error_counter", None), {AA.R}, tca=True, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'FCS errors',
+ 1: 'Excessive collision counter',
+ 2: 'Late collision counter',
+ 3: 'Frames too long',
+ 4: 'Buffer overflows on receive',
+ 5: 'Buffer overflows on transmit',
+ 6: 'Single collision frame counter',
+ 7: 'Multiple collision frame counter',
+ 8: 'SQE counter',
+ 9: 'Deferred transmission counter',
+ 10: 'Internal MAC transmit error counter',
+ 11: 'Carrier sense error counter',
+ 12: 'Alignment error counter',
+ 13: 'Internal MAC receive error counter',
+ }
+
+
+class FecPerformanceMonitoringHistoryData(EntityClass):
+ class_id = 312
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("corrected_bytes", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("corrected_code_words", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("uncorrectable_code_words", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("total_code_words", None), {AA.R}, counter=True),
+ ECA(ShortField("fec_seconds", None), {AA.R}, tca=True, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Corrected bytes',
+ 1: 'Corrected code words',
+ 2: 'Uncorrectable code words',
+ 4: 'FEC seconds',
+ }
+
+
+class EthernetFrameDownstreamPerformanceMonitoringHistoryData(EntityClass):
+ class_id = 321
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("drop_events", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("octets", None), {AA.R}, counter=True),
+ ECA(IntField("packets", None), {AA.R}, counter=True),
+ ECA(IntField("broadcast_packets", None), {AA.R}, counter=True),
+ ECA(IntField("multicast_packets", None), {AA.R}, counter=True),
+ ECA(IntField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("64_octets", None), {AA.R}, counter=True),
+ ECA(IntField("65_to_127_octets", None), {AA.R}, counter=True),
+ ECA(IntField("128_to_255_octets", None), {AA.R}, counter=True),
+ ECA(IntField("256_to_511_octets", None), {AA.R}, counter=True),
+ ECA(IntField("512_to_1023_octets", None), {AA.R}, counter=True),
+ ECA(IntField("1024_to_1518_octets", None), {AA.R}, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Drop events',
+ 1: 'CRC errored packets',
+ 2: 'Undersize packets',
+ 3: 'Oversize packets',
+ }
+
+
+class EthernetFrameUpstreamPerformanceMonitoringHistoryData(EntityClass):
+ class_id = 322
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("drop_events", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("octets", None), {AA.R}, counter=True),
+ ECA(IntField("packets", None), {AA.R}, counter=True),
+ ECA(IntField("broadcast_packets", None), {AA.R}, counter=True),
+ ECA(IntField("multicast_packets", None), {AA.R}, counter=True),
+ ECA(IntField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("64_octets", None), {AA.R}, counter=True),
+ ECA(IntField("65_to_127_octets", None), {AA.R}, counter=True),
+ ECA(IntField("128_to_255_octets", None), {AA.R}, counter=True),
+ ECA(IntField("256_to_511_octets", None), {AA.R}, counter=True),
+ ECA(IntField("512_to_1023_octets", None), {AA.R}, counter=True),
+ ECA(IntField("1024_to_1518_octets", None), {AA.R}, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Drop events',
+ 1: 'CRC errored packets',
+ 2: 'Undersize packets',
+ 3: 'Oversize packets',
+ }
+
+
+class VeipUni(EntityClass):
+ class_id = 329
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R}),
+ ECA(ByteField("administrative_state", 1), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1),
+ ECA(ByteField("operational_state", 1), {AA.R, AA.W},
+ range_check=lambda x: 0 <= x <= 1, optional=True, avc=True),
+ ECA(StrFixedLenField("interdomain_name", None, 25), {AA.R, AA.W},
+ optional=True),
+ ECA(ShortField("tcp_udp_pointer", None), {AA.R, AA.W}, optional=True),
+ ECA(ShortField("iana_assigned_port", 0xFFFF), {AA.R})
+ ]
+ mandatory_operations = {OP.Get, OP.Set}
+ notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+ alarms = {
+ 0: 'Connecting function fail'
+ }
+
+
+class EthernetFrameExtendedPerformanceMonitoring(EntityClass):
+ class_id = 334
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ # 2-octet field -> Threshold data 1/2 ID
+ # 2-octet field -> Parent ME Class
+ # 2-octet field -> Parent ME Instance
+ # 2-octet field -> Accumulation disable
+ # 2-octet field -> TCA Disable
+ # 2-octet field -> Control fields bitmap
+ # 2-octet field -> TCI
+ # 2-octet field -> Reserved
+ ECA(FieldListField("control_block", None, ShortField('', 0),
+ count_from=lambda _: 8), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("drop_events", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("octets", None), {AA.R}, counter=True),
+ ECA(IntField("packets", None), {AA.R}, counter=True),
+ ECA(IntField("broadcast_packets", None), {AA.R}, counter=True),
+ ECA(IntField("multicast_packets", None), {AA.R}, counter=True),
+ ECA(IntField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("64_octets", None), {AA.R}, counter=True),
+ ECA(IntField("65_to_127_octets", None), {AA.R}, counter=True),
+ ECA(IntField("128_to_255_octets", None), {AA.R}, counter=True),
+ ECA(IntField("256_to_511_octets", None), {AA.R}, counter=True),
+ ECA(IntField("512_to_1023_octets", None), {AA.R}, counter=True),
+ ECA(IntField("1024_to_1518_octets", None), {AA.R}, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ optional_operations = {OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Drop events',
+ 1: 'CRC errored packets',
+ 2: 'Undersize packets',
+ 3: 'Oversize packets',
+ }
+
+
+class EthernetFrameExtendedPerformanceMonitoring64Bit(EntityClass):
+ class_id = 426
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ # 2-octet field -> Threshold data 1/2 ID
+ # 2-octet field -> Parent ME Class
+ # 2-octet field -> Parent ME Instance
+ # 2-octet field -> Accumulation disable
+ # 2-octet field -> TCA Disable
+ # 2-octet field -> Control fields bitmap
+ # 2-octet field -> TCI
+ # 2-octet field -> Reserved
+ ECA(FieldListField("control_block", None, ShortField('', 0),
+ count_from=lambda _: 8), {AA.R, AA.W, AA.SBC}),
+ ECA(LongField("drop_events", None), {AA.R}, tca=True, counter=True),
+ ECA(LongField("octets", None), {AA.R}, counter=True),
+ ECA(LongField("packets", None), {AA.R}, counter=True),
+ ECA(LongField("broadcast_packets", None), {AA.R}, counter=True),
+ ECA(LongField("multicast_packets", None), {AA.R}, counter=True),
+ ECA(LongField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(LongField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(LongField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+ ECA(LongField("64_octets", None), {AA.R}, counter=True),
+ ECA(LongField("65_to_127_octets", None), {AA.R}, counter=True),
+ ECA(LongField("128_to_255_octets", None), {AA.R}, counter=True),
+ ECA(LongField("256_to_511_octets", None), {AA.R}, counter=True),
+ ECA(LongField("512_to_1023_octets", None), {AA.R}, counter=True),
+ ECA(LongField("1024_to_1518_octets", None), {AA.R}, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ optional_operations = {OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 0: 'Drop events',
+ 1: 'CRC errored packets',
+ 2: 'Undersize packets',
+ 3: 'Oversize packets',
+ }
+
+
+class GemPortNetworkCtpMonitoringHistoryData(EntityClass):
+ class_id = 341
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("transmitted_gem_frames", None), {AA.R}, counter=True),
+ ECA(IntField("received_gem_frames", None), {AA.R}, counter=True),
+ ECA(LongField("received_payload_bytes", None), {AA.R}, counter=True),
+ ECA(LongField("transmitted_payload_bytes", None), {AA.R}, counter=True),
+ ECA(IntField("encryption_key_errors", None), {AA.R}, tca=True, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 1: 'Encryption key errors',
+ }
+
+
+class XgPonTcPerformanceMonitoringHistoryData(EntityClass):
+ class_id = 344
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("psbd_hec_error_count", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("xgtc_hec_error_count", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("unknown_profile_count", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("transmitted_xgem_frames", None), {AA.R}, counter=True),
+ ECA(IntField("fragment_xgem_frames", None), {AA.R}, counter=True),
+ ECA(IntField("xgem_hec_lost_words_count", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("xgem_key_errors", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("xgem_hec_error_count", None), {AA.R}, tca=True, counter=True)
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ optional_operations = {OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 1: 'PSBd HEC error count',
+ 2: 'XGTC HEC error count',
+ 3: 'Unknown profile count',
+ 4: 'XGEM HEC loss count',
+ 5: 'XGEM key errors',
+ 6: 'XGEM HEC error count',
+ }
+
+
+class XgPonDownstreamPerformanceMonitoringHistoryData(EntityClass):
+ class_id = 345
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R},),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("ploam_mic_error_count", None), {AA.R}, tca=True, counter=True),
+ ECA(IntField("downstream_ploam_messages_count", None), {AA.R}, counter=True),
+ ECA(IntField("profile_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("ranging_time_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("deactivate_onu_id_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("disable_serial_number_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("request_registration_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("assign_alloc_id_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("key_control_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("sleep_allow_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("baseline_omci_messages_received_count", None), {AA.R}, counter=True),
+ ECA(IntField("extended_omci_messages_received_count", None), {AA.R}, counter=True),
+ ECA(IntField("assign_onu_id_messages_received", None), {AA.R}, counter=True),
+ ECA(IntField("omci_mic_error_count", None), {AA.R}, tca=True, counter=True),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ optional_operations = {OP.GetCurrentData}
+ notifications = {OP.AlarmNotification}
+ alarms = {
+ 1: 'PLOAM MIC error count',
+ 2: 'OMCI MIC error count',
+ }
+
+
+class XgPonUpstreamPerformanceMonitoringHistoryData(EntityClass):
+ class_id = 346
+ hidden = True
+ attributes = [
+ ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+ ECA(ByteField("interval_end_time", None), {AA.R}),
+ ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+ ECA(IntField("upstream_ploam_message_count", None), {AA.R}, counter=True),
+ ECA(IntField("serial_number_onu_message_count", None), {AA.R}, counter=True),
+ ECA(IntField("registration_message_count", None), {AA.R}, counter=True),
+ ECA(IntField("key_report_message_count", None), {AA.R}, counter=True),
+ ECA(IntField("acknowledge_message_count", None), {AA.R}, counter=True),
+ ECA(IntField("sleep_request_message_count", None), {AA.R}, counter=True),
+ ]
+ mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+ optional_operations = {OP.GetCurrentData}
+
+
+# entity class lookup table from entity_class values
+entity_classes_name_map = dict(
+ inspect.getmembers(sys.modules[__name__],
+ lambda o: inspect.isclass(o) and \
+ issubclass(o, EntityClass) and \
+ o is not EntityClass)
+)
+
+entity_classes = [c for c in entity_classes_name_map.itervalues()]
+entity_id_to_class_map = dict((c.class_id, c) for c in entity_classes)
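+
+# Illustrative lookup sketch: a decoded class_id resolves to its EntityClass
+# through the map above, e.g. entity_id_to_class_map.get(268) yields
+# GemPortNetworkCtp, and entity_classes_name_map['GalEthernetProfile'] yields
+# the class registered for class_id 272.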
diff --git a/python/adapters/extensions/omci/omci_fields.py b/python/adapters/extensions/omci/omci_fields.py
new file mode 100644
index 0000000..56e985b
--- /dev/null
+++ b/python/adapters/extensions/omci/omci_fields.py
@@ -0,0 +1,242 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import binascii
+import json
+from scapy.fields import Field, StrFixedLenField, PadField, IntField, FieldListField, ByteField, StrField, \
+ PacketField
+from scapy.packet import Raw
+
+class FixedLenField(PadField):
+ """
+ This Pad field limits parsing of its content to its size
+ """
+ def __init__(self, fld, align, padwith='\x00'):
+ super(FixedLenField, self).__init__(fld, align, padwith)
+
+ def getfield(self, pkt, s):
+ remain, val = self._fld.getfield(pkt, s[:self._align])
+ if isinstance(val.payload, Raw) and \
+ not val.payload.load.replace(self._padwith, ''):
+ # raw payload is just padding
+ val.remove_payload()
+ return remain + s[self._align:], val
+
+
+class StrCompoundField(Field):
+ __slots__ = ['flds']
+
+ def __init__(self, name, flds):
+ super(StrCompoundField, self).__init__(name=name, default=None, fmt='s')
+ self.flds = flds
+ for fld in self.flds:
+ assert not fld.holds_packets, 'compound field cannot have packet field members'
+
+ def addfield(self, pkt, s, val):
+ for fld in self.flds:
+ # run through a fake add/get to consume the relevant portion of the input value for this field
+ x, extracted = fld.getfield(pkt, fld.addfield(pkt, '', val))
+ l = len(extracted)
+ s = fld.addfield(pkt, s, val[0:l])
+ val = val[l:]
+ return s
+
+ def getfield(self, pkt, s):
+ data = ''
+ for fld in self.flds:
+ s, value = fld.getfield(pkt, s)
+ if not isinstance(value, str):
+ value = fld.i2repr(pkt, value)
+ data += value
+ return s, data
+
+
+class XStrFixedLenField(StrFixedLenField):
+ """
+ A StrFixedLenField whose value is rendered as hexadecimal.
+ """
+ def i2m(self, pkt, x):
+ l = self.length_from(pkt) * 2
+ return None if x is None else binascii.a2b_hex(x)[0:l+1]
+
+ def m2i(self, pkt, x):
+ return None if x is None else binascii.b2a_hex(x)
+
+
+class MultipleTypeField(object):
+ """MultipleTypeField are used for fields that can be implemented by
+ various Field subclasses, depending on conditions on the packet.
+
+ It is initialized with `flds` and `default`.
+
+ `default` is the default field type, to be used when none of the
+ conditions matched the current packet.
+
+ `flds` is a list of tuples (`fld`, `cond`), where `fld` if a field
+ type, and `cond` a "condition" to determine if `fld` is the field type
+ that should be used.
+
+ `cond` is either:
+
+ - a callable `cond_pkt` that accepts one argument (the packet) and
+ returns True if `fld` should be used, False otherwise.
+
+ - a tuple (`cond_pkt`, `cond_pkt_val`), where `cond_pkt` is the same
+ as in the previous case and `cond_pkt_val` is a callable that
+ accepts two arguments (the packet, and the value to be set) and
+ returns True if `fld` should be used, False otherwise.
+
+ See scapy.layers.l2.ARP (type "help(ARP)" in Scapy) for an example of
+ use.
+ """
+
+ __slots__ = ["flds", "default", "name"]
+
+ def __init__(self, flds, default):
+ self.flds = flds
+ self.default = default
+ self.name = self.default.name
+
+ def _find_fld_pkt(self, pkt):
+ """Given a Packet instance `pkt`, returns the Field subclass to be
+ used. If you know the value to be set (e.g., in .addfield()), use
+ ._find_fld_pkt_val() instead.
+ """
+ for fld, cond in self.flds:
+ if isinstance(cond, tuple):
+ cond = cond[0]
+ if cond(pkt):
+ return fld
+ return self.default
+
+ def _find_fld_pkt_val(self, pkt, val):
+ """Given a Packet instance `pkt` and the value `val` to be set,
+ returns the Field subclass to be used.
+ """
+ for fld, cond in self.flds:
+ if isinstance(cond, tuple):
+ if cond[1](pkt, val):
+ return fld
+ elif cond(pkt):
+ return fld
+ return self.default
+
+ def getfield(self, pkt, s):
+ return self._find_fld_pkt(pkt).getfield(pkt, s)
+
+ def addfield(self, pkt, s, val):
+ return self._find_fld_pkt_val(pkt, val).addfield(pkt, s, val)
+
+ def any2i(self, pkt, val):
+ return self._find_fld_pkt_val(pkt, val).any2i(pkt, val)
+
+ def h2i(self, pkt, val):
+ return self._find_fld_pkt_val(pkt, val).h2i(pkt, val)
+
+ def i2h(self, pkt, val):
+ return self._find_fld_pkt_val(pkt, val).i2h(pkt, val)
+
+ def i2m(self, pkt, val):
+ return self._find_fld_pkt_val(pkt, val).i2m(pkt, val)
+
+ def i2len(self, pkt, val):
+ return self._find_fld_pkt_val(pkt, val).i2len(pkt, val)
+
+ def i2repr(self, pkt, val):
+ return self._find_fld_pkt_val(pkt, val).i2repr(pkt, val)
+
+ def register_owner(self, cls):
+ for fld, _ in self.flds:
+ fld.owners.append(cls)
+ self.default.owners.append(cls)
+
+ def __getattr__(self, attr):
+ # No packet context is available here, so delegate to the default field type
+ return getattr(self.default, attr)
+
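+# Usage sketch (hypothetical field names, not taken from this module): a field
+# that should be read as an IntField when pkt.version == 2 and as a ShortField
+# otherwise could be declared roughly as
+#   MultipleTypeField(
+#       [(IntField('value', 0), lambda pkt: pkt.version == 2)],
+#       ShortField('value', 0))
+# OmciTableField below builds on this pattern with (cond_pkt, cond_pkt_val)
+# tuples so the field type can also be chosen from the value being set.
+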
+class OmciSerialNumberField(StrCompoundField):
+ def __init__(self, name, default=None):
+ assert default is None or (isinstance(default, str) and len(default) == 12), 'invalid default serial number'
+ vendor_default = default[0:4] if default is not None else None
+ vendor_serial_default = default[4:12] if default is not None else None
+ super(OmciSerialNumberField, self).__init__(name,
+ [StrFixedLenField('vendor_id', vendor_default, 4),
+ XStrFixedLenField('vendor_serial_number', vendor_serial_default, 4)])
+
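+# Sketch of the expected default format (hypothetical value): a 12-character
+# string such as 'BRCM01234567' splits into the 4-character vendor_id 'BRCM'
+# and the 8 hex characters '01234567', which encode the 4-byte vendor-specific
+# part of the serial number on the wire.
+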
+class OmciTableField(MultipleTypeField):
+ def __init__(self, tblfld):
+ assert isinstance(tblfld, PacketField)
+ assert hasattr(tblfld.cls, 'index'), 'No index() method defined for OmciTableField row object'
+ assert hasattr(tblfld.cls, 'is_delete'), 'No is_delete() method defined for OmciTableField row object'
+ super(OmciTableField, self).__init__(
+ [
+ (IntField('table_length', 0), (self.cond_pkt, self.cond_pkt_val)),
+ (PadField(StrField('me_type_table', None), OmciTableField.PDU_SIZE),
+ (self.cond_pkt2, self.cond_pkt_val2))
+ ], tblfld)
+
+ PDU_SIZE = 29 # Baseline message set raw get-next PDU size
+ OmciGetResponseMessageId = 0x29 # Ugh circular dependency
+ OmciGetNextResponseMessageId = 0x3a # Ugh circular dependency
+
+ def cond_pkt(self, pkt):
+ return pkt is not None and pkt.message_id == self.OmciGetResponseMessageId
+
+ def cond_pkt_val(self, pkt, val):
+ return pkt is not None and pkt.message_id == self.OmciGetResponseMessageId
+
+ def cond_pkt2(self, pkt):
+ return pkt is not None and pkt.message_id == self.OmciGetNextResponseMessageId
+
+ def cond_pkt_val2(self, pkt, val):
+ return pkt is not None and pkt.message_id == self.OmciGetNextResponseMessageId
+
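+ # Merge sketch for to_json below: when the stored JSON already holds rows
+ # with index() values 1 and 2, a single new row with index() 2 replaces
+ # only that row, a new row whose is_delete() is true removes it, and a
+ # list of new rows discards the stored JSON and rebuilds the table.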
+ def to_json(self, new_values, old_values_json):
+ if not isinstance(new_values, list):
+ new_values = [new_values] # If setting a scalar, augment the old table
+ else:
+ old_values_json = None # If setting a vector of new values, erase all old_values
+
+ key_value_pairs = dict()
+
+ old_table = self.load_json(old_values_json)
+ for old in old_table:
+ index = old.index()
+ key_value_pairs[index] = old
+ for new in new_values:
+ index = new.index()
+ if new.is_delete():
+ del key_value_pairs[index]
+ else:
+ key_value_pairs[index] = new
+
+ new_table = []
+ for k, v in sorted(key_value_pairs.iteritems()):
+ assert isinstance(v, self.default.cls), 'object type for Omci Table row object invalid'
+ new_table.append(v.fields)
+
+ str_values = json.dumps(new_table, separators=(',', ':'))
+
+ return str_values
+
+ def load_json(self, json_str):
+ if json_str is None:
+ json_str = '[]'
+ json_values = json.loads(json_str)
+ key_value_pairs = dict()
+ for json_value in json_values:
+ v = self.default.cls(**json_value)
+ index = v.index()
+ key_value_pairs[index] = v
+ table = []
+ for k, v in sorted(key_value_pairs.iteritems()):
+ table.append(v)
+ return table
\ No newline at end of file
diff --git a/python/adapters/extensions/omci/omci_frame.py b/python/adapters/extensions/omci/omci_frame.py
new file mode 100644
index 0000000..c0d7d4a
--- /dev/null
+++ b/python/adapters/extensions/omci/omci_frame.py
@@ -0,0 +1,207 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from scapy.fields import ByteField, PacketField, IntField
+from scapy.fields import ShortField, ConditionalField
+from scapy.packet import Packet
+
+from voltha.extensions.omci.omci_fields import FixedLenField
+from voltha.extensions.omci.omci_messages import OmciCreate, OmciDelete, \
+ OmciDeleteResponse, OmciSet, OmciSetResponse, OmciGet, OmciGetResponse, \
+ OmciGetAllAlarms, OmciGetAllAlarmsResponse, OmciGetAllAlarmsNext, \
+ OmciMibResetResponse, OmciMibReset, OmciMibUploadNextResponse, \
+ OmciMibUploadNext, OmciMibUploadResponse, OmciMibUpload, \
+ OmciGetAllAlarmsNextResponse, OmciAttributeValueChange, \
+ OmciTestResult, OmciAlarmNotification, \
+ OmciReboot, OmciRebootResponse, OmciGetNext, OmciGetNextResponse, \
+ OmciSynchronizeTime, OmciSynchronizeTimeResponse, OmciGetCurrentData, \
+ OmciGetCurrentDataResponse, OmciStartSoftwareDownload, OmciStartSoftwareDownloadResponse, \
+ OmciDownloadSection, OmciDownloadSectionLast, OmciDownloadSectionResponse, \
+ OmciEndSoftwareDownload, OmciEndSoftwareDownloadResponse, \
+ OmciActivateImage, OmciActivateImageResponse, \
+ OmciCommitImage, OmciCommitImageResponse
+
+from voltha.extensions.omci.omci_messages import OmciCreateResponse
+
+
+class OmciFrame(Packet):
+ name = "OmciFrame"
+ fields_desc = [
+ ShortField("transaction_id", 0),
+ ByteField("message_type", None),
+ ByteField("omci", 0x0a),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciCreate), align=36),
+ lambda pkt: pkt.message_type == OmciCreate.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciCreateResponse), align=36),
+ lambda pkt: pkt.message_type == OmciCreateResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciDelete), align=36),
+ lambda pkt: pkt.message_type == OmciDelete.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciDeleteResponse), align=36),
+ lambda pkt: pkt.message_type == OmciDeleteResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciSet), align=36),
+ lambda pkt: pkt.message_type == OmciSet.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciSetResponse), align=36),
+ lambda pkt: pkt.message_type == OmciSetResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGet), align=36),
+ lambda pkt: pkt.message_type == OmciGet.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGetResponse), align=36),
+ lambda pkt: pkt.message_type == OmciGetResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGetAllAlarms), align=36),
+ lambda pkt: pkt.message_type == OmciGetAllAlarms.message_id),
+ ConditionalField(FixedLenField(
+ PacketField(
+ "omci_message", None, OmciGetAllAlarmsResponse), align=36),
+ lambda pkt:
+ pkt.message_type == OmciGetAllAlarmsResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGetAllAlarmsNext), align=36),
+ lambda pkt: pkt.message_type == OmciGetAllAlarmsNext.message_id),
+ ConditionalField(FixedLenField(
+ PacketField(
+ "omci_message", None, OmciGetAllAlarmsNextResponse), align=36),
+ lambda pkt:
+ pkt.message_type == OmciGetAllAlarmsNextResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciMibUpload), align=36),
+ lambda pkt: pkt.message_type == OmciMibUpload.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciMibUploadResponse), align=36),
+ lambda pkt: pkt.message_type == OmciMibUploadResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciMibUploadNext), align=36),
+ lambda pkt:
+ pkt.message_type == OmciMibUploadNext.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciMibUploadNextResponse), align=36),
+ lambda pkt: pkt.message_type == OmciMibUploadNextResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciMibReset), align=36),
+ lambda pkt: pkt.message_type == OmciMibReset.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciMibResetResponse), align=36),
+ lambda pkt: pkt.message_type == OmciMibResetResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciAlarmNotification), align=36),
+ lambda pkt: pkt.message_type == OmciAlarmNotification.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciAttributeValueChange), align=36),
+ lambda pkt: pkt.message_type == OmciAttributeValueChange.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciTestResult), align=36),
+ lambda pkt: pkt.message_type == OmciTestResult.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciReboot), align=36),
+ lambda pkt: pkt.message_type == OmciReboot.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciRebootResponse), align=36),
+ lambda pkt: pkt.message_type == OmciRebootResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGetNext), align=36),
+ lambda pkt: pkt.message_type == OmciGetNext.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGetNextResponse), align=36),
+ lambda pkt: pkt.message_type == OmciGetNextResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciSynchronizeTime), align=36),
+ lambda pkt: pkt.message_type == OmciSynchronizeTime.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciSynchronizeTimeResponse), align=36),
+ lambda pkt: pkt.message_type == OmciSynchronizeTimeResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGetCurrentData), align=36),
+ lambda pkt: pkt.message_type == OmciGetCurrentData.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciGetCurrentDataResponse), align=36),
+ lambda pkt: pkt.message_type == OmciGetCurrentDataResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciStartSoftwareDownload), align=36),
+ lambda pkt: pkt.message_type == OmciStartSoftwareDownload.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciStartSoftwareDownloadResponse), align=36),
+ lambda pkt: pkt.message_type == OmciStartSoftwareDownloadResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciDownloadSection), align=36),
+ lambda pkt: pkt.message_type == OmciDownloadSection.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciDownloadSectionLast), align=36),
+ lambda pkt: pkt.message_type == OmciDownloadSectionLast.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciDownloadSectionResponse), align=36),
+ lambda pkt: pkt.message_type == OmciDownloadSectionResponse.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciEndSoftwareDownload), align=36),
+ lambda pkt: pkt.message_type == OmciEndSoftwareDownload.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciEndSoftwareDownloadResponse), align=36),
+ lambda pkt: pkt.message_type == OmciEndSoftwareDownloadResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciActivateImage), align=36),
+ lambda pkt: pkt.message_type == OmciActivateImage.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciActivateImageResponse), align=36),
+ lambda pkt: pkt.message_type == OmciActivateImageResponse.message_id),
+
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciCommitImage), align=36),
+ lambda pkt: pkt.message_type == OmciCommitImage.message_id),
+ ConditionalField(FixedLenField(
+ PacketField("omci_message", None, OmciCommitImageResponse), align=36),
+ lambda pkt: pkt.message_type == OmciCommitImageResponse.message_id),
+
+ # TODO add entries for remaining OMCI message types
+
+ IntField("omci_trailer", 0x00000028)
+ ]
+
+ # We needed to patch the do_dissect(...) method of Packet, because
+ # it wiped out already dissected conditional fields with None if they
+ # referred to the same field name. We marked the only new line of code
+ # with "Extra condition added".
+ def do_dissect(self, s):
+ raw = s
+ self.raw_packet_cache_fields = {}
+ for f in self.fields_desc:
+ if not s:
+ break
+ s, fval = f.getfield(self, s)
+ # We need to track fields with mutable values to discard
+ # .raw_packet_cache when needed.
+ if f.islist or f.holds_packets:
+ self.raw_packet_cache_fields[f.name] = f.do_copy(fval)
+ # Extra condition added
+ if fval is not None or f.name not in self.fields:
+ self.fields[f.name] = fval
+ assert(raw.endswith(s))
+ self.raw_packet_cache = raw[:-len(s)] if s else raw
+ self.explicit = 1
+ return s
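+
+# Encoding sketch (OmciGet field names assumed from omci_messages.py): a Get
+# request pairs message_type with the matching message class, e.g.
+#   frame = OmciFrame(transaction_id=1,
+#                     message_type=OmciGet.message_id,
+#                     omci_message=OmciGet(entity_class=0x101, entity_id=0,
+#                                          attributes_mask=0x0800))
+# str(frame) should yield the raw OMCI PDU, and OmciFrame(raw_pdu) dissects a
+# received PDU into the matching omci_message via the conditional fields above.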
diff --git a/python/adapters/extensions/omci/omci_me.py b/python/adapters/extensions/omci/omci_me.py
new file mode 100644
index 0000000..a8a2d05
--- /dev/null
+++ b/python/adapters/extensions/omci/omci_me.py
@@ -0,0 +1,939 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+OMCI Managed Entity Frame support
+"""
+from voltha.extensions.omci.omci import *
+from voltha.extensions.omci.me_frame import MEFrame
+
+
+class CardholderFrame(MEFrame):
+ """
+ This managed entity represents fixed equipment slot configuration
+ for the ONU
+ """
+ def __init__(self, single, slot_number, attributes):
+ """
+ :param single:(bool) True if the ONU is a single piece of integrated equipment,
+ False if the ONU contains pluggable equipment modules
+ :param slot_number: (int) slot number (0..254)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ # Validate
+ MEFrame.check_type(single, bool)
+ MEFrame.check_type(slot_number, int)
+ if not 0 <= slot_number <= 254:
+ raise ValueError('slot_number should be 0..254')
+
+ entity_id = 256 + slot_number if single else slot_number
+
+ super(CardholderFrame, self).__init__(Cardholder, entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class CircuitPackFrame(MEFrame):
+ """
+ This managed entity models a real or virtual circuit pack that is equipped in
+ a real or virtual ONU slot.
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Its value is the same as that
+ of the cardholder managed entity containing this
+ circuit pack instance. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(CircuitPackFrame, self).__init__(CircuitPack, entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class ExtendedVlanTaggingOperationConfigurationDataFrame(MEFrame):
+ """
+ This managed entity organizes data associated with VLAN tagging. Regardless
+ of its point of attachment, the specified tagging operations refer to the
+ upstream direction.
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(ExtendedVlanTaggingOperationConfigurationDataFrame,
+ self).__init__(ExtendedVlanTaggingOperationConfigurationData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class IpHostConfigDataFrame(MEFrame):
+ """
+ The IP host config data configures IPv4 based services offered on the ONU.
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(IpHostConfigDataFrame, self).__init__(IpHostConfigData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class GalEthernetProfileFrame(MEFrame):
+ """
+ This managed entity organizes data that describe the GTC adaptation layer
+ processing functions of the ONU for Ethernet services.
+ """
+ def __init__(self, entity_id, max_gem_payload_size=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param max_gem_payload_size: (int) This attribute defines the maximum payload
+ size generated in the associated GEM interworking
+ termination point managed entity. (0..65535)
+ """
+ MEFrame.check_type(max_gem_payload_size, (int, type(None)))
+ if max_gem_payload_size is not None and not 0 <= max_gem_payload_size <= 0xFFFF: # TODO: verify min/max
+ raise ValueError('max_gem_payload_size should be 0..0xFFFF')
+
+ data = None if max_gem_payload_size is None else\
+ {
+ 'max_gem_payload_size': max_gem_payload_size
+ }
+ super(GalEthernetProfileFrame, self).__init__(GalEthernetProfile,
+ entity_id,
+ data)
+
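+# Usage sketch (assuming the create()/get() helpers that MEFrame provides in
+# me_frame.py): GalEthernetProfileFrame(0x100, max_gem_payload_size=1518).create()
+# would yield an OmciFrame that creates GAL Ethernet profile 0x100 with a
+# 1518-byte maximum GEM payload.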
+
+class GemInterworkingTpFrame(MEFrame):
+ """
+ An instance of this managed entity represents a point in the ONU where the
+ interworking of a bearer service (usually Ethernet) to the GEM layer takes
+ place.
+ """
+ def __init__(self, entity_id,
+ gem_port_network_ctp_pointer=None,
+ interworking_option=None,
+ service_profile_pointer=None,
+ interworking_tp_pointer=None,
+ pptp_counter=None,
+ gal_profile_pointer=None,
+ attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param gem_port_network_ctp_pointer: (int) This attribute points to an instance of
+ the GEM port network CTP. (0..65535)
+
+ :param interworking_option: (int) This attribute identifies the type
+ of non-GEM function that is being interworked.
+ The options are:
+ 0 Circuit-emulated TDM
+ 1 MAC bridged LAN
+ 2 Reserved
+ 3 Reserved
+ 4 Video return path
+ 5 IEEE 802.1p mapper
+ 6 Downstream broadcast
+ 7 MPLS PW TDM service
+
+ :param service_profile_pointer: (int) This attribute points to an instance of
+ a service profile.
+ CES service profile if interworking option = 0
+ MAC bridge service profile if interworking option = 1
+ Video return path service profile if interworking option = 4
+ IEEE 802.1p mapper service profile if interworking option = 5
+ Null pointer if interworking option = 6
+ CES service profile if interworking option = 7
+
+ :param interworking_tp_pointer: (int) This attribute is used for the circuit
+ emulation service and IEEE 802.1p mapper
+ service without a MAC bridge.
+
+ :param gal_profile_pointer: (int) This attribute points to an instance of
+ a service profile.
+
+ :param attributes: (basestring, list, set, dict) additional ME attributes.
+ not specifically specified as a parameter. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ # Validate
+ self.check_type(gem_port_network_ctp_pointer, (int, type(None)))
+ self.check_type(interworking_option, (int, type(None)))
+ self.check_type(service_profile_pointer, (int, type(None)))
+ self.check_type(interworking_tp_pointer, (int, type(None)))
+ self.check_type(pptp_counter, (int, type(None)))
+ self.check_type(gal_profile_pointer, (int, type(None)))
+
+ if gem_port_network_ctp_pointer is not None and not 0 <= gem_port_network_ctp_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('gem_port_network_ctp_pointer should be 0..0xFFFE')
+
+ if interworking_option is not None and not 0 <= interworking_option <= 7:
+ raise ValueError('interworking_option should be 0..7')
+
+ if service_profile_pointer is not None and not 0 <= service_profile_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('service_profile_pointer should be 0..0xFFFE')
+
+ if interworking_tp_pointer is not None and not 0 <= interworking_tp_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('interworking_tp_pointer should be 0..0xFFFE')
+
+ if pptp_counter is not None and not 0 <= pptp_counter <= 255: # TODO: Verify max
+ raise ValueError('pptp_counter should be 0..255')
+
+ if gal_profile_pointer is not None and not 0 <= gal_profile_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('gal_profile_pointer should be 0..0xFFFE')
+
+ data = MEFrame._attr_to_data(attributes)
+
+ if gem_port_network_ctp_pointer is not None or \
+ interworking_option is not None or \
+ service_profile_pointer is not None or \
+ interworking_tp_pointer is not None or \
+ gal_profile_pointer is not None:
+
+ data = data or dict()
+
+ if gem_port_network_ctp_pointer is not None:
+ data['gem_port_network_ctp_pointer'] = gem_port_network_ctp_pointer
+
+ if interworking_option is not None:
+ data['interworking_option'] = interworking_option
+
+ if service_profile_pointer is not None:
+ data['service_profile_pointer'] = service_profile_pointer
+
+ if interworking_tp_pointer is not None:
+ data['interworking_tp_pointer'] = interworking_tp_pointer
+
+ if gal_profile_pointer is not None:
+ data['gal_profile_pointer'] = gal_profile_pointer
+
+ super(GemInterworkingTpFrame, self).__init__(GemInterworkingTp,
+ entity_id,
+ data)
+
+
+class GemPortNetworkCtpFrame(MEFrame):
+ """
+ This managed entity represents the termination of a GEM port on an ONU.
+ """
+ def __init__(self, entity_id, port_id=None, tcont_id=None,
+ direction=None, upstream_tm=None, attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param port_id: (int) This attribute is the port-ID of the GEM port associated
+ with this CTP
+
+ :param tcont_id: (int) This attribute points to a T-CONT instance
+
+ :param direction: (string) Data direction. Valid values are:
+ 'upstream' - UNI-to-ANI
+ 'downstream' - ANI-to-UNI
+ 'bi-directional' - both UNI-to-ANI and ANI-to-UNI
+
+ :param upstream_tm: (int) If the traffic management option attribute in
+ the ONU-G ME is 0 (priority controlled) or 2
+ (priority and rate controlled), this pointer
+ specifies the priority queue ME serving this GEM
+ port network CTP. If the traffic management
+ option attribute is 1 (rate controlled), this
+ attribute redundantly points to the T-CONT serving
+ this GEM port network CTP.
+
+ :param attributes: (basestring, list, set, dict) additional ME attributes.
+ not specifically specified as a parameter. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ _directions = {"upstream": 1, "downstream": 2, "bi-directional": 3}
+
+ # Validate
+ self.check_type(port_id, (int, type(None)))
+ self.check_type(tcont_id, (int, type(None)))
+ self.check_type(direction, (basestring, type(None)))
+ self.check_type(upstream_tm, (int, type(None)))
+
+ if port_id is not None and not 0 <= port_id <= 0xFFFE: # TODO: Verify max
+ raise ValueError('port_id should be 0..0xFFFE')
+
+ if tcont_id is not None and not 0 <= tcont_id <= 0xFFFE: # TODO: Verify max
+ raise ValueError('tcont_id should be 0..0xFFFE')
+
+ if direction is not None and str(direction).lower() not in _directions:
+ raise ValueError('direction should be one of {}'.format(_directions.keys()))
+
+ if upstream_tm is not None and not 0 <= upstream_tm <= 0xFFFE: # TODO: Verify max
+ raise ValueError('upstream_tm should be 0..0xFFFE')
+
+ data = MEFrame._attr_to_data(attributes)
+
+ if port_id is not None or tcont_id is not None or\
+ direction is not None or upstream_tm is not None:
+
+ data = data or dict()
+
+ if port_id is not None:
+ data['port_id'] = port_id
+ if tcont_id is not None:
+ data['tcont_pointer'] = tcont_id
+ if direction is not None:
+ data['direction'] = _directions[str(direction).lower()]
+ if upstream_tm is not None:
+ data['traffic_management_pointer_upstream'] = upstream_tm
+
+ super(GemPortNetworkCtpFrame, self).__init__(GemPortNetworkCtp,
+ entity_id,
+ data)
+
+
+class Ieee8021pMapperServiceProfileFrame(MEFrame):
+ """
+ This managed entity associates the priorities of IEEE 802.1p [IEEE
+ 802.1D] priority tagged frames with specific connections.
+ """
+ def __init__(self, entity_id, tp_pointer=None, interwork_tp_pointers=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param tp_pointer: (int) This attribute points to an instance of the
+ associated termination point. (0..65535)
+
+        :param interwork_tp_pointers: (list) List of 1 to 8 interworking termination
+                                      point IDs. The first entry is assigned to
+                                      p-bit priority 0. If fewer than 8 IDs are
+                                      provided, the last ID is reused for the
+                                      remaining priorities.
+ """
+ if tp_pointer is None and interwork_tp_pointers is None:
+ data = dict(
+ tp_pointer=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_0=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_1=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_2=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_3=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_4=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_5=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_6=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_7=OmciNullPointer
+ )
+ else:
+            self.check_type(tp_pointer, (int, type(None)))
+ self.check_type(interwork_tp_pointers, (list, type(None)))
+
+ data = dict()
+
+ if tp_pointer is not None:
+ data['tp_pointer'] = tp_pointer
+
+ if interwork_tp_pointers is not None:
+ assert all(isinstance(tp, int) and 0 <= tp <= 0xFFFF
+ for tp in interwork_tp_pointers),\
+ 'Interworking TP IDs must be 0..0xFFFF'
+ assert 1 <= len(interwork_tp_pointers) <= 8, \
+ 'Invalid number of Interworking TP IDs. Must be 1..8'
+
+ for pbit in range(0, len(interwork_tp_pointers)):
+ data['interwork_tp_pointer_for_p_bit_priority_{}'.format(pbit)] = \
+ interwork_tp_pointers[pbit]
+
+ for pbit in range(len(interwork_tp_pointers), 8):
+ data['interwork_tp_pointer_for_p_bit_priority_{}'.format(pbit)] = \
+ interwork_tp_pointers[len(interwork_tp_pointers) - 1]
+
+ super(Ieee8021pMapperServiceProfileFrame, self).__init__(Ieee8021pMapperServiceProfile,
+ entity_id,
+ data)
+
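+# Illustrative sketch only (hypothetical IDs): with fewer than eight interworking TP
+# pointers, Ieee8021pMapperServiceProfileFrame repeats the last entry for the
+# remaining p-bit priorities, so
+#
+#   Ieee8021pMapperServiceProfileFrame(0x8001, interwork_tp_pointers=[0x100, 0x101])
+#
+# maps p-bit 0 to 0x100, p-bit 1 to 0x101, and p-bits 2..7 to 0x101 as well.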
+
+class MacBridgePortConfigurationDataFrame(MEFrame):
+ """
+    This managed entity models a port on a MAC bridge.
+ """
+ def __init__(self, entity_id, bridge_id_pointer=None, port_num=None,
+ tp_type=None, tp_pointer=None, attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param bridge_id_pointer: (int) This attribute points to an instance of the
+ MAC bridge service profile. (0..65535)
+
+ :param port_num: (int) This attribute is the bridge port number. (0..255)
+
+ :param tp_type: (int) This attribute identifies the type of termination point
+ associated with this MAC bridge port. Valid values are:
+ 1 Physical path termination point Ethernet UNI
+ 2 Interworking VCC termination point
+ 3 IEEE 802.1p mapper service profile
+ 4 IP host config data or IPv6 host config data
+ 5 GEM interworking termination point
+ 6 Multicast GEM interworking termination point
+ 7 Physical path termination point xDSL UNI part 1
+ 8 Physical path termination point VDSL UNI
+ 9 Ethernet flow termination point
+ 10 Reserved
+ 11 Virtual Ethernet interface point
+ 12 Physical path termination point MoCA UNI
+
+        :param tp_pointer: (int) This attribute points to the termination point
+                           associated with this MAC bridge port. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) additional ME attributes
+                           not covered by the parameters above. For gets a string,
+                           list, or set can be provided. For create/set operations,
+                           a dictionary should be provided. For deletes, None may
+                           be specified.
+ """
+ # Validate
+ self.check_type(bridge_id_pointer, (int, type(None)))
+ self.check_type(port_num, (int, type(None)))
+ self.check_type(tp_type, (int, type(None)))
+ self.check_type(tp_pointer, (int, type(None)))
+
+ if bridge_id_pointer is not None and not 0 <= bridge_id_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('bridge_id_pointer should be 0..0xFFFE')
+
+ if port_num is not None and not 0 <= port_num <= 255:
+ raise ValueError('port_num should be 0..255') # TODO: Verify min,max
+
+ if tp_type is not None and not 1 <= tp_type <= 12:
+            raise ValueError('tp_type should be 1..12')
+
+ if tp_pointer is not None and not 0 <= tp_pointer <= 0xFFFE: # TODO: Verify max
+            raise ValueError('tp_pointer should be 0..0xFFFE')
+
+ data = MEFrame._attr_to_data(attributes)
+
+ if bridge_id_pointer is not None or \
+ port_num is not None or \
+ tp_type is not None or \
+ tp_pointer is not None:
+
+ data = data or dict()
+
+ if bridge_id_pointer is not None:
+ data['bridge_id_pointer'] = bridge_id_pointer
+
+ if port_num is not None:
+ data['port_num'] = port_num
+
+ if tp_type is not None:
+ data['tp_type'] = tp_type
+
+ if tp_pointer is not None:
+ data['tp_pointer'] = tp_pointer
+
+ super(MacBridgePortConfigurationDataFrame, self).\
+ __init__(MacBridgePortConfigurationData, entity_id, data)
+
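+# Illustrative sketch only: a MacBridgePortConfigurationDataFrame tying a MAC bridge
+# service profile to an IEEE 802.1p mapper (tp_type=3). The entity IDs are
+# hypothetical, and create() is assumed from the MEFrame base class.
+#
+#   frame = MacBridgePortConfigurationDataFrame(0x201,
+#                                               bridge_id_pointer=0x201,
+#                                               port_num=2,
+#                                               tp_type=3,
+#                                               tp_pointer=0x8001).create()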
+
+class MacBridgeServiceProfileFrame(MEFrame):
+ """
+ This managed entity models a MAC bridge in its entirety; any number
+ of ports may be associated with the bridge through pointers to the
+ MAC bridge service profile managed entity.
+ """
+ def __init__(self, entity_id, attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(MacBridgeServiceProfileFrame, self).__init__(MacBridgeServiceProfile,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class OntGFrame(MEFrame):
+ """
+ This managed entity represents the ONU as equipment.
+ """
+ def __init__(self, attributes=None):
+ """
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(OntGFrame, self).__init__(OntG, 0,
+ MEFrame._attr_to_data(attributes))
+
+
+class Ont2GFrame(MEFrame):
+ """
+ This managed entity contains additional attributes associated with a PON ONU.
+ """
+ def __init__(self, attributes=None):
+ """
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ # Only one managed entity instance (Entity ID=0)
+ super(Ont2GFrame, self).__init__(Ont2G, 0,
+ MEFrame._attr_to_data(attributes))
+
+
+class PptpEthernetUniFrame(MEFrame):
+ """
+ This managed entity represents the point at an Ethernet UNI where the physical path
+ terminates and Ethernet physical level functions are performed.
+ """
+ def __init__(self, entity_id, attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(PptpEthernetUniFrame, self).__init__(PptpEthernetUni, entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class VeipUniFrame(MEFrame):
+ """
+    This managed entity represents the point where a virtual UNI interfaces to a
+    non-OMCI management domain. It is typically seen in RG+ONU all-in-one devices.
+ """
+ def __init__(self, entity_id, attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(VeipUniFrame, self).__init__(VeipUni, entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class SoftwareImageFrame(MEFrame):
+ """
+ This managed entity models an executable software image stored in the ONU.
+ """
+ def __init__(self, entity_id):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+ """
+ super(SoftwareImageFrame, self).__init__(SoftwareImage, entity_id, None)
+
+
+class TcontFrame(MEFrame):
+ """
+ An instance of the traffic container managed entity T-CONT represents a
+ logical connection group associated with a G-PON PLOAM layer alloc-ID.
+ """
+ def __init__(self, entity_id, alloc_id=None, policy=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param alloc_id: (int) This attribute links the T-CONT with the alloc-ID
+ assigned by the OLT in the assign_alloc-ID PLOAM
+ message (0..0xFFF) or 0xFFFF to mark as free
+
+ :param policy: (int) This attribute indicates the T-CONT's traffic scheduling
+ policy. Valid values:
+ 0 - Null
+ 1 - Strict priority
+ 2 - WRR - Weighted round robin
+ """
+ # Validate
+ self.check_type(alloc_id, (int, type(None)))
+ self.check_type(policy, (int, type(None)))
+
+ if alloc_id is not None and not (0 <= alloc_id <= 0xFFF or alloc_id == 0xFFFF):
+ raise ValueError('alloc_id should be 0..0xFFF or 0xFFFF to mark it as free')
+
+ if policy is not None and not 0 <= policy <= 2:
+ raise ValueError('policy should be 0..2')
+
+ if alloc_id is None and policy is None:
+ data = None
+ else:
+ data = dict()
+
+ if alloc_id is not None:
+ data['alloc_id'] = alloc_id
+
+ if policy is not None:
+ data['policy'] = policy
+
+ super(TcontFrame, self).__init__(Tcont, entity_id, data)
+
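+# Illustrative sketch only (hypothetical values): TcontFrame 'set' frames assigning an
+# alloc-ID to a T-CONT instance, and freeing it again with the reserved value 0xFFFF.
+# The set() helper is assumed from the MEFrame base class.
+#
+#   assign = TcontFrame(0x8001, alloc_id=0x400).set()
+#   free   = TcontFrame(0x8001, alloc_id=0xFFFF).set()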
+
+class VlanTaggingFilterDataFrame(MEFrame):
+ """
+    This managed entity organizes data associated with VLAN tagging filtering at
+    a MAC bridge port. Instances of this managed entity are created and deleted
+    by the OLT.
+ """
+ def __init__(self, entity_id, vlan_tcis=None, forward_operation=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param vlan_tcis: (list) This attribute is a list of provisioned TCI values
+ for the bridge port. (0..0xFFFF)
+
+        :param forward_operation: (int) Tagging filter behaviour code point
+                                  (0..0x21). See ITU-T G.988 for the defined values.
+
+ """
+ # Validate
+ self.check_type(vlan_tcis, (list, type(None)))
+ self.check_type(forward_operation, (int, type(None)))
+
+ if forward_operation is not None and not 0 <= forward_operation <= 0x21:
+ raise ValueError('forward_operation should be 0..0x21')
+
+ if vlan_tcis is None and forward_operation is None:
+ data = None
+
+ else:
+ data = dict()
+
+ if vlan_tcis is not None:
+ num_tcis = len(vlan_tcis)
+
+ assert 0 <= num_tcis <= 12, 'Number of VLAN TCI values is 0..12'
+ assert all(isinstance(tci, int) and 0 <= tci <= 0xFFFF
+ for tci in vlan_tcis), "VLAN TCI's are 0..0xFFFF"
+
+ if num_tcis > 0:
+ vlan_filter_list = [0] * 12
+ for index in range(0, num_tcis):
+ vlan_filter_list[index] = vlan_tcis[index]
+
+ data['vlan_filter_list'] = vlan_filter_list
+ data['number_of_entries'] = num_tcis
+
+ if forward_operation is not None:
+                assert 0 <= forward_operation <= 0x21, \
+                    'forward_operation must be 0x00..0x21'
+ data['forward_operation'] = forward_operation
+
+ super(VlanTaggingFilterDataFrame, self).__init__(VlanTaggingFilterData,
+ entity_id,
+ data)
+
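+# Illustrative sketch only (hypothetical values): a VlanTaggingFilterDataFrame that
+# passes VLANs 100 and 200. The TCI list is zero-padded to the fixed 12-entry
+# 'vlan_filter_list' attribute and 'number_of_entries' is set to 2. The
+# forward_operation value 0x10 is only an example code point (see ITU-T G.988),
+# and create() is assumed from the MEFrame base class.
+#
+#   frame = VlanTaggingFilterDataFrame(0x201, vlan_tcis=[100, 200],
+#                                      forward_operation=0x10).create()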
+
+class OntDataFrame(MEFrame):
+ """
+ This managed entity models the MIB itself
+ """
+ def __init__(self, mib_data_sync=None, sequence_number=None, ignore_arc=None):
+ """
+        For 'get', 'MIB reset', and 'MIB upload', pass no value.
+        For 'set' actions, pass a mib_data_sync value (0..255).
+        For 'MIB upload next' and 'Get all alarms next', pass a sequence_number value (0..65535).
+        For 'Get all alarms', set ignore_arc to True to get all alarms regardless
+                              of ARC status, or to False to get only alarms not
+                              currently under ARC.
+
+ :param mib_data_sync: (int) This attribute is used to check the alignment
+ of the MIB of the ONU with the corresponding MIB
+ in the OLT. (0..0xFF)
+ :param sequence_number: (int) This is used for MIB Upload Next (0..0xFFFF)
+ :param ignore_arc: (bool) None for all but 'get_all_alarm' commands
+ """
+ self.check_type(mib_data_sync, (int, type(None)))
+ if mib_data_sync is not None and not 0 <= mib_data_sync <= 0xFF:
+ raise ValueError('mib_data_sync should be 0..0xFF')
+
+ if sequence_number is not None and not 0 <= sequence_number <= 0xFFFF:
+ raise ValueError('sequence_number should be 0..0xFFFF')
+
+ if ignore_arc is not None and not isinstance(ignore_arc, bool):
+ raise TypeError('ignore_arc should be a boolean')
+
+ if mib_data_sync is not None:
+ # Note: Currently the Scapy decode/encode is 16-bits since we need
+ # the data field that large in order to support MIB and Alarm Upload Next
+ # commands. Push our 8-bit MDS value into the upper 8-bits so that
+ # it is encoded properly into the ONT_Data 'set' frame
+ data = {'mib_data_sync': mib_data_sync << 8}
+
+        elif sequence_number is not None:
+            # The 16-bit 'mib_data_sync' field carries the command sequence number
+            # for 'MIB upload next' and 'Get all alarms next' requests
+            data = {'mib_data_sync': sequence_number}
+
+        elif ignore_arc is not None:
+            # Here the field carries the alarm retrieval mode (0 = all alarms
+            # regardless of ARC, 1 = only alarms not currently under ARC)
+            data = {'mib_data_sync': 0 if ignore_arc else 1}
+
+ else:
+            data = {'mib_data_sync'}  # Attribute name only, so 'get' style requests encode correctly
+
+ super(OntDataFrame, self).__init__(OntData, 0, data)
+
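+# Illustrative sketch only: the same 'mib_data_sync' field of OntDataFrame carries
+# different content depending on the request being built (values are hypothetical).
+#
+#   OntDataFrame(mib_data_sync=10)    # 'set':  MDS value 10, shifted into the upper byte
+#   OntDataFrame(sequence_number=5)   # 'next': command sequence number 5
+#   OntDataFrame()                    # 'get'/'MIB reset'/'MIB upload': attribute name only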
+
+class OmciFrame(MEFrame):
+ """
+ This managed entity describes the ONU's general level of support for OMCI managed
+ entities and messages. This ME is not included in a MIB upload.
+ """
+ def __init__(self, me_type_table=None, message_type_table=None):
+ """
+        For 'get' requests, select the table whose size you want reported by
+        setting either me_type_table or message_type_table to boolean True.
+
+        For 'get-next' requests, set the sequence number of the table portion
+        you wish to retrieve by setting either me_type_table or
+        message_type_table to an integer value.
+ """
+ if not isinstance(me_type_table, (bool, int, type(None))):
+ raise TypeError('Parameters must be a boolean or integer')
+
+ if not isinstance(message_type_table, (bool, int, type(None))):
+ raise TypeError('Parameters must be a boolean or integer')
+
+ if me_type_table is not None:
+ if isinstance(me_type_table, bool):
+ data = {'me_type_table'}
+ else:
+ data = {'me_type_table': me_type_table}
+
+ elif message_type_table is not None:
+            if isinstance(message_type_table, bool):
+ data = {'message_type_table'}
+ else:
+ data = {'message_type_table': message_type_table}
+ else:
+            raise NotImplementedError('Unknown request')
+
+ super(OmciFrame, self).__init__(Omci, 0, data)
+
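+# Illustrative sketch only for OmciFrame (ME #287): a boolean requests the table size
+# ('get'), while an integer is the sequence number for the subsequent 'get-next' of
+# that table.
+#
+#   OmciFrame(me_type_table=True)   # 'get':      report the ME type table size
+#   OmciFrame(me_type_table=0)      # 'get-next': first portion of the ME type table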
+
+class EthernetPMMonitoringHistoryDataFrame(MEFrame):
+ """
+ This managed entity collects some of the performance monitoring data for a physical
+ Ethernet interface
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance
+ of the physical path termination point Ethernet UNI
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(EthernetPMMonitoringHistoryDataFrame, self).__init__(
+ EthernetPMMonitoringHistoryData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class FecPerformanceMonitoringHistoryDataFrame(MEFrame):
+ """
+ This managed entity collects performance monitoring data associated with PON
+ downstream FEC counters.
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance of
+ the ANI-G
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(FecPerformanceMonitoringHistoryDataFrame, self).__init__(
+ FecPerformanceMonitoringHistoryData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class EthernetFrameDownstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+ """
+ This managed entity collects performance monitoring data associated with downstream
+ Ethernet frame delivery. It is based on the Etherstats group of [IETF RFC 2819].
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance of
+ a MAC bridge port configuration data
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(EthernetFrameDownstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+ EthernetFrameDownstreamPerformanceMonitoringHistoryData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class EthernetFrameUpstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+ """
+ This managed entity collects performance monitoring data associated with upstream
+ Ethernet frame delivery. It is based on the Etherstats group of [IETF RFC 2819].
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance of
+ a MAC bridge port configuration data
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(EthernetFrameUpstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+ EthernetFrameUpstreamPerformanceMonitoringHistoryData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class GemPortNetworkCtpMonitoringHistoryDataFrame(MEFrame):
+ """
+ This managed entity collects GEM frame performance monitoring data associated
+ with a GEM port network CTP
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance
+ of the GEM port network CTP.
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(GemPortNetworkCtpMonitoringHistoryDataFrame, self).__init__(
+ GemPortNetworkCtpMonitoringHistoryData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class XgPonTcPerformanceMonitoringHistoryDataFrame(MEFrame):
+ """
+ This managed entity collects performance monitoring data associated with
+ the XG-PON transmission convergence layer, as defined in [ITU-T G.987.3]
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance of
+ the ANI-G.
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(XgPonTcPerformanceMonitoringHistoryDataFrame, self).__init__(
+ XgPonTcPerformanceMonitoringHistoryData, entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class XgPonDownstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+ """
+    This managed entity collects performance monitoring data associated with
+    the XG-PON transmission convergence layer, as defined in [ITU-T G.987.3]
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance of
+ the ANI-G.
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(XgPonDownstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+ XgPonDownstreamPerformanceMonitoringHistoryData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class XgPonUpstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+ """
+ This managed entity collects performance monitoring data associated with
+ the XG-PON transmission convergence layer, as defined in [ITU-T G.987.3]
+ """
+ def __init__(self, entity_id, attributes):
+ """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Through an identical ID, this
+ managed entity is implicitly linked to an instance of
+ the ANI-G.
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(XgPonUpstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+ XgPonUpstreamPerformanceMonitoringHistoryData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
diff --git a/python/adapters/extensions/omci/omci_messages.py b/python/adapters/extensions/omci/omci_messages.py
new file mode 100644
index 0000000..04d3e83
--- /dev/null
+++ b/python/adapters/extensions/omci/omci_messages.py
@@ -0,0 +1,551 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from scapy.fields import ByteField, ThreeBytesField, StrFixedLenField, ConditionalField, IntField, Field
+from scapy.fields import ShortField, BitField
+from scapy.packet import Packet
+
+from voltha.extensions.omci.omci_defs import AttributeAccess, OmciSectionDataSize
+from voltha.extensions.omci.omci_fields import OmciTableField
+import voltha.extensions.omci.omci_entities as omci_entities
+
+
+log = structlog.get_logger()
+
+
+class OmciData(Field):
+
+ __slots__ = Field.__slots__ + ['_entity_class']
+
+ def __init__(self, name, entity_class="entity_class"):
+ Field.__init__(self, name=name, default=None, fmt='s')
+ self._entity_class = entity_class
+
+ def addfield(self, pkt, s, val):
+ class_id = getattr(pkt, self._entity_class)
+ entity_class = omci_entities.entity_id_to_class_map.get(class_id)
+ for attribute in entity_class.attributes:
+ if AttributeAccess.SetByCreate not in attribute.access:
+ continue
+ if attribute.field.name == 'managed_entity_id':
+ continue
+ fld = attribute.field
+ s = fld.addfield(pkt, s, val.get(fld.name, fld.default))
+ return s
+
+ def getfield(self, pkt, s):
+ """Extract an internal value from a string"""
+ class_id = getattr(pkt, self._entity_class)
+ entity_class = omci_entities.entity_id_to_class_map.get(class_id)
+ data = {}
+ for attribute in entity_class.attributes:
+ if AttributeAccess.SetByCreate not in attribute.access:
+ continue
+ if attribute.field.name == 'managed_entity_id':
+ continue
+ fld = attribute.field
+ s, value = fld.getfield(pkt, s)
+ data[fld.name] = value
+ return s, data
+
+
+class OmciMaskedData(Field):
+
+ __slots__ = Field.__slots__ + ['_entity_class', '_attributes_mask']
+
+ def __init__(self, name, entity_class="entity_class",
+ attributes_mask="attributes_mask"):
+ Field.__init__(self, name=name, default=None, fmt='s')
+ self._entity_class = entity_class
+ self._attributes_mask = attributes_mask
+
+ def addfield(self, pkt, s, val):
+ class_id = getattr(pkt, self._entity_class)
+ attribute_mask = getattr(pkt, self._attributes_mask)
+ entity_class = omci_entities.entity_id_to_class_map.get(class_id)
+ indices = entity_class.attribute_indices_from_mask(attribute_mask)
+ for index in indices:
+ fld = entity_class.attributes[index].field
+ s = fld.addfield(pkt, s, val[fld.name])
+ return s
+
+ def getfield(self, pkt, s):
+ """Extract an internal value from a string"""
+ class_id = getattr(pkt, self._entity_class)
+ attribute_mask = getattr(pkt, self._attributes_mask)
+ entity_class = omci_entities.entity_id_to_class_map[class_id]
+ indices = entity_class.attribute_indices_from_mask(attribute_mask)
+ data = {}
+ table_attribute_mask = 0
+ for index in indices:
+ try:
+ fld = entity_class.attributes[index].field
+ except IndexError, e:
+ log.error("attribute-decode-failure", attribute_index=index,
+ entity_class=entity_class, e=e)
+ continue
+            s, value = fld.getfield(pkt, s)
+ if isinstance(pkt, OmciGetResponse) and isinstance(fld, OmciTableField):
+ data[fld.name + '_size'] = value
+ table_attribute_mask = table_attribute_mask | (1 << (15 - index))
+ else:
+ data[fld.name] = value
+ if table_attribute_mask:
+ data['table_attribute_mask'] = table_attribute_mask
+ return s, data
+
+
+class OmciMessage(Packet):
+ name = "OmciMessage"
+ message_id = None # OMCI message_type value, filled by derived classes
+ fields_desc = []
+
+
+class OmciCreate(OmciMessage):
+ name = "OmciCreate"
+ message_id = 0x44
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ OmciData("data")
+ ]
+
+
+class OmciCreateResponse(OmciMessage):
+ name = "OmciCreateResponse"
+ message_id = 0x24
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", None),
+ ByteField("success_code", 0),
+ ShortField("parameter_error_attributes_mask", None),
+ ]
+
+
+class OmciDelete(OmciMessage):
+ name = "OmciDelete"
+ message_id = 0x46
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", None),
+ ]
+
+
+class OmciDeleteResponse(OmciMessage):
+ name = "OmciDeleteResponse"
+ message_id = 0x26
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", None),
+ ByteField("success_code", 0),
+ ]
+
+
+class OmciSet(OmciMessage):
+ name = "OmciSet"
+ message_id = 0x48
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ShortField("attributes_mask", None),
+ OmciMaskedData("data")
+ ]
+
+
+class OmciSetResponse(OmciMessage):
+ name = "OmciSetResponse"
+ message_id = 0x28
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", None),
+ ByteField("success_code", 0),
+ ShortField("unsupported_attributes_mask", None),
+ ShortField("failed_attributes_mask", None),
+ ]
+
+
+class OmciGet(OmciMessage):
+ name = "OmciGet"
+ message_id = 0x49
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ShortField("attributes_mask", None)
+ ]
+
+
+class OmciGetResponse(OmciMessage):
+ name = "OmciGetResponse"
+ message_id = 0x29
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ByteField("success_code", 0),
+ ShortField("attributes_mask", None),
+ ConditionalField(
+ OmciMaskedData("data"), lambda pkt: pkt.success_code == 0)
+ ]
+
+
+class OmciGetAllAlarms(OmciMessage):
+ name = "OmciGetAllAlarms"
+ message_id = 0x4b
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0), # Always 0 (ONT instance)
+ ByteField("alarm_retrieval_mode", 0) # 0 or 1
+ ]
+
+
+class OmciGetAllAlarmsResponse(OmciMessage):
+ name = "OmciGetAllAlarmsResponse"
+ message_id = 0x2b
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0),
+ ShortField("number_of_commands", None)
+ ]
+
+
+class OmciGetAllAlarmsNext(OmciMessage):
+ name = "OmciGetAllAlarmsNext"
+ message_id = 0x4c
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0),
+ ShortField("command_sequence_number", None)
+ ]
+
+
+class OmciGetAllAlarmsNextResponse(OmciMessage):
+ name = "OmciGetAllAlarmsNextResponse"
+ message_id = 0x2c
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0),
+ ShortField("alarmed_entity_class", None),
+ ShortField("alarmed_entity_id", 0),
+ BitField("alarm_bit_map", None, 224)
+ ]
+
+
+class OmciMibUpload(OmciMessage):
+ name = "OmciMibUpload"
+ message_id = 0x4d
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0),
+ ]
+
+
+class OmciMibUploadResponse(OmciMessage):
+ name = "OmciMibUploadResponse"
+ message_id = 0x2d
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0),
+ ShortField("number_of_commands", None)
+ ]
+
+
+class OmciMibUploadNext(OmciMessage):
+ name = "OmciMibUploadNext"
+ message_id = 0x4e
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0),
+ ShortField("command_sequence_number", None)
+ ]
+
+
+class OmciMibUploadNextResponse(OmciMessage):
+ name = "OmciMibUploadNextResponse"
+ message_id = 0x2e
+ fields_desc = [
+ ShortField("entity_class", 2), # Always 2 (ONT data)
+ ShortField("entity_id", 0),
+ ShortField("object_entity_class", None),
+ ShortField("object_entity_id", 0),
+ ShortField("object_attributes_mask", None),
+ OmciMaskedData("object_data", entity_class='object_entity_class',
+ attributes_mask='object_attributes_mask')
+ ]
+
+
+class OmciMibReset(OmciMessage):
+ name = "OmciMibReset"
+ message_id = 0x4f
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0)
+ ]
+
+
+class OmciMibResetResponse(OmciMessage):
+ name = "OmciMibResetResponse"
+ message_id = 0x2f
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ByteField("success_code", 0)
+ ]
+
+
+class OmciAlarmNotification(OmciMessage):
+ name = "AlarmNotification"
+ message_id = 0x10
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ BitField("alarm_bit_map", 0, 224),
+ ThreeBytesField("zero_padding", 0),
+ ByteField("alarm_sequence_number", None)
+ ]
+
+
+class OmciAttributeValueChange(OmciMessage):
+ name = "AttributeValueChange"
+ message_id = 0x11
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ShortField("attributes_mask", None),
+ OmciMaskedData("data")
+ ]
+
+
+class OmciTestResult(OmciMessage):
+ name = "TestResult"
+ message_id = 0x1B
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0)
+ # ME Test specific message contents starts here
+ # TODO: Can this be coded easily with scapy?
+ ]
+
+
+class OmciReboot(OmciMessage):
+ name = "OmciOnuReboot"
+ message_id = 0x59
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ByteField("reboot_code", 0)
+ ]
+
+
+class OmciRebootResponse(OmciMessage):
+ name = "OmciOnuRebootResponse"
+ message_id = 0x39
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ByteField("success_code", 0)
+ ]
+
+
+class OmciGetNext(OmciMessage):
+ name = "OmciGetNext"
+ message_id = 0x5A
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ShortField("attributes_mask", None),
+ ShortField("command_sequence_number", None)
+ ]
+
+
+class OmciGetNextResponse(OmciMessage):
+ name = "OmciGetNextResponse"
+ message_id = 0x3A
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ByteField("success_code", 0),
+ ShortField("attributes_mask", None),
+ ConditionalField(OmciMaskedData("data"),
+ lambda pkt: pkt.success_code == 0)
+ ]
+
+
+class OmciSynchronizeTime(OmciMessage):
+ name = "OmciSynchronizeTime"
+ message_id = 0x58
+ fields_desc = [
+ ShortField("entity_class", 256), # OntG
+ ShortField("entity_id", 0),
+ ShortField("year", 0), # eg) 2018
+ ByteField("month", 0), # 1..12
+ ByteField("day", 0), # 1..31
+ ByteField("hour", 0), # 0..23
+ ByteField("minute", 0), # 0..59
+ ByteField("second", 0) # 0..59
+ ]
+
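+# Illustrative sketch only: populating an OmciSynchronizeTime request from the
+# current UTC time (field values only; framing and transmission are handled
+# elsewhere in OpenOMCI).
+#
+#   from datetime import datetime
+#   dt = datetime.utcnow()
+#   msg = OmciSynchronizeTime(entity_id=0, year=dt.year, month=dt.month, day=dt.day,
+#                             hour=dt.hour, minute=dt.minute, second=dt.second)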
+
+class OmciSynchronizeTimeResponse(OmciMessage):
+ name = "OmciSynchronizeTimeResponse"
+ message_id = 0x38
+ fields_desc = [
+ ShortField("entity_class", 256), # OntG
+ ShortField("entity_id", 0),
+ ByteField("success_code", 0),
+ ConditionalField(ShortField("success_info", None),
+ lambda pkt: pkt.success_code == 0)
+ ]
+
+
+class OmciGetCurrentData(OmciMessage):
+ name = "OmciGetCurrentData"
+ message_id = 0x5C
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ShortField("attributes_mask", None),
+ ]
+
+
+class OmciGetCurrentDataResponse(OmciMessage):
+ name = "OmciGetCurrentDataResponse"
+ message_id = 0x3C
+ fields_desc = [
+ ShortField("entity_class", None),
+ ShortField("entity_id", 0),
+ ByteField("success_code", 0),
+ ShortField("attributes_mask", None),
+ ShortField("unsupported_attributes_mask", None),
+ ShortField("failed_attributes_mask", None),
+ ConditionalField(
+ OmciMaskedData("data"), lambda pkt: pkt.success_code == 0)
+ ]
+
+class OmciStartSoftwareDownload(OmciMessage):
+ name = "OmciStartSoftwareDownload"
+ message_id = 0x53
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ByteField("window_size", 0),
+ IntField("image_size", 0),
+ ByteField("image_number", 1), # Always only 1 in parallel
+ ShortField("instance_id", None) # should be same as "entity_id"
+ ]
+
+class OmciStartSoftwareDownloadResponse(OmciMessage):
+ name = "OmciStartSoftwareDownloadResponse"
+ message_id = 0x33
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ByteField("result", 0),
+ ByteField("window_size", 0),
+ ByteField("image_number", 1), # Always only 1 in parallel
+ ShortField("instance_id", None) # should be same as "entity_id"
+ ]
+
+class OmciEndSoftwareDownload(OmciMessage):
+ name = "OmciEndSoftwareDownload"
+ message_id = 0x55
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ IntField("crc32", 0),
+ IntField("image_size", 0),
+ ByteField("image_number", 1), # Always only 1 in parallel
+ ShortField("instance_id", None),# should be same as "entity_id"
+ ]
+
+class OmciEndSoftwareDownloadResponse(OmciMessage):
+    name = "OmciEndSoftwareDownloadResponse"
+ message_id = 0x35
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ByteField("result", 0),
+ ByteField("image_number", 1), # Always only 1 in parallel
+ ShortField("instance_id", None),# should be same as "entity_id"
+ ByteField("result0", 0) # same as result
+ ]
+
+class OmciDownloadSection(OmciMessage):
+ name = "OmciDownloadSection"
+ message_id = 0x14
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+        ByteField("section_number", 0),     # Section sequence number within the window
+ StrFixedLenField("data", 0, length=OmciSectionDataSize) # section data
+ ]
+
+class OmciDownloadSectionLast(OmciMessage):
+    name = "OmciDownloadSectionLast"
+ message_id = 0x54
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+        ByteField("section_number", 0),     # Section sequence number within the window
+ StrFixedLenField("data", 0, length=OmciSectionDataSize) # section data
+ ]
+
+class OmciDownloadSectionResponse(OmciMessage):
+ name = "OmciDownloadSectionResponse"
+ message_id = 0x34
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ByteField("result", 0),
+        ByteField("section_number", 0),     # Section sequence number being acknowledged
+ ]
+
+class OmciActivateImage(OmciMessage):
+ name = "OmciActivateImage"
+ message_id = 0x56
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ByteField("activate_flag", 0) # Activate image unconditionally
+ ]
+
+class OmciActivateImageResponse(OmciMessage):
+ name = "OmciActivateImageResponse"
+ message_id = 0x36
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ByteField("result", 0) # Activate image unconditionally
+        ByteField("result", 0)          # Result/reason code
+
+class OmciCommitImage(OmciMessage):
+ name = "OmciCommitImage"
+ message_id = 0x57
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ]
+
+class OmciCommitImageResponse(OmciMessage):
+ name = "OmciCommitImageResponse"
+ message_id = 0x37
+ fields_desc = [
+ ShortField("entity_class", 7), # Always 7 (Software image)
+ ShortField("entity_id", None),
+ ByteField("result", 0) # Activate image unconditionally
+        ByteField("result", 0)          # Result/reason code
+
diff --git a/python/adapters/extensions/omci/onu_configuration.py b/python/adapters/extensions/omci/onu_configuration.py
new file mode 100644
index 0000000..1fa00fe
--- /dev/null
+++ b/python/adapters/extensions/omci/onu_configuration.py
@@ -0,0 +1,509 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+
+from voltha.protos.device_pb2 import Image
+from omci_entities import *
+from database.mib_db_api import *
+from enum import IntEnum
+
+
+class OMCCVersion(IntEnum):
+ Unknown = 0 # Unknown or unsupported version
+ G_984_4 = 0x80 # (06/04)
+ G_984_4_2005_Amd_1 = 0x81 # Amd.1 (06/05)
+ G_984_4_2006_Amd_2 = 0x82 # Amd.2 (03/06)
+ G_984_4_2006_Amd_3 = 0x83 # Amd.3 (12/06)
+ G_984_4_2008 = 0x84 # (02/08)
+ G_984_4_2009_Amd_1 = 0x85 # Amd.1 (06/09)
+ G_984_4_2009_Amd_2_Base = 0x86 # Amd.2 (2009) Baseline message set only, w/o the extended message set option
+ G_984_4_2009_Amd_2 = 0x96 # Amd.2 (2009) Extended message set option + baseline message set.
+ G_988_2010_Base = 0xA0 # (2010) Baseline message set only, w/o the extended message set option
+ G_988_2011_Amd_1_Base = 0xA1 # Amd.1 (2011) Baseline message set only
+ G_988_2012_Amd_2_Base = 0xA2 # Amd.2 (2012) Baseline message set only
+ G_988_2012_Base = 0xA3 # (2012) Baseline message set only
+ G_988_2010 = 0xB0 # (2010) Baseline and extended message set
+ G_988_2011_Amd_1 = 0xB1 # Amd.1 (2011) Baseline and extended message set
+ G_988_2012_Amd_2 = 0xB2 # Amd.2 (2012) Baseline and extended message set
+    G_988_2012 = 0xB3                 # (2012) Baseline and extended message set
+
+ @staticmethod
+ def values():
+ return {OMCCVersion[member].value for member in OMCCVersion.__members__.keys()}
+
+ @staticmethod
+ def to_enum(value):
+ return next((v for k, v in OMCCVersion.__members__.items()
+ if v.value == value), OMCCVersion.Unknown)
+
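+# Illustrative sketch only: decoding the raw OMCC version attribute reported by an
+# ONU with OMCCVersion.to_enum (code points are from the table above).
+#
+#   OMCCVersion.to_enum(0xB3)   # -> OMCCVersion.G_988_2012
+#   OMCCVersion.to_enum(0x42)   # unrecognized code point -> OMCCVersion.Unknown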
+
+class OnuConfiguration(object):
+ """
+ Utility class to query OMCI MIB Database for various ONU/OMCI Configuration
+    and capabilities. These capabilities revolve around read-only MEs discovered
+ during the MIB Upload process.
+
+ There is also a 'omci_onu_capabilities' State Machine and an
+ 'onu_capabilities_task.py' OMCI Task that will query the ONU, via the
+    OMCI (ME #287) managed entity to get the full list of supported OMCI MEs
+    and the message types the ONU supports.
+
+ NOTE: Currently this class is optimized/tested for ONUs that support the
+ OpenOMCI implementation.
+ """
+ def __init__(self, omci_agent, device_id):
+ """
+ Initialize this instance of the OnuConfiguration class
+
+ :param omci_agent: (OpenOMCIAgent) agent reference
+ :param device_id: (str) ONU Device ID
+
+ :raises KeyError: If ONU Device is not registered with OpenOMCI
+ """
+ self.log = structlog.get_logger(device_id=device_id)
+ self._device_id = device_id
+ self._onu_device = omci_agent.get_device(device_id)
+
+ # The capabilities
+ self._attributes = None
+ self.reset()
+
+ def _get_capability(self, attr, class_id, instance_id=None):
+ """
+ Get the OMCI capabilities for this device
+
+ :param attr: (str) OnuConfiguration attribute field
+ :param class_id: (int) ME Class ID
+ :param instance_id: (int) Instance ID. If not provided, all instances of the
+ specified class ID are returned if present in the DB.
+
+ :return: (dict) Class and/or Instances. None is returned if the CLASS is not present
+ """
+ try:
+ assert self._onu_device.mib_synchronizer.last_mib_db_sync is not None, \
+ 'MIB Database for ONU {} has never been synchronized'.format(self._device_id)
+
+ # Get the requested information
+ if self._attributes[attr] is None:
+ value = self._onu_device.query_mib(class_id, instance_id=instance_id)
+
+ if isinstance(value, dict) and len(value) > 0:
+ self._attributes[attr] = value
+
+ return self._attributes[attr]
+
+ except Exception as e:
+ self.log.exception('onu-capabilities', e=e, class_id=class_id,
+ instance_id=instance_id)
+ raise
+
+ def reset(self):
+ """
+ Reset the cached database entries to None. This method should be
+ called after any communications loss to the ONU (reboot, PON down, ...)
+ in case a new software load with different capabilities is available.
+ """
+ self._attributes = {
+ '_ont_g': None,
+ '_ont_2g': None,
+ '_ani_g': None,
+ '_uni_g': None,
+ '_cardholder': None,
+ '_circuit_pack': None,
+ '_software': None,
+ '_pptp': None,
+ '_veip': None
+ }
+
+ @property
+ def version(self):
+ """
+ This attribute identifies the version of the ONU as defined by the vendor
+ """
+ ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+ if ontg is None or ATTRIBUTES_KEY not in ontg:
+ return None
+
+ return ontg[ATTRIBUTES_KEY].get('version')
+
+ @property
+ def serial_number(self):
+ """
+ The serial number is unique for each ONU
+ """
+ ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+ if ontg is None or ATTRIBUTES_KEY not in ontg:
+ return None
+
+ return ontg[ATTRIBUTES_KEY].get('serial_number')
+
+ @property
+ def traffic_management_option(self):
+ """
+ This attribute identifies the upstream traffic management function
+ implemented in the ONU. There are three options:
+
+ 0 Priority controlled and flexibly scheduled upstream traffic. The traffic
+ scheduler and priority queue mechanism are used for upstream traffic.
+
+ 1 Rate controlled upstream traffic. The maximum upstream traffic of each
+ individual connection is guaranteed by shaping.
+
+ 2 Priority and rate controlled. The traffic scheduler and priority queue
+ mechanism are used for upstream traffic. The maximum upstream traffic
+ of each individual connection is guaranteed by shaping.
+ """
+ ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+ if ontg is None or ATTRIBUTES_KEY not in ontg:
+ return None
+
+ return ontg[ATTRIBUTES_KEY].get('traffic_management_option')
+
+ @property
+ def onu_survival_time(self):
+ """
+ This attribute indicates the minimum guaranteed time in milliseconds
+ between the loss of external power and the silence of the ONU. This does not
+ include survival time attributable to a backup battery. The value zero implies that
+ the actual time is not known.
+
+ Optional
+ """
+ ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+ if ontg is None or ATTRIBUTES_KEY not in ontg:
+ return None
+
+ return ontg[ATTRIBUTES_KEY].get('onu_survival_time', 0)
+
+ @property
+ def equipment_id(self):
+ """
+ This attribute may be used to identify the specific type of ONU. In some
+ environments, this attribute may include the equipment CLEI code.
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('equipment_id')
+
+ @property
+ def omcc_version(self):
+ """
+ This attribute identifies the version of the OMCC protocol being used by the
+ ONU. This allows the OLT to manage a network with ONUs that support different
+ OMCC versions. Release levels of [ITU-T G.984.4] are supported with code
+ points of the form 0x8y and 0x9y, where y is a hexadecimal digit in the range
+ 0..F. Support for continuing revisions of this Recommendation is defined in
+ the 0xAy range.
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return OMCCVersion.to_enum(ont2g[ATTRIBUTES_KEY].get('omcc_version', 0))
+
+ @property
+ def vendor_product_code(self):
+ """
+ This attribute contains a vendor-specific product code for the ONU
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('vendor_product_code')
+
+ @property
+ def total_priority_queues(self):
+ """
+ This attribute reports the total number of upstream priority queues
+ that are not associated with a circuit pack, but with the ONU in its entirety
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('total_priority_queue_number')
+
+ @property
+ def total_traffic_schedulers(self):
+ """
+ This attribute reports the total number of traffic schedulers that
+ are not associated with a circuit pack, but with the ONU in its entirety.
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('total_traffic_scheduler_number')
+
+ @property
+ def total_gem_ports(self):
+ """
+ This attribute reports the total number of GEM port-IDs supported
+ by the ONU.
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('total_gem_port_id_number')
+
+ @property
+ def uptime(self):
+ """
+ This attribute counts 10 ms intervals since the ONU was last initialized.
+ It rolls over to 0 when full
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('sys_uptime')
+
+ @property
+ def connectivity_capability(self):
+ """
+ This attribute indicates the Ethernet connectivity models that the ONU
+ can support. The value 0 indicates that the capability is not supported; 1 signifies
+ support.
+
+ Bit Model [Figure reference ITU-T 988]
+ 1 (LSB) N:1 bridging, Figure 8.2.2-3
+ 2 1:M mapping, Figure 8.2.2-4
+ 3 1:P filtering, Figure 8.2.2-5
+ 4 N:M bridge-mapping, Figure 8.2.2-6
+ 5 1:MP map-filtering, Figure 8.2.2-7
+ 6 N:P bridge-filtering, Figure 8.2.2-8
+                7       N:MP bridge-map-filtering, Figure 8.2.2-9
+ 8...16 Reserved
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('connectivity_capability')
+
+ @property
+ def qos_configuration_flexibility(self):
+ """
+ This attribute reports whether various managed entities in the
+ ONU are fixed by the ONU's architecture or whether they are configurable. For
+ backward compatibility, and if the ONU does not support this attribute, all such
+ attributes are understood to be hard-wired.
+
+ Bit Interpretation when bit value = 1
+ 1 (LSB) Priority queue ME: Port field of related port attribute is
+ read-write and can point to any T-CONT or UNI port in the
+ same slot
+ 2 Priority queue ME: The traffic scheduler pointer is permitted
+ to refer to any other traffic scheduler in the same slot
+ 3 Traffic scheduler ME: T-CONT pointer is read-write
+ 4 Traffic scheduler ME: Policy attribute is read-write
+ 5 T-CONT ME: Policy attribute is read-write
+ 6 Priority queue ME: Priority field of related port attribute is
+ read-write
+ 7..16 Reserved
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('qos_configuration_flexibility')
+
+ @property
+ def priority_queue_scale_factor(self):
+ """
+ This specifies the scale factor of several attributes of the priority
+ queue managed entity of section 5.2.8
+ """
+ ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+ if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+ return None
+
+ return ont2g[ATTRIBUTES_KEY].get('priority_queue_scale_factor', 1)
+
+ @property
+ def cardholder_entities(self):
+ """
+ Return a dictionary containing some overall information on the CardHolder
+ instances for this ONU.
+ """
+ ch = self._get_capability('_cardholder', Cardholder.class_id)
+ results = dict()
+
+ if ch is not None:
+ for inst, inst_data in ch.items():
+ if isinstance(inst, int):
+ results[inst] = {
+ 'entity-id': inst,
+ 'is-single-piece': inst >= 256,
+ 'slot-number': inst & 0xff,
+ 'actual-plug-in-type': inst_data[ATTRIBUTES_KEY].get('actual_plugin_unit_type', 0),
+ 'actual-equipment-id': inst_data[ATTRIBUTES_KEY].get('actual_equipment_id', 0),
+ 'protection-profile-ptr': inst_data[ATTRIBUTES_KEY].get('protection_profile_pointer', 0),
+ }
+ return results if len(results) else None
+
+ @property
+ def circuitpack_entities(self):
+ """
+        Return a dictionary containing some overall information on the CircuitPack
+        instances for this ONU.
+ """
+ cp = self._get_capability('_circuit_pack', CircuitPack.class_id)
+ results = dict()
+
+ if cp is not None:
+ for inst, inst_data in cp.items():
+ if isinstance(inst, int):
+ results[inst] = {
+ 'entity-id': inst,
+ 'number-of-ports': inst_data[ATTRIBUTES_KEY].get('number_of_ports', 0),
+ 'serial-number': inst_data[ATTRIBUTES_KEY].get('serial_number', 0),
+ 'version': inst_data[ATTRIBUTES_KEY].get('version', 0),
+ 'vendor-id': inst_data[ATTRIBUTES_KEY].get('vendor_id', 0),
+ 'total-tcont-count': inst_data[ATTRIBUTES_KEY].get('total_tcont_buffer_number', 0),
+ 'total-priority-queue-count': inst_data[ATTRIBUTES_KEY].get('total_priority_queue_number', 0),
+ 'total-traffic-sched-count': inst_data[ATTRIBUTES_KEY].get('total_traffic_scheduler_number', 0),
+ }
+
+ return results if len(results) else None
+
+ @property
+ def software_images(self):
+ """
+ Get a list of software image information for the ONU. The information is provided
+ so that it may be directly added to the protobuf Device information software list.
+ """
+ sw = self._get_capability('_software', SoftwareImage.class_id)
+ images = list()
+
+ if sw is not None:
+ for inst, inst_data in sw.items():
+ if isinstance(inst, int):
+ is_active = inst_data[ATTRIBUTES_KEY].get('is_active', False)
+
+ images.append(Image(name='running-revision' if is_active else 'candidate-revision',
+ version=str(inst_data[ATTRIBUTES_KEY].get('version',
+ 'Not Available').rstrip('\0')),
+ is_active=is_active,
+ is_committed=inst_data[ATTRIBUTES_KEY].get('is_committed',
+ False),
+ is_valid=inst_data[ATTRIBUTES_KEY].get('is_valid',
+ False),
+ install_datetime='Not Available',
+ hash=str(inst_data[ATTRIBUTES_KEY].get('image_hash',
+ 'Not Available').rstrip('\0'))))
+ return images if len(images) else None
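+        # Illustrative sketch only of the returned protobuf Image list: the active
+        # image is reported as 'running-revision' and the standby image as
+        # 'candidate-revision', e.g. (hypothetical values):
+        #
+        #   [Image(name='running-revision', version='V1.2.3', is_active=True,
+        #          is_committed=True, is_valid=True, ...),
+        #    Image(name='candidate-revision', version='V1.2.4', is_active=False, ...)]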
+
+ @property
+ def ani_g_entities(self):
+ """
+ This managed entity organizes data associated with each access network
+ interface supported by a G-PON ONU. The ONU automatically creates one
+ instance of this managed entity for each PON physical port.
+ """
+ ag = self._get_capability('_ani_g', AniG.class_id)
+ results = dict()
+
+ if ag is not None:
+ for inst, inst_data in ag.items():
+ if isinstance(inst, int):
+ results[inst] = {
+ 'entity-id': inst,
+ 'slot-number': (inst >> 8) & 0xff,
+ 'port-number': inst & 0xff,
+ 'total-tcont-count': inst_data[ATTRIBUTES_KEY].get('total_tcont_number', 0),
+ 'piggyback-dba-reporting': inst_data[ATTRIBUTES_KEY].get('piggyback_dba_reporting', 0),
+ }
+ return results if len(results) else None
+
+ @property
+ def uni_g_entities(self):
+ """
+ This managed entity organizes data associated with user network interfaces
+ (UNIs) supported by GEM. One instance of the UNI-G managed entity exists
+ for each UNI supported by the ONU.
+
+ The ONU automatically creates or deletes instances of this managed entity
+ upon the creation or deletion of a real or virtual circuit pack managed
+ entity, one per port.
+ """
+ ug = self._get_capability('_uni_g', UniG.class_id)
+ results = dict()
+
+ if ug is not None:
+ for inst, inst_data in ug.items():
+ if isinstance(inst, int):
+ results[inst] = {
+ 'entity-id': inst,
+ 'management-capability': inst_data[ATTRIBUTES_KEY].get('management_capability', 0)
+ }
+ return results if len(results) else None
+
+ @property
+ def pptp_entities(self):
+ """
+ Returns discovered PPTP Ethernet entities. TODO more detail here
+ """
+ pptp = self._get_capability('_pptp', PptpEthernetUni.class_id)
+ results = dict()
+
+ if pptp is not None:
+ for inst, inst_data in pptp.items():
+ if isinstance(inst, int):
+ results[inst] = {
+ 'entity-id': inst,
+ 'expected-type': inst_data[ATTRIBUTES_KEY].get('expected_type', 0),
+ 'sensed-type': inst_data[ATTRIBUTES_KEY].get('sensed_type', 0),
+ 'autodetection-config': inst_data[ATTRIBUTES_KEY].get('auto_detection_configuration', 0),
+ 'ethernet-loopback-config': inst_data[ATTRIBUTES_KEY].get('ethernet_loopback_configuration', 0),
+ 'administrative-state': inst_data[ATTRIBUTES_KEY].get('administrative_state', 0),
+ 'operational-state': inst_data[ATTRIBUTES_KEY].get('operational_state', 0),
+ 'config-ind': inst_data[ATTRIBUTES_KEY].get('configuration_ind', 0),
+ 'max-frame-size': inst_data[ATTRIBUTES_KEY].get('max_frame_size', 0),
+ 'dte-dce-ind': inst_data[ATTRIBUTES_KEY].get('dte_or_dce_ind', 0),
+ 'pause-time': inst_data[ATTRIBUTES_KEY].get('pause_time', 0),
+ 'bridged-ip-ind': inst_data[ATTRIBUTES_KEY].get('bridged_or_ip_ind', 0),
+ 'arc': inst_data[ATTRIBUTES_KEY].get('arc', 0),
+ 'arc-interval': inst_data[ATTRIBUTES_KEY].get('arc_interval', 0),
+ 'pppoe-filter': inst_data[ATTRIBUTES_KEY].get('ppoe_filter', 0),
+ 'power-control': inst_data[ATTRIBUTES_KEY].get('power_control', 0)
+ }
+ return results if len(results) else None
+
+ @property
+ def veip_entities(self):
+ """
+ Returns discovered VEIP entities. TODO more detail here
+ """
+ veip = self._get_capability('_veip', VeipUni.class_id)
+ results = dict()
+
+ if veip is not None:
+ for inst, inst_data in veip.items():
+ if isinstance(inst, int):
+ results[inst] = {
+ 'entity-id': inst,
+ 'administrative-state': inst_data[ATTRIBUTES_KEY].get('administrative_state', 0),
+ 'operational-state': inst_data[ATTRIBUTES_KEY].get('operational_state', 0),
+ 'interdomain-name': inst_data[ATTRIBUTES_KEY].get('interdomain_name', ""),
+ 'tcp-udp-pointer': inst_data[ATTRIBUTES_KEY].get('tcp_udp_pointer', 0),
+ 'iana-assigned-port': inst_data[ATTRIBUTES_KEY].get('iana_assigned_port', 0)
+ }
+ return results if len(results) else None
diff --git a/python/adapters/extensions/omci/onu_device_entry.py b/python/adapters/extensions/omci/onu_device_entry.py
new file mode 100644
index 0000000..7a0c439
--- /dev/null
+++ b/python/adapters/extensions/omci/onu_device_entry.py
@@ -0,0 +1,635 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import structlog
+from copy import deepcopy
+from voltha.protos.device_pb2 import ImageDownload
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes
+import voltha.extensions.omci.omci_entities as omci_entities
+from voltha.extensions.omci.omci_cc import OMCI_CC
+from common.event_bus import EventBusClient
+from voltha.extensions.omci.tasks.task_runner import TaskRunner
+from voltha.extensions.omci.onu_configuration import OnuConfiguration
+from voltha.extensions.omci.tasks.reboot_task import OmciRebootRequest, RebootFlags
+from voltha.extensions.omci.tasks.omci_modify_request import OmciModifyRequest
+from voltha.extensions.omci.omci_me import OntGFrame
+from voltha.extensions.omci.state_machines.image_agent import ImageAgent
+
+from twisted.internet import reactor, defer
+from enum import IntEnum
+
+OP = EntityOperations
+RC = ReasonCodes
+
+ACTIVE_KEY = 'active'
+IN_SYNC_KEY = 'in-sync'
+LAST_IN_SYNC_KEY = 'last-in-sync-time'
+SUPPORTED_MESSAGE_ENTITY_KEY = 'managed-entities'
+SUPPORTED_MESSAGE_TYPES_KEY = 'message-type'
+
+
+class OnuDeviceEvents(IntEnum):
+ # Events of interest to Device Adapters and OpenOMCI State Machines
+ DeviceStatusEvent = 0 # OnuDeviceEntry running status changed
+ MibDatabaseSyncEvent = 1 # MIB database sync changed
+ OmciCapabilitiesEvent = 2 # OMCI ME and message type capabilities
+ AlarmDatabaseSyncEvent = 3 # Alarm database sync changed
+
+ # TODO: Add other events here as needed
+
+
+class OnuDeviceEntry(object):
+ """
+ An ONU Device entry in the MIB
+ """
+ def __init__(self, omci_agent, device_id, adapter_agent, custom_me_map,
+ mib_db, alarm_db, support_classes, clock=None):
+ """
+ Class initializer
+
+ :param omci_agent: (OpenOMCIAgent) Reference to OpenOMCI Agent
+ :param device_id: (str) ONU Device ID
+ :param adapter_agent: (AdapterAgent) Adapter agent for ONU
+ :param custom_me_map: (dict) Additional/updated ME to add to class map
+ :param mib_db: (MibDbApi) MIB Database reference
+ :param alarm_db: (MibDbApi) Alarm Table/Database reference
+ :param support_classes: (dict) State machines and tasks for this ONU
+ """
+ self.log = structlog.get_logger(device_id=device_id)
+
+ self._started = False
+ self._omci_agent = omci_agent # OMCI AdapterAgent
+ self._device_id = device_id # ONU Device ID
+ self._adapter_agent = adapter_agent
+ self._runner = TaskRunner(device_id, clock=clock) # OMCI_CC Task runner
+ self._deferred = None
+ # self._img_download_deferred = None # deferred of image file download from server
+ self._omci_upgrade_deferred = None # deferred of ONU OMCI upgrading procedure
+ self._omci_activate_deferred = None # deferred of ONU OMCI Software Image Activate
+ self._img_deferred = None # deferred returned to caller of do_onu_software_download
+ self._first_in_sync = False
+ self._first_capabilities = False
+ self._timestamp = None
+ # self._image_download = None # (voltha_pb2.ImageDownload)
+ self.reactor = clock if clock is not None else reactor
+
+ # OMCI related databases are on a per-agent basis. State machines and tasks
+ # are per ONU Vendor
+ #
+ self._support_classes = support_classes
+ self._configuration = None
+
+ try:
+ # MIB Synchronization state machine
+ self._mib_db_in_sync = False
+ mib_synchronizer_info = support_classes.get('mib-synchronizer')
+ advertise = mib_synchronizer_info['advertise-events']
+ self._mib_sync_sm = mib_synchronizer_info['state-machine'](self._omci_agent,
+ device_id,
+ mib_synchronizer_info['tasks'],
+ mib_db,
+ advertise_events=advertise)
+ # ONU OMCI Capabilities state machine
+ capabilities_info = support_classes.get('omci-capabilities')
+ advertise = capabilities_info['advertise-events']
+ self._capabilities_sm = capabilities_info['state-machine'](self._omci_agent,
+ device_id,
+ capabilities_info['tasks'],
+ advertise_events=advertise)
+ # ONU Performance Monitoring Intervals state machine
+ interval_info = support_classes.get('performance-intervals')
+ advertise = interval_info['advertise-events']
+ self._pm_intervals_sm = interval_info['state-machine'](self._omci_agent, device_id,
+ interval_info['tasks'],
+ advertise_events=advertise)
+
+ # ONU ALARM Synchronization state machine
+ self._alarm_db_in_sync = False
+ alarm_synchronizer_info = support_classes.get('alarm-synchronizer')
+ advertise = alarm_synchronizer_info['advertise-events']
+ self._alarm_sync_sm = alarm_synchronizer_info['state-machine'](self._omci_agent,
+ device_id,
+ alarm_synchronizer_info['tasks'],
+ alarm_db,
+ advertise_events=advertise)
+ # State machine of downloading image file from server
+ downloader_info = support_classes.get('image_downloader')
+ image_upgrader_info = support_classes.get('image_upgrader')
+ # image_activate_info = support_classes.get('image_activator')
+ advertise = downloader_info['advertise-event']
+ # self._img_download_sm = downloader_info['state-machine'](self._omci_agent, device_id,
+ # downloader_info['tasks'],
+ # advertise_events=advertise)
+ self._image_agent = ImageAgent(self._omci_agent, device_id,
+ downloader_info['state-machine'], downloader_info['tasks'],
+ image_upgrader_info['state-machine'], image_upgrader_info['tasks'],
+ # image_activate_info['state-machine'],
+ advertise_events=advertise, clock=clock)
+
+ # self._omci_upgrade_sm = image_upgrader_info['state-machine'](device_id, advertise_events=advertise)
+
+ except Exception as e:
+ self.log.exception('state-machine-create-failed', e=e)
+ raise
+
+ # Put state machines in the order you wish to start them
+
+ self._state_machines = []
+ self._on_start_state_machines = [ # Run when 'start()' called
+ self._mib_sync_sm,
+ self._capabilities_sm,
+ ]
+ self._on_sync_state_machines = [ # Run after first in_sync event
+ self._alarm_sync_sm,
+ ]
+ self._on_capabilities_state_machines = [ # Run after first capabilities events
+ self._pm_intervals_sm
+ ]
+ self._custom_me_map = custom_me_map
+ self._me_map = omci_entities.entity_id_to_class_map.copy()
+
+ if custom_me_map is not None:
+ self._me_map.update(custom_me_map)
+
+ self.event_bus = EventBusClient()
+
+ # Create OMCI communications channel
+ self._omci_cc = OMCI_CC(adapter_agent, self.device_id, self._me_map, clock=clock)
+
+ @staticmethod
+ def event_bus_topic(device_id, event):
+ """
+ Get the topic name for a given event for this ONU Device
+ :param device_id: (str) ONU Device ID
+ :param event: (OnuDeviceEvents) Type of event
+ :return: (str) Topic string
+ """
+ assert event in OnuDeviceEvents, \
+ 'Event {} is not an ONU Device Event'.format(event.name)
+ return 'omci-device:{}:{}'.format(device_id, event.name)
+
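+ # Editorial sketch (not part of the original change): a device adapter can
+ # listen for device status changes by building the topic with
+ # event_bus_topic() and subscribing through the shared EventBusClient. The
+ # device ID string below is hypothetical.
+ #
+ # def _on_status(_topic, msg):
+ # log.info('onu-status-changed', active=msg[ACTIVE_KEY])
+ #
+ # topic = OnuDeviceEntry.event_bus_topic('onu-0001',
+ # OnuDeviceEvents.DeviceStatusEvent)
+ # subscription = EventBusClient().subscribe(topic, _on_status)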
+ @property
+ def device_id(self):
+ return self._device_id
+
+ @property
+ def omci_cc(self):
+ return self._omci_cc
+
+ @property
+ def adapter_agent(self):
+ return self._adapter_agent
+
+ @property
+ def task_runner(self):
+ return self._runner
+
+ @property
+ def mib_synchronizer(self):
+ """
+ Reference to the OpenOMCI MIB Synchronization state machine for this ONU
+ """
+ return self._mib_sync_sm
+
+ @property
+ def omci_capabilities(self):
+ """
+ Reference to the OpenOMCI OMCI Capabilities state machine for this ONU
+ """
+ return self._capabilities_sm
+
+ @property
+ def pm_intervals_state_machine(self):
+ """
+ Reference to the OpenOMCI PM Intervals state machine for this ONU
+ """
+ return self._pm_intervals_sm
+
+ def set_pm_config(self, pm_config):
+ """
+ Set PM interval configuration
+
+ :param pm_config: (OnuPmIntervalMetrics) PM Interval configuration
+ """
+ self._pm_intervals_sm.set_pm_config(pm_config)
+
+ @property
+ def timestamp(self):
+ """Pollable Metrics last collected timestamp"""
+ return self._timestamp
+
+ @timestamp.setter
+ def timestamp(self, value):
+ self._timestamp = value
+
+ @property
+ def alarm_synchronizer(self):
+ """
+ Reference to the OpenOMCI Alarm Synchronization state machine for this ONU
+ """
+ return self._alarm_sync_sm
+
+ @property
+ def active(self):
+ """
+ Is the ONU device currently active/running
+ """
+ return self._started
+
+ @property
+ def custom_me_map(self):
+ """ Vendor-specific Managed Entity Map for this vendor's device"""
+ return self._custom_me_map
+
+ @property
+ def me_map(self):
+ """ Combined ME and Vendor-specific Managed Entity Map for this device"""
+ return self._me_map
+
+ def _cancel_deferred(self):
+ d, self._deferred = self._deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ @property
+ def mib_db_in_sync(self):
+ return self._mib_db_in_sync
+
+ @mib_db_in_sync.setter
+ def mib_db_in_sync(self, value):
+ if self._mib_db_in_sync != value:
+ # Save value
+ self._mib_db_in_sync = value
+
+ # Start up other state machines if needed
+ if self._first_in_sync:
+ self.first_in_sync_event()
+
+ # Notify any event listeners
+ topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+ OnuDeviceEvents.MibDatabaseSyncEvent)
+ msg = {
+ IN_SYNC_KEY: self._mib_db_in_sync,
+ LAST_IN_SYNC_KEY: self.mib_synchronizer.last_mib_db_sync
+ }
+ self.event_bus.publish(topic=topic, msg=msg)
+
+ @property
+ def alarm_db_in_sync(self):
+ return self._alarm_db_in_sync
+
+ @alarm_db_in_sync.setter
+ def alarm_db_in_sync(self, value):
+ if self._alarm_db_in_sync != value:
+ # Save value
+ self._alarm_db_in_sync = value
+
+ # Start up other state machines if needed
+ if self._first_in_sync:
+ self.first_in_sync_event()
+
+ # Notify any event listeners
+ topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+ OnuDeviceEvents.AlarmDatabaseSyncEvent)
+ msg = {
+ IN_SYNC_KEY: self._alarm_db_in_sync
+ }
+ self.event_bus.publish(topic=topic, msg=msg)
+
+ @property
+ def configuration(self):
+ """
+ Get the OMCI Configuration object for this ONU. This is a class that provides some
+ common database access functions for ONU capabilities and read-only configuration values.
+
+ :return: (OnuConfiguration)
+ """
+ return self._configuration
+
+ @property
+ def image_agent(self):
+ return self._image_agent
+
+ # @property
+ # def image_download(self):
+ # return self._image_download
+
+ def start(self):
+ """
+ Start the ONU Device Entry state machines
+ """
+ self.log.debug('OnuDeviceEntry.start', previous=self._started)
+ if self._started:
+ return
+
+ self._started = True
+ self._omci_cc.enabled = True
+ self._first_in_sync = True
+ self._first_capabilities = True
+ self._runner.start()
+ self._configuration = OnuConfiguration(self._omci_agent, self._device_id)
+
+ # Start MIB Sync and other state machines that can run before the first
+ # MIB Synchronization event occurs. Start 'later' so that any
+ # ONU Device, OMCI DB, OMCI Agent, and others are fully started before
+ # performing the start.
+
+ self._state_machines = []
+
+ def start_state_machines(machines):
+ for sm in machines:
+ self._state_machines.append(sm)
+ sm.start()
+
+ self._deferred = reactor.callLater(0, start_state_machines,
+ self._on_start_state_machines)
+ # Notify any event listeners
+ self._publish_device_status_event()
+
+ def stop(self):
+ """
+ Stop the ONU Device Entry state machines
+ """
+ if not self._started:
+ return
+
+ self._started = False
+ self._cancel_deferred()
+ self._omci_cc.enabled = False
+
+ # Halt MIB Sync and other state machines
+ for sm in self._state_machines:
+ sm.stop()
+
+ self._state_machines = []
+
+ # Stop task runner
+ self._runner.stop()
+
+ # Notify any event listeners
+ self._publish_device_status_event()
+
+ def first_in_sync_event(self):
+ """
+ This event is called on the first MIB synchronization event after
+ OpenOMCI has been started. It is responsible for starting any
+ other state machine and to initiate an ONU Capabilities report
+ """
+ if self._first_in_sync:
+ self._first_in_sync = False
+
+ # Start up the ONU Capabilities task
+ self._configuration.reset()
+
+ # Ensure that the ONU-G Administrative lock is disabled
+ def failure(reason):
+ self.log.error('disable-admin-state-lock', reason=reason)
+
+ frame = OntGFrame(attributes={'administrative_state': 0}).set()
+ task = OmciModifyRequest(self._omci_agent, self.device_id, frame)
+ self.task_runner.queue_task(task).addErrback(failure)
+
+ # Start up any other remaining OpenOMCI state machines
+ def start_state_machines(machines):
+ for sm in machines:
+ self._state_machines.append(sm)
+ reactor.callLater(0, sm.start)
+
+ self._deferred = reactor.callLater(0, start_state_machines,
+ self._on_sync_state_machines)
+
+ # If an ongoing upgrade has not completed, restart it
+ if self._img_deferred is not None:
+ self._image_agent.onu_bootup()
+
+ def first_in_capabilities_event(self):
+ """
+ This event is called on the first capabilities event after
+ OpenOMCI has been started. It is responsible for starting any
+ other state machine. These are often state machines that have tasks
+ that are dependent upon knowing if various MEs are supported
+ """
+ if self._first_capabilities:
+ self._first_capabilities = False
+
+ # Start up any other remaining OpenOMCI state machines
+ def start_state_machines(machines):
+ for sm in machines:
+ self._state_machines.append(sm)
+ reactor.callLater(0, sm.start)
+
+ self._deferred = reactor.callLater(0, start_state_machines,
+ self._on_capabilities_state_machines)
+
+ # def __on_omci_download_success(self, image_download):
+ # self.log.debug("__on_omci_download_success", image=image_download)
+ # self._omci_upgrade_deferred = None
+ # # self._ret_deferred = None
+ # self._omci_activate_deferred = self._image_agent.activate_onu_image(image_download.name)
+ # self._omci_activate_deferred.addCallbacks(self.__on_omci_image_activate_success,
+ # self.__on_omci_image_activate_fail, errbackArgs=(image_name,))
+ # return image_name
+
+ # def __on_omci_download_fail(self, fail, image_name):
+ # self.log.debug("__on_omci_download_fail", failure=fail, image_name=image_name)
+ # self.reactor.callLater(0, self._img_deferred.errback, fail)
+ # self._omci_upgrade_deferred = None
+ # self._img_deferred = None
+
+ def __on_omci_image_activate_success(self, image_name):
+ self.log.debug("__on_omci_image_activate_success", image_name=image_name)
+ self._omci_activate_deferred = None
+ self._img_deferred.callback(image_name)
+ self._img_deferred = None
+ return image_name
+
+ def __on_omci_image_activate_fail(self, fail, image_name):
+ self.log.debug("__on_omci_image_activate_fail", faile=fail, image_name=image_name)
+ self._omci_activate_deferred = None
+ self._img_deferred.errback(fail)
+ self._img_deferred = None
+
+ def _publish_device_status_event(self):
+ """
+ Publish the ONU Device start/stop status.
+ """
+ topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+ OnuDeviceEvents.DeviceStatusEvent)
+ msg = {ACTIVE_KEY: self._started}
+ self.event_bus.publish(topic=topic, msg=msg)
+
+ def publish_omci_capabilities_event(self):
+ """
+ Publish the ONU Device OMCI ME and message-type capabilities.
+ """
+ if self._first_capabilities:
+ self.first_in_capabilities_event()
+
+ topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+ OnuDeviceEvents.OmciCapabilitiesEvent)
+ msg = {
+ SUPPORTED_MESSAGE_ENTITY_KEY: self.omci_capabilities.supported_managed_entities,
+ SUPPORTED_MESSAGE_TYPES_KEY: self.omci_capabilities.supported_message_types
+ }
+ self.event_bus.publish(topic=topic, msg=msg)
+
+ def delete(self):
+ """
+ Stop the ONU Device's state machines and remove the ONU, along with any
+ related OMCI state information, from the OpenOMCI framework
+ """
+ self.stop()
+ self.mib_synchronizer.delete()
+
+ # OpenOMCI cleanup
+ if self._omci_agent is not None:
+ self._omci_agent.remove_device(self._device_id, cleanup=True)
+
+ def query_mib(self, class_id=None, instance_id=None, attributes=None):
+ """
+ Get MIB database information.
+
+ This method can be used to request information from the database to the detailed
+ level requested
+
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+ :param attributes: (list or str) Managed Entity instance's attributes
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('query', class_id=class_id, instance_id=instance_id,
+ attributes=attributes)
+
+ return self.mib_synchronizer.query_mib(class_id=class_id, instance_id=instance_id,
+ attributes=attributes)
+
+ def query_mib_single_attribute(self, class_id, instance_id, attribute):
+ """
+ Get MIB database information for a single specific attribute
+
+ This method can be used to request information from the database to the detailed
+ level requested
+
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+ :param attribute: (str) Managed Entity instance's attribute
+
+ :return: (varies) The value requested. If class/inst/attribute is
+ not found, None is returned
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('query-single', class_id=class_id,
+ instance_id=instance_id, attributes=attribute)
+ assert isinstance(attribute, basestring), \
+ 'Only a single attribute value can be retrieved'
+
+ entry = self.mib_synchronizer.query_mib(class_id=class_id,
+ instance_id=instance_id,
+ attributes=attribute)
+
+ return entry[attribute] if attribute in entry else None
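+ # Editorial sketch (not part of the original change): typical read-only use
+ # of the two MIB query helpers above, assuming 'onu_device' is the
+ # OnuDeviceEntry returned by OpenOMCIAgent.add_device(). Class 256 is ONT-G;
+ # the attribute name is taken from the OMCI entity definitions.
+ #
+ # whole_mib = onu_device.query_mib() # entire MIB for the ONU
+ # ont_g = onu_device.query_mib(class_id=256, instance_id=0)
+ # version = onu_device.query_mib_single_attribute(256, 0, 'version')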
+
+ def query_alarm_table(self, class_id=None, instance_id=None):
+ """
+ Get Alarm information
+
+ This method can be used to request information from the alarm database to
+ the detailed level requested
+
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self.log.debug('query', class_id=class_id, instance_id=instance_id)
+
+ return self.alarm_synchronizer.query_mib(class_id=class_id, instance_id=instance_id)
+
+ def reboot(self,
+ flags=RebootFlags.Reboot_Unconditionally,
+ timeout=OmciRebootRequest.DEFAULT_REBOOT_TIMEOUT):
+ """
+ Request a reboot of the ONU
+
+ :param flags: (RebootFlags) Reboot condition
+ :param timeout: (int) Reboot task priority
+ :return: (deferred) Fires upon completion or error
+ """
+ assert self.active, 'This device is not active'
+
+ return self.task_runner.queue_task(OmciRebootRequest(self._omci_agent,
+ self.device_id,
+ flags=flags,
+ timeout=timeout))
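+ # Editorial sketch (not part of the original change): requesting a reboot and
+ # reacting to the result, assuming 'log' is a structlog logger in the caller.
+ #
+ # d = onu_device.reboot(flags=RebootFlags.Reboot_Unconditionally)
+ # d.addCallbacks(lambda _: log.info('reboot-requested'),
+ # lambda reason: log.error('reboot-failed', reason=reason))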
+
+ # def get_imagefile(self, local_name, local_dir, remote_url=None):
+ # """
+ # Return a Deferred that will be triggered if the file is locally available
+ # or downloaded successfully
+ # """
+ # self.log.info('start download from {}'.format(remote_url))
+
+ # # for debug purpose, start runner here to queue downloading task
+ # # self._runner.start()
+
+ # return self._image_agent.get_image(self._image_download)
+
+ def do_onu_software_download(self, image_dnld):
+ """
+ :param image_dnld: (ImageDownload) Image download request
+ :return: (Deferred) Fires when the download/upgrade completes, on success or failure
+ """
+ self.log.debug('do_onu_software_download')
+ image_download = deepcopy(image_dnld)
+ # self._img_download_deferred = self._image_agent.get_image(self._image_download)
+ # self._img_download_deferred.addCallbacks(self.__on_download_success, self.__on_download_fail, errbackArgs=(self._image_download,))
+ # self._ret_deferred = defer.Deferred()
+ # return self._ret_deferred
+ return self._image_agent.get_image(image_download)
+
+ # def do_onu_software_switch(self):
+ def do_onu_image_activate(self, image_dnld_name):
+ """
+ Return a Deferred that will be triggered when switching software image results in success or failure
+ """
+ if self._img_deferred is None:
+ self.log.debug('do_onu_image_activate')
+ self._img_deferred = defer.Deferred()
+ self._omci_upgrade_deferred = self._image_agent.onu_omci_download(image_dnld_name)
+ self._omci_upgrade_deferred.addCallbacks(self.__on_omci_image_activate_success,
+ self.__on_omci_image_activate_fail, errbackArgs=(image_dnld_name,))
+ return self._img_deferred
+
+ def cancel_onu_software_download(self, image_name):
+ self.log.debug('cancel_onu_software_download')
+ self._image_agent.cancel_download_image(image_name)
+ self._image_agent.cancel_upgrade_onu()
+ if self._img_deferred and not self._img_deferred.called:
+ self._img_deferred.cancel()
+ self._img_deferred = None
+ # self._image_download = None
+
+ def get_image_download_status(self, image_name):
+ return self._image_agent.get_image_status(image_name)
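+ # Editorial sketch (not part of the original change): the intended two-step
+ # upgrade flow exposed by this class, assuming 'image_dnld' is a populated
+ # ImageDownload message and 'log' is a structlog logger in the caller.
+ #
+ # d_fetch = onu_device.do_onu_software_download(image_dnld) # fetch image file
+ # d_act = onu_device.do_onu_image_activate(image_dnld.name) # OMCI download/activate
+ # d_act.addCallbacks(lambda name: log.info('image-active', image=name),
+ # lambda reason: log.error('image-activate-failed', reason=reason))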
+
diff --git a/python/adapters/extensions/omci/openomci_agent.py b/python/adapters/extensions/omci/openomci_agent.py
new file mode 100644
index 0000000..98ba684
--- /dev/null
+++ b/python/adapters/extensions/omci/openomci_agent.py
@@ -0,0 +1,283 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from twisted.internet import reactor
+from voltha.extensions.omci.database.mib_db_dict import MibDbVolatileDict
+from voltha.extensions.omci.database.mib_db_ext import MibDbExternal
+from voltha.extensions.omci.state_machines.mib_sync import MibSynchronizer
+from voltha.extensions.omci.tasks.mib_upload import MibUploadTask
+from voltha.extensions.omci.tasks.get_mds_task import GetMdsTask
+from voltha.extensions.omci.tasks.mib_resync_task import MibResyncTask
+from voltha.extensions.omci.tasks.mib_reconcile_task import MibReconcileTask
+from voltha.extensions.omci.tasks.sync_time_task import SyncTimeTask
+from voltha.extensions.omci.state_machines.alarm_sync import AlarmSynchronizer
+from voltha.extensions.omci.tasks.alarm_resync_task import AlarmResyncTask
+from voltha.extensions.omci.database.alarm_db_ext import AlarmDbExternal
+from voltha.extensions.omci.tasks.interval_data_task import IntervalDataTask
+from voltha.extensions.omci.onu_device_entry import OnuDeviceEntry
+from voltha.extensions.omci.state_machines.omci_onu_capabilities import OnuOmciCapabilities
+from voltha.extensions.omci.tasks.onu_capabilities_task import OnuCapabilitiesTask
+from voltha.extensions.omci.state_machines.performance_intervals import PerformanceIntervals
+from voltha.extensions.omci.tasks.omci_create_pm_task import OmciCreatePMRequest
+from voltha.extensions.omci.tasks.omci_delete_pm_task import OmciDeletePMRequest
+from voltha.extensions.omci.state_machines.image_agent import ImageDownloadeSTM, OmciSoftwareImageDownloadSTM
+from voltha.extensions.omci.tasks.file_download_task import FileDownloadTask
+from voltha.extensions.omci.tasks.omci_sw_image_upgrade_task import OmciSwImageUpgradeTask
+
+OpenOmciAgentDefaults = {
+ 'mib-synchronizer': {
+ 'state-machine': MibSynchronizer, # Implements the MIB synchronization state machine
+ 'database': MibDbVolatileDict, # Implements volatile ME MIB database
+ #'database': MibDbExternal, # Implements persistent ME MIB database
+ 'advertise-events': True, # Advertise events on OpenOMCI event bus
+ 'tasks': {
+ 'mib-upload': MibUploadTask,
+ 'get-mds': GetMdsTask,
+ 'mib-audit': GetMdsTask,
+ 'mib-resync': MibResyncTask,
+ 'mib-reconcile': MibReconcileTask
+ }
+ },
+ 'omci-capabilities': {
+ 'state-machine': OnuOmciCapabilities, # Implements OMCI capabilities state machine
+ 'advertise-events': False, # Advertise events on OpenOMCI event bus
+ 'tasks': {
+ 'get-capabilities': OnuCapabilitiesTask # Get supported ME and Commands
+ }
+ },
+ 'performance-intervals': {
+ 'state-machine': PerformanceIntervals, # Implements PM Intervals State machine
+ 'advertise-events': False, # Advertise events on OpenOMCI event bus
+ 'tasks': {
+ 'sync-time': SyncTimeTask,
+ 'collect-data': IntervalDataTask,
+ 'create-pm': OmciCreatePMRequest,
+ 'delete-pm': OmciDeletePMRequest,
+ },
+ },
+ 'alarm-synchronizer': {
+ 'state-machine': AlarmSynchronizer, # Implements the Alarm sync state machine
+ 'database': AlarmDbExternal, # For any State storage needs
+ 'advertise-events': True, # Advertise events on OpenOMCI event bus
+ 'tasks': {
+ 'alarm-resync': AlarmResyncTask
+ }
+ },
+ 'image_downloader': {
+ 'state-machine': ImageDownloadeSTM,
+ 'advertise-event': True,
+ 'tasks': {
+ 'download-file': FileDownloadTask
+ }
+ },
+ 'image_upgrader': {
+ 'state-machine': OmciSoftwareImageDownloadSTM,
+ 'advertise-event': True,
+ 'tasks': {
+ 'omci_upgrade_task': OmciSwImageUpgradeTask
+ }
+ }
+ # 'image_activator': {
+ # 'state-machine': OmciSoftwareImageActivateSTM,
+ # 'advertise-event': True,
+ # }
+}
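+# Editorial sketch (not part of the original change): an ONU adapter that needs
+# different behaviour, such as a persistent MIB database, can deep-copy these
+# defaults and override individual entries before passing them to the agent or
+# to add_device().
+#
+# from copy import deepcopy
+# my_support_classes = deepcopy(OpenOmciAgentDefaults)
+# my_support_classes['mib-synchronizer']['database'] = MibDbExternal
+# agent = OpenOMCIAgent(core, support_classes=my_support_classes)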
+
+
+class OpenOMCIAgent(object):
+ """
+ OpenOMCI for VOLTHA
+
+ This will become the primary interface into OpenOMCI for ONU Device Adapters
+ in VOLTHA v1.3 sprint 3 time frame.
+ """
+ def __init__(self, core, support_classes=OpenOmciAgentDefaults, clock=None):
+ """
+ Class initializer
+
+ :param core: (VolthaCore) VOLTHA Core
+ :param support_classes: (Dict) Classes to support OMCI
+ """
+ self.log = structlog.get_logger()
+ self._core = core
+ self.reactor = clock if clock is not None else reactor
+ self._started = False
+ self._devices = dict() # device-id -> DeviceEntry
+ self._event_bus = None
+
+ # OMCI related databases are on a per-agent basis. State machines and tasks
+ # are per ONU Vendor
+ #
+ # MIB Synchronization Database
+ self._mib_db = None
+ self._mib_database_cls = support_classes['mib-synchronizer']['database']
+
+ # Alarm Synchronization Database
+ self._alarm_db = None
+ self._alarm_database_cls = support_classes['alarm-synchronizer']['database']
+
+ @property
+ def core(self):
+ """ Return a reference to the VOLTHA Core component"""
+ return self._core
+
+ @property
+ def database_class(self):
+ return self._mib_database_cls
+
+ # TODO: Deprecate this; ImageAgent uses it and should not
+ @property
+ def database(self):
+ return self._mib_db
+
+ def start(self):
+ """
+ Start OpenOMCI
+ """
+ if self._started:
+ return
+
+ self.log.debug('OpenOMCIAgent.start')
+ self._started = True
+
+ try:
+ # Create all databases as needed. This should be done before
+ # State machines are started for the first time
+
+ if self._mib_db is None:
+ self._mib_db = self._mib_database_cls(self)
+
+ if self._alarm_db is None:
+ self._alarm_db = self._alarm_database_cls(self)
+
+ # Start/restore databases
+
+ self._mib_db.start()
+ self._alarm_db.start()
+
+ for device in self._devices.itervalues():
+ device.start()
+
+ except Exception as e:
+ self.log.exception('startup', e=e)
+
+ def stop(self):
+ """
+ Shutdown OpenOMCI
+ """
+ if not self._started:
+ return
+
+ self.log.debug('stop')
+ self._started = False
+ self._event_bus = None
+
+ # ONUs OMCI shutdown
+ for device in self._devices.itervalues():
+ device.stop()
+
+ # DB shutdown
+ self._mib_db.stop()
+ self._alarm_db.stop()
+
+ def mk_event_bus(self):
+ """ Get the event bus for OpenOMCI"""
+ if self._event_bus is None:
+ from voltha.extensions.omci.openomci_event_bus import OpenOmciEventBus
+ self._event_bus = OpenOmciEventBus()
+
+ return self._event_bus
+
+ def advertise(self, event_type, data):
+ """
+ Advertise an OpenOMCI event on the kafka bus
+ :param event_type: (int) Event Type (enumeration from OpenOMCI protobuf definitions)
+ :param data: (Message, dict, ...) Associated data (will be converted to a string)
+ """
+ if self._started:
+ try:
+ self.mk_event_bus().advertise(event_type, data)
+
+ except Exception as e:
+ self.log.exception('advertise-failure', e=e)
+
+ def add_device(self, device_id, adapter_agent, custom_me_map=None,
+ support_classes=OpenOmciAgentDefaults):
+ """
+ Add a new ONU to be managed.
+
+ To provide vendor-specific or custom Managed Entities, create your own Entity
+ ID to class mapping dictionary.
+
+ Since ONU devices can be added at any time (even during Device Handler
+ startup), the ONU device handler is responsible for calling start()/stop()
+ for this object.
+
+ :param device_id: (str) Device ID of ONU to add
+ :param adapter_agent: (AdapterAgent) Adapter agent for ONU
+ :param custom_me_map: (dict) Additional/updated ME to add to class map
+ :param support_classes: (dict) State machines and tasks for this ONU
+
+ :return: (OnuDeviceEntry) The ONU device
+ """
+ self.log.debug('OpenOMCIAgent.add-device', device_id=device_id)
+
+ device = self._devices.get(device_id)
+
+ if device is None:
+ device = OnuDeviceEntry(self, device_id, adapter_agent, custom_me_map,
+ self._mib_db, self._alarm_db, support_classes, clock=self.reactor)
+
+ self._devices[device_id] = device
+
+ return device
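+ # Editorial sketch (not part of the original change): the expected lifecycle
+ # from an ONU device handler, which owns start()/stop() of its own entry.
+ #
+ # agent = OpenOMCIAgent(core)
+ # agent.start()
+ # onu_device = agent.add_device(device_id, adapter_agent)
+ # onu_device.start() # kicks off MIB sync, capabilities, ...
+ # ...
+ # agent.remove_device(device_id, cleanup=True) # on ONU delete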
+
+ def remove_device(self, device_id, cleanup=False):
+ """
+ Remove a managed ONU
+
+ :param device_id: (str) Device ID of ONU to remove
+ :param cleanup: (bool) If true, scrub any state related information
+ """
+ self.log.debug('remove-device', device_id=device_id, cleanup=cleanup)
+
+ device = self._devices.get(device_id)
+
+ if device is not None:
+ device.stop()
+
+ if cleanup:
+ del self._devices[device_id]
+
+ def device_ids(self):
+ """
+ Get an immutable set of device IDs managed by this OpenOMCI instance
+
+ :return: (frozenset) Set of device IDs (str)
+ """
+ return frozenset(self._devices.keys())
+
+ def get_device(self, device_id):
+ """
+ Get ONU device entry. For external (non-OpenOMCI users) the ONU Device
+ returned should be used for read-only activity.
+
+ :param device_id: (str) ONU Device ID
+
+ :return: (OnuDeviceEntry) ONU Device entry
+ :raises KeyError: If device does not exist
+ """
+ return self._devices[device_id]
diff --git a/python/adapters/extensions/omci/openomci_event_bus.py b/python/adapters/extensions/omci/openomci_event_bus.py
new file mode 100644
index 0000000..5c67865
--- /dev/null
+++ b/python/adapters/extensions/omci/openomci_event_bus.py
@@ -0,0 +1,54 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.protobuf.message import Message
+from simplejson import dumps
+from common.event_bus import EventBusClient
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEvent
+from voltha.protos.omci_alarm_db_pb2 import AlarmOpenOmciEvent
+from common.utils.json_format import MessageToDict
+
+
+class OpenOmciEventBus(object):
+ """ Event bus for publishing OpenOMCI related events. """
+ __slots__ = (
+ '_event_bus_client', # The event bus client used to publish events.
+ '_topic' # the topic to publish to
+ )
+
+ def __init__(self):
+ self._event_bus_client = EventBusClient()
+ self._topic = 'openomci-events'
+
+ @staticmethod
+ def message_to_dict(m):
+ return MessageToDict(m, True, True, False)
+
+ def advertise(self, event_type, data):
+ if isinstance(data, Message):
+ msg = dumps(MessageToDict(data, True, True))
+ elif isinstance(data, dict):
+ msg = dumps(data)
+ else:
+ msg = str(data)
+
+ event_func = AlarmOpenOmciEvent if 'AlarmSynchronizer' in msg \
+ else OpenOmciEvent
+ event = event_func(
+ type=event_type,
+ data=msg
+ )
+
+ self._event_bus_client.publish(self._topic, event)
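+ # Editorial sketch (not part of the original change): OpenOMCIAgent.advertise()
+ # funnels events here; dict payloads are JSON-encoded and wrapped in an
+ # OpenOmciEvent (or AlarmOpenOmciEvent) before being published on the
+ # 'openomci-events' topic. The event type shown is assumed to exist in the
+ # OpenOmciEventType protobuf enum.
+ #
+ # bus = OpenOmciEventBus()
+ # bus.advertise(OpenOmciEventType.state_change,
+ # {'state-machine': 'MibSynchronizer', 'info': 'in_sync'})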
diff --git a/python/adapters/extensions/omci/state_machines/__init__.py b/python/adapters/extensions/omci/state_machines/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/adapters/extensions/omci/state_machines/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/extensions/omci/state_machines/alarm_sync.py b/python/adapters/extensions/omci/state_machines/alarm_sync.py
new file mode 100644
index 0000000..c7b7d64
--- /dev/null
+++ b/python/adapters/extensions/omci/state_machines/alarm_sync.py
@@ -0,0 +1,670 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from datetime import datetime
+from transitions import Machine
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_cc import OmciCCRxEvents, OMCI_CC, RX_RESPONSE_KEY
+from voltha.extensions.omci.omci_messages import OmciGetAllAlarmsResponse
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.database.alarm_db_ext import AlarmDbExternal
+from voltha.extensions.omci.database.mib_db_api import ATTRIBUTES_KEY
+from voltha.extensions.omci.omci_entities import CircuitPack, PptpEthernetUni, OntG, AniG
+
+from common.event_bus import EventBusClient
+from voltha.protos.omci_alarm_db_pb2 import AlarmOpenOmciEventType
+
+RxEvent = OmciCCRxEvents
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class AlarmSynchronizer(object):
+ """
+ OpenOMCI Alarm Synchronizer state machine
+ """
+ DEFAULT_STATES = ['disabled', 'starting', 'auditing', 'in_sync']
+
+ DEFAULT_TRANSITIONS = [
+ {'trigger': 'start', 'source': 'disabled', 'dest': 'starting'},
+
+ {'trigger': 'audit_alarm', 'source': 'starting', 'dest': 'auditing'},
+ {'trigger': 'sync_alarm', 'source': 'starting', 'dest': 'in_sync'},
+
+ {'trigger': 'success', 'source': 'auditing', 'dest': 'in_sync'},
+ {'trigger': 'audit_alarm', 'source': 'auditing', 'dest': 'auditing'},
+ {'trigger': 'failure', 'source': 'auditing', 'dest': 'auditing'},
+
+ {'trigger': 'audit_alarm', 'source': 'in_sync', 'dest': 'auditing'},
+
+ # Do wildcard 'stop' trigger last so it covers all previous states
+ {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+ ]
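+ # Editorial note (not part of the original change): the transitions library
+ # turns each trigger above into a method on this class, so the synchronizer
+ # is driven by calls such as self.start(), self.audit_alarm(), self.success()
+ # and self.failure(), with on_enter_<state>() hooks firing as states are
+ # entered. For example, using the pieces from OpenOmciAgentDefaults:
+ #
+ # sync = AlarmSynchronizer(agent, device_id,
+ # {'alarm-resync': AlarmResyncTask}, alarm_db)
+ # sync.start() # disabled -> starting; on_enter_starting() schedules an audit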
+ DEFAULT_TIMEOUT_RETRY = 15 # Seconds to delay after task failure/timeout
+ DEFAULT_AUDIT_DELAY = 180 # Periodic tick to audit the ONU's alarm table
+
+ def __init__(self, agent, device_id, alarm_sync_tasks, db,
+ advertise_events=False,
+ states=DEFAULT_STATES,
+ transitions=DEFAULT_TRANSITIONS,
+ initial_state='disabled',
+ timeout_delay=DEFAULT_TIMEOUT_RETRY,
+ audit_delay=DEFAULT_AUDIT_DELAY):
+ """
+ Class initialization
+
+ :param agent: (OpenOmciAgent) Agent
+ :param device_id: (str) ONU Device ID
+ :param db: (MibDbApi) MIB/Alarm Database
+ :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+ :param alarm_sync_tasks: (dict) Tasks to run
+ :param states: (list) List of valid states
+ :param transitions: (dict) Dictionary of triggers and state changes
+ :param initial_state: (str) Initial state machine state
+ :param timeout_delay: (int/float) Number of seconds after a timeout to attempt
+ a retry (goes back to starting state)
+ :param audit_delay: (int) Seconds between Alarm audits while in sync. Set to
+ zero to disable audit. An operator can request
+ an audit manually by calling 'self.audit_alarm'
+ """
+
+ self.log = structlog.get_logger(device_id=device_id)
+
+ self._agent = agent
+ self._device_id = device_id
+ self._device = None
+ self._database = db
+ self._timeout_delay = timeout_delay
+ self._audit_delay = audit_delay
+ self._resync_task = alarm_sync_tasks['alarm-resync']
+ self._advertise_events = advertise_events
+ self._alarm_manager = None
+ self._onu_id = None
+ self._uni_ports = list()
+ self._ani_ports = list()
+
+ self._deferred = None
+ self._current_task = None
+ self._task_deferred = None
+ self._last_alarm_sequence_value = 0
+ self._device_in_db = False
+
+ self._event_bus = EventBusClient()
+ self._omci_cc_subscriptions = { # RxEvent.enum -> Subscription Object
+ RxEvent.Get_ALARM_Get: None,
+ RxEvent.Alarm_Notification: None
+ }
+ self._omci_cc_sub_mapping = {
+ RxEvent.Get_ALARM_Get: self.on_alarm_update_response,
+ RxEvent.Alarm_Notification: self.on_alarm_notification
+ }
+
+ # Statistics and attributes
+ # TODO: add any others if it will support problem diagnosis
+
+ # Set up state machine to manage states
+ self.machine = Machine(model=self, states=states,
+ transitions=transitions,
+ initial=initial_state,
+ queued=True,
+ name='{}-{}'.format(self.__class__.__name__,
+ device_id))
+
+ def _cancel_deferred(self):
+ d1, self._deferred = self._deferred, None
+ d2, self._task_deferred = self._task_deferred, None
+
+ for d in [d1, d2]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def __str__(self):
+ return 'Alarm Synchronizer: Device ID: {}, State:{}'.format(self._device_id, self.state)
+
+ def delete(self):
+ """
+ Cleanup any state information
+ """
+ self.stop()
+ db, self._database = self._database, None
+
+ if db is not None:
+ db.remove(self._device_id)
+
+ @property
+ def device_id(self):
+ return self._device_id
+
+ @property
+ def last_alarm_sequence(self):
+ return self._last_alarm_sequence_value
+
+ def reset_alarm_sequence(self):
+ if self._last_alarm_sequence_value != 0:
+ self._last_alarm_sequence_value = 0
+
+ def increment_alarm_sequence(self):
+ self._last_alarm_sequence_value += 1
+ if self._last_alarm_sequence_value > 255:
+ self._last_alarm_sequence_value = 1
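+ # Editorial note (not part of the original change): the sequence counter wraps
+ # from 255 back to 1, never to 0; 0 is only set by reset_alarm_sequence()
+ # after a Get All Alarms exchange resets the ONU-side counter. Successive
+ # increments therefore run ... 254, 255, 1, 2, ...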
+
+ @property
+ def advertise_events(self):
+ return self._advertise_events
+
+ @advertise_events.setter
+ def advertise_events(self, value):
+ if not isinstance(value, bool):
+ raise TypeError('Advertise event is a boolean')
+ self._advertise_events = value
+
+ def advertise(self, event, info):
+ """Advertise an event on the OpenOMCI event bus"""
+ if self._advertise_events:
+ self._agent.advertise(event,
+ {
+ 'state-machine': self.machine.name,
+ 'info': info,
+ 'time': str(datetime.utcnow())
+ })
+
+ def set_alarm_params(self, mgr=None, onu_id=None, uni_ports=None, ani_ports=None):
+ if mgr is not None:
+ self._alarm_manager = mgr
+
+ if onu_id is not None:
+ self._onu_id = onu_id
+
+ if uni_ports is not None:
+ assert isinstance(uni_ports, list)
+ self._uni_ports = uni_ports
+
+ if ani_ports is not None:
+ assert isinstance(ani_ports, list)
+ self._ani_ports = ani_ports
+
+ def on_enter_disabled(self):
+ """
+ State machine is being stopped
+ """
+ self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+ self._cancel_deferred()
+
+ task, self._current_task = self._current_task, None
+ if task is not None:
+ task.stop()
+
+ # Drop Response and Autonomous notification subscriptions
+ for event, sub in self._omci_cc_subscriptions.iteritems():
+ if sub is not None:
+ self._omci_cc_subscriptions[event] = None
+ self._device.omci_cc.event_bus.unsubscribe(sub)
+
+ def _seed_database(self):
+ if not self._device_in_db:
+ try:
+ try:
+ self._database.start()
+ self._database.add(self._device_id)
+ self.log.debug('seed-db-does-not-exist', device_id=self._device_id)
+
+ except KeyError:
+ # Device already is in database
+ self.log.debug('seed-db-exist', device_id=self._device_id)
+
+ self._device_in_db = True
+
+ except Exception as e:
+ self.log.exception('seed-database-failure', e=e)
+
+ def on_enter_starting(self):
+ """
+ Determine ONU status and start Alarm Synchronization tasks
+ """
+ self._device = self._agent.get_device(self._device_id)
+ self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+ # Make sure root of external Alarm Database exists
+ self._seed_database()
+
+ # Set up Response and Autonomous notification subscriptions
+ try:
+ for event, sub in self._omci_cc_sub_mapping.iteritems():
+ if self._omci_cc_subscriptions[event] is None:
+ self._omci_cc_subscriptions[event] = \
+ self._device.omci_cc.event_bus.subscribe(
+ topic=OMCI_CC.event_bus_topic(self._device_id, event),
+ callback=sub)
+
+ except Exception as e:
+ self.log.exception('omci-cc-subscription-setup', e=e)
+
+ # Schedule first audit if enabled
+ if self._audit_delay > 0:
+ # Note using the shorter timeout delay here since this is the first
+ # audit after startup
+ self._deferred = reactor.callLater(self._timeout_delay, self.audit_alarm)
+ else:
+ self._deferred = reactor.callLater(0, self.sync_alarm)
+
+ def on_enter_in_sync(self):
+ """
+ Schedule a tick in the future to request an audit
+ """
+ self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+ if self._audit_delay > 0:
+ # Schedule the next periodic audit while in sync
+ self._deferred = reactor.callLater(self._audit_delay, self.audit_alarm)
+
+ def on_enter_auditing(self):
+ """
+ Begin a full alarm data sync, comparing all alarms
+ """
+ self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+ def success(results):
+ self.log.debug('alarm-diff-success')
+ self._current_task = None
+
+ # Any differences found between ONU and OpenOMCI Alarm tables?
+ if results is None:
+ self._device.alarm_db_in_sync = True
+ self._deferred = reactor.callLater(0, self.success)
+ else:
+ # Reconcile the alarm table and re-run audit
+ self.reconcile_alarm_table(results)
+ self._deferred = reactor.callLater(5, self.audit_alarm)
+
+ def failure(reason):
+ self.log.info('alarm-update-failure', reason=reason)
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+ self._current_task = self._resync_task(self._agent, self._device_id)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def reconcile_alarm_table(self, results):
+ self.log.debug('alarm-reconcile', state=self.state, results=results)
+
+ onu_only = results['onu-only']
+ olt_only = results['olt-only']
+ attr_diffs = results['attr-diffs']
+ onu_db = results['onu-db']
+ olt_db = results['olt-db']
+
+ if any(item is not None for item in (onu_only, olt_only, attr_diffs)):
+ self._device.alarm_db_in_sync = False
+
+ # Compare the differences. During upload, if there are no alarms at all,
+ # then the ONU alarm table retrieved may be empty (instead of MEs with all
+ # bits cleared) depending upon the ONU's OMCI Stack.
+
+ if onu_only is not None:
+ self.process_onu_only_diffs(onu_only, onu_db)
+
+ if olt_only is not None:
+ self.process_olt_only_diffs(olt_only)
+
+ if attr_diffs is not None:
+ self.process_attr_diffs(attr_diffs, olt_db, onu_db)
+
+ def process_onu_only_diffs(self, onu_only, onu_db):
+ """
+ ONU only alarms will typically occur when doing the first audit as our
+ database is clear and we are seeding the alarm table. Save the entries
+ and if any are set, we need to raise that alarm.
+
+ :param onu_only: (list) Tuples with [0]=class ID, [1]=entity ID
+ :param onu_db: (dict) ONU Alarm database from the alarm audit upload
+ """
+ for cid_eid in onu_only:
+ class_id = cid_eid[0]
+ entity_id = cid_eid[1]
+ try:
+ bitmap = onu_db[class_id][entity_id][ATTRIBUTES_KEY][AlarmDbExternal.ALARM_BITMAP_KEY]
+ self.process_alarm_data(class_id, entity_id, bitmap, -1)
+
+ except KeyError as e:
+ self.log.error('alarm-not-found', class_id=class_id, entity_id=entity_id, e=e)
+
+ def process_olt_only_diffs(self, olt_only):
+ """
+ OLT only alarms may occur if the alarm(s) are no longer active on the ONU
+ and the notification was missed. Process this by sending a cleared bitmap
+ for any alarm in the OLT database only
+
+ :param olt_only: (list) Tuples with [0]=class ID, [1]=entity ID
+ """
+ for cid_eid in olt_only:
+ # First process the alarm clearing
+ self.process_alarm_data(cid_eid[0], cid_eid[1], 0, -1)
+ # Now remove from alarm DB so we match the ONU alarm table
+ self._database.delete(self._device_id, cid_eid[0], cid_eid[1])
+
+ def process_attr_diffs(self, attr_diffs, olt_db, onu_db):
+ """
+ Mismatch in alarm settings. Note that the attribute should always be the
+ alarm bitmap attribute (long). For differences, the ONU is always right
+
+ :param attr_diffs: (list(int,int,str)) [0]=class ID, [1]=entity ID, [2]=attribute name
+ :param olt_db: (dict) OLT Alarm database snapshot from the alarm audit
+ :param onu_db: (dict) ONU Alarm database from the alarm audit upload
+ """
+ for cid_eid_attr in attr_diffs:
+ class_id = cid_eid_attr[0]
+ entity_id = cid_eid_attr[1]
+
+ try:
+ assert AlarmDbExternal.ALARM_BITMAP_KEY == cid_eid_attr[2]
+ bitmap = onu_db[class_id][entity_id][ATTRIBUTES_KEY][AlarmDbExternal.ALARM_BITMAP_KEY]
+ self.process_alarm_data(class_id, entity_id, bitmap, -1)
+
+ except KeyError as e:
+ self.log.error('alarm-not-found', class_id=class_id, entity_id=entity_id, e=e)
+
+ def on_alarm_update_response(self, _topic, msg):
+ """
+ Process a Get All Alarms response
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-alarm-update-response', state=self.state, msg=msg)
+
+ if self._omci_cc_subscriptions[RxEvent.Get_ALARM_Get]:
+ if self.state == 'disabled':
+ self.log.error('rx-in-invalid-state', state=self.state)
+ return
+
+ try:
+ response = msg.get(RX_RESPONSE_KEY)
+
+ if isinstance(response, OmciFrame) and \
+ isinstance(response.fields.get('omci_message'), OmciGetAllAlarmsResponse):
+ # ONU will reset its last alarm sequence number to 0 on receipt of the
+ # Get All Alarms request
+ self.log.debug('received-alarm-response')
+ self.reset_alarm_sequence()
+
+ except Exception as e:
+ self.log.exception('upload-alarm-failure', e=e)
+
+ def on_alarm_notification(self, _topic, msg):
+ """
+ Process an alarm Notification
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with keys:
+ TX_REQUEST_KEY -> None (this is an autonomous msg)
+ RX_RESPONSE_KEY -> OmciMessage (Alarm notification frame)
+ """
+ self.log.debug('on-alarm-notification', state=self.state, msg=msg)
+
+ alarm_msg = msg.get(RX_RESPONSE_KEY)
+ if alarm_msg is not None:
+ omci_msg = alarm_msg.fields['omci_message'].fields
+ class_id = omci_msg['entity_class']
+ seq_no = omci_msg['alarm_sequence_number']
+
+ # Validate that this ME supports alarm notifications
+ if class_id not in self._device.me_map or \
+ OP.AlarmNotification not in self._device.me_map[class_id].notifications or \
+ len(self._device.me_map[class_id].alarms) == 0:
+ self.log.warn('invalid-alarm-notification', class_id=class_id)
+ return
+
+ self.process_alarm_data(class_id,
+ omci_msg['entity_id'],
+ omci_msg['alarm_bit_map'],
+ seq_no)
+
+ def process_alarm_data(self, class_id, entity_id, bitmap, msg_seq_no):
+ """
+ Process new alarm data
+
+ :param class_id: (int) Class ID of alarm
+ :param entity_id: (int) Entity ID of alarm
+ :param bitmap: (long) Alarm bitmap value
+ :param msg_seq_no: (int) Alarm sequence number. -1 if generated during an audit
+ """
+ if msg_seq_no > 0:
+ # increment alarm number & compare to alarm # in message
+ # Signal early audit if no match and audits are enabled
+ self.increment_alarm_sequence()
+
+ if self.last_alarm_sequence != msg_seq_no and self._audit_delay > 0:
+ self._deferred = reactor.callLater(0, self.audit_alarm)
+
+ key = AlarmDbExternal.ALARM_BITMAP_KEY
+ prev_entry = self._database.query(self._device_id, class_id, entity_id)
+ prev_bitmap = 0 # default in case no previous entry exists or the lookup below fails
+ try:
+ # Need to access the bit map structure which is nested in dict attributes
+ prev_bitmap = 0 if len(prev_entry) == 0 else long(prev_entry['attributes'][key])
+ except Exception as e:
+ self.log.exception('alarm-prev-entry-collection-failure', class_id=class_id,
+ device_id=self._device_id, entity_id=entity_id, value=bitmap, e=e)
+ # Save current entry before going on
+ try:
+ self._database.set(self._device_id, class_id, entity_id, {key: bitmap})
+
+ except Exception as e:
+ self.log.exception('alarm-save-failure', class_id=class_id,
+ device_id=self._device_id, entity_id=entity_id, value=bitmap, e=e)
+
+ if self._alarm_manager is not None:
+ # Generate a set of alarm number that are raised in current and previous
+ previously_raised = {alarm_no for alarm_no in xrange(224)
+ if prev_bitmap & (1L << (223-alarm_no)) != 0L}
+
+ currently_raised = {alarm_no for alarm_no in xrange(224)
+ if bitmap & (1L << (223-alarm_no)) != 0L}
+
+ newly_cleared = previously_raised - currently_raised
+ newly_raised = currently_raised - previously_raised
+
+ # Generate the set/clear alarms now
+ for alarm_number in newly_cleared:
+ reactor.callLater(0, self.clear_alarm, class_id, entity_id, alarm_number)
+
+ for alarm_number in newly_raised:
+ reactor.callLater(0, self.raise_alarm, class_id, entity_id, alarm_number)
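+ # Editorial sketch (not part of the original change): the bitmap diff above in
+ # concrete terms. Alarm number N occupies bit position (223 - N) of the
+ # 224-bit alarm bit map, so:
+ #
+ # prev_bitmap = 1L << 223 # only alarm 0 raised before
+ # bitmap = (1L << 223) | (1L << 222) # alarms 0 and 1 raised now
+ # # => newly_raised == {1}, newly_cleared == set()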
+
+ def get_alarm_description(self, class_id, alarm_number):
+ """
+ Get the alarm description, both as a printable-string and also a CamelCase value
+ """
+ if alarm_number in self._device.me_map[class_id].alarms:
+ description = self._device.me_map[class_id].alarms[alarm_number]
+ elif alarm_number <= 207:
+ description = 'Reserved alarm {}'.format(alarm_number)
+ else:
+ description = 'Vendor specific alarm {}'.format(alarm_number)
+
+ # For CamelCase, replace hyphens with spaces before camel casing the string
+ return description, description.replace('-', ' ').title().replace(' ', '')
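+ # Example (editorial, not part of the original change): a description such as
+ # 'Low receive optical power' yields the pair
+ # ('Low receive optical power', 'LowReceiveOpticalPower'), while hyphenated
+ # text like 'Loss-of-signal' becomes 'LossOfSignal'.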
+
+ def raise_alarm(self, class_id, entity_id, alarm_number):
+ """
+ Raise an alarm on the ONU
+
+ :param class_id: (int) Class ID of the Alarm ME
+ :param entity_id: (int) Entity ID of the Alarm
+ :param alarm_number: (int) Alarm number (bit) that is alarmed
+ """
+ description, name = self.get_alarm_description(class_id, alarm_number)
+
+ self.log.warn('alarm-set', class_id=class_id, entity_id=entity_id,
+ alarm_number=alarm_number, name=name, description=description)
+
+ if self._alarm_manager is not None:
+ alarm = self.omci_alarm_to_onu_alarm(class_id, entity_id, alarm_number)
+ if alarm is not None:
+ alarm.raise_alarm()
+
+ def clear_alarm(self, class_id, entity_id, alarm_number):
+ """
+ Lower/clear an alarm on the ONU
+
+ :param class_id: (int) Class ID of the Alarm ME
+ :param entity_id: (int) Entity ID of the Alarm
+ :param alarm_number: (int) Alarm number (bit) that is alarmed
+ """
+ description, name = self.get_alarm_description(class_id, alarm_number)
+
+ self.log.info('alarm-cleared', class_id=class_id, entity_id=entity_id,
+ alarm_number=alarm_number, name=name, description=description)
+
+ if self._alarm_manager is not None:
+ alarm = self.omci_alarm_to_onu_alarm(class_id, entity_id, alarm_number)
+ if alarm is not None:
+ alarm.clear_alarm()
+
+ def query_mib(self, class_id=None, instance_id=None):
+ """
+ Get Alarm database information.
+
+ This method can be used to request information from the database to the detailed
+ level requested
+
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises DatabaseStateError: If the database is not enabled or does not exist
+ """
+ from voltha.extensions.omci.database.mib_db_api import DatabaseStateError
+
+ self.log.debug('query', class_id=class_id, instance_id=instance_id)
+ if self._database is None:
+ raise DatabaseStateError('Database does not yet exist')
+
+ return self._database.query(self._device_id, class_id=class_id, instance_id=instance_id)
+
+ def omci_alarm_to_onu_alarm(self, class_id, entity_id, alarm_number):
+ """
+ Map an OMCI Alarm Notification alarm to the proper ONU Alarm Library alarm
+
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME Class instance ID
+ :param alarm_number: (int) Alarm Number
+ :return: (AlarmBase) Alarm library alarm or None if not supported/found
+ """
+ from voltha.extensions.alarms.onu.onu_dying_gasp_alarm import OnuDyingGaspAlarm
+ from voltha.extensions.alarms.onu.onu_los_alarm import OnuLosAlarm
+ from voltha.extensions.alarms.onu.onu_equipment_alarm import OnuEquipmentAlarm
+ from voltha.extensions.alarms.onu.onu_selftest_failure_alarm import OnuSelfTestFailureAlarm
+ from voltha.extensions.alarms.onu.onu_laser_eol_alarm import OnuLaserEolAlarm
+ from voltha.extensions.alarms.onu.onu_laser_bias_current_alarm import OnuLaserBiasAlarm
+ from voltha.extensions.alarms.onu.onu_temp_yellow_alarm import OnuTempYellowAlarm
+ from voltha.extensions.alarms.onu.onu_temp_red_alarm import OnuTempRedAlarm
+ from voltha.extensions.alarms.onu.onu_voltage_yellow_alarm import OnuVoltageYellowAlarm
+ from voltha.extensions.alarms.onu.onu_voltage_red_alarm import OnuVoltageRedAlarm
+ from voltha.extensions.alarms.onu.onu_low_rx_optical_power_alarm import OnuLowRxOpticalAlarm
+ from voltha.extensions.alarms.onu.onu_high_rx_optical_power_alarm import OnuHighRxOpticalAlarm
+ from voltha.extensions.alarms.onu.onu_low_tx_optical_power_alarm import OnuLowTxOpticalAlarm
+ from voltha.extensions.alarms.onu.onu_high_tx_optical_power_alarm import OnuHighTxOpticalAlarm
+
+ mgr = self._alarm_manager
+ if class_id in (CircuitPack.class_id, PptpEthernetUni.class_id):
+ intf_id = self.select_uni_port(class_id, entity_id)
+
+ elif class_id in (AniG.class_id, OntG.class_id):
+ intf_id = self.select_ani_port(class_id, entity_id)
+
+ else:
+ self.log.error('unsupported-class-id', class_id=class_id, alarm_number=alarm_number)
+ return
+
+ alarm_map = {
+ (CircuitPack.class_id, 0): OnuEquipmentAlarm,
+ (CircuitPack.class_id, 2): OnuSelfTestFailureAlarm,
+ (CircuitPack.class_id, 3): OnuLaserEolAlarm,
+ (CircuitPack.class_id, 4): OnuTempYellowAlarm,
+ (CircuitPack.class_id, 5): OnuTempRedAlarm,
+
+ (PptpEthernetUni.class_id, 0): OnuLosAlarm,
+
+ (OntG.class_id, 0): OnuEquipmentAlarm,
+ (OntG.class_id, 6): OnuSelfTestFailureAlarm,
+ (OntG.class_id, 7): OnuDyingGaspAlarm,
+ (OntG.class_id, 8): OnuTempYellowAlarm,
+ (OntG.class_id, 9): OnuTempRedAlarm,
+ (OntG.class_id, 10): OnuVoltageYellowAlarm,
+ (OntG.class_id, 11): OnuVoltageRedAlarm,
+
+ (AniG.class_id, 0): OnuLowRxOpticalAlarm,
+ (AniG.class_id, 1): OnuHighRxOpticalAlarm,
+ (AniG.class_id, 4): OnuLowTxOpticalAlarm,
+ (AniG.class_id, 5): OnuHighTxOpticalAlarm,
+ (AniG.class_id, 6): OnuLaserBiasAlarm,
+ }
+ alarm_cls = alarm_map.get((class_id, alarm_number))
+
+ return alarm_cls(mgr, self._onu_id, intf_id) if alarm_cls is not None else None
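+ # Example (editorial, not part of the original change): an OntG alarm
+ # notification with alarm number 7 (Dying Gasp) maps to OnuDyingGaspAlarm,
+ # raised against the ANI/PON port chosen by select_ani_port(); any
+ # (class_id, alarm_number) pair not in the table returns None.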
+
+ def select_uni_port(self, class_id, entity_id):
+ """
+ Select the best possible UNI Port (logical) interface number for this ME class and
+ entity ID.
+
+ This base implementation will assume that a UNI Port object has been registered
+ on startup and supports both an 'entity_id' and also 'logical_port_number'
+ property. See both the Adtran and BroadCom OpenOMCI ONU DA for an example
+ of this UNI port object.
+
+ :param class_id: (int) ME Class ID to which the alarm belongs
+ :param entity_id: (int) Instance ID
+
+ :return: (int) Logical Port number for the UNI port
+ """
+ # NOTE: Of the three class ID's supported in this version of code, only the CircuitPack,
+ # and PptpEthernetUni MEs will map to the UNI port
+ assert class_id in (CircuitPack.class_id, PptpEthernetUni.class_id)
+
+ return next((uni.logical_port_number for uni in self._uni_ports if
+ uni.entity_id == entity_id), None)
+
+ def select_ani_port(self, class_id, _entity_id):
+ """
+ Select the best possible ANI Port (physical) interface number for this ME class and
+ entity ID.
+
+ Currently the base implementation assumes only a single PON port and it will be
+ chosen. A future implementation may want to have a PON Port object (similar to
+ the BroadCom Open OMCI and Adtran ONU's UNI Port object) that provides a match
+ for entity ID. This does assume that the PON port object supports a property
+ of 'port_number' to return the physical port number.
+
+ :param class_id: (int) ME Class ID to which the alarm belongs
+ :param _entity_id: (int) Instance ID
+
+ :return: (int) Physical port number for the ANI/PON port
+ """
+ # NOTE: Of the three class ID's supported in this version of code, only the AniG
+ # MEs will map to the ANI port. For some of the OntG alarms (e.g. Dying Gasp) the
+ # PON interface will also be selected.
+ assert class_id in (AniG.class_id, OntG.class_id)
+
+ return self._ani_ports[0].port_number if len(self._ani_ports) else None
diff --git a/python/adapters/extensions/omci/state_machines/image_agent.py b/python/adapters/extensions/omci/state_machines/image_agent.py
new file mode 100755
index 0000000..e6d5884
--- /dev/null
+++ b/python/adapters/extensions/omci/state_machines/image_agent.py
@@ -0,0 +1,1024 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import structlog
+from datetime import datetime, timedelta
+from binascii import crc32, hexlify
+from transitions import Machine
+from transitions.extensions.nesting import HierarchicalMachine as HMachine
+from twisted.python import failure
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred, CancelledError
+from common.event_bus import EventBusClient
+from voltha.protos.voltha_pb2 import ImageDownload
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes, AttributeAccess, OmciSectionDataSize
+from voltha.extensions.omci.omci_entities import SoftwareImage
+from voltha.extensions.omci.omci_cc import DEFAULT_OMCI_TIMEOUT
+from voltha.extensions.omci.omci_messages import OmciEndSoftwareDownloadResponse, OmciActivateImageResponse
+
+###################################################################################
+## OLT out-of-band download image procedure
+###################################################################################
+
+class ImageDownloadeSTM(object):
+ DEFAULT_STATES = ['disabled', 'downloading', 'validating', 'done']
+ DEFAULT_TRANSITIONS = [
+ {'trigger': 'start', 'source': 'disabled', 'dest': 'downloading'},
+ {'trigger': 'stop', 'source': ['downloading', 'validating', 'done'], 'dest': 'disabled'},
+ {'trigger': 'dw_success', 'source': 'downloading', 'dest': 'validating'},
+ {'trigger': 'dw_fail', 'source': 'downloading', 'dest': 'done'},
+ {'trigger': 'validate_success', 'source': 'validating', 'dest': 'done'},
+ ]
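+    # Typical lifecycle (derived from the transitions above):
+    #   disabled --start--> downloading --dw_success--> validating --validate_success--> done
+    # A failed download goes straight to 'done' via dw_fail, and 'stop' returns any
+    # active state to 'disabled'.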
+ DEFAULT_TIMEOUT_RETRY = 1000 # Seconds to delay after task failure/timeout
+
+ # def __init__(self, omci_agent, dev_id, local_name, local_dir, remote_url, download_task,
+ def __init__(self, omci_agent, image_download,
+ download_task_cls,
+ states=DEFAULT_STATES,
+ transitions=DEFAULT_TRANSITIONS,
+ initial_state='disabled',
+ timeout_delay=DEFAULT_TIMEOUT_RETRY,
+ advertise_events=True, clock=None):
+ """
+        :param omci_agent: (OpenOMCIAgent)
+        :param image_download: (ImageDownload)
+                            ImageDownload.id : device id
+                            ImageDownload.name: file name of the image
+                            ImageDownload.url : URL to download the image from the server
+                            ImageDownload.local_dir: local directory of the image file
+ """
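+        # Illustrative construction (normally performed by ImageAgent.get_image(); the
+        # download task class shown here is an assumption):
+        #   stm = ImageDownloadeSTM(omci_agent, image_download, FileDownloadTask)
+        #   d = stm.get_file()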
+ self.log = structlog.get_logger(device_id=image_download.id)
+ self._agent = omci_agent
+ # self._imgdw = ImageDownload()
+ # self._imgdw.name = local_name
+ # self._imgdw.id = dev_id
+ # self._imgdw.url = remote_url
+ # self._imgdw.local_dir = local_dir
+ self._imgdw = image_download
+ # self._imgdw.state = ImageDownload.DOWNLOAD_UNKNOWN # voltha_pb2
+
+ self._download_task_cls = download_task_cls
+ self._timeout_delay = timeout_delay
+
+ self._current_task = None
+ self._task_deferred = None
+ self._ret_deferred = None
+ self._timeout_dc = None # DelayedCall
+ self._advertise_events = advertise_events
+ self.reactor = clock if clock is not None else reactor
+
+ self.log.debug("ImageDownloadeSTM", image_download=self._imgdw)
+ self.machine = Machine(model=self, states=states,
+ transitions=transitions,
+ initial=initial_state,
+ queued=True,
+ name='{}-{}'.format(self.__class__.__name__, self._imgdw.id))
+ # @property
+ # def name(self):
+ # return self._imgdw.name
+
+ def _cancel_timeout(self):
+ d, self._timeout_dc = self._timeout_dc, None
+ if d is not None and not d.called:
+ d.cancel()
+
+ @property
+ def status(self):
+ return self._imgdw
+
+ @property
+ def deferred(self):
+ return self._ret_deferred
+
+ def advertise(self, event, info):
+ """Advertise an event on the OpenOMCI event bus"""
+ if self._advertise_events:
+ self._agent.advertise(event,
+ {
+ 'state-machine': self.machine.name,
+ 'info': info,
+ 'time': str(datetime.utcnow())
+ })
+
+ # def reset(self):
+ # """
+    #     Reset the state machine to its initial state.
+    #     It is used to clear a failed result from the last download.
+ # """
+ # self.log.debug('reset download', image_download=self._imgdw)
+ # if self._current_task is not None:
+ # self._current_task.stop()
+
+ # self._cancel_deferred()
+
+ # if self._ret_deferred is not None:
+ # self._ret_deferred.cancel()
+ # self._ret_deferred = None
+
+ # self.stop()
+ # self._imgdw.state = ImageDownload.DOWNLOAD_UNKNOWN
+
+ def get_file(self):
+ """
+        Return a Deferred that fires once the image is available locally (and errbacks
+        if the download fails). The caller registers callbacks on it to get notified.
+ """
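+        # Illustrative caller usage (hypothetical callback names):
+        #   d = dwld_stm.get_file()
+        #   d.addCallbacks(on_image_available, on_image_failed)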
+ # self.log.debug('get_file', image_download=self._imgdw)
+ if self._ret_deferred is None or self._ret_deferred.called:
+ self._ret_deferred = Deferred()
+
+ if self._imgdw.state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ self.log.debug('Image Available')
+ self.reactor.callLater(0, self._ret_deferred.callback, self._imgdw)
+ elif self._imgdw.state == ImageDownload.DOWNLOAD_FAILED or self._imgdw.state == ImageDownload.DOWNLOAD_UNSUPPORTED:
+            self.log.debug('Image does not exist')
+ self.reactor.callLater(0, self._ret_deferred.errback, failure.Failure(Exception('Image Download Failed ' + self._imgdw.name)))
+ elif self._imgdw.state == ImageDownload.DOWNLOAD_UNKNOWN or self._imgdw.state == ImageDownload.DOWNLOAD_REQUESTED:
+ self.log.debug('Start Image STM')
+ self._imgdw.state = ImageDownload.DOWNLOAD_STARTED
+ self.reactor.callLater(0, self.start)
+ else:
+ self.log.debug('NO action', state=self._imgdw.state)
+
+ return self._ret_deferred
+
+ def timeout(self):
+        self.log.debug('Image Download Timeout', download_task=self._current_task)
+ if self._current_task:
+ self.reactor.callLater(0, self._current_task.stop)
+ # if self._task_deferred is not None and not self._task_deferred.called:
+ # self._task_deferred.cancel()
+ self._current_task = None
+ # else:
+ # self.dw_fail()
+
+ def on_enter_downloading(self):
+ self.log.debug("on_enter_downloading")
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ def success(results):
+ self.log.debug('image-download-success', results=results)
+ self._imgdw.state = ImageDownload.DOWNLOAD_SUCCEEDED
+ self._imgdw.reason = ImageDownload.NO_ERROR
+ self._current_task = None
+ self._task_deferred = None
+ self.dw_success()
+
+ def failure(reason):
+ self.log.info('image-download-failure', reason=reason)
+ if self._imgdw.state == ImageDownload.DOWNLOAD_STARTED:
+ self._imgdw.state = ImageDownload.DOWNLOAD_FAILED
+ if isinstance(reason, CancelledError):
+ self._imgdw.reason = ImageDownload.CANCELLED
+ self._current_task = None
+ self._task_deferred = None
+ self.dw_fail()
+
+ self._device = self._agent.get_device(self._imgdw.id)
+ self._current_task = self._download_task_cls(self._agent, self._imgdw, self.reactor)
+
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+ self._imgdw.state = ImageDownload.DOWNLOAD_STARTED
+
+ if self._timeout_delay > 0:
+ self._timeout_dc = self.reactor.callLater(self._timeout_delay, self.timeout)
+
+ def on_enter_validating(self):
+ self.log.debug("on_enter_validating")
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self.validate_success()
+
+ def on_enter_done(self):
+ self.log.debug("on_enter_done")
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_timeout()
+
+ d, self._ret_deferred = self._ret_deferred, None
+ if d is not None:
+ if self._imgdw.state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ self.reactor.callLater(0, d.callback, self._imgdw)
+ else: # failed
+ if self._imgdw.reason == ImageDownload.CANCELLED:
+ self.reactor.callLater(0, d.cancel)
+ else:
+ self.reactor.callLater(0, d.errback, failure.Failure(Exception('Image Download Failed ' + self._imgdw.name)))
+
+ def on_enter_disabled(self):
+ self.log.debug("on_enter_disabled")
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ self._cancel_timeout()
+ if self._current_task is not None:
+ self.reactor.callLater(0, self._current_task.stop)
+ self._current_task = None
+
+ if self._ret_deferred:
+ self.reactor.callLater(0, self._ret_deferred.cancel)
+ self._ret_deferred = None
+
+ # remove local file fragments if download failed
+ file_path = self._imgdw.local_dir + '/' + self._imgdw.name
+ if self._imgdw.state != ImageDownload.DOWNLOAD_SUCCEEDED and os.path.exists(file_path):
+ os.remove(file_path)
+ self._imgdw.state = ImageDownload.DOWNLOAD_UNKNOWN
+
+###################################################################################
+## OMCI Software Image Download Procedure
+###################################################################################
+
+class OmciSoftwareImageDownloadSTM(object):
+
+    OMCI_SWIMG_DOWNLOAD_TIMEOUT = 5400 # TODO: Seconds allowed for the full download procedure, to avoid errors that cause infinite downloading
+ OMCI_SWIMG_DOWNLOAD_WINDOW_SIZE = 32
+ OMCI_SWIMG_WINDOW_RETRY_MAX = 2
+ OMCI_SWIMG_ACTIVATE_RETRY_MAX = 2
+ OMCI_SWIMG_ACTIVATE_TRANSITIONS_TIMEOUT = 10 # Seconds to delay after task failure/timeout
+
+ # def __init__(self, omci_agent, dev_id, img_path,
+ def __init__(self, image_id, omci_agent, image_dnld,
+ window_size=OMCI_SWIMG_DOWNLOAD_WINDOW_SIZE,
+ timeout_delay=OMCI_SWIMG_DOWNLOAD_TIMEOUT,
+ advertise_events=True,
+ clock=None):
+ """
+ omci_agent: (OpenOMCIAgent)
+ image_dnld: (ImageDownload)
+ ImageDownload.id : device id
+ ImageDownload.name: file name of the image
+            ImageDownload.url : URL to download the image from the server
+            ImageDownload.local_dir: local directory of the image file
+        window_size: number of sections per window in the OMCI download procedure
+ """
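+        # Note (illustrative): each download window moves up to
+        # window_size * OmciSectionDataSize bytes of the image; as coded in
+        # on_enter_dwin_sending_sections below, only the final section of a window waits
+        # for (and times out on) the ONU's response before the window is acknowledged.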
+ self.log = structlog.get_logger(device_id=image_dnld.id)
+ self._omci_agent = omci_agent
+ self._image_download = image_dnld
+ self._timeout = timeout_delay
+ self._timeout_dc = None
+ self._window_size = window_size
+ self.reactor = clock if clock is not None else reactor
+ self._offset = 0
+ # self._win_section = 0
+ self._win_retry = 0
+ self._device_id = image_dnld.id
+ self._device = omci_agent.get_device(image_dnld.id)
+ self.__init_state_machine()
+ self._ret_deferred = None
+ self._image_id = image_id # Target software image entity ID
+ self._image_file = image_dnld.local_dir + '/' + image_dnld.name
+ self._image_obj = open(self._image_file, mode='rb')
+ self._image_size = os.path.getsize(self._image_file)
+ self._crc32 = 0
+ self._win_crc32 = 0
+ self._win_data = None
+ self._current_deferred = None
+ self._result = None # ReasonCodes
+ self.crctable = []
+ self._crctable_init = False
+ self._actimg_retry_max = OmciSoftwareImageDownloadSTM.OMCI_SWIMG_ACTIVATE_RETRY_MAX
+ self._actimg_retry = 0
+ self.log.debug("DownloadSTM", image=self._image_file, image_size=self._image_size)
+
+ def __init_state_machine(self):
+
+ #### Download Window Sub State Machine ####
+ OMCI_DOWNLOAD_WINDOW_STATE = ['init_window', 'sending_sections', 'window_success', 'window_failed']
+ OMCI_DOWNLOAD_WINDOW_TRANSITIONS = [
+ {'trigger': 'send_sections', 'source': 'init_window', 'dest': 'sending_sections'},
+ # {'trigger': 'send_section_last', 'source': 'start_section', 'dest': 'last_section' },
+ {'trigger': 'rx_ack_success', 'source': 'sending_sections', 'dest': 'window_success' },
+ {'trigger': 'rx_ack_failed', 'source': 'sending_sections', 'dest': 'window_failed' },
+ # {'trigger': 'retry_window', 'source': 'window_failed', 'dest': 'start_section' },
+ {'trigger': 'reset_window', 'source': '*', 'dest': 'init_window' }
+ ]
+ self.win_machine = HMachine(model=self,
+ states=OMCI_DOWNLOAD_WINDOW_STATE,
+ transitions=OMCI_DOWNLOAD_WINDOW_TRANSITIONS,
+ initial='init_window',
+ queued=True,
+ name='{}-window_section_machine'.format(self.__class__.__name__))
+
+ #### Software Activation Sub State Machine ####
+ OMCI_SWIMG_ACTIVATE_STATES = ['init_act', 'activating', 'busy', 'rebooting', 'committing', 'done', 'failed']
+ OMCI_SWIMG_ACTIVATE_TRANSITIONS = [
+ {'trigger': 'activate', 'source': ['init_act', 'busy'], 'dest': 'activating'},
+ {'trigger': 'onu_busy', 'source': 'activating', 'dest': 'busy'},
+ {'trigger': 'reboot', 'source': 'activating', 'dest': 'rebooting'},
+ {'trigger': 'do_commit', 'source': ['activating', 'rebooting'], 'dest': 'committing'},
+ # {'trigger': 'commit_ok', 'source': 'committing', 'dest': 'done'},
+ {'trigger': 'reset_actimg', 'source': ['activating', 'rebooting', 'committing', 'failed'], 'dest': 'init_act'},
+ # {'trigger': 'actimg_fail', 'source': ['init_act', 'activating', 'rebooting', 'committing'], 'dest': 'failed'}
+ ]
+
+ self.activate_machine = HMachine(model=self,
+ states=OMCI_SWIMG_ACTIVATE_STATES,
+ transitions=OMCI_SWIMG_ACTIVATE_TRANSITIONS,
+ initial='init_act',
+ queued=True,
+ name='{}-activate_machine'.format(self.__class__.__name__))
+
+ #### Main State Machine ####
+ OMCI_SWIMG_DOWNLOAD_STATES = [ 'init_image', 'starting_image', 'ending_image', 'endimg_busy', 'done_image',
+ {'name': 'dwin', 'children': self.win_machine},
+ {'name': 'actimg', 'children': self.activate_machine}
+ ]
+ OMCI_SWIMG_DOWNLOAD_TRANSITIONS = [
+ {'trigger': 'start_image', 'source': 'init_image', 'dest': 'starting_image' },
+ {'trigger': 'download_window', 'source': 'starting_image', 'dest': 'dwin_init_window' },
+ {'trigger': 'download_success', 'source': 'dwin', 'dest': 'ending_image' },
+ {'trigger': 'onu_busy', 'source': 'ending_image', 'dest': 'endimg_busy' },
+ {'trigger': 'retry_endimg', 'source': 'endimg_busy', 'dest': 'ending_image' },
+ {'trigger': 'end_img_success', 'source': 'ending_image', 'dest': 'actimg_init_act' },
+ {'trigger': 'activate_done', 'source': 'actimg', 'dest': 'done_image' },
+ {'trigger': 'download_fail', 'source': '*', 'dest': 'done_image' },
+ {'trigger': 'reset_image', 'source': '*', 'dest': 'init_image' },
+ ]
+
+ self.img_machine = HMachine(model=self,
+ states=OMCI_SWIMG_DOWNLOAD_STATES,
+ transitions=OMCI_SWIMG_DOWNLOAD_TRANSITIONS,
+ initial='init_image',
+ queued=True,
+ name='{}-image_download_machine'.format(self.__class__.__name__))
+
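+    # Overall flow (derived from the transitions above): init_image -> starting_image ->
+    # dwin_* (one pass of the window sub-machine per download window) -> ending_image
+    # (with endimg_busy retries if the ONU reports busy) -> actimg_* (activate, reboot,
+    # commit) -> done_image. 'download_fail' and 'reset_image' are wildcard escapes.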
+ # @property
+ # def image_filename(self):
+ # return self._image_file
+
+ # @image_filename.setter
+ # def image_filename(self, value):
+ # if self._image_fd is not None:
+ # self._image_fd.close()
+ # self._image_filename = value
+ # self._image_fd = open(self._image_filename, mode='rb')
+ # self._image_size = os.path.getsize(self._image_filename)
+ # print("Set image file: " + self._image_filename + " size: " + str(self._image_size))
+
+ def __omci_start_download_resp_success(self, rx_frame):
+ self.log.debug("__omci_download_resp_success")
+ self.download_window()
+ return rx_frame
+
+ def __omci_start_download_resp_fail(self, fail):
+ self.log.debug("__omci_download_resp_fail", failure=fail)
+ self._result = ReasonCodes.ProcessingError
+ self.download_fail()
+
+ def __omci_end_download_resp_success(self, rx_frame):
+ self.log.debug("__omci_end_download_resp_success")
+ if rx_frame.fields['message_type'] == OmciEndSoftwareDownloadResponse.message_id: # 0x35
+ omci_data = rx_frame.fields['omci_message']
+ if omci_data.fields['result'] == 0:
+ self.log.debug('OMCI End Image OK')
+ self._result = ReasonCodes.Success
+ self.end_img_success()
+ elif omci_data.fields['result'] == 6: # Device Busy
+ self.log.debug('OMCI End Image Busy')
+ self.onu_busy()
+ else:
+ self.log.debug('OMCI End Image Failed', reason=omci_data.fields['result'])
+ else:
+ self.log.debug('Receive Unexpected OMCI', message_type=rx_frame.fields['message_type'])
+
+ def __omci_end_download_resp_fail(self, fail):
+ self.log.debug("__omci_end_download_resp_fail", failure=fail)
+ self._result = ReasonCodes.ProcessingError
+ self.download_fail()
+
+ def __omci_send_window_resp_success(self, rx_frame, cur_state, datasize):
+ # self.log.debug("__omci_send_window_resp_success", current_state=cur_state)
+ self._offset += datasize
+ self._image_download.downloaded_bytes += datasize
+ self.rx_ack_success()
+
+ def __omci_send_window_resp_fail(self, fail, cur_state):
+ self.log.debug("__omci_send_window_resp_fail", current_state=cur_state)
+ self.rx_ack_failed()
+
+ def __activate_resp_success(self, rx_frame):
+ self._current_deferred = None
+ if rx_frame.fields['message_type'] == OmciActivateImageResponse.message_id: # 0x36
+ omci_data = rx_frame.fields['omci_message']
+ if omci_data.fields['result'] == 0:
+ self.log.debug("Activate software image success, rebooting ONU ...", device_id=self._device.device_id,
+ state=self._image_download.image_state)
+ standby_image_id = 0 if self._image_id else 1
+ self._omci_agent.database.set(self._device.device_id, SoftwareImage.class_id, self._image_id, {"is_active": 1})
+ self._omci_agent.database.set(self._device.device_id, SoftwareImage.class_id, standby_image_id, {"is_active": 0})
+ self.reboot()
+ elif omci_data.fields['result'] == 6: # Device Busy
+ self.log.debug('OMCI Activate Image Busy')
+ self.onu_busy()
+ else:
+                self.log.debug('OMCI Activate Image Failed', reason=omci_data.fields['result'])
+ else:
+            self.log.debug('Receive Unexpected OMCI', message_type=rx_frame.fields['message_type'])
+
+ def __activate_fail(self, fail):
+        self.log.debug("Activate software image failed", failure=fail)
+ self._current_deferred = None
+ self._result = ReasonCodes.ProcessingError
+ self.activate_done()
+
+ def __commit_success(self, rx_frame):
+ self.log.debug("Commit software success", device_id=self._device_id)
+ self._current_deferred = None
+ standby_image_id = 0 if self._image_id else 1
+ self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._image_id, {"is_committed": 1})
+ self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, standby_image_id, {"is_committed": 0})
+ self._image_download.image_state = ImageDownload.IMAGE_ACTIVE
+ self._result = ReasonCodes.Success
+ self.activate_done()
+
+ def __commit_fail(self, fail):
+        self.log.debug("Commit software image failed", failure=fail)
+ self._current_deferred = None
+ self._result = ReasonCodes.ProcessingError
+ self._image_download.image_state = ImageDownload.IMAGE_REVERT
+ self.activate_done()
+
+# @property
+# def image_id(self):
+# return self._image_id
+
+# @image_id.setter
+# def image_id(self, value):
+# self._image_id = value
+
+ @property
+ def status(self):
+ return self._image_download
+
+ def start(self):
+ self.log.debug("OmciSoftwareImageDownloadSTM.start", current_state=self.state)
+ if self._ret_deferred is None:
+ self._ret_deferred = Deferred()
+ if self.state == 'init_image':
+ self.reactor.callLater(0, self.start_image)
+ return self._ret_deferred
+
+ def stop(self):
+ self.log.debug("OmciSoftwareImageDownloadSTM.stop", current_state=self.state)
+ self._result = ReasonCodes.OperationCancelled
+ self.download_fail()
+
+ def on_enter_init_image(self):
+ self.log.debug("on_enter_init_image")
+ self._image_obj.seek(0)
+ self._offset = 0
+ # self._win_section = 0
+ self._win_retry = 0
+
+ def on_enter_starting_image(self):
+ self.log.debug("on_enter_starting_image")
+ self._image_download.downloaded_bytes = 0
+ self._current_deferred = self._device.omci_cc.send_start_software_download(self._image_id, self._image_size, self._window_size)
+ self._current_deferred.addCallbacks(self.__omci_start_download_resp_success, self.__omci_start_download_resp_fail)
+ # callbackArgs=(self.state,), errbackArgs=(self.state,))
+
+ def on_enter_dwin_init_window(self):
+ # self.log.debug("on_enter_dwin_init_window", offset=self._offset, image_size=self._image_size)
+ if self._offset < self._image_size:
+ self.send_sections()
+
+ def on_enter_dwin_sending_sections(self):
+ # self.log.debug("on_enter_dwin_sending_sections", offset=self._offset)
+
+ if (self._offset + self._window_size * OmciSectionDataSize) <= self._image_size:
+ sections = self._window_size
+ mod = 0
+ datasize = self._window_size * OmciSectionDataSize
+ else:
+ datasize = self._image_size - self._offset
+ sections = datasize / OmciSectionDataSize
+ mod = datasize % OmciSectionDataSize
+ sections = sections + 1 if mod > 0 else sections
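+            # Worked example (illustrative): if 100 bytes remain and the section payload
+            # size is 31 bytes, then datasize=100, sections=3, mod=7, so sections becomes
+            # 4 (three full sections plus one short final section).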
+
+ # self.log.debug("on_enter_dwin_sending_sections", offset=self._offset, datasize=datasize, sections=sections)
+ if self._win_retry == 0:
+ self._win_data = self._image_obj.read(datasize)
+ self._win_crc32 = self.crc32(self._crc32, self._win_data)
+ # self.log.debug("CRC32", crc32=self._win_crc32, offset=self._offset)
+ else:
+ self.log.debug("Retry download window with crc32", offset=self._offset)
+
+ sent = 0
+ for i in range(0, sections):
+ if i < sections - 1:
+ # self.log.debug("section data", data=hexlify(data[(self._offset+sent):(self._offset+sent+OmciSectionDataSize)]))
+ self._device.omci_cc.send_download_section(self._image_id, i,
+ self._win_data[sent:sent+OmciSectionDataSize])
+ sent += OmciSectionDataSize
+ else:
+ last_size = OmciSectionDataSize if mod == 0 else mod
+ self._current_deferred = self._device.omci_cc.send_download_section(self._image_id, i,
+ self._win_data[sent:sent+last_size],
+ timeout=DEFAULT_OMCI_TIMEOUT)
+ self._current_deferred.addCallbacks(self.__omci_send_window_resp_success, self.__omci_send_window_resp_fail,
+ callbackArgs=(self.state, datasize), errbackArgs=(self.state,))
+ sent += last_size
+        assert sent == datasize
+
+ # def on_enter_dwin_last_section(self):
+ # self._current_deferred = self._device.omci_cc.send_download_section, self._instance_id, self._win_section, data)
+ # self._current_deferred.addCallbacks(self.__omci_resp_success, self.__omci_resp_fail,
+ # callbackArgs=(self.state,), errbackArgs=(self.state,))
+
+ def on_enter_dwin_window_success(self):
+ # self.log.debug("on_enter_dwin_window_success")
+ self._crc32 = self._win_crc32 if self._win_crc32 != 0 else self._crc32
+ self._win_crc32 = 0
+ self._win_retry = 0
+ if self._offset < self._image_size:
+ self.reset_window()
+ else:
+ self.download_success()
+
+ def on_enter_dwin_window_failed(self):
+        self.log.debug("on_enter_dwin_window_failed", retry=self._win_retry)
+ if self._win_retry < self.OMCI_SWIMG_WINDOW_RETRY_MAX:
+ self._win_retry += 1
+ self.reset_window()
+ else:
+ self._result = ReasonCodes.ProcessingError
+ self.download_fail()
+
+ def on_enter_ending_image(self):
+ self.log.debug("on_enter_ending_image", crc32=self._crc32)
+ self._current_deferred = self._device.omci_cc.send_end_software_download(self._image_id, self._crc32,
+ self._image_size, timeout=18)
+ self._current_deferred.addCallbacks(self.__omci_end_download_resp_success, self.__omci_end_download_resp_fail)
+ # callbackArgs=(self.state,), errbackArgs=(self.state,))
+
+ def on_enter_endimg_busy(self):
+ self.log.debug("on_enter_endimg_busy")
+ self.reactor.callLater(3, self.retry_endimg)
+
+ def on_enter_actimg_init_act(self):
+ self.log.debug("on_enter_actimg_init_act", retry=self._actimg_retry, max_retry=self._actimg_retry_max)
+ # self._images[0] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed", "is_valid"])
+ # self._images[1] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed", "is_valid"])
+ # if (self._images[self._to_image]["is_active"] != 1 and self._images[self._to_image]["is_valid"] == 1):
+ if self._actimg_retry > self._actimg_retry_max:
+ self.log.debug("activate image failed: retry max", retries=self._actimg_retry)
+ self._result = ReasonCodes.ProcessingError
+ self.activate_done()
+ else:
+ self._image_download.image_state = ImageDownload.IMAGE_ACTIVATE
+ self.activate()
+
+ def on_enter_actimg_activating(self):
+ self.log.debug("on_enter_actimg_activating")
+ img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id,
+ self._image_id, ["is_active", "is_committed", "is_valid"])
+
+ self.log.debug("on_enter_actimg_activating", instance=self._image_id, state=img)
+ if img["is_active"] == 0:
+ #if img["is_valid"] == 1:
+ self._current_deferred = self._device.omci_cc.send_active_image(self._image_id)
+ self._current_deferred.addCallbacks(self.__activate_resp_success, self.__activate_fail)
+ #else:
+ # self.fail()
+ else:
+ self.do_commit()
+
+ def on_enter_actimg_busy(self):
+ self.log.debug("on_enter_actimg_busy")
+ self.reactor.callLater(3, self.activate)
+
+ def __on_reboot_timeout(self):
+ self.log.debug("on_reboot_timeout")
+ self._timeout_dc = None
+ self._result = ReasonCodes.ProcessingError
+ self.activate_done()
+
+ def on_enter_actimg_rebooting(self):
+ self.log.debug("on_enter_actimg_rebooting")
+        if self._timeout_dc is None:
+ self._timeout_dc = self.reactor.callLater(self._timeout, self.__on_reboot_timeout)
+
+ def on_exit_actimg_rebooting(self):
+ self.log.debug("on_exit_actimg_rebooting", timeout=self._timeout_dc)
+        if self._timeout_dc is not None and self._timeout_dc.active():
+ self._timeout_dc.cancel()
+ self._timeout_dc = None
+
+ def on_enter_actimg_committing(self):
+ # self.log.debug("on_enter_committing")
+ img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id,
+ self._image_id, ["is_active", "is_committed", "is_valid"])
+ self.log.debug("on_enter_actimg_committing", instance=self._image_id, state=img)
+ if (img['is_active'] == 0):
+ self._actimg_retry += 1
+ self.log.debug("do retry", retry=self._actimg_retry)
+ self.reset_actimg()
+ else:
+ self._actimg_retry = 0
+ self._current_deferred = self._device.omci_cc.send_commit_image(self._image_id)
+ self._current_deferred.addCallbacks(self.__commit_success, self.__commit_fail)
+
+ def on_enter_done_image(self):
+ self.log.debug("on_enter_done_image", result=self._result)
+ if self._result == ReasonCodes.Success:
+ self.reactor.callLater(0, self._ret_deferred.callback, self._image_download) # (str(self._instance_id))
+ else:
+ self._ret_deferred.errback(failure.Failure(Exception('ONU Software Download Failed, instance ' + str(self._image_id))))
+
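+    # The two helpers below compute the MSB-first CRC-32 carried in the OMCI End Software
+    # Download message: the table is generated from the standard polynomial 0x04C11DB7
+    # (listed bit-by-bit below), and crc32() inverts the accumulator on entry and exit so
+    # it can be applied incrementally, one download window at a time, starting from 0.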
+ def __crc_GenTable32(self):
+ if self._crctable_init:
+ return
+
+ # x32 + x26 + x23 + x22 + x16 + x12 + x11 + x10 + x8 + x7 + x5 + x4 + x2 + x + 1
+ pn32 = [0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26]
+ poly = 0
+ for i in pn32:
+ poly |= (1 << i)
+
+ for i in range(0, 256):
+ _accum = (i << 24) & 0xFFFFFFFF
+ for j in range(0, 8):
+ if _accum & (1 << 31):
+ _accum = (_accum << 1) ^ poly
+ else:
+ _accum = (_accum << 1) & 0xFFFFFFFF
+ # self.crctable[i] = accum
+ self.crctable.append(_accum)
+ self._crctable_init = True
+
+ def crc32(self, accum, data):
+ self.__crc_GenTable32()
+ _accum = ~accum & 0xFFFFFFFF
+ num = len(data)
+ for i in range(0, num):
+ _accum = self.crctable[((_accum >> 24) ^ ord(data[i])) & 0xFF] ^ ((_accum << 8) & 0xFFFFFFFF)
+
+ return ~_accum & 0xFFFFFFFF
+
+###################################################################################
+## OMCI Software Image Activation/Committing Procedure
+###################################################################################
+'''
+class OmciSoftwareImageActivateSTM(object):
+ OMCI_SWIMG_ACTIVATE_STATES = ['starting', 'activating', 'busy', 'rebooting', 'committing', 'done', 'failed']
+ OMCI_SWIMG_ACTIVATE_TRANSITIONS = [
+ {'trigger': 'activate', 'source': ['starting', 'busy'], 'dest': 'activating'},
+ {'trigger': 'onu_busy', 'source': 'activating', 'dest': 'busy'},
+ {'trigger': 'reboot', 'source': 'activating', 'dest': 'rebooting'},
+ {'trigger': 'do_commit', 'source': ['activating', 'rebooting'], 'dest': 'committing'},
+ {'trigger': 'commit_ok', 'source': 'committing', 'dest': 'done'},
+ {'trigger': 'reset', 'source': ['activating', 'rebooting', 'committing', 'failed'], 'dest': 'starting'},
+ {'trigger': 'fail', 'source': ['starting', 'activating', 'rebooting', 'committing'], 'dest': 'failed'}
+ ]
+ OMCI_SWIMG_ACTIVATE_TRANSITIONS_TIMEOUT = 10 # Seconds to delay after task failure/timeout
+ OMCI_SWIMG_ACTIVATE_RETRY_MAX = 2
+ def __init__(self, omci_agent, dev_id, target_img_entity_id, image_download,
+ states=OMCI_SWIMG_ACTIVATE_STATES,
+ transitions=OMCI_SWIMG_ACTIVATE_TRANSITIONS,
+ initial_state='disabled',
+ timeout_delay=OMCI_SWIMG_ACTIVATE_TRANSITIONS_TIMEOUT,
+ advertise_events=True,
+ clock=None):
+ self.log = structlog.get_logger(device_id=dev_id)
+ self._omci_agent = omci_agent
+ self._device_id = dev_id
+ self._device = omci_agent.get_device(dev_id)
+ self._to_image = target_img_entity_id
+ self._from_image = 0 if self._to_image == 1 else 1
+ self._image_download = image_download
+ # self._images = dict()
+ self._timeout = timeout_delay
+ self._timeout_dc = None
+ self.reactor = clock if clock is not None else reactor
+ self._retry_max = OmciSoftwareImageActivateSTM.OMCI_SWIMG_ACTIVATE_RETRY_MAX
+ self._retry = 0
+ self._deferred = None
+ self.ret_deferred = None
+ self.machine = Machine(model=self,
+ states=states,
+ transitions=transitions,
+ initial='starting',
+ queued=True,
+ name='{}-image_activate_machine'.format(self.__class__.__name__))
+ self.log.debug("OmciSoftwareImageActivateSTM", target=self._to_image)
+
+ def __activate_resp_success(self, rx_frame):
+ if rx_frame.fields['message_type'] == 0x36: # (OmciActivateImageResponse)
+ omci_data = rx_frame.fields['omci_message']
+ if omci_data.fields['result'] == 0:
+ self.log.debug("Activate software image success, rebooting ONU ...", device_id=self._device_id)
+ self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._to_image, {"is_active": 1})
+ self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._from_image, {"is_active": 0})
+ self.reboot()
+ elif omci_data.fields['result'] == 6: # Device Busy
+ self.log.debug('OMCI Activate Image Busy')
+ self.onu_busy()
+ else:
+ self.log.debug('OMCI Activate Image Failed', reason=omci_data['result'])
+ else:
+ self.log.debug('Receive Unexpected OMCI', message_type=rx_frame['message_type'])
+
+ def __activate_fail(self, fail):
+ self.log.debug("Activate software image failed", faile=fail)
+
+ def __commit_success(self, rx_frame):
+ self.log.debug("Commit software success", device_id=self._device_id)
+ self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._to_image, {"is_committed": 1})
+ self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._from_image, {"is_committed": 0})
+ self.commit_ok()
+
+ def __commit_fail(self, fail):
+ self.log.debug("Commit software image failed", faile=fail)
+
+ @property
+ def status(self):
+ return self._image_download
+
+ def start(self):
+ self.log.debug("Start switch software image", target=self._to_image)
+ # self._images[0] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed", "is_valid"])
+ # self._images[1] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed", "is_valid"])
+ # if (self._images[self._to_image]["is_active"] == 0 and self._images[self._to_image]["is_valid"] == 1):
+ self.ret_deferred = Deferred()
+ self._image_download.image_state = ImageDownload.IMAGE_ACTIVATE
+ self.reactor.callLater(0, self.activate)
+ return self.ret_deferred
+
+ def on_enter_starting(self):
+ # self.log.debug("on_enter_starting")
+ # self._images[0] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed", "is_valid"])
+ # self._images[1] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed", "is_valid"])
+ # if (self._images[self._to_image]["is_active"] != 1 and self._images[self._to_image]["is_valid"] == 1):
+ if self._retry > self._retry_max:
+ self.log.debug("failed: retry max", retries=self._retry)
+ self.fail()
+ else:
+ self.activate()
+
+ def on_enter_activating(self):
+ img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id,
+ self._to_image, ["is_active", "is_committed", "is_valid"])
+
+ self.log.debug("on_enter_activating", instance=self._to_image, state=img)
+ if img["is_active"] == 0:
+ #if img["is_valid"] == 1:
+ self._deferred = self._device.omci_cc.send_active_image(self._to_image)
+ self._deferred.addCallbacks(self.__activate_resp_success, self.__activate_fail)
+ #else:
+ # self.fail()
+ else:
+ self.do_commit()
+
+ def on_enter_busy(self):
+ self.log.debug("on_enter_busy")
+ self.reactor.callLater(3, self.activate)
+
+ def on_enter_rebooting(self):
+ self.log.debug("on_enter_rebooting")
+ if self._timeout_dc == None:
+ self._timeout_dc = self.reactor.callLater(self._timeout, self.fail)
+
+ def on_exit_rebooting(self):
+ self.log.debug("on_exit_rebooting")
+ if self._timeout_dc and self._timeout_dc.active:
+ self._timeout_dc.cancel()
+ self._timeout_dc = None
+
+ def on_enter_committing(self):
+ # self.log.debug("on_enter_committing")
+ img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id,
+ self._to_image, ["is_active", "is_committed", "is_valid"])
+ self.log.debug("on_enter_committing", instance=self._to_image, state=img)
+ if (img['is_active'] == 0):
+ self._retry += 1
+ self.log.debug("do retry", retry=self._retry)
+ self.reset()
+ else:
+ self._retry = 0
+ self._deferred = self._device.omci_cc.send_commit_image(self._to_image)
+ self._deferred.addCallbacks(self.__commit_success, self.__commit_fail)
+
+ def on_enter_done(self):
+ self.log.debug("on_enter_done")
+ self._image_download.image_state = ImageDownload.IMAGE_ACTIVE
+ self.ret_deferred.callback(self._to_image)
+
+ def on_enter_failed(self):
+ self.log.debug("on_enter_failed")
+ self._image_download.image_state = ImageDownload.IMAGE_REVERT
+ self.ret_deferred.errback(failure.Failure(Exception('ONU Software Activating Failed, instance ' + str(self._to_image))))
+'''
+
+###################################################################################
+## Image Agent for OLT/ONT software image handling
+###################################################################################
+class ImageAgent(object):
+ """
+    Image Agent supports multiple state machines running at the same time:
+    one ImageDownloadeSTM per image being fetched from a server, plus the OMCI
+    software image upgrade state machine that pushes a downloaded image to the ONU.
+ """
+
+ DEFAULT_LOCAL_ROOT = "/"
+
+ # def __init__(self, omci_agent, dev_id, stm_cls, img_tasks, advertise_events=True):
+ def __init__(self, omci_agent, dev_id,
+ dwld_stm_cls, dwld_img_tasks,
+ upgrade_onu_stm_cls, upgrade_onu_tasks,
+ # image_activate_stm_cls,
+ advertise_events=True, local_dir=None, clock=None):
+ """
+ Class initialization
+
+ :param omci_agent: (OpenOmciAgent) Agent
+ :param dev_id : (str) ONU Device ID
+ :param dwld_stm_cls : (ImageDownloadeSTM) Image download state machine class
+        :param dwld_img_tasks : (dict) file download tasks, e.g. {'download-file': FileDownloadTask}
+ :param upgrade_onu_stm_cls : (OmciSoftwareImageDownloadSTM) ONU Image upgrade state machine class
+        :param upgrade_onu_tasks : (dict) ONU upgrade tasks, e.g. {'omci_upgrade_task': OmciSwImageUpgradeTask}
+ # :param image_activate_stm_cls: (OmciSoftwareImageActivateSTM)
+ """
+
+ self.log = structlog.get_logger(device_id=dev_id)
+
+ self._omci_agent = omci_agent
+ self._device_id = dev_id
+ self._dwld_stm_cls = dwld_stm_cls
+ # self._image_download_sm = None
+ self._images = dict()
+ self._download_task_cls = dwld_img_tasks['download-file']
+
+ self._omci_upgrade_sm_cls = upgrade_onu_stm_cls
+ self._omci_upgrade_task_cls = upgrade_onu_tasks['omci_upgrade_task']
+ self._omci_upgrade_task = None
+ self._omci_upgrade_deferred = None
+
+ # self._omci_activate_img_sm_cls = image_activate_stm_cls
+ # self._omci_activate_img_sm = None
+ self.reactor = clock if clock is not None else reactor
+
+ self._advertise_events = advertise_events
+ # self._local_dir = None
+
+ self._device = None
+ # onu_dev = self._omci_agent.get_device(self._device_id)
+ # assert device
+
+ # self._local_dir = DEFAULT_LOCAL_ROOT + onu_dev.adapter_agent.name
+ # self.log.debug("ImageAgent", local_dir=self._local_dir)
+
+
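+    # Pick the SoftwareImage ME instance (0 or 1) that is currently not active according
+    # to the MIB database; this is the instance the new image is downloaded to and then
+    # activated/committed.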
+ def __get_standby_image_instance(self):
+ instance_id = None
+ instance_0 = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed"])
+ if instance_0['is_active'] == 1:
+ instance_id = 1
+ else:
+ instance_1 = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed"])
+ if instance_1['is_active'] == 1:
+ instance_id = 0
+ return instance_id
+
+ def __clear_task(self, arg):
+        self._omci_upgrade_task = None
+
+ # def get_image(self, name, local_dir, remote_url, timeout_delay=ImageDownloadeSTM.DEFAULT_TIMEOUT_RETRY):
+ def get_image(self, image_download, timeout_delay=ImageDownloadeSTM.DEFAULT_TIMEOUT_RETRY):
+
+ """
+        Get the named image from the server(s)
+        :param image_download: (voltha_pb2.ImageDownload)
+        :param timeout_delay : (number) timeout for the download task
+
+        :return: a Deferred that fires once the file is locally available or has been
+                 downloaded successfully; the caller registers callback and errback on it to get notified
+ """
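+        # Illustrative usage from an ONU adapter (names are hypothetical):
+        #   d = image_agent.get_image(img_dnld)   # img_dnld: voltha_pb2.ImageDownload
+        #   d.addCallbacks(lambda img: image_agent.onu_omci_download(img.name),
+        #                  lambda fail: log.warn('image-download-failed', reason=fail))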
+ self.log.debug("get_image", download=image_download)
+
+ # if self._local_dir is None:
+ # onu_dev = self._omci_agent.get_device(self._device_id)
+ # assert onu_dev
+ # if image_download.local_dir is None:
+ # self._local_dir = ImageAgent.DEFAULT_LOCAL_ROOT + onu_dev.adapter_agent.name
+ # else:
+ # self._local_dir = image_download.local_dir + '/' + onu_dev.adapter_agent.name
+
+ # self.log.debug("ImageAgent", local_dir=self._local_dir)
+ # image_download.local_dir = self._local_dir
+
+ # if os.path.isfile(self._local_dir + '/' + image_download.name): # image file exists
+ # d = Deferred()
+ # self.reactor.callLater(0, d.callback, image_download)
+ # self.log.debug("Image file exists")
+ # return d
+
+ img_dnld_sm = self._images.get(image_download.name)
+ if img_dnld_sm is None:
+ img_dnld_sm = self._dwld_stm_cls(self._omci_agent, # self._device_id, name, local_dir, remote_url,
+ image_download,
+ self._download_task_cls,
+ timeout_delay=timeout_delay,
+ clock=self.reactor
+ )
+ self._images[image_download.name] = img_dnld_sm
+
+ # if self._image_download_sm is None:
+ # self._image_download_sm = self._dwld_stm_cls(self._omci_agent, # self._device_id, name, local_dir, remote_url,
+ # image_download,
+ # self._download_task_cls,
+ # timeout_delay=timeout_delay,
+ # clock=self.reactor
+ # )
+ # else:
+ # if self._image_download_sm.download_status.state != ImageDownload.DOWNLOAD_SUCCEEDED:
+ # self._image_download_sm.reset()
+
+ d = img_dnld_sm.get_file()
+ return d
+
+ def cancel_download_image(self, name):
+ img_dnld_sm = self._images.pop(name, None)
+ if img_dnld_sm is not None:
+ img_dnld_sm.stop()
+
+
+ def onu_omci_download(self, image_dnld_name):
+ """
+        Start upgrading the ONU with the named, previously downloaded image.
+        :param image_dnld_name: (str) name of the image (key into the download state machines)
+        :return: a Deferred that fires once the upgrade succeeds or fails,
+                 or None if the image does not exist
+ """
+ self.log.debug("onu_omci_download", image=image_dnld_name)
+
+ image_dnld_sm = self._images.get(image_dnld_name)
+ if image_dnld_sm is None:
+ return None
+
+ self._device = self._omci_agent.get_device(image_dnld_sm.status.id) if self._device is None else self._device
+
+ # if restart:
+ # self.cancel_upgrade_onu()
+
+ if self._omci_upgrade_task is None:
+ img_id = self.__get_standby_image_instance()
+ self.log.debug("start task", image_Id=img_id, task=self._omci_upgrade_sm_cls)
+ self._omci_upgrade_task = self._omci_upgrade_task_cls(img_id,
+ self._omci_upgrade_sm_cls,
+ self._omci_agent,
+ image_dnld_sm.status, clock=self.reactor)
+ self.log.debug("task created but not started")
+ # self._device.task_runner.start()
+ self._omci_upgrade_deferred = self._device.task_runner.queue_task(self._omci_upgrade_task)
+ self._omci_upgrade_deferred.addBoth(self.__clear_task)
+ return self._omci_upgrade_deferred
+
+
+ def cancel_upgrade_onu(self):
+ self.log.debug("cancel_upgrade_onu")
+ if self._omci_upgrade_task is not None:
+ self.log.debug("cancel_upgrade_onu", running=self._omci_upgrade_task.running)
+ # if self._omci_upgrade_task.running:
+ self._omci_upgrade_task.stop()
+ self._omci_upgrade_task = None
+ if self._omci_upgrade_deferred is not None:
+ self.reactor.callLater(0, self._omci_upgrade_deferred.cancel)
+ self._omci_upgrade_deferred = None
+
+
+ # def activate_onu_image(self, image_name):
+ # self.log.debug("activate_onu_image", image=image_name)
+ # img_dnld = self.get_image_status(image_name)
+ # if img_dnld is None:
+ # return None
+
+ # img_dnld.image_state = ImageDownload.IMAGE_INACTIVE
+ # if self._omci_activate_img_sm is None:
+ # self._omci_activate_img_sm = self._omci_activate_img_sm_cls(self._omci_agent, self._device_id,
+ # self.__get_standby_image_instance(),
+ # img_dnld, clock=self.reactor)
+ # return self._omci_activate_img_sm.start()
+ # else:
+ # return None
+
+ def onu_bootup(self):
+ if self._omci_upgrade_task is not None:
+ self._omci_upgrade_task.onu_bootup()
+
+ def get_image_status(self, image_name):
+ """
+ Return (ImageDownload)
+ """
+ sm = self._images.get(image_name)
+ return sm.status if sm is not None else None
+
diff --git a/python/adapters/extensions/omci/state_machines/mib_sync.py b/python/adapters/extensions/omci/state_machines/mib_sync.py
new file mode 100644
index 0000000..d257257
--- /dev/null
+++ b/python/adapters/extensions/omci/state_machines/mib_sync.py
@@ -0,0 +1,942 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from datetime import datetime, timedelta
+from transitions import Machine
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.database.mib_db_api import MDS_KEY
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes, \
+ AttributeAccess
+from voltha.extensions.omci.omci_cc import OmciCCRxEvents, OMCI_CC, TX_REQUEST_KEY, \
+ RX_RESPONSE_KEY
+from voltha.extensions.omci.onu_device_entry import OnuDeviceEvents, OnuDeviceEntry, \
+ SUPPORTED_MESSAGE_ENTITY_KEY, SUPPORTED_MESSAGE_TYPES_KEY
+from voltha.extensions.omci.omci_entities import OntData
+from common.event_bus import EventBusClient
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+
+RxEvent = OmciCCRxEvents
+DevEvent = OnuDeviceEvents
+OP = EntityOperations
+RC = ReasonCodes
+AA = AttributeAccess
+
+
+class MibSynchronizer(object):
+ """
+ OpenOMCI MIB Synchronizer state machine
+ """
+ DEFAULT_STATES = ['disabled', 'starting', 'uploading', 'examining_mds',
+ 'in_sync', 'out_of_sync', 'auditing', 'resynchronizing']
+
+ DEFAULT_TRANSITIONS = [
+ {'trigger': 'start', 'source': 'disabled', 'dest': 'starting'},
+
+ {'trigger': 'upload_mib', 'source': 'starting', 'dest': 'uploading'},
+ {'trigger': 'examine_mds', 'source': 'starting', 'dest': 'examining_mds'},
+
+ {'trigger': 'success', 'source': 'uploading', 'dest': 'in_sync'},
+
+ {'trigger': 'success', 'source': 'examining_mds', 'dest': 'in_sync'},
+ {'trigger': 'mismatch', 'source': 'examining_mds', 'dest': 'resynchronizing'},
+
+ {'trigger': 'audit_mib', 'source': 'in_sync', 'dest': 'auditing'},
+
+ {'trigger': 'success', 'source': 'out_of_sync', 'dest': 'in_sync'},
+ {'trigger': 'audit_mib', 'source': 'out_of_sync', 'dest': 'auditing'},
+
+ {'trigger': 'success', 'source': 'auditing', 'dest': 'in_sync'},
+ {'trigger': 'mismatch', 'source': 'auditing', 'dest': 'resynchronizing'},
+ {'trigger': 'force_resync', 'source': 'auditing', 'dest': 'resynchronizing'},
+
+ {'trigger': 'success', 'source': 'resynchronizing', 'dest': 'in_sync'},
+ {'trigger': 'diffs_found', 'source': 'resynchronizing', 'dest': 'out_of_sync'},
+
+ # Do wildcard 'timeout' trigger that sends us back to start
+ {'trigger': 'timeout', 'source': '*', 'dest': 'starting'},
+
+ # Do wildcard 'stop' trigger last so it covers all previous states
+ {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+ ]
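+    # Typical lifecycle (derived from the transitions above): a brand-new ONU goes
+    # starting -> uploading -> in_sync, while a previously synchronized ONU goes
+    # starting -> examining_mds and then either directly to in_sync or through
+    # resynchronizing (and out_of_sync reconciliation) when differences are found.
+    # While in_sync, periodic audits and forced resyncs re-validate the database;
+    # any 'timeout' returns to 'starting' and 'stop' disables the machine.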
+ DEFAULT_TIMEOUT_RETRY = 5 # Seconds to delay after task failure/timeout
+ DEFAULT_AUDIT_DELAY = 60 # Periodic tick to audit the MIB Data Sync
+ DEFAULT_RESYNC_DELAY = 300 # Periodically force a resync
+
+ def __init__(self, agent, device_id, mib_sync_tasks, db,
+ advertise_events=False,
+ states=DEFAULT_STATES,
+ transitions=DEFAULT_TRANSITIONS,
+ initial_state='disabled',
+ timeout_delay=DEFAULT_TIMEOUT_RETRY,
+ audit_delay=DEFAULT_AUDIT_DELAY,
+ resync_delay=DEFAULT_RESYNC_DELAY):
+ """
+ Class initialization
+
+ :param agent: (OpenOmciAgent) Agent
+ :param device_id: (str) ONU Device ID
+ :param db: (MibDbVolatileDict) MIB Database
+ :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+ :param mib_sync_tasks: (dict) Tasks to run
+ :param states: (list) List of valid states
+ :param transitions: (dict) Dictionary of triggers and state changes
+ :param initial_state: (str) Initial state machine state
+ :param timeout_delay: (int/float) Number of seconds after a timeout to attempt
+ a retry (goes back to starting state)
+ :param audit_delay: (int) Seconds between MIB audits while in sync. Set to
+ zero to disable audit. An operator can request
+ an audit manually by calling 'self.audit_mib'
+ :param resync_delay: (int) Seconds in sync before performing a forced MIB
+ resynchronization
+ """
+ self.log = structlog.get_logger(device_id=device_id)
+
+ self._agent = agent
+ self._device_id = device_id
+ self._device = None
+ self._database = db
+ self._timeout_delay = timeout_delay
+ self._audit_delay = audit_delay
+ self._resync_delay = resync_delay
+
+ self._upload_task = mib_sync_tasks['mib-upload']
+ self._get_mds_task = mib_sync_tasks['get-mds']
+ self._audit_task = mib_sync_tasks['mib-audit']
+ self._resync_task = mib_sync_tasks['mib-resync']
+ self._reconcile_task = mib_sync_tasks['mib-reconcile']
+ self._advertise_events = advertise_events
+
+ self._deferred = None
+ self._current_task = None # TODO: Support multiple running tasks after v.2.0 release
+ self._task_deferred = None
+ self._mib_data_sync = 0
+ self._last_mib_db_sync_value = None
+ self._device_in_db = False
+ self._next_resync = None
+
+ self._on_olt_only_diffs = None
+ self._on_onu_only_diffs = None
+ self._attr_diffs = None
+ self._audited_olt_db = None
+ self._audited_onu_db = None
+
+ self._event_bus = EventBusClient()
+ self._omci_cc_subscriptions = { # RxEvent.enum -> Subscription Object
+ RxEvent.MIB_Reset: None,
+ RxEvent.AVC_Notification: None,
+ RxEvent.MIB_Upload: None,
+ RxEvent.MIB_Upload_Next: None,
+ RxEvent.Create: None,
+ RxEvent.Delete: None,
+ RxEvent.Set: None,
+ }
+ self._omci_cc_sub_mapping = {
+ RxEvent.MIB_Reset: self.on_mib_reset_response,
+ RxEvent.AVC_Notification: self.on_avc_notification,
+ RxEvent.MIB_Upload: self.on_mib_upload_response,
+ RxEvent.MIB_Upload_Next: self.on_mib_upload_next_response,
+ RxEvent.Create: self.on_create_response,
+ RxEvent.Delete: self.on_delete_response,
+ RxEvent.Set: self.on_set_response,
+ }
+ self._onu_dev_subscriptions = { # DevEvent.enum -> Subscription Object
+ DevEvent.OmciCapabilitiesEvent: None
+ }
+ self._onu_dev_sub_mapping = {
+ DevEvent.OmciCapabilitiesEvent: self.on_capabilities_event
+ }
+
+ # Statistics and attributes
+ # TODO: add any others if it will support problem diagnosis
+
+ # Set up state machine to manage states
+ self.machine = Machine(model=self, states=states,
+ transitions=transitions,
+ initial=initial_state,
+ queued=True,
+ name='{}-{}'.format(self.__class__.__name__,
+ device_id))
+ try:
+ import logging
+ logging.getLogger('transitions').setLevel(logging.WARNING)
+ except Exception as e:
+ self.log.exception('log-level-failed', e=e)
+
+ def _cancel_deferred(self):
+ d1, self._deferred = self._deferred, None
+ d2, self._task_deferred = self._task_deferred, None
+
+        for d in [d1, d2]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def __str__(self):
+ return 'MIBSynchronizer: Device ID: {}, State:{}'.format(self._device_id, self.state)
+
+ def delete(self):
+ """
+ Cleanup any state information
+ """
+ self.stop()
+ db, self._database = self._database, None
+
+ if db is not None:
+ db.remove(self._device_id)
+
+ @property
+ def device_id(self):
+ return self._device_id
+
+ @property
+ def mib_data_sync(self):
+ return self._mib_data_sync
+
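+    # MIB Data Sync (MDS) is a single-byte counter mirrored from the ONU's ONT Data ME,
+    # so it wraps from 255 back to 0; persisting it lets OpenOMCI compare its copy with
+    # the ONU's across restarts.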
+ def increment_mib_data_sync(self):
+ self._mib_data_sync += 1
+ if self._mib_data_sync > 255:
+ self._mib_data_sync = 0
+
+ if self._database is not None:
+ self._database.save_mib_data_sync(self._device_id,
+ self._mib_data_sync)
+
+ @property
+ def last_mib_db_sync(self):
+ return self._last_mib_db_sync_value
+
+ @last_mib_db_sync.setter
+ def last_mib_db_sync(self, value):
+ self._last_mib_db_sync_value = value
+ if self._database is not None:
+ self._database.save_last_sync(self.device_id, value)
+
+ @property
+ def is_new_onu(self):
+ """
+ Is this a new ONU (has never completed MIB synchronization)
+ :return: (bool) True if this ONU should be considered new
+ """
+ return self.last_mib_db_sync is None
+
+ @property
+ def advertise_events(self):
+ return self._advertise_events
+
+ @advertise_events.setter
+ def advertise_events(self, value):
+ if not isinstance(value, bool):
+ raise TypeError('Advertise event is a boolean')
+ self._advertise_events = value
+
+ def advertise(self, event, info):
+ """Advertise an event on the OpenOMCI event bus"""
+ if self._advertise_events:
+ self._agent.advertise(event,
+ {
+ 'state-machine': self.machine.name,
+ 'info': info,
+ 'time': str(datetime.utcnow())
+ })
+
+ def on_enter_disabled(self):
+ """
+ State machine is being stopped
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ self._cancel_deferred()
+ if self._device is not None:
+ self._device.mib_db_in_sync = False
+
+ task, self._current_task = self._current_task, None
+ if task is not None:
+ task.stop()
+
+ # Drop Response and Autonomous notification subscriptions
+ for event, sub in self._omci_cc_subscriptions.iteritems():
+ if sub is not None:
+ self._omci_cc_subscriptions[event] = None
+ self._device.omci_cc.event_bus.unsubscribe(sub)
+
+ for event, sub in self._onu_dev_subscriptions.iteritems():
+ if sub is not None:
+ self._onu_dev_subscriptions[event] = None
+ self._device.event_bus.unsubscribe(sub)
+
+ # TODO: Stop and remove any currently running or scheduled tasks
+ # TODO: Anything else?
+
+ def _seed_database(self):
+ if not self._device_in_db:
+ try:
+ try:
+ self._database.start()
+ self._database.add(self._device_id)
+ self.log.debug('seed-db-does-not-exist', device_id=self._device_id)
+
+ except KeyError:
+ # Device already is in database
+ self.log.debug('seed-db-exist', device_id=self._device_id)
+ self._mib_data_sync = self._database.get_mib_data_sync(self._device_id)
+ self._last_mib_db_sync_value = self._database.get_last_sync(self._device_id)
+
+ self._device_in_db = True
+
+ except Exception as e:
+ self.log.exception('seed-database-failure', e=e)
+
+ def on_enter_starting(self):
+ """
+ Determine ONU status and start/re-start MIB Synchronization tasks
+ """
+ self._device = self._agent.get_device(self._device_id)
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ # Make sure root of external MIB Database exists
+ self._seed_database()
+
+ # Set up Response and Autonomous notification subscriptions
+ try:
+ for event, sub in self._omci_cc_sub_mapping.iteritems():
+ if self._omci_cc_subscriptions[event] is None:
+ self._omci_cc_subscriptions[event] = \
+ self._device.omci_cc.event_bus.subscribe(
+ topic=OMCI_CC.event_bus_topic(self._device_id, event),
+ callback=sub)
+
+ except Exception as e:
+ self.log.exception('omci-cc-subscription-setup', e=e)
+
+ # Set up ONU device subscriptions
+ try:
+ for event, sub in self._onu_dev_sub_mapping.iteritems():
+ if self._onu_dev_subscriptions[event] is None:
+ self._onu_dev_subscriptions[event] = \
+ self._device.event_bus.subscribe(
+ topic=OnuDeviceEntry.event_bus_topic(self._device_id, event),
+ callback=sub)
+
+ except Exception as e:
+ self.log.exception('dev-subscription-setup', e=e)
+
+ # Clear any previous audit results
+ self._on_olt_only_diffs = None
+ self._on_onu_only_diffs = None
+ self._attr_diffs = None
+ self._audited_olt_db = None
+ self._audited_onu_db = None
+
+ # Determine if this ONU has ever synchronized
+ if self.is_new_onu:
+ # Start full MIB upload
+ self._deferred = reactor.callLater(0, self.upload_mib)
+
+ else:
+ # Examine the MIB Data Sync
+ self._deferred = reactor.callLater(0, self.examine_mds)
+
+ def on_enter_uploading(self):
+ """
+ Begin full MIB data upload, starting with a MIB RESET
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ def success(results):
+ self.log.debug('mib-upload-success', results=results)
+ self._current_task = None
+ self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+ self._deferred = reactor.callLater(0, self.success)
+
+ def failure(reason):
+ self.log.info('mib-upload-failure', reason=reason)
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+ self._device.mib_db_in_sync = False
+ self._current_task = self._upload_task(self._agent, self._device_id)
+
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_examining_mds(self):
+ """
+ Create a simple task to fetch the MIB Data Sync value and
+ determine if the ONU value matches what is in the MIB database
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ self._mib_data_sync = self._database.get_mib_data_sync(self._device_id) or 0
+
+ def success(onu_mds_value):
+ self.log.debug('examine-mds-success', onu_mds_value=onu_mds_value, olt_mds_value=self.mib_data_sync)
+ self._current_task = None
+
+ # Examine MDS value
+ if self.mib_data_sync == onu_mds_value:
+ self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+ self._deferred = reactor.callLater(0, self.success)
+ else:
+ self._deferred = reactor.callLater(0, self.mismatch)
+
+ def failure(reason):
+ self.log.info('examine-mds-failure', reason=reason)
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+ self._device.mib_db_in_sync = False
+ self._current_task = self._get_mds_task(self._agent, self._device_id)
+
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_in_sync(self):
+ """
+ The OLT/OpenOMCI MIB Database is in sync with the ONU MIB Database.
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self.last_mib_db_sync = datetime.utcnow()
+ self._device.mib_db_in_sync = True
+
+ if self._audit_delay > 0:
+ self._deferred = reactor.callLater(self._audit_delay, self.audit_mib)
+
+ def on_enter_out_of_sync(self):
+ """
+ The MIB in OpenOMCI and the ONU are out of sync. This can happen if:
+
+ o the MIB_Data_Sync values are not equal, or
+ o the MIBs were compared and differences were found.
+
+ Schedule a task to reconcile the differences
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ # We are only out-of-sync if there were differences. If here due to MDS
+        # value differences, still run the reconcile so we update the ONU's MDS
+ # value to match ours.
+
+ self._device.mib_db_in_sync = self._attr_diffs is None and \
+ self._on_onu_only_diffs is None and \
+ self._on_olt_only_diffs is None
+
+ def success(onu_mds_value):
+ self.log.debug('reconcile-success', mds_value=onu_mds_value)
+ self._current_task = None
+ self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+ self._deferred = reactor.callLater(0, self.success)
+
+ def failure(reason):
+ self.log.info('reconcile-failure', reason=reason)
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+ diff_collection = {
+ 'onu-only': self._on_onu_only_diffs,
+ 'olt-only': self._on_olt_only_diffs,
+ 'attributes': self._attr_diffs,
+ 'olt-db': self._audited_olt_db,
+ 'onu-db': self._audited_onu_db
+ }
+ # Clear out results since reconciliation task will be handling them
+ self._on_olt_only_diffs = None
+ self._on_onu_only_diffs = None
+ self._attr_diffs = None
+ self._audited_olt_db = None
+ self._audited_onu_db = None
+
+ self._current_task = self._reconcile_task(self._agent, self._device_id, diff_collection)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_auditing(self):
+ """
+ Perform a MIB Audit. If our last MIB resync was too long in the
+ past, perform a resynchronization anyway
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ if self._next_resync is None:
+ self.log.error('next-forced-resync-error', msg='Next Resync should always be valid at this point')
+ self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+ if datetime.utcnow() >= self._next_resync:
+ self._deferred = reactor.callLater(0, self.force_resync)
+ else:
+ def success(onu_mds_value):
+ self.log.debug('audit-success', onu_mds_value=onu_mds_value, olt_mds_value=self.mib_data_sync)
+ self._current_task = None
+
+ # Examine MDS value
+ if self.mib_data_sync == onu_mds_value:
+ self._deferred = reactor.callLater(0, self.success)
+ else:
+ self._device.mib_db_in_sync = False
+ self._deferred = reactor.callLater(0, self.mismatch)
+
+ def failure(reason):
+ self.log.info('audit-failure', reason=reason)
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+ self._current_task = self._audit_task(self._agent, self._device_id)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_resynchronizing(self):
+ """
+ Perform a resynchronization of the MIB database
+
+ First calculate any differences
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ def success(results):
+ self.log.debug('resync-success', results=results)
+
+ on_olt_only = results.get('on-olt-only')
+ on_onu_only = results.get('on-onu-only')
+ attr_diffs = results.get('attr-diffs')
+ olt_db = results.get('olt-db')
+ onu_db = results.get('onu-db')
+
+ self._current_task = None
+ self._on_olt_only_diffs = on_olt_only if on_olt_only and len(on_olt_only) else None
+ self._on_onu_only_diffs = on_onu_only if on_onu_only and len(on_onu_only) else None
+ self._attr_diffs = attr_diffs if attr_diffs and len(attr_diffs) else None
+ self._audited_olt_db = olt_db
+ self._audited_onu_db = onu_db
+
+ mds_equal = self.mib_data_sync == self._audited_onu_db[MDS_KEY]
+
+ if mds_equal and all(diff is None for diff in [self._on_olt_only_diffs,
+ self._on_onu_only_diffs,
+ self._attr_diffs]):
+ self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+ self._deferred = reactor.callLater(0, self.success)
+ else:
+ self._deferred = reactor.callLater(0, self.diffs_found)
+
+ def failure(reason):
+ self.log.info('resync-failure', reason=reason)
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+ self._current_task = self._resync_task(self._agent, self._device_id)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_mib_reset_response(self, _topic, msg):
+ """
+ Called upon receipt of a MIB Reset Response for this ONU
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-mib-reset-response', state=self.state)
+ try:
+ response = msg[RX_RESPONSE_KEY]
+
+ # Check if expected in current mib_sync state
+ if self.state != 'uploading' or self._omci_cc_subscriptions[RxEvent.MIB_Reset] is None:
+ self.log.error('rx-in-invalid-state', state=self.state)
+
+ else:
+ now = datetime.utcnow()
+
+ if not isinstance(response, OmciFrame):
+ raise TypeError('Response should be an OmciFrame')
+
+ omci_msg = response.fields['omci_message'].fields
+ status = omci_msg['success_code']
+
+ assert status == RC.Success, 'Unexpected MIB reset response status: {}'. \
+ format(status)
+
+ self._device.mib_db_in_sync = False
+ self._mib_data_sync = 0
+ self._device._modified = now
+ self._database.on_mib_reset(self._device_id)
+
+ except KeyError:
+ pass # NOP
+
+ def on_avc_notification(self, _topic, msg):
+ """
+ Process an Attribute Value Change Notification
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-avc-notification', state=self.state)
+
+ if self._omci_cc_subscriptions[RxEvent.AVC_Notification]:
+ try:
+ notification = msg[RX_RESPONSE_KEY]
+
+ if self.state == 'disabled':
+ self.log.error('rx-in-invalid-state', state=self.state)
+
+ # Inspect the notification
+ omci_msg = notification.fields['omci_message'].fields
+ class_id = omci_msg['entity_class']
+ instance_id = omci_msg['entity_id']
+ data = omci_msg['data']
+ attributes = [data.keys()]
+
+ # Look up ME Instance in Database. Not-found can occur if a MIB
+ # reset has occurred
+ info = self._database.query(self.device_id, class_id, instance_id, attributes)
+ # TODO: Add old/new info to log message
+ self.log.debug('avc-change', class_id=class_id, instance_id=instance_id)
+
+ # Save the changed data to the MIB.
+ self._database.set(self.device_id, class_id, instance_id, data)
+
+ # Autonomous creation and deletion of managed entities do not
+ # result in an increment of the MIB data sync value. However,
+ # AVC's in response to a change by the Operator do incur an
+ # increment of the MIB Data Sync. If here during uploading,
+ # we issued a MIB-Reset which may generate AVC. (TODO: Focus testing during hardening)
+ if self.state == 'uploading':
+ self.increment_mib_data_sync()
+
+ except KeyError:
+ pass # NOP
+
+ def on_mib_upload_response(self, _topic, msg):
+ """
+ Process a MIB Upload response
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+        self.log.debug('on-mib-upload-response', state=self.state)
+
+ if self._omci_cc_subscriptions[RxEvent.MIB_Upload]:
+ # Check if expected in current mib_sync state
+ if self.state == 'resynchronizing':
+ # The resync task handles this
+ # TODO: Remove this subscription if we never do anything with the response
+ return
+
+ if self.state != 'uploading':
+ self.log.error('rx-in-invalid-state', state=self.state)
+
+ def on_mib_upload_next_response(self, _topic, msg):
+ """
+ Process a MIB Upload Next response
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-mib-upload-next-response', state=self.state)
+
+ if self._omci_cc_subscriptions[RxEvent.MIB_Upload_Next]:
+ try:
+ if self.state == 'resynchronizing':
+ # The resync task handles this
+ return
+
+ # Check if expected in current mib_sync state
+ if self.state != 'uploading':
+ self.log.error('rx-in-invalid-state', state=self.state)
+
+ else:
+ response = msg[RX_RESPONSE_KEY]
+
+ # Extract entity instance information
+ omci_msg = response.fields['omci_message'].fields
+
+ class_id = omci_msg['object_entity_class']
+ entity_id = omci_msg['object_entity_id']
+
+ # Filter out the 'mib_data_sync' from the database. We save that at
+ # the device level and do not want it showing up during a re-sync
+ # during data compares
+
+ if class_id == OntData.class_id:
+ return
+
+ attributes = {k: v for k, v in omci_msg['object_data'].items()}
+
+ # Save to the database
+ self._database.set(self._device_id, class_id, entity_id, attributes)
+
+ except KeyError:
+ pass # NOP
+ except Exception as e:
+ self.log.exception('upload-next', e=e)
+
+ def on_create_response(self, _topic, msg):
+ """
+        Process a Create response
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-create-response', state=self.state)
+
+ if self._omci_cc_subscriptions[RxEvent.Create]:
+ if self.state in ['disabled', 'uploading']:
+ self.log.error('rx-in-invalid-state', state=self.state)
+ return
+ try:
+ request = msg[TX_REQUEST_KEY]
+ response = msg[RX_RESPONSE_KEY]
+ status = response.fields['omci_message'].fields['success_code']
+
+ if status != RC.Success and status != RC.InstanceExists:
+ # TODO: Support offline ONTs in post VOLTHA v1.3.0
+ omci_msg = response.fields['omci_message']
+                    self.log.warn('create-response-failure',
+ class_id=omci_msg.fields['entity_class'],
+ instance_id=omci_msg.fields['entity_id'],
+ status=omci_msg.fields['success_code'],
+ status_text=self._status_to_text(omci_msg.fields['success_code']),
+ parameter_error_attributes_mask=omci_msg.fields['parameter_error_attributes_mask'])
+ else:
+ omci_msg = request.fields['omci_message'].fields
+ class_id = omci_msg['entity_class']
+ entity_id = omci_msg['entity_id']
+ attributes = {k: v for k, v in omci_msg['data'].items()}
+
+ # Save to the database
+ created = self._database.set(self._device_id, class_id, entity_id, attributes)
+
+ if created:
+ self.increment_mib_data_sync()
+
+ # If the ME contains set-by-create or writeable values that were
+ # not specified in the create command, the ONU will have
+ # initialized those fields
+
+ if class_id in self._device.me_map:
+ sbc_w_set = {attr.field.name for attr in self._device.me_map[class_id].attributes
+ if (AA.SBC in attr.access or AA.W in attr.access)
+ and attr.field.name != 'managed_entity_id'}
+
+ missing = sbc_w_set - {k for k in attributes.iterkeys()}
+
+ if len(missing):
+ # Request the missing attributes
+ self.update_sbc_w_items(class_id, entity_id, missing)
+
+ except KeyError as e:
+ pass # NOP
+
+ except Exception as e:
+ self.log.exception('create', e=e)
+
+ def update_sbc_w_items(self, class_id, entity_id, missing_attributes):
+ """
+ Perform a get-request for Set-By-Create (SBC) or writable (w) attributes
+ that were not specified in the original Create request.
+
+ :param class_id: (int) Class ID
+ :param entity_id: (int) Instance ID
+        :param missing_attributes: (set) Missing SBC or writable attributes
+ """
+ if len(missing_attributes) and class_id in self._device.me_map:
+ from voltha.extensions.omci.tasks.omci_get_request import OmciGetRequest
+
+ self.log.info('update-sbc-items', class_id=class_id, entity_id=entity_id,
+ attributes=missing_attributes)
+
+ def success(results):
+ self._database.set(self._device_id, class_id, entity_id, results.attributes)
+
+ def failure(reason):
+ self.log.warn('update-sbc-w-failed', reason=reason, class_id=class_id,
+ entity_id=entity_id, attributes=missing_attributes)
+
+ d = self._device.task_runner.queue_task(OmciGetRequest(self._agent, self._device_id,
+ self._device.me_map[class_id],
+ entity_id, missing_attributes,
+ allow_failure=True))
+ d.addCallbacks(success, failure)
+
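+    # Illustrative call shape for the method above (the class ID, entity ID, and
+    # attribute name are hypothetical, shown only to clarify the API):
+    #
+    #     self.update_sbc_w_items(class_id=some_me_class_id, entity_id=1,
+    #                             missing_attributes={'some_writable_attribute'})
+    #
+    # This queues an OmciGetRequest for just those attributes; the success callback
+    # then stores the returned values in the MIB database.
+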
+ def on_delete_response(self, _topic, msg):
+ """
+ Process a Delete response
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-delete-response', state=self.state)
+
+ if self._omci_cc_subscriptions[RxEvent.Delete]:
+ if self.state in ['disabled', 'uploading']:
+ self.log.error('rx-in-invalid-state', state=self.state)
+ return
+ try:
+ request = msg[TX_REQUEST_KEY]
+ response = msg[RX_RESPONSE_KEY]
+
+ if response.fields['omci_message'].fields['success_code'] != RC.Success:
+ # TODO: Support offline ONTs in post VOLTHA v1.3.0
+ omci_msg = response.fields['omci_message']
+                    self.log.warn('delete-response-failure',
+ class_id=omci_msg.fields['entity_class'],
+ instance_id=omci_msg.fields['entity_id'],
+ status=omci_msg.fields['success_code'],
+ status_text=self._status_to_text(omci_msg.fields['success_code']))
+ else:
+ omci_msg = request.fields['omci_message'].fields
+ class_id = omci_msg['entity_class']
+ entity_id = omci_msg['entity_id']
+
+ # Remove from the database
+ deleted = self._database.delete(self._device_id, class_id, entity_id)
+
+ if deleted:
+ self.increment_mib_data_sync()
+
+ except KeyError as e:
+ pass # NOP
+ except Exception as e:
+ self.log.exception('delete', e=e)
+
+ def on_set_response(self, _topic, msg):
+ """
+ Process a Set response
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-set-response', state=self.state)
+
+ if self._omci_cc_subscriptions[RxEvent.Set]:
+ if self.state in ['disabled', 'uploading']:
+ self.log.error('rx-in-invalid-state', state=self.state)
+ try:
+ request = msg[TX_REQUEST_KEY]
+ response = msg[RX_RESPONSE_KEY]
+
+ if response.fields['omci_message'].fields['success_code'] != RC.Success:
+ # TODO: Support offline ONTs in post VOLTHA v1.3.0
+ omci_msg = response.fields['omci_message']
+ self.log.warn('set-response-failure',
+ class_id=omci_msg.fields['entity_class'],
+ instance_id=omci_msg.fields['entity_id'],
+ status=omci_msg.fields['success_code'],
+ status_text=self._status_to_text(omci_msg.fields['success_code']),
+ unsupported_attribute_mask=omci_msg.fields['unsupported_attributes_mask'],
+ failed_attribute_mask=omci_msg.fields['failed_attributes_mask'])
+ else:
+ omci_msg = request.fields['omci_message'].fields
+ class_id = omci_msg['entity_class']
+ entity_id = omci_msg['entity_id']
+ attributes = {k: v for k, v in omci_msg['data'].items()}
+
+ # Save to the database (Do not save 'sets' of the mib-data-sync however)
+ if class_id != OntData.class_id:
+ modified = self._database.set(self._device_id, class_id, entity_id, attributes)
+ if modified:
+ self.increment_mib_data_sync()
+
+ except KeyError as _e:
+ pass # NOP
+ except Exception as e:
+ self.log.exception('set', e=e)
+
+ # TODO: Future -> Monitor Software download start, section, activate, and commit responses
+ # and increment MIB Data Sync per Table 11.2.2-1 of ITUT-T G.988 (11/2017)
+ # on page 515. Eventually also monitor set-table responses once the
+ # extended message set is supported.
+ def on_capabilities_event(self, _topic, msg):
+ """
+        Process an OMCI capabilities event
+ :param _topic: (str) OnuDeviceEntry Capabilities event
+ :param msg: (dict) Message Entities & Message Types supported
+ """
+ self._database.update_supported_managed_entities(self.device_id,
+ msg[SUPPORTED_MESSAGE_ENTITY_KEY])
+ self._database.update_supported_message_types(self.device_id,
+ msg[SUPPORTED_MESSAGE_TYPES_KEY])
+
+ def _status_to_text(self, success_code):
+ return {
+ RC.Success: "Success",
+ RC.ProcessingError: "Processing Error",
+ RC.NotSupported: "Not Supported",
+ RC.ParameterError: "Paremeter Error",
+ RC.UnknownEntity: "Unknown Entity",
+ RC.UnknownInstance: "Unknown Instance",
+ RC.DeviceBusy: "Device Busy",
+ RC.InstanceExists: "Instance Exists"
+ }.get(success_code, 'Unknown status code: {}'.format(success_code))
+
+ def query_mib(self, class_id=None, instance_id=None, attributes=None):
+ """
+ Get MIB database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+ :param class_id: (int) Managed Entity class ID
+ :param instance_id: (int) Managed Entity instance
+ :param attributes: (list or str) Managed Entity instance's attributes
+
+ :return: (dict) The value(s) requested. If class/inst/attribute is
+ not found, an empty dictionary is returned
+ :raises DatabaseStateError: If the database is not enabled or does not exist
+ """
+ from voltha.extensions.omci.database.mib_db_api import DatabaseStateError
+
+ self.log.debug('query', class_id=class_id,
+ instance_id=instance_id, attributes=attributes)
+ if self._database is None:
+ raise DatabaseStateError('Database does not yet exist')
+
+ return self._database.query(self._device_id, class_id=class_id,
+ instance_id=instance_id,
+ attributes=attributes)
+
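+    # Example query shapes on an instance of this state machine (here called
+    # 'mib_sync'; the class ID and attribute names are illustrative assumptions):
+    #
+    #     mib_sync.query_mib()                              # entire device MIB
+    #     mib_sync.query_mib(class_id=some_class_id)        # all instances of a class
+    #     mib_sync.query_mib(class_id=some_class_id,
+    #                        instance_id=0,
+    #                        attributes='vendor_id')        # one attribute of one ME
+    #
+    # Each call returns a dict, empty if nothing matches.
+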
+ def mib_set(self, class_id, entity_id, attributes):
+ """
+ Set attributes of an existing ME Class instance
+
+ This method is primarily used by other state machines to save ME specific
+ information to the persistent database. Access by objects external to the
+ OpenOMCI library is discouraged.
+
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME Class entity ID
+ :param attributes: (dict) attribute -> value pairs to set
+ """
+ # It must exist first (but attributes can be new)
+ if isinstance(attributes, dict) and len(attributes) and\
+ self.query_mib(class_id, entity_id) is not None:
+ self._database.set(self._device_id, class_id, entity_id, attributes)
+
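+    # For example (hypothetical identifiers), another state machine could persist a
+    # newly learned attribute with:
+    #
+    #     mib_sync.mib_set(some_class_id, some_entity_id,
+    #                      {'some_attribute': some_value})
+    #
+    # The ME instance must already exist in the database; only its attributes change.
+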
+ def mib_delete(self, class_id, entity_id):
+ """
+ Delete an existing ME Class instance
+
+ This method is primarily used by other state machines to delete an ME
+ from the MIB database
+
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME Class entity ID
+
+ :raises KeyError: If device does not exist
+ :raises DatabaseStateError: If the database is not enabled
+ """
+ self._database.delete(self._device_id, class_id, entity_id)
diff --git a/python/adapters/extensions/omci/state_machines/omci_onu_capabilities.py b/python/adapters/extensions/omci/state_machines/omci_onu_capabilities.py
new file mode 100644
index 0000000..c13739e
--- /dev/null
+++ b/python/adapters/extensions/omci/state_machines/omci_onu_capabilities.py
@@ -0,0 +1,262 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from transitions import Machine
+from twisted.internet import reactor
+from voltha.extensions.omci.onu_device_entry import OnuDeviceEntry, OnuDeviceEvents, IN_SYNC_KEY
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+
+
+class OnuOmciCapabilities(object):
+ """
+ OpenOMCI ONU OMCI Capabilities State machine
+ """
+ DEFAULT_STATES = ['disabled', 'out_of_sync', 'in_sync', 'idle']
+
+ DEFAULT_TRANSITIONS = [
+ {'trigger': 'start', 'source': 'disabled', 'dest': 'out_of_sync'},
+ {'trigger': 'synchronized', 'source': 'out_of_sync', 'dest': 'in_sync'},
+
+ {'trigger': 'success', 'source': 'in_sync', 'dest': 'idle'},
+ {'trigger': 'failure', 'source': 'in_sync', 'dest': 'out_of_sync'},
+
+ {'trigger': 'not_synchronized', 'source': 'idle', 'dest': 'out_of_sync'},
+
+ # Do wildcard 'stop' trigger last so it covers all previous states
+ {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+ ]
+ DEFAULT_RETRY = 10 # Seconds to delay after task failure/timeout/poll
+
+ def __init__(self, agent, device_id, tasks,
+ advertise_events=False,
+ states=DEFAULT_STATES,
+ transitions=DEFAULT_TRANSITIONS,
+ initial_state='disabled',
+ timeout_delay=DEFAULT_RETRY):
+ """
+ Class initialization
+
+ :param agent: (OpenOmciAgent) Agent
+ :param device_id: (str) ONU Device ID
+ :param tasks: (dict) Tasks to run
+ :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+ :param states: (list) List of valid states
+ :param transitions: (dict) Dictionary of triggers and state changes
+ :param initial_state: (str) Initial state machine state
+ :param timeout_delay: (int/float) Number of seconds after a timeout or poll
+ """
+ self.log = structlog.get_logger(device_id=device_id)
+
+ self._agent = agent
+ self._device_id = device_id
+ self._device = None
+ self._timeout_delay = timeout_delay
+
+ self._get_capabilities_task = tasks['get-capabilities']
+ self._advertise_events = advertise_events
+
+ self._deferred = None
+ self._current_task = None
+ self._task_deferred = None
+ self._supported_entities = frozenset()
+ self._supported_msg_types = frozenset()
+
+        self._subscriptions = { # OnuDeviceEvents.enum -> Subscription Object
+ OnuDeviceEvents.MibDatabaseSyncEvent: None
+ }
+ self._sub_mapping = {
+ OnuDeviceEvents.MibDatabaseSyncEvent: self.on_mib_sync_event
+ }
+ # Statistics and attributes
+        # TODO: add any others that would support problem diagnosis
+
+ # Set up state machine to manage states
+ self.machine = Machine(model=self, states=states,
+ transitions=transitions,
+ initial=initial_state,
+ queued=True,
+ name='{}-{}'.format(self.__class__.__name__,
+ device_id))
+
+ def _cancel_deferred(self):
+ d1, self._deferred = self._deferred, None
+ d2, self._task_deferred = self._task_deferred, None
+
+ for d in [d1, d2]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def _cancel_tasks(self):
+ task, self._current_task = self._current_task, None
+ if task is not None:
+ task.stop()
+
+ def __str__(self):
+ return 'OnuOmciCapabilities: Device ID: {}, State:{}'.format(self._device_id, self.state)
+
+ def delete(self):
+ """
+ Cleanup any state information
+ """
+ self.stop()
+
+ @property
+ def device_id(self):
+ return self._device_id
+
+ @property
+ def supported_managed_entities(self):
+ """
+ Return a set of the Managed Entity class IDs supported on this ONU
+ None is returned if no MEs have been discovered
+
+ :return: (set of ints)
+ """
+ return self._supported_entities if len(self._supported_entities) else None
+
+ @property
+ def supported_message_types(self):
+ """
+ Return a set of the Message Types supported on this ONU
+ None is returned if no message types have been discovered
+
+ :return: (set of EntityOperations)
+ """
+ return self._supported_msg_types if len(self._supported_msg_types) else None
+
+ @property
+ def advertise_events(self):
+ return self._advertise_events
+
+ @advertise_events.setter
+ def advertise_events(self, value):
+ if not isinstance(value, bool):
+ raise TypeError('Advertise event is a boolean')
+ self._advertise_events = value
+
+ def advertise(self, event, info):
+ """Advertise an event on the OpenOMCI event bus"""
+ from datetime import datetime
+
+ if self._advertise_events:
+ self._agent.advertise(event,
+ {
+ 'state-machine': self.machine.name,
+ 'info': info,
+ 'time': str(datetime.utcnow())
+ })
+
+ def on_enter_disabled(self):
+ """
+ State machine is being stopped
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+ self._cancel_tasks()
+
+ self._supported_entities = frozenset()
+ self._supported_msg_types = frozenset()
+
+ # Drop Response and Autonomous notification subscriptions
+ for event, sub in self._subscriptions.iteritems():
+ if sub is not None:
+ self._subscriptions[event] = None
+ self._device.event_bus.unsubscribe(sub)
+
+ def on_enter_out_of_sync(self):
+ """
+ State machine has just started or the MIB database has transitioned
+ to an out-of-synchronization state
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+ self._device = self._agent.get_device(self._device_id)
+
+ # Subscribe to events of interest
+ try:
+ for event, sub in self._sub_mapping.iteritems():
+ if self._subscriptions[event] is None:
+ self._subscriptions[event] = \
+ self._device.event_bus.subscribe(
+ topic=OnuDeviceEntry.event_bus_topic(self._device_id,
+ event),
+ callback=sub)
+
+ except Exception as e:
+ self.log.exception('subscription-setup', e=e)
+
+ # Periodically check/poll for in-sync in case subscription was missed or
+ # already in sync
+ self._deferred = reactor.callLater(0, self.check_in_sync)
+
+ def check_in_sync(self):
+ if self._device.mib_db_in_sync:
+ self.synchronized()
+ else:
+ self._deferred = reactor.callLater(self._timeout_delay,
+ self.check_in_sync)
+
+ def on_enter_in_sync(self):
+ """
+ State machine has just transitioned to an in-synchronization state
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+
+ def success(results):
+ self.log.debug('capabilities-success', results=results)
+ self._supported_entities = self._current_task.supported_managed_entities
+ self._supported_msg_types = self._current_task.supported_message_types
+ self._current_task = None
+ self._deferred = reactor.callLater(0, self.success)
+
+ def failure(reason):
+ self.log.info('capabilities-failure', reason=reason)
+ self._supported_entities = frozenset()
+ self._supported_msg_types = frozenset()
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+ # Schedule a task to read the ONU's OMCI capabilities
+ self._current_task = self._get_capabilities_task(self._agent, self._device_id)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_idle(self):
+ """
+ Notify any subscribers for a capabilities event and wait until
+ stopped or ONU MIB database goes out of sync
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+ self._device.publish_omci_capabilities_event()
+
+ def on_mib_sync_event(self, _topic, msg):
+ """
+ Handle In-Sync/Out-of-Sync for the MIB database
+ :param _topic: (str) Subscription topic
+ :param msg: (dict) In-Sync event data
+ """
+ if self._subscriptions.get(OnuDeviceEvents.MibDatabaseSyncEvent) is None:
+ return
+
+ if msg[IN_SYNC_KEY]:
+ self.synchronized()
+ else:
+ self.not_synchronized()
diff --git a/python/adapters/extensions/omci/state_machines/performance_intervals.py b/python/adapters/extensions/omci/state_machines/performance_intervals.py
new file mode 100644
index 0000000..78cfa74
--- /dev/null
+++ b/python/adapters/extensions/omci/state_machines/performance_intervals.py
@@ -0,0 +1,904 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+import arrow
+from transitions import Machine
+from datetime import datetime, timedelta
+from random import uniform, shuffle
+from twisted.internet import reactor
+from common.utils.indexpool import IndexPool
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes
+from voltha.extensions.omci.omci_cc import OmciCCRxEvents, OMCI_CC, TX_REQUEST_KEY, \
+ RX_RESPONSE_KEY
+from voltha.extensions.omci.database.mib_db_api import ATTRIBUTES_KEY
+from voltha.extensions.omci.tasks.omci_get_request import OmciGetRequest
+from voltha.extensions.omci.omci_entities import MacBridgePortConfigurationData
+from voltha.extensions.omci.omci_entities import EthernetPMMonitoringHistoryData, \
+ FecPerformanceMonitoringHistoryData, \
+ XgPonTcPerformanceMonitoringHistoryData, \
+ XgPonDownstreamPerformanceMonitoringHistoryData, \
+ XgPonUpstreamPerformanceMonitoringHistoryData, \
+ EthernetFrameUpstreamPerformanceMonitoringHistoryData, \
+ EthernetFrameDownstreamPerformanceMonitoringHistoryData, \
+ EthernetFrameExtendedPerformanceMonitoring, \
+ EthernetFrameExtendedPerformanceMonitoring64Bit, AniG
+
+
+RxEvent = OmciCCRxEvents
+OP = EntityOperations
+RC = ReasonCodes
+
+
+class PerformanceIntervals(object):
+ """
+ OpenOMCI ONU Performance Monitoring Intervals State machine
+
+ This state machine focuses on L2 Internet Data Service and Classical
+ PM (for the v2.0 release).
+ """
+    DEFAULT_STATES = ['disabled', 'starting', 'synchronize_time', 'idle', 'create_pm_me',
+                      'delete_pm_me', 'collect_data', 'threshold_exceeded', 'rebooted']
+
+ DEFAULT_TRANSITIONS = [
+ {'trigger': 'start', 'source': 'disabled', 'dest': 'starting'},
+ {'trigger': 'tick', 'source': 'starting', 'dest': 'synchronize_time'},
+
+ {'trigger': 'success', 'source': 'synchronize_time', 'dest': 'idle'},
+ {'trigger': 'failure', 'source': 'synchronize_time', 'dest': 'synchronize_time'},
+
+ {'trigger': 'tick', 'source': 'idle', 'dest': 'collect_data'},
+ {'trigger': 'add_me', 'source': 'idle', 'dest': 'create_pm_me'},
+ {'trigger': 'delete_me', 'source': 'idle', 'dest': 'delete_pm_me'},
+
+ # TODO: Can these be combined into one?
+ {'trigger': 'success', 'source': 'create_pm_me', 'dest': 'idle'},
+ {'trigger': 'failure', 'source': 'create_pm_me', 'dest': 'idle'},
+
+ # TODO: Can these be combined into one?
+ {'trigger': 'success', 'source': 'delete_pm_me', 'dest': 'idle'},
+ {'trigger': 'failure', 'source': 'delete_pm_me', 'dest': 'idle'},
+
+ # TODO: Can these be combined into one?
+ {'trigger': 'success', 'source': 'collect_data', 'dest': 'idle'},
+ {'trigger': 'failure', 'source': 'collect_data', 'dest': 'idle'},
+
+ # TODO: Add rebooted event transitions to disabled or synchronize_time
+ # TODO: Need to capture Threshold Crossing Alarms appropriately
+
+ # Do wildcard 'stop' trigger last so it covers all previous states
+ {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+ {'trigger': 'reboot', 'source': '*', 'dest': 'rebooted'},
+ ]
+ DEFAULT_RETRY = 10 # Seconds to delay after task failure/timeout/poll
+ DEFAULT_TICK_DELAY = 15 # Seconds between checks for collection tick
+ DEFAULT_INTERVAL_SKEW = 10 * 60 # Seconds to skew past interval boundary
+ DEFAULT_COLLECT_ATTEMPTS = 3 # Maximum number of collection fetch attempts
+ DEFAULT_CREATE_ATTEMPTS = 15 # Maximum number of attempts to create a PM Managed Entities
+
+ def __init__(self, agent, device_id, tasks,
+ advertise_events=False,
+ states=DEFAULT_STATES,
+ transitions=DEFAULT_TRANSITIONS,
+ initial_state='disabled',
+ timeout_delay=DEFAULT_RETRY,
+ tick_delay=DEFAULT_TICK_DELAY,
+ interval_skew=DEFAULT_INTERVAL_SKEW,
+ collect_attempts=DEFAULT_COLLECT_ATTEMPTS,
+ create_attempts=DEFAULT_CREATE_ATTEMPTS):
+ """
+ Class initialization
+
+ :param agent: (OpenOmciAgent) Agent
+ :param device_id: (str) ONU Device ID
+ :param tasks: (dict) Tasks to run
+ :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+ :param states: (list) List of valid states
+ :param transitions: (dict) Dictionary of triggers and state changes
+ :param initial_state: (str) Initial state machine state
+ :param timeout_delay: (int/float) Number of seconds after a timeout to pause
+ :param tick_delay: (int/float) Collection poll check delay while idle
+ :param interval_skew: (int/float) Seconds to randomly skew the next interval
+ collection to spread out requests for PM intervals
+ :param collect_attempts: (int) Max requests for a single PM interval before fail
+ :param create_attempts: (int) Max attempts to create PM Managed entities before stopping state machine
+ """
+ self.log = structlog.get_logger(device_id=device_id)
+
+ self._agent = agent
+ self._device_id = device_id
+ self._device = None
+ self._pm_config = None
+ self._timeout_delay = timeout_delay
+ self._tick_delay = tick_delay
+ self._interval_skew = interval_skew
+ self._collect_attempts = collect_attempts
+ self._create_attempts = create_attempts
+
+ self._sync_time_task = tasks['sync-time']
+ self._get_interval_task = tasks['collect-data']
+ self._create_pm_task = tasks['create-pm']
+ self._delete_pm_task = tasks['delete-pm']
+ self._advertise_events = advertise_events
+
+ self._omci_cc_subscriptions = { # RxEvent.enum -> Subscription Object
+ RxEvent.MIB_Reset: None,
+ RxEvent.Create: None,
+ RxEvent.Delete: None
+ }
+ self._omci_cc_sub_mapping = {
+ RxEvent.MIB_Reset: self.on_mib_reset_response,
+ RxEvent.Create: self.on_create_response,
+ RxEvent.Delete: self.on_delete_response,
+ }
+ self._me_watch_list = {
+ MacBridgePortConfigurationData.class_id: {
+ 'create-delete': self.add_remove_enet_frame_pm,
+ 'instances': dict() # BP entity_id -> (PM class_id, PM entity_id)
+ }
+ }
+ self._deferred = None
+ self._task_deferred = None
+ self._current_task = None
+ self._add_me_deferred = None
+ self._delete_me_deferred = None
+ self._next_interval = None
+ self._enet_entity_id = IndexPool(1024, 1)
+ self._add_pm_me_retry = 0
+
+ # (Class ID, Instance ID) -> Collect attempts remaining
+ self._pm_me_collect_retries = dict()
+ self._pm_me_extended_info = dict()
+ self._add_pm_me = dict() # (pm cid, pm eid) -> (me cid, me eid, upstream)
+ self._del_pm_me = set()
+
+ # Pollable PM items
+ # Note that some items the KPI extracts are not listed below. These are the
+ # administrative states, operational states, and sensed ethernet type. The values
+ # in the MIB database should be accurate for these items.
+
+ self._ani_g_items = ["optical_signal_level", "transmit_optical_level"]
+ self._next_poll_time = datetime.utcnow()
+ self._poll_interval = 60 # TODO: Fixed at once a minute
+
+ # Statistics and attributes
+        # TODO: add any others that would support problem diagnosis
+
+ # Set up state machine to manage states
+ self.machine = Machine(model=self, states=states,
+ transitions=transitions,
+ initial=initial_state,
+ queued=True,
+ ignore_invalid_triggers=True,
+ name='{}-{}'.format(self.__class__.__name__,
+ device_id))
+ try:
+ import logging
+ logging.getLogger('transitions').setLevel(logging.WARNING)
+ except Exception as e:
+ self.log.exception('log-level-failed', e=e)
+
+
+ def _cancel_deferred(self):
+ d1, self._deferred = self._deferred, None
+ d2, self._task_deferred = self._task_deferred, None
+ d3, self._add_me_deferred = self._add_me_deferred, None
+ d4, self._delete_me_deferred = self._delete_me_deferred, None
+
+ for d in [d1, d2, d3, d4]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def _cancel_tasks(self):
+ task, self._current_task = self._current_task, None
+ if task is not None:
+ task.stop()
+
+ def __str__(self):
+ return 'PerformanceIntervals: Device ID: {}, State:{}'.format(self._device_id,
+ self.state)
+
+ def delete(self):
+ """
+ Cleanup any state information
+ """
+ self.stop()
+
+ @property
+ def device_id(self):
+ return self._device_id
+
+ @property
+ def advertise_events(self):
+ return self._advertise_events
+
+ @advertise_events.setter
+ def advertise_events(self, value):
+ if not isinstance(value, bool):
+ raise TypeError('Advertise event is a boolean')
+ self._advertise_events = value
+
+ def advertise(self, event, info):
+ """Advertise an event on the OpenOMCI event bus"""
+ if self._advertise_events:
+ self._agent.advertise(event,
+ {
+ 'state-machine': self.machine.name,
+ 'info': info,
+ 'time': str(datetime.utcnow()),
+ 'next': str(self._next_interval)
+ })
+
+ def set_pm_config(self, pm_config):
+ """
+ Set PM interval configuration
+
+ :param pm_config: (OnuPmIntervalMetrics) PM Interval configuration
+ :return:
+ """
+ self._pm_config = pm_config
+
+ def _me_is_supported(self, class_id):
+ """
+ Check to see if ONU supports this ME
+ :param class_id: (int) ME Class ID
+ :return: (bool) If ME is supported
+ """
+ #
+ supported = self._device.omci_capabilities.supported_managed_entities
+ return class_id in supported if supported is not None else False
+
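+    # Example (assumes the ONU capabilities have already been retrieved):
+    #
+    #     self._me_is_supported(FecPerformanceMonitoringHistoryData.class_id)
+    #
+    # returns True only if the ONU reported that class ID in its OMCI capabilities.
+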
+ def add_pm_me(self, pm_class_id, pm_entity_id, cid=0, eid=0, upstream=False):
+ """
+ Add a new Performance Monitoring ME.
+
+        The ME ID will be added to an internal list and the ME will be created the
+        next time the idle state is reached. An 'add_pm_me' trigger will be raised
+        if the state machine is already in the idle state.
+
+ :param pm_class_id: (int) ME Class ID (1..0xFFFE)
+ :param pm_entity_id: (int) Instance ID (1..0xFFFE)
+ :param cid: (int) Class ID of entity monitored, may be None
+ :param eid: (int) Instance ID of entity monitored, may be None
+ :param upstream: (bool): Flag indicating if PM is for upstream traffic
+ """
+ if not isinstance(pm_class_id, int):
+            raise TypeError('PM ME Class ID is an integer')
+ if not 0 < pm_class_id < 0xFFFF:
+            raise ValueError('PM ME Class ID must be 1..65534')
+
+ # Check to see if ONU supports this ME
+ if not self._me_is_supported(pm_class_id):
+ self.log.warn('unsupported-PM-me', class_id=pm_class_id)
+ return
+
+ key = (pm_class_id, pm_entity_id)
+ entry = (cid, eid, upstream)
+
+ if key not in self._pm_me_collect_retries and key not in self._add_pm_me:
+ self._add_pm_me[key] = entry
+
+ if self._add_me_deferred is None:
+ self._add_me_deferred = reactor.callLater(0, self.add_me)
+
+ if (pm_class_id, pm_entity_id) in self._del_pm_me:
+ self._del_pm_me.remove((pm_class_id, pm_entity_id))
+
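+    # Typical registration (illustrative; the entity IDs are hypothetical). A handler
+    # that wants upstream Ethernet frame PM on a MAC bridge port might call:
+    #
+    #     pm.add_pm_me(EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id,
+    #                  pm_entity_id=0x102,
+    #                  cid=MacBridgePortConfigurationData.class_id,
+    #                  eid=0x102,
+    #                  upstream=True)
+    #
+    # The ME itself is created by the create-pm task the next time the machine is idle.
+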
+ def delete_pm_me(self, class_id, entity_id):
+ """
+        Remove an existing Performance Monitoring ME.
+
+        The ME ID will be added to an internal list and the ME will be removed the
+        next time the idle state is reached. A 'delete_pm_me' trigger will be raised
+        if the state machine is already in the idle state.
+
+ :param class_id: (int) ME Class ID (1..0xFFFE)
+ :param entity_id: (int) Instance ID (1..0xFFFE)
+ """
+ if not isinstance(class_id, int):
+ raise TypeError('PM ME Class ID is an integer')
+ if not 0 < class_id < 0xFFFF:
+ raise ValueError('PM ME Class ID must be 1..65534')
+
+ # Check to see if ONU supports this ME
+ if not self._me_is_supported(class_id):
+ self.log.warn('unsupported-PM-me', class_id=class_id)
+ return
+
+ key = (class_id, entity_id)
+
+ if key in self._pm_me_collect_retries and key not in self._del_pm_me:
+ self._del_pm_me.add(key)
+
+ if self._delete_me_deferred is None:
+ self._delete_me_deferred = reactor.callLater(0, self.delete_me)
+
+ if key in self._add_pm_me:
+ self._add_pm_me.pop(key)
+
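+    # The matching teardown (illustrative), typically issued when the monitored
+    # bridge port goes away:
+    #
+    #     pm.delete_pm_me(EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id,
+    #                     0x102)
+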
+ def on_enter_disabled(self):
+ """
+ State machine is being stopped
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+ self._cancel_tasks()
+ self._next_interval = None
+
+ # Drop OMCI ME Response subscriptions
+ for event, sub in self._omci_cc_subscriptions.iteritems():
+ if sub is not None:
+ self._omci_cc_subscriptions[event] = None
+ self._device.omci_cc.event_bus.unsubscribe(sub)
+
+        # Manually remove ANI/PON and UNI PM interval MEs
+ config = self._device.configuration
+ anis = config.ani_g_entities
+ unis = config.uni_g_entities
+
+ if anis is not None:
+ for entity_id in anis.iterkeys():
+ self.delete_pm_me(FecPerformanceMonitoringHistoryData.class_id, entity_id)
+ self.delete_pm_me(XgPonTcPerformanceMonitoringHistoryData.class_id, entity_id)
+ self.delete_pm_me(XgPonDownstreamPerformanceMonitoringHistoryData.class_id, entity_id)
+ self.delete_pm_me(XgPonUpstreamPerformanceMonitoringHistoryData.class_id, entity_id)
+
+ if unis is not None:
+ for entity_id in config.uni_g_entities.iterkeys():
+ self.delete_pm_me(EthernetPMMonitoringHistoryData.class_id, entity_id)
+
+ def on_enter_starting(self):
+ """ Add the PON/ANI and UNI PM intervals"""
+ self.advertise(OpenOmciEventType.state_change, self.state)
+
+ self._device = self._agent.get_device(self._device_id)
+ self._cancel_deferred()
+
+ # Set up OMCI ME Response subscriptions
+ try:
+ for event, sub in self._omci_cc_sub_mapping.iteritems():
+ if self._omci_cc_subscriptions[event] is None:
+ self._omci_cc_subscriptions[event] = \
+ self._device.omci_cc.event_bus.subscribe(
+ topic=OMCI_CC.event_bus_topic(self._device_id, event),
+ callback=sub)
+
+ except Exception as e:
+ self.log.exception('omci-cc-subscription-setup', e=e)
+
+ try:
+ # Manually start some ANI/PON and UNI PM interval MEs
+ config = self._device.configuration
+ anis = config.ani_g_entities
+ unis = config.uni_g_entities
+
+ if anis is not None:
+ for entity_id in anis.iterkeys():
+ self.add_pm_me(FecPerformanceMonitoringHistoryData.class_id,
+ entity_id)
+ self.add_pm_me(XgPonTcPerformanceMonitoringHistoryData.class_id,
+ entity_id)
+ self.add_pm_me(XgPonDownstreamPerformanceMonitoringHistoryData.class_id,
+ entity_id)
+ self.add_pm_me(XgPonUpstreamPerformanceMonitoringHistoryData.class_id,
+ entity_id)
+
+ if unis is not None:
+ for entity_id in config.uni_g_entities.iterkeys():
+ self.add_pm_me(EthernetPMMonitoringHistoryData.class_id, entity_id)
+
+ # Look for existing instances of dynamically created ME's that have PM
+ # associated with them and add them now
+ for class_id in self._me_watch_list.iterkeys():
+ instances = {k: v for k, v in
+ self._device.query_mib(class_id=class_id).items()
+ if isinstance(k, int)}
+
+ for entity_id, data in instances.items():
+ method = self._me_watch_list[class_id]['create-delete']
+ cid, eid = method(None, class_id, entity_id,
+ add=True, attributes=data[ATTRIBUTES_KEY])
+ if cid > 0:
+ # BP entity_id -> (PM class_id, PM entity_id)
+ instances = self._me_watch_list[class_id]['instances']
+ instances[entity_id] = (cid, eid)
+
+ except Exception as e:
+ self.log.exception('pm-me-setup', class_id=class_id, e=e)
+
+        # Go to the synchronize_time state
+ self._deferred = reactor.callLater(0, self.tick)
+
+ def on_enter_synchronize_time(self):
+ """
+ State machine has just transitioned to the synchronize_time state
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+
+ def success(_results):
+ self.log.debug('sync-time-success')
+ self._current_task = None
+ self._deferred = reactor.callLater(0, self.success)
+ # Calculate next interval time
+ self._next_interval = self.get_next_interval
+
+ def failure(reason):
+ self.log.info('sync-time-failure', reason=reason)
+ self._current_task = None
+ self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+ # Schedule a task to set the ONU time
+ self._current_task = self._sync_time_task(self._agent, self._device_id)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_idle(self):
+ """
+ State machine has just transitioned to the idle state
+
+ In this state, any added PM MEs that need to be created will be.
+ TODO: some non-interval PM stats (if there are any) are collected here
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+
+ if len(self._del_pm_me) and self._delete_me_deferred is None:
+ self._delete_me_deferred = reactor.callLater(0, self.delete_me)
+
+ elif len(self._add_pm_me) and self._add_me_deferred is None:
+ self._add_me_deferred = reactor.callLater(0, self.add_me)
+
+ elif datetime.utcnow() >= self._next_poll_time:
+ def success(results):
+ self._device.timestamp = arrow.utcnow().float_timestamp
+ self._device.mib_synchronizer.mib_set(results.me_class.class_id,
+ results.entity_id,
+ results.attributes)
+ self._next_poll_time = datetime.utcnow() + timedelta(seconds=self._poll_interval)
+
+ def failure(reason):
+ self.log.info('poll-failure', reason=reason)
+ self._device.timestamp = None
+ return None
+
+ # Scan all ANI-G ports
+ ani_g_entities = self._device.configuration.ani_g_entities
+ ani_g_entities_ids = ani_g_entities.keys() if ani_g_entities is not None else None
+
+ if ani_g_entities_ids is not None and len(ani_g_entities_ids):
+ for entity_id in ani_g_entities_ids:
+ task = OmciGetRequest(self._agent, self.device_id,
+ AniG, entity_id,
+ self._ani_g_items, allow_failure=True)
+ self._task_deferred = self._device.task_runner.queue_task(task)
+ self._task_deferred.addCallbacks(success, failure)
+ else:
+ self.log.warn('poll-pm-no-anis')
+ self._next_poll_time = datetime.utcnow() + timedelta(seconds=self._poll_interval)
+
+ # TODO: Compute a better mechanism than just polling here, perhaps based on
+ # the next time to fetch data for 'any' interval
+ self._deferred = reactor.callLater(self._tick_delay, self.tick)
+
+ def on_enter_create_pm_me(self):
+ """
+ State machine has just transitioned to the create_pm_me state
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+ self._cancel_tasks()
+ mes, self._add_pm_me = self._add_pm_me, dict()
+
+ def success(results):
+ self.log.debug('create-me-success', results=results)
+
+ # Check if already here. The create request could have received
+ # an already-exists status code which we consider successful
+ for pm, me in mes.items():
+ self._pm_me_collect_retries[pm] = self.pm_collected(pm)
+ self._pm_me_extended_info[pm] = me
+
+ self._current_task = None
+ self._deferred = reactor.callLater(0, self.success)
+
+ def failure(reason):
+ self.log.info('create-me-failure', reason=reason, retries=self._add_pm_me_retry)
+ self._current_task = None
+ if self._add_pm_me_retry <= self._create_attempts:
+ for pm, me in mes.items():
+ self._add_pm_me[pm] = me
+ self._add_pm_me_retry += 1
+ self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+ else:
+                # We cannot create any collection MEs; no point in continuing
+ self.log.warn('unable-to-create-pm-me-disabling-collection', reason=reason, device_id=self._device_id)
+ self._deferred = reactor.callLater(self._timeout_delay, self.stop)
+
+ self._current_task = self._create_pm_task(self._agent, self._device_id, mes)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_delete_pm_me(self):
+ """
+ State machine has just transitioned to the delete_pm_me state
+ """
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+ self._cancel_tasks()
+
+ mes, self._del_pm_me = self._del_pm_me, set()
+
+ def success(results):
+ self.log.debug('delete-me-success', results=results)
+ self._current_task = None
+ for me in mes:
+ self._pm_me_collect_retries.pop(me)
+
+ self._deferred = reactor.callLater(0, self.success)
+
+ def failure(reason):
+ self.log.info('delete-me-failure', reason=reason)
+ self._current_task = None
+ for me in mes:
+ self._del_pm_me.add(me)
+
+ self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+ self._current_task = self._delete_pm_task(self._agent, self._device_id, mes)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+
+ def on_enter_collect_data(self):
+ """
+ State machine has just transitioned to the collect_data state
+ """
+
+ if self._next_interval is not None and self._next_interval > datetime.utcnow():
+ self.log.debug('wait-next-interval')
+ # Not ready for next interval, transition back to idle and we should get
+ # called again after a short delay
+ reactor.callLater(0, self.success)
+ return
+
+ self.advertise(OpenOmciEventType.state_change, self.state)
+ self._cancel_deferred()
+ self._cancel_tasks()
+ keys = self._pm_me_collect_retries.keys()
+ shuffle(keys)
+
+ for key in keys:
+ class_id = key[0]
+ entity_id = key[1]
+
+ self.log.debug("in-enter-collect-data", data_key=key,
+ retries=self._pm_me_collect_retries[key])
+
+ # Collect the data ?
+ if self._pm_me_collect_retries[key] > 0:
+ def success(results):
+ self.log.debug('collect-success', results=results,
+ class_id=results.get('class_id'),
+ entity_id=results.get('entity_id'))
+ self._current_task = None
+ self._pm_me_collect_retries[key] = 0
+ self._deferred = reactor.callLater(0, self.success)
+ return results
+
+ def failure(reason):
+ self.log.info('collect-failure', reason=reason)
+ self._current_task = None
+ self._pm_me_collect_retries[key] -= 1
+ self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+ return reason # Halt callback processing
+
+ # start the task
+ if key in self._pm_me_extended_info:
+ self.log.debug('collect-extended-info-found', data_key=key,
+ extended_info=self._pm_me_extended_info[key])
+ parent_class_id = self._pm_me_extended_info[key][0]
+ parent_entity_id = self._pm_me_extended_info[key][1]
+ upstream = self._pm_me_extended_info[key][2]
+ else:
+ self.log.debug('collect-extended-info-not-found', data_key=key)
+ parent_class_id = None
+ parent_entity_id = None
+ upstream = None
+
+ self._current_task = self._get_interval_task(self._agent, self._device_id,
+ class_id, entity_id,
+ parent_class_id=parent_class_id,
+ parent_entity_id=parent_entity_id,
+ upstream=upstream)
+ self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+ self._task_deferred.addCallbacks(success, failure)
+ self._task_deferred.addCallback(self.publish_data)
+ return
+
+ # Here if all intervals have been collected (we are up to date)
+ self._next_interval = self.get_next_interval
+ self.log.debug('collect-calculate-next', next=self._next_interval)
+
+ self._pm_me_collect_retries = dict.fromkeys(self._pm_me_collect_retries, self._collect_attempts)
+ reactor.callLater(0, self.success)
+
+ def on_enter_threshold_exceeded(self):
+ """
+ State machine has just transitioned to the threshold_exceeded state
+ """
+ pass # TODO: Not sure if we want this state. Need to get alarm synchronizer working first
+
+ @property
+ def get_next_interval(self):
+ """
+ Determine the time for the next interval collection for all of this
+ ONUs PM Intervals. Earliest fetch time is at least 1 minute into the
+ next interval.
+
+ :return: (datetime) UTC time to get the next interval
+ """
+ now = datetime.utcnow()
+
+ # Get delta seconds to at least 1 minute into next interval
+ next_delta_secs = (16 - (now.minute % 15)) * 60
+ next_interval = now + timedelta(seconds=next_delta_secs)
+
+        # NOTE: On the very first pass (no interval scheduled yet), collect right
+        #       after initial code startup/mib-sync. This is primarily for debugging.
+ if self._next_interval is None:
+ return now # Do it now (just for debugging purposes)
+
+ # Skew the next time up to the maximum specified
+ # TODO: May want to skew in a shorter range and select the minute
+ # based off some device property value to make collection a
+ # little more predictable on a per-ONU basis.
+ return next_interval + timedelta(seconds=uniform(0, self._interval_skew))
+
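+    # Worked example of the calculation above (times are illustrative): at 10:07:00 UTC,
+    # now.minute % 15 == 7, so next_delta_secs == (16 - 7) * 60 == 540 and the base fetch
+    # time is 10:16:00, one minute past the 10:15 interval boundary, plus a random skew
+    # of up to self._interval_skew seconds.
+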
+ def pm_collected(self, key):
+ """
+ Query database and determine if PM data needs to be collected for this ME
+ """
+ class_id = key[0]
+ entity_id = key[1]
+
+ return self._collect_attempts # TODO: Implement persistent storage
+
+ def publish_data(self, results):
+ """
+ Publish the PM interval results on the appropriate bus. The results are
+ a dictionary with the following format.
+
+ 'class-id': (int) ME Class ID,
+ 'entity-id': (int) ME Entity ID,
+ 'me-name': (str) ME Class name, # Mostly for debugging...
+ 'interval-end-time': None,
+ 'interval-utc-time': (DateTime) UTC time when retrieved from ONU,
+
+ Counters added here as they are retrieved with the format of
+ 'counter-attribute-name': value (int)
+
+ :param results: (dict) PM results
+ """
+ self.log.debug('collect-publish', results=results)
+
+ if self._pm_config is not None:
+ self._pm_config.publish_metrics(results)
+
+ pass # TODO: Save off last time interval fetched to persistent storage?
+
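+    # Shape of a published result, per the docstring above (the class ID, ME name, and
+    # counter names/values are illustrative only):
+    #
+    #     {
+    #         'class-id': 322,
+    #         'entity-id': 0x102,
+    #         'me-name': 'EthernetFrameUpstreamPerformanceMonitoringHistoryData',
+    #         'interval-end-time': None,
+    #         'interval-utc-time': <UTC DateTime when retrieved>,
+    #         'drop_events': 0,
+    #         'octets': 123456
+    #     }
+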
+ def on_mib_reset_response(self, _topic, msg):
+ """
+ Called upon receipt of a MIB Reset Response for this ONU
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-mib-reset-response', state=self.state)
+ try:
+ response = msg[RX_RESPONSE_KEY]
+ omci_msg = response.fields['omci_message'].fields
+ status = omci_msg['success_code']
+
+ if status == RC.Success:
+ for class_id in self._me_watch_list.iterkeys():
+ # BP entity_id -> (PM class_id, PM entity_id)
+ instances = self._me_watch_list[class_id]['instances']
+ for _, me_pair in instances.items():
+ self._me_watch_list[class_id]['create-delete'](None, me_pair[0],
+ me_pair[1], add=False)
+ self._me_watch_list[class_id]['instances'] = dict()
+
+ except KeyError:
+ pass # NOP
+
+ def on_create_response(self, _topic, msg):
+ """
+ Called upon receipt of a Create Response for this ONU.
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-create-response', state=self.state)
+
+ def valid_request(stat, c_id, e_id):
+ return self._omci_cc_subscriptions[RxEvent.Delete] is not None\
+ and stat in (RC.Success, RC.InstanceExists) \
+ and c_id in self._me_watch_list.keys() \
+ and e_id not in self._me_watch_list[c_id]['instances']
+
+ response = msg[RX_RESPONSE_KEY]
+ omci = response.fields['omci_message'].fields
+ class_id = omci['entity_class']
+ entity_id = omci['entity_id']
+ status = omci['success_code']
+
+ if valid_request(status, class_id, entity_id):
+ request = msg[TX_REQUEST_KEY]
+ method = self._me_watch_list[class_id]['create-delete']
+ cid, eid = method(request, class_id, entity_id, add=True)
+
+ if cid > 0:
+ # BP entity_id -> (PM class_id, PM entity_id)
+ instances = self._me_watch_list[class_id]['instances']
+ instances[entity_id] = (cid, eid)
+
+ def on_delete_response(self, _topic, msg):
+ """
+ Called upon receipt of a Delete Response for this ONU
+
+ :param _topic: (str) OMCI-RX topic
+ :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+ """
+ self.log.debug('on-delete-response', state=self.state)
+
+ def valid_request(stat, cid, eid):
+ return self._omci_cc_subscriptions[RxEvent.Delete] is not None\
+ and stat in (RC.Success, RC.UnknownInstance) \
+ and cid in self._me_watch_list.keys() \
+ and eid in self._me_watch_list[cid]['instances']
+
+ response = msg[RX_RESPONSE_KEY]
+ omci = response.fields['omci_message'].fields
+ class_id = omci['entity_class']
+ entity_id = omci['entity_id']
+ status = omci['success_code']
+
+ if valid_request(status, class_id, entity_id):
+ request = msg[TX_REQUEST_KEY]
+ method = self._me_watch_list[class_id]['create-delete']
+
+ method(request, class_id, entity_id, add=False)
+ # BP entity_id -> (PM class_id, PM entity_id)
+ instances = self._me_watch_list[class_id]['instances']
+ del instances[entity_id]
+
+ def get_pm_entity_id_for_add(self, pm_cid, eid):
+ """
+ Select the Entity ID to use for a specific PM Class ID. For extended
+ PM ME's, an entity id (>0) is allocated
+
+ :param pm_cid: (int) PM ME Class ID to create/get entry ID for
+ :param eid: (int) Reference class's entity ID. Used as PM entity ID for non-
+ extended PM history PMs
+ :return: (int) Entity ID to use
+ """
+ if pm_cid in (EthernetFrameExtendedPerformanceMonitoring.class_id,
+ EthernetFrameExtendedPerformanceMonitoring64Bit.class_id):
+ return self._enet_entity_id.get_next()
+ return eid
+
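+    # For the classical (non-extended) history MEs the PM instance simply shadows the
+    # monitored entity, e.g. (hypothetical ID):
+    #
+    #     self.get_pm_entity_id_for_add(
+    #         EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id, 0x102)
+    #     # -> 0x102
+    #
+    # whereas the extended monitoring classes draw a fresh entity ID from the IndexPool.
+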
+ def release_pm_entity_id(self, pm_cid, eid):
+ if pm_cid in (EthernetFrameExtendedPerformanceMonitoring.class_id,
+ EthernetFrameExtendedPerformanceMonitoring64Bit.class_id):
+ try:
+ self._enet_entity_id.release(eid)
+ except:
+ pass
+
+ def add_remove_enet_frame_pm(self, request, class_id, entity_id,
+ add=True,
+ attributes=None):
+ """
+ Add/remove PM for the dynamic MAC Port configuration data.
+
+ This can be called in a variety of ways:
+
+ o If from an Response event from OMCI_CC, the request will contain
+ the original create/delete request. The class_id and entity_id will
+ be the MAC Data Configuration Data class and instance ID.
+ add = True if create, False if delete
+
+ o If starting up (and the associated ME is already created), the MAC
+ Data Configuration Data class and instance ID, and attributes are
+ provided. request = None and add = True
+
+ o If cleaning up (stopping), the PM ME class_id, entity_id are provided.
+ request = None and add = False
+
+        :return: (int, int) PM ME class_id and entity_id for which the add/remove was
+                 performed. Class and entity IDs are non-zero on success
+ """
+ pm_entity_id = 0
+ cid = 0
+ eid = 0
+ upstream = False
+
+ def tp_type_to_pm(tp):
+ # TODO: Support 64-bit extended Monitoring MEs.
+ # This will result in the need to maintain entity IDs of PMs differently
+ upstream_types = [ # EthernetFrameExtendedPerformanceMonitoring64Bit.class_id,
+ EthernetFrameExtendedPerformanceMonitoring.class_id,
+ EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id], True
+ downstream_types = [ # EthernetFrameExtendedPerformanceMonitoring64Bit.class_id,
+ EthernetFrameExtendedPerformanceMonitoring.class_id,
+ EthernetFrameDownstreamPerformanceMonitoringHistoryData.class_id], False
+ return {
+ 1: downstream_types,
+ 3: upstream_types,
+ 5: downstream_types,
+ 6: downstream_types,
+ }.get(tp, None)
+
+ if request is not None:
+ assert class_id == MacBridgePortConfigurationData.class_id
+
+ # Is this associated with the ANI or the UNI side of the bridge?
+ # For VOLTHA v2.0, only high-speed internet data service is
+ attributes = request.fields['omci_message'].fields['data']
+ pm_class_ids, upstream = tp_type_to_pm(attributes['tp_type'])
+ cid = request.fields['omci_message'].fields['entity_class']
+ eid = request.fields['omci_message'].fields['entity_id']
+ if not add:
+ instances = self._me_watch_list[cid]['instances']
+ _, pm_entity_id = instances.get(eid, (None, None))
+
+ elif add:
+ assert class_id == MacBridgePortConfigurationData.class_id
+ assert isinstance(attributes, dict)
+
+ # Is this associated with the ANI or the UNI side of the bridge?
+ pm_class_ids, upstream = tp_type_to_pm(attributes.get('tp_type'))
+ cid = class_id
+ eid = entity_id
+
+ else:
+ assert class_id in (EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id,
+ EthernetFrameDownstreamPerformanceMonitoringHistoryData.class_id,
+ EthernetFrameExtendedPerformanceMonitoring.class_id,
+ EthernetFrameExtendedPerformanceMonitoring64Bit.class_id)
+ pm_class_ids = [class_id]
+
+ if pm_class_ids is None:
+ return False # Unable to select a supported ME for this ONU
+
+ if add:
+ for pm_class_id in pm_class_ids:
+ if self._me_is_supported(pm_class_id):
+ pm_entity_id = self.get_pm_entity_id_for_add(pm_class_id, eid)
+ self.add_pm_me(pm_class_id, pm_entity_id, cid=cid, eid=eid,
+ upstream=upstream)
+ return pm_class_id, pm_entity_id
+ else:
+ for pm_class_id in pm_class_ids:
+ if self._me_is_supported(pm_class_id):
+ self.delete_pm_me(pm_class_id, pm_entity_id)
+ self.release_pm_entity_id(pm_class_id, pm_entity_id)
+ return pm_class_id, pm_entity_id
+
+ return 0, 0
diff --git a/python/adapters/extensions/omci/tasks/__init__.py b/python/adapters/extensions/omci/tasks/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/extensions/omci/tasks/alarm_resync_task.py b/python/adapters/extensions/omci/tasks/alarm_resync_task.py
new file mode 100644
index 0000000..a16f3a2
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/alarm_resync_task.py
@@ -0,0 +1,393 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, returnValue
+from twisted.internet import reactor
+from common.utils.asleep import asleep
+from voltha.extensions.omci.database.mib_db_dict import *
+from voltha.extensions.omci.omci_defs import AttributeAccess
+from voltha.extensions.omci.database.alarm_db_ext import AlarmDbExternal
+
+AA = AttributeAccess
+
+
+class AlarmCopyException(Exception):
+ pass
+
+
+class AlarmDownloadException(Exception):
+ pass
+
+
+class AlarmResyncException(Exception):
+ pass
+
+
+class AlarmResyncTask(Task):
+ """
+ OpenOMCI ALARM resynchronization Task
+
+    This task gets a copy of the ALARM table from the ONU and compares it to a
+    copy of the local database. When the ALARM Upload command is sent to the ONU,
+    the ONU should make a copy and source the data requested from that snapshot.
+    The ONU can still source AVCs and the OLT can still send config
+    commands to the actual ONU during this time.
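+
+    Typical usage (a sketch under assumptions; queuing through the OpenOMCI task
+    runner and the 'on_diffs'/'on_error' callbacks are illustrative, not part of
+    this change):
+
+        task = AlarmResyncTask(omci_agent, device_id)
+        task.deferred.addCallbacks(on_diffs, on_error)   # fired with None or a diff dict
+        device.task_runner.queue_task(task)              # assumed runner API; or task.start()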
+ """
+ task_priority = Task.DEFAULT_PRIORITY
+ name = "ALARM Resynchronization Task"
+
+ max_retries = 3
+ retry_delay = 7
+
+ max_alarm_upload_next_retries = 3
+ alarm_upload_next_delay = 10 # Max * delay < 60 seconds
+
+ def __init__(self, omci_agent, device_id):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ """
+ super(AlarmResyncTask, self).__init__(AlarmResyncTask.name,
+ omci_agent,
+ device_id,
+ priority=AlarmResyncTask.task_priority,
+ exclusive=False)
+ self._local_deferred = None
+ self._device = omci_agent.get_device(device_id)
+ self._db_active = MibDbVolatileDict(omci_agent)
+ self._db_active.start()
+
+ def cancel_deferred(self):
+ super(AlarmResyncTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start ALARM Re-Synchronization task
+ """
+ super(AlarmResyncTask, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_alarm_resync)
+ self._db_active.start()
+ self._db_active.add(self.device_id)
+
+ def stop(self):
+ """
+ Shutdown ALARM Re-Synchronization task
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ self._device = None
+ self._db_active.stop()
+ self._db_active = None
+ super(AlarmResyncTask, self).stop()
+
+ @inlineCallbacks
+ def perform_alarm_resync(self):
+ """
+ Perform the ALARM Resynchronization sequence
+
+ The sequence to be performed is:
+ - get a copy of the current ALARM database
+
+ - perform ALARM upload commands to get ONU's database and save this
+ to a local DB.
+ During the alarm upload process, the maximum time between alarm upload next
+ requests is 1 minute.
+ """
+ self.log.debug('perform-alarm-resync')
+
+ try:
+ self.strobe_watchdog()
+ results = yield self.snapshot_alarm()
+ olt_db_copy = results[0]
+ number_of_commands = results[1]
+
+ if olt_db_copy is None:
+ e = AlarmCopyException('Failed to get local database copy')
+ self.deferred.errback(failure.Failure(e))
+ else:
+ # Start the ALARM upload sequence, save alarms to the table
+ self.strobe_watchdog()
+
+ if number_of_commands > 0:
+ commands_retrieved = yield self.upload_alarm(number_of_commands)
+ else:
+ commands_retrieved = 0
+
+ if commands_retrieved != number_of_commands:
+ e = AlarmDownloadException('Only retrieved {} of {} instances'.
+ format(commands_retrieved, number_of_commands))
+ self.deferred.errback(failure.Failure(e))
+ else:
+ # Compare the databases
+ onu_db_copy = self._db_active.query(self.device_id)
+
+ on_olt_only, on_onu_only, attr_diffs = \
+ self.compare_mibs(olt_db_copy, onu_db_copy)
+
+                    on_olt_only_diffs = on_olt_only if len(on_olt_only) else None
+                    on_onu_only_diffs = on_onu_only if len(on_onu_only) else None
+                    attr_diffs = attr_diffs if len(attr_diffs) else None
+
+ if all(diff is None for diff in [on_olt_only_diffs, on_onu_only_diffs, attr_diffs]):
+ results = None
+ else:
+ results = {
+ 'onu-only': on_onu_only_diffs,
+ 'olt-only': on_olt_only_diffs,
+ 'attr-diffs': attr_diffs,
+ 'onu-db': onu_db_copy,
+ 'olt-db': olt_db_copy
+ }
+ self.deferred.callback(results)
+
+ except Exception as e:
+ self.log.exception('resync', e=e)
+ self.deferred.errback(failure.Failure(e))
+
+ @inlineCallbacks
+ def snapshot_alarm(self):
+ """
+ Snapshot the ALARM on the ONU and create a copy of our local ALARM database
+
+        :return: (pair) (olt_db_copy, command_sequence_number)
+ """
+ olt_db_copy = None
+ command_sequence_number = None
+
+ try:
+ max_tries = AlarmResyncTask.max_retries - 1
+
+ for retries in xrange(0, max_tries + 1):
+ # Send ALARM Upload so ONU snapshots its ALARM
+ try:
+ command_sequence_number = yield self.send_alarm_upload()
+ self.strobe_watchdog()
+
+ if command_sequence_number is None:
+ if retries >= max_tries:
+ olt_db_copy = None
+ break
+
+ except TimeoutError as e:
+ self.log.warn('timeout', e=e)
+ if retries >= max_tries:
+ raise
+
+ self.strobe_watchdog()
+ yield asleep(AlarmResyncTask.retry_delay)
+ continue
+
+            # Get a snapshot of the local alarm database
+ olt_db_copy = self._device.query_alarm_table()
+ # if we made it this far, no need to keep trying
+ break
+
+ except Exception as e:
+ self.log.exception('alarm-resync', e=e)
+ raise
+
+ # Handle initial failures
+
+ if olt_db_copy is None or command_sequence_number is None:
+ raise AlarmCopyException('Failed to snapshot ALARM copy after {} retries'.
+ format(AlarmResyncTask.max_retries))
+
+ returnValue((olt_db_copy, command_sequence_number))
+
+ @inlineCallbacks
+ def send_alarm_upload(self):
+ """
+ Perform ALARM upload command and get the number of entries to retrieve
+
+ :return: (int) Number of commands to execute or None on error
+ """
+ ########################################
+ # Begin ALARM Upload
+ try:
+ results = yield self._device.omci_cc.send_get_all_alarm()
+ self.strobe_watchdog()
+ command_sequence_number = results.fields['omci_message'].fields['number_of_commands']
+
+ if command_sequence_number < 0:
+ raise ValueError('Number of commands was {}'.format(command_sequence_number))
+
+ returnValue(command_sequence_number)
+
+ except TimeoutError as e:
+ self.log.warn('alarm-resync-get-timeout', e=e)
+ raise
+
+ @inlineCallbacks
+ def upload_alarm(self, command_sequence_number):
+ ########################################
+ # Begin ALARM Upload
+ seq_no = None
+
+ for seq_no in xrange(command_sequence_number):
+ max_tries = AlarmResyncTask.max_alarm_upload_next_retries
+
+ for retries in xrange(0, max_tries):
+ try:
+ response = yield self._device.omci_cc.send_get_all_alarm_next(seq_no)
+ self.strobe_watchdog()
+
+ omci_msg = response.fields['omci_message'].fields
+ alarm_class_id = omci_msg['alarmed_entity_class']
+ alarm_entity_id = omci_msg['alarmed_entity_id']
+
+ alarm_bit_map = omci_msg['alarm_bit_map']
+ attributes = {AlarmDbExternal.ALARM_BITMAP_KEY: alarm_bit_map}
+
+ # Save to the database
+ self._db_active.set(self.device_id, alarm_class_id,
+ alarm_entity_id, attributes)
+ break
+
+ except TimeoutError:
+ self.log.warn('alarm-resync-timeout', seq_no=seq_no,
+ command_sequence_number=command_sequence_number)
+
+ if retries < max_tries - 1:
+ yield asleep(AlarmResyncTask.alarm_upload_next_delay)
+ self.strobe_watchdog()
+ else:
+ raise
+
+ except Exception as e:
+ self.log.exception('resync', e=e, seq_no=seq_no,
+ command_sequence_number=command_sequence_number)
+
+        returnValue(seq_no + 1)  # seq_no is zero-based
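+        # Note (illustrative): each alarm instance retrieved above is stored under
+        # (device_id, alarmed_entity_class, alarmed_entity_id) with a single attribute,
+        # e.g. {AlarmDbExternal.ALARM_BITMAP_KEY: alarm_bit_map}, which compare_mibs()
+        # later diffs against the OLT-side snapshot.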
+
+ def compare_mibs(self, db_copy, db_active):
+ """
+        Compare our db_copy with the ONU's active copy
+
+ :param db_copy: (dict) OpenOMCI's copy of the database
+ :param db_active: (dict) ONU's database snapshot
+        :return: (list), (list), (list) Differences
+ """
+ self.strobe_watchdog()
+
+ # Class & Entities only in local copy (OpenOMCI)
+        on_olt_only = self.get_lhs_only_dict(db_copy, db_active)
+
+ # Class & Entities only on remote (ONU)
+        on_onu_only = self.get_lhs_only_dict(db_active, db_copy)
+
+ # Class & Entities on both local & remote, but one or more attributes
+ # are different on the ONU. This is the value that the local (OpenOMCI)
+ # thinks should be on the remote (ONU)
+
+ me_map = self.omci_agent.get_device(self.device_id).me_map
+ attr_diffs = self.get_attribute_diffs(db_copy, db_active, me_map)
+
+ return on_olt_only, on_onu_only, attr_diffs
+
+    def get_lhs_only_dict(self, lhs, rhs):
+ """
+ Compare two MIB database dictionaries and return the ME Class ID and
+ instances that are unique to the lhs dictionary. Both parameters
+ should be in the common MIB Database output dictionary format that
+ is returned by the mib 'query' command.
+
+ :param lhs: (dict) Left-hand-side argument.
+ :param rhs: (dict) Right-hand-side argument
+
+        return: (list(int,int)) List of (class_id, inst_id) tuples for entries present only in lhs
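+
+        Worked example (illustrative IDs only):
+            lhs = {257: {0: {...}, 1: {...}}, 263: {5: {...}}}
+            rhs = {257: {0: {...}}}
+            -> [(257, 1), (263, 5)]   (entries present only in lhs)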
+ """
+ results = list()
+
+ for cls_id, cls_data in lhs.items():
+ # Get unique classes
+ #
+ # Skip keys that are not class IDs
+ if not isinstance(cls_id, int):
+ continue
+
+ if cls_id not in rhs:
+ results.extend([(cls_id, inst_id) for inst_id in cls_data.keys()
+ if isinstance(inst_id, int)])
+ else:
+ # Get unique instances of a class
+ lhs_cls = cls_data
+ rhs_cls = rhs[cls_id]
+
+ for inst_id, _ in lhs_cls.items():
+ # Skip keys that are not instance IDs
+                    if isinstance(inst_id, int) and inst_id not in rhs_cls:
+ results.extend([(cls_id, inst_id)])
+
+ return results
+
+ def get_attribute_diffs(self, omci_copy, onu_copy, me_map):
+ """
+        Compare two OMCI MIBs and return the ME class and instance IDs that exist
+ on both the local copy and the remote ONU that have different attribute
+ values. Both parameters should be in the common MIB Database output
+ dictionary format that is returned by the mib 'query' command.
+
+ :param omci_copy: (dict) OpenOMCI copy (OLT-side) of the MIB Database
+        :param onu_copy: (dict) Latest copy of the active ONU's database
+ :param me_map: (dict) ME Class ID MAP for this ONU
+
+ return: (list(int,int,str)) List of tuples where (class_id, inst_id, attribute)
+ points to the specific ME instance where attributes
+ are different
+ """
+ results = list()
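+        # Worked example (illustrative; IDs and the attribute name are made up):
+        #   omci_copy = {257: {0: {ATTRIBUTES_KEY: {'alarm_bit_map': 0x00}}}}
+        #   onu_copy  = {257: {0: {ATTRIBUTES_KEY: {'alarm_bit_map': 0x80}}}}
+        #   -> results == [(257, 0, 'alarm_bit_map')]   (same instance, differing value)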
+
+ # Get class ID's that are in both
+ class_ids = {cls_id for cls_id, _ in omci_copy.items()
+ if isinstance(cls_id, int) and cls_id in onu_copy}
+
+ for cls_id in class_ids:
+ # Get unique instances of a class
+ olt_cls = omci_copy[cls_id]
+ onu_cls = onu_copy[cls_id]
+
+ # Get set of common instance IDs
+ inst_ids = {inst_id for inst_id, _ in olt_cls.items()
+ if isinstance(inst_id, int) and inst_id in onu_cls}
+
+ for inst_id in inst_ids:
+ omci_attributes = {k for k in olt_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+ onu_attributes = {k for k in onu_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+
+ # Get attributes that exist in one database, but not the other
+ sym_diffs = (omci_attributes ^ onu_attributes)
+ results.extend([(cls_id, inst_id, attr) for attr in sym_diffs])
+
+ # Get common attributes with different values
+ common_attributes = (omci_attributes & onu_attributes)
+ results.extend([(cls_id, inst_id, attr) for attr in common_attributes
+ if olt_cls[inst_id][ATTRIBUTES_KEY][attr] !=
+ onu_cls[inst_id][ATTRIBUTES_KEY][attr]])
+ return results
diff --git a/python/adapters/extensions/omci/tasks/file_download_task.py b/python/adapters/extensions/omci/tasks/file_download_task.py
new file mode 100755
index 0000000..63da427
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/file_download_task.py
@@ -0,0 +1,108 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, AlreadyCalledError
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes
+import requests
+import os
+import time
+
+class FileDownloadTask(Task):
+ name = "Image File Download Task"
+ CHUNK_SIZE = 1024
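+    # Typical usage (a sketch; 'img_dnld' stands for whatever ImageDownload-like object
+    # the caller supplies -- it only needs .id, .url, .local_dir, .name and .file_size --
+    # and 'on_downloaded'/'on_error' are hypothetical callbacks):
+    #
+    #   task = FileDownloadTask(omci_agent, img_dnld)
+    #   task.deferred.addCallbacks(on_downloaded, on_error)
+    #   task.start()    # downloads the file to local_dir/name in CHUNK_SIZE pieces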
+
+    def __init__(self, omci_agent, img_dnld, clock=None):  # formerly (device_id, url, local_path)
+ super(FileDownloadTask, self).__init__(FileDownloadTask.name, omci_agent, img_dnld.id,
+ exclusive=False,
+ watchdog_timeout=45)
+ # self.url = url
+ # self.local_path = local_path
+ self._image_download = img_dnld
+ self.reactor = clock if clock is not None else reactor
+ self._local_deferred = None
+ # self._request = None
+ # self._file = None
+ # self.log.debug('{} running'.format(FileDownloadTask.name))
+
+ # def __save_data(self):
+ # chunk = self._request.iter_content(chunk_size=FileDownloadTask.CHUNK_SIZE)
+ # if len(chunk) == 0:
+ # self._file.close()
+ # self.deferred.callback(self._image_download)
+ # else:
+ # self._file.write(chunk)
+ # self._image_download.downloaded_bytes += len(chunk)
+ # self.reactor.callLater(0, self.__save_data)
+
+ @inlineCallbacks
+ def perform_download_data(self):
+ try:
+ r = requests.get(self._image_download.url, stream=True)
+ with open(self._image_download.local_dir + '/' + self._image_download.name, 'wb') as f:
+ for chunk in r.iter_content(chunk_size=FileDownloadTask.CHUNK_SIZE):
+ self.strobe_watchdog()
+ if chunk: # filter out keep-alive new chunks
+ yield f.write(chunk)
+ self._image_download.file_size += len(chunk)
+ # yield time.sleep(1)
+ self.deferred.callback(self._image_download)
+ except Exception as e:
+ self.deferred.errback(failure.Failure(e))
+
+ def start(self):
+ super(FileDownloadTask, self).start()
+ if not os.path.exists(self._image_download.local_dir):
+ os.makedirs(self._image_download.local_dir)
+
+ self.strobe_watchdog()
+ self._image_download.file_size = 0
+ self._local_deferred = self.reactor.callLater(0, self.perform_download_data)
+ # try:
+ # if not os.path.exists(self._image_download.local_dir):
+ # os.makedirs(self._image_download.local_dir)
+
+ # self.strobe_watchdog()
+ # self._image_download.downloaded_bytes = 0
+ # self.reactor.callLater(0, self.perform_download_data)
+
+ # self._request = requests.get(self._image_download.url, stream=True)
+ # with open(self._image_download.local_dir + '/' + self._image_download.name, 'wb') as f:
+ # for chunk in r.iter_content(chunk_size=FileDownloadTask.CHUNK_SIZE):
+ # self.strobe_watchdog()
+ # if chunk: # filter out keep-alive new chunks
+ # f.write(chunk)
+ # self._image_download.downloaded_bytes += len(chunk)
+
+ # self.deferred.callback(self._image_download)
+ # except Exception as e:
+ # self.deferred.errback(failure.Failure(e))
+
+ # def stop(self):
+ # # self.cancel_deferred()
+ # super(FileDownloadTask, self).stop()
+
+ def cancel_deferred(self):
+ self.log.debug('FileDownloadTask cancel_deferred')
+ super(FileDownloadTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
diff --git a/python/adapters/extensions/omci/tasks/get_mds_task.py b/python/adapters/extensions/omci/tasks/get_mds_task.py
new file mode 100644
index 0000000..1560c83
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/get_mds_task.py
@@ -0,0 +1,112 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
+from voltha.extensions.omci.omci_me import OntDataFrame
+from voltha.extensions.omci.omci_defs import ReasonCodes as RC
+
+
+class GetMdsTask(Task):
+ """
+ OpenOMCI Get MIB Data Sync value task
+
+ On successful completion, this task will call the 'callback' method of the
+ deferred returned by the start method and return the value of the MIB
+ Data Sync attribute of the ONT Data ME
+ """
+ task_priority = Task.DEFAULT_PRIORITY
+ name = "Get MDS Task"
+
+ def __init__(self, omci_agent, device_id):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ """
+ super(GetMdsTask, self).__init__(GetMdsTask.name,
+ omci_agent,
+ device_id,
+ priority=GetMdsTask.task_priority)
+ self._local_deferred = None
+
+ def cancel_deferred(self):
+ super(GetMdsTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start MIB Synchronization tasks
+ """
+ super(GetMdsTask, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_get_mds)
+
+ def stop(self):
+ """
+ Shutdown MIB Synchronization tasks
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ super(GetMdsTask, self).stop()
+
+ @inlineCallbacks
+ def perform_get_mds(self):
+ """
+ Get the 'mib_data_sync' attribute of the ONU
+ """
+ self.log.debug('perform-get-mds')
+
+ try:
+ device = self.omci_agent.get_device(self.device_id)
+
+ #########################################
+ # Request (MDS supplied value does not matter for a 'get' request)
+
+ self.strobe_watchdog()
+ results = yield device.omci_cc.send(OntDataFrame().get())
+
+ omci_msg = results.fields['omci_message'].fields
+ status = omci_msg['success_code']
+
+ # Note: Currently the data reported by the Scapy decode is 16-bits since we need
+ # the data field that large in order to support MIB and Alarm Upload Next
+ # commands. Select only the first 8-bits since that is the size of the MIB
+ # Data Sync attribute
+ mds = (omci_msg['data']['mib_data_sync'] >> 8) & 0xFF \
+ if 'data' in omci_msg and 'mib_data_sync' in omci_msg['data'] else -1
+
+ self.log.debug('ont-data-mds', status=status, mib_data_sync=mds)
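+            # e.g. (illustrative) a decoded 16-bit value of 0x2A00 yields mds == 0x2A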
+
+ assert status == RC.Success, 'Unexpected Response Status: {}'.format(status)
+
+ # Successful if here
+ self.deferred.callback(mds)
+
+ except TimeoutError as e:
+ self.log.warn('get-mds-timeout', e=e)
+ self.deferred.errback(failure.Failure(e))
+
+ except Exception as e:
+ self.log.exception('get-mds', e=e)
+ self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/extensions/omci/tasks/interval_data_task.py b/python/adapters/extensions/omci/tasks/interval_data_task.py
new file mode 100644
index 0000000..d41c1d0
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/interval_data_task.py
@@ -0,0 +1,198 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from datetime import datetime
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
+from voltha.extensions.omci.omci_defs import ReasonCodes
+from voltha.extensions.omci.omci_frame import OmciFrame, OmciGet
+
+
+class IntervalDataTaskFailure(Exception):
+ pass
+
+
+class IntervalDataTask(Task):
+ """
+ OpenOMCI Performance Interval Get Request
+ """
+ task_priority = Task.DEFAULT_PRIORITY
+ name = "Interval Data Task"
+ max_payload = 29
+
+ def __init__(self, omci_agent, device_id, class_id, entity_id,
+ max_get_response_payload=max_payload,
+ parent_class_id=None,
+ parent_entity_id=None,
+ upstream=None):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param class_id: (int) ME Class ID
+ :param entity_id: (int) ME entity ID
+ :param max_get_response_payload: (int) Maximum number of octets in a
+ single GET response frame
+ """
+ super(IntervalDataTask, self).__init__(IntervalDataTask.name,
+ omci_agent,
+ device_id,
+ priority=IntervalDataTask.task_priority,
+ exclusive=False)
+ self._local_deferred = None
+ self._class_id = class_id
+ self._entity_id = entity_id
+
+ self._parent_class_id = parent_class_id
+ self._parent_entity_id = parent_entity_id
+ self._upstream = upstream
+
+ me_map = self.omci_agent.get_device(self.device_id).me_map
+ if self._class_id not in me_map:
+            msg = "The requested ME Class ({}) does not exist in the ONU's ME Map".format(self._class_id)
+ self.log.warn('unknown-pm-me', msg=msg)
+ raise IntervalDataTaskFailure(msg)
+
+ self._entity = me_map[self._class_id]
+ self._counter_attributes = self.get_counter_attributes_names_and_size()
+ self._max_payload = max_get_response_payload
+
+ def cancel_deferred(self):
+ super(IntervalDataTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start the tasks
+ """
+ super(IntervalDataTask, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_get_interval)
+
+ def stop(self):
+ """
+ Shutdown the tasks
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ super(IntervalDataTask, self).stop()
+
+ def get_counter_attributes_names_and_size(self):
+ """
+ Get all of the counter attributes names and the amount of storage they take
+
+ :return: (dict) Attribute name -> length
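+
+        Illustrative result (the attribute names and 4-octet sizes here are
+        hypothetical examples): {'drop_events': 4, 'octets': 4, 'packets': 4, ...}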
+ """
+ return {name: self._entity.attributes[attr_index].field.sz
+ for name, attr_index in self._entity.attribute_name_to_index_map.items()
+ if self._entity.attributes[attr_index].is_counter}
+
+ @inlineCallbacks
+ def perform_get_interval(self):
+ """
+        Fetch the current interval (history) counter data for this PM ME
+ """
+ self.log.info('perform-get-interval', class_id=self._class_id,
+ entity_id=self._entity_id)
+
+ device = self.omci_agent.get_device(self.device_id)
+ attr_names = self._counter_attributes.keys()
+
+ final_results = {
+ 'class_id': self._class_id,
+ 'entity_id': self._entity_id,
+ 'me_name': self._entity.__name__, # Mostly for debugging...
+ 'interval_utc_time': None,
+ 'parent_class_id': self._parent_class_id,
+ 'parent_entity_id': self._parent_entity_id,
+ 'upstream': self._upstream
+ # Counters added here as they are retrieved
+ }
+ last_end_time = None
+
+ while len(attr_names) > 0:
+ # Get as many attributes that will fit. Always include the 1 octet
+ # Interval End Time Attribute and 2 octets for the Entity ID
+
+ remaining_payload = self._max_payload - 3
+ attributes = list()
+ for name in attr_names:
+ if self._counter_attributes[name] > remaining_payload:
+ break
+
+ attributes.append(name)
+ remaining_payload -= self._counter_attributes[name]
+
+ attr_names = attr_names[len(attributes):]
+ attributes.append('interval_end_time')
+
+ frame = OmciFrame(
+ transaction_id=None,
+ message_type=OmciGet.message_id,
+ omci_message=OmciGet(
+ entity_class=self._class_id,
+ entity_id=self._entity_id,
+ attributes_mask=self._entity.mask_for(*attributes)
+ )
+ )
+ self.log.debug('interval-get-request', class_id=self._class_id,
+ entity_id=self._entity_id)
+ try:
+ self.strobe_watchdog()
+ results = yield device.omci_cc.send(frame)
+
+ omci_msg = results.fields['omci_message'].fields
+ status = omci_msg['success_code']
+ end_time = omci_msg['data'].get('interval_end_time')
+
+ self.log.debug('interval-get-results', class_id=self._class_id,
+ entity_id=self._entity_id, status=status,
+ end_time=end_time)
+
+ if status != ReasonCodes.Success:
+ raise IntervalDataTaskFailure('Unexpected Response Status: {}, Class ID: {}'.
+ format(status, self._class_id))
+ if last_end_time is None:
+ last_end_time = end_time
+
+ elif end_time != last_end_time:
+ msg = 'Interval End Time Changed during retrieval from {} to {}'\
+ .format(last_end_time, end_time)
+ self.log.info('interval-roll-over', msg=msg, class_id=self._class_id)
+ raise IntervalDataTaskFailure(msg)
+
+ final_results['interval_utc_time'] = datetime.utcnow()
+ for attribute in attributes:
+ final_results[attribute] = omci_msg['data'].get(attribute)
+
+            except TimeoutError as e:
+                self.log.warn('interval-get-timeout', e=e, class_id=self._class_id,
+                              entity_id=self._entity_id, attributes=attributes)
+                self.deferred.errback(failure.Failure(e))
+                return      # do not fall through and fire the deferred a second time
+
+            except Exception as e:
+                self.log.exception('interval-get-failure', e=e, class_id=self._class_id)
+                self.deferred.errback(failure.Failure(e))
+                return      # do not fall through and fire the deferred a second time
+
+ # Successful if here
+ self.deferred.callback(final_results)
diff --git a/python/adapters/extensions/omci/tasks/mib_reconcile_task.py b/python/adapters/extensions/omci/tasks/mib_reconcile_task.py
new file mode 100644
index 0000000..38e29dc
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/mib_reconcile_task.py
@@ -0,0 +1,693 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from common.utils.asleep import asleep
+from voltha.extensions.omci.tasks.task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, returnValue, TimeoutError
+from voltha.extensions.omci.omci_defs import *
+from voltha.extensions.omci.omci_me import OntDataFrame
+from voltha.extensions.omci.omci_frame import OmciFrame, OmciDelete, OmciCreate, OmciSet
+from voltha.extensions.omci.database.mib_db_api import ATTRIBUTES_KEY
+
+OP = EntityOperations
+RC = ReasonCodes
+AA = AttributeAccess
+
+
+class MibReconcileException(Exception):
+ pass
+
+
+class MibPartialSuccessException(Exception):
+ pass
+
+
+class MibReconcileTask(Task):
+ """
+ OpenOMCI MIB Reconcile Task
+
+ This task attempts to resynchronize the MIB. Note that it runs in exclusive
+ OMCI-CC mode so that it can query the current database/ONU to verify the
+ differences still exist before correcting them.
+ """
+ task_priority = 240
+ name = "MIB Reconcile Task"
+ max_sequential_db_updates = 5 # Be kind, rewind
+ db_update_pause = 0.05 # 50mS
+
+ def __init__(self, omci_agent, device_id, diffs):
+ """
+ Class initialization
+
+ :param omci_agent: (OpenOMCIAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param diffs: (dict) Dictionary of what was found to be invalid
+ """
+ super(MibReconcileTask, self).__init__(MibReconcileTask.name,
+ omci_agent,
+ device_id,
+ priority=MibReconcileTask.task_priority,
+ exclusive=False)
+ self._local_deferred = None
+ self._diffs = diffs
+ self._device = None
+ self._sync_sm = None
+ self._db_updates = 0 # For tracking sequential blocking consul/etcd updates
+
+ def cancel_deferred(self):
+ super(MibReconcileTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start MIB Reconcile task
+ """
+ super(MibReconcileTask, self).start()
+
+ self._device = self.omci_agent.get_device(self.device_id)
+
+ if self._device is None:
+ e = MibReconcileException('Device {} no longer exists'.format(self.device_id))
+ self.deferred.errback(failure.Failure(e))
+ return
+
+ self._sync_sm = self._device.mib_synchronizer
+
+        if self._sync_sm is None:
+ e = MibReconcileException('Device {} MIB State machine no longer exists'.format(self.device_id))
+ self.deferred.errback(failure.Failure(e))
+ return
+
+ self._local_deferred = reactor.callLater(0, self.perform_mib_reconcile)
+
+ def stop(self):
+ """
+ Shutdown MIB Reconcile task
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ self._device = None
+ super(MibReconcileTask, self).stop()
+
+ @inlineCallbacks
+ def perform_mib_reconcile(self):
+ """
+ Perform the MIB Reconciliation sequence.
+
+ The sequence to reconcile will be to clean up ONU only MEs, followed by
+ OLT/OpenOMCI-only MEs, and then finally correct common MEs with differing
+ attributes.
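+
+        The expected shape of self._diffs is (illustrative):
+            {'onu-only': [(cid, eid), ...] or None,
+             'olt-only': [(cid, eid), ...] or None,
+             'attributes': [(cid, eid, attr_name), ...] or None,
+             'onu-db': {...}, 'olt-db': {...}}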
+ """
+ self.log.debug('perform-mib-reconcile')
+
+ try:
+ successes = 0
+ failures = 0
+
+ if self._diffs['onu-only'] is not None and len(self._diffs['onu-only']):
+ results = yield self.fix_onu_only(self._diffs['onu-only'],
+ self._diffs['onu-db'])
+ self.log.debug('onu-only-results', good=results[0], bad=results[1])
+ successes += results[0]
+ failures += results[1]
+
+ if self._diffs['olt-only'] is not None and len(self._diffs['olt-only']):
+ results = yield self.fix_olt_only(self._diffs['olt-only'],
+ self._diffs['onu-db'],
+ self._diffs['olt-db'])
+ self.log.debug('olt-only-results', good=results[0], bad=results[1])
+ successes += results[0]
+ failures += results[1]
+
+ if self._diffs['attributes'] is not None and len(self._diffs['attributes']):
+ results = yield self.fix_attributes_only(self._diffs['attributes'],
+ self._diffs['onu-db'],
+ self._diffs['olt-db'])
+ self.log.debug('attributes-results', good=results[0], bad=results[1])
+ successes += results[0]
+ failures += results[1]
+
+ # Success? Update MIB-data-sync
+ if failures == 0:
+ results = yield self.update_mib_data_sync()
+ successes += results[0]
+ failures += results[1]
+
+ # Send back final status
+ if failures > 0:
+                msg = '{} Successful updates, {} failures'.format(successes, failures)
+ error = MibPartialSuccessException(msg) if successes \
+ else MibReconcileException(msg)
+ self.deferred.errback(failure.Failure(error))
+ else:
+ self.deferred.callback('{} Successful updates'.format(successes))
+
+ except Exception as e:
+ if not self.deferred.called:
+ self.log.exception('reconcile', e=e)
+ self.deferred.errback(failure.Failure(e))
+
+ @inlineCallbacks
+ def fix_onu_only(self, onu, onu_db):
+ """
+ Fix ME's that were only found on the ONU. For ONU only MEs there are
+ the following things that will be checked.
+
+        o ME's that do not have an OpenOMCI class decoder. These are stored
+          as binary blobs in the MIB database. Since we never set them
+          (there is no encoder either), just store them in the OLT/OpenOMCI MIB
+          Database.
+
+        o For ME's that are created by the ONU (no create/delete access), the
+          MEs 'may' exist due to a firmware upgrade and reboot, or in response to
+          the OLT creating another ME entity which caused the ONU to create this ME.
+          Place these 'new' MEs into the database.
+
+ o For ME's that are created by the OLT/OpenOMCI, delete them from the
+ ONU
+
+ :param onu: (list(int,int)) List of tuples where (class_id, inst_id)
+ :param onu_db: (dict) ONU Database snapshot at time of audit
+
+ :return: (int, int) successes, failures
+ """
+ successes = 0
+ failures = 0
+ me_map = self._device.me_map
+
+ ####################################################################
+ # First the undecodables and onu-created (treated the same)
+ undecodable = self._undecodable(onu, me_map)
+ onu_created = self._onu_created(onu, me_map)
+
+ if len(undecodable) or len(onu_created):
+ results = yield self.fix_onu_only_save_to_db(undecodable, onu_created, onu_db)
+ successes += results[0]
+ failures += results[1]
+
+ ####################################################################
+        # Last, the OLT-created values; remove these from the ONU
+
+ olt_created = self._olt_created(onu, me_map)
+ if len(olt_created):
+ results = yield self.fix_onu_only_remove_from_onu(olt_created)
+ successes += results[0]
+ failures += results[1]
+
+ returnValue((successes, failures))
+
+ @inlineCallbacks
+ def fix_onu_only_save_to_db(self, undecodable, onu_created, onu_db):
+ """
+        These are in the ONU database and need to be saved to the OLT/OpenOMCI database.
+
+        Note that some, perhaps all, of these instances could have been created by the
+        ONU in response to the OLT creating some other ME instance. So treat
+        the database operation as a create.
+ """
+ successes = 0
+ failures = 0
+
+ for cid, eid in undecodable + onu_created:
+ if self.deferred.called: # Check if task canceled
+ break
+ try:
+                # If it is already in the current MIB, an audit issue or some other MIB
+                # operation put it into the database; declare it a failure so we audit again
+ try:
+ olt_entry = self._sync_sm.query_mib(class_id=cid, instance_id=eid)
+
+ except KeyError: # Common for ONU created MEs during audit
+ olt_entry = None
+
+ if olt_entry is not None and len(olt_entry):
+ self.log.debug('onu-only-in-current', cid=cid, eid=eid)
+ failures += 1 # Mark as failure so we audit again
+
+ elif cid not in onu_db:
+ self.log.warn('onu-only-not-in-audit', cid=cid, eid=eid)
+ failures += 1
+
+ else:
+ entry = onu_db[cid][eid]
+ self.strobe_watchdog()
+ self._sync_sm.mib_set(cid, eid, entry[ATTRIBUTES_KEY])
+ successes += 1
+
+                # If we do nothing but DB updates for a lot of MEs, we are
+                # blocking other async twisted tasks; be kind and pause
+ self._db_updates += 1
+
+ if self._db_updates >= MibReconcileTask.max_sequential_db_updates:
+ self._db_updates = 0
+ self._local_deferred = yield asleep(MibReconcileTask.db_update_pause)
+
+ except Exception as e:
+ self.log.warn('onu-only-error', e=e)
+ failures += 1
+
+ returnValue((successes, failures))
+
+ @inlineCallbacks
+    def fix_onu_only_remove_from_onu(self, olt_created):
+ """ On ONU, but no longer on OLT/OpenOMCI, delete it """
+ successes = 0
+ failures = 0
+
+ for cid, eid in olt_created:
+ if self.deferred.called: # Check if task canceled
+ break
+ try:
+                # If it is still in the current MIB, there was an audit issue; declare it
+                # an error and the next audit should clear it up
+ try:
+ current_entry = self._sync_sm.query_mib(class_id=cid, instance_id=eid)
+
+ except KeyError:
+ # Expected if no other entities with same class present in MIB
+ current_entry = None
+
+ if current_entry is not None and len(current_entry):
+ self.log.debug('onu-only-in-current', cid=cid, eid=eid)
+ failures += 1
+
+ else:
+ # Delete it from the ONU. Assume success
+ frame = OmciFrame(transaction_id=None,
+ message_type=OmciDelete.message_id,
+ omci_message=OmciDelete(entity_class=cid, entity_id=eid))
+
+ self._local_deferred = yield self._device.omci_cc.send(frame)
+ self.check_status_and_state(self._local_deferred, 'onu-attribute-update')
+ successes += 1
+ self._db_updates = 0
+
+ except Exception as e:
+ self.log.warn('olt-only-error', e=e)
+ failures += 1
+ self.strobe_watchdog()
+
+ returnValue((successes, failures))
+
+ @inlineCallbacks
+ def fix_olt_only(self, olt, onu_db, olt_db):
+ """
+ Fix ME's that were only found on the OLT. For OLT only MEs there are
+ the following things that will be checked.
+
+ o ME's that do not have an OpenOMCI class decoder. These are stored
+ as binary blobs in the MIB database. Since the OLT will never
+ create these (all are learned from ONU), it is assumed the ONU
+ has removed them for some purpose. So delete them from the OLT
+ database.
+
+        o For ME's that are created by the ONU (no create/delete access), the
+          MEs 'may' not be on the ONU because of a reboot, or because an OLT-created
+          ME was deleted and the ONU gratuitously removed it. So delete them
+          from the OLT database.
+
+ o For ME's that are created by the OLT/OpenOMCI, delete them from the
+ ONU
+
+ :param olt: (list(int,int)) List of tuples where (class_id, inst_id)
+ :param onu_db: (dict) ONU Database snapshot at time of audit
+ :param olt_db: (dict) OLT Database snapshot at time of audit
+
+ :return: (int, int) successes, failures
+ """
+ successes = 0
+ failures = 0
+ me_map = self._device.me_map
+
+ ####################################################################
+ # First the undecodables and onu-created (treated the same) remove
+ # from OpenOMCI database
+ undecodable = self._undecodable(olt, me_map)
+ onu_created = self._onu_created(olt, me_map)
+
+ if len(undecodable) or len(onu_created):
+ good, bad = self.fix_olt_only_remove_from_db(undecodable, onu_created)
+ successes += good
+ failures += bad
+
+ ####################################################################
+        # Last, the OLT-created MEs: create them on the ONU
+
+ olt_created = self._olt_created(olt, me_map)
+ if len(olt_created):
+ results = yield self.fix_olt_only_create_on_onu(olt_created, me_map)
+ successes += results[0]
+ failures += results[1]
+
+ returnValue((successes, failures))
+
+ def fix_olt_only_remove_from_db(self, undecodable, onu_created):
+ """ On OLT, but not on ONU and are ONU created, delete from OLT/OpenOMCI DB """
+ successes = 0
+ failures = 0
+
+ for cid, eid in undecodable + onu_created:
+ if self.deferred.called: # Check if task canceled
+ break
+ try:
+ # Delete it. If already deleted (KeyError), then that is okay
+ self._sync_sm.mib_delete(cid, eid)
+ self.strobe_watchdog()
+
+ except KeyError:
+ successes += 1 # Not found in DB anymore, assume success
+
+ except Exception as e:
+ self.log.warn('olt-only-db-error', cid=cid, eid=eid, e=e)
+ failures += 1
+
+ return successes, failures
+
+ @inlineCallbacks
+ def fix_olt_only_create_on_onu(self, olt_created, me_map):
+ """ Found on OLT and created by OLT, so create on ONU"""
+ successes = 0
+ failures = 0
+
+ for cid, eid in olt_created:
+ if self.deferred.called: # Check if task canceled
+ break
+
+ try:
+ # Get current entry, use it if found
+ olt_entry = self._sync_sm.query_mib(class_id=cid, instance_id=eid)
+ me_entry = me_map[cid]
+
+ if olt_entry is None or len(olt_entry) == 0:
+ successes += 1 # Deleted before task got to run
+ else:
+ # Create it in the ONU. Only set-by-create attributes allowed
+ sbc_data = {k: v for k, v in olt_entry[ATTRIBUTES_KEY].items()
+ if AA.SetByCreate in
+ next((attr.access for attr in me_entry.attributes
+ if attr.field.name == k), set())}
+
+ frame = OmciFrame(transaction_id=None,
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(entity_class=cid,
+ entity_id=eid,
+ data=sbc_data))
+
+ self._local_deferred = yield self._device.omci_cc.send(frame)
+ self.check_status_and_state(self._local_deferred, 'olt-create-sbc')
+ successes += 1
+ self._db_updates = 0
+
+ # Try any writeable attributes now (but not set-by-create)
+ writeable_data = {k: v for k, v in olt_entry[ATTRIBUTES_KEY].items()
+ if AA.Writable in
+ next((attr.access for attr in me_entry.attributes
+ if attr.field.name == k), set())
+ and AA.SetByCreate not in
+ next((attr.access for attr in me_entry.attributes
+ if attr.field.name == k), set())}
+
+ if len(writeable_data):
+ attributes_mask = me_entry.mask_for(*writeable_data.keys())
+ frame = OmciFrame(transaction_id=None,
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(entity_class=cid,
+ entity_id=eid,
+ attributes_mask=attributes_mask,
+ data=writeable_data))
+
+ self._local_deferred = yield self._device.omci_cc.send(frame)
+ self.check_status_and_state(self._local_deferred, 'olt-set-writeable')
+ successes += 1
+
+ except Exception as e:
+ self.log.exception('olt-only-fix', e=e, cid=cid, eid=eid)
+ failures += 1
+ self.strobe_watchdog()
+
+ returnValue((successes, failures))
+
+ @inlineCallbacks
+ def fix_attributes_only(self, attrs, onu_db, olt_db):
+ """
+ Fix ME's that were found on both the ONU and OLT, but had differing
+ attribute values. There are several cases to handle here
+
+ o For ME's created on the ONU that have write attributes that
+ only exist in the ONU's database, copy these to the OLT/OpenOMCI
+ database
+
+ o For all other writeable attributes, the OLT value takes precedence
+
+ :param attrs: (list(int,int,str)) List of tuples where (class_id, inst_id, attribute)
+ points to the specific ME instance where attributes
+ are different
+ :param onu_db: (dict) ONU Database snapshot at time of audit
+ :param olt_db: (dict) OLT Database snapshot at time of audit
+
+ :return: (int, int) successes, failures
+ """
+ successes = 0
+ failures = 0
+ me_map = self._device.me_map
+
+ # Collect up attributes on a per CID/EID basis. This will result in
+        # the minimal number of operations to either the database or over
+ # the OMCI-CC to the ONU
+
+ attr_map = dict()
+ for cid, eid, attribute in attrs:
+ if (cid, eid) not in attr_map:
+ attr_map[(cid, eid)] = {attribute}
+ else:
+ attr_map[(cid, eid)].add(attribute)
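+        # e.g. (illustrative) attrs = [(11, 1, 'x'), (11, 1, 'y'), (45, 2, 'z')]
+        #      -> attr_map == {(11, 1): {'x', 'y'}, (45, 2): {'z'}}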
+
+ for entity_pair, attributes in attr_map.items():
+ cid = entity_pair[0]
+ eid = entity_pair[1]
+
+ # Skip MEs we cannot encode/decode
+ if cid not in me_map:
+ self.log.warn('no-me-map-decoder', class_id=cid)
+ failures += 1
+ continue
+
+ if self.deferred.called: # Check if task canceled
+ break
+
+ # Build up MIB set commands and ONU Set (via OMCI) commands
+ # based of the attributes
+ me_entry = me_map[cid]
+ mib_data_to_save = dict()
+ onu_data_to_set = dict()
+ olt_attributes = olt_db[cid][eid][ATTRIBUTES_KEY]
+ onu_attributes = onu_db[cid][eid][ATTRIBUTES_KEY]
+
+ for attribute in attributes:
+ map_access = next((attr.access for attr in me_entry.attributes
+ if attr.field.name == attribute), set())
+ writeable = AA.Writable in map_access or AA.SetByCreate in map_access
+
+ # If only in ONU database snapshot, save it to OLT
+ if attribute in onu_attributes and attribute not in olt_attributes:
+ # On onu only
+ mib_data_to_save[attribute] = onu_attributes[attribute]
+
+ elif writeable:
+ # On olt only or in both. Either way OLT wins
+ onu_data_to_set[attribute] = olt_attributes[attribute]
+
+            # Now do the bulk operations. For both, check to see if the target
+ # is still the same as when the audit was performed. If it is, do
+ # the commit. If not, mark as a failure so an expedited audit will
+ # occur and check again.
+
+ if len(mib_data_to_save):
+ results = yield self.fix_attributes_only_in_mib(cid, eid, mib_data_to_save)
+ successes += results[0]
+ failures += results[1]
+
+ if len(onu_data_to_set):
+ results = yield self.fix_attributes_only_on_olt(cid, eid, onu_data_to_set, olt_db, me_entry)
+ successes += results[0]
+ failures += results[1]
+
+ returnValue((successes, failures))
+
+ @inlineCallbacks
+ def fix_attributes_only_in_mib(self, cid, eid, mib_data):
+ successes = 0
+ failures = 0
+ try:
+            # Get the current entry and verify it still matches what the audit saw
+            # (i.e. still missing from our DB, or holding identical values)
+ attributes = mib_data.keys()
+ current_entry = self._device.query_mib(cid, eid, attributes)
+
+ if current_entry is not None and len(current_entry):
+ clashes = {k: v for k, v in current_entry.items()
+ if k in attributes and v != mib_data[k]}
+
+ if len(clashes):
+ raise ValueError('Existing DB entry for {}/{} attributes clash with audit data. Clash: {}'.
+ format(cid, eid, clashes))
+
+ self._sync_sm.mib_set(cid, eid, mib_data)
+ successes += len(mib_data)
+ self.strobe_watchdog()
+
+            # If we do nothing but DB updates for a lot of MEs, we are
+            # blocking other async twisted tasks; be kind and yield
+ self._db_updates += 1
+ if self._db_updates >= MibReconcileTask.max_sequential_db_updates:
+ self._db_updates = 0
+ self._local_deferred = yield asleep(MibReconcileTask.db_update_pause)
+
+ except ValueError as e:
+            self.log.debug('attribute-changed', e=e)
+ failures += len(mib_data)
+
+ except Exception as e:
+ self.log.exception('attribute-only-fix-mib', e=e, cid=cid, eid=eid)
+ failures += len(mib_data)
+
+ returnValue((successes, failures))
+
+ @inlineCallbacks
+ def fix_attributes_only_on_olt(self, cid, eid, onu_data, olt_db, me_entry):
+ successes = 0
+ failures = 0
+
+ try:
+            # On OLT only or in both; either way the OLT wins. First verify that
+            # the OLT version is still the same data that we want to update on
+            # the ONU, i.e. that the OLT data is unchanged since the audit
+ olt_db_entries = {k: v for k, v in olt_db[cid][eid][ATTRIBUTES_KEY].items()
+ if k in onu_data.keys()}
+ current_entries = self._sync_sm.query_mib(class_id=cid, instance_id=eid,
+ attributes=onu_data.keys())
+
+ still_the_same = all(current_entries.get(k) == v for k, v in olt_db_entries.items())
+ if not still_the_same:
+ returnValue((0, len(onu_data))) # Wait for it to stabilize
+
+ # OLT data still matches, do the set operations now
+ # while len(onu_data):
+ attributes_mask = me_entry.mask_for(*onu_data.keys())
+ frame = OmciFrame(transaction_id=None,
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(entity_class=cid,
+ entity_id=eid,
+ attributes_mask=attributes_mask,
+ data=onu_data))
+
+ results = yield self._device.omci_cc.send(frame)
+ self.check_status_and_state(results, 'onu-attribute-update')
+ successes += len(onu_data)
+ self._db_updates = 0
+
+ except Exception as e:
+ self.log.exception('attribute-only-fix-onu', e=e, cid=cid, eid=eid)
+ failures += len(onu_data)
+ self.strobe_watchdog()
+
+ returnValue((successes, failures))
+
+ @inlineCallbacks
+ def update_mib_data_sync(self):
+ """
+ As the final step of MIB resynchronization, the OLT sets the MIB data sync
+ attribute of the ONU data ME to some suitable value of its own choice. It
+        then sets its own record of the same attribute to the same value,
+        incremented by 1, as described in the MIB data sync clause of ITU-T G.988.
+
+ :return: (int, int) success, failure counts
+ """
+        # Get the MDS value to set; do not use zero
+
+ new_mds_value = self._sync_sm.mib_data_sync
+ if new_mds_value == 0:
+ self._sync_sm.increment_mib_data_sync()
+ new_mds_value = self._sync_sm.mib_data_sync
+
+ # Update it. The set response will be sent on the OMCI-CC pub/sub bus
+ # and the MIB Synchronizer will update this MDS value in the database
+ # if successful.
+ try:
+ frame = OntDataFrame(mib_data_sync=new_mds_value).set()
+
+ results = yield self._device.omci_cc.send(frame)
+ self.check_status_and_state(results, 'ont-data-mbs-update')
+ returnValue((1, 0))
+
+ except TimeoutError as e:
+ self.log.debug('ont-data-send-timeout', e=e)
+ returnValue((0, 1))
+
+ except Exception as e:
+ self.log.exception('ont-data-send', e=e, mds=new_mds_value)
+ returnValue((0, 1))
+
+ def check_status_and_state(self, results, operation=''):
+ """
+ Check the results of an OMCI response. An exception is thrown
+ if the task was cancelled or an error was detected.
+
+ :param results: (OmciFrame) OMCI Response frame
+ :param operation: (str) what operation was being performed
+ :return: True if successful, False if the entity existed (already created)
+ """
+ omci_msg = results.fields['omci_message'].fields
+ status = omci_msg['success_code']
+ error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
+ failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
+ unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
+ self.strobe_watchdog()
+
+ self.log.debug(operation, status=status, error_mask=error_mask,
+ failed_mask=failed_mask, unsupported_mask=unsupported_mask)
+
+ if status == RC.Success:
+ return True
+
+ elif status == RC.InstanceExists:
+ return False
+
+ msg = '{} failed with a status of {}, error_mask: {}, failed_mask: {}, unsupported_mask: {}'.\
+ format(operation, status, error_mask, failed_mask, unsupported_mask)
+
+ raise MibReconcileException(msg)
+
+ def _undecodable(self, cid_eid_list, me_map):
+ return [(cid, eid) for cid, eid in cid_eid_list if cid not in me_map]
+
+ def _onu_created(self, cid_eid_list, me_map):
+ return [(cid, eid) for cid, eid in cid_eid_list if cid in me_map and
+ (OP.Create not in me_map[cid].mandatory_operations and
+ OP.Create not in me_map[cid].optional_operations)]
+
+ def _olt_created(self, cid_eid_list, me_map):
+ return [(cid, eid) for cid, eid in cid_eid_list if cid in me_map and
+ (OP.Create in me_map[cid].mandatory_operations or
+ OP.Create in me_map[cid].optional_operations)]
diff --git a/python/adapters/extensions/omci/tasks/mib_resync_task.py b/python/adapters/extensions/omci/tasks/mib_resync_task.py
new file mode 100644
index 0000000..ef9c531
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/mib_resync_task.py
@@ -0,0 +1,427 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, returnValue
+from twisted.internet import reactor
+from common.utils.asleep import asleep
+from voltha.extensions.omci.database.mib_db_dict import *
+from voltha.extensions.omci.omci_entities import OntData
+from voltha.extensions.omci.omci_defs import AttributeAccess, EntityOperations
+
+AA = AttributeAccess
+OP = EntityOperations
+
+class MibCopyException(Exception):
+ pass
+
+
+class MibDownloadException(Exception):
+ pass
+
+
+class MibResyncException(Exception):
+ pass
+
+
+class MibResyncTask(Task):
+ """
+ OpenOMCI MIB resynchronization Task
+
+    This task gets a copy of the MIB from the ONU and compares it to a
+    copy of the local database. When the MIB Upload command is sent to the ONU,
+    the ONU should make a copy and source the data requested from that snapshot.
+    The ONU can still source AVCs and the OLT can still send config
+    commands to the actual ONU during this time.
+ """
+ task_priority = 240
+ name = "MIB Resynchronization Task"
+
+ max_db_copy_retries = 3
+ db_copy_retry_delay = 7
+
+ max_mib_upload_next_retries = 3
+ mib_upload_next_delay = 10 # Max * delay < 60 seconds
+ watchdog_timeout = 15 # Should be > max delay
+
+ def __init__(self, omci_agent, device_id):
+ """
+ Class initialization
+
+ :param omci_agent: (OpenOMCIAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ """
+ super(MibResyncTask, self).__init__(MibResyncTask.name,
+ omci_agent,
+ device_id,
+ priority=MibResyncTask.task_priority,
+ exclusive=False)
+ self._local_deferred = None
+ self._device = omci_agent.get_device(device_id)
+ self._db_active = MibDbVolatileDict(omci_agent)
+ self._db_active.start()
+
+ def cancel_deferred(self):
+ super(MibResyncTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start MIB Re-Synchronization task
+ """
+ super(MibResyncTask, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_mib_resync)
+ self._db_active.start()
+ self._db_active.add(self.device_id)
+
+ def stop(self):
+ """
+ Shutdown MIB Re-Synchronization task
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ self._device = None
+ self._db_active.stop()
+ self._db_active = None
+ super(MibResyncTask, self).stop()
+
+ @inlineCallbacks
+ def perform_mib_resync(self):
+ """
+ Perform the MIB Resynchronization sequence
+
+ The sequence to be performed is:
+ - get a copy of the current MIB database (db_copy)
+
+ - perform MIB upload commands to get ONU's database and save this
+ to a local DB (db_active). Note that the ONU can still receive
+ create/delete/set/get operations from the operator and source
+ AVC notifications as well during this period.
+
+ - Compare the information in the db_copy to the db_active
+
+ During the mib upload process, the maximum time between mib upload next
+ requests is 1 minute.
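+
+        The deferred is fired with a dictionary of this shape (illustrative):
+            {'on-olt-only': [...] or None, 'on-onu-only': [...] or None,
+             'attr-diffs': [...] or None, 'olt-db': db_copy, 'onu-db': active_copy}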
+ """
+ self.log.debug('perform-mib-resync')
+
+ try:
+ results = yield self.snapshot_mib()
+ db_copy = results[0]
+
+ if db_copy is None:
+ e = MibCopyException('Failed to get local database copy')
+ self.deferred.errback(failure.Failure(e))
+
+ else:
+ number_of_commands = results[1]
+
+ # Start the MIB upload sequence
+ self.strobe_watchdog()
+ commands_retrieved = yield self.upload_mib(number_of_commands)
+
+ if commands_retrieved < number_of_commands:
+ e = MibDownloadException('Only retrieved {} of {} instances'.
+ format(commands_retrieved, number_of_commands))
+ self.deferred.errback(failure.Failure(e))
+ else:
+ # Compare the databases
+ active_copy = self._db_active.query(self.device_id)
+ on_olt_only, on_onu_only, attr_diffs = \
+ self.compare_mibs(db_copy, active_copy)
+
+ self.deferred.callback(
+ {
+ 'on-olt-only': on_olt_only if len(on_olt_only) else None,
+ 'on-onu-only': on_onu_only if len(on_onu_only) else None,
+ 'attr-diffs': attr_diffs if len(attr_diffs) else None,
+ 'olt-db': db_copy,
+ 'onu-db': active_copy
+ })
+
+ except Exception as e:
+ self.log.exception('resync', e=e)
+ self.deferred.errback(failure.Failure(e))
+
+ @inlineCallbacks
+ def snapshot_mib(self):
+ """
+ Snapshot the MIB on the ONU and create a copy of our local MIB database
+
+ :return: (pair) (db_copy, number_of_commands)
+ """
+ db_copy = None
+ number_of_commands = None
+
+ try:
+ max_tries = MibResyncTask.max_db_copy_retries - 1
+
+ for retries in xrange(0, max_tries + 1):
+ # Send MIB Upload so ONU snapshots its MIB
+ try:
+ self.strobe_watchdog()
+ number_of_commands = yield self.send_mib_upload()
+
+ if number_of_commands is None:
+ if retries >= max_tries:
+ db_copy = None
+ break
+
+ except (TimeoutError, ValueError) as e:
+ self.log.warn('timeout-or-value-error', e=e)
+ if retries >= max_tries:
+ raise
+
+ self.strobe_watchdog()
+ yield asleep(MibResyncTask.db_copy_retry_delay)
+ continue
+
+ # Get a snapshot of the local MIB database
+ db_copy = self._device.query_mib()
+ # if we made it this far, no need to keep trying
+ break
+
+ except Exception as e:
+ self.log.exception('mib-resync', e=e)
+ raise
+
+ # Handle initial failures
+
+ if db_copy is None or number_of_commands is None:
+ raise MibCopyException('Failed to snapshot MIB copy after {} retries'.
+ format(MibResyncTask.max_db_copy_retries))
+
+ returnValue((db_copy, number_of_commands))
+
+ @inlineCallbacks
+ def send_mib_upload(self):
+ """
+ Perform MIB upload command and get the number of entries to retrieve
+
+ :return: (int) Number of commands to execute or None on error
+ """
+ ########################################
+ # Begin MIB Upload
+ try:
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send_mib_upload()
+
+ number_of_commands = results.fields['omci_message'].fields['number_of_commands']
+
+ if number_of_commands is None or number_of_commands <= 0:
+ raise ValueError('Number of commands was {}'.format(number_of_commands))
+
+ returnValue(number_of_commands)
+
+ except TimeoutError as e:
+ self.log.warn('mib-resync-get-timeout', e=e)
+ raise
+
+ @inlineCallbacks
+ def upload_mib(self, number_of_commands):
+ ########################################
+ # Begin MIB Upload
+ seq_no = None
+
+ for seq_no in xrange(number_of_commands):
+ max_tries = MibResyncTask.max_mib_upload_next_retries
+
+ for retries in xrange(0, max_tries):
+ try:
+ self.strobe_watchdog()
+ response = yield self._device.omci_cc.send_mib_upload_next(seq_no)
+
+ omci_msg = response.fields['omci_message'].fields
+ class_id = omci_msg['object_entity_class']
+ entity_id = omci_msg['object_entity_id']
+
+ # Filter out the 'mib_data_sync' from the database. We save that at
+ # the device level and do not want it showing up during a re-sync
+ # during data comparison
+ if class_id == OntData.class_id:
+ break
+
+ # The T&W ONU reports an ME with class ID 0 but only on audit. Perhaps others do as well.
+ if class_id == 0 or class_id > 0xFFFF:
+ self.log.warn('invalid-class-id', class_id=class_id)
+ break
+
+ attributes = {k: v for k, v in omci_msg['object_data'].items()}
+
+ # Save to the database
+ self._db_active.set(self.device_id, class_id, entity_id, attributes)
+ break
+
+ except TimeoutError:
+ self.log.warn('mib-resync-timeout', seq_no=seq_no,
+ number_of_commands=number_of_commands)
+
+ if retries < max_tries - 1:
+ self.strobe_watchdog()
+ yield asleep(MibResyncTask.mib_upload_next_delay)
+ else:
+ raise
+
+ except Exception as e:
+ self.log.exception('resync', e=e, seq_no=seq_no,
+ number_of_commands=number_of_commands)
+
+ returnValue(seq_no + 1) # seq_no is zero based.
+
+ def compare_mibs(self, db_copy, db_active):
+ """
+ Compare our db_copy with the ONU's active copy
+
+ :param db_copy: (dict) OpenOMCI's copy of the database
+ :param db_active: (dict) ONU's database snapshot
+ :return: (dict), (dict), (list) Differences
+ """
+ self.strobe_watchdog()
+ me_map = self.omci_agent.get_device(self.device_id).me_map
+
+ # Class & Entities only in local copy (OpenOMCI)
+ on_olt_temp = self.get_lhs_only_dict(db_copy, db_active)
+
+ # Remove any entries that are not reported during an upload (but could
+ # be in our database copy). Retain undecodable class IDs.
+ on_olt_only = [(cid, eid) for cid, eid in on_olt_temp
+ if cid not in me_map or not me_map[cid].hidden]
+
+ # Further reduce the on_olt_only MEs reported in an audit so that they do
+ # not include missed MEs that are ONU-created. Not all ONUs report
+ # ONU-created MEs outside of the initial MIB upload. Adtran ONUs do report
+ # them; T&W and a few others may not.
+ on_olt_only = [(cid, eid) for cid, eid in on_olt_only if cid in me_map and
+ (OP.Create in me_map[cid].mandatory_operations or
+ OP.Create in me_map[cid].optional_operations)]
+
+ # Class & Entities only on remote (ONU)
+ on_onu_only = self.get_lhs_only_dict(db_active, db_copy)
+
+ # Class & Entities on both local & remote, but one or more attributes
+ # are different on the ONU. This is the value that the local (OpenOMCI)
+ # thinks should be on the remote (ONU)
+
+ attr_diffs = self.get_attribute_diffs(db_copy, db_active, me_map)
+
+ # TODO: Note that certain MEs are excluded from the MIB upload. In particular,
+ # instances of some general purpose MEs, such as the Managed Entity ME and
+ # the Attribute ME, are not included in the MIB upload. Table attributes are
+ # also not included in the MIB upload (and tables are not yet supported by
+ # this OpenOMCI implementation, VOLTHA v1.3.0).
+
+ return on_olt_only, on_onu_only, attr_diffs
+
+ def get_lhs_only_dict(self, lhs, rhs):
+ """
+ Compare two MIB database dictionaries and return the ME Class ID and
+ instances that are unique to the lhs dictionary. Both parameters
+ should be in the common MIB Database output dictionary format that
+ is returned by the mib 'query' command.
+
+ :param lhs: (dict) Left-hand-side argument.
+ :param rhs: (dict) Right-hand-side argument
+
+ :return: (list(int, int)) List of (class_id, inst_id) tuples unique to lhs
+ """
+ results = list()
+
+ for cls_id, cls_data in lhs.items():
+ # Get unique classes
+ #
+ # Skip keys that are not class IDs
+ if not isinstance(cls_id, int):
+ continue
+
+ if cls_id not in rhs:
+ results.extend([(cls_id, inst_id) for inst_id in cls_data.keys()
+ if isinstance(inst_id, int)])
+ else:
+ # Get unique instances of a class
+ lhs_cls = cls_data
+ rhs_cls = rhs[cls_id]
+
+ for inst_id, _ in lhs_cls.items():
+ # Skip keys that are not instance IDs
+ if isinstance(inst_id, int) and inst_id not in rhs_cls:
+ results.append((cls_id, inst_id))
+
+ return results
+
+ def get_attribute_diffs(self, omci_copy, onu_copy, me_map):
+ """
+ Compare two OMCI MIBs and return the ME class and instance IDs that exist
+ on both the local copy and the remote ONU but have one or more different
+ attribute values. Both parameters should be in the common MIB Database
+ output dictionary format that is returned by the mib 'query' command.
+
+ :param omci_copy: (dict) OpenOMCI copy (OLT-side) of the MIB Database
+ :param onu_copy: (dict) The ONU's latest active copy of its database
+ :param me_map: (dict) ME Class ID MAP for this ONU
+
+ :return: (list(int, int, str)) List of (class_id, inst_id, attribute) tuples
+ identifying the specific ME instances whose attribute values differ
+ """
+ results = list()
+ ro_set = {AA.R}
+
+ # Get class ID's that are in both
+ class_ids = {cls_id for cls_id, _ in omci_copy.items()
+ if isinstance(cls_id, int) and cls_id in onu_copy}
+
+ for cls_id in class_ids:
+ # Get unique instances of a class
+ olt_cls = omci_copy[cls_id]
+ onu_cls = onu_copy[cls_id]
+
+ # Weed out read-only attributes. Attributes on the ONU may be read-only.
+ # These will only show up in the OpenOMCI (OLT-side) database if the value
+ # changed and an AVC notification was sourced by the ONU.
+ # TODO: These class IDs could be calculated once at ONU startup (at device add)
+ if cls_id in me_map:
+ ro_attrs = {attr.field.name for attr in me_map[cls_id].attributes
+ if attr.access == ro_set}
+ else:
+ # Here if partially defined ME (not defined in ME Map)
+ from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
+ ro_attrs = {UNKNOWN_CLASS_ATTRIBUTE_KEY}
+
+ # Get set of common instance IDs
+ inst_ids = {inst_id for inst_id, _ in olt_cls.items()
+ if isinstance(inst_id, int) and inst_id in onu_cls}
+
+ for inst_id in inst_ids:
+ omci_attributes = {k for k in olt_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+ onu_attributes = {k for k in onu_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+
+ # Get attributes that exist in one database, but not the other
+ sym_diffs = (omci_attributes ^ onu_attributes) - ro_attrs
+ results.extend([(cls_id, inst_id, attr) for attr in sym_diffs])
+
+ # Get common attributes with different values
+ common_attributes = (omci_attributes & onu_attributes) - ro_attrs
+ results.extend([(cls_id, inst_id, attr) for attr in common_attributes
+ if olt_cls[inst_id][ATTRIBUTES_KEY][attr] !=
+ onu_cls[inst_id][ATTRIBUTES_KEY][attr]])
+ return results
diff --git a/python/adapters/extensions/omci/tasks/mib_upload.py b/python/adapters/extensions/omci/tasks/mib_upload.py
new file mode 100644
index 0000000..4afd234
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/mib_upload.py
@@ -0,0 +1,158 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, AlreadyCalledError
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes
+
+
+class MibUploadFailure(Exception):
+ """
+ This error is raised by default when the upload fails
+ """
+
+
+class MibUploadTask(Task):
+ """
+ OpenOMCI MIB upload task
+
+ On successful completion, this task will call the 'callback' method of the
+ deferred returned by the start method. Only a textual message is provided
+ as the successful result; it lists the number of ME entities successfully
+ retrieved.
+
+ Note that the MIB Synchronization State Machine will get event subscription
+ information for the MIB Reset and MIB Upload Next requests and it is the
+ MIB Synchronization State Machine that actually populates the MIB Database.
+ """
+ task_priority = 250
+ name = "MIB Upload Task"
+
+ def __init__(self, omci_agent, device_id):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ """
+ super(MibUploadTask, self).__init__(MibUploadTask.name,
+ omci_agent,
+ device_id,
+ priority=MibUploadTask.task_priority)
+ self._local_deferred = None
+
+ def cancel_deferred(self):
+ super(MibUploadTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start MIB Synchronization tasks
+ """
+ super(MibUploadTask, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_mib_upload)
+
+ def stop(self):
+ """
+ Shutdown MIB Synchronization tasks
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ super(MibUploadTask, self).stop()
+
+ @inlineCallbacks
+ def perform_mib_upload(self):
+ """
+ Perform the MIB Upload sequence
+ """
+ self.log.debug('perform-mib-upload')
+
+ seq_no = 0
+ number_of_commands = 0
+
+ try:
+ device = self.omci_agent.get_device(self.device_id)
+
+ #########################################
+ # MIB Reset
+ self.strobe_watchdog()
+ results = yield device.omci_cc.send_mib_reset()
+
+ status = results.fields['omci_message'].fields['success_code']
+ if status != ReasonCodes.Success.value:
+ raise MibUploadFailure('MIB Reset request failed with status code: {}'.
+ format(status))
+
+ ########################################
+ # Begin MIB Upload
+ self.strobe_watchdog()
+ results = yield device.omci_cc.send_mib_upload()
+
+ number_of_commands = results.fields['omci_message'].fields['number_of_commands']
+
+ for seq_no in xrange(number_of_commands):
+ if not device.active or not device.omci_cc.enabled:
+ raise MibUploadFailure('OMCI and/or ONU is not active')
+
+ for retry in range(0, 3):
+ try:
+ self.log.debug('mib-upload-next-request', seq_no=seq_no,
+ retry=retry,
+ number_of_commands=number_of_commands)
+ self.strobe_watchdog()
+ yield device.omci_cc.send_mib_upload_next(seq_no)
+
+ self.log.debug('mib-upload-next-success', seq_no=seq_no,
+ number_of_commands=number_of_commands)
+ break
+
+ except TimeoutError as e:
+ from common.utils.asleep import asleep
+ self.log.warn('mib-upload-timeout', e=e, seq_no=seq_no,
+ number_of_commands=number_of_commands)
+ if retry >= 2:
+ raise MibUploadFailure('Upload timeout failure on req {} of {}'.
+ format(seq_no + 1, number_of_commands))
+ self.strobe_watchdog()
+ yield asleep(0.3)
+
+ # Successful if here
+ self.log.info('mib-synchronized')
+ self.deferred.callback('success, loaded {} ME Instances'.
+ format(number_of_commands))
+
+ except TimeoutError as e:
+ self.log.warn('mib-upload-timeout-on-reset', e=e, seq_no=seq_no,
+ number_of_commands=number_of_commands)
+ self.deferred.errback(failure.Failure(e))
+
+ except AlreadyCalledError:
+ # Can occur if task canceled due to MIB Sync state change
+ self.log.debug('already-called-exception', seq_no=seq_no,
+ number_of_commands=number_of_commands)
+ assert self.deferred.called, \
+ 'Unexpected AlreadyCalledError exception: seq: {} of {}'.format(seq_no,
+ number_of_commands)
+ except Exception as e:
+ self.log.exception('mib-upload', e=e)
+ self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/extensions/omci/tasks/omci_create_pm_task.py b/python/adapters/extensions/omci/tasks/omci_create_pm_task.py
new file mode 100644
index 0000000..355e26a
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/omci_create_pm_task.py
@@ -0,0 +1,150 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, TimeoutError
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_messages import OmciCreate
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class CreatePMException(Exception):
+ pass
+
+
+class OmciCreatePMRequest(Task):
+ """
+ OpenOMCI routine to create the requested PM Interval MEs
+
+ TODO: Support for threshold crossing alarms will be added in a future VOLTHA release
+ """
+ task_priority = Task.DEFAULT_PRIORITY
+ name = "ONU OMCI Create PM ME Task"
+
+ def __init__(self, omci_agent, device_id, me_dict, exclusive=False):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param me_dict: (dict) (pm cid, pm eid) -> (me cid, me eid, upstream)
+ :param exclusive: (bool) True if this Create request Task exclusively owns the
+ OMCI-CC while running. Default: False
+ """
+ super(OmciCreatePMRequest, self).__init__(OmciCreatePMRequest.name,
+ omci_agent,
+ device_id,
+ priority=OmciCreatePMRequest.task_priority,
+ exclusive=exclusive)
+ self._device = omci_agent.get_device(device_id)
+ self._me_dict = me_dict
+ self._local_deferred = None
+
+ def cancel_deferred(self):
+ super(OmciCreatePMRequest, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """ Start task """
+ super(OmciCreatePMRequest, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_create)
+
+ @inlineCallbacks
+ def perform_create(self):
+ """ Perform the create requests """
+
+ try:
+ for pm, me in self._me_dict.items():
+ pm_class_id = pm[0]
+ pm_entity_id = pm[1]
+ me_class_id = me[0]
+ me_entity_id = me[1]
+ upstream = me[2]
+ self.log.debug('create-pm-me', class_id=pm_class_id, entity_id=pm_entity_id)
+
+ if me_class_id == 0:
+ # Typical/common PM interval format
+ frame = OmciFrame(
+ transaction_id=None, # OMCI-CC will set
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=pm_class_id,
+ entity_id=pm_entity_id,
+ data=dict()
+ )
+ )
+ else:
+ # Extended PM interval format. See ITU-T G.988 Section 9.3.32.
+ # Bit 1 - continuous accumulation if set, 15-minute interval if unset
+ # Bit 2 - directionality (0=upstream, 1=downstream)
+ # Bit 3..14 - Reserved
+ # Bit 15 - Use P bits of TCI field to filter
+ # Bit 16 - Use VID bits of TCI field to filter
+ bitmap = 0 if upstream else 1 << 1
+
+ data = {'control_block': [
+ 0, # Threshold data 1/2 ID
+ me_class_id, # Parent ME Class
+ me_entity_id, # Parent ME Instance
+ 0, # Accumulation disable
+ 0, # TCA Disable
+ bitmap, # Control fields bitmap
+ 0, # TCI
+ 0 # Reserved
+ ]}
+ frame = OmciFrame(
+ transaction_id=None, # OMCI-CC will set
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=pm_class_id,
+ entity_id=pm_entity_id,
+ data=data
+ )
+ )
+ self.strobe_watchdog()
+ try:
+ results = yield self._device.omci_cc.send(frame)
+ except TimeoutError:
+ self.log.warning('perform-create-timeout', me_class_id=me_class_id, me_entity_id=me_entity_id,
+ pm_class_id=pm_class_id, pm_entity_id=pm_entity_id)
+ raise
+
+ status = results.fields['omci_message'].fields['success_code']
+ self.log.debug('perform-create-status', status=status)
+
+ # Did it fail
+ if status != RC.Success.value and status != RC.InstanceExists.value:
+ msg = 'ME: {}, entity: {} failed with status {}'.format(pm_class_id,
+ pm_entity_id,
+ status)
+ raise CreatePMException(msg)
+
+ self.log.debug('create-pm-success', class_id=pm_class_id,
+ entity_id=pm_entity_id)
+
+ self.deferred.callback(self)
+
+ except Exception as e:
+ self.log.exception('perform-create', e=e)
+ self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/extensions/omci/tasks/omci_delete_pm_task.py b/python/adapters/extensions/omci/tasks/omci_delete_pm_task.py
new file mode 100644
index 0000000..adf1ce2
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/omci_delete_pm_task.py
@@ -0,0 +1,108 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_messages import OmciDelete
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class DeletePMException(Exception):
+ pass
+
+
+class OmciDeletePMRequest(Task):
+ """
+ OpenOMCI routine to delete the requested PM Interval MEs
+ """
+ task_priority = Task.DEFAULT_PRIORITY
+ name = "ONU OMCI Delete PM ME Task"
+
+ def __init__(self, omci_agent, device_id, me_set, exclusive=False):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param me_set: (set) Tuples of class_id / entity_id to delete
+ :param exclusive: (bool) True if this Delete request Task exclusively owns the
+ OMCI-CC while running. Default: False
+ """
+ super(OmciDeletePMRequest, self).__init__(OmciDeletePMRequest.name,
+ omci_agent,
+ device_id,
+ priority=OmciDeletePMRequest.task_priority,
+ exclusive=exclusive)
+ self._device = omci_agent.get_device(device_id)
+ self._me_tuples = me_set
+ self._local_deferred = None
+
+ def cancel_deferred(self):
+ super(OmciDeletePMRequest, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """ Start task """
+ super(OmciDeletePMRequest, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_delete)
+
+ @inlineCallbacks
+ def perform_delete(self):
+ """ Perform the delete requests """
+ self.log.debug('perform-delete')
+
+ try:
+ for me in self._me_tuples:
+ class_id = me[0]
+ entity_id = me[1]
+
+ frame = OmciFrame(
+ transaction_id=None,
+ message_type=OmciDelete.message_id,
+ omci_message=OmciDelete(
+ entity_class=class_id,
+ entity_id=entity_id
+ )
+ )
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame)
+
+ status = results.fields['omci_message'].fields['success_code']
+ self.log.debug('perform-delete-status', status=status)
+
+ # Did it fail? If the instance does not exist, that is not an error
+ if status != RC.Success.value and status != RC.UnknownInstance.value:
+ msg = 'ME: {}, entity: {} failed with status {}'.format(class_id,
+ entity_id,
+ status)
+ raise DeletePMException(msg)
+
+ self.log.debug('delete-pm-success', class_id=class_id,
+ entity_id=entity_id)
+ self.deferred.callback(self)
+
+ except Exception as e:
+ self.log.exception('perform-delete', e=e)
+ self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/extensions/omci/tasks/omci_get_request.py b/python/adapters/extensions/omci/tasks/omci_get_request.py
new file mode 100644
index 0000000..c325278
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/omci_get_request.py
@@ -0,0 +1,356 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import failure, inlineCallbacks, TimeoutError, returnValue
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import MEFrame
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_cc import DEFAULT_OMCI_TIMEOUT
+from voltha.extensions.omci.omci_messages import OmciGet
+from voltha.extensions.omci.omci_fields import OmciTableField
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class GetException(Exception):
+ pass
+
+
+class OmciGetRequest(Task):
+ """
+ OpenOMCI task to get the attributes of an OMCI ME instance
+
+ Upon completion, the Task deferred callback is invoked with a reference of
+ this Task object.
+
+ The Task has an initializer option (allow_failure) that will retry the
+ requested attributes one at a time if the original request fails with a
+ status code of 9 (Attributes failed or unknown). This result means that an attribute
+ is not supported by the ONU or that a mandatory/optional attribute could
+ not be executed by the ONU, even if it is supported, for example,
+ because of a range or type violation.
+ """
+ task_priority = 128
+ name = "ONU OMCI Get Task"
+
+ def __init__(self, omci_agent, device_id, entity_class, entity_id, attributes,
+ exclusive=False, allow_failure=False):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param entity_class: (EntityClass) ME Class to retrieve
+ :param entity_id: (int) ME Class instance ID to retrieve
+ :param attributes: (list or set) Names of the attributes to retrieve
+ :param exclusive: (bool) True if this GET request Task exclusively owns the
+ OMCI-CC while running. Default: False
+ :param allow_failure: (bool) If true, attempt to get all valid attributes
+ if the original request receives an error
+ code of 9 (Attributes failed or unknown).
+ """
+ super(OmciGetRequest, self).__init__(OmciGetRequest.name,
+ omci_agent,
+ device_id,
+ priority=OmciGetRequest.task_priority,
+ exclusive=exclusive)
+ self._device = omci_agent.get_device(device_id)
+ self._entity_class = entity_class
+ self._entity_id = entity_id
+ self._attributes = attributes
+ self._allow_failure = allow_failure
+ self._failed_or_unknown_attributes = set()
+ self._results = None
+ self._local_deferred = None
+
+ def cancel_deferred(self):
+ super(OmciGetRequest, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ @property
+ def me_class(self):
+ """The OMCI Managed Entity Class associated with this request"""
+ return self._entity_class
+
+ @property
+ def entity_id(self):
+ """The ME Entity ID associated with this request"""
+ return self._entity_id
+
+ @property
+ def attributes(self):
+ """
+ Return a dictionary of attributes for the request if the Get was
+ successfully completed. None otherwise
+ """
+ if self._results is None:
+ return None
+
+ omci_msg = self._results.fields['omci_message'].fields
+ return omci_msg['data'] if 'data' in omci_msg else None
+
+ @property
+ def success_code(self):
+ """
+ Return the OMCI success/reason code for the Get Response.
+ """
+ if self._results is None:
+ return None
+
+ return self._results.fields['omci_message'].fields['success_code']
+
+ @property
+ def raw_results(self):
+ """
+ Return the raw Get Response OMCIFrame
+ """
+ return self._results
+
+ def start(self):
+ """
+ Start the OMCI Get request task
+ """
+ super(OmciGetRequest, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_get_omci)
+
+ @property
+ def failed_or_unknown_attributes(self):
+ """
+ Returns the set of attributes that failed or were unknown in the original
+ get request that resulted in an initial status code of 9 (Attributes
+ failed or unknown).
+
+ :return: (set of str) attributes
+ """
+ return self._failed_or_unknown_attributes
+
+ @inlineCallbacks
+ def perform_get_omci(self):
+ """
+ Perform the initial get request
+ """
+ self.log.info('perform-get', entity_class=self._entity_class,
+ entity_id=self._entity_id, attributes=self._attributes)
+ try:
+ # If one or more attributes is a table attribute, get it separately
+ def is_table_attr(attr):
+ index = self._entity_class.attribute_name_to_index_map[attr]
+ attr_def = self._entity_class.attributes[index]
+ return isinstance(attr_def.field, OmciTableField)
+
+ first_attributes = {attr for attr in self._attributes if not is_table_attr(attr)}
+ table_attributes = {attr for attr in self._attributes if is_table_attr(attr)}
+
+ frame = MEFrame(self._entity_class, self._entity_id, first_attributes).get()
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame)
+
+ status = results.fields['omci_message'].fields['success_code']
+ self.log.debug('perform-get-status', status=status)
+
+ # Success?
+ if status == RC.Success.value:
+ self._results = results
+ results_omci = results.fields['omci_message'].fields
+
+ # Were all attributes fetched?
+ missing_attr = frame.fields['omci_message'].fields['attributes_mask'] ^ \
+ results_omci['attributes_mask']
+
+ if missing_attr > 0 or len(table_attributes) > 0:
+ self.log.info('perform-get-missing', num_missing=missing_attr,
+ table_attr=table_attributes)
+ self.strobe_watchdog()
+ self._local_deferred = reactor.callLater(0,
+ self.perform_get_missing_attributes,
+ missing_attr,
+ table_attributes)
+ returnValue(self._local_deferred)
+
+ elif status == RC.AttributeFailure.value:
+ # What failed? Note if only one attribute was attempted, then
+ # that is an overall failure
+
+ if not self._allow_failure or len(self._attributes) <= 1:
+ raise GetException('Get failed with status code: {}'.
+ format(RC.AttributeFailure.value))
+
+ self.strobe_watchdog()
+ self._local_deferred = reactor.callLater(0,
+ self.perform_get_failed_attributes,
+ results,
+ self._attributes)
+ returnValue(self._local_deferred)
+
+ else:
+ raise GetException('Get failed with status code: {}'.format(status))
+
+ self.log.debug('get-completed')
+ self.deferred.callback(self)
+
+ except TimeoutError as e:
+ self.deferred.errback(failure.Failure(e))
+
+ except Exception as e:
+ self.log.exception('perform-get', e=e, class_id=self._entity_class,
+ entity_id=self._entity_id, attributes=self._attributes)
+ self.deferred.errback(failure.Failure(e))
+
+ @inlineCallbacks
+ def perform_get_missing_attributes(self, missing_attr, table_attributes):
+ """
+ This method is called when the original Get request completes with success
+ but not all attributes were returned. This can happen if one or more of the
+ attributes would have exceeded the space available in the OMCI frame.
+
+ This routine iterates through the missing attributes and attempts to retrieve
+ the ones that were missing.
+
+ :param missing_attr: (int) Missing attributes bitmask
+ :param table_attributes: (set) Attributes that need table get/get-next support
+ """
+ self.log.debug('perform-get-missing', attrs=missing_attr, tbl=table_attributes)
+
+ # Retrieve missing attributes first (if any)
+ results_omci = self._results.fields['omci_message'].fields
+
+ for index in xrange(16):
+ attr_mask = 1 << index
+
+ if attr_mask & missing_attr:
+ # Get this attribute
+ frame = OmciFrame(
+ transaction_id=None, # OMCI-CC will set
+ message_type=OmciGet.message_id,
+ omci_message=OmciGet(
+ entity_class=self._entity_class.class_id,
+ entity_id=self._entity_id,
+ attributes_mask=attr_mask
+ )
+ )
+ try:
+ self.strobe_watchdog()
+ get_results = yield self._device.omci_cc.send(frame)
+
+ get_omci = get_results.fields['omci_message'].fields
+ if get_omci['success_code'] != RC.Success.value:
+ continue
+
+ assert attr_mask == get_omci['attributes_mask'], 'wrong attribute'
+ results_omci['attributes_mask'] |= attr_mask
+
+ if results_omci.get('data') is None:
+ results_omci['data'] = dict()
+
+ results_omci['data'].update(get_omci['data'])
+
+ except TimeoutError:
+ self.log.debug('missing-timeout')
+
+ except Exception as e:
+ self.log.exception('missing-failure', e=e)
+
+ # Now any table attributes. OMCI_CC handles background get/get-next sequencing
+ for tbl_attr in table_attributes:
+ attr_mask = self._entity_class.mask_for(tbl_attr)
+ frame = OmciFrame(
+ transaction_id=None, # OMCI-CC will set
+ message_type=OmciGet.message_id,
+ omci_message=OmciGet(
+ entity_class=self._entity_class.class_id,
+ entity_id=self._entity_id,
+ attributes_mask=attr_mask
+ )
+ )
+ try:
+ timeout = 2 * DEFAULT_OMCI_TIMEOUT # Multiple frames expected
+ self.strobe_watchdog()
+ get_results = yield self._device.omci_cc.send(frame,
+ timeout=timeout)
+ self.strobe_watchdog()
+ get_omci = get_results.fields['omci_message'].fields
+ if get_omci['success_code'] != RC.Success.value:
+ continue
+
+ if results_omci.get('data') is None:
+ results_omci['data'] = dict()
+
+ results_omci['data'].update(get_omci['data'])
+
+ except TimeoutError:
+ self.log.debug('tbl-attr-timeout')
+
+ except Exception as e:
+ self.log.exception('tbl-attr-failure', e=e)
+
+ self.deferred.callback(self)
+
+ @inlineCallbacks
+ def perform_get_failed_attributes(self, tmp_results, attributes):
+ """
+ Retry the attributes of a Get that returned status code 9 (Attributes
+ failed or unknown), one attribute at a time, accumulating any successful
+ values into the partial results.
+
+ :param tmp_results: (OmciFrame) Partial results from the original Get request
+ :param attributes: (list or set) Names of the attributes to retry
+ """
+ self.log.debug('perform-get-failed', attrs=attributes)
+
+ for attr in attributes:
+ try:
+ frame = MEFrame(self._entity_class, self._entity_id, {attr}).get()
+
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame)
+
+ status = results.fields['omci_message'].fields['success_code']
+
+ if status == RC.AttributeFailure.value:
+ self.log.debug('unknown-or-invalid-attribute', attr=attr, status=status)
+ self._failed_or_unknown_attributes.add(attr)
+
+ elif status != RC.Success.value:
+ self.log.warn('invalid-get', class_id=self._entity_class,
+ attribute=attr, status=status)
+ self._failed_or_unknown_attributes.add(attr)
+
+ else:
+ # Add to partial results and correct the status
+ tmp_results.fields['omci_message'].fields['success_code'] = status
+ tmp_results.fields['omci_message'].fields['attributes_mask'] |= \
+ results.fields['omci_message'].fields['attributes_mask']
+
+ if tmp_results.fields['omci_message'].fields.get('data') is None:
+ tmp_results.fields['omci_message'].fields['data'] = dict()
+
+ tmp_results.fields['omci_message'].fields['data'][attr] = \
+ results.fields['omci_message'].fields['data'][attr]
+
+ except TimeoutError as e:
+ self.log.debug('attr-timeout')
+
+ except Exception as e:
+ self.log.exception('attr-failure', e=e)
+
+ self._results = tmp_results
+ self.deferred.callback(self)
diff --git a/python/adapters/extensions/omci/tasks/omci_modify_request.py b/python/adapters/extensions/omci/tasks/omci_modify_request.py
new file mode 100644
index 0000000..da7bff5
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/omci_modify_request.py
@@ -0,0 +1,171 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, returnValue
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import MEFrame
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_messages import OmciCreate, OmciSet, OmciDelete
+from voltha.extensions.omci.omci_entities import EntityClass
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class ModifyException(Exception):
+ pass
+
+
+class OmciModifyRequest(Task):
+ """
+ OpenOMCI Generic Create, Set, or Delete Frame support Task.
+
+ This task allows an ONU adapter to send a Create, Set, or Delete request from
+ any point in its code while properly using the OMCI-CC channel. Direct access
+ to the OMCI-CC object to send requests is highly discouraged.
+ """
+ task_priority = 128
+ name = "ONU OMCI Modify Task"
+
+ def __init__(self, omci_agent, device_id, frame, priority=task_priority, exclusive=False):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param frame: (OmciFrame) Frame to send
+ :param priority: (int) OpenOMCI Task priority (0..255) 255 is the highest
+ :param exclusive: (bool) True if this request Task exclusively owns the
+ OMCI-CC while running. Default: False
+ """
+ super(OmciModifyRequest, self).__init__(OmciModifyRequest.name,
+ omci_agent,
+ device_id,
+ priority=priority,
+ exclusive=exclusive)
+ self._device = omci_agent.get_device(device_id)
+ self._frame = frame
+ self._results = None
+ self._local_deferred = None
+
+ # Validate message type
+ self._msg_type = frame.fields['message_type']
+ if self._msg_type not in (OmciCreate.message_id, OmciSet.message_id, OmciDelete.message_id):
+ raise TypeError('Invalid Message type: {}, must be Create, Set, or Delete'.
+ format(self._msg_type))
+
+ def cancel_deferred(self):
+ super(OmciModifyRequest, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ @property
+ def success_code(self):
+ """
+ Return the OMCI success/reason code for the response.
+ """
+ if self._results is None:
+ return None
+
+ return self._results.fields['omci_message'].fields['success_code']
+
+ @property
+ def illegal_attributes_mask(self):
+ """
+ For Create & Set requests, a failure may indicate that one or more
+ attributes have an illegal value. This property returns the attribute
+ mask of any illegal attributes.
+
+ :return: None if not a create/set request, otherwise the attribute mask
+ of illegal attributes
+ """
+ if self._results is None:
+ return None
+
+ omci_msg = self._results.fields['omci_message'].fields
+
+ if self._msg_type == OmciCreate.message_id:
+ if self.success_code != RC.ParameterError.value:
+ return 0
+ return omci_msg['parameter_error_attributes_mask']
+
+ elif self._msg_type == OmciSet.message_id:
+ if self.success_code != RC.AttributeFailure.value:
+ return 0
+ return omci_msg['failed_attributes_mask']
+
+ return None
+
+ @property
+ def unsupported_attributes_mask(self):
+ """
+ For Set requests, a failure may indicate that one or more attributes
+ are not supported by this ONU. This property returns the mask of those
+ unsupported attributes.
+
+ :return: None if not a set request, otherwise the attribute mask of any
+ unsupported attributes
+ """
+ if self._msg_type != OmciSet.message_id or self._results is None:
+ return None
+
+ if self.success_code != RC.AttributeFailure.value:
+ return 0
+
+ return self._results.fields['omci_message'].fields['unsupported_attributes_mask']
+
+ @property
+ def raw_results(self):
+ """
+ Return the raw Response OMCIFrame
+ """
+ return self._results
+
+ def start(self):
+ """
+ Start the OMCI modify request task
+ """
+ super(OmciModifyRequest, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_omci)
+
+ @inlineCallbacks
+ def perform_omci(self):
+ """
+ Perform the request
+ """
+ self.log.debug('perform-request')
+
+ try:
+ self.strobe_watchdog()
+ self._results = yield self._device.omci_cc.send(self._frame)
+
+ status = self._results.fields['omci_message'].fields['success_code']
+ self.log.debug('response-status', status=status)
+
+ # Success?
+ if status in (RC.Success.value, RC.InstanceExists.value):
+ self.deferred.callback(self)
+ else:
+ raise ModifyException('Failed with status {}'.format(status))
+
+ except Exception as e:
+ self.log.exception('perform-modify', e=e)
+ self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/extensions/omci/tasks/omci_sw_image_upgrade_task.py b/python/adapters/extensions/omci/tasks/omci_sw_image_upgrade_task.py
new file mode 100644
index 0000000..5eaa87c
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/omci_sw_image_upgrade_task.py
@@ -0,0 +1,64 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from task import Task
+from twisted.internet import reactor
+from voltha.protos.voltha_pb2 import ImageDownload
+
+class OmciSwImageUpgradeTask(Task):
+ name = "OMCI Software Image Upgrade Task"
+
+
+ def __init__(self, img_id, omci_upgrade_sm_cls, omci_agent, image_download, clock=None):
+ super(OmciSwImageUpgradeTask, self).__init__(OmciSwImageUpgradeTask.name, omci_agent, image_download.id,
+ exclusive=False,
+ watchdog_timeout=45)
+ self.log.debug("OmciSwImageUpgradeTask create ", image_id=img_id)
+ self._image_id = img_id
+ self._omci_upgrade_sm_cls = omci_upgrade_sm_cls
+ # self._omci_agent = omci_agent
+ self._image_download = image_download
+ self.reactor = clock if clock is not None else reactor
+ self._omci_upgrade_sm = None
+ self.log.debug("OmciSwImageUpgradeTask create end", image_id=img_id)
+
+ @property
+ def status(self):
+ return self._image_download
+
+ def start(self):
+ self.log.debug("OmciSwImageUpgradeTask start")
+ super(OmciSwImageUpgradeTask, self).start()
+ if self._omci_upgrade_sm is None:
+ self._omci_upgrade_sm = self._omci_upgrade_sm_cls(self._image_id, self.omci_agent, self._image_download, clock=self.reactor)
+ d = self._omci_upgrade_sm.start()
+ d.chainDeferred(self.deferred)
+ #else:
+ # if restart:
+ # self._omci_upgrade_sm.reset_image()
+
+ def stop(self):
+ self.log.debug("OmciSwImageUpgradeTask stop")
+ if self._omci_upgrade_sm is not None:
+ self._omci_upgrade_sm.stop()
+ self._omci_upgrade_sm = None
+
+ def onu_bootup(self):
+ self.log.debug("onu_bootup", state=self._omci_upgrade_sm.status.image_state);
+ if self._omci_upgrade_sm is not None \
+ and self._omci_upgrade_sm.status.image_state == ImageDownload.IMAGE_ACTIVATE:
+ self._omci_upgrade_sm.do_commit()
+
diff --git a/python/adapters/extensions/omci/tasks/onu_capabilities_task.py b/python/adapters/extensions/omci/tasks/onu_capabilities_task.py
new file mode 100644
index 0000000..048382c
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/onu_capabilities_task.py
@@ -0,0 +1,282 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from binascii import hexlify
+from twisted.internet.defer import inlineCallbacks, failure, returnValue
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes
+from voltha.extensions.omci.omci_me import OmciFrame
+from voltha.extensions.omci.omci import EntityOperations
+
+
+class GetNextException(Exception):
+ pass
+
+
+class GetCapabilitiesFailure(Exception):
+ pass
+
+
+class OnuCapabilitiesTask(Task):
+ """
+ OpenOMCI MIB Capabilities Task
+
+ This task requests information on the supported MEs via the OMCI
+ (ME #287) managed entity.
+
+ This task should be run after MIB Synchronization and before any MIB
+ downloads to the ONU.
+
+ Upon completion, the Task deferred callback is invoked with dictionary
+ containing the supported managed entities and message types.
+
+ results = {
+ 'supported-managed-entities': {set of supported managed entities},
+ 'supported-message-types': {set of supported message types}
+ }
+ """
+ task_priority = 240
+ name = "ONU Capabilities Task"
+
+ max_mib_get_next_retries = 3
+ mib_get_next_delay = 5
+ DEFAULT_OCTETS_PER_MESSAGE = 29
+
+ def __init__(self, omci_agent, device_id, omci_pdu_size=DEFAULT_OCTETS_PER_MESSAGE):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param omci_pdu_size: (int) OMCI Data payload size (not counting any trailers)
+ """
+ super(OnuCapabilitiesTask, self).__init__(OnuCapabilitiesTask.name,
+ omci_agent,
+ device_id,
+ priority=OnuCapabilitiesTask.task_priority)
+ self._local_deferred = None
+ self._device = omci_agent.get_device(device_id)
+ self._pdu_size = omci_pdu_size
+ self._supported_entities = set()
+ self._supported_msg_types = set()
+
+ def cancel_deferred(self):
+ super(OnuCapabilitiesTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ @property
+ def supported_managed_entities(self):
+ """
+ Return a set of the Managed Entity class IDs supported on this ONU
+
+ None is returned if no MEs have been discovered
+
+ :return: (set of ints)
+ """
+ return frozenset(self._supported_entities) if len(self._supported_entities) else None
+
+ @property
+ def supported_message_types(self):
+ """
+ Return a set of the Message Types supported on this ONU
+
+ None is returned if no message types have been discovered
+
+ :return: (set of EntityOperations)
+ """
+ return frozenset(self._supported_msg_types) if len(self._supported_msg_types) else None
+
+ def start(self):
+ """
+ Start MIB Capabilities task
+ """
+ super(OnuCapabilitiesTask, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_get_capabilities)
+
+ def stop(self):
+ """
+ Shutdown MIB Capabilities task
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ self._device = None
+ super(OnuCapabilitiesTask, self).stop()
+
+ @inlineCallbacks
+ def perform_get_capabilities(self):
+ """
+ Perform the MIB Capabilities sequence.
+
+ The sequence is to perform a Get request with the attribute mask equal
+ to 'me_type_table'. The response to this request will carry the size
+ of (number of get-next sequences).
+
+ Then a loop is entered and get-next commands are sent for each sequence
+ requested.
+ """
+ self.log.debug('perform-get')
+
+ try:
+ self.strobe_watchdog()
+ self._supported_entities = yield self.get_supported_entities()
+
+ self.strobe_watchdog()
+ self._supported_msg_types = yield self.get_supported_message_types()
+
+ self.log.debug('get-success',
+ supported_entities=self.supported_managed_entities,
+ supported_msg_types=self.supported_message_types)
+ results = {
+ 'supported-managed-entities': self.supported_managed_entities,
+ 'supported-message-types': self.supported_message_types
+ }
+ self.deferred.callback(results)
+
+ except Exception as e:
+ self.log.exception('perform-get', e=e)
+ self.deferred.errback(failure.Failure(e))
+
+ def get_count_from_data_buffer(self, data):
+ """
+ Extract the 4 octet buffer length from the OMCI PDU contents
+ """
+ self.log.debug('get-count-buffer', data=hexlify(data))
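+ # Illustrative example: a payload starting with the octets 00 00 01 18
+ # yields int('00000118', 16) == 280.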
+ return int(hexlify(data[:4]), 16)
+
+ @inlineCallbacks
+ def get_supported_entities(self):
+ """
+ Get the supported ME Types for this ONU.
+ """
+ try:
+ # Get the number of requests needed
+ frame = OmciFrame(me_type_table=True).get()
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame)
+
+ omci_msg = results.fields['omci_message']
+ status = omci_msg.fields['success_code']
+
+ if status != ReasonCodes.Success.value:
+ raise GetCapabilitiesFailure('Get count of supported entities failed with status code: {}'.
+ format(status))
+ data = omci_msg.fields['data']['me_type_table']
+ count = self.get_count_from_data_buffer(bytearray(data))
+
+ seq_no = 0
+ data_buffer = bytearray(0)
+ self.log.debug('me-type-count', octets=count, data=hexlify(data))
+
+ # Start the loop
+ for offset in xrange(0, count, self._pdu_size):
+ frame = OmciFrame(me_type_table=seq_no).get_next()
+ seq_no += 1
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame)
+
+ omci_msg = results.fields['omci_message']
+ status = omci_msg.fields['success_code']
+
+ if status != ReasonCodes.Success.value:
+ raise GetCapabilitiesFailure(
+ 'Get supported entities request at offset {} of {} failed with status code: {}'.
+ format(offset + 1, count, status))
+
+ # Extract the data
+ num_octets = count - offset
+ if num_octets > self._pdu_size:
+ num_octets = self._pdu_size
+
+ data = omci_msg.fields['data']['me_type_table']
+ data_buffer += bytearray(data[:num_octets])
+
+ me_types = {(data_buffer[x] << 8) + data_buffer[x + 1]
+ for x in xrange(0, len(data_buffer), 2)}
+ returnValue(me_types)
+
+ except Exception as e:
+ self.log.exception('get-entities', e=e)
+ self.deferred.errback(failure.Failure(e))
+
+ @inlineCallbacks
+ def get_supported_message_types(self):
+ """
+ Get the supported Message Types (actions) for this ONU.
+ """
+ try:
+ # Get the number of requests needed
+ frame = OmciFrame(message_type_table=True).get()
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame)
+
+ omci_msg = results.fields['omci_message']
+ status = omci_msg.fields['success_code']
+
+ if status != ReasonCodes.Success.value:
+ raise GetCapabilitiesFailure('Get count of supported msg types failed with status code: {}'.
+ format(status))
+
+ data = omci_msg.fields['data']['message_type_table']
+ count = self.get_count_from_data_buffer(bytearray(data))
+
+ seq_no = 0
+ data_buffer = list()
+ self.log.debug('me-type-count', octets=count, data=hexlify(data))
+
+ # Start the loop
+ for offset in xrange(0, count, self._pdu_size):
+ frame = OmciFrame(message_type_table=seq_no).get_next()
+ seq_no += 1
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame)
+
+ omci_msg = results.fields['omci_message']
+ status = omci_msg.fields['success_code']
+
+ if status != ReasonCodes.Success.value:
+ raise GetCapabilitiesFailure(
+ 'Get supported msg types request at offset {} of {} failed with status code: {}'.
+ format(offset + 1, count, status))
+
+ # Extract the data
+ num_octets = count - offset
+ if num_octets > self._pdu_size:
+ num_octets = self._pdu_size
+
+ data = omci_msg.fields['data']['message_type_table']
+ data_buffer += data[:num_octets]
+
+ def buffer_to_message_type(value):
+ """
+ Convert an integer value to the appropriate EntityOperations enumeration
+ :param value: (int) Message type value (4..29)
+ :return: (EntityOperations) Enumeration, None on failure
+ """
+ return next((v for k, v in EntityOperations.__members__.items() if v.value == value), None)
+
+ msg_types = {buffer_to_message_type(v) for v in data_buffer if v is not None}
+ returnValue({msg_type for msg_type in msg_types if msg_type is not None})
+
+ except Exception as e:
+ self.log.exception('get-msg-types', e=e)
+ self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/extensions/omci/tasks/reboot_task.py b/python/adapters/extensions/omci/tasks/reboot_task.py
new file mode 100644
index 0000000..316e23b
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/reboot_task.py
@@ -0,0 +1,125 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from enum import IntEnum
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, TimeoutError
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import OntGFrame
+from voltha.extensions.omci.omci_cc import DEFAULT_OMCI_TIMEOUT
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class RebootException(Exception):
+ pass
+
+
+class DeviceBusy(Exception):
+ pass
+
+
+class RebootFlags(IntEnum):
+ Reboot_Unconditionally = 0,
+ Reboot_If_No_POTS_VoIP_In_Progress = 1,
+ Reboot_If_No_Emergency_Call_In_Progress = 2
+
+
+class OmciRebootRequest(Task):
+ """
+ OpenOMCI routine to request reboot of an ONU
+ """
+ task_priority = Task.MAX_PRIORITY
+ name = "ONU OMCI Reboot Task"
+ # adopt the global default
+ DEFAULT_REBOOT_TIMEOUT = DEFAULT_OMCI_TIMEOUT
+
+ def __init__(self, omci_agent, device_id,
+ flags=RebootFlags.Reboot_Unconditionally,
+ timeout=DEFAULT_REBOOT_TIMEOUT):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param flags: (RebootFlags) Reboot condition
+ :param timeout: (int or float) Seconds to wait for the reboot response. Default: DEFAULT_REBOOT_TIMEOUT
+ """
+ super(OmciRebootRequest, self).__init__(OmciRebootRequest.name,
+ omci_agent,
+ device_id,
+ priority=OmciRebootRequest.task_priority,
+ exclusive=True)
+ self._device = omci_agent.get_device(device_id)
+ self._flags = flags
+ self._timeout = timeout
+ self._local_deferred = None
+
+ def cancel_deferred(self):
+ super(OmciRebootRequest, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """ Start task """
+ super(OmciRebootRequest, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_reboot)
+
+ @inlineCallbacks
+ def perform_reboot(self):
+ """
+ Perform the reboot requests
+
+ Depending on the ONU implementation, a response may not be returned. For this
+ reason, a timeout is considered successful.
+ """
+ self.log.info('perform-reboot')
+
+ try:
+ frame = OntGFrame().reboot(reboot_code=self._flags)
+ self.strobe_watchdog()
+ results = yield self._device.omci_cc.send(frame, timeout=self._timeout)
+
+ status = results.fields['omci_message'].fields['success_code']
+ self.log.debug('reboot-status', status=status)
+
+ # Did it fail
+ if status != RC.Success.value:
+ if self._flags != RebootFlags.Reboot_Unconditionally and\
+ status == RC.DeviceBusy.value:
+ raise DeviceBusy('ONU is busy, try again later')
+ else:
+ msg = 'Reboot request failed with status {}'.format(status)
+ raise RebootException(msg)
+
+ self.log.info('reboot-success')
+ self.deferred.callback(self)
+
+ except TimeoutError:
+ self.log.info('timeout', msg='Request timeout is not considered an error')
+ self.deferred.callback(None)
+
+ except DeviceBusy as e:
+ self.log.warn('perform-reboot', msg=e)
+ self.deferred.errback(failure.Failure(e))
+
+ except Exception as e:
+ self.log.exception('perform-reboot', e=e)
+ self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/extensions/omci/tasks/sync_time_task.py b/python/adapters/extensions/omci/tasks/sync_time_task.py
new file mode 100644
index 0000000..b5b1dc9
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/sync_time_task.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
+from voltha.extensions.omci.omci_me import OntGFrame
+from voltha.extensions.omci.omci_defs import ReasonCodes as RC
+from datetime import datetime
+
+
+class SyncTimeTask(Task):
+ """
+ OpenOMCI - Synchronize the ONU time with the server
+ """
+ task_priority = Task.DEFAULT_PRIORITY + 10
+ name = "Sync Time Task"
+
+ def __init__(self, omci_agent, device_id, use_utc=True):
+ """
+ Class initialization
+
+ :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param use_utc: (bool) Use UTC time if True, otherwise local time
+ """
+ super(SyncTimeTask, self).__init__(SyncTimeTask.name,
+ omci_agent,
+ device_id,
+ priority=SyncTimeTask.task_priority,
+ exclusive=False)
+ self._local_deferred = None
+ self._use_utc = use_utc
+
+ def cancel_deferred(self):
+ super(SyncTimeTask, self).cancel_deferred()
+
+ d, self._local_deferred = self._local_deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start the tasks
+ """
+ super(SyncTimeTask, self).start()
+ self._local_deferred = reactor.callLater(0, self.perform_sync_time)
+
+ def stop(self):
+ """
+ Shutdown the tasks
+ """
+ self.log.debug('stopping')
+
+ self.cancel_deferred()
+ super(SyncTimeTask, self).stop()
+
+ @inlineCallbacks
+ def perform_sync_time(self):
+ """
+ Sync the time
+ """
+ self.log.debug('perform-sync-time')
+
+ try:
+ device = self.omci_agent.get_device(self.device_id)
+
+ #########################################
+ # ONT-G (ME #256)
+ dt = datetime.utcnow() if self._use_utc else datetime.now()
+
+ results = yield device.omci_cc.send(OntGFrame().synchronize_time(dt))
+
+ omci_msg = results.fields['omci_message'].fields
+ status = omci_msg['success_code']
+ self.log.debug('sync-time', status=status)
+
+ if status == RC.Success.value:
+ self.log.info('sync-time', success_info=omci_msg['success_info'] & 0x0f)
+
+ assert status == RC.Success.value, 'Unexpected Response Status: {}'.format(status)
+
+ # Successful if here
+ self.deferred.callback(results)
+
+ except TimeoutError as e:
+ self.log.warn('sync-time-timeout', e=e)
+ self.deferred.errback(failure.Failure(e))
+
+ except Exception as e:
+ self.log.exception('sync-time', e=e)
+ self.deferred.errback(failure.Failure(e))
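A hedged usage sketch for the task above (not part of the patch): 'omci_agent',
'device_id' and 'task_runner' are assumed to be provided by the surrounding ONU
handler, with 'task_runner' an instance of the TaskRunner introduced later in this
change.

    task = SyncTimeTask(omci_agent, device_id, use_utc=True)
    d = task_runner.queue_task(task)
    d.addCallbacks(
        lambda results: task.log.info('onu-time-synchronized'),
        lambda reason: task.log.warn('onu-time-sync-failed', reason=reason))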
diff --git a/python/adapters/extensions/omci/tasks/task.py b/python/adapters/extensions/omci/tasks/task.py
new file mode 100644
index 0000000..36020c0
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/task.py
@@ -0,0 +1,188 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from twisted.internet import defer, reactor
+from twisted.internet.defer import failure
+
+
+class WatchdogTimeoutFailure(Exception):
+ """Task callback/errback not called properly before watchdog expiration"""
+ pass
+
+
+class Task(object):
+ """
+ OpenOMCI Base Task implementation
+
+    An OMCI task can be one or more OMCI requests, comparisons, or whatever
+    else is needed to perform a specific unit of work that must be run to
+    completion successfully.
+
+    On successful completion, the task should call the 'callback' method of
+ the deferred and pass back whatever is meaningful to the user/state-machine
+ that launched it.
+
+ On failure, the 'errback' routine should be called with an appropriate
+ Failure object.
+ """
+ DEFAULT_PRIORITY = 128
+ MIN_PRIORITY = 0
+ MAX_PRIORITY = 255
+ DEFAULT_WATCHDOG_SECS = 10 # 10 seconds
+ MIN_WATCHDOG_SECS = 3 # 3 seconds
+ MAX_WATCHDOG_SECS = 60 # 60 seconds
+
+ _next_task_id = 0
+
+ def __init__(self, name, omci_agent, device_id, priority=DEFAULT_PRIORITY,
+ exclusive=True, watchdog_timeout=DEFAULT_WATCHDOG_SECS):
+ """
+ Class initialization
+
+        :param name: (str) Task Name
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+ :param device_id: (str) ONU Device ID
+ :param priority: (int) Task priority (0..255) 255 Highest
+ :param exclusive: (bool) If True, this task needs exclusive access to the
+ OMCI Communications channel when it runs
+        :param watchdog_timeout: (int or float) Watchdog timeout (seconds), measured from
+                                 task start. To run longer, periodically call
+                                 'strobe_watchdog()' to reschedule the timer.
+ """
+ assert Task.MIN_PRIORITY <= priority <= Task.MAX_PRIORITY, \
+ 'Priority should be {}..{}'.format(Task.MIN_PRIORITY, Task.MAX_PRIORITY)
+
+        assert Task.MIN_WATCHDOG_SECS <= watchdog_timeout <= Task.MAX_WATCHDOG_SECS, \
+            'Watchdog timeout should be {}..{} seconds'.format(Task.MIN_WATCHDOG_SECS,
+                                                               Task.MAX_WATCHDOG_SECS)
+
+ Task._next_task_id += 1
+ self._task_id = Task._next_task_id
+ self.log = structlog.get_logger(device_id=device_id, name=name,
+ task_id=self._task_id)
+ self.name = name
+ self.device_id = device_id
+ self.omci_agent = omci_agent
+ self._running = False
+ self._exclusive = exclusive
+ self._deferred = defer.Deferred() # Fires upon completion
+ self._watchdog = None
+ self._watchdog_timeout = watchdog_timeout
+ self._priority = priority
+
+ def __str__(self):
+ return 'Task: {}, ID:{}, Priority: {}, Exclusive: {}, Watchdog: {}'.format(
+ self.name, self.task_id, self.priority, self.exclusive, self.watchdog_timeout)
+
+ @property
+ def priority(self):
+ return self._priority
+
+ @property
+ def task_id(self):
+ return self._task_id
+
+ @property
+ def exclusive(self):
+ return self._exclusive
+
+ @property
+ def watchdog_timeout(self):
+ return self._watchdog_timeout
+
+ @property
+ def deferred(self):
+ return self._deferred
+
+ @property
+ def running(self):
+ # Is the Task running?
+ #
+ # Can be useful for tasks that use inline callbacks to detect
+ # if the task has been canceled.
+ #
+ return self._running
+
+ def cancel_deferred(self):
+ d1, self._deferred = self._deferred, None
+ d2, self._watchdog = self._watchdog, None
+
+ for d in [d1, d2]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def start(self):
+ """
+ Start task operations
+ """
+ self.log.debug('starting')
+ assert self._deferred is not None and not self._deferred.called, \
+ 'Cannot re-use the same task'
+ self._running = True
+ self.strobe_watchdog()
+
+ def stop(self):
+ """
+        Stop task operations
+ """
+ self.log.debug('stopping')
+ self._running = False
+ self.cancel_deferred()
+ self.omci_agent = None # Should only start/stop once
+
+ def task_cleanup(self):
+ """
+ This method should only be called from the TaskRunner's callback/errback
+ that is added when the task is initially queued. It is responsible for
+        clearing the 'running' flag and canceling the watchdog timer.
+ """
+ self._running = False
+ d, self._watchdog = self._watchdog, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def strobe_watchdog(self):
+ """
+ Signal that we have not hung/deadlocked
+ """
+ # Create if first time (called at Task start)
+
+ def watchdog_timeout():
+ # Task may have hung (blocked) or failed to call proper success/error
+ # completion callback/errback
+ if not self.deferred.called:
+ err_msg = 'Task {}:{} watchdog timeout'.format(self.name, self.task_id)
+ self.log.error("task-watchdog-timeout", running=self.running,
+ timeout=self.watchdog_timeout, error=err_msg)
+
+ self.deferred.errback(failure.Failure(WatchdogTimeoutFailure(err_msg)))
+ self.deferred.cancel()
+
+ if self._watchdog is not None:
+ if self._watchdog.called:
+ # Too late, timeout failure in progress
+ self.log.warn('task-watchdog-tripped', running=self.running,
+ timeout=self.watchdog_timeout)
+ return
+
+ d, self._watchdog = self._watchdog, None
+ d.cancel()
+
+ # Schedule/re-schedule the watchdog timer
+ self._watchdog = reactor.callLater(self.watchdog_timeout, watchdog_timeout)
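To make the base-class contract concrete, here is a hedged sketch of a minimal
subclass (illustrative only; 'NoOpTask' is not part of this patch). It follows the
pattern described in the docstring above: kick off work from start(), strobe the
watchdog while working, and finish by firing the deferred exactly once via callback
or errback.

    from twisted.internet import reactor
    from twisted.internet.defer import failure

    class NoOpTask(Task):
        name = "No-Op Task"

        def __init__(self, omci_agent, device_id):
            super(NoOpTask, self).__init__(NoOpTask.name, omci_agent, device_id,
                                           priority=Task.DEFAULT_PRIORITY,
                                           exclusive=False)
            self._local_deferred = None

        def cancel_deferred(self):
            super(NoOpTask, self).cancel_deferred()
            d, self._local_deferred = self._local_deferred, None
            try:
                if d is not None and not d.called:
                    d.cancel()
            except Exception:
                pass

        def start(self):
            super(NoOpTask, self).start()
            self._local_deferred = reactor.callLater(0, self.perform_work)

        def perform_work(self):
            try:
                self.strobe_watchdog()                   # reschedule the watchdog during long work
                self.deferred.callback('no-op complete') # success: hand a result to the launcher
            except Exception as e:
                self.deferred.errback(failure.Failure(e))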
diff --git a/python/adapters/extensions/omci/tasks/task_runner.py b/python/adapters/extensions/omci/tasks/task_runner.py
new file mode 100644
index 0000000..eb7a252
--- /dev/null
+++ b/python/adapters/extensions/omci/tasks/task_runner.py
@@ -0,0 +1,285 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from twisted.internet import reactor
+
+
+class TaskRunner(object):
+ """
+ Control the number of running tasks utilizing the OMCI Communications
+    channel (OMCI_CC).
+ """
+ def __init__(self, device_id, clock=None):
+ self.log = structlog.get_logger(device_id=device_id)
+ self._pending_queue = dict() # task-priority -> [tasks]
+ self._running_queue = dict() # task-id -> task
+ self._active = False
+
+ self._successful_tasks = 0
+ self._failed_tasks = 0
+ self._watchdog_timeouts = 0
+ self._last_watchdog_failure_task = ''
+ self.reactor = clock if clock is not None else reactor
+
+ def __str__(self):
+ return 'TaskRunner: Pending: {}, Running:{}'.format(self.pending_tasks,
+ self.running_tasks)
+
+ @property
+ def active(self):
+ return self._active
+
+ @property
+ def pending_tasks(self):
+ """
+ Get the number of tasks pending to run
+ """
+ count = 0
+ for tasks in self._pending_queue.itervalues():
+ count += len(tasks)
+ return count
+
+ @property
+ def running_tasks(self):
+ """
+ Get the number of tasks currently running
+ """
+ return len(self._running_queue)
+
+ @property
+ def successful_tasks_completed(self):
+ return self._successful_tasks
+
+ @property
+ def failed_tasks(self):
+ return self._failed_tasks
+
+ @property
+ def watchdog_timeouts(self):
+ return self._watchdog_timeouts
+
+ @property
+ def last_watchdog_failure_task(self):
+ """ Task name of last tasks to fail due to watchdog"""
+ return self._last_watchdog_failure_task
+
+ # TODO: add properties for various stats as needed
+
+ def start(self):
+ """
+ Start the Task runner
+ """
+ self.log.debug('starting', active=self._active)
+
+ if not self._active:
+ assert len(self._running_queue) == 0, 'Running task queue not empty'
+ self._active = True
+ self._run_next_task()
+
+ def stop(self):
+ """
+ Stop the Task runner, first stopping any tasks and flushing the queue
+ """
+ self.log.debug('stopping', active=self._active)
+
+ if self._active:
+ self._active = False
+
+ pq, self._pending_queue = self._pending_queue, dict()
+ rq, self._running_queue = self._running_queue, dict()
+
+ # Stop running tasks
+ for task in rq.itervalues():
+ try:
+ task.stop()
+ except:
+ pass
+
+            # Kill pending tasks by cancelling their deferreds (pq maps
+            # priority -> list of tasks, so iterate over the task lists)
+            for tasks in pq.itervalues():
+                for task in tasks:
+                    try:
+                        task.deferred.cancel()
+                    except:
+                        pass
+
+ def _run_next_task(self):
+ """
+        Search for the next task to run, if one can be started
+ :return:
+ """
+ self.log.debug('run-next', active=self._active,
+ num_running=len(self._running_queue),
+ num_pending=len(self._pending_queue))
+
+ if self._active and len(self._pending_queue) > 0:
+ # Cannot run a new task if a running one needs the OMCI_CC exclusively
+
+ if any(task.exclusive for task in self._running_queue.itervalues()):
+ self.log.debug('exclusive-running')
+ return # An exclusive task is already running
+
+ try:
+ priorities = [k for k in self._pending_queue.iterkeys()]
+ priorities.sort(reverse=True)
+ highest_priority = priorities[0] if len(priorities) else None
+
+ if highest_priority is not None:
+ queue = self._pending_queue[highest_priority]
+ next_task = queue[0] if len(queue) else None
+
+ if next_task is not None:
+ if next_task.exclusive and len(self._running_queue) > 0:
+ self.log.debug('next-is-exclusive', task=str(next_task))
+ return # Next task to run needs exclusive access
+
+ queue.pop(0)
+ if len(queue) == 0:
+ del self._pending_queue[highest_priority]
+
+ self.log.debug('starting-task', task=str(next_task),
+ running=len(self._running_queue),
+ pending=len(self._pending_queue))
+
+ self._running_queue[next_task.task_id] = next_task
+ self.reactor.callLater(0, next_task.start)
+
+ # Run again if others are waiting
+ if len(self._pending_queue):
+ self._run_next_task()
+
+ except Exception as e:
+ self.log.exception('run-next', e=e)
+
+ def _on_task_success(self, results, task):
+ """
+ A task completed successfully callback
+ :param results: deferred results
+ :param task: (Task) The task that succeeded
+ :return: deferred results
+ """
+ self.log.debug('task-success', task_id=str(task),
+ running=len(self._running_queue),
+ pending=len(self._pending_queue))
+ try:
+ assert task is not None and task.task_id in self._running_queue,\
+ 'Task not found in running queue'
+
+ task.task_cleanup()
+ self._successful_tasks += 1
+ del self._running_queue[task.task_id]
+
+ except Exception as e:
+ self.log.exception('task-error', task=str(task), e=e)
+
+ finally:
+            self.reactor.callLater(0, self._run_next_task)
+
+ return results
+
+ def _on_task_failure(self, failure, task):
+ """
+ A task completed with failure callback
+ :param failure: (Failure) Failure results
+ :param task: (Task) The task that failed
+ :return: (Failure) Failure results
+ """
+ from voltha.extensions.omci.tasks.task import WatchdogTimeoutFailure
+
+ self.log.debug('task-failure', task_id=str(task),
+ running=len(self._running_queue),
+ pending=len(self._pending_queue))
+ try:
+ assert task is not None and task.task_id in self._running_queue,\
+ 'Task not found in running queue'
+
+ task.task_cleanup()
+ self._failed_tasks += 1
+ del self._running_queue[task.task_id]
+
+ if isinstance(failure.value, WatchdogTimeoutFailure):
+ self._watchdog_timeouts += 1
+ self._last_watchdog_failure_task = task.name
+
+ except Exception as e:
+            # Task was not in the running queue; it may have failed (or been
+            # cancelled) while still pending, so check the pending queue
+
+ for priority, tasks in self._pending_queue.iteritems():
+ found = next((t for t in tasks if t.task_id == task.task_id), None)
+
+ if found is not None:
+ self._pending_queue[task.priority].remove(task)
+ if len(self._pending_queue[task.priority]) == 0:
+ del self._pending_queue[task.priority]
+ return failure
+
+ self.log.exception('task-error', task=str(task), e=e)
+ raise
+
+ finally:
+            self.reactor.callLater(0, self._run_next_task)
+
+ return failure
+
+ def queue_task(self, task):
+ """
+ Place a task on the queue to run
+
+ :param task: (Task) task to run
+ :return: (deferred) Deferred that will fire on task completion
+ """
+ self.log.debug('queue-task', active=self._active, task=str(task),
+ running=len(self._running_queue),
+ pending=len(self._pending_queue))
+
+ if task.priority not in self._pending_queue:
+ self._pending_queue[task.priority] = []
+
+ task.deferred.addCallbacks(self._on_task_success, self._on_task_failure,
+ callbackArgs=[task], errbackArgs=[task])
+
+ self._pending_queue[task.priority].append(task)
+ self._run_next_task()
+
+ return task.deferred
+
+ def cancel_task(self, task_id):
+ """
+        Cancel a pending or running task. A running task is stopped; a pending
+        task has its deferred's 'cancel' method called.
+
+ :param task_id: (int) Task identifier
+ """
+ task = self._running_queue.get(task_id, None)
+
+ if task is not None:
+ try:
+ task.stop()
+ except Exception as e:
+ self.log.exception('stop-error', task=str(task), e=e)
+
+            self.reactor.callLater(0, self._run_next_task)
+
+ else:
+ for priority, tasks in self._pending_queue.iteritems():
+ task = next((t for t in tasks if t.task_id == task_id), None)
+
+ if task is not None:
+ try:
+ task.deferred.cancel()
+ except Exception as e:
+ self.log.exception('cancel-error', task=str(task), e=e)
+ return
+
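A hedged end-to-end sketch of driving the runner above (illustrative only):
'OmciVendorGetTask' is a hypothetical non-exclusive task, and 'log', 'omci_agent'
and 'device_id' are assumed to exist in the calling handler. Higher-priority pending
tasks are dispatched first, and a task marked exclusive will not be started while
anything else is running on the OMCI_CC.

    runner = TaskRunner(device_id)
    runner.start()

    # SyncTimeTask runs at DEFAULT_PRIORITY + 10, so when both are pending it
    # is dispatched ahead of a default-priority task queued at the same time.
    d1 = runner.queue_task(SyncTimeTask(omci_agent, device_id))
    d2 = runner.queue_task(OmciVendorGetTask(omci_agent, device_id))

    d1.addErrback(lambda reason: log.warn('sync-time-failed', reason=reason))
    d2.addErrback(lambda reason: log.warn('vendor-get-failed', reason=reason))

    # On device disable/delete: stop any running tasks and flush the pending queue
    runner.stop()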