Complete xPON support and refactor common classes shared between the Adtran OLT and ONU adapters
Change-Id: I7cddafd21324ab9029e28db2c60397ec550dd912
diff --git a/voltha/adapters/adtran_olt/README.md b/voltha/adapters/adtran_olt/README.md
index 2205d81..891df93 100644
--- a/voltha/adapters/adtran_olt/README.md
+++ b/voltha/adapters/adtran_olt/README.md
@@ -95,3 +95,11 @@
```bash
curl -k -s https://${VOLTHA_IP}:${REST_PORT}/api/v1/local/devices | json_pp
```
+
+# Tested OLT Device Driver versions
+
+The minimum version of the OLT software is: **11971320F1-ML-2287**
+The specific PON-Agent version number is: **ngpon2_agent-4.0.37-1.545.702565**
+
+At this time, the version numbers above are also the latest ones tested. Work on validating
+newer releases is currently underway.
diff --git a/voltha/adapters/adtran_olt/adapter_pm_metrics.py b/voltha/adapters/adtran_olt/adapter_pm_metrics.py
deleted file mode 100644
index dc74877..0000000
--- a/voltha/adapters/adtran_olt/adapter_pm_metrics.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2017-present Adtran, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import structlog
-from twisted.internet.task import LoopingCall
-# from voltha.protos import ponsim_pb2
-from voltha.protos.device_pb2 import PmConfig, PmConfigs
-from google.protobuf.empty_pb2 import Empty
-
-
-class AdapterPmMetrics:
- def __init__(self, adapter, device):
- # self.pm_names = {'tx_64_pkts', 'tx_65_127_pkts', 'tx_128_255_pkts',
- # 'tx_256_511_pkts', 'tx_512_1023_pkts',
- # 'tx_1024_1518_pkts', 'tx_1519_9k_pkts',
- # 'rx_64_pkts', 'rx_65_127_pkts',
- # 'rx_128_255_pkts', 'rx_256_511_pkts',
- # 'rx_512_1023_pkts', 'rx_1024_1518_pkts',
- # 'rx_1519_9k_pkts'}
- self.pm_names = {'rx_frames', 'tx_frames'}
- self.log = structlog.get_logger(device_id=device.id)
- self.device = device
- self.id = device.id
- self.name = adapter.name
- self.default_freq = 150
- self.grouped = False
- self.freq_override = False
- self.pon_metrics_config = dict()
- self.nni_metrics_config = dict()
- self.lc = None
-
- for m in self.pm_names:
- self.pon_metrics_config[m] = PmConfig(name=m, type=PmConfig.COUNTER,
- enabled=True)
- self.nni_metrics_config[m] = PmConfig(name=m, type=PmConfig.COUNTER,
- enabled=True)
-
- def update(self, pm_config):
- if self.default_freq != pm_config.default_freq:
- # Update the callback to the new frequency.
- self.default_freq = pm_config.default_freq
- self.lc.stop()
- self.lc.start(interval=self.default_freq / 10)
-
- for m in pm_config.metrics:
- self.pon_metrics_config[m.name].enabled = m.enabled
- self.nni_metrics_config[m.name].enabled = m.enabled
-
- def make_proto(self):
- pm_config = PmConfigs(id=self.id, default_freq=self.default_freq,
- grouped=False, freq_override=False)
-
- for m in sorted(self.pon_metrics_config):
- pm = self.pon_metrics_config[m] # Either will do they're the same
- pm_config.metrics.extend([PmConfig(name=pm.name, type=pm.type,
- enabled=pm.enabled)])
- return pm_config
-
- def collect_port_metrics(self):
- port_metrics = dict()
- # TODO: Implement
- stats = {}
- port_metrics['pon'] = self.extract_pon_metrics(stats, 100)
- port_metrics['nni'] = self.extract_nni_metrics(stats, 200)
- return port_metrics
-
- def extract_pon_metrics(self, stats, fake_value):
- return {
- 'rx_frames': fake_value,
- 'tx_frames': fake_value
- }
- # rtrn_pon_metrics = dict()
- #
- # for m in stats.metrics:
- # if m.port_name == "pon":
- # for p in m.packets:
- # if self.pon_metrics_config[p.name].enabled:
- # rtrn_pon_metrics[p.name] = p.value
- # return rtrn_pon_metrics
-
- def extract_nni_metrics(self, stats, fake_value):
- return {
- 'rx_frames': fake_value,
- 'tx_frames': fake_value
- }
- # rtrn_pon_metrics = dict()
- # for m in stats.metrics:
- # if m.port_name == "nni":
- # for p in m.packets:
- # if self.pon_metrics_config[p.name].enabled:
- # rtrn_pon_metrics[p.name] = p.value
- # return rtrn_pon_metrics
-
- def start_collector(self, callback):
- self.log.info("starting-pm-collection", device_name=self.name,
- device_id=self.device.id)
- prefix = 'voltha.{}.{}'.format(self.name, self.device.id)
- self.lc = LoopingCall(callback, self.device.id, prefix)
- self.lc.start(interval=self.default_freq / 10)
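The per-adapter metrics class deleted above is superseded by the shared `OltPmMetrics` class, imported from `pki.olt_pm_metrics` in `adtran_device_handler.py` below. Based only on the call sites visible in this change, a sketch of how the replacement is wired up during activation looks roughly like the following (the internals of `OltPmMetrics` are not part of this excerpt, so the constructor and method names shown are taken from those call sites):

```python
# Sketch: mirrors the PM-setup call sites added to activate() in
# adtran_device_handler.py below. OltPmMetrics comes from this change's
# new pki/olt_pm_metrics.py, which is not shown in this excerpt.
from pki.olt_pm_metrics import OltPmMetrics

def setup_pm(handler, device, adapter_agent):
    # Grouped PM metrics for the OLT, no per-group frequency override,
    # exactly as constructed in activate().
    pm_metrics = OltPmMetrics(handler, device, grouped=True, freq_override=False)

    # Build the initial PmConfigs protobuf and push it to the core.
    pm_config = pm_metrics.make_proto()
    adapter_agent.update_device_pm_config(pm_config, init=True)

    # Later, start_kpi_collection() polls pm_metrics.collect_port_metrics()
    # and submits the results as KPI events.
    return pm_metrics
```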
diff --git a/voltha/adapters/adtran_olt/adtran_device_handler.py b/voltha/adapters/adtran_olt/adtran_device_handler.py
index 295fb32..59ed01b 100644
--- a/voltha/adapters/adtran_olt/adtran_device_handler.py
+++ b/voltha/adapters/adtran_olt/adtran_device_handler.py
@@ -31,16 +31,14 @@
from voltha.adapters.adtran_olt.net.adtran_rest import AdtranRestClient
from voltha.protos import third_party
from voltha.protos.common_pb2 import OperStatus, AdminState, ConnectStatus
-from voltha.protos.events_pb2 import AlarmEventType, \
- AlarmEventSeverity, AlarmEventState, AlarmEventCategory
from voltha.protos.device_pb2 import Image
from voltha.protos.logical_device_pb2 import LogicalDevice
from voltha.protos.openflow_13_pb2 import ofp_desc, ofp_switch_features, OFPC_PORT_STATS, \
OFPC_GROUP_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS
from voltha.registry import registry
-from adapter_alarms import AdapterAlarms
+from alarms.adapter_alarms import AdapterAlarms
from common.frameio.frameio import BpfProgramFilter, hexify
-from adapter_pm_metrics import AdapterPmMetrics
+from pki.olt_pm_metrics import OltPmMetrics
from common.utils.asleep import asleep
from scapy.layers.l2 import Ether, Dot1Q
from scapy.layers.inet import Raw
@@ -92,9 +90,15 @@
# RPC XML shortcuts
RESTART_RPC = '<system-restart xmlns="urn:ietf:params:xml:ns:yang:ietf-system"/>'
- def __init__(self, adapter, device_id, timeout=20):
+ def __init__(self, **kwargs):
from net.adtran_zmq import DEFAULT_ZEROMQ_OMCI_TCP_PORT
+ super(AdtranDeviceHandler, self).__init__()
+
+ adapter = kwargs['adapter']
+ device_id = kwargs['device-id']
+ timeout = kwargs.get('timeout', 20)
+
self.adapter = adapter
self.adapter_agent = adapter.adapter_agent
self.device_id = device_id
@@ -108,6 +112,7 @@
self.alarms = None
self.packet_in_vlan = DEFAULT_PACKET_IN_VLAN
self.multicast_vlans = [DEFAULT_MULTICAST_VLAN]
+ self.default_mac_addr = '00:13:95:00:00:00'
# Northbound and Southbound ports
self.northbound_ports = {} # port number -> Port
@@ -158,8 +163,9 @@
self.heartbeat = None
self.heartbeat_last_reason = ''
- # Virtualized OLT Support
+ # Virtualized OLT Support & async command support
self.is_virtual_olt = False
+ self.is_async_control = False
# Installed flows
self._evcs = {} # Flow ID/name -> FlowEntry
@@ -278,6 +284,19 @@
self._autoactivate = args.autoactivate
+ if not self.rest_username:
+ self.rest_username = 'NDE0NDRkNDk0ZQ==\n'.\
+ decode('base64').decode('hex')
+ if not self.rest_password:
+ self.rest_password = 'NTA0MTUzNTM1NzRmNTI0NA==\n'.\
+ decode('base64').decode('hex')
+ if not self.netconf_username:
+ self.netconf_username = 'Njg3Mzc2NzI2ZjZmNzQ=\n'.\
+ decode('base64').decode('hex')
+ if not self.netconf_password:
+ self.netconf_password = 'NDI0ZjUzNDM0Zg==\n'.\
+ decode('base64').decode('hex')
+
except argparse.ArgumentError as e:
self.activate_failed(device,
'Invalid arguments: {}'.format(e.message),
@@ -297,226 +316,242 @@
return self._autoactivate
@inlineCallbacks
- def activate(self, device, reconciling=False):
+ def activate(self, device, done_deferred=None, reconciling=False):
"""
Activate the OLT device
:param device: A voltha.Device object, with possible device-type
specific extensions.
+ :param done_deferred: (Deferred) Deferred to fire when done
:param reconciling: If True, this adapter is taking over for a previous adapter
for an existing OLT
"""
self.log.info('AdtranDeviceHandler.activating', reconciling=reconciling)
if self.logical_device_id is None:
- # Parse our command line options for this device
- self.parse_provisioning_options(device)
-
- ############################################################################
- # Start initial discovery of RESTCONF support (if any)
-
try:
- self.startup = self.make_restconf_connection()
- results = yield self.startup
- self.log.debug('HELLO_Contents: {}'.format(pprint.PrettyPrinter().pformat(results)))
+ # Parse our command line options for this device
+ self.parse_provisioning_options(device)
- # See if this is a virtualized OLT. If so, no NETCONF support available
+ ############################################################################
+ # Start initial discovery of RESTCONF support (if any)
- self.is_virtual_olt = 'module-info' in results and\
- any(mod.get('module-name', None) == 'adtran-ont-mock'
- for mod in results['module-info'])
-
- except Exception as e:
- self.log.exception('Initial_RESTCONF_hello_failed', e=e)
- self.activate_failed(device, e.message, reachable=False)
-
- ############################################################################
- # Start initial discovery of NETCONF support (if any)
-
- try:
- self.startup = self.make_netconf_connection()
- yield self.startup
-
- except Exception as e:
- self.log.exception('NETCONF_connection_failed', e=e)
- self.activate_failed(device, e.message, reachable=False)
-
- ############################################################################
- # Get the device Information
-
- if reconciling:
- device.connect_status = ConnectStatus.REACHABLE
- self.adapter_agent.update_device(device)
- else:
try:
- self.startup = self.get_device_info(device)
+ self.startup = self.make_restconf_connection()
results = yield self.startup
+ self.log.debug('HELLO_Contents: {}'.format(pprint.PrettyPrinter().pformat(results)))
- device.model = results.get('model', 'unknown')
- device.hardware_version = results.get('hardware_version', 'unknown')
- device.firmware_version = results.get('firmware_version', 'unknown')
- device.serial_number = results.get('serial_number', 'unknown')
+ # See if this is a virtualized OLT. If so, no NETCONF support available
- def get_software_images():
- leafs = ['running-revision', 'candidate-revision', 'startup-revision']
- image_names = list(set([results.get(img, 'unknown') for img in leafs]))
-
- images = []
- image_count = 1
- for name in image_names:
- # TODO: Look into how to find out hash, is_valid, and install date/time
- image = Image(name='Candidate_{}'.format(image_count),
- version=name,
- is_active=(name == results.get('running-revision', 'xxx')),
- is_committed=True,
- is_valid=True,
- install_datetime='Not Available')
- image_count += 1
- images.append(image)
- return images
-
- device.images.image.extend(get_software_images())
- device.root = True
- device.vendor = results.get('vendor', 'Adtran, Inc.')
- device.connect_status = ConnectStatus.REACHABLE
- self.adapter_agent.update_device(device)
+ if 'module-info' in results:
+ self.is_virtual_olt = any(mod.get('module-name', None) == 'adtran-ont-mock'
+ for mod in results['module-info'])
+ self.is_async_control = any(mod.get('module-name', None) == 'adtran-olt-pon-control'
+ for mod in results['module-info'])
except Exception as e:
- self.log.exception('Device_info_failed', e=e)
+ self.log.exception('Initial_RESTCONF_hello_failed', e=e)
self.activate_failed(device, e.message, reachable=False)
- try:
- # Enumerate and create Northbound NNI interfaces
+ ############################################################################
+ # Start initial discovery of NETCONF support (if any)
- device.reason = 'Enumerating NNI Interfaces'
- self.adapter_agent.update_device(device)
- self.startup = self.enumerate_northbound_ports(device)
- results = yield self.startup
+ try:
+ self.startup = self.make_netconf_connection()
+ yield self.startup
- self.startup = self.process_northbound_ports(device, results)
- yield self.startup
+ except Exception as e:
+ self.log.exception('NETCONF_connection_failed', e=e)
+ self.activate_failed(device, e.message, reachable=False)
- device.reason = 'Adding NNI Interfaces to Adapter'
+ ############################################################################
+ # Get the device Information
+
+ if reconciling:
+ device.connect_status = ConnectStatus.REACHABLE
+ self.adapter_agent.update_device(device)
+ else:
+ try:
+ self.startup = self.get_device_info(device)
+ results = yield self.startup
+
+ device.model = results.get('model', 'unknown')
+ device.hardware_version = results.get('hardware_version', 'unknown')
+ device.firmware_version = results.get('firmware_version', 'unknown')
+ device.serial_number = results.get('serial_number', 'unknown')
+
+ def get_software_images():
+ leafs = ['running-revision', 'candidate-revision', 'startup-revision']
+ image_names = list(set([results.get(img, 'unknown') for img in leafs]))
+
+ images = []
+ image_count = 1
+ for name in image_names:
+ # TODO: Look into how to find out hash, is_valid, and install date/time
+ image = Image(name='Candidate_{}'.format(image_count),
+ version=name,
+ is_active=(name == results.get('running-revision', 'xxx')),
+ is_committed=True,
+ is_valid=True,
+ install_datetime='Not Available')
+ image_count += 1
+ images.append(image)
+ return images
+
+ device.images.image.extend(get_software_images())
+ device.root = True
+ device.vendor = results.get('vendor', 'Adtran, Inc.')
+ device.connect_status = ConnectStatus.REACHABLE
+ self.adapter_agent.update_device(device)
+
+ except Exception as e:
+ self.log.exception('Device_info_failed', e=e)
+ self.activate_failed(device, e.message, reachable=False)
+
+ try:
+ # Enumerate and create Northbound NNI interfaces
+
+ device.reason = 'Enumerating NNI Interfaces'
+ self.adapter_agent.update_device(device)
+ self.startup = self.enumerate_northbound_ports(device)
+ results = yield self.startup
+
+ self.startup = self.process_northbound_ports(device, results)
+ yield self.startup
+
+ device.reason = 'Adding NNI Interfaces to Adapter'
+ self.adapter_agent.update_device(device)
+
+ if not reconciling:
+ for port in self.northbound_ports.itervalues():
+ self.adapter_agent.add_port(device.id, port.get_port())
+
+ except Exception as e:
+ self.log.exception('NNI_enumeration', e=e)
+ self.activate_failed(device, e.message)
+
+ try:
+ # Enumerate and create southbound interfaces
+
+ device.reason = 'Enumerating PON Interfaces'
+ self.adapter_agent.update_device(device)
+ self.startup = self.enumerate_southbound_ports(device)
+ results = yield self.startup
+
+ self.startup = self.process_southbound_ports(device, results)
+ yield self.startup
+
+ device.reason = 'Adding PON Interfaces to Adapter'
+ self.adapter_agent.update_device(device)
+
+ if not reconciling:
+ for port in self.southbound_ports.itervalues():
+ self.adapter_agent.add_port(device.id, port.get_port())
+
+ except Exception as e:
+ self.log.exception('PON_enumeration', e=e)
+ self.activate_failed(device, e.message)
+
+ if reconciling:
+ if device.admin_state == AdminState.ENABLED:
+ if device.parent_id:
+ self.logical_device_id = device.parent_id
+ self.adapter_agent.reconcile_logical_device(device.parent_id)
+ else:
+ self.log.info('no-logical-device-set')
+
+ # Reconcile child devices
+ self.adapter_agent.reconcile_child_devices(device.id)
+ ld_initialized = self.adapter_agent.get_logical_device()
+ assert device.parent_id == ld_initialized.id, \
+ 'parent ID not Logical device ID'
+
+ else:
+ # Complete activation by setting up logical device for this OLT and saving
+ # off the devices parent_id
+
+ ld_initialized = self.create_logical_device(device)
+
+ ############################################################################
+ # Setup PM configuration for this device
+ try:
+ device.reason = 'Setting up PM configuration'
+ self.adapter_agent.update_device(device)
+
+ self.pm_metrics = OltPmMetrics(self, device, grouped=True, freq_override=False)
+ pm_config = self.pm_metrics.make_proto()
+ self.log.info("initial-pm-config", pm_config=pm_config)
+ self.adapter_agent.update_device_pm_config(pm_config, init=True)
+
+ except Exception as e:
+ self.log.exception('pm-setup', e=e)
+ self.activate_failed(device, e.message)
+
+ ############################################################################
+ # Setup Alarm handler
+
+ device.reason = 'Setting up Adapter Alarms'
self.adapter_agent.update_device(device)
- if not reconciling:
- for port in self.northbound_ports.itervalues():
- self.adapter_agent.add_port(device.id, port.get_port())
+ self.alarms = AdapterAlarms(self.adapter, device.id)
+
+ ############################################################################
+ # Create logical ports for all southbound and northbound interfaces
+ try:
+ device.reason = 'Creating logical ports'
+ self.adapter_agent.update_device(device)
+ self.startup = self.create_logical_ports(device, ld_initialized, reconciling)
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('logical-port', e=e)
+ self.activate_failed(device, e.message)
+
+ ############################################################################
+ # Register for ONU detection
+ # self.adapter_agent.register_for_onu_detect_state(device.id)
+
+ # Complete device specific steps
+ try:
+ self.log.debug('device-activation-procedures')
+ self.startup = self.complete_device_specific_activation(device, reconciling)
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('device-activation-procedures', e=e)
+ self.activate_failed(device, e.message)
+
+ # Schedule the heartbeat for the device
+
+ self.log.debug('starting-heartbeat')
+ self.start_heartbeat(delay=10)
+
+ device = self.adapter_agent.get_device(device.id)
+ device.parent_id = ld_initialized.id
+ device.oper_status = OperStatus.ACTIVE
+ device.reason = ''
+ self.adapter_agent.update_device(device)
+ self.logical_device_id = ld_initialized.id
+
+ # finally, open the frameio port to receive in-band packet_in messages
+ self._activate_io_port()
+
+ # Start collecting stats from the device after a brief pause
+ reactor.callLater(10, self.start_kpi_collection, device.id)
+
+ # Signal completion
+
+ self.log.info('activated')
except Exception as e:
- self.log.exception('NNI_enumeration', e=e)
- self.activate_failed(device, e.message)
+ self.log.exception('activate', e=e)
+ if done_deferred is not None:
+ done_deferred.errback(e)
+ raise
+ if done_deferred is not None:
+ done_deferred.callback('activated')
- try:
- # Enumerate and create southbound interfaces
-
- device.reason = 'Enumerating PON Interfaces'
- self.adapter_agent.update_device(device)
- self.startup = self.enumerate_southbound_ports(device)
- results = yield self.startup
-
- self.startup = self.process_southbound_ports(device, results)
- yield self.startup
-
- device.reason = 'Adding PON Interfaces to Adapter'
- self.adapter_agent.update_device(device)
-
- if not reconciling:
- for port in self.southbound_ports.itervalues():
- self.adapter_agent.add_port(device.id, port.get_port())
-
- except Exception as e:
- self.log.exception('PON_enumeration', e=e)
- self.activate_failed(device, e.message)
-
- if reconciling:
- if device.admin_state == AdminState.ENABLED:
- if device.parent_id:
- self.logical_device_id = device.parent_id
- self.adapter_agent.reconcile_logical_device(device.parent_id)
- else:
- self.log.info('no-logical-device-set')
-
- # Reconcile child devices
- self.adapter_agent.reconcile_child_devices(device.id)
- ld_initialized = self.adapter_agent.get_logical_device()
- assert device.parent_id == ld_initialized.id, \
- 'parent ID not Logical device ID'
-
- else:
- # Complete activation by setting up logical device for this OLT and saving
- # off the devices parent_id
-
- ld_initialized = self.create_logical_device(device)
-
- ############################################################################
- # Setup PM configuration for this device
- try:
- device.reason = 'Setting up PM configuration'
- self.adapter_agent.update_device(device)
-
- self.pm_metrics = AdapterPmMetrics(self.adapter, device)
- pm_config = self.pm_metrics.make_proto()
- self.log.info("initial-pm-config", pm_config=pm_config)
- self.adapter_agent.update_device_pm_config(pm_config, init=True)
-
- except Exception as e:
- self.log.exception('pm-setup', e=e)
- self.activate_failed(device, e.message)
-
- ############################################################################
- # Setup Alarm handler
-
- device.reason = 'Setting up Adapter Alarms'
- self.adapter_agent.update_device(device)
-
- self.alarms = AdapterAlarms(self.adapter, device)
-
- ############################################################################
- # Create logical ports for all southbound and northbound interfaces
- try:
- device.reason = 'Creating logical ports'
- self.adapter_agent.update_device(device)
- self.startup = self.create_logical_ports(device, ld_initialized, reconciling)
- yield self.startup
-
- except Exception as e:
- self.log.exception('logical-port', e=e)
- self.activate_failed(device, e.message)
-
- ############################################################################
- # Register for ONU detection
- # self.adapter_agent.register_for_onu_detect_state(device.id)
-
- # Complete device specific steps
- try:
- self.log.debug('device-activation-procedures')
- self.startup = self.complete_device_specific_activation(device, reconciling)
- yield self.startup
-
- except Exception as e:
- self.log.exception('device-activation-procedures', e=e)
- self.activate_failed(device, e.message)
-
- # Schedule the heartbeat for the device
-
- self.log.debug('starting-heartbeat')
- self.start_heartbeat(delay=10)
-
- device = self.adapter_agent.get_device(device.id)
- device.parent_id = ld_initialized.id
- device.oper_status = OperStatus.ACTIVE
- device.reason = ''
- self.adapter_agent.update_device(device)
- self.logical_device_id = ld_initialized.id
-
- # finally, open the frameio port to receive in-band packet_in messages
- self._activate_io_port()
-
- # Start collecting stats from the device after a brief pause
- reactor.callLater(10, self.start_kpi_collection, device.id)
-
- self.log.info('activated')
+ returnValue(done_deferred)
def activate_failed(self, device, reason, reachable=True):
"""
@@ -535,7 +570,7 @@
device.reason = reason
self.adapter_agent.update_device(device)
- raise RuntimeError('Failed to activate OLT: {}'.format(device.reason))
+ raise Exception('Failed to activate OLT: {}'.format(device.reason))
@inlineCallbacks
def make_netconf_connection(self, connect_timeout=None,
@@ -627,8 +662,8 @@
OFPC_PORT_STATS)),
root_device_id=device.id)
- ld_initialized = self.adapter_agent.create_logical_device(ld)
-
+ ld_initialized = self.adapter_agent.create_logical_device(ld,
+ dpid=self.default_mac_addr)
return ld_initialized
@inlineCallbacks
@@ -790,21 +825,6 @@
def complete_device_specific_activation(self, _device, _reconciling):
return defer.succeed('NOP')
- def deactivate(self, device):
- # Clear off logical device ID
- self.logical_device_id = None
-
- # Kill any heartbeat poll
- h, self.heartbeat = self.heartbeat, None
-
- try:
- if h is not None and not h.called:
- h.cancel()
- except:
- pass
-
- # TODO: What else (delete logical device, ???)
-
def disable(self):
"""
This is called when a previously enabled device needs to be disabled based on a NBI call.
@@ -868,7 +888,7 @@
# NOTE: Flows removed before this method is called
# Wait for completion
- self.startup = defer.gatherResults(dl)
+ self.startup = defer.gatherResults(dl, consumeErrors=True)
def _drop_netconf():
return self.netconf_client.close() if \
@@ -891,9 +911,10 @@
return self.startup
@inlineCallbacks
- def reenable(self):
+ def reenable(self, done_deferred=None):
"""
This is called when a previously disabled device needs to be enabled based on a NBI call.
+ :param done_deferred: (Deferred) Deferred to fire when done
"""
self.log.info('re-enabling', device_id=self.device_id)
@@ -914,6 +935,10 @@
# Set all ports to enabled
self.adapter_agent.enable_all_ports(self.device_id)
+ # Recreate the logical device
+
+ ld_initialized = self.create_logical_device(device)
+
try:
yield self.make_restconf_connection()
@@ -926,10 +951,6 @@
except Exception as e:
self.log.exception('NETCONF-re-connection', e=e)
- # Recreate the logical device
-
- ld_initialized = self.create_logical_device(device)
-
# Create logical ports for all southbound and northbound interfaces
try:
@@ -964,7 +985,7 @@
# Wait for completion
- self.startup = defer.gatherResults(dl)
+ self.startup = defer.gatherResults(dl, consumeErrors=True)
results = yield self.startup
# Re-subscribe for ONU detection
@@ -977,6 +998,10 @@
self._activate_io_port()
self.log.info('re-enabled', device_id=device.id)
+
+ if done_deferred is not None:
+ done_deferred.callback('Done')
+
returnValue(results)
@inlineCallbacks
@@ -1114,7 +1139,7 @@
dl.append(port.restart())
try:
- yield defer.gatherResults(dl)
+ yield defer.gatherResults(dl, consumeErrors=True)
except Exception as e:
self.log.exception('port-restart', e=e)
@@ -1277,24 +1302,20 @@
# TODO: This has not been tested
def _collect(device_id, prefix):
from voltha.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
- import random
try:
# Step 1: gather metrics from device
port_metrics = self.pm_metrics.collect_port_metrics()
# Step 2: prepare the KpiEvent for submission
- # we can time-stamp them here (or could use time derived from OLT
+ # we can time-stamp them here or could use time derived from OLT
ts = arrow.utcnow().timestamp
kpi_event = KpiEvent(
type=KpiEventType.slice,
ts=ts,
prefixes={
- # OLT NNI port
- prefix + '.nni': MetricValuePairs(metrics=port_metrics['nni']),
- # OLT PON port
- prefix + '.pon': MetricValuePairs(metrics=port_metrics['pon'])
- }
+ prefix + '.{}'.format(k): MetricValuePairs(metrics=port_metrics[k])
+ for k in port_metrics.keys()}
)
# Step 3: submit
self.adapter_agent.submit_kpis(kpi_event)
@@ -1336,6 +1357,10 @@
except Exception as e:
self.heartbeat = reactor.callLater(5, self._heartbeat_fail, e)
+ def on_heatbeat_alarm(self, active):
+ if active and (self.netconf_client is None or not self.netconf_client.connected):
+ self.make_netconf_connection(close_existing_client=True)
+
def heartbeat_check_status(self, _):
"""
Check the number of heartbeat failures against the limit and emit an alarm if needed
@@ -1343,6 +1368,8 @@
device = self.adapter_agent.get_device(self.device_id)
try:
+ from alarms.heartbeat_alarm import HeartbeatAlarm
+
if self.heartbeat_miss >= self.heartbeat_failed_limit:
if device.connect_status == ConnectStatus.REACHABLE:
self.log.warning('heartbeat-failed', count=self.heartbeat_miss)
@@ -1350,7 +1377,8 @@
device.oper_status = OperStatus.FAILED
device.reason = self.heartbeat_last_reason
self.adapter_agent.update_device(device)
- self.heartbeat_alarm(True, self.heartbeat_miss)
+ HeartbeatAlarm(self, 'olt', self.heartbeat_miss).raise_alarm()
+ self.on_heatbeat_alarm(True)
else:
# Update device states
if device.connect_status != ConnectStatus.REACHABLE:
@@ -1358,7 +1386,8 @@
device.oper_status = OperStatus.ACTIVE
device.reason = ''
self.adapter_agent.update_device(device)
- self.heartbeat_alarm(False)
+ HeartbeatAlarm(self, 'olt').clear_alarm()
+ self.on_heatbeat_alarm(False)
if self.netconf_client is None or not self.netconf_client.connected:
self.make_netconf_connection(close_existing_client=True)
@@ -1385,38 +1414,9 @@
self.heartbeat_last_reason = 'RESTCONF connectivity error'
self.heartbeat_check_status(None)
- def heartbeat_alarm(self, raise_alarm, heartbeat_misses=0):
- alarm = 'Heartbeat'
- alarm_data = {
- 'ts': arrow.utcnow().timestamp,
- 'description': self.alarms.format_description('olt', alarm,
- raise_alarm),
- 'id': self.alarms.format_id(alarm),
- 'type': AlarmEventType.EQUIPMENT,
- 'category': AlarmEventCategory.PON,
- 'severity': AlarmEventSeverity.CRITICAL,
- 'state': AlarmEventState.RAISED if raise_alarm else AlarmEventState.CLEARED
- }
- context_data = {'heartbeats_missed': heartbeat_misses}
- self.alarms.send_alarm(context_data, alarm_data)
-
@staticmethod
def parse_module_revision(revision):
try:
return datetime.datetime.strptime(revision, '%Y-%m-%d')
except Exception:
return None
-
- @staticmethod
- def _dict_diff(lhs, rhs):
- """
- Compare the values of two dictionaries and return the items in 'rhs'
- that are different than 'lhs. The RHS dictionary keys can be a subset of the
- LHS dictionary, or the RHS dictionary keys can contain new values.
-
- :param lhs: (dict) Original dictionary values
- :param rhs: (dict) New dictionary values to compare to the original (lhs) dict
- :return: (dict) Dictionary with differences from the RHS dictionary
- """
- assert len(lhs.keys()) == len(set(lhs.iterkeys()) & (rhs.iterkeys())), 'Dictionary Keys do not match'
- return {k: v for k, v in rhs.items() if k not in lhs or lhs[k] != rhs[k]}
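The reworked `activate()` and `reenable()` above now signal completion through an optional `done_deferred` argument rather than by returning the device. A minimal sketch of the calling pattern, mirroring the `adopt_device()`/`reconcile_device()` changes in `adtran_olt.py` below (the wrapper name and logging here are illustrative only):

```python
import structlog
from twisted.internet import defer, reactor

log = structlog.get_logger()

def adopt(handler, device):
    # Illustrative wrapper: schedule activation on the reactor and hand the
    # handler a Deferred that it fires (or errbacks) when activation
    # completes, as adopt_device() in adtran_olt.py now does.
    d = defer.Deferred()
    d.addCallbacks(
        lambda result: log.info('activate-complete', device_id=device.id, result=result),
        lambda failure: log.error('activate-failed', device_id=device.id, reason=failure))
    reactor.callLater(0, handler.activate, device, done_deferred=d)
    return d
```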
diff --git a/voltha/adapters/adtran_olt/adtran_olt.py b/voltha/adapters/adtran_olt/adtran_olt.py
index d34f9e5..ba88d9b 100644
--- a/voltha/adapters/adtran_olt/adtran_olt.py
+++ b/voltha/adapters/adtran_olt/adtran_olt.py
@@ -16,7 +16,7 @@
Adtran 1-U OLT adapter.
"""
import structlog
-from twisted.internet import reactor
+from twisted.internet import reactor, defer
from zope.interface import implementer
from adtran_olt_handler import AdtranOltHandler
@@ -51,7 +51,7 @@
self.descriptor = Adapter(
id=self.name,
vendor='Adtran, Inc.',
- version='0.9',
+ version='0.12',
config=AdapterConfig(log_level=LogLevel.INFO)
)
log.debug('adtran_olt.__init__', adapter_agent=adapter_agent)
@@ -132,9 +132,15 @@
:return: (Deferred) Shall be fired to acknowledge device ownership.
"""
log.info('adopt-device', device=device)
- self.devices_handlers[device.id] = AdtranOltHandler(self, device.id)
- reactor.callLater(0, self.devices_handlers[device.id].activate, device)
- return device
+ kwargs = {
+ 'adapter': self,
+ 'device-id': device.id
+ }
+ self.devices_handlers[device.id] = AdtranOltHandler(**kwargs)
+ d = defer.Deferred()
+ reactor.callLater(0, self.devices_handlers[device.id].activate,
+ device, done_deferred=d)
+ return d
def reconcile_device(self, device):
"""
@@ -148,9 +154,15 @@
:return: (Deferred) Shall be fired to acknowledge device ownership.
"""
log.info('reconcile-device', device=device)
- self.devices_handlers[device.id] = AdtranOltHandler(self, device.id)
- reactor.callLater(0, self.devices_handlers[device.id].activate, device, reconciling=True)
- return device
+ kwargs = {
+ 'adapter': self,
+ 'device-id': device.id
+ }
+ self.devices_handlers[device.id] = AdtranOltHandler(**kwargs)
+ d = defer.Deferred()
+ reactor.callLater(0, self.devices_handlers[device.id].activate, device,
+ done_deferred=d, reconciling=True)
+ return d
def abandon_device(self, device):
"""
@@ -172,8 +184,10 @@
:return: (Deferred) Shall be fired to acknowledge disabling the device.
"""
log.info('disable-device', device=device)
- reactor.callLater(0, self.devices_handlers[device.id].disable)
- return device
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ reactor.callLater(0, handler.disable)
+ return device
def reenable_device(self, device):
"""
@@ -184,45 +198,54 @@
:return: (Deferred) Shall be fired to acknowledge re-enabling the device.
"""
log.info('reenable-device', device=device)
- reactor.callLater(0, self.devices_handlers[device.id].reenable)
- return device
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ d = defer.Deferred()
+ reactor.callLater(0, handler.reenable, done_deferred=d)
+ return d
def reboot_device(self, device):
"""
- This is called to reboot a device based on a NBI call. The admin
- state of the device will not change after the reboot
+ This is called to reboot a device based on a NBI call. The admin state of the device
+ will not change after the reboot
:param device: A Voltha.Device object.
:return: (Deferred) Shall be fired to acknowledge the reboot.
"""
log.info('reboot_device', device=device)
- reactor.callLater(0, self.devices_handlers[device.id].reboot)
- return device
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ reactor.callLater(0, handler.reboot)
+ return device
def download_image(self, device, request):
"""
- This is called to request downloading a specified image into
- the standby partition of a device based on a NBI call.
- This call is expected to be non-blocking.
+ This is called to request downloading a specified image into the standby partition
+ of a device based on a NBI call.
+
:param device: A Voltha.Device object.
- A Voltha.ImageDownload object.
+ :param request: A Voltha.ImageDownload object.
:return: (Deferred) Shall be fired to acknowledge the download.
"""
log.info('image_download', device=device, request=request)
- raise NotImplementedError()
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.start_download(device, request, defer.Deferred())
def get_image_download_status(self, device, request):
"""
- This is called to inquire about a requested image download
- status based on a NBI call.
- The adapter is expected to update the DownloadImage DB object
+ This is called to inquire about a requested image download status based
+ on a NBI call. The adapter is expected to update the DownloadImage DB object
with the query result
+
:param device: A Voltha.Device object.
- A Voltha.ImageDownload object.
+ :param request: A Voltha.ImageDownload object.
:return: (Deferred) Shall be fired to acknowledge
"""
log.info('get_image_download', device=device, request=request)
- raise NotImplementedError()
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.download_status(device, request, defer.Deferred())
def cancel_image_download(self, device, request):
"""
@@ -230,11 +253,13 @@
based on a NBI call. The admin state of the device will not
change after the download.
:param device: A Voltha.Device object.
- A Voltha.ImageDownload object.
+ :param request: A Voltha.ImageDownload object.
:return: (Deferred) Shall be fired to acknowledge
"""
log.info('cancel_image_download', device=device)
- raise NotImplementedError()
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.cancel_download(device, request, defer.Deferred())
def activate_image_update(self, device, request):
"""
@@ -246,11 +271,13 @@
activated image running on device
This call is expected to be non-blocking.
:param device: A Voltha.Device object.
- A Voltha.ImageDownload object.
+ :param request: A Voltha.ImageDownload object.
:return: (Deferred) OperationResponse object.
"""
log.info('activate_image_update', device=device, request=request)
- raise NotImplementedError()
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.activate_image(device, request, defer.Deferred())
def revert_image_update(self, device, request):
"""
@@ -263,11 +290,13 @@
previous image running on device
This call is expected to be non-blocking.
:param device: A Voltha.Device object.
- A Voltha.ImageDownload object.
+ :param request: A Voltha.ImageDownload object.
:return: (Deferred) OperationResponse object.
"""
log.info('revert_image_update', device=device, request=request)
- raise NotImplementedError()
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.revert_image(device, request, defer.Deferred())
def self_test_device(self, device):
"""
@@ -277,7 +306,6 @@
"""
from voltha.protos.voltha_pb2 import SelfTestResponse
log.info('self-test-device', device=device.id)
-
# TODO: Support self test?
return SelfTestResponse(result=SelfTestResponse.NOT_SUPPORTED)
@@ -290,7 +318,9 @@
:return: (Deferred) Shall be fired to acknowledge the deletion.
"""
log.info('delete-device', device=device)
- reactor.callLater(0, self.devices_handlers[device.id].delete)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ reactor.callLater(0, handler.delete)
return device
def get_device_details(self, device):
@@ -319,8 +349,9 @@
groups=groups, num_flows=len(flows.items))
assert len(groups.items) == 0, "Cannot yet deal with groups"
- handler = self.devices_handlers[device.id]
- return handler.update_flow_table(flows.items, device)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.update_flow_table(flows.items, device)
def update_flows_incrementally(self, device, flow_changes, group_changes):
"""
@@ -342,8 +373,9 @@
:param pm_configs: A Pms
"""
log.debug('update_pm_config', device=device, pm_configs=pm_configs)
- handler = self.devices_handlers[device.id]
- handler.update_pm_config(device, pm_configs)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.update_pm_config(device, pm_configs)
def send_proxied_message(self, proxy_address, msg):
"""
@@ -359,8 +391,9 @@
indicate that the message was successfully *sent*.
"""
log.debug('send-proxied-message', proxy_address=proxy_address, msg=msg)
- handler = self.devices_handlers[proxy_address.device_id]
- handler.send_proxied_message(proxy_address, msg)
+ handler = self.devices_handlers.get(proxy_address.device_id)
+ if handler is not None:
+ handler.send_proxied_message(proxy_address, msg)
def receive_proxied_message(self, proxy_address, msg):
"""
@@ -399,8 +432,9 @@
return di
device_id = ldi_to_di(logical_device_id)
- handler = self.devices_handlers[device_id]
- handler.packet_out(egress_port_no, msg)
+ handler = self.devices_handlers.get(device_id)
+ if handler is not None:
+ handler.packet_out(egress_port_no, msg)
def receive_inter_adapter_message(self, msg):
"""
@@ -439,33 +473,30 @@
API to create various interfaces (only some PON interfaces as of now)
in the devices
"""
- log.info('create-interface', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_interface(data)
+ log.debug('create-interface', data=data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_create(data)
def update_interface(self, device, data):
"""
API to update various interfaces (only some PON interfaces as of now)
in the devices
"""
- log.info('update-interface', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.update_interface(data)
+ log.debug('update-interface', data=data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
def remove_interface(self, device, data):
"""
API to delete various interfaces (only some PON interfaces as of now)
in the devices
"""
- log.info('remove-interface', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.remove_interface(data)
+ log.debug('remove-interface', data=data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
def receive_onu_detect_state(self, proxy_address, state):
"""
@@ -480,72 +511,67 @@
"""
API to create tcont object in the devices
:param device: device id
- :tcont_data: tcont data object
- :traffic_descriptor_data: traffic descriptor data object
+ :param tcont_data: tcont data object
+ :param traffic_descriptor_data: traffic descriptor data object
:return: None
"""
log.info('create-tcont', tcont_data=tcont_data,
traffic_descriptor_data=traffic_descriptor_data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_tcont(tcont_data, traffic_descriptor_data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.create_tcont(tcont_data, traffic_descriptor_data)
def update_tcont(self, device, tcont_data, traffic_descriptor_data):
"""
API to update tcont object in the devices
:param device: device id
- :tcont_data: tcont data object
- :traffic_descriptor_data: traffic descriptor data object
+ :param tcont_data: tcont data object
+ :param traffic_descriptor_data: traffic descriptor data object
:return: None
"""
log.info('update-tcont', tcont_data=tcont_data,
traffic_descriptor_data=traffic_descriptor_data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.update_tcont(tcont_data, traffic_descriptor_data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.update_tcont(tcont_data, traffic_descriptor_data)
def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
"""
API to delete tcont object in the devices
:param device: device id
- :tcont_data: tcont data object
- :traffic_descriptor_data: traffic descriptor data object
+ :param tcont_data: tcont data object
+ :param traffic_descriptor_data: traffic descriptor data object
:return: None
"""
log.info('remove-tcont', tcont_data=tcont_data,
traffic_descriptor_data=traffic_descriptor_data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.remove_tcont(tcont_data, traffic_descriptor_data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.remove_tcont(tcont_data, traffic_descriptor_data)
def create_gemport(self, device, data):
"""
API to create gemport object in the devices
:param device: device id
- :data: gemport data object
+ :param data: gemport data object
:return: None
"""
log.info('create-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_create(data)
def update_gemport(self, device, data):
"""
API to update gemport object in the devices
:param device: device id
- :data: gemport data object
+ :param data: gemport data object
:return: None
"""
log.info('update-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.update_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
def remove_gemport(self, device, data):
"""
@@ -555,10 +581,9 @@
:return: None
"""
log.info('remove-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.remove_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
def create_multicast_gemport(self, device, data):
"""
@@ -568,10 +593,9 @@
:return: None
"""
log.info('create-mcast-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_create(data)
def update_multicast_gemport(self, device, data):
"""
@@ -581,10 +605,9 @@
:return: None
"""
log.info('update-mcast-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.update_multicast_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
def remove_multicast_gemport(self, device, data):
"""
@@ -594,10 +617,9 @@
:return: None
"""
log.info('remove-mcast-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.remove_multicast_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
def create_multicast_distribution_set(self, device, data):
"""
@@ -608,10 +630,9 @@
:return: None
"""
log.info('create-mcast-distribution-set', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_distribution_set(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_create(data)
def update_multicast_distribution_set(self, device, data):
"""
@@ -622,10 +643,9 @@
:return: None
"""
log.info('update-mcast-distribution-set', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_distribution_set(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
def remove_multicast_distribution_set(self, device, data):
"""
@@ -636,7 +656,6 @@
:return: None
"""
log.info('remove-mcast-distribution-set', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_distribution_set(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
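All of the per-object xPON entry points above (interfaces, GEM ports, multicast GEM ports, multicast distribution sets) now funnel into generic `xpon_create()`, `xpon_update()` and `xpon_remove()` methods supplied by the new `AdtranOltXPON` mix-in (`xpon/adtran_olt_xpon.py`, introduced by this change but not included in this excerpt). Judging from the isinstance-based `_get_xpon_collection()` helper being removed from `adtran_olt_handler.py` below, the dispatch presumably keys on the protobuf message type, roughly along these lines (the class and attribute names here are assumptions):

```python
from voltha.protos.bbf_fiber_base_pb2 import (
    ChannelgroupConfig, ChannelpartitionConfig, ChannelpairConfig,
    ChannelterminationConfig, OntaniConfig, VOntaniConfig, VEnetConfig)

class XponDispatchSketch(object):
    """Illustrative only: one config dict per xPON protobuf type, keyed by name."""

    def __init__(self):
        self._collections = {
            ChannelgroupConfig: {},        # channel-groups
            ChannelpartitionConfig: {},    # channel-partitions
            ChannelpairConfig: {},         # channel-pairs
            ChannelterminationConfig: {},  # channel-terminations
            OntaniConfig: {},              # ont-anis
            VOntaniConfig: {},             # v-ont-anis
            VEnetConfig: {},               # v-enets
        }

    def xpon_create(self, data):
        collection = self._collections.get(type(data))
        if collection is not None:
            collection[data.name] = data

    def xpon_update(self, data):
        collection = self._collections.get(type(data))
        if collection is not None and data.name in collection:
            collection[data.name] = data

    def xpon_remove(self, data):
        collection = self._collections.get(type(data))
        if collection is not None:
            collection.pop(data.name, None)
```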
diff --git a/voltha/adapters/adtran_olt/adtran_olt_handler.py b/voltha/adapters/adtran_olt/adtran_olt_handler.py
index 1c90348..3ffecbb 100644
--- a/voltha/adapters/adtran_olt/adtran_olt_handler.py
+++ b/voltha/adapters/adtran_olt/adtran_olt_handler.py
@@ -14,27 +14,27 @@
import datetime
import random
+import json
+import xmltodict
-from twisted.internet import reactor, defer
+from twisted.internet import reactor
from twisted.internet.defer import returnValue, inlineCallbacks, succeed
from adtran_device_handler import AdtranDeviceHandler
-from tcont import TCont, TrafficDescriptor, BestEffort
-from gem_port import GemPort
+from download import Download
+from xpon.adtran_olt_xpon import AdtranOltXPON
from codec.olt_state import OltState
from flow.flow_entry import FlowEntry
from net.adtran_zmq import AdtranZmqClient
from voltha.extensions.omci.omci import *
from voltha.protos.common_pb2 import AdminState, OperStatus
-from voltha.protos.device_pb2 import Device
-from voltha.protos.bbf_fiber_base_pb2 import \
- ChannelgroupConfig, ChannelpartitionConfig, ChannelpairConfig, ChannelterminationConfig, \
- OntaniConfig, VOntaniConfig, VEnetConfig
+from voltha.protos.device_pb2 import ImageDownload
FIXED_ONU = True # Enhanced ONU support
ATT_NETWORK = True # Use AT&T cVlan scheme
-class AdtranOltHandler(AdtranDeviceHandler):
+
+class AdtranOltHandler(AdtranDeviceHandler, AdtranOltXPON):
"""
The OLT Handler is used to wrap a single instance of a 10G OLT 1-U pizza-box
"""
@@ -44,7 +44,8 @@
GPON_OLT_HW_URI = '/restconf/data/gpon-olt-hw'
GPON_OLT_HW_STATE_URI = GPON_OLT_HW_URI + ':olt-state'
- GPON_PON_CONFIG_LIST_URI = GPON_OLT_HW_URI + ':olt/pon'
+ GPON_OLT_HW_CONFIG_URI = GPON_OLT_HW_URI + ':olt'
+ GPON_PON_CONFIG_LIST_URI = GPON_OLT_HW_CONFIG_URI + '/pon'
# Per-PON info
@@ -64,28 +65,18 @@
BASE_ONU_OFFSET = 64
- def __init__(self, adapter, device_id, timeout=20):
- super(AdtranOltHandler, self).__init__(adapter, device_id, timeout=timeout)
- self.gpon_olt_hw_revision = None
+ def __init__(self, **kwargs):
+ super(AdtranOltHandler, self).__init__(**kwargs)
+
self.status_poll = None
self.status_poll_interval = 5.0
self.status_poll_skew = self.status_poll_interval / 10
-
self.zmq_client = None
-
- # xPON config dictionaries
-
- self._channel_groups = {} # Name -> dict
- self._channel_partitions = {} # Name -> dict
- self._channel_pairs = {} # Name -> dict
- self._channel_terminations = {} # Name -> dict
- self._v_ont_anis = {} # Name -> dict
- self._ont_anis = {} # Name -> dict
- self._v_enets = {} # Name -> dict
- self._tconts = {} # Name -> dict
- self._traffic_descriptors = {} # Name -> dict
- self._gem_ports = {} # Name -> dict
- self._cached_xpon_pon_info = {} # PON-id -> dict
+ self.ssh_deferred = None
+ self._system_id = None
+ self._download_protocols = None
+ self._download_deferred = None
+ self._downloads = {} # name -> Download obj
def __del__(self):
# OLT Specific things here.
@@ -105,9 +96,34 @@
AdtranDeviceHandler.__del__(self)
+ def _cancel_deferred(self):
+ d1, self.status_poll = self.status_poll, None
+ d2, self.ssh_deferred = self.ssh_deferred, None
+ d3, self._download_deferred = self._download_deferred, None
+
+ for d in [d1, d2, d3]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
def __str__(self):
return "AdtranOltHandler: {}".format(self.ip_address)
+ @property
+ def system_id(self):
+ return self._system_id
+
+ @system_id.setter
+ def system_id(self, value):
+ if self._system_id != value:
+ self._system_id = value
+
+ data = json.dumps({'olt-id': str(value)})
+ uri = AdtranOltHandler.GPON_OLT_HW_CONFIG_URI
+ self.rest_client.request('PATCH', uri, data=data, name='olt-system-id')
+
@inlineCallbacks
def get_device_info(self, device):
"""
@@ -122,7 +138,7 @@
the device type specification returned by device_types().
"""
from codec.physical_entities_state import PhysicalEntitiesState
-
+ # TODO: After a CLI 'reboot', the device info can come back garbled (labels printed instead of values). Log into the device and run 'show' to verify.
device = {
'model': 'n/a',
'hardware_version': 'n/a',
@@ -181,6 +197,19 @@
specific extensions.
:return: (Deferred or None).
"""
+ from net.rcmd import RCmd
+ try:
+ # Also get the MAC Address for the OLT
+ command = "ip -o link | grep eth0 | sed -n -e 's/^.*ether //p' | awk '{ print $1 }'"
+ rcmd = RCmd(self.ip_address, self.netconf_username, self.netconf_password,
+ command)
+ self.default_mac_addr = yield rcmd.execute()
+ self.log.info("mac-addr", mac_addr=self.default_mac_addr)
+
+ except Exception as e:
+ self.log.exception('mac-address', e=e)
+ raise
+
try:
from codec.ietf_interfaces import IetfInterfacesState
from nni_port import MockNniPort
@@ -287,20 +316,95 @@
:param reconciling: (boolean) True if taking over for another VOLTHA
"""
- # For the pizzabox OLT, periodically query the OLT state of all PONs. This
- # is simpler then having each PON port do its own poll. From this, we can:
- #
- # o Discover any new or missing ONT/ONUs
- #
- # o Discover any LOS for any ONT/ONUs
- #
- # o TODO Update some PON level statistics
+ # Make sure configured for ZMQ remote access
+ self._ready_zmq()
+ # ZeroMQ client
self.zmq_client = AdtranZmqClient(self.ip_address, rx_callback=self.rx_packet, port=self.zmq_port)
+
+ # Download support
+ self._download_deferred = reactor.callLater(0, self._get_download_protocols)
+
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+
+ # PON Status
self.status_poll = reactor.callLater(5, self.poll_for_status)
return succeed('Done')
+ def on_heatbeat_alarm(self, active):
+ if not active:
+ self._ready_zmq()
+
+ @inlineCallbacks
+ def _get_download_protocols(self):
+ if self._download_protocols is None:
+ try:
+ config = '<filter>' + \
+ '<file-servers-state xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">' + \
+ '<profiles>' + \
+ '<supported-protocol/>' + \
+ '</profiles>' + \
+ '</file-servers-state>' + \
+ '</filter>'
+
+ results = yield self.netconf_client.get(config)
+
+ result_dict = xmltodict.parse(results.data_xml)
+ entries = result_dict['data']['file-servers-state']['profiles']['supported-protocol']
+ self._download_protocols = [entry['#text'].split(':')[-1] for entry in entries
+ if '#text' in entry]
+
+ except Exception as e:
+ self.log.exception('protocols', e=e)
+ self._download_protocols = None
+ self._download_deferred = reactor.callLater(10, self._get_download_protocols)
+
+ @inlineCallbacks
+ def _ready_zmq(self):
+ from net.rcmd import RCmd
+ # Check for port status
+ command = 'netstat -pan | grep -i 0.0.0.0:{} | wc -l'.format(self.zmq_port)
+ rcmd = RCmd(self.ip_address, self.netconf_username, self.netconf_password, command)
+
+ try:
+ self.log.debug('check-request', command=command)
+ results = yield rcmd.execute()
+ self.log.info('check-results', results=results, result_type=type(results))
+ create_it = int(results) != 1
+
+ except Exception as e:
+ self.log.exception('find', e=e)
+ create_it = True
+
+ if create_it:
+ next_run = 15
+ command = 'mkdir -p /etc/pon_agent; touch /etc/pon_agent/debug.conf; '
+ command += 'ps -ae | grep -i ngpon2_agent; '
+ command += 'service_supervisor stop ngpon2_agent; service_supervisor start ngpon2_agent; '
+ command += 'ps -ae | grep -i ngpon2_agent'
+
+ rcmd = RCmd(self.ip_address, self.netconf_username, self.netconf_password, command)
+
+ try:
+ self.log.debug('create-request', command=command)
+ results = yield rcmd.execute()
+ self.log.info('create-results', results=results, result_type=type(results))
+
+ except Exception as e:
+ self.log.exception('mkdir', e=e)
+ else:
+ next_run = 0
+
+ if next_run > 0:
+ self.ssh_deferred = reactor.callLater(next_run, self._ready_zmq)
+
def disable(self):
+ self._cancel_deferred()
+
+ # Drop registration for adapter messages
+ self.adapter_agent.unregister_for_inter_adapter_messages()
+
c, self.zmq_client = self.zmq_client, None
if c is not None:
try:
@@ -308,63 +412,69 @@
except:
pass
- d, self.status_poll = self.status_poll, None
- if d is not None and not d.called:
- try:
- d.cancel()
- except:
- pass
-
super(AdtranOltHandler, self).disable()
- def reenable(self):
- super(AdtranOltHandler, self).reenable()
+ def reenable(self, done_deferred=None):
+ super(AdtranOltHandler, self).reenable(done_deferred=done_deferred)
- self.zmq_client = AdtranZmqClient(self.ip_address, rx_callback=self.rx_packet, port=self.zmq_port)
+ self._ready_zmq()
+ self.zmq_client = AdtranZmqClient(self.ip_address, rx_callback=self.rx_packet,
+ port=self.zmq_port)
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+
self.status_poll = reactor.callLater(1, self.poll_for_status)
def reboot(self):
+ self._cancel_deferred()
+
c, self.zmq_client = self.zmq_client, None
if c is not None:
c.shutdown()
- d, self.status_poll = self.status_poll, None
- try:
- if d is not None and not d.called:
- d.cancel()
- except:
- pass
+ # Drop registration for adapter messages
+ self.adapter_agent.unregister_for_inter_adapter_messages()
+
+ # Download supported protocols may change (if new image gets activated)
+ self._download_protocols = None
+
super(AdtranOltHandler, self).reboot()
def _finish_reboot(self, timeout, previous_oper_status, previous_conn_status):
super(AdtranOltHandler, self)._finish_reboot(timeout, previous_oper_status, previous_conn_status)
+ self._ready_zmq()
+
+ # Download support
+ self._download_deferred = reactor.callLater(0, self._get_download_protocols)
+
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+
self.zmq_client = AdtranZmqClient(self.ip_address, rx_callback=self.rx_packet, port=self.zmq_port)
- self.status_poll = reactor.callLater(1, self.poll_for_status)
+ self.status_poll = reactor.callLater(5, self.poll_for_status)
def delete(self):
+ self._cancel_deferred()
+
+ # Drop registration for adapter messages
+ self.adapter_agent.unregister_for_inter_adapter_messages()
+
c, self.zmq_client = self.zmq_client, None
if c is not None:
c.shutdown()
- d, self.status_poll = self.status_poll, None
- try:
- if d is not None and not d.called:
- d.cancel()
- except:
- pass
super(AdtranOltHandler, self).delete()
def rx_packet(self, message):
try:
self.log.debug('rx_packet')
- pon_id, onu_id, msg, is_omci = AdtranZmqClient.decode_packet(message)
-
+ pon_id, onu_id, msg_bytes, is_omci = AdtranZmqClient.decode_packet(message,
+ self.is_async_control)
if is_omci:
proxy_address = self._pon_onu_id_to_proxy_address(pon_id, onu_id)
-
- self.adapter_agent.receive_proxied_message(proxy_address, msg)
+ self.adapter_agent.receive_proxied_message(proxy_address, msg_bytes)
else:
pass # TODO: Packet in support not yet supported
# self.adapter_agent.send_packet_in(logical_device_id=logical_device_id,
@@ -415,23 +525,6 @@
self.status_poll = reactor.callLater(delay, self.poll_for_status)
@inlineCallbacks
- def deactivate(self, device):
- # OLT Specific things here
-
- d, self.startup = self.startup, None
- try:
- if d is not None and not d.called:
- d.cancel()
- except:
- pass
- # self.pons.clear()
-
- # TODO: Any other? OLT specific deactivate steps
-
- # Call into base class and have it clean up as well
- super(AdtranOltHandler, self).deactivate(device)
-
- @inlineCallbacks
def update_flow_table(self, flows, device):
"""
Update the flow table on the OLT. If an existing flow is not in the list, it needs
@@ -498,8 +591,8 @@
onu = pon.onu(onu_id)
if onu is not None and onu.enabled:
- data = AdtranZmqClient.encode_omci_message(msg, pon_id, onu_id)
-
+ data = AdtranZmqClient.encode_omci_message(msg, pon_id, onu_id,
+ self.is_async_control)
try:
self.zmq_client.send(data)
@@ -510,19 +603,6 @@
else:
self.log.debug('pon-invalid-or-disabled', pon_id=pon_id)
- @staticmethod
- def is_gpon_olt_hw(content):
- """
- If the hello content
-
- :param content: (dict) Results of RESTCONF adtran-hello GET request
- :return: (string) GPON OLT H/w RESTCONF revision number or None on error/not GPON
- """
- for item in content.get('module-info', None):
- if item.get('module-name') == 'gpon-olt-hw':
- return AdtranDeviceHandler.parse_module_revision(item.get('revision', None))
- return None
-
def get_channel_id(self, pon_id, onu_id):
from pon_port import PonPort
if ATT_NETWORK:
@@ -597,646 +677,540 @@
if self.is_logical_port(port):
raise NotImplementedError('TODO: Logical ports not yet supported')
- def get_xpon_info(self, pon_id, pon_id_type='xgs-ponid'):
- """
- Lookup all xPON configuraiton data for a specific pon-id / channel-termination
- :param pon_id: (int) PON Identifier
- :return: (dict) reduced xPON information for the specific PON port
- """
- if pon_id not in self._cached_xpon_pon_info:
-
- terminations = {key: val for key, val in self._channel_terminations.iteritems()
- if val[pon_id_type] == pon_id}
-
- pair_names = set([term['channel-pair'] for term in terminations.itervalues()])
- pairs = {key: val for key, val in self._channel_pairs.iteritems()
- if key in pair_names}
-
- partition_names = set([pair['channel-partition'] for pair in pairs.itervalues()])
- partitions = {key: val for key, val in self._channel_partitions.iteritems()
- if key in partition_names}
-
- v_ont_anis = {key: val for key, val in self._v_ont_anis.iteritems()
- if val['preferred-channel-pair'] in pair_names}
- v_ont_ani_names = set(v_ont_anis.keys())
-
- group_names = set(pair['channel-group'] for pair in pairs.itervalues())
- groups = {key: val for key, val in self._channel_groups.iteritems()
- if key in group_names}
-
- venets = {key: val for key, val in self._v_enets.iteritems()
- if val['v-ont-ani'] in v_ont_ani_names}
-
- tconts = {key: val for key, val in self._tconts.iteritems()
- if val.vont_ani in v_ont_ani_names}
- tcont_names = set(tconts.keys())
-
- gem_ports = {key: val for key, val in self._gem_ports.iteritems()
- if val.tconf_ref in tcont_names}
-
- self._cached_xpon_pon_info[pon_id] = {
- 'channel-terminations': terminations,
- 'channel-pairs': pairs,
- 'channel-partitions': partitions,
- 'channel-groups': groups,
- 'v-ont-anis': v_ont_anis,
- 'v-enets': venets,
- 'tconts': tconts,
- 'gem-ports': gem_ports
- }
- return self._cached_xpon_pon_info[pon_id]
-
- def _get_xpon_collection(self, data):
- if isinstance(data, ChannelgroupConfig):
- return self._channel_groups
- elif isinstance(data, ChannelpartitionConfig):
- return self._channel_partitions
- elif isinstance(data, ChannelpairConfig):
- return self._channel_pairs
- elif isinstance(data, ChannelterminationConfig):
- return self._channel_terminations
- elif isinstance(data, OntaniConfig):
- return self._ont_anis
- elif isinstance(data, VOntaniConfig):
- return self._v_ont_anis
- elif isinstance(data, VEnetConfig):
- return self._v_enets
- return None
-
- @property
- def channel_terminations(self):
- return self._channel_terminations
-
- @property
- def channel_pairs(self):
- return self._channel_pairs
-
- @property
- def channel_partitions(self):
- return self._channel_partitions
-
- @property
- def v_ont_anis(self):
- return self._v_ont_anis
-
- @property
- def v_enets(self):
- return self._v_enets
-
- @property
- def tconts(self):
- return self._tconts
-
- def _data_to_dict(self, data):
- name = data.name
- interface = data.interface
- inst_data = data.data
-
- if isinstance(data, ChannelgroupConfig):
- return 'channel-group', {
- 'name': name,
- 'enabled': interface.enabled,
- 'system-id': inst_data.system_id,
- 'polling-period': inst_data.polling_period
- }
-
- elif isinstance(data, ChannelpartitionConfig):
- def _auth_method_enum_to_string(value):
- from voltha.protos.bbf_fiber_types_pb2 import SERIAL_NUMBER, LOID, \
- REGISTRATION_ID, OMCI, DOT1X
- return {
- SERIAL_NUMBER: 'serial-number',
- LOID: 'loid',
- REGISTRATION_ID: 'registration-id',
- OMCI: 'omci',
- DOT1X: 'dot1x'
- }.get(value, 'unknown')
-
- return 'channel-partition', {
- 'name': name,
- 'enabled': interface.enabled,
- 'authentication-method': _auth_method_enum_to_string(inst_data.authentication_method),
- 'channel-group': inst_data.channelgroup_ref,
- 'fec-downstream': inst_data.fec_downstream,
- 'mcast-aes': inst_data.multicast_aes_indicator,
- 'differential-fiber-distance': inst_data.differential_fiber_distance
- }
-
- elif isinstance(data, ChannelpairConfig):
- return 'channel-pair', {
- 'name': name,
- 'enabled': interface.enabled,
- 'channel-group': inst_data.channelgroup_ref,
- 'channel-partition': inst_data.channelpartition_ref,
- 'line-rate': inst_data.channelpair_linerate
- }
-
- elif isinstance(data, ChannelterminationConfig):
- return 'channel-termination', {
- 'name': name,
- 'enabled': interface.enabled,
- 'xgs-ponid': inst_data.xgs_ponid,
- 'xgpon-ponid': inst_data.xgpon_ponid,
- 'channel-pair': inst_data.channelpair_ref,
- 'ber-calc-period': inst_data.ber_calc_period
- }
-
- elif isinstance(data, OntaniConfig):
- return 'ont-ani', {
- 'name': name,
- 'enabled': interface.enabled,
- 'upstream-fec': inst_data.upstream_fec_indicator,
- 'mgnt-gemport-aes': inst_data.mgnt_gemport_aes_indicator
- }
-
- elif isinstance(data, VOntaniConfig):
- return 'vOnt-ani', {
- 'name': name,
- 'enabled': interface.enabled,
- 'onu-id': inst_data.onu_id,
- 'expected-serial-number': inst_data.expected_serial_number,
- 'preferred-channel-pair': inst_data.preferred_chanpair,
- 'channel-partition': inst_data.parent_ref,
- 'upstream-channel-speed': inst_data.upstream_channel_speed,
- 'data': data
- }
-
- elif isinstance(data, VEnetConfig):
- return 'vEnet', {
- 'name': name,
- 'enabled': interface.enabled,
- 'v-ont-ani': inst_data.v_ontani_ref
- }
-
+ def _update_download_status(self, request, download):
+ if download is not None:
+ request.state = download.download_state
+ request.reason = download.failure_reason
+ request.image_state = download.image_state
+ request.additional_info = download.additional_info
+ request.downloaded_bytes = download.downloaded_bytes
else:
- raise NotImplementedError('Unknown data type')
+ request.state = ImageDownload.DOWNLOAD_UNKNOWN
+ request.reason = ImageDownload.UNKNOWN_ERROR
+ request.image_state = ImageDownload.IMAGE_UNKNOWN
+ request.additional_info = "Download request '{}' not found".format(request.name)
+ request.downloaded_bytes = 0
- def create_interface(self, data):
+ self.adapter_agent.update_image_download(request)
+
+ def start_download(self, device, request, done):
"""
- Create XPON interfaces
- :param data: (xpon config info)
+ This is called to request downloading a specified image into
+ the standby partition of a device based on an NBI call.
+
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :param done: (Deferred) Deferred to fire when done
+ :return: (Deferred) Shall be fired to acknowledge the download.
"""
- self.log.debug('create-interface', interface=data.interface, inst_data=data.data)
+ log.info('image_download', request=request)
- name = data.name
- items = self._get_xpon_collection(data)
+ try:
+ if request.name in self._downloads:
+ raise Exception("Download request with name '{}' already exists".
+ format(request.name))
+ try:
+ download = Download.create(self, request, self._download_protocols)
- if items is not None and name not in items:
- self._cached_xpon_pon_info = {} # Clear cached data
+ except Exception:
+ request.additional_info = 'Download request creation failed due to exception'
+ raise
- item_type, new_item = self._data_to_dict(data)
- #self.log.debug('new-item', item_type=item_type, item=new_item)
+ try:
+ self._downloads[download.name] = download
+ self._update_download_status(request, download)
+ done.callback('started')
+ return done
- if name not in items:
- self.log.debug('new-item', item_type=item_type, item=new_item)
+ except Exception:
+ request.additional_info = 'Download request startup failed due to exception'
+ del self._downloads[download.name]
+ download.cancel_download(request)
+ raise
- items[name] = new_item
+ except Exception as e:
+ self.log.exception('create', e=e)
- if isinstance(data, ChannelterminationConfig):
- self._on_channel_termination_create(name)
+ request.reason = ImageDownload.UNKNOWN_ERROR
+ request.state = ImageDownload.DOWNLOAD_FAILED
+ if not request.additional_info:
+ request.additional_info = e.message
- def update_interface(self, data):
+ self.adapter_agent.update_image_download(request)
+
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self.adapter_agent.update_device(device)
+ raise
+
+ def download_status(self, device, request, done):
"""
- Update XPON interfaces
- :param data: (xpon config info)
+ This is called to inquire about the status of a requested image download,
+ based on an NBI call.
+
+ The adapter is expected to update the ImageDownload DB object with the
+ query result.
+
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :param done: (Deferred) Deferred to fire when done
+
+ :return: (Deferred) Shall be fired to acknowledge
"""
- name = data.name
- items = self._get_xpon_collection(data)
+ log.info('download_status', request=request)
+ download = self._downloads.get(request.name)
- if items is None:
- raise ValueError('Unknown data type: {}'.format(type(data)))
+ self._update_download_status(request, download)
- existing_item = items.get(name)
- if existing_item is None:
- raise KeyError("'{}' not found. Type: {}".format(name, type(data)))
+ if request.state != ImageDownload.DOWNLOAD_STARTED:
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self.adapter_agent.update_device(device)
- item_type, update_item = self._data_to_dict(data)
- self.log.debug('update-item', item_type=item_type, item=update_item)
+ done.callback(request.state)
+ return done
- # Calculate the difference
- diffs = AdtranDeviceHandler._dict_diff(existing_item, update_item)
+ def cancel_download(self, device, request, done):
+ """
+ This is called to cancel a requested image download based on an NBI
+ call. The admin state of the device will not change after the
+ download.
- if len(diffs) == 0:
- self.log.debug('update-item-no-diffs')
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :param done: (Deferred) Deferred to fire when done
- self._cached_xpon_pon_info = {} # Clear cached data
+ :return: (Deferred) Shall be fired to acknowledge
+ """
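+ # The tracked Download entry is removed before cancelling so a late status
+ # poll cannot re-report it; an unknown request name is signalled via errback.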
+ log.info('cancel_download', request=request)
- # Act on changed items
- if isinstance(data, ChannelgroupConfig):
- self._on_channel_group_modify(name, items, diffs)
+ download = self._downloads.get(request.name)
- elif isinstance(data, ChannelpartitionConfig):
- self._on_channel_partition_modify(name, items, diffs)
-
- elif isinstance(data, ChannelpairConfig):
- self._on_channel_pair_modify(name, items, diffs)
-
- elif isinstance(data, ChannelterminationConfig):
- self._on_channel_termination_modify(name, items, diffs)
-
- elif isinstance(data, OntaniConfig):
- raise NotImplementedError('TODO: not yet supported')
-
- elif isinstance(data, VOntaniConfig):
- raise NotImplementedError('TODO: not yet supported')
-
- elif isinstance(data, VEnetConfig):
- raise NotImplementedError('TODO: not yet supported')
-
+ if download is not None:
+ del self._downloads[request.name]
+ result = download.cancel_download(request)
+ self._update_download_status(request, download)
+ done.callback(result)
else:
- raise NotImplementedError('Unknown data type')
+ self._update_download_status(request, download)
+ done.errback(KeyError('Download request not found'))
- raise NotImplementedError('TODO: not yet supported')
+ if device.admin_state == AdminState.DOWNLOADING_IMAGE:
+ device.admin_state = AdminState.ENABLED
+ self.adapter_agent.update_device(device)
- def remove_interface(self, data):
+ return done
+
+ def activate_image(self, device, request, done):
"""
- Deleete XPON interfaces
- :param data: (xpon config info)
+ This is called to activate a downloaded image from the standby partition
+ into the active partition.
+
+ Depending on the device implementation, this call may or may not cause a
+ device reboot. If it does not, a reboot is required afterwards to run the
+ activated image on the device.
+
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :param done: (Deferred) Deferred to fire when done
+
+ :return: (Deferred) OperationResponse object.
"""
- name = data.name
+ log.info('activate_image', request=request)
- items = self._get_xpon_collection(data)
- item = items.get(name)
- self.log.debug('delete-interface', name=name, data=data)
-
- if item is not None:
- self._cached_xpon_pon_info = {} # Clear cached data
- del items[name]
-
- if isinstance(data, ChannelgroupConfig):
- pass # Rely upon xPON logic to not allow delete of a referenced group
-
- elif isinstance(data, ChannelpartitionConfig):
- pass # Rely upon xPON logic to not allow delete of a referenced partition
-
- elif isinstance(data, ChannelpairConfig):
- pass # Rely upon xPON logic to not allow delete of a referenced pair
-
- elif isinstance(data, ChannelterminationConfig):
- self._on_channel_termination_delete(name)
-
- elif isinstance(data, OntaniConfig):
- pass
-
- elif isinstance(data, VOntaniConfig):
- pass
-
- elif isinstance(data, VEnetConfig):
- pass
-
- else:
- raise NotImplementedError('Unknown data type')
-
- raise NotImplementedError('TODO: not yet supported')
-
- def _valid_to_modify(self, item_type, valid, diffs):
- bad_keys = [mod_key not in valid for mod_key in diffs]
- if len(bad_keys) != 0:
- self.log.warn("{} modification of '{}' not supported").format(item_type, bad_keys[0])
- return False
- return True
-
- def _get_related_pons(self, item_type):
-
- if isinstance(item_type, ChannelgroupConfig):
- return [] # TODO: Implement
-
- elif isinstance(item_type, ChannelpartitionConfig):
- return [] # TODO: Implement
-
- elif isinstance(item_type, ChannelpairConfig):
- return [] # TODO: Implement
-
- elif isinstance(item_type, ChannelterminationConfig):
- return [] # TODO: Implement
-
+ download = self._downloads.get(request.name)
+ if download is not None:
+ del self._downloads[request.name]
+ result = download.activate_image()
+ self._update_download_status(request, download)
+ done.callback(result)
else:
- return []
+ self._update_download_status(request, download)
+ done.errback(KeyError('Download request not found'))
- def _on_channel_group_modify(self, name, items, diffs):
- if len(diffs) == 0:
- return
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self.adapter_agent.update_device(device)
+ return done
- valid_keys = ['polling-period'] # Modify of these keys supported
+ def revert_image(self, device, request, done):
+ """
+ This is called to deactivate the specified image in the active partition
+ and revert to the previous image in the standby partition.
- if self._valid_to_modify('channel-group', valid_keys, diffs.keys()):
- self.log.info('TODO: Not-Implemented-yet')
- # for k, v in diffs.items:
- # items[name][k] = v
+ Depending on the device implementation, this call may or may not cause a
+ device reboot. If it does not, a reboot is required afterwards to run the
+ previous image on the device.
- def _on_channel_partition_modify(self, name, items, diffs):
- if len(diffs) == 0:
- return
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :param done: (Deferred) Deferred to fire when done
- valid_keys = ['fec-downstream', 'mcast-aes', 'differential-fiber-distance']
+ :return: (Deferred) OperationResponse object.
+ """
+ log.info('revert_image', request=request)
- if self._valid_to_modify('channel-partition', valid_keys, diffs.keys()):
- self.log.info('TODO: Not-Implemented-yet')
- # for k, v in diffs.items:
- # items[name][k] = v
+ download = self._downloads.get(request.name)
+ if download is not None:
+ del self._downloads[request.name]
+ result = download.revert_image()
+ self._update_download_status(request, download)
+ done.callback(result)
+ else:
+ self._update_download_status(request, download)
+ done.errback(KeyError('Download request not found'))
- def _on_channel_pair_modify(self, name, items, diffs):
- if len(diffs) == 0:
- return
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self.adapter_agent.update_device(device)
+ return done
- valid_keys = ['line-rate'] # Modify of these keys supported
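+ # The on_*_modify handlers below share a common pattern: reject any diff that
+ # touches a read-only/write-once leaf, then push each supported change down
+ # to the related PON port(s) or ONU(s). 'update' is the new config dict and
+ # 'diffs' holds only the keys whose values changed.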
+ def on_channel_group_modify(self, cgroup, update, diffs):
+ valid_keys = ['enabled',
+ 'polling-period',
+ 'system-id'] # Modify of these keys supported
- if self._valid_to_modify('channel-pair', valid_keys, diffs.keys()):
- self.log.info('TODO: Not-Implemented-yet')
- # for k, v in diffs.items:
- # items[name][k] = v
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("channel-group leaf '{}' is read-only or write-once".format(invalid_key))
- def _on_channel_termination_create(self, name, pon_type='xgs-ponid'):
- assert name in self._channel_terminations, \
- '{} is not a channel-termination'.format(name)
- ct = self._channel_terminations[name]
+ pons = self.get_related_pons(cgroup)
+ keys = [k for k in diffs.keys() if k in valid_keys]
- pon_id = ct[pon_type]
- # Look up the southbound PON port
+ for k in keys:
+ if k == 'enabled':
+ pass # TODO: ?
- pon_port = self.southbound_ports.get(pon_id, None)
+ elif k == 'polling-period':
+ for pon in pons:
+ pon.discovery_tick = update[k]
+
+ elif k == 'system-id':
+ self.system_id = update[k]
+
+ return update
+
+ def on_channel_partition_modify(self, cpartition, update, diffs):
+ valid_keys = ['enabled', 'fec-downstream', 'mcast-aes', 'differential-fiber-distance']
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("channel-partition leaf '{}' is read-only or write-once".format(invalid_key))
+
+ pons = self.get_related_pons(cpartition)
+ keys = [k for k in diffs.keys() if k in valid_keys]
+
+ for k in keys:
+ if k == 'enabled':
+ pass # TODO: ?
+
+ elif k == 'fec-downstream':
+ for pon in pons:
+ pon.downstream_fec_enable = update[k]
+
+ elif k == 'mcast-aes':
+ for pon in pons:
+ pon.mcast_aes = update[k]
+
+ elif k == 'differential-fiber-distance':
+ for pon in pons:
+ pon.deployment_range = update[k] * 1000 # pon-agent uses meters
+ return update
+
+ def on_channel_pair_modify(self, cpair, update, diffs):
+ valid_keys = ['enabled', 'line-rate'] # Modify of these keys supported
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("channel-pair leaf '{}' is read-only or write-once".format(invalid_key))
+
+ pons = self.get_related_pons(cpair)
+ keys = [k for k in diffs.keys() if k in valid_keys]
+
+ for k in keys:
+ if k == 'enabled':
+ pass # TODO: ?
+
+ elif k == 'line-rate':
+ for pon in pons:
+ pon.line_rate = update[k]
+ return update
+
+ def on_channel_termination_create(self, ct, pon_type='xgs-ponid'):
+ pons = self.get_related_pons(ct, pon_type=pon_type)
+ pon_port = pons[0] if len(pons) == 1 else None
+
if pon_port is None:
- raise ValueError('Unknown PON port. PON-ID: {}'.format(pon_id))
+ raise ValueError('Unknown PON port. PON-ID: {}'.format(ct[pon_type]))
- assert ct['channel-pair'] in self._channel_pairs, \
+ assert ct['channel-pair'] in self.channel_pairs, \
'{} is not a channel-pair'.format(ct['channel-pair'])
- cpair = self._channel_pairs[ct['channel-pair']]
+ cpair = self.channel_pairs[ct['channel-pair']]
- assert cpair['channel-group'] in self._channel_groups, \
+ assert cpair['channel-group'] in self.channel_groups, \
'{} is not a channel-group'.format(cpair['channel-group'])
- assert cpair['channel-partition'] in self._channel_partitions, \
+ assert cpair['channel-partition'] in self.channel_partitions, \
'{} is not a channel-partition'.format(cpair['channel-partition'])
- cg = self._channel_groups[cpair['channel-group']]
- cpart = self._channel_partitions[cpair['channel-partition']]
+ cg = self.channel_groups[cpair['channel-group']]
+ cpart = self.channel_partitions[cpair['channel-partition']]
- enabled = ct['enabled']
-
polling_period = cg['polling-period']
+ system_id = cg['system-id']
authentication_method = cpart['authentication-method']
# line_rate = cpair['line-rate']
downstream_fec = cpart['fec-downstream']
deployment_range = cpart['differential-fiber-distance']
- # mcast_aes = cpart['mcast-aes']
-
+ mcast_aes = cpart['mcast-aes']
# TODO: Support BER calculation period
- # TODO Support setting of line rate
- pon_port.xpon_name = name
+ pon_port.xpon_name = ct['name']
pon_port.discovery_tick = polling_period
pon_port.authentication_method = authentication_method
- pon_port.deployment_range = deployment_range * 1000 # pon-agent uses meters
+ pon_port.deployment_range = deployment_range * 1000 # pon-agent uses meters
pon_port.downstream_fec_enable = downstream_fec
- # TODO: For now, upstream FEC = downstream
- pon_port.upstream_fec_enable = downstream_fec
+ pon_port.mcast_aes = mcast_aes
+ # pon_port.line_rate = line_rate # TODO: support once 64-bits
+ self.system_id = system_id
- # TODO: pon_port.mcast_aes = mcast_aes
-
+ # 'enabled' should be a logical AND of all referenced items, but
+ # there is no easy way to detect changes in the referenced items.
+ # enabled = ct['enabled'] and cpair['enabled'] and cg['enabled'] and cpart['enabled']
+ enabled = ct['enabled']
pon_port.admin_state = AdminState.ENABLED if enabled else AdminState.DISABLED
+ return ct
- def _on_channel_termination_modify(self, name, items, diffs):
- if len(diffs) == 0:
- return
+ def on_channel_termination_modify(self, ct, update, diffs, pon_type='xgs-ponid'):
+ valid_keys = ['enabled'] # Modify of these keys supported
- valid_keys = ['enabled'] # Modify of these keys supported
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("channel-termination leaf '{}' is read-only or write-once".format(invalid_key))
- if self._valid_to_modify('channel-termination', valid_keys, diffs.keys()):
- self.log.info('TODO: Not-Implemented-yet')
- # for k, v in diffs.items:
- # items[name][k] = v
+ pons = self.get_related_pons(ct, pon_type=pon_type)
+ pon_port = pons[0] if len(pons) == 1 else None
- def _on_channel_termination_delete(self, name, pon_type='xgs-ponid'):
- assert name in self._channel_terminations, \
- '{} is not a channel-termination'.format(name)
- ct = self._channel_terminations[name]
-
- # Look up the southbound PON port
- pon_id = ct[pon_type]
- pon_port = self.southbound_ports.get(pon_id, None)
if pon_port is None:
- raise ValueError('Unknown PON port. PON-ID: {}'.format(pon_id))
+ raise ValueError('Unknown PON port. PON-ID: {}'.format(ct[pon_type]))
+
+ keys = [k for k in diffs.keys() if k in valid_keys]
+
+ for k in keys:
+ if k == 'enabled':
+ enabled = update[k]
+ pon_port.admin_state = AdminState.ENABLED if enabled else AdminState.DISABLED
+ return update
+
+ def on_channel_termination_delete(self, ct, pon_type='xgs-ponid'):
+ pons = self.get_related_pons(ct, pon_type=pon_type)
+ pon_port = pons[0] if len(pons) == 1 else None
+
+ if pon_port is None:
+ raise ValueError('Unknown PON port. PON-ID: {}'.format(ct[pon_type]))
pon_port.admin_state = AdminState.DISABLED
+ return None
- def _on_ont_ani_create(self, name):
- self.log.info('TODO: Not-Implemented-yet')
- # elif isinstance(data, OntaniConfig):
- # return 'ont-ani', {
- # 'name': name,
- # 'enabled': interface.enabled,
- # 'upstream-fec': inst_data.upstream_fec_indicator,
- # 'mgnt-gemport-aes': inst_data.mgnt_gemport_aes_indicator
- # }
+ def on_ont_ani_modify(self, ont_ani, update, diffs):
+ valid_keys = ['enabled', 'upstream-fec'] # Modify of these keys supported
- def _on_ont_ani_delete(self, name):
- self.log.info('TODO: Not-Implemented-yet')
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("ont-ani leaf '{}' is read-only or write-once".format(invalid_key))
- def _on_ont_ani_modify(self, name, items, existing, update, diffs):
- pass
+ onus = self.get_related_onus(ont_ani)
+ keys = [k for k in diffs.keys() if k in valid_keys]
- def create_tcont(self, tcont_data, traffic_descriptor_data):
- """
- Create TCONT information
- :param tcont_data:
- :param traffic_descriptor_data:
- """
- self.log.debug('create-tcont', tcont=tcont_data, td=traffic_descriptor_data)
+ for k in keys:
+ if k == 'enabled':
+ pass # TODO: Have only ONT use this value?
- traffic_descriptor = TrafficDescriptor.create(traffic_descriptor_data)
- tcont = TCont.create(tcont_data, traffic_descriptor)
+ elif k == 'upstream-fec':
+ for onu in onus:
+ onu.upstream_fec_enable = update[k]
+ return update
- if tcont.name not in self._tconts:
- self._cached_xpon_pon_info = {} # Clear cached data
- self._tconts[tcont.name] = tcont
+ def on_vont_ani_modify(self, vont_ani, update, diffs):
+ valid_keys = ['enabled',
+ 'expected-serial-number',
+ 'upstream-channel-speed'
+ ] # Modify of these keys supported
- # Update any ONUs referenced
- tcont.xpon_create(self)
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("vont-ani leaf '{}' is read-only or write-once".format(invalid_key))
- if traffic_descriptor.name not in self._traffic_descriptors:
- self._traffic_descriptors[traffic_descriptor.name] = traffic_descriptor
+ onus = self.get_related_onus(vont_ani)
+ keys = [k for k in diffs.keys() if k in valid_keys]
- # Update any ONUs referenced
- traffic_descriptor.xpon_create(self, tcont)
+ for k in keys:
+ if k == 'enabled':
+ for onu in onus:
+ onu.enabled = update[k]
+ elif k == 'expected-serial-number':
+ for onu in onus:
+ if onu.serial_number != update[k]:
+ onu.pon.delete_onu(onu.onu_id)
+ elif k == 'upstream-channel-speed':
+ for onu in onus:
+ onu.upstream_channel_speed = update[k]
+ return update
- def update_tcont(self, tcont_data, traffic_descriptor_data):
- """
- Update TCONT information
- :param tcont_data:
- :param traffic_descriptor_data:
- """
- self.log.debug('update-tcont', tcont=tcont_data, td=traffic_descriptor_data)
+ def on_vont_ani_delete(self, vont_ani):
+ onus = self.get_related_onus(vont_ani)
- if tcont_data.name not in self._tconts:
- raise KeyError("TCONT '{}' does not exists".format(tcont_data.name))
+ for onu in onus:
+ try:
+ onu.pon.delete_onu(onu.onu_id)
- if traffic_descriptor_data.name not in self._traffic_descriptors:
- raise KeyError("Traffic Descriptor '{}' does not exists".
- format(traffic_descriptor_data.name))
+ except Exception as e:
+ self.log.exception('onu', onu=onu, e=e)
- self._cached_xpon_pon_info = {} # Clear cached data
+ return None
- traffic_descriptor = TrafficDescriptor.create(traffic_descriptor_data)
- tcont = TCont.create(tcont_data, traffic_descriptor)
- #
- # Update any ONUs referenced
- # tcont.xpon_update(self)
- # traffic_descriptor.xpon_update(self, tcont)
- pass
- raise NotImplementedError('TODO: Not yet supported')
+ def _get_tcont_onu(self, vont_ani):
+ onu = None
+ try:
+ vont_ani = self.v_ont_anis.get(vont_ani)
+ ch_pair = self.channel_pairs.get(vont_ani['preferred-channel-pair'])
+ ch_term = next((term for term in self.channel_terminations.itervalues()
+ if term['channel-pair'] == ch_pair['name']), None)
- def remove_tcont(self, tcont_data, traffic_descriptor_data):
- """
- Remove TCONT information
- :param tcont_data:
- :param traffic_descriptor_data:
- """
- self.log.debug('remove-tcont', tcont=tcont_data, td=traffic_descriptor_data)
+ pon = self.pon(ch_term['xgs-ponid'])
+ onu = pon.onu(vont_ani['onu-id'])
- tcont = self._tconts.get(tcont_data.name)
- traffic_descriptor = self._traffic_descriptors.get(traffic_descriptor_data.name)
+ except Exception:
+ pass
- if traffic_descriptor is not None:
- del self._traffic_descriptors[traffic_descriptor_data.name]
+ return onu
- self._cached_xpon_pon_info = {} # Clear cached data
- pass # Perform any needed operations
- # raise NotImplementedError('TODO: Not yet supported')
+ def on_tcont_create(self, tcont):
+ from xpon.olt_tcont import OltTCont
- if tcont is not None:
- del self._tconts[tcont_data.name]
+ td = self.traffic_descriptors.get(tcont.get('td-ref'))
+ traffic_descriptor = td['object'] if td is not None else None
- self._cached_xpon_pon_info = {} # Clear cached data
+ tcont['object'] = OltTCont.create(tcont, traffic_descriptor)
- # Update any ONUs referenced
- # tcont.xpon_delete(self)
+ # Look up any ONU associated with this TCONT (should be only one if any)
+ onu = self._get_tcont_onu(tcont['vont-ani'])
- pass # Perform any needed operations
- raise NotImplementedError('TODO: Not yet supported')
+ if onu is not None: # Has it been discovered yet?
+ onu.add_tcont(tcont['object'])
- def create_gemport(self, data):
- """
- Create GEM Port
- :param data:
- """
- self.log.debug('create-gemport', gem_port=data)
+ return tcont
- gem_port = GemPort.create(data, self)
+ def on_tcont_modify(self, tcont, update, diffs):
+ valid_keys = ['td-ref'] # Modify of these keys supported
- if gem_port.name in self._gem_ports:
- raise KeyError("GEM Port '{}' already exists".format(gem_port.name))
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("TCONT leaf '{}' is read-only or write-once".format(invalid_key))
- self._cached_xpon_pon_info = {} # Clear cached data
- self._gem_ports[gem_port.name] = gem_port
+ tc = tcont.get('object')
+ assert tc is not None, 'TCONT not found'
- # Update any ONUs referenced
- gem_port.xpon_create(self)
+ update['object'] = tc
- def update_gemport(self, data):
- """
- Update GEM Port
- :param data:
- """
- self.log.debug('update-gemport', gem_port=data)
+ # Look up any ONU associated with this TCONT (should be only one if any)
+ onu = self._get_tcont_onu(tcont['vont-ani'])
- if data.name not in self._gem_ports:
- raise KeyError("GEM Port '{}' does not exists".format(data.name))
+ if onu is not None: # Has it been discovered yet?
+ keys = [k for k in diffs.keys() if k in valid_keys]
- self._cached_xpon_pon_info = {} # Clear cached data
- #gem_port = GemPort.create(data)
- #
- # TODO: On GEM Port changes, may need to add/delete/modify ONU Flow(s)
- # Update any ONUs referenced
- # gem_port.xpon_update(self)
- pass
- raise NotImplementedError('TODO: Not yet supported')
+ for k in keys:
+ if k == 'td-ref':
+ td = self.traffic_descriptors.get(update['td-ref'])
+ if td is not None:
+ onu.update_tcont_td(tcont['alloc-id'], td)
- def remove_gemport(self, data):
- """
- Delete GEM Port
- :param data:
- """
- self.log.debug('remove-gemport', gem_port=data.name)
+ return update
- gem_port = self._gem_ports.get(data.name)
+ def on_tcont_delete(self, tcont):
+ onu = self._get_tcont_onu(tcont['vont-ani'])
- if gem_port is not None:
- del self._gem_ports[data.name]
+ if onu is not None:
+ onu.remove_tcont(tcont['alloc-id'])
- self._cached_xpon_pon_info = {} # Clear cached data
- #
- # TODO: On GEM Port changes, may need to delete ONU Flow(s)
- # Update any ONUs referenced
- # gem_port.xpon_delete(self)
- pass # Perform any needed operations
- raise NotImplementedError('TODO: Not yet supported')
+ return None
- def create_multicast_gemport(self, data):
- """
- API to create multicast gemport object in the devices
- :data: multicast gemport data object
- :return: None
- """
- self.log.debug('create-mcast-gemport', gem_port=data)
- #
- #
- #
- raise NotImplementedError('TODO: Not yet supported')
+ def on_td_create(self, traffic_disc):
+ from xpon.olt_traffic_descriptor import OltTrafficDescriptor
+ traffic_disc['object'] = OltTrafficDescriptor.create(traffic_disc)
+ return traffic_disc
- def update_multicast_gemport(self, data):
- """
- API to update multicast gemport object in the devices
- :data: multicast gemport data object
- :return: None
- """
- self.log.debug('update-mcast-gemport', gem_port=data)
- #
- #
- #
- raise NotImplementedError('TODO: Not yet supported')
+ def on_td_modify(self, traffic_disc, update, diffs):
+ from xpon.olt_traffic_descriptor import OltTrafficDescriptor
- def remove_multicast_gemport(self, data):
- """
- API to delete multicast gemport object in the devices
- :data: multicast gemport data object
- :return: None
- """
- self.log.debug('delete-mcast-gemport', gem_port=data.name)
- #
- #
- #
- raise NotImplementedError('TODO: Not yet supported')
+ valid_keys = ['fixed-bandwidth',
+ 'assured-bandwidth',
+ 'maximum-bandwidth',
+ 'priority',
+ 'weight',
+ 'additional-bw-eligibility-indicator']
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("traffic-descriptor leaf '{}' is read-only or write-once".format(invalid_key))
- def create_multicast_distribution_set(self, data):
- """
- API to create multicast distribution rule to specify
- the multicast VLANs that ride on the multicast gemport
- :data: multicast distribution data object
- :return: None
- """
- #
- #
- #
- raise NotImplementedError('TODO: Not yet supported')
+ # New traffic descriptor
+ update['object'] = OltTrafficDescriptor.create(update)
- def update_multicast_distribution_set(self, data):
- """
- API to update multicast distribution rule to specify
- the multicast VLANs that ride on the multicast gemport
- :data: multicast distribution data object
- :return: None
- """
- #
- #
- #
- raise NotImplementedError('TODO: Not yet supported')
+ td_name = traffic_disc['name']
+ tconts = {key: val for key, val in self.tconts.iteritems()
+ if val['td-ref'] == td_name and td_name is not None}
- def remove_multicast_distribution_set(self, data):
- """
- API to delete multicast distribution rule to specify
- the multicast VLANs that ride on the multicast gemport
- :data: multicast distribution data object
- :return: None
- """
- #
- #
- #
- raise NotImplementedError('TODO: Not yet supported')
+ for tcont in tconts.itervalues():
+ # Look up any ONU associated with this TCONT (should be only one if any)
+ onu = self._get_tcont_onu(tcont['vont-ani'])
+ if onu is not None:
+ onu.update_tcont_td(tcont['alloc-id'], update['object'])
+
+ return update
+
+ def on_td_delete(self, traffic_desc):
+ # TD may be used by more than one TCONT. Only delete if the last one
+ td_name = traffic_desc['name']
+ num_tconts = len([val for val in self.tconts.itervalues()
+ if val['td-ref'] == td_name and td_name is not None])
+ return None if num_tconts <= 1 else traffic_desc
+
+ def on_gemport_create(self, gem_port):
+ from xpon.olt_gem_port import OltGemPort
+ # Create a GemPort object to wrap the dictionary
+ gem_port['object'] = OltGemPort.create(self, gem_port)
+
+ onus = self.get_related_onus(gem_port)
+ assert len(onus) <= 1, 'Too many ONUs: {}'.format(len(onus))
+
+ if len(onus) == 1:
+ onus[0].add_gem_port(gem_port['object'])
+
+ return gem_port
+
+ def on_gemport_modify(self, gem_port, update, diffs):
+ valid_keys = ['encryption',
+ 'traffic-class'] # Modify of these keys supported
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("GEM Port leaf '{}' is read-only or write-once".format(invalid_key))
+
+ port = gem_port.get('object')
+ assert port is not None, 'GemPort not found'
+
+ keys = [k for k in diffs.keys() if k in valid_keys]
+ update['object'] = port
+
+ for k in keys:
+ if k == 'encryption':
+ port.encryption = update[k]
+ elif k == 'traffic-class':
+ pass # TODO: Implement
+
+ return update
+
+ def on_gemport_delete(self, gem_port):
+ onus = self.get_related_onus(gem_port)
+ assert len(onus) <= 1, 'Too many ONUs: {}'.format(len(onus))
+ if len(onus) == 1:
+ onus[0].remove_gem_id(gem_port['gemport-id'])
+ return None
diff --git a/voltha/adapters/adtran_olt/alarms/__init__.py b/voltha/adapters/adtran_olt/alarms/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/voltha/adapters/adtran_olt/alarms/__init__.py
diff --git a/voltha/adapters/adtran_olt/adapter_alarms.py b/voltha/adapters/adtran_olt/alarms/adapter_alarms.py
similarity index 62%
rename from voltha/adapters/adtran_olt/adapter_alarms.py
rename to voltha/adapters/adtran_olt/alarms/adapter_alarms.py
index ceb1063..0e2df7c 100644
--- a/voltha/adapters/adtran_olt/adapter_alarms.py
+++ b/voltha/adapters/adtran_olt/alarms/adapter_alarms.py
@@ -14,6 +14,9 @@
# limitations under the License.
import structlog
+import arrow
+from voltha.protos.events_pb2 import AlarmEventType, \
+ AlarmEventSeverity, AlarmEventState, AlarmEventCategory
# TODO: In the device adapter, the following alarms are still TBD
# (Taken from microsemi, so mileage may vary
@@ -42,10 +45,10 @@
class AdapterAlarms:
- def __init__(self, adapter, device):
- self.log = structlog.get_logger(device_id=device.id)
+ def __init__(self, adapter, device_id):
+ self.log = structlog.get_logger(device_id=device_id)
self.adapter = adapter
- self.device_id = device.id
+ self.device_id = device_id
self.lc = None
def format_id(self, alarm):
@@ -83,3 +86,42 @@
except Exception as e:
self.log.exception('failed-to-send-alarm', e=e)
+
+
+class AlarmBase(object):
+ def __init__(self, handler, object_type, alarm,
+ alarm_category,
+ alarm_type=AlarmEventType.EQUIPMENT,
+ alarm_severity=AlarmEventSeverity.CRITICAL):
+ self._handler = handler
+ self._object_type = object_type
+ self._alarm = alarm
+ self._alarm_category = alarm_category
+ self._alarm_type = alarm_type
+ self._alarm_severity = alarm_severity
+
+ def get_alarm_data(self, status):
+ return {
+ 'ts': arrow.utcnow().timestamp,
+ 'description': self._handler.alarms.format_description(self._object_type,
+ self._alarm,
+ status),
+ 'id': self._handler.alarms.format_id(self._alarm),
+ 'type': self._alarm_type,
+ 'category': self._alarm_category,
+ 'severity': self._alarm_severity,
+ 'state': AlarmEventState.RAISED if status else AlarmEventState.CLEARED
+ }
+
+ def get_context_data(self):
+ return {} # You should override this if needed
+
+ def raise_alarm(self):
+ alarm_data = self.get_alarm_data(True)
+ context_data = self.get_context_data()
+ self._handler.alarms.send_alarm(context_data, alarm_data)
+
+ def clear_alarm(self):
+ alarm_data = self.get_alarm_data(False)
+ context_data = self.get_context_data()
+ self._handler.alarms.send_alarm(context_data, alarm_data)
diff --git a/voltha/adapters/adtran_olt/alarms/heartbeat_alarm.py b/voltha/adapters/adtran_olt/alarms/heartbeat_alarm.py
new file mode 100644
index 0000000..418867f
--- /dev/null
+++ b/voltha/adapters/adtran_olt/alarms/heartbeat_alarm.py
@@ -0,0 +1,29 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from adapter_alarms import AlarmBase
+
+
+class HeartbeatAlarm(AlarmBase):
+ def __init__(self, handler, object_type='olt', heartbeat_misses=0):
+ super(HeartbeatAlarm, self).__init__(handler, object_type,
+ alarm='Heartbeat',
+ alarm_category=AlarmEventCategory.PON,
+ alarm_type=AlarmEventType.EQUIPMENT,
+ alarm_severity=AlarmEventSeverity.CRITICAL)
+ self._misses = heartbeat_misses
+
+ def get_context_data(self):
+ return {'heartbeats-missed': self._misses}
+
diff --git a/voltha/adapters/adtran_olt/alarms/onu_discovery_alarm.py b/voltha/adapters/adtran_olt/alarms/onu_discovery_alarm.py
new file mode 100644
index 0000000..8e992f2
--- /dev/null
+++ b/voltha/adapters/adtran_olt/alarms/onu_discovery_alarm.py
@@ -0,0 +1,35 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from adapter_alarms import AlarmBase
+
+
+class OnuDiscoveryAlarm(AlarmBase):
+ def __init__(self, handler, pon_id, serial_number):
+ super(OnuDiscoveryAlarm, self).__init__(handler, 'ONU Discovery',
+ alarm='Discovery',
+ alarm_category=AlarmEventCategory.ONT,
+ alarm_type=AlarmEventType.COMMUNICATION,
+ alarm_severity=AlarmEventSeverity.MAJOR)
+ self._pon_id = pon_id
+ self._serial_number = serial_number
+
+ def get_context_data(self):
+ return {
+ 'pon-id': self._pon_id,
+ 'serial-number': self._serial_number
+ }
+
+ def clear_alarm(self):
+ raise NotImplementedError('ONU Discovery Alarms are auto-clear')
diff --git a/voltha/adapters/adtran_olt/alarms/onu_los_alarm.py b/voltha/adapters/adtran_olt/alarms/onu_los_alarm.py
new file mode 100644
index 0000000..fba03a6
--- /dev/null
+++ b/voltha/adapters/adtran_olt/alarms/onu_los_alarm.py
@@ -0,0 +1,29 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from adapter_alarms import AlarmBase
+
+
+class OnuLosAlarm(AlarmBase):
+ def __init__(self, handler, onu_id):
+ super(OnuLosAlarm, self).__init__(handler, 'onu LOS',
+ alarm='LOS',
+ alarm_category=AlarmEventCategory.ONT,
+ alarm_type=AlarmEventType.COMMUNICATION,
+ alarm_severity=AlarmEventSeverity.MAJOR)
+ self._onu_id = onu_id
+
+ def get_context_data(self):
+ return {'onu-id': self._onu_id}
+
diff --git a/voltha/adapters/adtran_olt/codec/olt_config.py b/voltha/adapters/adtran_olt/codec/olt_config.py
index 88cfb0a..238d627 100644
--- a/voltha/adapters/adtran_olt/codec/olt_config.py
+++ b/voltha/adapters/adtran_olt/codec/olt_config.py
@@ -53,7 +53,7 @@
Provides decode of PON list from within
"""
def __init__(self, packet):
- assert 'pon-id' in packet
+ assert 'pon-id' in packet, 'pon-id not found'
self._packet = packet
self._onus = None
@@ -108,7 +108,7 @@
Provides decode of onu list for a PON port
"""
def __init__(self, packet):
- assert 'onu-id' in packet
+ assert 'onu-id' in packet, 'onu-id not found'
self._packet = packet
self._tconts = None
self._tconts_dict = None
@@ -119,11 +119,11 @@
return "OltConfig.Pon.Onu: onu-id: {}".format(self.onu_id)
@staticmethod
- def decode(onu_list):
+ def decode(onu_dict):
onus = {}
- if onu_list is not None:
- for onu_data in onu_list:
+ if onu_dict is not None and 'onu' in onu_dict:
+ for onu_data in onu_dict['onu']:
onu = OltConfig.Pon.Onu(onu_data)
assert onu.onu_id not in onus
onus[onu.onu_id] = onu
@@ -136,8 +136,8 @@
return self._packet['onu-id']
@property
- def serial_number(self):
- """The serial number is unique for each ONU"""
+ def serial_number_64(self):
+ """The serial number (base-64) is unique for each ONU"""
return self._packet.get('serial-number', '')
@property
@@ -179,7 +179,7 @@
Provides decode of onu list for the T-CONT container
"""
def __init__(self, packet):
- assert 'alloc-id' in packet
+ assert 'alloc-id' in packet, 'alloc-id not found'
self._packet = packet
self._traffic_descriptor = None
self._best_effort = None
@@ -282,7 +282,7 @@
Provides decode of onu list for the gem-ports container
"""
def __init__(self, packet):
- assert 'port-id' in packet
+ assert 'port-id' in packet, 'port-id not found'
self._packet = packet
def __str__(self):
diff --git a/voltha/adapters/adtran_olt/codec/olt_state.py b/voltha/adapters/adtran_olt/codec/olt_state.py
index cf55d43..4e6d6bd 100644
--- a/voltha/adapters/adtran_olt/codec/olt_state.py
+++ b/voltha/adapters/adtran_olt/codec/olt_state.py
@@ -72,11 +72,11 @@
"""
Provides decode of PON list from within
"""
-
def __init__(self, packet):
assert 'pon-id' in packet
self._packet = packet
self._onus = None
+ self._gems = None
def __str__(self):
return "OltState.Pon: pon-id: {}".format(self.pon_id)
@@ -116,27 +116,27 @@
@property
def rx_packets(self):
"""Sum all of the RX Packets of GEM ports that are not base TCONT's"""
- return self._packet.get('rx-packets', 0)
+ return int(self._packet.get('rx-packets', 0))
@property
def tx_packets(self):
"""Sum all of the TX Packets of GEM ports that are not base TCONT's"""
- return self._packet.get('tx-packets', 0)
+ return int(self._packet.get('tx-packets', 0))
@property
def rx_bytes(self):
"""Sum all of the RX Octets of GEM ports that are not base TCONT's"""
- return self._packet.get('rx-bytes', 0)
+ return int(self._packet.get('rx-bytes', 0))
@property
def tx_bytes(self):
"""Sum all of the TX Octets of GEM ports that are not base TCONT's"""
- return self._packet.get('tx-bytes', 0)
+ return int(self._packet.get('tx-bytes', 0))
@property
def tx_bip_errors(self):
"""Sum the TX ONU bip errors to get TX BIP's per PON"""
- return self._packet.get('tx-bip-errors', 0)
+ return int(self._packet.get('tx-bip-errors', 0))
@property
def wm_tuned_out_onus(self):
@@ -171,7 +171,9 @@
@property
def gems(self):
"""This list is not in the proposed BBF model, the stats are part of ietf-interfaces"""
- raise NotImplementedError('TODO: not yet supported')
+ if self._gems is None:
+ self._gems = OltState.Pon.Gem.decode(self._packet.get('gem', []))
+ return self._gems
@property
def onus(self):
@@ -186,7 +188,6 @@
"""
Provides decode of onu list for a PON port
"""
-
def __init__(self, packet):
assert 'onu-id' in packet, 'onu-id not found in packet'
self._packet = packet
@@ -236,3 +237,58 @@
"""Distance to ONU"""
return self._packet.get('fiber-length', 0)
+
+ class Gem(object):
+ """
+ Provides decode of the GEM port list for a PON port
+ """
+ def __init__(self, packet):
+ assert 'onu-id' in packet, 'onu-id not found in packet'
+ assert 'port-id' in packet, 'port-id not found in packet'
+ assert 'alloc-id' in packet, 'alloc-id not found in packet'
+ self._packet = packet
+
+ def __str__(self):
+ return "OltState.Pon.Gem: onu-id: {}, gem-id: {}".\
+ format(self.onu_id, self.gem_id)
+
+ @staticmethod
+ def decode(gem_list):
+ log.debug('gems:{}{}'.format(os.linesep,
+ pprint.PrettyPrinter().pformat(gem_list)))
+ gems = {}
+ for gem_data in gem_list:
+ gem = OltState.Pon.Gem(gem_data)
+ assert gem.gem_id not in gems
+ gems[gem.gem_id] = gem
+
+ return gems
+
+ @property
+ def onu_id(self):
+ """The ID used to identify the ONU"""
+ return self._packet['onu-id']
+
+ @property
+ def alloc_id(self):
+ return self._packet['alloc-id']
+
+ @property
+ def gem_id(self):
+ return self._packet['port-id']
+
+ @property
+ def tx_packets(self):
+ return int(self._packet.get('tx-packets', 0))
+
+ @property
+ def tx_bytes(self):
+ return int(self._packet.get('tx-bytes', 0))
+
+ @property
+ def rx_packets(self):
+ return int(self._packet.get('rx-packets', 0))
+
+ @property
+ def rx_bytes(self):
+ return int(self._packet.get('rx-bytes', 0))
diff --git a/voltha/adapters/adtran_olt/download.py b/voltha/adapters/adtran_olt/download.py
new file mode 100644
index 0000000..cbd3053
--- /dev/null
+++ b/voltha/adapters/adtran_olt/download.py
@@ -0,0 +1,489 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import structlog
+import xmltodict
+from twisted.internet import reactor
+from twisted.internet.defer import returnValue, inlineCallbacks
+from voltha.protos.device_pb2 import ImageDownload
+from voltha.protos.common_pb2 import AdminState
+
+log = structlog.get_logger()
+
+
+class Download(object):
+ """Class to wrap an image download"""
+
+ def __init__(self, handler, request, protocols):
+ self._handler = handler
+ self._deferred = None
+ self.device_id = request.id
+ self._name = request.name
+ self._url = request.url
+ self._crc = request.crc
+ self._version = request.image_version
+ self._local = request.local_dir
+ self._save_config = request.save_config
+ self._supported_protocols = protocols
+
+ self._download_state = ImageDownload.DOWNLOAD_UNKNOWN
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._image_state = ImageDownload.IMAGE_UNKNOWN
+ self._additional_info = ''
+ self._downloaded_octets = 0
+
+ # Server profile info
+ self._server_profile_name = None
+ self._scheme = None
+ self._host = ''
+ self._port = None
+ self._path = ''
+ self._auth = None
+
+ # Download job info
+ self._download_job_name = None
+
+ def __str__(self):
+ return "ImageDownload: {}".format(self.name)
+
+ @staticmethod
+ def create(handler, request, supported_protocols):
+ """
+ Create and start a new image download
+
+ :param handler: (AdtranDeviceHandler) Device download is for
+ :param request: (ImageDownload) Request
+ :param supported_protocols: (list) File transfer protocols (URL schemes) supported by the device
+ """
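+ # Note: the transfer is not started synchronously; start_download() runs on
+ # the next reactor iteration and the handler polls progress through the
+ # download_state / failure_reason / image_state properties.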
+ download = Download(handler, request, supported_protocols)
+ download._deferred = reactor.callLater(0, download.start_download)
+
+ return download
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def download_state(self):
+ return self._download_state
+
+ @property
+ def failure_reason(self):
+ return self._failure_reason
+
+ @property
+ def image_state(self):
+ return self._image_state
+
+ @property
+ def additional_info(self):
+ return self._additional_info
+
+ @property
+ def downloaded_bytes(self):
+ return self._downloaded_octets
+
+ @property
+ def profile_name(self):
+ return self._server_profile_name
+
+ def _cancel_deferred(self):
+ d, self._deferred = self._deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except Exception as e:
+ pass
+
+ @inlineCallbacks
+ def start_download(self):
+ import uuid
+ log.info('download-start', name=self.name)
+ if not self.parse_url():
+ self._download_failed()
+ returnValue('failed url parsing')
+
+ self._download_state = ImageDownload.DOWNLOAD_STARTED
+ self._failure_reason = ImageDownload.NO_ERROR
+
+ ##############################################################
+ # Configure the file server profile
+ try:
+ self._additional_info = 'Configuring Download Server profile'
+ self._server_profile_name = 'VOLTHA.download.{}'.format(uuid.uuid4())
+ profile = self.server_profile_xml
+ yield self._handler.netconf_client.edit_config(profile)
+
+ except Exception as e:
+ log.exception('server-profile', e=e)
+ self._server_profile_name = None
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info += ': Failure: {}'.format(e.message)
+ self._download_failed()
+ raise
+
+ ##############################################################
+ # Configure the software download maintenance job
+ try:
+ self._additional_info = 'Configuring Image Download Job'
+ self._download_job_name = 'VOLTHA.download.{}'.format(uuid.uuid4())
+ job = self.download_job_xml
+ yield self._handler.netconf_client.edit_config(job)
+
+ except Exception as e:
+ log.exception('download-job', e=e)
+ self._download_job_name = None
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info += ': Failure: {}'.format(e.message)
+ self._download_failed()
+ raise
+
+ ##############################################################
+ # Schedule a task to monitor the download
+ try:
+ self._additional_info = 'Monitoring download status'
+ self._deferred = reactor.callLater(0.5, self.monitor_download_status)
+
+ except Exception as e:
+ log.exception('download-monitor', e=e)
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info += ': Failure: {}'.format(e.message)
+ self._download_failed()
+ raise
+
+ returnValue('started')
+
+ def parse_url(self):
+ from urllib3 import util, exceptions
+ try:
+ results = util.parse_url(self._url)
+
+ # Server info
+ self._scheme = results.scheme.lower()
+ if self._scheme not in self._supported_protocols:
+ self._failure_reason = ImageDownload.INVALID_URL
+ self._additional_info = "Unsupported file transfer protocol: {}".format(results.scheme)
+ return False
+
+ self._host = results.host
+ self._port = results.port
+ self._path = results.path
+ self._auth = results.auth
+ return True
+
+ except exceptions.LocationValueError as e:
+ self._failure_reason = ImageDownload.INVALID_URL
+ self._additional_info = e.message
+ return False
+
+ except Exception as e:
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info = e.message
+ return False
+
+ @property
+ def server_profile_xml(self):
+ assert self._scheme in ['http', 'https', 'ftp', 'sftp', 'tftp'], 'Invalid protocol'
+
+ xml = """
+ <file-servers xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">
+ <profiles>
+ <profile>"""
+
+ xml += '<name>{}</name>'.format(self._server_profile_name)
+ xml += '<connection-profile>'
+ xml += ' <host>{}</host>'.format(self._host)
+ xml += ' <port>{}</port>'.format(self._port) if self._port is not None else '<use-standard-port/>'
+
+ if self._scheme in ['http', 'https']:
+ xml += ' <protocol '
+ xml += 'xmlns:adtn-file-srv-https="http://www.adtran.com/ns/yang/adtran-file-servers-https">' +\
+ 'adtn-file-srv-https:{}'.format(self._scheme)
+ xml += ' </protocol>'
+
+ elif self._scheme == 'sftp':
+ xml += ' <protocol '
+ xml += 'xmlns:adtn-file-srv-sftp="http://www.adtran.com/ns/yang/adtran-file-servers-sftp">' +\
+ 'adtn-file-srv-sftp:sftp'
+ xml += ' </protocol>'
+
+ elif self._scheme in ['ftp', 'tftp']:
+ xml += '<protocol>adtn-file-srv:{}</protocol>'.format(self._scheme)
+
+ if self._auth is not None:
+ user_pass = self._auth.split(':')
+ xml += '<username>{}</username>'.format(user_pass[0])
+ xml += '<password>$0${}</password>'.format("".join(user_pass[1:]))
+ # And the trailer
+ xml += """
+ </connection-profile>
+ </profile>
+ </profiles>
+ </file-servers>
+ """
+ return xml
+
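+ # The download job XML below references the file-server profile above by its
+ # generated VOLTHA.download.<uuid> name.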
+ @property
+ def download_job_xml(self):
+ filepath = os.path.split(self._path)
+
+ xml = """
+ <maintenance-jobs xmlns="http://www.adtran.com/ns/yang/adtran-maintenance-jobs" xmlns:adtn-phys-sw-mnt="http://www.adtran.com/ns/yang/adtran-physical-software-maintenance">
+ <maintenance-job>
+ <name>{}</name>
+ <enabled>true</enabled>
+ <notify-enabled>false</notify-enabled>
+ <no-execution-time-limit/>
+ <run-once>true</run-once>
+ <adtn-phys-sw-mnt:download-software>
+ <adtn-phys-sw-mnt:remote-file>
+ <adtn-phys-sw-mnt:file-server-profile>{}</adtn-phys-sw-mnt:file-server-profile>
+ <adtn-phys-sw-mnt:filepath>{}</adtn-phys-sw-mnt:filepath>
+ <adtn-phys-sw-mnt:filename>{}</adtn-phys-sw-mnt:filename>
+ </adtn-phys-sw-mnt:remote-file>
+ </adtn-phys-sw-mnt:download-software>
+ </maintenance-job>
+ </maintenance-jobs>
+ """.format(self._download_job_name, self._server_profile_name,
+ filepath[0], filepath[1])
+ return xml
+
+ @property
+ def download_status_xml(self):
+ xml = """
+ <filter>
+ <maintenance-jobs-state xmlns="http://www.adtran.com/ns/yang/adtran-maintenance-jobs">
+ <maintenance-job>
+ <name>{}</name>
+ </maintenance-job>
+ </maintenance-jobs-state>
+ </filter>
+ """.format(self._download_job_name)
+ return xml
+
+ @property
+ def delete_server_profile_xml(self):
+ xml = """
+ <file-servers operation="delete" xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">
+ <profiles>
+ <profile>
+ <name>{}</name>
+ </profile>
+ </profiles>
+ </file-servers>
+ """.format(self._name)
+ return xml
+
+ @property
+ def delete_download_job_xml(self):
+ xml = """
+ <maintenance-jobs operation="delete" xmlns="http://www.adtran.com/ns/yang/adtran-maintenance-jobs">
+ <maintenance-job>
+ <name>{}</name>
+ </maintenance-job>
+ </maintenance-jobs>
+ """.format(self._name)
+ return xml
+
+ @inlineCallbacks
+ def monitor_download_status(self):
+ log.debug('monitor-download', name=self.name)
+ try:
+ results = yield self._handler.netconf_client.get(self.download_status_xml)
+
+ result_dict = xmltodict.parse(results.data_xml)
+ entries = result_dict['data']['maintenance-jobs-state']['maintenance-job']
+
+ name = entries.get('name')
+ assert name == self._download_job_name, 'The job status name does not match. {} != {}'.format(name, self._download_job_name)
+ self._download_state = self.monitor_state_to_download_state(entries['state']['#text'])
+
+ completed = entries['timestamps'].get('completed-timestamp')
+ started = entries['timestamps'].get('start-timestamp')
+
+ if self._download_state == ImageDownload.DOWNLOAD_FAILED:
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info = entries['error'].get('error-message')
+
+ elif self._download_state == ImageDownload.INSUFFICIENT_SPACE:
+ self._failure_reason = ImageDownload.INSUFFICIENT_SPACE
+ self._additional_info = entries['error'].get('error-message')
+
+ elif self._download_state == ImageDownload.DOWNLOAD_STARTED:
+ self._failure_reason = ImageDownload.NO_ERROR
+ self._additional_info = 'Download started at {}'.format(started)
+
+ elif self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ self._failure_reason = ImageDownload.NO_ERROR
+ self._additional_info = 'Download completed at {}'.format(completed)
+ else:
+ raise NotImplementedError('Unsupported state')
+
+ done = self._download_state in [ImageDownload.DOWNLOAD_FAILED,
+ ImageDownload.DOWNLOAD_SUCCEEDED,
+ ImageDownload.INSUFFICIENT_SPACE]
+
+ except Exception as e:
+ log.exception('protocols', e=e)
+ done = False
+
+ if not done:
+ self._deferred = reactor.callLater(1, self.monitor_download_status)
+
+ returnValue('done' if done else 'not-done-yet')
+
+ def _download_failed(self):
+ log.info('download-failed', name=self.name)
+
+ self._cancel_deferred()
+ self._download_state = ImageDownload.DOWNLOAD_FAILED
+
+ # Cleanup NETCONF
+
+ reactor.callLater(0, self._cleanup_download_job, 20)
+ reactor.callLater(0, self._cleanup_server_profile, 20)
+ # TODO: Do we signal any completion due to failure?
+
+ def _download_complete(self):
+ log.info('download-completed', name=self.name)
+
+ self._cancel_deferred()
+ self._download_state = ImageDownload.DOWNLOAD_SUCCEEDED
+ self._downloaded_octets = 123456
+ self._failure_reason = ImageDownload.NO_ERROR
+
+ reactor.callLater(0, self._cleanup_download_job, 20)
+ reactor.callLater(0, self._cleanup_server_profile, 20)
+ # TODO: How do we signal completion?
+
+ device = self._handler.adapter_agent.get_device(self.device_id)
+ if device is not None:
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self._handler.adapter_agent.update_device(device)
+
+ @inlineCallbacks
+ def cancel_download(self, request):
+ log.info('cancel-sw-download', name=self.name)
+
+ self._cancel_deferred()
+
+ try:
+ # initiate cancelling software download to device at success
+ # delete image download record
+
+ self._handler.adapter_agent.delete_image_download(request)
+
+ device = self._handler.adapter_agent.get_device(self.device_id)
+ if device is not None:
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self._handler.adapter_agent.update_device(device)
+
+ except Exception as e:
+ log.exception(e.message)
+
+ reactor.callLater(0, self._cleanup_download_job, 20)
+ reactor.callLater(0, self._cleanup_server_profile, 20)
+
+ @inlineCallbacks
+ def _cleanup_server_profile(self, retries, attempt=1):
+ log.info('cleanup-server', name=self.name,
+ profile=self._server_profile_name,
+ attempt=attempt, remaining=retries)
+
+ if self._server_profile_name is not None:
+ try:
+ profile = self.delete_server_profile_xml
+ yield self._handler.netconf_client.edit_config(profile)
+ self._server_profile_name = None
+
+ except Exception as e:
+ log.exception(e.message)
+ if retries > 0:
+ reactor.callLater(attempt * 60, self._cleanup_server_profile,
+ retries - 1, attempt + 1)
+
+ @inlineCallbacks
+ def _cleanup_download_job(self, retries, attempt=1):
+ log.info('cleanup-download', name=self.name,
+ profile=self._download_job_name,
+ attempt=attempt, remaining=retries)
+
+ if self._download_job_name is not None:
+ try:
+ job = self.delete_download_job_xml
+ yield self._handler.netconf_client.edit_config(job)
+ self._download_job_name = None
+
+ except Exception as e:
+ log.exception(e.message)
+ if retries > 0:
+ reactor.callLater(attempt * 60, self._cleanup_download_job,
+ retries - 1, attempt + 1)
+
+ @inlineCallbacks
+ def activate_image(self):
+ log.info('download-activate', name=self.name)
+
+ if self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ pass # TODO: Implement
+ self._image_state = ImageDownload.IMAGE_ACTIVE
+
+ returnValue('TODO: Implement this')
+
+ @inlineCallbacks
+ def revert_image(self):
+ log.info('download-revert', name=self.name)
+
+ if self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ pass # TODO: Implement
+ self._image_state = ImageDownload.IMAGE_INACTIVE
+
+ returnValue('TODO: Implement this')
+
+ def monitor_state_to_download_state(self, state):
+ if ':' in state:
+ state = state.split(':')[-1]
+ result = {
+ 'downloading-software': ImageDownload.DOWNLOAD_STARTED, # currently downloading software
+ 'storing-software': ImageDownload.DOWNLOAD_STARTED, # successfully downloaded the required software and is storing it to memory
+ 'software-stored': ImageDownload.DOWNLOAD_SUCCEEDED, # successfully downloaded the required software and has stored it successfully to memory
+ 'software-download-failed': ImageDownload.DOWNLOAD_FAILED, # unsuccessfully attempted to download the required software
+ 'invalid-software': ImageDownload.DOWNLOAD_FAILED, # successfully downloaded the required software but the software was determined to be invalid
+ 'software-storage-failed': ImageDownload.INSUFFICIENT_SPACE, # successfully downloaded the required software but was unable to store it in memory
+ }.get(state.lower(), None)
+ log.info('download-state', result=result, state=state, name=self.name)
+ assert result is not None, 'Invalid state'
+ return result
+
+ def monitor_state_to_activate_state(self, state):
+ if ':' in state:
+ state = state.split(':')[-1]
+ result = {
+ 'enabling-software': ImageDownload.IMAGE_ACTIVATE, # currently enabling the software
+ 'software-enabled': ImageDownload.IMAGE_ACTIVE, # successfully enabled the required software
+ 'enable-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to enable the required software revision
+ 'activating-software': ImageDownload.IMAGE_ACTIVATE, # currently activating the software
+ 'software-activated': ImageDownload.IMAGE_ACTIVE, # successfully activated the required software. The job terminated successfully
+ 'activate-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to activate the required software revision
+ 'committing-software': ImageDownload.IMAGE_ACTIVATE, # currently committing the software
+ 'software-committed': ImageDownload.IMAGE_ACTIVATE, # successfully committed the required software. The job terminated successfully
+ 'commit-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to commit the required software revision
+ }.get(state.lower(), None)
+ log.info('download-state', result=result, state=state, name=self.name)
+ assert result is not None, 'Invalid state'
+ return result
\ No newline at end of file
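
The two mapping helpers above reduce the namespace-qualified maintenance-job state reported over NETCONF to VOLTHA `ImageDownload` values. A minimal sketch of the expected flow, assuming a hypothetical state string from the device:

```python
# Hypothetical text as it might appear in the <state> leaf of the
# maintenance-job status reply; the namespace prefix is stripped first.
raw_state = 'adtn-phys-sw-mnt:software-stored'

state = raw_state.split(':')[-1]          # -> 'software-stored'
# Per the table above this maps to ImageDownload.DOWNLOAD_SUCCEEDED; a value
# outside the table trips the assertion in monitor_state_to_download_state().
```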
diff --git a/voltha/adapters/adtran_olt/flow/evc.py b/voltha/adapters/adtran_olt/flow/evc.py
index 71b7617..85df0fb 100644
--- a/voltha/adapters/adtran_olt/flow/evc.py
+++ b/voltha/adapters/adtran_olt/flow/evc.py
@@ -284,7 +284,7 @@
try:
# Set installed to true while request is in progress
self._installed = True
- results = yield self._flow.handler.netconf_client.edit_config(xml, lock_timeout=30)
+ results = yield self._flow.handler.netconf_client.edit_config(xml)
self._installed = results.ok
self.status = '' if results.ok else results.error
@@ -327,7 +327,7 @@
self._installed = False
xml = EVC._xml_header('delete') + '<name>{}</name>'.format(self.name) + EVC._xml_trailer()
- d = self._flow.handler.netconf_client.edit_config(xml, lock_timeout=30)
+ d = self._flow.handler.netconf_client.edit_config(xml)
d.addCallbacks(_success, _failure)
dl.append(d)
@@ -335,7 +335,7 @@
for evc_map in self.evc_maps:
dl.append(evc_map.remove())
- return defer.gatherResults(dl)
+ return defer.gatherResults(dl, consumeErrors=True)
@inlineCallbacks
def delete(self, delete_maps=True):
@@ -352,7 +352,7 @@
for evc_map in self.evc_maps:
dl.append(evc_map.delete()) # TODO: implement bulk-flow procedures
- yield defer.gatherResults(dl)
+ yield defer.gatherResults(dl, consumeErrors=True)
except Exception as e:
log.exception('removal', e=e)
@@ -362,7 +362,7 @@
if f is not None and f.handler is not None:
f.handler.remove_evc(self)
- returnValue(succeed('Done'))
+ returnValue('Done')
def reflow(self, reflow_maps=True):
"""
@@ -459,7 +459,7 @@
del_xml += '</evcs>'
log.debug('removing', xml=del_xml)
- return client.edit_config(del_xml, lock_timeout=30)
+ return client.edit_config(del_xml)
return succeed('no entries')
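
The `consumeErrors=True` additions above keep one failed removal from leaving an unhandled-error traceback behind while the remaining Deferreds are still gathered. A minimal sketch of the pattern, using only Twisted primitives:

```python
from twisted.internet import defer

def remove_all(deferreds):
    # With consumeErrors=True, a failing entry is wrapped into the FirstError
    # that errbacks the gathered Deferred instead of also being reported as an
    # "Unhandled error in Deferred" when it is garbage collected.
    return defer.gatherResults(deferreds, consumeErrors=True)
```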
diff --git a/voltha/adapters/adtran_olt/flow/evc_map.py b/voltha/adapters/adtran_olt/flow/evc_map.py
index 027a19e..1f76e6c 100644
--- a/voltha/adapters/adtran_olt/flow/evc_map.py
+++ b/voltha/adapters/adtran_olt/flow/evc_map.py
@@ -265,8 +265,7 @@
if self._is_ingress_map else self._egress_install_xml()
log.debug('install', xml=map_xml, name=self.name)
- results = yield self._flow.handler.netconf_client.edit_config(map_xml,
- lock_timeout=10)
+ results = yield self._flow.handler.netconf_client.edit_config(map_xml)
self._installed = results.ok
self.status = '' if results.ok else results.error
@@ -292,10 +291,9 @@
return EVCMap._xml_header('delete') + \
'<name>{}</name>'.format(self.name) + EVCMap._xml_trailer()
- @inlineCallbacks
def remove(self):
if not self.installed:
- returnValue(succeed('Not installed'))
+ returnValue('Not installed')
log.info('removing', evc_map=self)
@@ -311,7 +309,7 @@
map_xml = self._ingress_remove_xml(self._gem_ids_and_vid) if self._is_ingress_map \
else self._egress_remove_xml()
- d = self._flow.handler.netconf_client.edit_config(map_xml, lock_timeout=30)
+ d = self._flow.handler.netconf_client.edit_config(map_xml)
d.addCallbacks(_success, _failure)
return d
@@ -378,8 +376,11 @@
after = gem_ports()
if len(before) > len(after):
- self._installed = False
- return self.install()
+ if len(after) == 0:
+ return self.remove()
+ else:
+ self._installed = False
+ return self.install()
return succeed('nop')
@@ -529,7 +530,7 @@
del_xml += '</evc-maps>'
log.debug('removing', xml=del_xml)
- return client.edit_config(del_xml, lock_timeout=30)
+ return client.edit_config(del_xml)
return succeed('no entries')
diff --git a/voltha/adapters/adtran_olt/flow/flow_entry.py b/voltha/adapters/adtran_olt/flow/flow_entry.py
index 638fd6b..9af0bba 100644
--- a/voltha/adapters/adtran_olt/flow/flow_entry.py
+++ b/voltha/adapters/adtran_olt/flow/flow_entry.py
@@ -507,7 +507,7 @@
flow_table = sig_table.get(self.signature)
if flow_table is None or flow_id not in flow_table:
- returnValue(succeed('NOP'))
+ returnValue('NOP')
# Remove from flow table and clean up flow table if empty
@@ -546,13 +546,13 @@
if evc is not None:
dl.append(evc.delete())
- yield gatherResults(dl)
+ yield gatherResults(dl, consumeErrors=True)
except Exception as e:
log.exception('removal', e=e)
self.evc = None
- returnValue(succeed('Done'))
+ returnValue('Done')
@staticmethod
def find_evc_map_flows(onu):
diff --git a/voltha/adapters/adtran_olt/flow/mcast.py b/voltha/adapters/adtran_olt/flow/mcast.py
index 80d10ef..7051bd8 100644
--- a/voltha/adapters/adtran_olt/flow/mcast.py
+++ b/voltha/adapters/adtran_olt/flow/mcast.py
@@ -114,10 +114,9 @@
evc_table = _mcast_evcs.get(device_id)
if evc_table is None or flow_id not in evc_table:
- returnValue(succeed('NOP'))
+ returnValue('NOP')
# Remove flow reference
-
if self._flow.flow_id in self._downstream_flows:
del self._downstream_flows[self._flow.flow_id]
@@ -125,7 +124,7 @@
# Use base class to clean up
returnValue(super(MCastEVC, self).remove(remove_maps=True))
- returnValue(succeed('More references'))
+ returnValue('More references')
@inlineCallbacks
def delete(self, delete_maps=True):
@@ -140,7 +139,7 @@
for evc_map in self.evc_maps:
dl.append(evc_map.delete()) # TODO: implement bulk-flow procedures
- yield defer.gatherResults(dl)
+ yield defer.gatherResults(dl, consumeErrors=True)
except Exception as e:
log.exception('removal', e=e)
diff --git a/voltha/adapters/adtran_olt/gem_port.py b/voltha/adapters/adtran_olt/gem_port.py
deleted file mode 100644
index 1ce3915..0000000
--- a/voltha/adapters/adtran_olt/gem_port.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright 2017-present Adtran, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import structlog
-import json
-from voltha.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
-
-log = structlog.get_logger()
-
-
-class GemPort(object):
- """
- Class to wrap TCont capabilities
- """
- def __init__(self, gem_id, alloc_id,
- encryption=False,
- omci_transport=False,
- multicast=False,
- tcont_ref=None,
- ident=None,
- traffic_class=None,
- intf_ref=None,
- exception=False, # FIXED_ONU
- name=None,
- olt=None):
- self.name = name
- self.gem_id = gem_id
- self._alloc_id = alloc_id
- self.tconf_ref = tcont_ref
- self.intf_ref = intf_ref
- self.traffic_class = traffic_class
- self.id = ident
- self._encryption = encryption
- self._omci_transport = omci_transport
- self.multicast = multicast
- self.exception = exception # FIXED_ONU
- self._olt = olt
-
- def __str__(self):
- return "GemPort: {}, alloc-id: {}, gem-id: {}".format(self.name,
- self.alloc_id,
- self.gem_id)
-
- @staticmethod
- def create(data, olt):
- assert isinstance(data, GemportsConfigData)
- exception = data.gemport_id in [2180, 2186, 2192,
- 2198, 2204, 2210,
- 2216, 2222, 2228,
- 2234, 2240, 2246,
- 2252, 2258]
- mcast = data.gemport_id in [4095]
-
- return GemPort(data.gemport_id, None,
- encryption=data.aes_indicator,
- tcont_ref=data.tcont_ref,
- ident=data.id,
- name=data.name,
- traffic_class=data.traffic_class,
- intf_ref=data.itf_ref, # v_enet
- olt=olt,
- multicast=mcast,
- exception=exception)
-
- @property
- def alloc_id(self):
- if self._alloc_id is None and self._olt is not None:
- try:
- self._alloc_id = self._olt.tconts.get(self.tconf_ref).alloc_id
- except Exception:
- pass
-
- return self._alloc_id
-
- @property
- def encryption(self):
- return self._encryption
-
- @property
- def omci_transport(self):
- return self._omci_transport
-
- def to_dict(self):
- return {
- 'port-id': self.gem_id,
- 'alloc-id': self.alloc_id,
- 'encryption': self.encryption,
- 'omci-transport': self.omci_transport
- }
-
- def add_to_hardware(self, session, pon_id, onu_id, operation='POST'):
- from adtran_olt_handler import AdtranOltHandler
-
- uri = AdtranOltHandler.GPON_GEM_CONFIG_LIST_URI.format(pon_id, onu_id)
- data = json.dumps(self.to_dict())
- name = 'gem-port-create-{}-{}: {}/{}'.format(pon_id, onu_id,
- self.gem_id,
- self.alloc_id)
-
- return session.request(operation, uri, data=data, name=name)
-
- def remove_from_hardware(self, session, pon_id, onu_id):
- from adtran_olt_handler import AdtranOltHandler
-
- uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(pon_id, onu_id, self.gem_id)
- name = 'gem-port-delete-{}-{}: {}'.format(pon_id, onu_id, self.gem_id)
- return session.request('DELETE', uri, name=name)
-
- def _get_onu(self, olt):
- onu = None
- try:
- v_enet = olt.v_enets.get(self.intf_ref)
- vont_ani = olt.v_ont_anis.get(v_enet['v-ont-ani'])
- ch_pair = olt.channel_pairs.get(vont_ani['preferred-channel-pair'])
- ch_term = next((term for term in olt.channel_terminations.itervalues()
- if term['channel-pair'] == ch_pair['name']), None)
-
- pon = olt.pon(ch_term['xgs-ponid'])
- onu = pon.onu(vont_ani['onu-id'])
-
- except Exception:
- pass
-
- return onu
-
- def xpon_create(self, olt):
- # Look up any associated ONU. May be None if pre-provisioning
- onu = self._get_onu(olt)
-
- if onu is not None:
- onu.add_gem_port(self)
-
- def xpon_update(self, olt):
- # Look up any associated ONU. May be None if pre-provisioning
- pass # TODO: Not yet supported
-
- def xpon_delete(self, olt):
- # Look up any associated ONU. May be None if pre-provisioning
- pass # TODO: Not yet supported
diff --git a/voltha/adapters/adtran_olt/net/adtran_netconf.py b/voltha/adapters/adtran_olt/net/adtran_netconf.py
index 14947c9..07518ea 100644
--- a/voltha/adapters/adtran_olt/net/adtran_netconf.py
+++ b/voltha/adapters/adtran_olt/net/adtran_netconf.py
@@ -216,7 +216,7 @@
Lock the configuration system
:return: (deferred) for RpcReply
"""
- log.debug('lock', source=source, timeout=lock_timeout)
+ log.info('lock', source=source, timeout=lock_timeout)
if not self._session or not self._session.connected:
raise NotImplemented('TODO: Support auto-connect if needed')
@@ -244,7 +244,7 @@
:return: (deferred) for RpcReply
"""
- log.debug('unlock', source=source)
+ log.info('unlock', source=source)
if not self._session or not self._session.connected:
raise NotImplemented('TODO: Support auto-connect if needed')
@@ -267,11 +267,10 @@
@inlineCallbacks
def edit_config(self, config, target='running', default_operation='none',
- test_option=None, error_option=None, lock_timeout=-1):
+ test_option=None, error_option=None):
"""
Loads all or part of the specified config to the target configuration datastore with the ability to lock
- the datastore during the edit. To change multiple items, use your own calls to lock/unlock instead of
- using the lock_timeout value
+ the datastore during the edit.
:param config is the configuration, which must be rooted in the config element. It can be specified
either as a string or an Element.format="xml"
@@ -280,8 +279,6 @@
:param test_option if specified must be one of { 'test_then_set', 'set' }
:param error_option if specified must be one of { 'stop-on-error', 'continue-on-error', 'rollback-on-error' }
The 'rollback-on-error' error_option depends on the :rollback-on-error capability.
- :param lock_timeout if >0, the maximum number of seconds to hold a lock on the datastore while the edit
- operation is underway
:return: (deferred) for RpcReply
"""
@@ -295,15 +292,6 @@
except Exception as e:
log.exception('edit-config-connect', e=e)
- rpc_reply = None
- # if lock_timeout > 0:
- # try:
- # request = self._session.lock(target, lock_timeout)
- # rpc_reply = yield request
- #
- # except Exception as e:
- # log.exception('edit_config-Lock', e=e)
- # raise
try:
if config[:7] != '<config':
config = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0"' + \
@@ -317,16 +305,6 @@
log.exception('edit_config', e=e)
raise
- finally:
- pass
- # if lock_timeout > 0:
- # try:
- # yield self._session.unlock(target)
- #
- # except Exception as e:
- # log.exception('edit_config-unlock', e=e)
- # # Note that we just fall through and do not re-raise this exception
-
returnValue(rpc_reply)
def _do_edit_config(self, target, config, default_operation, test_option, error_option):
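
With `lock_timeout` removed from `edit_config()`, callers that still need exclusive access are expected to take the datastore lock themselves. A sketch of that pattern, assuming the `lock()`/`unlock()` helpers shown earlier in this file; the 30-second timeout is only an example value:

```python
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def locked_edit_config(client, config_xml, target='running'):
    yield client.lock(target, 30)          # hold the lock for at most 30 seconds
    try:
        reply = yield client.edit_config(config_xml)
    finally:
        yield client.unlock(target)
    returnValue(reply)
```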
diff --git a/voltha/adapters/adtran_olt/net/adtran_rest.py b/voltha/adapters/adtran_olt/net/adtran_rest.py
index b420485..9f2b990 100644
--- a/voltha/adapters/adtran_olt/net/adtran_rest.py
+++ b/voltha/adapters/adtran_olt/net/adtran_rest.py
@@ -86,7 +86,8 @@
return "AdtranRestClient {}@{}:{}".format(self._username, self._ip, self._port)
@inlineCallbacks
- def request(self, method, uri, data=None, name='', timeout=None, is_retry=False):
+ def request(self, method, uri, data=None, name='', timeout=None, is_retry=False,
+ suppress_error=False):
"""
Send a REST request to the Adtran device
@@ -100,6 +101,8 @@
and in the real world.
:return: (dict) On success with the proper results
"""
+ log.debug('request', method=method, uri=uri, data=data, retry=is_retry)
+
if method.upper() not in self._valid_methods:
raise NotImplementedError("REST method '{}' is not supported".format(method))
@@ -148,13 +151,14 @@
returnValue(ConnectionClosed)
except Exception as e:
- log.exception("REST {} '{}' request to '{}' failed: {}".format(method, name, url, str(e)))
+ log.exception("rest-request", method=method, url=url, name=name, e=e)
raise
if response.code not in self._valid_results[method.upper()]:
message = "REST {} '{}' request to '{}' failed with status code {}".format(method, name,
url, response.code)
- log.error(message)
+ if not suppress_error:
+ log.error(message)
raise RestInvalidResponseCode(message, url, response.code)
if response.code == self.HTTP_NO_CONTENT:
@@ -168,7 +172,7 @@
type_val = 'application/json'
if not headers.hasHeader(type_key) or type_val not in headers.getRawHeaders(type_key, []):
- raise Exception("REST {} '{}' request response from '{} was not JSON",
+ raise Exception("REST {} '{}' request response from '{}' was not JSON",
method, name, url)
content = yield response.content()
@@ -176,8 +180,8 @@
result = json.loads(content)
except Exception as e:
- log.exception("REST {} '{}' JSON decode of '{}' failure: {}".format(method, name,
- url, str(e)))
+ log.exception("json-decode", method=method, url=url, name=name,
+ content=content, e=e)
raise
returnValue(result)
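
The new `suppress_error` flag lets callers probe for resources whose absence is expected without polluting the error log; `RestInvalidResponseCode` is still raised so the caller can inspect the status code. A hedged usage sketch (the URI and helper name are placeholders):

```python
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def get_optional_resource(rest_client, uri):
    try:
        # A 404 is a normal outcome here, so keep it out of the error log.
        result = yield rest_client.request('GET', uri, name='optional-probe',
                                           suppress_error=True)
    except RestInvalidResponseCode as e:   # defined in this module
        if e.code != 404:
            raise
        result = None
    returnValue(result)
```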
diff --git a/voltha/adapters/adtran_olt/net/adtran_zmq.py b/voltha/adapters/adtran_olt/net/adtran_zmq.py
index 9242130..4bbd704 100644
--- a/voltha/adapters/adtran_olt/net/adtran_zmq.py
+++ b/voltha/adapters/adtran_olt/net/adtran_zmq.py
@@ -14,6 +14,7 @@
import binascii
import struct
+import json
import structlog
from txzmq import ZmqEndpoint, ZmqFactory
@@ -58,7 +59,25 @@
log.debug('discarding-no-receiver')
@staticmethod
- def encode_omci_message(msg, pon_index, onu_id):
+ def encode_omci_message(msg, pon_index, onu_id, is_async_control):
+ """
+ Create an OMCI Tx Packet for the specified ONU
+
+ :param msg: (str) OMCI message to send
+ :param pon_index: (unsigned int) PON Port index
+ :param onu_id: (unsigned int) ONU ID
+ :param is_async_control: (bool) Newer async/JSON support
+
+ :return: (bytes) octet string to send
+ """
+ assert msg, 'No message provided'
+
+ return AdtranZmqClient._encode_omci_message_json(msg, pon_index, onu_id) \
+ if is_async_control else \
+ AdtranZmqClient._encode_omci_message_legacy(msg, pon_index, onu_id)
+
+ @staticmethod
+ def _encode_omci_message_legacy(msg, pon_index, onu_id):
"""
Create an OMCI Tx Packet for the specified ONU
@@ -68,17 +87,43 @@
:return: (bytes) octet string to send
"""
- assert msg
s = struct.Struct('!II')
+ # Check if length is prepended (32-bits = 4 bytes ASCII)
+ msglen = len(msg)
+ assert msglen == 40*2 or msglen == 44*2, 'Invalid OMCI message length'
+
+ if len(msg) > 40*2:
+ msg = msg[:40*2]
+
return s.pack(pon_index, onu_id) + binascii.unhexlify(msg)
@staticmethod
- def decode_packet(packet):
+ def _encode_omci_message_json(msg, pon_index, onu_id):
+ """
+ Create an OMCI Tx Packet for the specified ONU
+
+ :param msg: (str) OMCI message to send
+ :param pon_index: (unsigned int) PON Port index
+ :param onu_id: (unsigned int) ONU ID
+
+ :return: (bytes) octet string to send
+ """
+
+ return json.dumps({"operation": "NOTIFY",
+ "url": "adtran-olt-pon-control/omci-message",
+ "pon-id": pon_index,
+ "onu-id": onu_id,
+ "message-contents": msg.decode("hex").encode("base64")
+ })
+
+ @staticmethod
+ def decode_packet(packet, is_async_control):
"""
Decode the packet provided by the ZMQ client
:param packet: (bytes) Packet
+ :param is_async_control: (bool) Newer async/JSON support
:return: (long, long, bytes, boolean) PON Index, ONU ID, Frame Contents (OMCI or Ethernet),\
and a flag indicating if it is OMCI
"""
@@ -87,13 +132,15 @@
if len(packet) > 1:
pass # TODO: Can we get multiple packets?
- return AdtranZmqClient._decode_omci_message(packet[0])
+ return AdtranZmqClient._decode_omci_message_json(packet[0]) if is_async_control \
+ else AdtranZmqClient._decode_omci_message_legacy(packet[0])
+
return -1, -1, None, False
@staticmethod
- def _decode_omci_message(packet):
+ def _decode_omci_message_legacy(packet):
"""
- Decode the packet provided by the ZMQ client
+ Decode the packet provided by the ZMQ client (binary legacy format)
:param packet: (bytes) Packet
:return: (long, long, bytes) PON Index, ONU ID, OMCI Frame Contents
@@ -104,6 +151,22 @@
return pon_index, onu_id, omci_msg, True
@staticmethod
+ def _decode_omci_message_json(packet):
+ """
+ Decode the packet provided by the ZMQ client (JSON format)
+
+ :param packet: (string) Packet
+ :return: (long, long, bytes) PON Index, ONU ID, OMCI Frame Contents
+ """
+ msg = json.loads(packet)
+ pon_id = msg['pon-id']
+ onu_id = msg['onu-id']
+ msg_data = msg['message-contents'].decode("base64").encode("hex")
+ is_omci = msg['operation'] == "NOTIFY" and 'omci-message' in msg['url']
+
+ return pon_id, onu_id, msg_data, is_omci
+
+ @staticmethod
def _decode_packet_in_message(packet):
# TODO: This is not yet supported
(pon_index, onu_id) = struct.unpack_from('!II', packet)
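
For reference, the JSON ("async control") encoding above produces a NOTIFY envelope whose `message-contents` carries the base64 form of the hex OMCI frame. A small round-trip sketch with a placeholder frame:

```python
import json

hex_omci = '00' * 40      # placeholder 40-byte OMCI frame as a hex string

envelope = json.dumps({"operation": "NOTIFY",
                       "url": "adtran-olt-pon-control/omci-message",
                       "pon-id": 0,
                       "onu-id": 1,
                       "message-contents": hex_omci.decode("hex").encode("base64")})

# _decode_omci_message_json() reverses this: json.loads(), base64 -> hex on
# 'message-contents', and 'omci-message' in the url marks the frame as OMCI.
```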
diff --git a/voltha/adapters/adtran_olt/net/mock_netconf_client.py b/voltha/adapters/adtran_olt/net/mock_netconf_client.py
index 087a929..59410a5 100644
--- a/voltha/adapters/adtran_olt/net/mock_netconf_client.py
+++ b/voltha/adapters/adtran_olt/net/mock_netconf_client.py
@@ -158,11 +158,10 @@
@inlineCallbacks
def edit_config(self, config, target='running', default_operation='merge',
- test_option=None, error_option=None, lock_timeout=-1):
+ test_option=None, error_option=None):
"""
Loads all or part of the specified config to the target configuration datastore with the ability to lock
- the datastore during the edit. To change multiple items, use your own calls to lock/unlock instead of
- using the lock_timeout value
+ the datastore during the edit.
:param config is the configuration, which must be rooted in the config element. It can be specified
either as a string or an Element.format="xml"
@@ -171,19 +170,9 @@
:param test_option if specified must be one of { 'test_then_set', 'set' }
:param error_option if specified must be one of { 'stop-on-error', 'continue-on-error', 'rollback-on-error' }
The 'rollback-on-error' error_option depends on the :rollback-on-error capability.
- :param lock_timeout if >0, the maximum number of seconds to hold a lock on the datastore while the edit
- operation is underway
:return: (defeered) for RpcReply
"""
- if lock_timeout > 0:
- try:
- request = self.lock(target, lock_timeout)
- yield request
-
- except Exception as e:
- log.exception('edit_config-lock', e=e)
- raise
try:
yield asleep(random.uniform(0.1, 2.0)) # Simulate NETCONF request delay
@@ -191,10 +180,6 @@
log.exception('edit_config', e=e)
raise
- finally:
- if lock_timeout > 0:
- yield self.unlock(target)
-
# TODO: Customize if needed...
xml = _dummy_xml
returnValue(RPCReply(xml))
diff --git a/voltha/adapters/adtran_olt/net/rcmd.py b/voltha/adapters/adtran_olt/net/rcmd.py
new file mode 100644
index 0000000..e46a9c1
--- /dev/null
+++ b/voltha/adapters/adtran_olt/net/rcmd.py
@@ -0,0 +1,111 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.defer import Deferred, succeed
+from twisted.internet.protocol import Factory, Protocol
+from twisted.conch.client.knownhosts import ConsoleUI, KnownHostsFile
+from twisted.conch.endpoints import SSHCommandClientEndpoint
+from twisted.internet import reactor
+
+log = structlog.get_logger()
+_open = open
+
+
+class RCmd(object):
+ """
+ Execute a one-time remote command via SSH
+ """
+ def __init__(self, host, username, password,
+ command,
+ port=None,
+ keys=None,
+ known_hosts=None,
+ agent=None):
+ self.reactor = reactor
+ self.host = host
+ self.port = port
+ self.username = username
+ self.password = password
+ self.keys = keys
+ self.knownHosts = known_hosts
+ self.agent = agent
+ self.command = command
+ self.ui = RCmd.FixedResponseUI(True)
+
+ class NoiseProtocol(Protocol):
+ def __init__(self):
+ self.finished = Deferred()
+ self.strings = ["bif", "pow", "zot"]
+
+ def connectionMade(self):
+ log.debug('connection-made')
+ self._send_noise()
+
+ def _send_noise(self):
+ if self.strings:
+ self.transport.write(self.strings.pop(0) + "\n")
+ else:
+ self.transport.loseConnection()
+
+ def dataReceived(self, data):
+ log.debug('rx', data=data)
+ if self.finished is not None and not self.finished.called:
+ self.finished.callback(data)
+ self._send_noise()
+
+ def connectionLost(self, reason):
+ log.debug('connection-lost')
+ self.finished.callback(None)
+
+ class PermissiveKnownHosts(KnownHostsFile):
+ def verifyHostKey(self, ui, hostname, ip, key):
+ log.debug('verifyHostKey')
+ return True
+
+ class FixedResponseUI(ConsoleUI):
+ def __init__(self, result):
+ super(RCmd.FixedResponseUI, self).__init__(lambda: _open("/dev/null",
+ "r+b",
+ buffering=0))
+ self.result = result
+
+ def prompt(self, _):
+ log.debug('prompt')
+ return succeed(True)
+
+ def warn(self, text):
+ log.debug('warn')
+ pass
+
+ def _endpoint_for_command(self, command):
+ return SSHCommandClientEndpoint.newConnection(
+ self.reactor, command, self.username, self.host,
+ port=self.port,
+ password=self.password,
+ keys=self.keys,
+ agentEndpoint=self.agent,
+ knownHosts=self.knownHosts,
+ ui=self.ui
+ )
+
+ def execute(self):
+ endpoint = self._endpoint_for_command(self.command)
+ factory = Factory()
+ factory.protocol = RCmd.NoiseProtocol
+
+ d = endpoint.connect(factory)
+ d.addCallback(lambda proto: proto.finished)
+ return d
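
A brief usage sketch of the new `RCmd` helper, assuming it lives alongside the class above; the host, credentials, and command are placeholders and error handling is left to the caller:

```python
def show_version(host, username, password):
    # execute() returns a Deferred that fires with the first data received
    # from the remote command (see NoiseProtocol.dataReceived above).
    cmd = RCmd(host, username, password, 'show version', port=22)
    d = cmd.execute()
    d.addCallback(lambda output: log.info('remote-output', output=output))
    d.addErrback(lambda reason: log.error('remote-command-failed', reason=reason))
    return d
```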
diff --git a/voltha/adapters/adtran_olt/nni_port.py b/voltha/adapters/adtran_olt/nni_port.py
index 463b02a..c3f4eb3 100644
--- a/voltha/adapters/adtran_olt/nni_port.py
+++ b/voltha/adapters/adtran_olt/nni_port.py
@@ -59,6 +59,9 @@
self._sync_tick = 10.0
self._sync_deferred = None
+ self._stats_tick = 5.0
+ self._stats_deferred = None
+
self._deferred = None
self._state = NniPort.State.INITIAL
@@ -85,6 +88,19 @@
self._max_speed = kwargs.pop('max_speed', OFPPF_100GB_FD)
self._device_port_no = kwargs.pop('device_port_no', self._port_no)
+ # Statistics
+ self.rx_packets = 0
+ self.rx_bytes = 0
+ self.tx_packets = 0
+ self.tx_bytes = 0
+ self.rx_dropped = 0
+ self.rx_errors = 0
+ self.rx_bcast = 0
+ self.rx_mcast = 0
+ self.tx_dropped = 0
+ self.tx_bcast = 0
+ self.tx_mcast = 0
+
def __del__(self):
self.stop()
@@ -95,7 +111,7 @@
self._parent)
@property
- def port_number(self):
+ def port_no(self):
return self._port_no
@property
@@ -111,6 +127,14 @@
return self._state
@property
+ def admin_state(self):
+ return self._admin_state
+
+ @property
+ def oper_status(self):
+ return self._oper_status
+
+ @property
def adapter_agent(self):
return self.olt.adapter_agent
@@ -133,7 +157,9 @@
def _cancel_deferred(self):
d1, self._deferred = self._deferred, None
d2, self._sync_deferred = self._sync_deferred, None
- for d in [d1, d2]:
+ d3, self._stats_deferred = self._stats_deferred, None
+
+ for d in [d1, d2, d3]:
try:
if d is not None and d.called:
d.cancel()
@@ -185,7 +211,6 @@
def start(self):
"""
Start/enable this NNI
-
:return: (deferred)
"""
if self._state == NniPort.State.RUNNING:
@@ -208,29 +233,32 @@
self._enabled = True
self._admin_state = AdminState.ENABLED
- self._oper_status = OperStatus.ACTIVE # TODO: is this correct, how do we tell GRPC
+ self._oper_status = OperStatus.ACTIVE
self._update_adapter_agent()
# TODO: Start status polling of NNI interfaces
self._deferred = None # = reactor.callLater(3, self.do_stuff)
self._state = NniPort.State.RUNNING
+
# Begin hardware sync
self._sync_deferred = reactor.callLater(self._sync_tick, self._sync_hardware)
+ self._stats_deferred = reactor.callLater(self._stats_tick * 2, self._update_statistics)
try:
results = yield self.set_config('enabled', True)
except Exception as e:
self.log.exception('nni-start', e=e)
- self._admin_state = AdminState.UNKNOWN
- raise
+ self._oper_status = OperStatus.UNKNOWN
+ self._update_adapter_agent()
+
returnValue(self._deferred)
@inlineCallbacks
def stop(self):
if self._state == NniPort.State.STOPPED:
- returnValue(succeed('Stopped'))
+ returnValue('Stopped')
self.log.info('stopping')
self._cancel_deferred()
@@ -327,6 +355,19 @@
'</filter>'
return self._parent.netconf_client.get(config)
+ def get_nni_statistics(self):
+ state = '<filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">' + \
+ ' <interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">' + \
+ ' <interface>' + \
+ ' <name>{}</name>'.format(self._name) + \
+ ' <admin-status/>' + \
+ ' <oper-status/>' + \
+ ' <statistics/>' + \
+ ' </interface>' + \
+ ' </interfaces-state>' + \
+ '</filter>'
+ return self._parent.netconf_client.get(state)
+
def _sync_hardware(self):
if self._state == NniPort.State.RUNNING or self._state == NniPort.State.STOPPED:
def read_config(results):
@@ -338,8 +379,12 @@
enabled = entries.get('enabled',
str(not self.enabled).lower()) == 'true'
- return succeed('in-sync') if self.enabled == enabled else \
- self.set_config('enabled', self.enabled)
+ if self.enabled == enabled:
+ return succeed('in-sync')
+
+ self.set_config('enabled', self.enabled)
+ self._oper_status = OperStatus.ACTIVE
+ self._update_adapter_agent()
except Exception as e:
self.log.exception('read-config', e=e)
@@ -357,6 +402,59 @@
self._sync_deferred.addCallbacks(read_config, failure)
self._sync_deferred.addBoth(reschedule)
+ def _decode_nni_statistics(self, entry):
+ admin_status = entry.get('admin-status')
+ oper_status = entry.get('oper-status')
+ phys_address = entry.get('phys-address')
+
+ stats = entry.get('statistics')
+ if stats is not None:
+ self.rx_bytes = int(stats.get('in-octets', 0))
+ self.rx_dropped = int(stats.get('in-discards', 0))
+ self.rx_errors = int(stats.get('in-errors', 0))
+ self.rx_bcast = int(stats.get('in-broadcast-pkts', 0))
+ self.rx_mcast = int(stats.get('in-multicast-pkts', 0))
+
+ self.tx_bytes = int(stats.get('out-octets', 0))
+ self.tx_bcast = int(stats.get('out-broadcast-pkts', 0))
+ self.tx_mcast = int(stats.get('out-multicast-pkts', 0))
+ self.tx_dropped = int(stats.get('out-discards', 0)) + int(stats.get('out-errors', 0))
+
+ self.rx_packets = int(stats.get('in-unicast-pkts', 0)) + self.rx_mcast + self.rx_bcast
+ self.tx_packets = int(stats.get('out-unicast-pkts', 0)) + self.tx_mcast + self.tx_bcast
+
+ def _update_statistics(self):
+ if self._state == NniPort.State.RUNNING:
+ def read_state(results):
+ self.log.debug('read-state', results=results)
+ try:
+ result_dict = xmltodict.parse(results.data_xml)
+ entry = result_dict['data']['interfaces-state']['interface']
+ self._decode_nni_statistics(entry)
+ return succeed('done')
+
+ except Exception as e:
+ self.log.exception('read-state', e=e)
+ return fail(Failure())
+
+ def failure(reason):
+ self.log.error('update-stats-failed', reason=reason)
+
+ def reschedule(_):
+ delay = self._stats_tick
+ delay += random.uniform(-delay / 10, delay / 10)
+ self._stats_deferred = reactor.callLater(delay, self._update_statistics)
+
+ try:
+ self._stats_deferred = self.get_nni_statistics()
+ self._stats_deferred.addCallbacks(read_state, failure)
+ self._stats_deferred.addBoth(reschedule)
+
+ except Exception as e:
+ self.log.exception('nni-sync', port=self.name, e=e)
+ self._stats_deferred = reactor.callLater(self._stats_tick, self._update_statistics)
+
class MockNniPort(NniPort):
"""
diff --git a/voltha/adapters/adtran_olt/onu.py b/voltha/adapters/adtran_olt/onu.py
index 8497178..3eaf2f2 100644
--- a/voltha/adapters/adtran_olt/onu.py
+++ b/voltha/adapters/adtran_olt/onu.py
@@ -20,15 +20,15 @@
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from adtran_olt_handler import AdtranOltHandler
+from net.adtran_rest import RestInvalidResponseCode
# Following is only used in autoactivate/demo mode. Otherwise xPON commands should be used
_VSSN_TO_VENDOR = {
'ADTN': 'adtran_onu',
- 'BCM?': 'broadcom_onu', # TODO: Get actual VSSN for this vendor
+ 'BRCM': 'broadcom_onu',
'DP??': 'dpoe_onu', # TODO: Get actual VSSN for this vendor
- 'PMC?': 'pmcs_onu', # TODO: Get actual VSSN for this vendor
+ 'PMCS': 'pmcs_onu',
'PSMO': 'ponsim_onu',
- 'SIM?': 'simulated_onu', # TODO: Get actual VSSN for this vendor
'TBIT': 'tibit_onu',
}
@@ -72,18 +72,18 @@
self._equalization_delay = 0
self._fiber_length = 0
self._valid = True # Set false during delete/cleanup
+ self._created = False
self._proxy_address = None
-
- self._include_multicast = True # TODO: May need to add multicast on a per-ONU basis
-
+ self._upstream_fec_enable = onu_info.get('upstream-fec')
+ self._upstream_channel_speed = onu_info['upstream-channel-speed']
+ # TODO: how do we want to enforce upstream channel speed (if at all)?
+ self._include_multicast = True # TODO: May need to add multicast on a per-ONU basis
self._sync_tick = _HW_SYNC_SECS
self._expedite_sync = False
self._expedite_count = 0
self._resync_flows = False
self._sync_deferred = None # For sync of ONT config to hardware
- # TODO: enable and upstream-channel-speed not yet supported
-
self.log = structlog.get_logger(pon_id=self._pon_id, onu_id=self._onu_id)
self._vendor_id = _VSSN_TO_VENDOR.get(self._serial_number_string.upper()[:4],
'Unsupported_{}'.format(self._serial_number_string))
@@ -93,7 +93,8 @@
pass
def __str__(self):
- return "Onu-{}-{}, PON ID: {}".format(self._onu_id, self._serial_number_string, self._pon_id)
+ return "ONU-{}:{}, SN: {}/{}".format(self._onu_id, self._pon_id,
+ self._serial_number_string, self._serial_number_base64)
@staticmethod
def serial_number_to_string(value):
@@ -117,6 +118,10 @@
return self.olt.southbound_ports[self._pon_id]
@property
+ def pon_id(self):
+ return self._pon_id
+
+ @property
def onu_id(self):
return self._onu_id
@@ -129,6 +134,37 @@
return self._name
@property
+ def xpon_name(self):
+ return self._xpon_name
+
+ @property
+ def v_ont_ani(self):
+ return self._vont_ani
+
+ @property
+ def upstream_fec_enable(self):
+ return self._upstream_fec_enable
+
+ @upstream_fec_enable.setter
+ def upstream_fec_enable(self, value):
+ assert isinstance(value, bool), 'upstream FEC enabled is a boolean'
+ if self._upstream_fec_enable != value:
+ self._upstream_fec_enable = value
+
+ # Recalculate PON upstream FEC
+ self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
+
+ @property
+ def upstream_channel_speed(self):
+ return self._upstream_channel_speed
+
+ @upstream_channel_speed.setter
+ def upstream_channel_speed(self, value):
+ assert isinstance(value, (int, float)), 'upstream speed is a numeric value'
+ if self._upstream_channel_speed != value:
+ self._upstream_channel_speed = value
+
+ @property
def enabled(self):
return self._enabled
@@ -145,6 +181,9 @@
else:
self.stop()
+ # Recalculate PON upstream FEC
+ self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
+
@property
def onu_vid(self):
return self._onu_vid
@@ -159,6 +198,10 @@
return self._uni_ports[0]
@property
+ def gem_ports(self):
+ return self._gem_ports.values()
+
+ @property
def proxy_address(self):
if self._proxy_address is None:
from voltha.protos.device_pb2 import Device
@@ -211,10 +254,14 @@
return self._channel_id
@property
- def serial_number(self):
+ def serial_number_64(self):
return self._serial_number_base64
@property
+ def serial_number(self):
+ return self._serial_number_string
+
+ @property
def vendor_id(self):
return self._vendor_id
@@ -279,11 +326,13 @@
name = 'onu-create-{}-{}-{}: {}'.format(self._pon_id, self._onu_id,
self._serial_number_base64, self._enabled)
- try:
- yield self.olt.rest_client.request('POST', uri, data=data, name=name)
+ if not self._created:
+ try:
+ yield self.olt.rest_client.request('POST', uri, data=data, name=name)
+ self._created = True
- except Exception as e: # TODO: Add breakpoint here during unexpected reboot test
- self.log.exception('onu-create', e=e)
+ except Exception as e: # TODO: Add breakpoint here during unexpected reboot test
+ self.log.exception('onu-create', e=e)
# Now set up all tconts & gem-ports
first_sync = self._sync_tick
@@ -298,6 +347,8 @@
for _, gem_port in gem_ports.items():
try:
+ gem_port.pon_id = self.pon_id
+ gem_port.onu_id = self.onu_id if self.onu_id is not None else -1
yield self.add_gem_port(gem_port, reflow=reflow)
except Exception as e:
@@ -305,6 +356,9 @@
first_sync = 2 # Expedite first hw-sync
self._sync_deferred = reactor.callLater(first_sync, self._sync_hardware)
+ # Recalculate PON upstream FEC
+
+ self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
returnValue('created')
@@ -337,15 +391,28 @@
try:
yield defer.gatherResults(dl, consumeErrors=True)
- except Exception:
- pass
+ except Exception as e:
+ pass
self._gem_ports.clear()
self._tconts.clear()
+
+ uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
+ name = 'onu-delete-{}-{}-{}: {}'.format(self._pon_id, self._onu_id,
+ self._serial_number_base64, self._enabled)
+ try:
+ yield self.olt.rest_client.request('DELETE', uri, name=name)
+
+ except RestInvalidResponseCode as e:
+ if e.code != 404:
+ self.log.exception('onu-delete', e=e)
+
+ except Exception as e:
+ self.log.exception('onu-delete', e=e)
+
self._olt = None
self._channel_id = None
-
- returnValue(succeed('deleted'))
+ returnValue('deleted')
def start(self):
self._cancel_deferred()
@@ -384,21 +451,21 @@
if self._enabled != config.enable:
dl.append(self.set_config('enable', self._enabled))
- if self.serial_number != config.serial_number:
- dl.append(self.set_config('serial-number', self.serial_number))
+ if self.serial_number_64 != config.serial_number_64:
+ dl.append(self.set_config('serial-number', self.serial_number_64))
- # Sync TCONTs if everything else in sync
+ if self._enabled:
+ # Sync TCONTs if everything else in sync
+ if len(dl) == 0:
+ dl.extend(sync_tconts(config.tconts))
- if len(dl) == 0:
- dl.extend(sync_tconts(config.tconts))
+ # Sync GEM Ports if everything else in sync
- # Sync GEM Ports if everything else in sync
+ if len(dl) == 0:
+ dl.extend(sync_gem_ports(config.gem_ports))
- if len(dl) == 0:
- dl.extend(sync_gem_ports(config.gem_ports))
-
- if len(dl) == 0:
- sync_flows()
+ if len(dl) == 0:
+ sync_flows()
except Exception as e:
self.log.exception('hw-sync-read-config', e=e)
@@ -439,7 +506,7 @@
return [self.add_tcont(self._tconts[alloc_id], reflow=True) for alloc_id in alloc_ids]
def sync_matching_tconts(hw_tconts):
- from tcont import TrafficDescriptor
+ from xpon.traffic_descriptor import TrafficDescriptor
dl = []
# TODO: sync TD & Best Effort. Only other TCONT leaf is the key
@@ -475,8 +542,7 @@
if reflow:
dl.append(my_tcont.add_to_hardware(self.olt.rest_client,
self._pon_id,
- self._onu_id,
- operation="PATCH"))
+ self._onu_id))
return dl
def sync_gem_ports(hw_gem_ports):
@@ -499,8 +565,8 @@
dl.extend(sync_matching_gem_ports(matching_hw_gem_ports))
self._resync_flows |= len(dl) > 0
- except Exception as e:
- self.log.exception('hw-sync-gem-ports', e=e)
+ except Exception as ex:
+ self.log.exception('hw-sync-gem-ports', e=ex)
return dl
@@ -543,7 +609,7 @@
# With 60 second initial an typical worst case resync of 4 times, this
# should resync an ONU and all it's gem-ports and tconts within <90 seconds
- if self._expedite_sync:
+ if self._expedite_sync and self._enabled:
self._expedite_count += 1
if self._expedite_count < _MAX_EXPEDITE_COUNT:
delay = _EXPEDITE_SECS
@@ -576,9 +642,8 @@
def set_config(self, leaf, value):
self.log.debug('set-config', leaf=leaf, value=value)
-
- data = json.dumps({'onu-id': self._onu_id, leaf: value})
- uri = AdtranOltHandler.GPON_ONU_CONFIG_LIST_URI.format(self._pon_id)
+ data = json.dumps({leaf: value})
+ uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
name = 'onu-set-config-{}-{}-{}: {}'.format(self._pon_id, self._onu_id, leaf, value)
return self.olt.rest_client.request('PATCH', uri, data=data, name=name)
@@ -599,11 +664,12 @@
:return: (deferred)
"""
if not self._valid:
- returnValue(succeed('Deleting'))
+ returnValue('Deleting')
if not reflow and tcont.alloc_id in self._tconts:
- returnValue(succeed('already created'))
+ returnValue('already created')
+ self.log.info('add', tcont=tcont, reflow=reflow)
self._tconts[tcont.alloc_id] = tcont
try:
@@ -618,42 +684,48 @@
returnValue(results)
@inlineCallbacks
- def update_tcont(self, alloc_id, new_values):
- # TODO: If alloc-id in use by a gemport, should we deny request?
+ def update_tcont_td(self, alloc_id, new_td):
tcont = self._tconts.get(alloc_id)
if tcont is None:
- returnValue(succeed('not-found'))
+ returnValue('not-found')
- # del self._tconts[alloc_id]
- #
- # try:
- # results = yield tcont.remove_from_hardware()
- #
- # except Exception as e:
- # self.log.exception('delete', e=e)
- # raise
+ tcont.traffic_descriptor = new_td
+ try:
+ results = yield tcont.add_to_hardware(self.olt.rest_client,
+ self._pon_id,
+ self._onu_id)
+ except Exception as e:
+ self.log.exception('tcont', tcont=tcont, e=e)
+ # May occur with xPON provisioning, use hw-resync to recover
+ results = 'resync needed'
- returnValue(succeed('TODO: Not implemented yet'))
+ returnValue(results)
@inlineCallbacks
def remove_tcont(self, alloc_id):
- # TODO: If alloc-id in use by a gemport, should we deny request?
tcont = self._tconts.get(alloc_id)
if tcont is None:
- returnValue(succeed('nop'))
+ returnValue('nop')
del self._tconts[alloc_id]
-
try:
- results = yield tcont.remove_from_hardware()
+ results = yield tcont.remove_from_hardware(self.olt.rest_client,
+ self._pon_id,
+ self._onu_id)
+ except RestInvalidResponseCode as e:
+ if e.code != 404:
+ self.log.exception('tcont-delete', e=e)
except Exception as e:
self.log.exception('delete', e=e)
raise
- returnValue(succeed(results))
+ returnValue(results)
+
+ def gem_port(self, gem_id):
+ return self._gem_ports.get(gem_id)
def gem_ids(self, exception_gems):
"""Get all GEM Port IDs used by this ONU"""
@@ -675,19 +747,18 @@
:return: (deferred)
"""
if not self._valid:
- returnValue(succeed('Deleting'))
+ returnValue('Deleting')
if not reflow and gem_port.gem_id in self._gem_ports:
- returnValue(succeed)
+ returnValue('nop')
+ self.log.info('add', gem_port=gem_port, reflow=reflow)
self._gem_ports[gem_port.gem_id] = gem_port
try:
results = yield gem_port.add_to_hardware(self.olt.rest_client,
self._pon_id,
self.onu_id)
- # self._resync_flows = True
-
# May need to update flow tables/evc-maps
if gem_port.alloc_id in self._tconts:
from flow.flow_entry import FlowEntry
@@ -708,35 +779,42 @@
@inlineCallbacks
def remove_gem_id(self, gem_id):
+ from flow.flow_entry import FlowEntry
+
gem_port = self._gem_ports.get(gem_id)
if gem_port is None:
- returnValue(succeed('nop'))
+ returnValue('nop')
del self._gem_ports[gem_id]
- # self._resync_flows = True
-
try:
- from flow.flow_entry import FlowEntry
if gem_port.alloc_id in self._tconts:
# May need to update flow tables/evc-maps
# GEM-IDs are a sorted list (ascending). First gemport handles downstream traffic
- pass
+ evc_maps = FlowEntry.find_evc_map_flows(self)
+ for evc_map in evc_maps:
+ evc_map.remove_gem_port(gem_port)
results = yield gem_port.remove_from_hardware(self.olt.rest_client,
self._pon_id,
self.onu_id)
- evc_maps = FlowEntry.find_evc_map_flows(self)
+ except RestInvalidResponseCode as e:
+ if e.code != 404:
+ self.log.exception('onu-delete', e=e)
- for evc_map in evc_maps:
- evc_map.remove_gem_port(gem_port)
-
- except Exception as e:
- self.log.exception('delete', e=e)
+ except Exception as ex:
+ self.log.exception('gem-port-delete', e=ex)
raise
- returnValue(succeed(results))
+ for evc_map in FlowEntry.find_evc_map_flows(self):
+ try:
+ evc_map.remove_gem_port(gem_port)
+
+ except Exception as ex:
+ self.log.exception('evc-map-gem-remove', e=ex)
+
+ returnValue('done')
@staticmethod
def gem_id_to_gvid(gem_id):
diff --git a/voltha/adapters/adtran_olt/pki/__init__.py b/voltha/adapters/adtran_olt/pki/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/voltha/adapters/adtran_olt/pki/__init__.py
diff --git a/voltha/adapters/adtran_olt/pki/adapter_pm_metrics.py b/voltha/adapters/adtran_olt/pki/adapter_pm_metrics.py
new file mode 100644
index 0000000..053350e
--- /dev/null
+++ b/voltha/adapters/adtran_olt/pki/adapter_pm_metrics.py
@@ -0,0 +1,63 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.task import LoopingCall
+from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig
+
+
+class AdapterPmMetrics(object):
+ def __init__(self, handler, device, grouped=False, freq_override=False):
+ self.log = structlog.get_logger(device_id=device.id)
+ self.device = device
+ self.id = device.id
+ self.handler = handler
+ self.name = handler.adapter.name
+ self.default_freq = 150
+ self.grouped = grouped
+ self.freq_override = grouped and freq_override
+ self.lc = None
+
+ def update(self, pm_config):
+ raise NotImplementedError('Your derived class should override this method')
+
+ # def enable_pm_collection(self, pm_group, remote):
+ # if pm_group == 'Ethernet':
+ # self.configure_pm_collection_freq(self.default_freq / 10, remote)
+ #
+ # def disable_pm_collection(self, pm_group, remote):
+ # if pm_group == 'nni':
+ # self.configure_pm_collection_freq(0, remote)
+
+ def make_proto(self):
+ raise NotImplementedError('Your derived class should override this method')
+
+ def start_collector(self, callback):
+ self.log.info("starting-pm-collection", device_name=self.name,
+ device_id=self.device.id)
+ prefix = 'voltha.{}.{}'.format(self.name, self.device.id)
+
+ if self.lc is None:
+ self.lc = LoopingCall(callback, self.device.id, prefix)
+
+ self.lc.start(interval=self.default_freq / 10)
+
+ def stop_collector(self):
+ if self.lc is not None:
+ self.lc.stop()
+
+ def collect_metrics(self, group, names, config):
+ stats = {metric: getattr(group, metric) for (metric, t) in names}
+ return {metric: value for metric, value in stats.iteritems()
+ if config[metric].enabled}
diff --git a/voltha/adapters/adtran_olt/pki/olt_pm_metrics.py b/voltha/adapters/adtran_olt/pki/olt_pm_metrics.py
new file mode 100644
index 0000000..38e6477
--- /dev/null
+++ b/voltha/adapters/adtran_olt/pki/olt_pm_metrics.py
@@ -0,0 +1,242 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+
+from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig
+
+from adapter_pm_metrics import AdapterPmMetrics
+
+
+class OltPmMetrics(AdapterPmMetrics):
+ def __init__(self, handler, device, grouped=False, freq_override=False):
+ super(OltPmMetrics, self).__init__(handler, device,
+ grouped=grouped, freq_override=freq_override)
+
+ # PM Config Types are COUNTER, GUAGE, and STATE # GAUGE is misspelled in device.proto
+ self.nni_pm_names = {
+ ('admin_state', PmConfig.STATE),
+ ('oper_status', PmConfig.STATE),
+ ('port_no', PmConfig.GUAGE), # Device and logical_device port numbers same
+ ('rx_packets', PmConfig.COUNTER),
+ ('rx_bytes', PmConfig.COUNTER),
+ ('rx_dropped', PmConfig.COUNTER),
+ ('rx_errors', PmConfig.COUNTER),
+ ('rx_bcast', PmConfig.COUNTER),
+ ('rx_mcast', PmConfig.COUNTER),
+ ('tx_packets', PmConfig.COUNTER),
+ ('tx_bytes', PmConfig.COUNTER),
+ ('tx_dropped', PmConfig.COUNTER),
+ ('tx_bcast', PmConfig.COUNTER),
+ ('tx_mcast', PmConfig.COUNTER),
+ #
+ # The commented-out counters below are from the spec; they may not be supported or implemented yet
+ # ('rx_64', PmConfig.COUNTER),
+ # ('rx_65_127', PmConfig.COUNTER),
+ # ('rx_128_255', PmConfig.COUNTER),
+ # ('rx_256_511', PmConfig.COUNTER),
+ # ('rx_512_1023', PmConfig.COUNTER),
+ # ('rx_1024_1518', PmConfig.COUNTER),
+ # ('rx_frame_err', PmConfig.COUNTER),
+ # ('rx_over_err', PmConfig.COUNTER),
+ # ('rx_crc_err', PmConfig.COUNTER),
+ # ('tx_64', PmConfig.COUNTER),
+ # ('tx_65_127', PmConfig.COUNTER),
+ # ('tx_128_255', PmConfig.COUNTER),
+ # ('tx_256_511', PmConfig.COUNTER),
+ # ('tx_512_1023', PmConfig.COUNTER),
+ # ('tx_1024_1518', PmConfig.COUNTER),
+ # ('collisions', PmConfig.COUNTER),
+ }
+ self.pon_pm_names = {
+ ('admin_state', PmConfig.STATE),
+ ('oper_status', PmConfig.STATE),
+ ('port_no', PmConfig.GUAGE), # Physical device port number
+ ('pon_id', PmConfig.GUAGE),
+ ('rx_packets', PmConfig.COUNTER),
+ ('rx_bytes', PmConfig.COUNTER),
+ ('tx_packets', PmConfig.COUNTER),
+ ('tx_bytes', PmConfig.COUNTER),
+ ('tx_bip_errors', PmConfig.COUNTER),
+ ('in_service_onus', PmConfig.GUAGE),
+ ('closest_onu_distance', PmConfig.GUAGE)
+ }
+ self.onu_pm_names = {
+ ('pon_id', PmConfig.GUAGE),
+ ('onu_id', PmConfig.GUAGE),
+ ('fiber_length', PmConfig.GUAGE),
+ ('equalization_delay', PmConfig.GUAGE),
+ ('rssi', PmConfig.GUAGE),
+ }
+ self.gem_pm_names = {
+ ('pon_id', PmConfig.GUAGE),
+ ('onu_id', PmConfig.GUAGE),
+ ('gem_id', PmConfig.GUAGE),
+ ('alloc_id', PmConfig.GUAGE),
+ ('rx_packets', PmConfig.COUNTER),
+ ('rx_bytes', PmConfig.COUNTER),
+ ('tx_packets', PmConfig.COUNTER),
+ ('tx_bytes', PmConfig.COUNTER),
+ }
+ self.nni_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+ for (m, t) in self.nni_pm_names}
+ self.pon_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+ for (m, t) in self.pon_pm_names}
+ self.onu_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+ for (m, t) in self.onu_pm_names}
+ self.gem_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+ for (m, t) in self.gem_pm_names}
+
+ def update(self, pm_config):
+ # TODO: Test both 'group' and 'non-group' functionality
+ # TODO: Test frequency override capability for a particular group
+ if self.default_freq != pm_config.default_freq:
+ # Update the callback to the new frequency.
+ self.default_freq = pm_config.default_freq
+ self.lc.stop()
+ self.lc.start(interval=self.default_freq / 10)
+
+ if pm_config.grouped is True:
+ for m in pm_config.groups:
+ pass
+ # self.pm_group_metrics[m.group_name].config.enabled = m.enabled
+ # if m.enabled is True:
+ # self.enable_pm_collection(m.group_name, remote)
+ # else:
+ # self.disable_pm_collection(m.group_name, remote)
+ else:
+ for m in pm_config.metrics:
+ self.nni_metrics_config[m.name].enabled = m.enabled
+ self.pon_metrics_config[m.name].enabled = m.enabled
+ self.onu_metrics_config[m.name].enabled = m.enabled
+ self.gem_metrics_config[m.name].enabled = m.enabled
+
+ def make_proto(self):
+ pm_config = PmConfigs(id=self.id, default_freq=self.default_freq,
+ grouped=self.grouped,
+ freq_override=self.freq_override)
+ metrics = set()
+
+ if self.grouped:
+ pm_ether_stats = PmGroupConfig(group_name='Ethernet',
+ group_freq=self.default_freq,
+ enabled=True)
+
+ pm_pon_stats = PmGroupConfig(group_name='PON',
+ group_freq=self.default_freq,
+ enabled=True)
+
+ pm_ont_stats = PmGroupConfig(group_name='ONT',
+ group_freq=self.default_freq,
+ enabled=True)
+
+ pm_gem_stats = PmGroupConfig(group_name='GEM',
+ group_freq=self.default_freq,
+ enabled=True)
+ else:
+ pm_ether_stats = pm_config
+ pm_pon_stats = pm_config
+ pm_ont_stats = pm_config
+ pm_gem_stats = pm_config
+
+ for m in sorted(self.nni_metrics_config):
+ pm = self.nni_metrics_config[m]
+ if not self.grouped:
+ if pm.name in metrics:
+ continue
+ metrics.add(pm.name)
+ pm_ether_stats.metrics.extend([PmConfig(name=pm.name,
+ type=pm.type,
+ enabled=pm.enabled)])
+
+ for m in sorted(self.pon_metrics_config):
+ pm = self.pon_metrics_config[m]
+ if not self.grouped:
+ if pm.name in metrics:
+ continue
+ metrics.add(pm.name)
+ pm_pon_stats.metrics.extend([PmConfig(name=pm.name,
+ type=pm.type,
+ enabled=pm.enabled)])
+
+ for m in sorted(self.onu_metrics_config):
+ pm = self.onu_metrics_config[m]
+ if not self.grouped:
+ if pm.name in metrics:
+ continue
+ metrics.add(pm.name)
+ pm_ont_stats.metrics.extend([PmConfig(name=pm.name,
+ type=pm.type,
+ enabled=pm.enabled)])
+
+ for m in sorted(self.gem_metrics_config):
+ pm = self.gem_metrics_config[m]
+ if not self.grouped:
+ if pm.name in metrics:
+ continue
+ metrics.add(pm.name)
+ pm_gem_stats.metrics.extend([PmConfig(name=pm.name,
+ type=pm.type,
+ enabled=pm.enabled)])
+ if self.grouped:
+ pm_config.groups.extend([pm_ether_stats,
+ pm_pon_stats,
+ pm_ont_stats,
+ pm_gem_stats])
+ return pm_config
+
+ def collect_port_metrics(self):
+ port_metrics = dict()
+
+ for port in self.handler.northbound_ports.itervalues():
+ port_metrics['nni.{}'.format(port.port_no)] = self.collect_nni_metrics(port)
+
+ for port in self.handler.southbound_ports.itervalues():
+ port_metrics['pon.{}'.format(port.pon_id)] = self.collect_pon_metrics(port)
+
+ for onu_id in port.onu_ids:
+ onu = port.onu(onu_id)
+ if onu is not None:
+ port_metrics['pon.{}.onu.{}'.format(port.pon_id, onu.onu_id)] = \
+ self.collect_onu_metrics(onu)
+ for gem in onu.gem_ports:
+ if gem.multicast or gem.exception:
+ continue
+
+ port_metrics['pon.{}.onu.{}.gem.{}'.format(port.pon_id,
+ onu.onu_id,
+ gem.gem_id)] = \
+ self.collect_gem_metrics(gem)
+ # TODO: Do any multicast GEM PORT metrics here...
+ return port_metrics
+
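+ # Each collector below pulls attributes named exactly after the PM metric from
+ # the port/ONU/GEM object (e.g. nni_port.rx_packets) and returns only those
+ # metrics whose PmConfig entry is currently enabled.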
+ def collect_nni_metrics(self, nni_port):
+ stats = {metric: getattr(nni_port, metric) for (metric, t) in self.nni_pm_names}
+ return {metric: value for metric, value in stats.iteritems()
+ if self.nni_metrics_config[metric].enabled}
+
+ def collect_pon_metrics(self, pon_port):
+ stats = {metric: getattr(pon_port, metric) for (metric, t) in self.pon_pm_names}
+ return {metric: value for metric, value in stats.iteritems()
+ if self.pon_metrics_config[metric].enabled}
+
+ def collect_onu_metrics(self, onu):
+ stats = {metric: getattr(onu, metric) for (metric, t) in self.onu_pm_names}
+ return {metric: value for metric, value in stats.iteritems()
+ if self.onu_metrics_config[metric].enabled}
+
+ def collect_gem_metrics(self, gem):
+ stats = {metric: getattr(gem, metric) for (metric, t) in self.gem_pm_names}
+ return {metric: value for metric, value in stats.iteritems()
+ if self.gem_metrics_config[metric].enabled}
diff --git a/voltha/adapters/adtran_olt/pon_port.py b/voltha/adapters/adtran_olt/pon_port.py
index d1a242d..3b2334e 100644
--- a/voltha/adapters/adtran_olt/pon_port.py
+++ b/voltha/adapters/adtran_olt/pon_port.py
@@ -14,7 +14,6 @@
import json
import random
-import arrow
import structlog
from enum import Enum
@@ -22,12 +21,12 @@
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from adtran_olt_handler import AdtranOltHandler
+from net.adtran_rest import RestInvalidResponseCode
from codec.olt_config import OltConfig
from onu import Onu
+from alarms.onu_los_alarm import OnuLosAlarm
from voltha.protos.common_pb2 import OperStatus, AdminState
-from voltha.protos.device_pb2 import Device
from voltha.protos.device_pb2 import Port
-from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventState, AlarmEventCategory
class PonPort(object):
@@ -39,7 +38,7 @@
"""
MAX_ONUS_SUPPORTED = 256
DEFAULT_ENABLED = False
- MAX_DEPLOYMENT_RANGE = 40000 # Meters
+ MAX_DEPLOYMENT_RANGE = 25000 # Meters (OLT-PB maximum)
_MCAST_ONU_ID = 253
_MCAST_ALLOC_BASE = 0x500
@@ -82,7 +81,6 @@
self._deferred = None # General purpose
self._discovery_deferred = None # Specifically for ONU discovery
self._sync_deferred = None # For sync of PON config to hardware
-
self._active_los_alarms = set() # ONU-ID
# xPON configuration
@@ -93,6 +91,8 @@
self._upstream_fec_enable = False
self._deployment_range = 25000
self._authentication_method = 'serial-number'
+ self._mcast_aes = False
+ self._line_rate = 'down_10_up_10'
if self.olt.autoactivate:
# Enable PON on startup
@@ -101,6 +101,13 @@
else:
self._activation_method = 'autodiscovery'
+ # Statistics
+ self.rx_packets = 0
+ self.rx_bytes = 0
+ self.tx_packets = 0
+ self.tx_bytes = 0
+ self.tx_bip_errors = 0
+
def __del__(self):
self.stop()
@@ -127,7 +134,7 @@
return self._port
@property
- def port_number(self):
+ def port_no(self):
return self._port_no
@property
@@ -151,6 +158,24 @@
def olt(self):
return self._parent
+ @property
+ def onus(self):
+ """
+ Get a set of all ONUs. While the set is immutable, do not use this method
+ to obtain a collection that you will iterate through while yielding the CPU,
+ such as in an inline callback. ONUs may be deleted at any time, and their
+ references to other objects are set to None during the 'delete' call.
+ Instead, get a list of ONU-IDs, iterate over those, and call the 'onu'
+ method below (which will return 'None' if the ONU has been deleted).
+
+ :return: (frozenset) collection of ONU objects on this PON
+ """
+ return frozenset(self._onus.values())
+
+ @property
+ def onu_ids(self):
+ return frozenset(self._onu_by_id.keys())
+
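+ # A minimal, illustrative usage sketch (assumes 'pon' is a PonPort instance):
+ #   for onu_id in pon.onu_ids:
+ #       onu = pon.onu(onu_id)
+ #       if onu is None:
+ #           continue  # ONU was deleted while iterating
+ #       ...           # safe to use 'onu' here
+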
def onu(self, onu_id):
return self._onu_by_id.get(onu_id)
@@ -172,6 +197,23 @@
self.stop()
@property
+ def oper_status(self):
+ return self._oper_status
+
+ @property
+ def in_service_onus(self):
+ return len({onu.onu_id for onu in self.onus
+ if onu.onu_id not in self._active_los_alarms})
+
+ @property
+ def closest_onu_distance(self):
+ distance = -1
+ for onu in self.onus:
+ if onu.fiber_length < distance or distance == -1:
+ distance = onu.fiber_length
+ return distance
+
+ @property
def adapter_agent(self):
return self.olt.adapter_agent
@@ -207,13 +249,44 @@
@upstream_fec_enable.setter
def upstream_fec_enable(self, value):
assert isinstance(value, bool), 'upstream FEC enabled is a boolean'
-
if self._upstream_fec_enable != value:
self._upstream_fec_enable = value
if self._state == PonPort.State.RUNNING:
self._deferred = self._set_pon_config("upstream-fec-enable", value)
@property
+ def any_upstream_fec_enabled(self):
+ for onu in self.onus:
+ if onu.upstream_fec_enable and onu.enabled:
+ return True
+ return False
+
+ @property
+ def mcast_aes(self):
+ return self._mcast_aes
+
+ @mcast_aes.setter
+ def mcast_aes(self, value):
+ assert isinstance(value, bool), 'MCAST AES is a boolean'
+ if self._mcast_aes != value:
+ self._mcast_aes = value
+ if self._state == PonPort.State.RUNNING:
+ pass # TODO
+
+ @property
+ def line_rate(self):
+ return self._line_rate
+
+ @line_rate.setter
+ def line_rate(self, value):
+ assert isinstance(value, (str, unicode)), 'Line Rate is a string'
+ # TODO cast to enum
+ if self._line_rate != value:
+ self._line_rate = value
+ if self._state == PonPort.State.RUNNING:
+ pass # TODO
+
+ @property
def deployment_range(self):
"""Maximum deployment range (in meters)"""
return self._deployment_range
@@ -401,8 +474,10 @@
if len(self._onus) > 0:
dl = []
- for onu in self._onus.itervalues():
- dl.append(onu.restart())
+ for onu_id in self.onu_ids:
+ onu = self.onu(onu_id)
+ if onu is not None:
+ dl.append(onu.restart())
yield defer.gatherResults(dl, consumeErrors=True)
# Begin to ONU discovery and hardware sync
@@ -417,7 +492,7 @@
def stop(self):
if self._state == PonPort.State.STOPPED:
self.log.debug('already stopped')
- returnValue(succeed('Stopped'))
+ returnValue('Stopped')
self.log.info('stopping')
@@ -429,6 +504,15 @@
self._state = PonPort.State.STOPPED
+ # Remove all existing ONUs. They will need to be re-discovered
+
+ onu_ids = frozenset(self._onu_by_id.keys())
+ for onu_id in onu_ids:
+ try:
+ yield self.delete_onu(onu_id)
+ except Exception as e:
+ self.log.exception('onu-cleanup', onu_id=onu_id, e=e)
+
results = yield self._set_pon_config("enabled", False)
self._sync_deferred = reactor.callLater(self._sync_tick, self._sync_hardware)
@@ -613,19 +697,17 @@
self._expedite_sync = True
dl.append(self._set_pon_config("upstream-fec-enable",
self.upstream_fec_enable))
- return defer.gatherResults(dl, consumeErrors=True)
+ defer.gatherResults(dl, consumeErrors=True)
+ return config.onus
- def sync_onus(results):
+ def sync_onus(hw_onus):
if self._state == PonPort.State.RUNNING:
- self.log.debug('sync-pon-results', results=results)
- assert isinstance(results, list), 'expected-list'
- assert isinstance(results[0], OltConfig.Pon), 'expected-pon-at-front'
- hw_onus = results[0].onus
+ self.log.debug('sync-pon-onu-results', config=hw_onus)
# ONU's have their own sync task, extra (should be deleted) are
# handled here. Missing are handled by normal discovery mechanisms.
- hw_onu_ids = frozenset([onu.onu_id for onu in hw_onus])
+ hw_onu_ids = frozenset(hw_onus.keys())
my_onu_ids = frozenset(self._onu_by_id.keys())
extra_onus = hw_onu_ids - my_onu_ids
@@ -669,6 +751,9 @@
if self._admin_state != AdminState.ENABLED:
return
+ # Process LOS list
+ self._process_los_alarms(frozenset(status.ont_los))
+
# Get new/missing from the discovered ONU leaf. Stale ONUs from previous
# configs are now cleaned up during h/w re-sync/reflow.
@@ -679,14 +764,62 @@
for serial_number in new | rediscovered_onus:
reactor.callLater(0, self.add_onu, serial_number, status)
- # Process LOS list
- self._process_los_alarms(frozenset(status.ont_los))
+ # PON Statistics
+ self._process_statistics(status)
# Process ONU info. Note that newly added ONUs will not be processed
# until the next pass
-
self._update_onu_status(status.onus)
+ # Process GEM Port information
+ self._update_gem_status(status.gems)
+
+ def _handle_discovered_onu(self, child_device, ind_info):
+ pon_id = ind_info['_pon_id']
+ olt_id = ind_info['_olt_id']
+
+ if ind_info['_sub_group_type'] == 'onu_discovery':
+ self.log.info('Activation-is-in-progress', olt_id=olt_id,
+ pon_ni=pon_id, onu_data=ind_info,
+ onu_id=child_device.proxy_address.onu_id)
+
+ elif ind_info['_sub_group_type'] == 'sub_term_indication':
+ self.log.info('ONU-activation-is-completed', olt_id=olt_id,
+ pon_ni=pon_id, onu_data=ind_info)
+
+ msg = {'proxy_address': child_device.proxy_address,
+ 'event': 'activation-completed', 'event_data': ind_info}
+
+ # Send the event message to the ONU adapter
+ self.adapter_agent.publish_inter_adapter_message(child_device.id,
+ msg)
+ if ind_info['activation_successful'] is True:
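+ # Note: the loop below is currently a placeholder -- it iterates an empty
+ # dict until v_ont_anis bookkeeping is wired in (see the commented-out iterable)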
+ for key, v_ont_ani in dict(): # self.v_ont_anis.items():
+ if v_ont_ani.v_ont_ani.data.onu_id == \
+ child_device.proxy_address.onu_id:
+ for tcont_key, tcont in v_ont_ani.tconts.items():
+ owner_info = dict()
+ # TODO: Right now we use alloc_id as the scheduler ID. Need to
+ # find a way to generate a unique number.
+ id = tcont.alloc_id
+ owner_info['type'] = 'agg_port'
+ owner_info['intf_id'] = \
+ child_device.proxy_address.channel_id
+ owner_info['onu_id'] = \
+ child_device.proxy_address.onu_id
+ owner_info['alloc_id'] = tcont.alloc_id
+ # self.bal.create_scheduler(id, 'upstream', owner_info, 8)
+ else:
+ self.log.info('Invalid-ONU-event', olt_id=olt_id,
+ pon_ni=ind_info['_pon_id'], onu_data=ind_info)
+
+ def _process_statistics(self, status):
+ self.rx_packets = status.rx_packets
+ self.rx_bytes = status.rx_bytes
+ self.tx_packets = status.tx_packets
+ self.tx_bytes = status.tx_bytes
+ self.tx_bip_errors = status.tx_bip_errors
+
def _update_onu_status(self, onus):
"""
Process ONU status for this PON
@@ -694,9 +827,22 @@
"""
for onu_id, onu_status in onus.iteritems():
if onu_id in self._onu_by_id:
- self._onu_by_id[onu_id].rssi = onu_status.rssi
- self._onu_by_id[onu_id].equalization_delay = onu_status.equalization_delay
- self._onu_by_id[onu_id].fiber_length = onu_status.fiber_length
+ onu = self._onu_by_id[onu_id]
+ onu.rssi = onu_status.rssi
+ onu.equalization_delay = onu_status.equalization_delay
+ onu.fiber_length = onu_status.fiber_length
+
+ def _update_gem_status(self, gems):
+ for gem_id, gem_status in gems.iteritems():
+ onu = self._onu_by_id.get(gem_status.onu_id)
+ if onu is not None:
+ gem_port = onu.gem_port(gem_status.gem_id)
+ if gem_port is not None:
+ gem_port.rx_packets = gem_status.rx_packets
+ gem_port.rx_bytes = gem_status.rx_bytes
+ gem_port.tx_packets = gem_status.tx_packets
+ gem_port.tx_bytes = gem_status.tx_bytes
def _process_los_alarms(self, ont_los):
"""
@@ -706,31 +852,17 @@
cleared_alarms = self._active_los_alarms - ont_los
new_alarms = ont_los - self._active_los_alarms
- def los_alarm(status, _id):
- alarm = 'LOS'
- alarm_data = {
- 'ts': arrow.utcnow().timestamp,
- 'description': self.olt.alarms.format_description('onu LOS', alarm, status),
- 'id': self.olt.alarms.format_id(alarm),
- 'type': AlarmEventType.COMMUNICATION,
- 'category': AlarmEventCategory.ONT,
- 'severity': AlarmEventSeverity.MAJOR,
- 'state': AlarmEventState.RAISED if status else AlarmEventState.CLEARED
- }
- context_data = {'onu_id': _id}
- self.olt.alarms.send_alarm(context_data, alarm_data)
-
if len(cleared_alarms) > 0 or len(new_alarms) > 0:
self.log.info('onu-los', cleared=cleared_alarms, new=new_alarms)
for onu_id in cleared_alarms:
- # TODO: test 'clear' of LOS alarm when you delete an ONU in LOS
self._active_los_alarms.remove(onu_id)
- los_alarm(False, onu_id)
+ OnuLosAlarm(self.olt, onu_id).clear_alarm()
for onu_id in new_alarms:
self._active_los_alarms.add(onu_id)
- los_alarm(True, onu_id)
+ OnuLosAlarm(self.olt, onu_id).raise_alarm()
+ self.delete_onu(onu_id)
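+ # The ONU is deleted while in LOS; normal discovery will re-add it
+ # once the LOS condition clears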
def _process_status_onu_discovered_list(self, discovered_onus):
"""
@@ -761,16 +893,19 @@
"""
try:
from flow.demo_data import get_tconts, get_gem_ports, get_onu_id
-
+
if self.activation_method == "autoactivate":
+ # This is currently just for 'DEMO' mode
onu_id = get_onu_id(serial_number)
if onu_id is None:
onu_id = self.get_next_onu_id()
enabled = True
- channel_speed = 0
+ channel_speed = 8500000000
tconts = get_tconts(serial_number, onu_id)
gem_ports = get_gem_ports(serial_number, onu_id)
vont_ani = None
+ xpon_name = None
+ upstream_fec_enabled = True
elif self.activation_method == "autodiscovery":
if self.authentication_method == 'serial-number':
@@ -778,24 +913,33 @@
try:
# TODO: Change iteration to itervalues below
- vont_info = next(info for _, info in gpon_info['v-ont-anis'].items()
+ vont_info = next(info for _, info in gpon_info['vont-anis'].items()
if info.get('expected-serial-number') == serial_number)
- vont_ani = vont_info['data']
+ ont_info = next(info for _, info in gpon_info['ont-anis'].items()
+ if info.get('name') == vont_info['name'])
+
+ vont_ani = vont_info['data']
onu_id = vont_info['onu-id']
enabled = vont_info['enabled']
channel_speed = vont_info['upstream-channel-speed']
+ xpon_name = ont_info['name']
+ upstream_fec_enabled = ont_info.get('upstream-fec', False)
tconts = {key: val for key, val in gpon_info['tconts'].iteritems()
if val.vont_ani == vont_info['name']}
- tcont_names = set(tconts.keys())
gem_ports = {key: val for key, val in gpon_info['gem-ports'].iteritems()
- if val.tconf_ref in tcont_names}
+ if val.tcont_ref in tconts.keys()}
except StopIteration:
- self.log.debug('no-vont-ony')
- return None # Can happen if vont-ani/serial-number has not yet been configured
+ # Can happen if vont-ani or ont-ani has not yet been configured
+ self.log.debug('no-vont-or-ont')
+ return None
+
+ except Exception as e:
+ self.log.exception('autodiscovery', e=e)
+ raise
else:
self.log.debug('not-serial-number-authentication')
return None
@@ -806,11 +950,12 @@
onu_info = {
'device-id': self.olt.device_id,
'serial-number': serial_number,
- 'xpon-name': None,
+ 'xpon-name': xpon_name,
'pon': self,
'onu-id': onu_id,
'enabled': enabled,
'upstream-channel-speed': channel_speed,
+ 'upstream-fec': upstream_fec_enabled,
'password': Onu.DEFAULT_PASSWORD,
't-conts': tconts,
'gem-ports': gem_ports,
@@ -829,38 +974,26 @@
return None
@inlineCallbacks
- def add_onu(self, serial_number, status):
- self.log.info('add-onu', serial_number=serial_number, status=status)
-
- onu_info = self._get_onu_info(Onu.serial_number_to_string(serial_number))
+ def add_onu(self, serial_number_64, status):
+ serial_number = Onu.serial_number_to_string(serial_number_64)
+ self.log.info('add-onu', serial_number=serial_number,
+ serial_number_64=serial_number_64, status=status)
+ onu_info = self._get_onu_info(serial_number)
if onu_info is None:
- self.log.info('lookup-failure', serial_number=serial_number)
+ from alarms.onu_discovery_alarm import OnuDiscoveryAlarm
+ self.log.info('onu-lookup-failure', serial_number=serial_number_64)
+ OnuDiscoveryAlarm(self.olt, self.pon_id, serial_number).raise_alarm()
+ return
- if serial_number not in status.onus or onu_info['onu-id'] in self._active_los_alarms:
+ if serial_number_64 not in status.onus or onu_info['onu-id'] in self._active_los_alarms:
onu = None
+ onu_id = onu_info['onu-id']
- if onu_info['onu-id'] in self._active_los_alarms:
- try:
- yield self._remove_from_hardware(onu_info['onu-id'])
-
- except Exception as e:
- self.log.exception('los-cleanup', e=e)
-
- if serial_number in self._onus or onu_info['onu-id'] in self._onu_by_id:
- # May be here due to unmanaged power-cycle on OLT
-
- self.log.info('onu-already-added', serial_number=serial_number)
-
- assert serial_number in self._onus and\
- onu_info['onu-id'] in self._onu_by_id, \
- 'ONU not in both lists'
-
- # Recover ONU information and attempt to reflow TCONT/GEM-PORT
- # information as well
-
- onu = self._onus[serial_number]
- reflow = True
+ if serial_number_64 in self._onus or onu_id in self._onu_by_id:
+ # May be here due to an unmanaged power-cycle on the OLT or a fiber bounce
+ # for a previously activated ONU. Drop it and add it back on the next discovery cycle
+ self.delete_onu(onu_id)
elif len(self._onus) >= self.MAX_ONUS_SUPPORTED:
self.log.warning('max-onus-provisioned', count=len(self._onus))
@@ -868,8 +1001,7 @@
else:
# TODO: Make use of upstream_channel_speed variable
onu = Onu(onu_info)
- reflow = False
- self._onus[serial_number] = onu
+ self._onus[serial_number_64] = onu
self._onu_by_id[onu.onu_id] = onu
if onu is not None:
@@ -885,22 +1017,22 @@
try:
if gem_port.multicast:
self.log.debug('id-or-vid', id_or_vid=id_or_vid)
- self.add_mcast_gem_port(gem_port, -id_or_vid)
+ vid = self.olt.multicast_vlans[0] if len(self.olt.multicast_vlans) else None
+ if vid is not None:
+ self.add_mcast_gem_port(gem_port, vid)
except Exception as e:
self.log.exception('id-or-vid', e=e)
- yield onu.create(tconts, gem_ports, reflow=reflow)
+ yield onu.create(tconts, gem_ports)
# If autoactivate (demo) mode and not reflow, activate the ONU
- if self.olt.autoactivate and not reflow:
+ if self.olt.autoactivate:
self.activate_onu(onu)
except Exception as e:
- self.log.exception('add-onu', serial_number=serial_number, reflow=reflow, e=e)
-
- if not reflow:
- del self._onus[serial_number]
- del self._onu_by_id[onu.onu_id]
+ self.log.exception('add-onu', serial_number=serial_number_64, e=e)
+ del self._onus[serial_number_64]
+ del self._onu_by_id[onu.onu_id]
def activate_onu(self, onu):
"""
@@ -925,7 +1057,7 @@
vlan=channel_id)
def get_next_onu_id(self):
- used_ids = [onu.onu_id for onu in self._onus.itervalues()]
+ used_ids = [onu.onu_id for onu in self.onus]
while True:
onu_id = self._next_onu_id
@@ -945,6 +1077,10 @@
try:
yield self._parent.rest_client.request('DELETE', uri, name=name)
+ except RestInvalidResponseCode as e:
+ if e.code != 404:
+ self.log.exception('onu-delete', e=e)
+
except Exception as e:
self.log.exception('onu-hw-delete', onu_id=onu_id, e=e)
@@ -956,27 +1092,30 @@
if onu_id in self._onu_by_id:
del self._onu_by_id[onu_id]
- for sn in [onu.serial_numbers for onu in self._onus.itervalues() if onu.onu_id == onu_id]:
- del self._onus[sn]
- try:
- yield self._remove_from_hardware(onu_id)
-
- except Exception as e:
- self.log.exception('onu', serial_number=onu.serial_number, e=e)
+ for sn_64 in [onu.serial_number_64 for onu in self.onus if onu.onu_id == onu_id]:
+ del self._onus[sn_64]
if onu is not None:
- # Clean up adapter agent of this ONU
-
proxy = onu.proxy_address
+ try:
+ onu.delete()
- if proxy is not None:
- onu_device = self.olt.adapter_agent.get_child_device_with_proxy_address(proxy)
- if onu_device is not None:
- self.olt.adapter_agent.delete_child_device(self.olt.device_id,
- onu_device.device_id)
+ except Exception as e:
+ self.log.exception('onu-delete', serial_number=onu.serial_number, e=e)
- self.olt.adapter_agent.update_child_devices_state(self.olt.device_id,
- admin_state=AdminState.DISABLED)
+ if self.olt.autoactivate:
+ # Clean up adapter agent of this ONU
+ if proxy is not None:
+ onu_device = self.olt.adapter_agent.get_child_device_with_proxy_address(proxy)
+ if onu_device is not None:
+ self.olt.adapter_agent.delete_child_device(self.olt.device_id,
+ onu_device.device_id)
+ else:
+ try:
+ yield self._remove_from_hardware(onu_id)
+
+ except Exception as e:
+ self.log.exception('onu-remove', serial_number=onu.serial_number, e=e)
def add_mcast_gem_port(self, mcast_gem, vlan):
"""
diff --git a/voltha/adapters/adtran_olt/tcont.py b/voltha/adapters/adtran_olt/tcont.py
deleted file mode 100644
index 946946d..0000000
--- a/voltha/adapters/adtran_olt/tcont.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2017-present Adtran, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import structlog
-import json
-from enum import Enum
-from voltha.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
-from voltha.protos.bbf_fiber_traffic_descriptor_profile_body_pb2 import TrafficDescriptorProfileData
-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-
-log = structlog.get_logger()
-
-
-class TCont(object):
- """
- Class to wrap TCont capabilities
- """
- def __init__(self, alloc_id, traffic_descriptor, best_effort=None,
- name=None, ident=None, vont_ani=None):
- self.alloc_id = alloc_id
- self.traffic_descriptor = traffic_descriptor
- self.best_effort = best_effort
- self.name = name
- self.id = ident
- self.vont_ani = vont_ani # (string) reference
-
- def __str__(self):
- return "TCont: {}, alloc-id: {}".format(self.name, self.alloc_id)
-
- @staticmethod
- def create(data, td):
- assert isinstance(data, TcontsConfigData)
- assert isinstance(td, TrafficDescriptor)
-
- return TCont(data.alloc_id, td, best_effort=td.best_effort,
- name=data.name, ident=data.id, vont_ani=data.interface_reference)
-
- @inlineCallbacks
- def add_to_hardware(self, session, pon_id, onu_id, operation='POST'):
- from adtran_olt_handler import AdtranOltHandler
-
- uri = AdtranOltHandler.GPON_TCONT_CONFIG_LIST_URI.format(pon_id, onu_id)
- data = json.dumps({'alloc-id': self.alloc_id})
- name = 'tcont-create-{}-{}: {}'.format(pon_id, onu_id, self.alloc_id)
- what = 'tcont'
-
- try:
- # For TCONT, only leaf is the key. So only post needed
- if operation == 'POST':
- results = yield session.request('POST', uri, data=data, name=name)
- else:
- results = succeed('nop')
-
- if self.traffic_descriptor is not None:
- what = 'traffic-descriptor'
- results = yield self.traffic_descriptor.add_to_hardware(session,
- pon_id, onu_id,
- self.alloc_id,
- self.best_effort)
- except Exception as e:
- log.exception(what, tcont=self, td=self.traffic_descriptor, e=e)
- raise
-
- returnValue(results)
-
- def remove_from_hardware(self, session, pon_id, onu_id):
- from adtran_olt_handler import AdtranOltHandler
-
- uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, self.alloc_id)
- name = 'tcont-delete-{}-{}: {}'.format(pon_id, onu_id, self.alloc_id)
- return succeed(session.request('DELETE', uri, name=name))
-
- def _get_onu(self, olt):
- onu = None
- try:
- vont_ani = olt.v_ont_anis.get(self.vont_ani)
- ch_pair = olt.channel_pairs.get(vont_ani['preferred-channel-pair'])
- ch_term = next((term for term in olt.channel_terminations.itervalues()
- if term['channel-pair'] == ch_pair['name']), None)
-
- pon = olt.pon(ch_term['xgs-ponid'])
- onu = pon.onu(vont_ani['onu-id'])
-
- except Exception:
- pass
-
- return onu
-
- def xpon_create(self, olt):
- # Look up any associated ONU. May be None if pre-provisioning
- onu = self._get_onu(olt)
-
- if onu is not None:
- onu.add_tcont(self)
-
- def xpon_update(self, olt):
- # Look up any associated ONU. May be None if pre-provisioning
- onu = self._get_onu(olt)
-
- if onu is not None:
- pass # TODO: Not yet supported
-
- def xpon_delete(self, olt):
- # Look up any associated ONU. May be None if pre-provisioning
- onu = self._get_onu(olt)
-
- if onu is not None:
- onu.remove_tcont(self.alloc_id)
-
-
-class TrafficDescriptor(object):
- """
- Class to wrap the uplink traffic descriptor.
- """
- class AdditionalBwEligibility(Enum):
- NONE = 0
- BEST_EFFORT_SHARING = 1
- NON_ASSURED_SHARING = 2 # Should match xpon.py values
- DEFAULT = NONE
-
- @staticmethod
- def to_string(value):
- return {
- TrafficDescriptor.AdditionalBwEligibility.NON_ASSURED_SHARING: "non-assured-sharing",
- TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING: "best-effort-sharing",
- TrafficDescriptor.AdditionalBwEligibility.NONE: "none"
- }.get(value, "unknown")
-
- @staticmethod
- def from_value(value):
- """
- Matches both Adtran and xPON values
- :param value:
- :return:
- """
- return {
- 0: TrafficDescriptor.AdditionalBwEligibility.NONE,
- 1: TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING,
- 2: TrafficDescriptor.AdditionalBwEligibility.NON_ASSURED_SHARING,
- }.get(value, TrafficDescriptor.AdditionalBwEligibility.DEFAULT)
-
- def __init__(self, fixed, assured, maximum,
- additional=AdditionalBwEligibility.DEFAULT,
- best_effort=None,
- name=None,
- ident=None):
- self.name = name
- self.id = ident
- self.fixed_bandwidth = fixed # bps
- self.assured_bandwidth = assured # bps
- self.maximum_bandwidth = maximum # bps
- self.additional_bandwidth_eligibility = additional
- self.best_effort = best_effort\
- if additional == TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING\
- else None
-
- def __str__(self):
- return "TrafficDescriptor: {}, {}/{}/{}".format(self.name,
- self.fixed_bandwidth,
- self.assured_bandwidth,
- self.maximum_bandwidth)
-
- @staticmethod
- def create(data):
- assert isinstance(data, TrafficDescriptorProfileData)
-
- additional = TrafficDescriptor.AdditionalBwEligibility.from_value(
- data.additional_bw_eligibility_indicator)
-
- if additional == TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
- best_effort = BestEffort(data.maximum_bandwidth,
- data.priority,
- data.weight)
- else:
- best_effort = None
-
- return TrafficDescriptor(data.fixed_bandwidth, data.assured_bandwidth,
- data.maximum_bandwidth,
- name=data.name,
- ident=data.id,
- best_effort=best_effort,
- additional=additional)
-
- def to_dict(self):
- val = {
- 'fixed-bandwidth': self.fixed_bandwidth,
- 'assured-bandwidth': self.assured_bandwidth,
- 'maximum-bandwidth': self.maximum_bandwidth,
- 'additional-bandwidth-eligibility':
- TrafficDescriptor.AdditionalBwEligibility.to_string(
- self.additional_bandwidth_eligibility)
- }
- return val
-
- @inlineCallbacks
- def add_to_hardware(self, session, pon_id, onu_id, alloc_id, best_effort):
- from adtran_olt_handler import AdtranOltHandler
-
- uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, alloc_id)
- data = json.dumps({'traffic-descriptor': self.to_dict()})
- name = 'tcont-td-{}-{}: {}'.format(pon_id, onu_id, alloc_id)
- try:
- results = yield session.request('PATCH', uri, data=data, name=name)
-
- except Exception as e:
- log.exception('traffic-descriptor', td=self, e=e)
- raise
-
- if self.additional_bandwidth_eligibility == \
- TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
- if best_effort is None:
- raise ValueError('TCONT is best-effort but does not define best effort sharing')
-
- try:
- results = yield best_effort.add_to_hardware(session, pon_id, onu_id, alloc_id)
-
- except Exception as e:
- log.exception('best-effort', best_effort=best_effort, e=e)
- raise
-
- returnValue(results)
-
- def xpon_create(self, olt, tcont):
- # Look up any associated ONU. May be None if pre-provisioning
- pass # TODO
-
- def xpon_update(self, olt, tcont):
- # Look up any associated ONU. May be None if pre-provisioning
- pass # TODO: Not yet supported
-
-
-class BestEffort(object):
- def __init__(self, bandwidth, priority, weight):
- self.bandwidth = bandwidth # bps
- self.priority = priority # 0.255
- self.weight = weight # 0..100
-
- def __str__(self):
- return "BestEffort: {}/p-{}/w-{}".format(self.bandwidth,
- self.priority,
- self.weight)
-
- def to_dict(self):
- val = {
- 'bandwidth': self.bandwidth,
- 'priority': self.priority,
- 'weight': self.weight
- }
- return val
-
- def add_to_hardware(self, session, pon_id, onu_id, alloc_id, best_effort):
- from adtran_olt_handler import AdtranOltHandler
-
- uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, alloc_id)
- data = json.dumps({'best-effort': best_effort.to_dict()})
- name = 'tcont-best-effort-{}-{}: {}'.format(pon_id, onu_id, alloc_id)
-
- return session.request('PATCH', uri, data=data, name=name)
diff --git a/voltha/adapters/adtran_olt/xpon/__init__.py b/voltha/adapters/adtran_olt/xpon/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/__init__.py
diff --git a/voltha/adapters/adtran_olt/xpon/adtran_olt_xpon.py b/voltha/adapters/adtran_olt/xpon/adtran_olt_xpon.py
new file mode 100644
index 0000000..d1ca2e5
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/adtran_olt_xpon.py
@@ -0,0 +1,386 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from adtran_xpon import AdtranXPON
+from voltha.protos.bbf_fiber_base_pb2 import \
+ ChannelgroupConfig, ChannelpartitionConfig, ChannelpairConfig, \
+ ChannelterminationConfig, OntaniConfig, VOntaniConfig, VEnetConfig
+from voltha.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
+from voltha.protos.bbf_fiber_traffic_descriptor_profile_body_pb2 import TrafficDescriptorProfileData
+from voltha.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
+from voltha.protos.bbf_fiber_multicast_gemport_body_pb2 import \
+ MulticastGemportsConfigData
+from voltha.protos.bbf_fiber_multicast_distribution_set_body_pb2 import \
+ MulticastDistributionSetData
+
+log = structlog.get_logger()
+
+
+class AdtranOltXPON(AdtranXPON):
+ """
+ Class for OLT and xPON operations
+ """
+ def __init__(self, **kwargs):
+ super(AdtranOltXPON, self).__init__(**kwargs)
+
+ # xPON config dictionaries
+ self._channel_groups = {} # Name -> dict
+ self._channel_partitions = {} # Name -> dict
+ self._channel_pairs = {} # Name -> dict
+ self._channel_terminations = {} # Name -> dict
+
+ @property
+ def channel_terminations(self):
+ return self._channel_terminations
+
+ @property
+ def channel_groups(self):
+ return self._channel_groups
+
+ @property
+ def channel_pairs(self):
+ return self._channel_pairs
+
+ @property
+ def channel_partitions(self):
+ return self._channel_partitions
+
+ def get_xpon_info(self, pon_id, pon_id_type='xgs-ponid'):
+ """
+ Look up all xPON configuration data for a specific pon-id / channel-termination
+ :param pon_id: (int) PON Identifier
+ :param pon_id_type: (str) channel-termination key used to match the PON ID
+ :return: (dict) reduced xPON information for the specific PON port
+ """
+ if pon_id not in self._cached_xpon_pon_info:
+ terminations = {key: val for key, val in self._channel_terminations.iteritems()
+ if val[pon_id_type] == pon_id}
+
+ pair_names = set([term['channel-pair'] for term in terminations.itervalues()])
+ pairs = {key: val for key, val in self.channel_pairs.iteritems()
+ if key in pair_names}
+
+ partition_names = set([pair['channel-partition'] for pair in pairs.itervalues()])
+ partitions = {key: val for key, val in self.channel_partitions.iteritems()
+ if key in partition_names}
+
+ v_ont_anis = {key: val for key, val in self.v_ont_anis.iteritems()
+ if val['preferred-channel-pair'] in pair_names}
+ v_ont_ani_names = set(v_ont_anis.keys())
+
+ ont_anis = {key: val for key, val in self.ont_anis.iteritems()
+ if key in v_ont_ani_names}
+
+ group_names = set(pair['channel-group'] for pair in pairs.itervalues())
+ groups = {key: val for key, val in self.channel_groups.iteritems()
+ if key in group_names}
+
+ venets = {key: val for key, val in self.v_enets.iteritems()
+ if val['vont-ani'] in v_ont_ani_names}
+
+ tconts = {key: val['object'] for key, val in self.tconts.iteritems()
+ if val['vont-ani'] in v_ont_ani_names and 'object' in val}
+ tcont_names = set(tconts.keys())
+
+ gem_ports = {key: val['object'] for key, val in self.gem_ports.iteritems()
+ if val['tcont-ref'] in tcont_names and 'object' in val}
+
+ self._cached_xpon_pon_info[pon_id] = {
+ 'channel-terminations': terminations,
+ 'channel-pairs': pairs,
+ 'channel-partitions': partitions,
+ 'channel-groups': groups,
+ 'vont-anis': v_ont_anis,
+ 'ont-anis': ont_anis,
+ 'v-enets': venets,
+ 'tconts': tconts,
+ 'gem-ports': gem_ports
+ }
+ return self._cached_xpon_pon_info[pon_id]
+
+ def get_related_pons(self, item, pon_type='xgs-ponid'):
+ pon_ids = set()
+ ports = []
+ data = item['data']
+
+ if isinstance(data, ChannelgroupConfig):
+ group_name = item['name']
+ pair_names = {val['name'] for val in self.channel_pairs.itervalues()
+ if val['channel-group'] == group_name}
+ pon_ids = {val[pon_type] for val in self.channel_terminations.itervalues()
+ if val['channel-pair'] in pair_names}
+
+ elif isinstance(data, ChannelpartitionConfig):
+ part_name = item['name']
+ pair_names = {val['name'] for val in self.channel_pairs.itervalues()
+ if val['channel-partition'] == part_name}
+ pon_ids = {val[pon_type] for val in self.channel_terminations.itervalues()
+ if val['channel-pair'] in pair_names}
+
+ elif isinstance(data, ChannelpairConfig):
+ pair_name = item['name']
+ pon_ids = {val[pon_type] for val in self.channel_terminations.itervalues()
+ if val['channel-pair'] == pair_name}
+
+ elif isinstance(data, ChannelterminationConfig):
+ pon_ids = [item[pon_type]]
+
+ elif isinstance(data, (OntaniConfig, VOntaniConfig)):
+ # ont_ani name == vont_ani name since no link table support yet
+ vont_name = item['name']
+ pair_name = self.v_ont_anis[vont_name]['preferred-channel-pair'] \
+ if vont_name in self.v_ont_anis else None
+
+ if pair_name is not None:
+ pon_ids = {val[pon_type] for val in self.channel_terminations.itervalues()
+ if val['channel-pair'] == pair_name}
+
+ elif isinstance(data, VEnetConfig):
+ venet_name = item['name']
+ vont_name = self.v_enets[venet_name]['vont-ani'] \
+ if venet_name in self.v_enets else None
+
+ if vont_name is not None and vont_name in self.v_ont_anis:
+ pair_name = self.v_ont_anis[vont_name]['preferred-channel-pair']
+ pon_ids = {val[pon_type] for val in self.channel_terminations.itervalues()
+ if val['channel-pair'] == pair_name}
+
+ elif isinstance(data, TcontsConfigData):
+ tcont_name = item['name']
+ vont_name = self.tconts[tcont_name]['vont-ani'] \
+ if tcont_name in self.tconts else None
+
+ if vont_name is not None and vont_name in self.v_ont_anis:
+ pair_name = self.v_ont_anis[vont_name]['preferred-channel-pair']
+ pon_ids = {val[pon_type] for val in self.channel_terminations.itervalues()
+ if val['channel-pair'] == pair_name}
+
+ elif isinstance(data, TrafficDescriptorProfileData):
+ td_name = item['name']
+
+ elif isinstance(data, GemportsConfigData):
+ gem_name = item['name']
+ venet_name = self.gem_ports[gem_name]['venet-ref'] \
+ if gem_name in self.gem_ports else None
+
+ vont_name = self.v_enets[venet_name]['vont-ani'] \
+ if venet_name in self.v_enets else None
+
+ if vont_name is not None and vont_name in self.v_ont_anis:
+ pair_name = self.v_ont_anis[vont_name]['preferred-channel-pair']
+ pon_ids = {val[pon_type] for val in self.channel_terminations.itervalues()
+ if val['channel-pair'] == pair_name}
+
+ elif isinstance(data, MulticastGemportsConfigData):
+ raise NotImplementedError('TODO')
+
+ elif isinstance(data, MulticastDistributionSetData):
+ raise NotImplementedError('TODO')
+
+ for pon_id in pon_ids:
+ pon_port = self.southbound_ports.get(pon_id, None)
+ if pon_port is not None:
+ ports.append(pon_port)
+
+ return ports
+
+ def get_related_onus(self, item, pon_type='xgs-ponid'):
+ onus = []
+ pons = self.get_related_pons(item, pon_type=pon_type)
+ data = item['data']
+
+ for pon in pons:
+ if isinstance(data, (OntaniConfig, VOntaniConfig)):
+ # ont_ani name == vont_ani name since no link table support yet
+ vont_name = item['name']
+ for onu in pon.onus:
+ if onu.xpon_name == vont_name:
+ onus.append(onu)
+
+ elif isinstance(data, VEnetConfig):
+ venet_name = item['name']
+ vont_name = self.v_enets[venet_name]['vont-ani'] \
+ if venet_name in self.v_enets else None
+
+ if vont_name is not None and vont_name in self.v_ont_anis:
+ for onu in pon.onus:
+ if onu.xpon_name == vont_name:
+ onus.append(onu)
+
+ elif isinstance(data, TcontsConfigData):
+ tcont_name = item['name']
+ vont_name = self.tconts[tcont_name]['vont-ani'] \
+ if tcont_name in self.tconts else None
+
+ if vont_name is not None and vont_name in self.v_ont_anis:
+ for onu in pon.onus:
+ if onu.xpon_name == vont_name:
+ onus.append(onu)
+
+ elif isinstance(data, TrafficDescriptorProfileData):
+ pass
+
+ elif isinstance(data, GemportsConfigData):
+ gem_name = item['name']
+ venet_name = self.gem_ports[gem_name]['venet-ref'] \
+ if gem_name in self.gem_ports else None
+
+ vont_name = self.v_enets[venet_name]['vont-ani'] \
+ if venet_name in self.v_enets else None
+
+ if vont_name is not None and vont_name in self.v_ont_anis:
+ for onu in pon.onus:
+ if onu.xpon_name == vont_name:
+ onus.append(onu)
+
+ elif isinstance(data, MulticastGemportsConfigData):
+ raise NotImplementedError('TODO')
+
+ elif isinstance(data, MulticastDistributionSetData):
+ raise NotImplementedError('TODO')
+
+ return onus
+
+ def _get_xpon_collection(self, data):
+ collection, create, modify, delete = super(AdtranOltXPON, self)._get_xpon_collection(data)
+
+ if collection is not None:
+ return collection, create, modify, delete
+
+ elif isinstance(data, ChannelgroupConfig):
+ return self.channel_groups, \
+ self.on_channel_group_create,\
+ self.on_channel_group_modify, \
+ self.on_channel_group_delete
+
+ elif isinstance(data, ChannelpartitionConfig):
+ return self.channel_partitions,\
+ self.on_channel_partition_create,\
+ self.on_channel_partition_modify,\
+ self.on_channel_partition_delete
+
+ elif isinstance(data, ChannelpairConfig):
+ return self.channel_pairs, \
+ self.on_channel_pair_create,\
+ self.on_channel_pair_modify, \
+ self.on_channel_pair_delete
+
+ elif isinstance(data, ChannelterminationConfig):
+ return self.channel_terminations,\
+ self.on_channel_termination_create,\
+ self.on_channel_termination_modify,\
+ self.on_channel_termination_delete
+ return None, None, None, None
+
+ def _data_to_dict(self, data, td=None):
+ result = super(AdtranOltXPON, self)._data_to_dict(data, td=td)
+
+ if result is not None:
+ return result
+
+ name = data.name
+ interface = data.interface
+ inst_data = data.data
+
+ if isinstance(data, ChannelgroupConfig):
+ return 'channel-group', {
+ 'name': name,
+ 'enabled': interface.enabled,
+ 'system-id': inst_data.system_id,
+ 'polling-period': inst_data.polling_period,
+ 'data': data
+ }
+
+ elif isinstance(data, ChannelpartitionConfig):
+ def _auth_method_enum_to_string(value):
+ from voltha.protos.bbf_fiber_types_pb2 import SERIAL_NUMBER, LOID, \
+ REGISTRATION_ID, OMCI, DOT1X
+ return {
+ SERIAL_NUMBER: 'serial-number',
+ LOID: 'loid',
+ REGISTRATION_ID: 'registration-id',
+ OMCI: 'omci',
+ DOT1X: 'dot1x'
+ }.get(value, 'unknown')
+
+ return 'channel-partition', {
+ 'name': name,
+ 'enabled': interface.enabled,
+ 'authentication-method': _auth_method_enum_to_string(inst_data.authentication_method),
+ 'channel-group': inst_data.channelgroup_ref,
+ 'fec-downstream': inst_data.fec_downstream,
+ 'mcast-aes': inst_data.multicast_aes_indicator,
+ 'differential-fiber-distance': inst_data.differential_fiber_distance,
+ 'data': data
+ }
+
+ elif isinstance(data, ChannelpairConfig):
+ return 'channel-pair', {
+ 'name': name,
+ 'enabled': interface.enabled,
+ 'channel-group': inst_data.channelgroup_ref,
+ 'channel-partition': inst_data.channelpartition_ref,
+ 'line-rate': inst_data.channelpair_linerate,
+ 'data': data
+ }
+
+ elif isinstance(data, ChannelterminationConfig):
+ return 'channel-termination', {
+ 'name': name,
+ 'enabled': interface.enabled,
+ 'xgs-ponid': inst_data.xgs_ponid,
+ 'xgpon-ponid': inst_data.xgpon_ponid,
+ 'channel-pair': inst_data.channelpair_ref,
+ 'ber-calc-period': inst_data.ber_calc_period,
+ 'data': data
+ }
+
+ else:
+ raise NotImplementedError('Unknown data type')
+
+ def on_channel_group_create(self, cgroup):
+ return cgroup # Implement in your OLT, if needed
+
+ def on_channel_group_modify(self, cgroup, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_channel_group_delete(self, cgroup):
+ return None # Implement in your OLT, if needed
+
+ def on_channel_partition_create(self, cpartition):
+ return cpartition # Implement in your OLT, if needed
+
+ def on_channel_partition_modify(self, cpartition, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_channel_partition_delete(self, cpartition):
+ return None # Implement in your OLT, if needed
+
+ def on_channel_pair_create(self, cpair):
+ return cpair # Implement in your OLT, if needed
+
+ def on_channel_pair_modify(self, cpair, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_channel_pair_delete(self, cpair):
+ return None # Implement in your OLT, if needed
+
+ def on_channel_termination_create(self, cterm):
+ return cterm # Implement in your OLT, if needed
+
+ def on_channel_termination_modify(self, cterm, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_channel_termination_delete(self, cterm):
+ return None # Implement in your OLT, if needed
diff --git a/voltha/adapters/adtran_olt/xpon/adtran_xpon.py b/voltha/adapters/adtran_olt/xpon/adtran_xpon.py
new file mode 100644
index 0000000..71943db
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/adtran_xpon.py
@@ -0,0 +1,497 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from traffic_descriptor import TrafficDescriptor
+from voltha.protos.bbf_fiber_base_pb2 import \
+ OntaniConfig, VOntaniConfig, VEnetConfig
+from voltha.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
+from voltha.protos.bbf_fiber_traffic_descriptor_profile_body_pb2 import TrafficDescriptorProfileData
+from voltha.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
+from voltha.protos.bbf_fiber_multicast_gemport_body_pb2 import \
+ MulticastGemportsConfigData
+from voltha.protos.bbf_fiber_multicast_distribution_set_body_pb2 import \
+ MulticastDistributionSetData
+
+log = structlog.get_logger()
+
+
+class AdtranXPON(object):
+ """
+ Class to abstract common OLT and ONU xPON operations
+ """
+ def __init__(self, **kwargs):
+ # xPON config dictionaries
+ self._v_ont_anis = {} # Name -> dict
+ self._ont_anis = {} # Name -> dict
+ self._v_enets = {} # Name -> dict
+ self._tconts = {} # Name -> dict
+ self._traffic_descriptors = {} # Name -> dict
+ self._gem_ports = {} # Name -> dict
+ self._mcast_gem_ports = {} # Name -> dict
+ self._mcast_dist_sets = {} # Name -> dict
+ self._cached_xpon_pon_info = {} # PON-id -> dict
+
+ @property
+ def v_ont_anis(self):
+ return self._v_ont_anis
+
+ @property
+ def ont_anis(self):
+ return self._ont_anis
+
+ @property
+ def v_enets(self):
+ return self._v_enets
+
+ @property
+ def tconts(self):
+ return self._tconts
+
+ @property
+ def traffic_descriptors(self):
+ return self._traffic_descriptors
+
+ @property
+ def gem_ports(self):
+ return self._gem_ports
+
+ def _get_xpon_collection(self, data):
+ """
+ Get the collection for the object type and handler routines
+ :param data: xPON object
+ """
+ if isinstance(data, OntaniConfig):
+ return self.ont_anis, \
+ self.on_ont_ani_create,\
+ self.on_ont_ani_modify, \
+ self.on_ont_ani_delete
+
+ elif isinstance(data, VOntaniConfig):
+ return self.v_ont_anis, \
+ self.on_vont_ani_create,\
+ self.on_vont_ani_modify, \
+ self.on_vont_ani_delete
+
+ elif isinstance(data, VEnetConfig):
+ return self.v_enets, \
+ self.on_venet_create,\
+ self.on_venet_modify, \
+ self.on_venet_delete
+
+ elif isinstance(data, TcontsConfigData):
+ return self.tconts, \
+ self.on_tcont_create,\
+ self.on_tcont_modify, \
+ self.on_tcont_delete
+
+ elif isinstance(data, TrafficDescriptorProfileData):
+ return self.traffic_descriptors, \
+ self.on_td_create,\
+ self.on_td_modify, \
+ self.on_td_delete
+
+ elif isinstance(data, GemportsConfigData):
+ return self.gem_ports, \
+ self.on_gemport_create,\
+ self.on_gemport_modify, \
+ self.on_gemport_delete
+
+ elif isinstance(data, MulticastGemportsConfigData):
+ return self.mcast_gem_ports, \
+ self.on_mcast_gemport_create,\
+ self.on_mcast_gemport_modify, \
+ self.on_mcast_gemport_delete
+
+ elif isinstance(data, MulticastDistributionSetData):
+ return self.mcast_dist_sets, \
+ self.on_mcast_dist_set_create,\
+ self.on_mcast_dist_set_modify, \
+ self.on_mcast_dist_set_delete
+
+ return None, None, None, None
+
+ def _data_to_dict(self, data, td=None):
+ if isinstance(data, OntaniConfig):
+ name = data.name
+ interface = data.interface
+ inst_data = data.data
+
+ return 'ont-ani', {
+ 'name': name,
+ 'description': interface.description,
+ 'enabled': interface.enabled,
+ 'upstream-fec': inst_data.upstream_fec_indicator,
+ 'mgnt-gemport-aes': inst_data.mgnt_gemport_aes_indicator,
+ 'data': data
+ }
+ elif isinstance(data, VOntaniConfig):
+ name = data.name
+ interface = data.interface
+ inst_data = data.data
+
+ return 'vOnt-ani', {
+ 'name': name,
+ 'description': interface.description,
+ 'enabled': interface.enabled,
+ 'onu-id': inst_data.onu_id,
+ 'expected-serial-number': inst_data.expected_serial_number,
+ 'preferred-channel-pair': inst_data.preferred_chanpair,
+ 'channel-partition': inst_data.parent_ref,
+ 'upstream-channel-speed': inst_data.upstream_channel_speed,
+ 'data': data
+ }
+ elif isinstance(data, VEnetConfig):
+ name = data.name
+ interface = data.interface
+ inst_data = data.data
+
+ return 'vEnet', {
+ 'name': name,
+ 'description': interface.description,
+ 'enabled': interface.enabled,
+ 'vont-ani': inst_data.v_ontani_ref,
+ 'data': data
+ }
+ elif isinstance(data, TcontsConfigData):
+ return 'TCONT', {
+ 'name': data.name,
+ 'alloc-id': data.alloc_id,
+ 'vont-ani': data.interface_reference,
+ 'td-ref': td['name'],
+ 'data': data
+ }
+ elif isinstance(data, TrafficDescriptorProfileData):
+ additional = TrafficDescriptor.AdditionalBwEligibility.from_value(
+ data.additional_bw_eligibility_indicator)
+
+ return 'Traffic-Desc', {
+ 'name': data.name,
+ 'fixed-bandwidth': data.fixed_bandwidth,
+ 'assured-bandwidth': data.assured_bandwidth,
+ 'maximum-bandwidth': data.maximum_bandwidth,
+ 'priority': data.priority,
+ 'weight': data.weight,
+ 'additional-bw-eligibility-indicator': additional,
+ 'data': data
+ }
+ elif isinstance(data, GemportsConfigData):
+ return 'GEMPort', {
+ 'name': data.name,
+ 'gemport-id': data.gemport_id,
+ 'tcont-ref': data.tcont_ref,
+ 'encryption': data.aes_indicator,
+ 'traffic-class': data.traffic_class,
+ 'venet-ref': data.itf_ref, # vENET
+ 'data': data
+ }
+ elif isinstance(data, MulticastGemportsConfigData):
+ return 'MCAST-GEM', {
+ 'name': data.name,
+ 'gemport-id': data.gemport_id,
+ 'traffic-class': data.traffic_class,
+ 'is-broadcast': data.is_broadcast,
+ 'channel-pair-ref': data.itf_ref, # channel-pair
+ 'data': data
+ }
+ elif isinstance(data, MulticastDistributionSetData):
+ data_dict = {
+ 'name': data.name,
+ 'multicast-gemport-ref': data.multicast_gemport_ref,
+ 'multicast-vlans-all': None,
+ 'multicast-vlans-list': [],
+ 'data': data
+ }
+ raise NotImplementedError('Need to decode multicast-vlans parameter')
+ return 'MCAST-Distribution', data_dict
+
+ return None
+
+ def create_tcont(self, tcont_data, traffic_descriptor_data):
+ """
+ Create TCONT information
+ :param tcont_data:
+ :param traffic_descriptor_data:
+ """
+ log.debug('create-tcont', tcont=tcont_data, td=traffic_descriptor_data)
+
+ # Handle TD first, then TCONT
+ try:
+ self.xpon_create(traffic_descriptor_data)
+
+ except Exception as e:
+ log.exception('td-create', td=traffic_descriptor_data)
+
+ try:
+ td = self.traffic_descriptors.get(traffic_descriptor_data.name)
+ self.xpon_create(tcont_data, td=td)
+
+ except Exception as e:
+ log.exception('tcont-create', tcont=tcont_data)
+
+ def update_tcont(self, tcont_data, traffic_descriptor_data):
+ """
+ Update TCONT information
+ :param tcont_data:
+ :param traffic_descriptor_data:
+ """
+ log.debug('update-tcont', tcont=tcont_data, td=traffic_descriptor_data)
+
+ # Handle TD first, then TCONT. The TD may be new
+ try:
+ items, _, _, _ = self._get_xpon_collection(traffic_descriptor_data)
+ existing_item = items.get(traffic_descriptor_data.name)
+ if existing_item is None:
+ self.xpon_create(traffic_descriptor_data)
+ else:
+ self.xpon_update(traffic_descriptor_data)
+
+ except Exception as e:
+ log.exception('td-update', td=traffic_descriptor_data)
+
+ try:
+ self.xpon_update(tcont_data)
+
+ except Exception as e:
+ log.exception('tcont-update', tcont=tcont_data)
+
+ def remove_tcont(self, tcont_data, traffic_descriptor_data):
+ """
+ Remove TCONT information
+ :param tcont_data:
+ :param traffic_descriptor_data:
+ """
+ log.debug('remove-tcont', tcont=tcont_data, td=traffic_descriptor_data)
+
+ # Handle TCONT first when removing, then TD
+ try:
+ self.xpon_remove(tcont_data)
+ except Exception as e:
+ log.exception('tcont-remove', tcont=tcont_data)
+
+ try:
+ self.xpon_remove(traffic_descriptor_data)
+ except Exception as e:
+ log.exception('td-remove', td=traffic_descriptor_data)
+
+ def xpon_create(self, data, td=None):
+ log.debug('xpon-create', data=data)
+
+ name = data.name
+ items, create_method, update_method, _ = self._get_xpon_collection(data)
+
+ if items is None:
+ raise ValueError('Unknown data type: {}'.format(type(data)))
+
+ item_type, new_item = self._data_to_dict(data, td=td)
+
+ if name in items:
+ # Treat like an update. It will update collection if needed
+ return self.xpon_update(data)
+
+ log.debug('new-item', item_type=item_type, item=new_item)
+ items[name] = new_item
+ self._cached_xpon_pon_info = {} # Clear cached data
+
+ if create_method is not None:
+ try:
+ new_item = create_method(new_item)
+ except Exception as e:
+ log.exception('xpon-create', item=new_item, e=e)
+
+ if new_item is not None:
+ items[name] = new_item
+ else:
+ del items[name]
+
+ def xpon_update(self, data):
+ log.debug('xpon-update', data=data)
+
+ name = data.name
+ items, create, update_method, delete = self._get_xpon_collection(data)
+
+ if items is None:
+ raise ValueError('Unknown data type: {}'.format(type(data)))
+
+ existing_item = items.get(name)
+ if existing_item is None:
+ raise KeyError("'{}' not found. Type: {}".format(name, type(data)))
+
+ item_type, update_item = self._data_to_dict(data)
+ log.debug('update-item', item_type=item_type, item=update_item)
+
+ def _dict_diff(lhs, rhs):
+ """
+ Compare the values of two dictionaries and return the items in 'rhs'
+ that differ from 'lhs'. The RHS dictionary keys can be a subset of the
+ LHS dictionary keys, or the RHS dictionary can contain new keys and values.
+
+ :param lhs: (dict) Original dictionary values
+ :param rhs: (dict) New dictionary values to compare to the original (lhs) dict
+ :return: (dict) Dictionary with differences from the RHS dictionary
+ """
+ lhs_keys = {k for k in lhs.keys() if k not in ['object', 'data']}
+ rhs_keys = {k for k in rhs.keys() if k not in ['object', 'data']}
+ assert len(lhs_keys) == len(lhs_keys & rhs_keys), 'Dictionary Keys do not match'
+ return {k: v for k, v in rhs.items() if k not in lhs or lhs[k] != rhs[k]}
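+ # Illustrative example: _dict_diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4})
+ # returns {'b': 3, 'c': 4} -- the changed and newly added RHS entries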
+
+ # Calculate the difference
+ diffs = _dict_diff(existing_item, update_item)
+
+ if len(diffs) == 0:
+ log.debug('update-item-no-diffs')
+ return
+
+ items[name] = update_item
+ self._cached_xpon_pon_info = {} # Clear cached data
+
+ # Act on any changed items
+ if update_method is not None:
+ try:
+ update_item = update_method(existing_item, update_item, diffs)
+ except Exception as e:
+ log.exception('xpon-update', existing=existing_item,
+ update=update_item, diffs=diffs,
+ e=e)
+
+ if update_item is not None:
+ items[name] = update_item
+ else:
+ del items[name]
+
+ def xpon_remove(self, data):
+ log.debug('xpon_remove', data=data)
+ name = data.name
+
+ items, create, update, delete_method = self._get_xpon_collection(data)
+ item = items.get(name)
+
+ if item is not None:
+ if delete_method is None:
+ item = None
+ else:
+ try:
+ item = delete_method(item)
+
+ except Exception as e:
+ log.exception('xpon-remove', item=items, e=e)
+
+ self._cached_xpon_pon_info = {} # Clear cached data
+
+ if item is None:
+ del items[name]
+ else:
+ # Update item in collection (still referenced somewhere)
+ items[name] = item
+
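+ # Handler contract for the on_* hooks below: return the (possibly updated) item
+ # dictionary to keep it in the collection, or return None to drop it.
+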
+ def on_ont_ani_create(self, ont_ani):
+ """
+ A new ONT-ani is being created. You can override this method to
+ perform custom operations as needed. If you override this method, you can add
+ additional items to the item dictionary to track additional implementation
+ key/value pairs.
+
+ :param ont_ani: (dict) new ONT-ani
+ :return: (dict) Updated ONT-ani dictionary, None if item should be deleted
+ """
+ return ont_ani # Implement in your OLT, if needed
+
+ def on_ont_ani_modify(self, ont_ani, update, diffs):
+ """
+ An existing ONT-ani is being updated. You can override this method to
+ perform custom operations as needed. If you override this method, you can add
+ additional items to the item dictionary to track additional implementation
+ key/value pairs.
+
+ :param ont_ani: (dict) existing ONT-ani item dictionary
+ :param update: (dict) updated (changed) ONT-ani
+ :param diffs: (dict) collection of items different in the update
+ :return: (dict) Updated ONT-ani dictionary, None if item should be deleted
+ """
+ return update # Implement in your OLT, if needed
+
+ def on_ont_ani_delete(self, ont_ani):
+ """
+ An existing ONT-ani is being deleted. You can override this method to
+ perform custom operations as needed. If you override this method, you can add
+ additional items to the item dictionary to track additional implementation
+ key/value pairs.
+
+ :param ont_ani: (dict) ONT-ani to delete
+ :return: (dict) None if item should be deleted
+ """
+ return None # Implement in your OLT, if needed
+
+ def on_vont_ani_create(self, vont_ani):
+ return vont_ani # Implement in your OLT, if needed
+
+ def on_vont_ani_modify(self, vont_ani, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_vont_ani_delete(self, vont_ani):
+ return None # Implement in your OLT, if needed
+
+ def on_venet_create(self, venet):
+ return venet # Implement in your OLT, if needed
+
+ def on_venet_modify(self, venet, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_venet_delete(self, venet):
+ return None # Implement in your OLT, if needed
+
+ def on_tcont_create(self, tcont):
+ return tcont # Implement in your OLT, if needed
+
+ def on_tcont_modify(self, tcont, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_tcont_delete(self, tcont):
+ return None # Implement in your OLT, if needed
+
+ def on_td_create(self, traffic_desc):
+ return traffic_desc # Implement in your OLT, if needed
+
+ def on_td_modify(self, traffic_desc, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_td_delete(self, traffic_desc):
+ return None # Implement in your OLT, if needed
+
+ def on_gemport_create(self, gem_port):
+ return gem_port # Implement in your OLT, if needed
+
+ def on_gemport_modify(self, gem_port, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_gemport_delete(self, gem_port):
+ return None # Implement in your OLT, if needed
+
+ def on_mcast_gemport_create(self, mcast_gem_port):
+ return mcast_gem_port # Implement in your OLT, if needed
+
+ def on_mcast_gemport_modify(self, mcast_gem_port, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_mcast_gemport_delete(self, mcast_gem_port):
+ return None # Implement in your OLT, if needed
+
+ def on_mcast_dist_set_create(self, dist_set):
+ return dist_set # Implement in your OLT, if needed
+
+ def on_mcast_dist_set_modify(self, dist_set, update, diffs):
+ return update # Implement in your OLT, if needed
+
+ def on_mcast_dist_set_delete(self, dist_set):
+ return None # Implement in your OLT, if needed
diff --git a/voltha/adapters/adtran_olt/xpon/best_effort.py b/voltha/adapters/adtran_olt/xpon/best_effort.py
new file mode 100644
index 0000000..99622af
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/best_effort.py
@@ -0,0 +1,47 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+
+log = structlog.get_logger()
+
+
+class BestEffort(object):
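+ """
+ Best-effort parameters used when a traffic descriptor's additional
+ bandwidth eligibility is best-effort sharing. These values are written
+ to the OLT as part of the TCONT configuration.
+ """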
+ def __init__(self, bandwidth, priority, weight):
+ self.bandwidth = bandwidth # bps
+ self.priority = priority # 0..255
+ self.weight = weight # 0..100
+
+ def __str__(self):
+ return "BestEffort: {}/p-{}/w-{}".format(self.bandwidth,
+ self.priority,
+ self.weight)
+
+ def to_dict(self):
+ val = {
+ 'bandwidth': self.bandwidth,
+ 'priority': self.priority,
+ 'weight': self.weight
+ }
+ return val
+
+ def add_to_hardware(self, session, pon_id, onu_id, alloc_id, best_effort=None):
+ from ..adtran_olt_handler import AdtranOltHandler
+
+ # Default to this object's own parameters so callers (such as the OLT
+ # traffic descriptor) can invoke this without re-passing the instance.
+ best_effort = best_effort or self
+ uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, alloc_id)
+ data = json.dumps({'best-effort': best_effort.to_dict()})
+ name = 'tcont-best-effort-{}-{}: {}'.format(pon_id, onu_id, alloc_id)
+
+ return session.request('PATCH', uri, data=data, name=name)
diff --git a/voltha/adapters/adtran_olt/xpon/gem_port.py b/voltha/adapters/adtran_olt/xpon/gem_port.py
new file mode 100644
index 0000000..fc16fd9
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/gem_port.py
@@ -0,0 +1,103 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class GemPort(object):
+ """
+ Class to wrap GEM port capabilities
+ """
+ def __init__(self, gem_id, alloc_id,
+ encryption=False,
+ omci_transport=False,
+ multicast=False,
+ tcont_ref=None,
+ traffic_class=None,
+ intf_ref=None,
+ exception=False, # FIXED_ONU
+ name=None,
+ handler=None):
+ self.name = name
+ self.gem_id = gem_id
+ self._alloc_id = alloc_id
+ self.tcont_ref = tcont_ref
+ self.intf_ref = intf_ref
+ self.traffic_class = traffic_class
+ self._encryption = encryption
+ self._omci_transport = omci_transport
+ self.multicast = multicast
+ self.exception = exception # FIXED_ONU
+ self._handler = handler
+
+ # TODO: Make this a base class and derive OLT and ONU specific classes from it
+ # The primary OLT-specific items are the PON ID and the
+ # add/remove-from-hardware methods
+ self._pon_id = None
+ self._onu_id = None
+
+ # Statistics
+ self.rx_packets = 0
+ self.rx_bytes = 0
+ self.tx_packets = 0
+ self.tx_bytes = 0
+
+ def __str__(self):
+ return "GemPort: {}, alloc-id: {}, gem-id: {}".format(self.name,
+ self.alloc_id,
+ self.gem_id)
+
+ @property
+ def pon_id(self):
+ return self._pon_id
+
+ @pon_id.setter
+ def pon_id(self, pon_id):
+ assert self._pon_id is None or self._pon_id == pon_id, 'PON-ID can only be set once'
+ self._pon_id = pon_id
+
+ @property
+ def onu_id(self):
+ return self._onu_id
+
+ @onu_id.setter
+ def onu_id(self, onu_id):
+ assert self._onu_id is None or self._onu_id == onu_id, 'ONU-ID can only be set once'
+ self._onu_id = onu_id
+
+ @property
+ def alloc_id(self):
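+ # The alloc-id may not be known when the GEM port is first created; it is
+ # resolved lazily from the referenced TCONT once the handler has learned it.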
+ if self._alloc_id is None and self._handler is not None:
+ try:
+ self._alloc_id = self._handler.tconts.get(self.tcont_ref).get('alloc-id')
+
+ except Exception:
+ pass
+
+ return self._alloc_id
+
+ @property
+ def tcont(self):
+ tcont_item = self._handler.tconts.get(self.tcont_ref)
+ return tcont_item.get('object') if tcont_item is not None else None
+
+ @property
+ def omci_transport(self):
+ return self._omci_transport
+
+ def to_dict(self):
+ return {
+ 'port-id': self.gem_id,
+ 'alloc-id': self.alloc_id,
+ 'encryption': self._encryption,
+ 'omci-transport': self.omci_transport
+ }
diff --git a/voltha/adapters/adtran_olt/xpon/olt_gem_port.py b/voltha/adapters/adtran_olt/xpon/olt_gem_port.py
new file mode 100644
index 0000000..79cde83
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/olt_gem_port.py
@@ -0,0 +1,126 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+
+from gem_port import GemPort
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+log = structlog.get_logger()
+
+
+class OltGemPort(GemPort):
+ """
+ Adtran OLT specific implementation
+ """
+ def __init__(self, gem_id, alloc_id,
+ encryption=False,
+ omci_transport=False,
+ multicast=False,
+ tcont_ref=None,
+ traffic_class=None,
+ intf_ref=None,
+ exception=False, # FIXED_ONU
+ name=None,
+ handler=None,
+ is_mock=False):
+ super(OltGemPort, self).__init__(gem_id, alloc_id,
+ encryption=encryption,
+ omci_transport=omci_transport,
+ multicast=multicast,
+ tcont_ref=tcont_ref,
+ traffic_class=traffic_class,
+ intf_ref=intf_ref,
+ exception=exception,
+ name=name,
+ handler=handler)
+ self._is_mock = is_mock
+
+ @staticmethod
+ def create(handler, gem_port):
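+ # FIXED_ONU workaround: the GEM port IDs below are hard-coded. IDs in the
+ # exception list are treated as exception traffic, and gemport-id 4095 is
+ # treated as the multicast GEM port.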
+ exception = gem_port['gemport-id'] in [2180, 2186, 2192, # FIXED_ONU
+ 2198, 2204, 2210,
+ 2216, 2222, 2228,
+ 2234, 2240, 2246,
+ 2252, 2258]
+ mcast = gem_port['gemport-id'] in [4095]
+
+ # TODO: Use next once real BBF mcast available.
+ # port_ref = 'channel-pair-ref' if mcast else 'venet-ref'
+ port_ref = 'venet-ref' if mcast else 'venet-ref'
+
+ return OltGemPort(gem_port['gemport-id'],
+ None,
+ encryption=gem_port['encryption'], # aes_indicator,
+ tcont_ref=gem_port['tcont-ref'],
+ name=gem_port['name'],
+ traffic_class=gem_port['traffic-class'],
+ intf_ref=gem_port.get(port_ref),
+ handler=handler,
+ multicast=mcast,
+ exception=exception)
+
+ @property
+ def encryption(self):
+ return self._encryption
+
+ @encryption.setter
+ def encryption(self, value):
+ assert isinstance(value, bool), 'encryption is a boolean'
+
+ if self._encryption != value:
+ self._encryption = value
+ self.set_config(self._handler.rest_client, 'encryption', value)
+
+ @inlineCallbacks
+ def add_to_hardware(self, session, pon_id, onu_id, operation='POST'):
+ from ..adtran_olt_handler import AdtranOltHandler
+ log.info('add-gem-port-2-hw', pon_id=pon_id, onu_id=onu_id,
+ operation=operation, gem_port=self)
+ uri = AdtranOltHandler.GPON_GEM_CONFIG_LIST_URI.format(pon_id, onu_id)
+ data = json.dumps(self.to_dict())
+ name = 'gem-port-create-{}-{}: {}/{}'.format(pon_id, onu_id,
+ self.gem_id,
+ self.alloc_id)
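+ # Try a POST first to create the GEM port entry; if it fails (e.g. the
+ # entry already exists on the OLT), the exception handler retries the
+ # same data as a PATCH.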
+ try:
+ results = yield session.request(operation, uri, data=data, name=name)
+
+ except Exception as e:
+ if operation == 'POST':
+ # Yield the PATCH retry so its result, not the Deferred, is returned
+ results = yield self.add_to_hardware(session, pon_id, onu_id,
+ operation='PATCH')
+ else:
+ log.exception('add-2-hw', gem=self, e=e)
+ raise
+
+ returnValue(results)
+
+ def remove_from_hardware(self, session, pon_id, onu_id):
+ from ..adtran_olt_handler import AdtranOltHandler
+
+ uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(pon_id, onu_id, self.gem_id)
+ name = 'gem-port-delete-{}-{}: {}'.format(pon_id, onu_id, self.gem_id)
+ return session.request('DELETE', uri, name=name)
+
+ def set_config(self, session, leaf, value):
+ from ..adtran_olt_handler import AdtranOltHandler
+
+ data = json.dumps({leaf: value})
+ uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(self.pon_id,
+ self.onu_id,
+ self.gem_id)
+ name = 'onu-set-config-{}-{}-{}'.format(self._pon_id, leaf, str(value))
+ return session.request('PATCH', uri, data=data, name=name)
diff --git a/voltha/adapters/adtran_olt/xpon/olt_tcont.py b/voltha/adapters/adtran_olt/xpon/olt_tcont.py
new file mode 100644
index 0000000..5efcdcc
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/olt_tcont.py
@@ -0,0 +1,89 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+from twisted.internet.defer import inlineCallbacks, returnValue
+from tcont import TCont
+
+log = structlog.get_logger()
+
+
+class OltTCont(TCont):
+ """
+ Adtran OLT specific implementation
+ """
+ def __init__(self, alloc_id, traffic_descriptor,
+ name=None, vont_ani=None, is_mock=False):
+ super(OltTCont, self).__init__(alloc_id, traffic_descriptor,
+ name=name, vont_ani=vont_ani)
+ self._is_mock = is_mock
+
+ @staticmethod
+ def create(tcont, td):
+ from traffic_descriptor import TrafficDescriptor
+
+ assert isinstance(tcont, dict), 'TCONT should be a dictionary'
+ assert isinstance(td, TrafficDescriptor), 'Invalid Traffic Descriptor data type'
+
+ return OltTCont(tcont['alloc-id'], td,
+ name=tcont['name'],
+ vont_ani=tcont['vont-ani'])
+
+ @inlineCallbacks
+ def add_to_hardware(self, session, pon_id, onu_id):
+ if self._is_mock:
+ returnValue('mock')
+
+ from ..adtran_olt_handler import AdtranOltHandler
+ log.info('add-tcont-2-hw', pon_id=pon_id, onu_id=onu_id, tcont=self)
+
+ uri = AdtranOltHandler.GPON_TCONT_CONFIG_LIST_URI.format(pon_id, onu_id)
+ data = json.dumps({'alloc-id': self.alloc_id})
+ name = 'tcont-create-{}-{}: {}'.format(pon_id, onu_id, self.alloc_id)
+
+ # For a TCONT, the only leaf is the key (the alloc-id), so only a POST is needed
+ try:
+ results = yield session.request('POST', uri, data=data, name=name,
+ suppress_error=True)
+ except Exception:
+ results = None
+
+ if self.traffic_descriptor is not None:
+ try:
+ results = yield self.traffic_descriptor.add_to_hardware(session,
+ pon_id, onu_id,
+ self.alloc_id)
+ except Exception as e:
+ log.exception('traffic-descriptor', tcont=self,
+ td=self.traffic_descriptor, e=e)
+ raise
+
+ returnValue(results)
+
+ def remove_from_hardware(self, session, pon_id, onu_id):
+ from ..adtran_olt_handler import AdtranOltHandler
+
+ uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, self.alloc_id)
+ name = 'tcont-delete-{}-{}: {}'.format(pon_id, onu_id, self.alloc_id)
+ return session.request('DELETE', uri, name=name)
diff --git a/voltha/adapters/adtran_olt/xpon/olt_traffic_descriptor.py b/voltha/adapters/adtran_olt/xpon/olt_traffic_descriptor.py
new file mode 100644
index 0000000..fd4f753
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/olt_traffic_descriptor.py
@@ -0,0 +1,90 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+from traffic_descriptor import TrafficDescriptor
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+log = structlog.get_logger()
+
+
+class OltTrafficDescriptor(TrafficDescriptor):
+ """
+ Adtran OLT specific implementation
+ """
+ def __init__(self, fixed, assured, maximum,
+ additional=TrafficDescriptor.AdditionalBwEligibility.DEFAULT,
+ best_effort=None,
+ name=None,
+ is_mock=False):
+ super(OltTrafficDescriptor, self).__init__(fixed, assured, maximum,
+ additional=additional,
+ best_effort=best_effort,
+ name=name)
+ self._is_mock = is_mock
+
+ @staticmethod
+ def create(traffic_disc):
+ from best_effort import BestEffort
+
+ assert isinstance(traffic_disc, dict), 'Traffic Descriptor should be a dictionary'
+
+ additional = TrafficDescriptor.AdditionalBwEligibility.from_value(
+ traffic_disc['additional-bw-eligibility-indicator'])
+
+ if additional == TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
+ best_effort = BestEffort(traffic_disc['maximum-bandwidth'],
+ traffic_disc['priority'],
+ traffic_disc['weight'])
+ else:
+ best_effort = None
+
+ return OltTrafficDescriptor(traffic_disc['fixed-bandwidth'],
+ traffic_disc['assured-bandwidth'],
+ traffic_disc['maximum-bandwidth'],
+ name=traffic_disc['name'],
+ best_effort=best_effort,
+ additional=additional)
+
+ @inlineCallbacks
+ def add_to_hardware(self, session, pon_id, onu_id, alloc_id):
+ from ..adtran_olt_handler import AdtranOltHandler
+
+ if self._is_mock:
+ returnValue('mock')
+
+ uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, alloc_id)
+ data = json.dumps({'traffic-descriptor': self.to_dict()})
+ name = 'tcont-td-{}-{}: {}'.format(pon_id, onu_id, alloc_id)
+ try:
+ results = yield session.request('PATCH', uri, data=data, name=name)
+
+ except Exception as e:
+ log.exception('traffic-descriptor', td=self, e=e)
+ raise
+
+ if self.additional_bandwidth_eligibility == \
+ TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
+ if self.best_effort is None:
+ raise ValueError('TCONT is best-effort but does not define best effort sharing')
+
+ try:
+ results = yield self.best_effort.add_to_hardware(session, pon_id, onu_id, alloc_id)
+
+ except Exception as e:
+ log.exception('best-effort', best_effort=self.best_effort, e=e)
+ raise
+
+ returnValue(results)
diff --git a/voltha/adapters/adtran_olt/xpon/tcont.py b/voltha/adapters/adtran_olt/xpon/tcont.py
new file mode 100644
index 0000000..5f2d810
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/tcont.py
@@ -0,0 +1,30 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class TCont(object):
+ """
+ Class to wrap TCont capabilities
+ """
+ def __init__(self, alloc_id, traffic_descriptor, name=None, vont_ani=None):
+ self.alloc_id = alloc_id
+ self.traffic_descriptor = traffic_descriptor
+ self.name = name
+ self.vont_ani = vont_ani # (string) reference
+
+ # TODO: Make this a base class and derive OLT and ONU specific classes from it
+ # The primary difference is the add/remove-from-hardware methods
+
+ def __str__(self):
+ return "TCont: {}, alloc-id: {}".format(self.name, self.alloc_id)
diff --git a/voltha/adapters/adtran_olt/xpon/traffic_descriptor.py b/voltha/adapters/adtran_olt/xpon/traffic_descriptor.py
new file mode 100644
index 0000000..96b7ce5
--- /dev/null
+++ b/voltha/adapters/adtran_olt/xpon/traffic_descriptor.py
@@ -0,0 +1,78 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+
+
+class TrafficDescriptor(object):
+ """
+ Class to wrap the uplink traffic descriptor.
+ """
+ class AdditionalBwEligibility(Enum):
+ NONE = 0
+ BEST_EFFORT_SHARING = 1
+ NON_ASSURED_SHARING = 2 # Should match xpon.py values
+ DEFAULT = NONE
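+ # Note: DEFAULT is an alias of NONE; from_value() falls back to it for
+ # unrecognized values.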
+
+ @staticmethod
+ def to_string(value):
+ return {
+ TrafficDescriptor.AdditionalBwEligibility.NON_ASSURED_SHARING: "non-assured-sharing",
+ TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING: "best-effort-sharing",
+ TrafficDescriptor.AdditionalBwEligibility.NONE: "none"
+ }.get(value, "unknown")
+
+ @staticmethod
+ def from_value(value):
+ """
+ Convert a numeric eligibility value to the corresponding enumeration.
+ Matches both the Adtran and xPON numeric encodings.
+ :param value: (int) additional bandwidth eligibility value
+ :return: (AdditionalBwEligibility) matching enumeration, DEFAULT if unknown
+ """
+ return {
+ 0: TrafficDescriptor.AdditionalBwEligibility.NONE,
+ 1: TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING,
+ 2: TrafficDescriptor.AdditionalBwEligibility.NON_ASSURED_SHARING,
+ }.get(value, TrafficDescriptor.AdditionalBwEligibility.DEFAULT)
+
+ def __init__(self, fixed, assured, maximum,
+ additional=AdditionalBwEligibility.DEFAULT,
+ best_effort=None,
+ name=None):
+ self.name = name
+ self.fixed_bandwidth = fixed # bps
+ self.assured_bandwidth = assured # bps
+ self.maximum_bandwidth = maximum # bps
+ self.additional_bandwidth_eligibility = additional
+ self.best_effort = best_effort\
+ if additional == TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING\
+ else None
+
+ def __str__(self):
+ return "TrafficDescriptor: {}, {}/{}/{}".format(self.name,
+ self.fixed_bandwidth,
+ self.assured_bandwidth,
+ self.maximum_bandwidth)
+
+ def to_dict(self):
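+ # Keys follow the leaf names used in the OLT traffic-descriptor
+ # configuration (see OltTrafficDescriptor.add_to_hardware).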
+ val = {
+ 'fixed-bandwidth': self.fixed_bandwidth,
+ 'assured-bandwidth': self.assured_bandwidth,
+ 'maximum-bandwidth': self.maximum_bandwidth,
+ 'additional-bandwidth-eligibility':
+ TrafficDescriptor.AdditionalBwEligibility.to_string(
+ self.additional_bandwidth_eligibility)
+ }
+ return val
+
diff --git a/voltha/adapters/adtran_onu/adtran_onu.py b/voltha/adapters/adtran_onu/adtran_onu.py
index 426ddfd..c1ff799 100755
--- a/voltha/adapters/adtran_onu/adtran_onu.py
+++ b/voltha/adapters/adtran_onu/adtran_onu.py
@@ -17,35 +17,18 @@
"""
Adtran ONU adapter.
"""
-
-from uuid import uuid4
-from twisted.internet import reactor
-from twisted.internet.defer import DeferredQueue, inlineCallbacks, returnValue, succeed
-
+import binascii
from voltha.adapters.iadapter import OnuAdapter
-from voltha.core.logical_device_agent import mac_str_to_tuple
from voltha.protos import third_party
-from voltha.protos.common_pb2 import OperStatus, ConnectStatus, \
- AdminState
-from voltha.protos.device_pb2 import DeviceTypes, Port, Image
-from voltha.protos.health_pb2 import HealthStatus
-from voltha.protos.logical_device_pb2 import LogicalPort
-from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, OFPPF_10GB_FD
-from voltha.protos.openflow_13_pb2 import ofp_port
-from common.frameio.frameio import hexify
from voltha.extensions.omci.omci import *
-from voltha.protos.bbf_fiber_base_pb2 import OntaniConfig, VOntaniConfig, VEnetConfig
-from voltha.adapters.adtran_olt.tcont import TCont, TrafficDescriptor, BestEffort
-from voltha.adapters.adtran_olt.gem_port import GemPort
+from adtran_onu_handler import AdtranOnuHandler
+from twisted.internet import reactor
_ = third_party
-_MAX_INCOMING_OMCI_MESSAGES = 10
-_OMCI_TIMEOUT = 10
-_STARTUP_RETRY_WAIT = 5
-
class AdtranOnuAdapter(OnuAdapter):
+
def __init__(self, adapter_agent, config):
self.log = structlog.get_logger()
super(AdtranOnuAdapter, self).__init__(adapter_agent=adapter_agent,
@@ -53,1283 +36,265 @@
device_handler_class=AdtranOnuHandler,
name='adtran_onu',
vendor='Adtran, Inc.',
- version='0.2',
+ version='0.3',
device_type='adtran_onu',
vendor_id='ADTN')
+ def suppress_alarm(self, filter):
+ raise NotImplementedError()
+
+ def unsuppress_alarm(self, filter):
+ raise NotImplementedError()
+
+ def download_image(self, device, request):
+ raise NotImplementedError()
+
+ def activate_image_update(self, device, request):
+ raise NotImplementedError()
+
+ def cancel_image_download(self, device, request):
+ raise NotImplementedError()
+
+ def revert_image_update(self, device, request):
+ raise NotImplementedError()
+
+ def get_image_download_status(self, device, request):
+ raise NotImplementedError()
+
+ def update_flows_incrementally(self, device, flow_changes, group_changes):
+ raise NotImplementedError()
+
+ def send_proxied_message(self, proxy_address, msg):
+ raise NotImplementedError('Not an ONU method')
+
+ def get_device_details(self, device):
+ raise NotImplementedError('TODO: Not currently supported')
+
+ def change_master_state(self, master):
+ raise NotImplementedError('Not currently supported or required')
+
+ def receive_inter_adapter_message(self, msg):
+ # Currently the only OLT Device adapter that uses this is the EdgeCore
+
+ self.log.info('receive_inter_adapter_message', msg=msg)
+ proxy_address = msg['proxy_address']
+ assert proxy_address is not None
+
+ # Device_id from the proxy_address is the olt device id. We need to
+ # get the onu device id using the port number in the proxy_address
+
+ device = self.adapter_agent.get_child_device_with_proxy_address(proxy_address)
+
+ if device is not None:
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.rx_inter_adapter_message(msg)
+
+ def abandon_device(self, device):
+ raise NotImplementedError('TODO: Not currently supported')
+
+ def receive_onu_detect_state(self, proxy_address, state):
+ raise NotImplementedError('TODO: Not currently supported')
+
+ def receive_packet_out(self, logical_device_id, egress_port_no, msg):
+ raise NotImplementedError('Not an ONU method')
+
+ def receive_proxied_message(self, proxy_address, msg):
+ self.log.debug('receive-proxied-message', proxy_address=proxy_address,
+ device_id=proxy_address.device_id, msg=binascii.hexlify(msg))
+ # Device_id from the proxy_address is the olt device id. We need to
+ # get the onu device id using the port number in the proxy_address
+ device = self.adapter_agent.get_child_device_with_proxy_address(proxy_address)
+
+ if device is not None:
+ handler = self.devices_handlers[device.id]
+ if handler is not None:
+ handler.receive_message(msg)
+
+ ######################################################################
+ # PON Mgmt APIs
+
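+ # The interface, GEM port, and multicast entry points below dispatch to the
+ # handler's generic xpon_create/xpon_update/xpon_remove methods; the TCONT
+ # entry points keep dedicated handler calls since they carry both the TCONT
+ # and its traffic descriptor.
+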
+ def create_interface(self, device, data):
+ """
+ API to create various interfaces (only some PON interfaces as of now)
+ in the devices
+ """
+
+ self.log.debug('create-interface', data=data)
+
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ reactor.callLater(0, handler.xpon_create, data)
+
+ def update_interface(self, device, data):
+ """
+ API to update various interfaces (only some PON interfaces as of now)
+ in the devices
+ """
+ self.log.debug('update-interface', data=data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
+
+ def remove_interface(self, device, data):
+ """
+ API to delete various interfaces (only some PON interfaces as of now)
+ in the devices
+ """
+ self.log.debug('remove-interface', data=data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
+
def create_tcont(self, device, tcont_data, traffic_descriptor_data):
"""
API to create tcont object in the devices
:param device: device id
- :tcont_data: tcont data object
- :traffic_descriptor_data: traffic descriptor data object
+ :param tcont_data: tcont data object
+ :param traffic_descriptor_data: traffic descriptor data object
:return: None
"""
self.log.info('create-tcont', tcont_data=tcont_data,
traffic_descriptor_data=traffic_descriptor_data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_tcont(tcont_data, traffic_descriptor_data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.create_tcont(tcont_data, traffic_descriptor_data)
def update_tcont(self, device, tcont_data, traffic_descriptor_data):
"""
API to update tcont object in the devices
:param device: device id
- :tcont_data: tcont data object
- :traffic_descriptor_data: traffic descriptor data object
+ :param tcont_data: tcont data object
+ :param traffic_descriptor_data: traffic descriptor data object
:return: None
"""
self.log.info('update-tcont', tcont_data=tcont_data,
traffic_descriptor_data=traffic_descriptor_data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.update_tcont(tcont_data, traffic_descriptor_data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.update_tcont(tcont_data, traffic_descriptor_data)
def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
"""
API to delete tcont object in the devices
:param device: device id
- :tcont_data: tcont data object
- :traffic_descriptor_data: traffic descriptor data object
+ :param tcont_data: tcont data object
+ :param traffic_descriptor_data: traffic descriptor data object
:return: None
"""
self.log.info('remove-tcont', tcont_data=tcont_data,
traffic_descriptor_data=traffic_descriptor_data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.remove_tcont(tcont_data, traffic_descriptor_data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.remove_tcont(tcont_data, traffic_descriptor_data)
def create_gemport(self, device, data):
"""
API to create gemport object in the devices
:param device: device id
- :data: gemport data object
+ :param data: gemport data object
:return: None
"""
self.log.info('create-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_create(data)
def update_gemport(self, device, data):
"""
API to update gemport object in the devices
:param device: device id
- :data: gemport data object
+ :param data: gemport data object
:return: None
"""
self.log.info('update-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.update_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
def remove_gemport(self, device, data):
"""
API to delete gemport object in the devices
:param device: device id
- :data: gemport data object
+ :param data: gemport data object
:return: None
"""
self.log.info('remove-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.remove_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
def create_multicast_gemport(self, device, data):
"""
API to create multicast gemport object in the devices
:param device: device id
- :data: multicast gemport data object
+ :param data: multicast gemport data object
:return: None
"""
self.log.info('create-mcast-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_create(data)
def update_multicast_gemport(self, device, data):
"""
API to update multicast gemport object in the devices
:param device: device id
- :data: multicast gemport data object
+ :param data: multicast gemport data object
:return: None
"""
self.log.info('update-mcast-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.update_multicast_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
def remove_multicast_gemport(self, device, data):
"""
API to delete multicast gemport object in the devices
:param device: device id
- :data: multicast gemport data object
+ :param data: multicast gemport data object
:return: None
"""
self.log.info('remove-mcast-gemport', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.remove_multicast_gemport(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
def create_multicast_distribution_set(self, device, data):
"""
API to create multicast distribution rule to specify
the multicast VLANs that ride on the multicast gemport
:param device: device id
- :data: multicast distribution data object
+ :param data: multicast distribution data object
:return: None
"""
self.log.info('create-mcast-distribution-set', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_distribution_set(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_create(data)
def update_multicast_distribution_set(self, device, data):
"""
API to update multicast distribution rule to specify
the multicast VLANs that ride on the multicast gemport
:param device: device id
- :data: multicast distribution data object
+ :param data: multicast distribution data object
:return: None
"""
self.log.info('update-mcast-distribution-set', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_distribution_set(data)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_update(data)
def remove_multicast_distribution_set(self, device, data):
"""
API to delete multicast distribution rule to specify
the multicast VLANs that ride on the multicast gemport
:param device: device id
- :data: multicast distribution data object
+ :param data: multicast distribution data object
:return: None
"""
self.log.info('remove-mcast-distribution-set', data=data)
- if device.id in self.devices_handlers:
- handler = self.devices_handlers[device.id]
- if handler is not None:
- handler.create_multicast_distribution_set(data)
-
-
-class AdtranOnuHandler(object):
- def __init__(self, adapter, device_id):
- self.adapter = adapter
- self.adapter_agent = adapter.adapter_agent
- self.device_id = device_id
- self.logical_device_id = None
- self.enabled = True
- self.log = structlog.get_logger(device_id=device_id)
- self.incoming_messages = DeferredQueue(size=_MAX_INCOMING_OMCI_MESSAGES)
- self.proxy_address = None
- self.tx_id = 0
- self.last_response = None
- self.ofp_port_no = None
- self.control_vlan = None
- # reference of uni_port is required when re-enabling the device if
- # it was disabled previously
- self.uni_port = None
- self.pon_port = None
- self._v_ont_anis = {} # Name -> dict
- self._ont_anis = {} # Name -> dict
- self._v_enets = {} # Name -> dict
- self._tconts = {} # Name -> dict
- self._traffic_descriptors = {} # Name -> dict
- self._gem_ports = {} # Name -> dict
- self._deferred = None
-
- def _cancel_deferred(self):
- d, self._deferred = self._deferred, None
- try:
- if d is not None and not d.called:
- d.cancel()
- except:
- pass
-
- def receive_message(self, msg):
- try:
- self.incoming_messages.put(msg)
-
- except Exception as e:
- self.log.exception('rx-msg', e=e)
-
- def activate(self, device):
- self.log.info('activating')
-
- # first we verify that we got parent reference and proxy info
- assert device.parent_id, 'Invalid Parent ID'
- assert device.proxy_address.device_id, 'Invalid Device ID'
- # assert device.proxy_address.channel_id, 'invalid Channel ID'
-
- self._cancel_deferred()
-
- # register for proxied messages right away
- self.proxy_address = device.proxy_address
- self.adapter_agent.register_for_proxied_messages(device.proxy_address)
-
- # populate device info
- device.root = True
- device.vendor = 'Adtran Inc.'
- device.model = '10G GPON ONU' # TODO: get actual number
- device.model = '10G GPON ONU' # TODO: get actual number
- device.hardware_version = 'NOT AVAILABLE'
- device.firmware_version = 'NOT AVAILABLE'
-
- # TODO: Support more versions as needed
- images = Image(version='NOT AVAILABLE')
- device.images.image.extend([images])
-
- device.connect_status = ConnectStatus.UNKNOWN
- self.adapter_agent.update_device(device)
-
- # register physical ports
- self.pon_port = Port(port_no=1,
- label='PON port',
- type=Port.PON_ONU,
- admin_state=AdminState.ENABLED,
- oper_status=OperStatus.ACTIVE,
- peers=[Port.PeerPort(device_id=device.parent_id,
- port_no=device.parent_port_no)])
-
- self.uni_port = Port(port_no=2,
- label='Ethernet port',
- type=Port.ETHERNET_UNI,
- admin_state=AdminState.ENABLED,
- oper_status=OperStatus.ACTIVE)
-
- self.adapter_agent.add_port(device.id, self.uni_port)
- self.adapter_agent.add_port(device.id, self.pon_port)
-
- # add uni port to logical device
- parent_device = self.adapter_agent.get_device(device.parent_id)
- self.logical_device_id = parent_device.parent_id
- assert self.logical_device_id, 'Invalid logical device ID'
-
- if device.vlan:
- # vlan non-zero if created via legacy method (not xPON). Also
- # Set a random serial number since not xPON based
-
- device.serial_number = uuid4().hex
- self._add_logical_port(device.vlan, control_vlan=device.vlan)
-
- # Begin ONU Activation sequence
- self._deferred = reactor.callLater(0, self.message_exchange)
-
- self.adapter_agent.update_device(device)
-
- def _add_logical_port(self, openflow_port_no, control_vlan=None,
- capabilities=OFPPF_10GB_FD | OFPPF_FIBER,
- speed=OFPPF_10GB_FD):
-
- if self.ofp_port_no is None:
- self.ofp_port_no = openflow_port_no
- self.control_vlan = control_vlan
-
- device = self.adapter_agent.get_device(self.device_id)
-
- if control_vlan is not None and device.vlan != control_vlan:
- device.vlan = control_vlan
- self.adapter_agent.update_device(device)
-
- openflow_port = ofp_port(
- port_no=openflow_port_no,
- hw_addr=mac_str_to_tuple('08:00:%02x:%02x:%02x:%02x' %
- ((device.parent_port_no >> 8 & 0xff),
- device.parent_port_no & 0xff,
- (openflow_port_no >> 8) & 0xff,
- openflow_port_no & 0xff)),
- name='uni-{}'.format(openflow_port_no),
- config=0,
- state=OFPPS_LIVE,
- curr=capabilities,
- advertised=capabilities,
- peer=capabilities,
- curr_speed=speed,
- max_speed=speed
- )
- self.adapter_agent.add_logical_port(self.logical_device_id,
- LogicalPort(
- id='uni-{}'.format(openflow_port),
- ofp_port=openflow_port,
- device_id=device.id,
- device_port_no=self.uni_port.port_no))
-
- def _get_uni_port(self):
- ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_UNI)
- if ports:
- # For now, we use on one uni port
- return ports[0]
-
- def _get_pon_port(self):
- ports = self.adapter_agent.get_ports(self.device_id, Port.PON_ONU)
- if ports:
- # For now, we use on one uni port
- return ports[0]
-
- def reconcile(self, device):
- self.log.info('reconciling-ONU-device-starts')
-
- # first we verify that we got parent reference and proxy info
- assert device.parent_id
- assert device.proxy_address.device_id
- # assert device.proxy_address.channel_id
- self._cancel_deferred()
-
- # register for proxied messages right away
- self.proxy_address = device.proxy_address
- self.adapter_agent.register_for_proxied_messages(device.proxy_address)
-
- # Set the connection status to REACHABLE
- device.connect_status = ConnectStatus.REACHABLE
- self.adapter_agent.update_device(device)
- self.enabled = True
-
- # TODO: Verify that the uni, pon and logical ports exists
-
- # Mark the device as REACHABLE and ACTIVE
- device = self.adapter_agent.get_device(device.id)
- device.connect_status = ConnectStatus.REACHABLE
- device.oper_status = OperStatus.ACTIVE
- self.adapter_agent.update_device(device)
-
- self.log.info('reconciling-ONU-device-ends')
-
- @inlineCallbacks
- def update_flow_table(self, device, flows):
- import voltha.core.flow_decomposer as fd
- from voltha.protos.openflow_13_pb2 import OFPP_IN_PORT, OFPP_TABLE, OFPP_NORMAL, OFPP_FLOOD, OFPP_ALL
- from voltha.protos.openflow_13_pb2 import OFPP_CONTROLLER, OFPP_LOCAL, OFPP_ANY, OFPP_MAX
- #
- # We need to proxy through the OLT to get to the ONU
- # Configuration from here should be using OMCI
- #
- self.log.info('update_flow_table', device_id=device.id, flows=flows)
-
- for flow in flows:
- # TODO: Do we get duplicates here (ie all flows re-pushed on each individual flow add?)
-
- in_port = fd.get_in_port(flow)
- out_port = fd.get_out_port(flow)
- self.log.debug('InPort: {}, OutPort: {}'.format(in_port, out_port))
-
- for field in fd.get_ofb_fields(flow):
- self.log.debug('Found OFB field', field=field)
-
- for action in fd.get_actions(flow):
- self.log.debug('Found Action', action=action)
-
- raise NotImplementedError()
-
- def get_tx_id(self):
- self.tx_id += 1
- return self.tx_id
-
- def send_omci_message(self, frame):
- _frame = hexify(str(frame))
- self.log.info('send-omci-message-%s' % _frame)
- device = self.adapter_agent.get_device(self.device_id)
- try:
- self.adapter_agent.send_proxied_message(device.proxy_address, _frame)
- except Exception as e:
- self.log.info('send-omci-message-exception', exc=str(e))
-
- @inlineCallbacks
- def wait_for_response(self):
- self.log.info('wait-for-response') # TODO: Add timeout
-
- def add_watchdog(deferred, timeout=_OMCI_TIMEOUT):
- from twisted.internet import defer
-
- def callback(value):
- if not watchdog.called:
- watchdog.cancel()
- return value
-
- deferred.addBoth(callback)
-
- from twisted.internet import reactor
- watchdog = reactor.callLater(timeout, defer.timeout, deferred)
- return deferred
-
- try:
- response = yield add_watchdog(self.incoming_messages.get())
-
- self.log.info('got-response')
- resp = OmciFrame(response)
- resp.show()
- #returnValue(resp)
- self.last_response = resp
-
- except Exception as e:
- self.last_response = None
- raise e
-
- @inlineCallbacks
- def message_exchange(self):
- self.log.info('message-exchange')
- self._deferred = None
-
- if self.device_id is None or self.incoming_messages is None:
- returnValue(succeed('deleted'))
-
- # reset incoming message queue
- while self.incoming_messages.pending:
- _ = yield self.incoming_messages.get()
-
- ####################################################
- # Start by getting some useful device information
-
- device = self.adapter_agent.get_device(self.device_id)
- # TODO device.oper_status = OperStatus.ACTIVATING
-
- device.oper_status = OperStatus.ACTIVE
- device.connect_status = ConnectStatus.REACHABLE
- self.adapter_agent.update_device(device)
-
- if not self.enabled:
- # Try again later
- self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT,
- self.message_exchange)
- # TODO device.connect_status = ConnectStatus.UNREACHABLE
-
- try:
- # TODO: Handle tx/wait-for-response timeouts and retry logic.
- # May timeout to ONU not fully discovered (can happen in xPON case)
- # or other errors.
-
- # Decode fields in response and update device info
- self.send_get_OntG('vendor_id')
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- device.vendor = data["vendor_id"]
-
- # Mark as reachable if at least first message gets through
- device.connect_status = ConnectStatus.REACHABLE
-
- self.send_get_cardHolder('actual_plugin_unit_type', 257)
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- device.type = str(data["actual_plugin_unit_type"])
-
- self.send_get_circuit_pack('number_of_ports', 257)
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- device.type = str(data["number_of_ports"])
-
- self.send_get_IpHostConfigData('mac_address', 515)
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- device.mac_address = str(data["mac_address"])
-
- self.send_get_Ont2G('equipment_id', 0)
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- eqptId_bootVersion = str(data["equipment_id"])
- eqptId = eqptId_bootVersion[0:10]
- bootVersion = eqptId_bootVersion[12:20]
-
- self.send_get_Ont2G('omcc_version', 0)
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- #decimal version
- omciVersion = str(data["omcc_version"])
-
- self.send_get_Ont2G('vendor_product_code', 0)
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- #decimal value
- vedorProductCode = str(data["vendor_product_code"])
-
- self.send_get_OntG('version', 0)
- yield self.wait_for_response()
- response = self.last_response
- omci_response = response.getfieldval("omci_message")
- data = omci_response.getfieldval("data")
- device.hardware_version = str(data["version"])
-
- # Possbility of bug in ONT Firmware. uncomment this code after it is fixed.
- # self.send_get_SoftwareImage('version',0)
- # yield self.wait_for_response()
- # response = self.last_response
- # omci_response = response.getfieldval("omci_message")
- # data = omci_response.getfieldval("data")
- # device.firmware_version = str(data["version"])
-
- self.send_set_adminState(257)
- yield self.wait_for_response()
- response = self.last_response
-
- # device.model = '10G GPON ONU' # TODO: get actual number
- # device.hardware_version = 'TODO: to be filled'
- # device.firmware_version = 'TODO: to be filled'
- # device.serial_number = uuid4().hex
- # TODO: Support more versions as needed
- # images = Image(version=results.get('software_version', 'unknown'))
- # device.images.image.extend([images])
-
- # self.adapter_agent.update_device(device)
- device.oper_status = OperStatus.ACTIVE
- device.connect_status = ConnectStatus.REACHABLE
-
- except Exception as e:
- self.log.debug('Failed', e=e)
- # Try again later. May not have been discovered
- self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT,
- self.message_exchange)
-
- ####################################################
-
- self.log.info('onu-activated')
-
- # self.send_get_circuit_pack()
- # yield self.wait_for_response()
- self.adapter_agent.update_device(device)
-
- def send_mib_reset(self, entity_id=0):
- self.log.info('send_mib_reset')
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciMibReset.message_id,
- omci_message=OmciMibReset(
- entity_class=OntData.class_id,
- entity_id=entity_id
- )
- )
- self.send_omci_message(frame)
-
- def send_set_tcont(self, entity_id, alloc_id):
- data = dict(
- alloc_id=alloc_id
- )
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciSet.message_id,
- omci_message=OmciSet(
- entity_class=Tcont.class_id,
- entity_id=entity_id,
- attributes_mask=Tcont.mask_for(*data.keys()),
- data=data
- )
- )
- self.send_omci_message(frame)
-
- def send_create_gem_port_network_ctp(self, entity_id, port_id,
- tcont_id, direction, tm):
- _directions = {"upstream": 1, "downstream": 2, "bi-directional": 3}
- if _directions.has_key(direction):
- _direction = _directions[direction]
- else:
- self.log.error('invalid-gem-port-direction', direction=direction)
- raise ValueError('Invalid GEM port direction: {_dir}'.format(_dir=direction))
-
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciCreate.message_id,
- omci_message=OmciCreate(
- entity_class=GemPortNetworkCtp.class_id,
- entity_id=entity_id,
- data=dict(
- port_id=port_id,
- tcont_pointer=tcont_id,
- direction=_direction,
- traffic_management_pointer_upstream=tm
- )
- )
- )
- self.send_omci_message(frame)
-
- def send_set_8021p_mapper_service_profile(self, entity_id, interwork_tp_id):
- data = dict(
- interwork_tp_pointer_for_p_bit_priority_0=interwork_tp_id
- )
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciSet.message_id,
- omci_message=OmciSet(
- entity_class=Ieee8021pMapperServiceProfile.class_id,
- entity_id=entity_id,
- attributes_mask=Ieee8021pMapperServiceProfile.mask_for(
- *data.keys()),
- data=data
- )
- )
- self.send_omci_message(frame)
-
- def send_create_mac_bridge_service_profile(self, entity_id):
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciCreate.message_id,
- omci_message=OmciCreate(
- entity_class=MacBridgeServiceProfile.class_id,
- entity_id=entity_id,
- data=dict(
- spanning_tree_ind=False,
- learning_ind=True,
- priority=0x8000,
- max_age=20 * 256,
- hello_time=2 * 256,
- forward_delay=15 * 256,
- unknown_mac_address_discard=True
- )
- )
- )
- self.send_omci_message(frame)
-
- def send_create_8021p_mapper_service_profile(self, entity_id):
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciCreate.message_id,
- omci_message=OmciCreate(
- entity_class=Ieee8021pMapperServiceProfile.class_id,
- entity_id=entity_id,
- data=dict(
- tp_pointer=OmciNullPointer,
- interwork_tp_pointer_for_p_bit_priority_0=OmciNullPointer,
- )
- )
- )
- self.send_omci_message(frame)
-
- def send_create_gal_ethernet_profile(self, entity_id, max_gem_payload_size):
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciCreate.message_id,
- omci_message=OmciCreate(
- entity_class=GalEthernetProfile.class_id,
- entity_id=entity_id,
- data=dict(
- max_gem_payload_size=max_gem_payload_size
- )
- )
- )
- self.send_omci_message(frame)
-
- def send_create_gem_inteworking_tp(self, entity_id, gem_port_net_ctp_id,
- service_profile_id):
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciCreate.message_id,
- omci_message=OmciCreate(
- entity_class=GemInterworkingTp.class_id,
- entity_id=entity_id,
- data=dict(
- gem_port_network_ctp_pointer=gem_port_net_ctp_id,
- interworking_option=5,
- service_profile_pointer=service_profile_id,
- interworking_tp_pointer=0x0,
- gal_profile_pointer=0x1
- )
- )
- )
- self.send_omci_message(frame)
-
- def send_create_mac_bridge_port_configuration_data(self, entity_id, bridge_id,
- port_id, tp_type, tp_id):
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciCreate.message_id,
- omci_message=OmciCreate(
- entity_class=MacBridgePortConfigurationData.class_id,
- entity_id=entity_id,
- data=dict(
- bridge_id_pointer=bridge_id,
- port_num=port_id,
- tp_type=tp_type,
- tp_pointer=tp_id
- )
- )
- )
- self.send_omci_message(frame)
-
- def send_create_vlan_tagging_filter_data(self, entity_id, vlan_id):
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciCreate.message_id,
- omci_message=OmciCreate(
- entity_class=VlanTaggingFilterData.class_id,
- entity_id=entity_id,
- data=dict(
- vlan_filter_0=vlan_id,
- forward_operation=0x10,
- number_of_entries=1
- )
- )
- )
- self.send_omci_message(frame)
-
- def send_get_circuit_pack(self, attribute, entity_id=0):
- self.log.info('send_get_circuit_pack: entry')
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciGet.message_id,
- omci_message=OmciGet(
- entity_class=CircuitPack.class_id,
- entity_id=entity_id,
- attributes_mask=CircuitPack.mask_for(attribute)
- )
- )
- self.send_omci_message(frame)
-
- def send_get_device_info(self, attribute, entity_id=0):
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciGet.message_id,
- omci_message=OmciGet(
- entity_class=CircuitPack.class_id,
- entity_id=entity_id,
- attributes_mask=CircuitPack.mask_for(attribute)
- )
- )
- self.send_omci_message(frame)
-
- def send_get_OntG(self, attribute, entity_id=0):
- self.log.info('send_get_OntG: entry')
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciGet.message_id,
- omci_message=OmciGet(
- entity_class=OntG.class_id,
- entity_id=entity_id,
- attributes_mask=OntG.mask_for(attribute)
- )
- )
- self.send_omci_message(frame)
-
- # def send_get_OntG(self, entity_id=0):
- # self.log.info('send_get_OntG: entry')
- # frame = OmciFrame(
- # transaction_id=self.get_tx_id(),
- # message_type=OmciGet.message_id,
- # omci_message=OmciGet(
- # entity_class=OntG.class_id,
- # entity_id=0,
- # attributes_mask=OntG.mask_for('vendor_id')
- # )
- # )
- # self.log.info('send_get_OntG: sending')
- # self.send_omci_message(frame)
- # self.log.info('send_get_OntG: sent')
-
- def send_get_Ont2G(self, attribute, entity_id=0):
- self.log.info('send_get_Ont2G: entry')
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciGet.message_id,
- omci_message=OmciGet(
- entity_class=Ont2G.class_id,
- entity_id=entity_id,
- attributes_mask=Ont2G.mask_for(attribute)
- )
- )
-
- self.send_omci_message(frame)
-
- def send_get_cardHolder(self, attribute, entity_id=0):
- self.log.info('send_get_cardHolder: entry')
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciGet.message_id,
- omci_message=OmciGet(
- entity_class=Cardholder.class_id,
- entity_id=entity_id,
- attributes_mask=Cardholder.mask_for(attribute)
- )
- )
- self.send_omci_message(frame)
-
- def send_set_adminState(self,entity_id):
- self.log.info('send_set_AdminState: entry')
- data = dict(
- administrative_state=0
- )
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciSet.message_id,
- omci_message=OmciSet(
- entity_class=PptpEthernetUni.class_id,
- entity_id=entity_id,
- attributes_mask=PptpEthernetUni.mask_for(*data.keys()),
- data=data
- )
- )
- self.send_omci_message(frame)
-
- def send_get_IpHostConfigData(self, attribute, entity_id=0):
- self.log.info('send_get_IpHostConfigData: entry')
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciGet.message_id,
- omci_message=OmciGet(
- entity_class=IpHostConfigData.class_id,
- entity_id=entity_id,
- attributes_mask=IpHostConfigData.mask_for(attribute)
- )
- )
- self.send_omci_message(frame)
-
- def send_get_SoftwareImage(self, attribute, entity_id=0):
- self.log.info('send_get_SoftwareImage: entry')
- frame = OmciFrame(
- transaction_id=self.get_tx_id(),
- message_type=OmciGet.message_id,
- omci_message=OmciGet(
- entity_class=SoftwareImage.class_id,
- entity_id=entity_id,
- attributes_mask=SoftwareImage.mask_for(attribute)
- )
- )
- self.send_omci_message(frame)
-
- @inlineCallbacks
- def reboot(self):
- from common.utils.asleep import asleep
- self.log.info('rebooting', device_id=self.device_id)
- self._cancel_deferred()
-
- # Update the operational status to ACTIVATING and connect status to
- # UNREACHABLE
- device = self.adapter_agent.get_device(self.device_id)
- previous_oper_status = device.oper_status
- previous_conn_status = device.connect_status
- device.oper_status = OperStatus.ACTIVATING
- device.connect_status = ConnectStatus.UNREACHABLE
-
- self.adapter_agent.update_device(device)
-
- # Sleep 10 secs, simulating a reboot
- # TODO: send alert and clear alert after the reboot
- yield asleep(10) # TODO: Need to reboot for real
-
- # Change the operational status back to its previous state. With a
- # real OLT the operational state should be the state the device is
- # after a reboot.
- # Get the latest device reference
- device = self.adapter_agent.get_device(self.device_id)
- device.oper_status = previous_oper_status
- device.connect_status = previous_conn_status
- self.adapter_agent.update_device(device)
- self.log.info('rebooted', device_id=self.device_id)
-
- def self_test_device(self, device):
- """
- This is called to Self a device based on a NBI call.
- :param device: A Voltha.Device object.
- :return: Will return result of self test
- """
- from voltha.protos.voltha_pb2 import SelfTestResponse
- self.log.info('self-test-device', device=device.id)
- # TODO: Support self test?
- return SelfTestResponse(result=SelfTestResponse.NOT_SUPPORTED)
-
- def disable(self):
- self.log.info('disabling', device_id=self.device_id)
- self.enabled = False
- self._cancel_deferred()
-
- # Get the latest device reference
- device = self.adapter_agent.get_device(self.device_id)
-
- # Disable all ports on that device
- self.adapter_agent.disable_all_ports(self.device_id)
-
- # Update the device operational status to UNKNOWN
- device.oper_status = OperStatus.UNKNOWN
- device.connect_status = ConnectStatus.UNREACHABLE
- self.adapter_agent.update_device(device)
-
- # Remove the uni logical port from the OLT, if still present
- parent_device = self.adapter_agent.get_device(device.parent_id)
- assert parent_device
- logical_device_id = parent_device.parent_id
- assert logical_device_id
- port_no, self.ofp_port_no = self.ofp_port_no, None
- port_id = 'uni-{}'.format(port_no)
-
- try:
- port = self.adapter_agent.get_logical_port(logical_device_id,
- port_id)
- self.adapter_agent.delete_logical_port(logical_device_id, port)
- except KeyError:
- self.log.info('logical-port-not-found', device_id=self.device_id,
- portid=port_id)
-
- # Remove pon port from parent
- self.pon_port = self._get_pon_port()
- self.adapter_agent.delete_port_reference_from_parent(self.device_id,
- self.pon_port)
-
- # Just updating the port status may be an option as well
- # port.ofp_port.config = OFPPC_NO_RECV
- # yield self.adapter_agent.update_logical_port(logical_device_id,
- # port)
- # Unregister for proxied message
- self.adapter_agent.unregister_for_proxied_messages(
- device.proxy_address)
-
- # TODO:
- # 1) Remove all flows from the device
- # 2) Remove the device from ponsim
-
- self.log.info('disabled', device_id=device.id)
-
- def reenable(self):
- self.log.info('re-enabling', device_id=self.device_id)
- try:
- # Get the latest device reference
- device = self.adapter_agent.get_device(self.device_id)
- self._cancel_deferred()
-
- # First we verify that we got parent reference and proxy info
- assert device.parent_id
- assert device.proxy_address.device_id
- # assert device.proxy_address.channel_id
-
- # Re-register for proxied messages right away
- self.proxy_address = device.proxy_address
- self.adapter_agent.register_for_proxied_messages(
- device.proxy_address)
-
- # Re-enable the ports on that device
- self.adapter_agent.enable_all_ports(self.device_id)
-
- # Refresh the port reference
- self.uni_port = self._get_uni_port()
- self.pon_port = self._get_pon_port()
-
- # Add the pon port reference to the parent
- self.adapter_agent.add_port_reference_to_parent(device.id,
- self.pon_port)
-
- # Update the connect status to REACHABLE
- device.connect_status = ConnectStatus.REACHABLE
- self.adapter_agent.update_device(device)
-
- # re-add uni port to logical device
- parent_device = self.adapter_agent.get_device(device.parent_id)
- self.logical_device_id = parent_device.parent_id
- assert self.logical_device_id, 'Invalid logical device ID'
-
- if device.vlan:
- # vlan non-zero if created via legacy method (not xPON)
- self._add_logical_port(device.vlan, device.vlan,
- control_vlan=device.vlan)
-
- device = self.adapter_agent.get_device(device.id)
- device.oper_status = OperStatus.ACTIVE
- self.enabled = True
-
- self.adapter_agent.update_device(device)
-
- self.log.info('re-enabled', device_id=device.id)
- except Exception, e:
- self.log.exception('error-reenabling', e=e)
-
- def delete(self):
- self.log.info('deleting', device_id=self.device_id)
- # A delete request may be received when an OLT is disabled
-
- self.enabled = False
- self._cancel_deferred()
-
- # TODO: Need to implement this
- # 1) Remove all flows from the device
-
- # Drop references
- self.incoming_messages = None
-
- self.log.info('deleted', device_id=self.device_id)
-
- # Drop device ID
- self.device_id = None
-
- # PON Mgnt APIs #
-
- def _get_xpon_collection(self, data):
- if isinstance(data, OntaniConfig):
- return self._ont_anis
- elif isinstance(data, VOntaniConfig):
- return self._v_ont_anis
- elif isinstance(data, VEnetConfig):
- return self._v_enets
- return None
-
- def create_interface(self, data):
- """
- Create XPON interfaces
- :param data: (xpon config info)
- """
- name = data.name
- interface = data.interface
- inst_data = data.data
-
- items = self._get_xpon_collection(data)
- if items is None:
- raise NotImplemented('xPON {} is not implemented'.
- format(type(data)))
-
- if isinstance(data, OntaniConfig):
- self.log.debug('create_interface-ont-ani', interface=interface, data=inst_data)
-
- if name not in items:
- items[name] = {
- 'name': name,
- 'enabled': interface.enabled,
- 'upstream-fec': inst_data.upstream_fec_indicator,
- 'mgnt-gemport-aes': inst_data.mgnt_gemport_aes_indicator
- }
-
- elif isinstance(data, VOntaniConfig):
- self.log.debug('create_interface-v-ont-ani', interface=interface, data=inst_data)
-
- if name not in items:
- items[name] = {
- 'name': name,
- 'enabled': interface.enabled,
- 'onu-id': inst_data.onu_id,
- 'expected-serial-number': inst_data.expected_serial_number,
- 'preferred-channel-pair': inst_data.preferred_chanpair,
- 'channel-partition': inst_data.parent_ref,
- 'upstream-channel-speed': inst_data.upstream_channel_speed
- }
-
- elif isinstance(data, VEnetConfig):
- self.log.debug('create_interface-v-enet', interface=interface, data=inst_data)
-
- if name not in items:
- items[name] = {
- 'name': name,
- 'enabled': interface.enabled,
- 'v-ont-ani': inst_data.v_ontani_ref
- }
- ofp_port_no, cntl_vlan = self._decode_openflow_port_and_control_vlan(items[name])
- self._add_logical_port(ofp_port_no, control_vlan=cntl_vlan)
-
- else:
- raise NotImplementedError('Unknown data type')
-
- def _decode_openflow_port_and_control_vlan(self, venet_info):
- try:
- # Allow spaces or dashes as separator, select last as
- # the port number
-
- ofp_port_no = int(venet_info['name'].replace(' ', '-').split('-')[-1:][0])
- cntl_vlan = ofp_port_no
-
- return ofp_port_no, cntl_vlan
-
- except ValueError:
- self.log.error('invalid-uni-port-name', name=venet_info['name'])
- except KeyError:
- self.log.error('invalid-venet-data', data=venet_info)
-
- def update_interface(self, data):
- """
- Update XPON interfaces
- :param data: (xpon config info)
- """
- name = data.name
- interface = data.interface
- inst_data = data.data
-
- items = self._get_xpon_collection(data)
-
- if items is None:
- raise ValueError('Unknown data type: {}'.format(type(data)))
-
- if name not in items:
- raise KeyError("'{}' not found. Type: {}".format(name, type(data)))
-
- raise NotImplementedError('TODO: not yet supported')
-
- def remove_interface(self, data):
- """
-        Delete XPON interfaces
- :param data: (xpon config info)
- """
- self.log.info('remove-interface', data=data)
-
- name = data.name
- interface = data.interface
- inst_data = data.data
-
- items = self._get_xpon_collection(data)
- item = items.get(name)
-
- if item in items:
- del items[name]
- pass # TODO Do something....
- # raise NotImplementedError('TODO: not yet supported')
-
- def create_tcont(self, tcont_data, traffic_descriptor_data):
- """
- Create TCONT information
- :param tcont_data:
- :param traffic_descriptor_data:
- """
- traffic_descriptor = TrafficDescriptor.create(traffic_descriptor_data)
- tcont = TCont.create(tcont_data, traffic_descriptor)
-
- if tcont.name in self._tconts:
- raise KeyError("TCONT '{}' already exists".format(tcont.name))
-
- if traffic_descriptor.name in self._traffic_descriptors:
- raise KeyError("Traffic Descriptor '{}' already exists".format(traffic_descriptor.name))
-
- self._tconts[tcont.name] = tcont
- self._traffic_descriptors[traffic_descriptor.name] = traffic_descriptor
-
- def update_tcont(self, tcont_data, traffic_descriptor_data):
- """
- Update TCONT information
- :param tcont_data:
- :param traffic_descriptor_data:
- """
- if tcont_data.name not in self._tconts:
- raise KeyError("TCONT '{}' does not exists".format(tcont_data.name))
-
- if traffic_descriptor_data.name not in self._traffic_descriptors:
- raise KeyError("Traffic Descriptor '{}' does not exists".
- format(traffic_descriptor_data.name))
-
- traffic_descriptor = TrafficDescriptor.create(traffic_descriptor_data)
- tcont = TCont.create(tcont_data, traffic_descriptor)
- #
- pass
- raise NotImplementedError('TODO: Not yet supported')
-
- def remove_tcont(self, tcont_data, traffic_descriptor_data):
- """
- Remove TCONT information
- :param tcont_data:
- :param traffic_descriptor_data:
- """
- tcont = self._tconts.get(tcont_data.name)
- traffic_descriptor = self._traffic_descriptors.get(traffic_descriptor_data.name)
-
- if traffic_descriptor is not None:
- del self._traffic_descriptors[traffic_descriptor_data.name]
- pass # Perform any needed operations
- # raise NotImplementedError('TODO: Not yet supported')
-
- if tcont is not None:
- del self._tconts[tcont_data.name]
- pass # Perform any needed operations
- # raise NotImplementedError('TODO: Not yet supported')
-
- def create_gemport(self, data):
- """
- Create GEM Port
- :param data:
- """
- gem_port = GemPort.create(data)
-
- if gem_port.name in self._gem_ports:
- raise KeyError("GEM Port '{}' already exists".format(gem_port.name))
-
- self._gem_ports[gem_port.name] = gem_port
-
- # TODO: On GEM Port changes, may need to add ONU Flow(s)
-
- def update_gemport(self, data):
- """
- Update GEM Port
- :param data:
- """
- if data.name not in self._gem_ports:
- raise KeyError("GEM Port '{}' does not exists".format(data.name))
-
- gem_port = GemPort.create(data)
- #
- # TODO: On GEM Port changes, may need to add/delete/modify ONU Flow(s)
- pass
- raise NotImplementedError('TODO: Not yet supported')
-
- def remove_gemport(self, data):
- """
- Delete GEM Port
- :param data:
- """
- gem_port = self._gem_ports.get(data.name)
-
- if gem_port is not None:
- del self._gem_ports[data.name]
- #
- # TODO: On GEM Port changes, may need to delete ONU Flow(s)
- pass # Perform any needed operations
- # raise NotImplementedError('TODO: Not yet supported')
-
- def create_multicast_gemport(self, data):
- """
- API to create multicast gemport object in the devices
- :data: multicast gemport data object
- :return: None
- """
- pass # TODO: Implement
-
- def update_multicast_gemport(self, data):
- """
- API to update multicast gemport object in the devices
- :data: multicast gemport data object
- :return: None
- """
- pass # TODO: Implement
-
- def remove_multicast_gemport(self, data):
- """
- API to delete multicast gemport object in the devices
- :data: multicast gemport data object
- :return: None
- """
- pass # TODO: Implement
-
- def create_multicast_distribution_set(self, data):
- """
- API to create multicast distribution rule to specify
- the multicast VLANs that ride on the multicast gemport
- :data: multicast distribution data object
- :return: None
- """
- pass # TODO: Implement
-
- def update_multicast_distribution_set(self, data):
- """
- API to update multicast distribution rule to specify
- the multicast VLANs that ride on the multicast gemport
- :data: multicast distribution data object
- :return: None
- """
- pass # TODO: Implement
-
- def remove_multicast_distribution_set(self, data):
- """
- API to delete multicast distribution rule to specify
- the multicast VLANs that ride on the multicast gemport
- :data: multicast distribution data object
- :return: None
- """
- pass # TODO: Implement
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ handler.xpon_remove(data)
diff --git a/voltha/adapters/adtran_onu/adtran_onu_handler.py b/voltha/adapters/adtran_onu/adtran_onu_handler.py
new file mode 100644
index 0000000..8f8f75d
--- /dev/null
+++ b/voltha/adapters/adtran_onu/adtran_onu_handler.py
@@ -0,0 +1,1111 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import arrow
+
+from voltha.adapters.adtran_olt.xpon.adtran_xpon import AdtranXPON
+from omci.omci_cc import OMCISupport
+from pon_port import PonPort
+from uni_port import UniPort
+from heartbeat import HeartBeat
+
+from voltha.adapters.adtran_olt.alarms.adapter_alarms import AdapterAlarms
+from onu_pm_metrics import OnuPmMetrics
+
+from uuid import uuid4
+from twisted.internet import reactor
+from twisted.internet.defer import DeferredQueue, inlineCallbacks
+from twisted.internet.defer import returnValue
+
+from voltha.protos import third_party
+from voltha.protos.common_pb2 import OperStatus, ConnectStatus
+from voltha.protos.device_pb2 import Image
+from voltha.extensions.omci.omci import *
+from common.utils.indexpool import IndexPool
+
+_ = third_party
+_MAXIMUM_PORT = 128 # PON and UNI ports
+
+
+class AdtranOnuHandler(AdtranXPON):
+ def __init__(self, adapter, device_id):
+ kwargs = dict()
+ super(AdtranOnuHandler, self).__init__(**kwargs)
+ self.adapter = adapter
+ self.adapter_agent = adapter.adapter_agent
+ self.device_id = device_id
+ self.log = structlog.get_logger(device_id=device_id)
+ self.logical_device_id = None
+ self.proxy_address = None
+ self._event_messages = None
+ self._enabled = False
+ self.pm_metrics = None
+ self.alarms = None
+ self._mgmt_gemport_aes = False
+ self._upstream_channel_speed = 0
+
+ self._unis = dict() # Port # -> UniPort
+ self._pons = dict() # Port # -> PonPort
+ self._heartbeat = HeartBeat.create(self, device_id)
+
+ self._deferred = None
+ self._event_deferred = None
+ self._omci = None
+ self._port_number_pool = IndexPool(_MAXIMUM_PORT, 1)
+
+ self._olt_created = False # True if deprecated method of OLT creating DA is used
+ self._is_mock = False
+
+ def __str__(self):
+ return "AdtranOnuHandler: {}".format(self.device_id)
+
+ def _cancel_deferred(self):
+ d1, self._deferred = self._deferred, None
+ d2, self._event_deferred = self._event_deferred, None
+
+ for d in [d1, d2]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @enabled.setter
+ def enabled(self, value):
+ assert isinstance(value, bool), 'enabled is a boolean'
+ if self._enabled != value:
+ self._enabled = value
+ if self._enabled:
+ self.start()
+ else:
+ self.stop()
+
+ @property
+ def mgmt_gemport_aes(self):
+ return self._mgmt_gemport_aes
+
+ @mgmt_gemport_aes.setter
+ def mgmt_gemport_aes(self, value):
+ if self._mgmt_gemport_aes != value:
+ self._mgmt_gemport_aes = value
+ # TODO: Anything else
+
+ @property
+ def upstream_channel_speed(self):
+ return self._upstream_channel_speed
+
+ @upstream_channel_speed.setter
+ def upstream_channel_speed(self, value):
+ if self._upstream_channel_speed != value:
+ self._upstream_channel_speed = value
+ # TODO: Anything else
+
+ @property
+ def is_mock(self):
+ return self._is_mock # Not pointing to any real hardware
+
+ @property
+ def olt_created(self):
+ return self._olt_created # ONU was created with deprecated 'child_device_detected' call
+
+ @property
+ def omci(self):
+ return self._omci
+
+ @property
+ def heartbeat(self):
+ return self._heartbeat
+
+ @property
+ def uni_ports(self):
+ return self._unis.values()
+
+ def uni_port(self, port_no_or_name):
+ if isinstance(port_no_or_name, (str, unicode)):
+ return next((uni for uni in self.uni_ports
+ if uni.name == port_no_or_name), None)
+
+ assert isinstance(port_no_or_name, int), 'Invalid parameter type'
+ return self._unis.get(port_no_or_name)
+
+ @property
+ def pon_ports(self):
+ return self._pons.values()
+
+ def pon_port(self, port_no):
+ return self._pons.get(port_no)
+
+ @property
+ def _next_port_number(self):
+ return self._port_number_pool.get_next()
+
+ def _release_port_number(self, number):
+ self._port_number_pool.release(number)
+
+ def start(self):
+ assert self._enabled, 'Start should only be called if enabled'
+ #
+ # TODO: Perform common startup tasks here
+ #
+ self._cancel_deferred()
+
+ self._omci = OMCISupport(self, self.adapter, self.device_id)
+ self._omci.enabled = True
+
+ # Handle received ONU event messages
+ self._event_messages = DeferredQueue()
+ self._event_deferred = reactor.callLater(0, self._handle_onu_events)
+
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+
+ # Port startup
+ for port in self.uni_ports:
+ port.enabled = True
+
+ for port in self.pon_ports:
+ port.enabled = True
+
+ # Heartbeat
+ self._heartbeat.enabled = True
+
+ def stop(self):
+ assert not self._enabled, 'Stop should only be called if disabled'
+ #
+ # TODO: Perform common shutdown tasks here
+ #
+ self._cancel_deferred()
+
+ # Drop registration for adapter messages
+ self.adapter_agent.unregister_for_inter_adapter_messages()
+
+ # Heartbeat
+ self._heartbeat.stop()
+
+ # Port shutdown
+ for port in self.uni_ports:
+ port.enabled = False
+
+ for port in self.pon_ports:
+ port.enabled = False
+
+ omci, self._omci = self._omci, None
+ if omci is not None:
+ omci.enabled = False
+
+        # Drain any queued ONU event messages. DeferredQueue.get() returns an
+        # already-fired deferred when items are pending, so no yield is needed
+        # (a bare yield here would turn stop() into a generator that never runs).
+        queue, self._event_messages = self._event_messages, None
+        if queue is not None:
+            while queue.pending:
+                queue.get()
+
+ def receive_message(self, msg):
+ if self._omci is not None and self.enabled:
+ self._omci.receive_message(msg)
+
+ def activate(self, device):
+ self.log.info('activating')
+
+ # first we verify that we got parent reference and proxy info
+ assert device.parent_id, 'Invalid Parent ID'
+ assert device.proxy_address.device_id, 'Invalid Device ID'
+
+ if device.vlan:
+ # vlan non-zero if created via legacy method (not xPON). Also
+ # Set a random serial number since not xPON based
+ self._olt_created = True
+
+ # register for proxied messages right away
+ self.proxy_address = device.proxy_address
+ self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+
+ # initialize device info
+ device.root = True
+ device.vendor = 'Adtran Inc.'
+ device.model = 'n/a'
+ device.hardware_version = 'n/a'
+ device.firmware_version = 'n/a'
+
+ # TODO: Support more versions as needed
+ images = Image(version='NOT AVAILABLE')
+ device.images.image.extend([images])
+
+ device.connect_status = ConnectStatus.UNKNOWN
+
+ ############################################################################
+ # Setup PM configuration for this device
+
+ self.pm_metrics = OnuPmMetrics(self, device, grouped=True, freq_override=False)
+ pm_config = self.pm_metrics.make_proto()
+ self.log.info("initial-pm-config", pm_config=pm_config)
+ self.adapter_agent.update_device_pm_config(pm_config, init=True)
+
+ ############################################################################
+ # Setup Alarm handler
+
+ self.alarms = AdapterAlarms(self.adapter, device.id)
+
+ # reference of uni_port is required when re-enabling the device if
+ # it was disabled previously
+ # Need to query ONU for number of supported uni ports
+ # For now, temporarily set number of ports to 1 - port #2
+
+ # Register physical ports. Should have at least one of each
+
+ pon_port = PonPort.create(self, self._next_port_number)
+
+ self._pons[pon_port.port_number] = pon_port
+ self.adapter_agent.add_port(device.id, pon_port.get_port())
+
+ parent_device = self.adapter_agent.get_device(device.parent_id)
+ self.logical_device_id = parent_device.parent_id
+ assert self.logical_device_id, 'Invalid logical device ID'
+
+ if self._olt_created:
+ # vlan non-zero if created via legacy method (not xPON). Also
+ # Set a random serial number since not xPON based
+
+ uni_port = UniPort.create(self, self._next_port_number,
+ 'deprecated', device.vlan)
+ self._unis[uni_port.port_number] = uni_port
+ self.adapter_agent.add_port(device.id, uni_port.get_port())
+
+ device.serial_number = uuid4().hex
+ # self._add_logical_port(device.vlan, control_vlan=device.vlan)
+ uni_port.add_logical_port(device.vlan, control_vlan=device.vlan)
+
+ # Start things up for this ONU Handler.
+ self.enabled = True
+
+ # Start collecting stats from the device after a brief pause
+ reactor.callLater(10, self.start_kpi_collection, device.id)
+
+ self.adapter_agent.update_device(device)
+
+ def reconcile(self, device):
+ self.log.info('reconciling-ONU-device-starts')
+
+ # first we verify that we got parent reference and proxy info
+ assert device.parent_id
+ assert device.proxy_address.device_id
+ # assert device.proxy_address.channel_id
+ self._cancel_deferred()
+
+ # register for proxied messages right away
+ self.proxy_address = device.proxy_address
+ self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+
+ # Set the connection status to REACHABLE
+ device.connect_status = ConnectStatus.REACHABLE
+ self.adapter_agent.update_device(device)
+ self.enabled = True
+
+ # TODO: Verify that the uni, pon and logical ports exists
+
+ # Mark the device as REACHABLE and ACTIVE
+ device = self.adapter_agent.get_device(device.id)
+ device.connect_status = ConnectStatus.REACHABLE
+ device.oper_status = OperStatus.ACTIVE
+ self.adapter_agent.update_device(device)
+
+ self.log.info('reconciling-ONU-device-ends')
+
+ def update_pm_config(self, device, pm_config):
+ # TODO: This has not been tested
+ self.log.info('update_pm_config', pm_config=pm_config)
+ self.pm_metrics.update(pm_config)
+
+ def start_kpi_collection(self, device_id):
+ # TODO: This has not been tested
+ def _collect(device_id, prefix):
+ from voltha.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+
+ if self.enabled:
+ try:
+ # Step 1: gather metrics from device
+ port_metrics = self.pm_metrics.collect_port_metrics()
+
+ # Step 2: prepare the KpiEvent for submission
+ # we can time-stamp them here or could use time derived from OLT
+ ts = arrow.utcnow().timestamp
+ kpi_event = KpiEvent(
+ type=KpiEventType.slice,
+ ts=ts,
+ prefixes={
+ prefix + '.{}'.format(k): MetricValuePairs(metrics=port_metrics[k])
+ for k in port_metrics.keys()}
+ )
+ # Step 3: submit
+ self.adapter_agent.submit_kpis(kpi_event)
+
+ except Exception as e:
+ self.log.exception('failed-to-submit-kpis', e=e)
+
+ self.pm_metrics.start_collector(_collect)
+
+ @inlineCallbacks
+ def update_flow_table(self, device, flows):
+ #
+ # We need to proxy through the OLT to get to the ONU
+ # Configuration from here should be using OMCI
+ #
+ # self.log.info('bulk-flow-update', device_id=device.id, flows=flows)
+
+ import voltha.core.flow_decomposer as fd
+ from voltha.protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC, ofp_port
+
+ def is_downstream(port):
+ return port == 100 # Need a better way
+
+ def is_upstream(port):
+ return not is_downstream(port)
+
+ omci = self._omci
+
+ for flow in flows:
+ _type = None
+ _port = None
+ _vlan_vid = None
+ _udp_dst = None
+ _udp_src = None
+ _ipv4_dst = None
+ _ipv4_src = None
+ _metadata = None
+ _output = None
+ _push_tpid = None
+ _field = None
+ _set_vlan_vid = None
+ self.log.info('bulk-flow-update', device_id=device.id, flow=flow)
+ try:
+ _in_port = fd.get_in_port(flow)
+ assert _in_port is not None
+
+ if is_downstream(_in_port):
+ self.log.info('downstream-flow')
+ elif is_upstream(_in_port):
+ self.log.info('upstream-flow')
+ else:
+ raise Exception('port should be 1 or 2 by our convention')
+
+ _out_port = fd.get_out_port(flow) # may be None
+ self.log.info('out-port', out_port=_out_port)
+
+ for field in fd.get_ofb_fields(flow):
+ if field.type == fd.ETH_TYPE:
+ _type = field.eth_type
+ self.log.info('field-type-eth-type',
+ eth_type=_type)
+
+ elif field.type == fd.IP_PROTO:
+ _proto = field.ip_proto
+ self.log.info('field-type-ip-proto',
+ ip_proto=_proto)
+
+ elif field.type == fd.IN_PORT:
+ _port = field.port
+ self.log.info('field-type-in-port',
+ in_port=_port)
+
+ elif field.type == fd.VLAN_VID:
+ _vlan_vid = field.vlan_vid & 0xfff
+ self.log.info('field-type-vlan-vid',
+ vlan=_vlan_vid)
+
+ elif field.type == fd.VLAN_PCP:
+ _vlan_pcp = field.vlan_pcp
+ self.log.info('field-type-vlan-pcp',
+ pcp=_vlan_pcp)
+
+ elif field.type == fd.UDP_DST:
+ _udp_dst = field.udp_dst
+ self.log.info('field-type-udp-dst',
+ udp_dst=_udp_dst)
+
+ elif field.type == fd.UDP_SRC:
+ _udp_src = field.udp_src
+ self.log.info('field-type-udp-src',
+ udp_src=_udp_src)
+
+ elif field.type == fd.IPV4_DST:
+ _ipv4_dst = field.ipv4_dst
+ self.log.info('field-type-ipv4-dst',
+ ipv4_dst=_ipv4_dst)
+
+ elif field.type == fd.IPV4_SRC:
+ _ipv4_src = field.ipv4_src
+ self.log.info('field-type-ipv4-src',
+ ipv4_dst=_ipv4_src)
+
+ elif field.type == fd.METADATA:
+ _metadata = field.table_metadata
+ self.log.info('field-type-metadata',
+ metadata=_metadata)
+
+ else:
+ raise NotImplementedError('field.type={}'.format(
+ field.type))
+
+ for action in fd.get_actions(flow):
+
+ if action.type == fd.OUTPUT:
+ _output = action.output.port
+ self.log.info('action-type-output',
+ output=_output, in_port=_in_port)
+
+ elif action.type == fd.POP_VLAN:
+ self.log.info('action-type-pop-vlan',
+ in_port=_in_port)
+
+ elif action.type == fd.PUSH_VLAN:
+ _push_tpid = action.push.ethertype
+                        self.log.info('action-type-push-vlan',
+                                      push_tpid=_push_tpid, in_port=_in_port)
+ if action.push.ethertype != 0x8100:
+ self.log.error('unhandled-tpid',
+ ethertype=action.push.ethertype)
+
+ elif action.type == fd.SET_FIELD:
+ _field = action.set_field.field.ofb_field
+ assert (action.set_field.field.oxm_class ==
+ OFPXMC_OPENFLOW_BASIC)
+ self.log.info('action-type-set-field',
+ field=_field, in_port=_in_port)
+ if _field.type == fd.VLAN_VID:
+ _set_vlan_vid = _field.vlan_vid & 0xfff
+                            self.log.info('set-field-type-vlan-vid', vlan_vid=_set_vlan_vid)
+ else:
+ self.log.error('unsupported-action-set-field-type',
+ field_type=_field.type)
+ else:
+                        self.log.error('unsupported-action-type',
+                                       action_type=action.type, in_port=_in_port)
+
+ #
+ # All flows created from ONU adapter should be OMCI based
+ #
+ if _vlan_vid == 0 and _set_vlan_vid != None and _set_vlan_vid != 0:
+ # allow priority tagged packets
+ # Set AR - ExtendedVlanTaggingOperationConfigData
+ # 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to priority tagged pkts - c-vid
+
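+                # Rough sequence of the OMCI calls below: remove any existing VLAN
+                # tagging filter data (entity 0x2102), re-create it for the target
+                # C-VID, then update the extended VLAN tagging operation
+                # configuration data (entity 0x202) for untagged and single-tagged
+                # upstream frames.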
+ results = yield omci.send_delete_vlan_tagging_filter_data(0x2102)
+
+ # self.send_set_vlan_tagging_filter_data(0x2102, _set_vlan_vid)
+ results = yield omci.send_create_vlan_tagging_filter_data(
+ 0x2102,
+ _set_vlan_vid)
+
+ results = yield omci.send_set_extended_vlan_tagging_operation_vlan_configuration_data_untagged(
+ 0x202,
+ 0x1000,
+ _set_vlan_vid)
+
+ results = yield omci.send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(
+ 0x202,
+ 8,
+ 0,
+ 0,
+ 1,
+ 8,
+ _set_vlan_vid)
+
+ # Set AR - ExtendedVlanTaggingOperationConfigData
+ # 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to priority tagged pkts - c-vid
+ '''
+ results = yield omci.send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(0x205, 8, 0, 0,
+
+ '''
+
+ except Exception as e:
+                self.log.exception('failed-to-install-flow', e=e, flow=flow)
+
+ @inlineCallbacks
+ def reboot(self):
+ from common.utils.asleep import asleep
+ self.log.info('rebooting', device_id=self.device_id)
+ self._cancel_deferred()
+
+ # Drop registration for adapter messages
+ self.adapter_agent.unregister_for_inter_adapter_messages()
+
+ # Update the operational status to ACTIVATING and connect status to
+ # UNREACHABLE
+ device = self.adapter_agent.get_device(self.device_id)
+ previous_oper_status = device.oper_status
+ previous_conn_status = device.connect_status
+ device.oper_status = OperStatus.ACTIVATING
+ device.connect_status = ConnectStatus.UNREACHABLE
+
+ self.adapter_agent.update_device(device)
+
+ # Sleep 10 secs, simulating a reboot
+ # TODO: send alert and clear alert after the reboot
+ yield asleep(10) # TODO: Need to reboot for real
+
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+
+ # Change the operational status back to its previous state. With a
+ # real OLT the operational state should be the state the device is
+ # after a reboot.
+ # Get the latest device reference
+ device = self.adapter_agent.get_device(self.device_id)
+ device.oper_status = previous_oper_status
+ device.connect_status = previous_conn_status
+ self.adapter_agent.update_device(device)
+ self.log.info('rebooted', device_id=self.device_id)
+
+ def self_test_device(self, device):
+ """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: The result of the self test
+ """
+ from voltha.protos.voltha_pb2 import SelfTestResponse
+ self.log.info('self-test-device', device=device.id)
+ # TODO: Support self test?
+ return SelfTestResponse(result=SelfTestResponse.NOT_SUPPORTED)
+
+ def disable(self):
+ self.log.info('disabling', device_id=self.device_id)
+ self.enabled = False
+
+ # Get the latest device reference
+ device = self.adapter_agent.get_device(self.device_id)
+
+ # Disable all ports on that device
+ self.adapter_agent.disable_all_ports(self.device_id)
+
+ # Update the device operational status to UNKNOWN
+ device.oper_status = OperStatus.UNKNOWN
+ device.connect_status = ConnectStatus.UNREACHABLE
+ self.adapter_agent.update_device(device)
+
+ # Remove the uni logical port from the OLT, if still present
+ parent_device = self.adapter_agent.get_device(device.parent_id)
+ assert parent_device
+ logical_device_id = parent_device.parent_id
+ assert logical_device_id
+
+ for uni in self.uni_ports:
+ port_id = 'uni-{}'.format(uni.port_number)
+
+ try:
+ port = self.adapter_agent.get_logical_port(logical_device_id,
+ port_id)
+ self.adapter_agent.delete_logical_port(logical_device_id, port)
+ except KeyError:
+ self.log.info('logical-port-not-found', device_id=self.device_id,
+ portid=port_id)
+
+ # Remove pon port from parent
+ for port in self.pon_ports:
+ self.adapter_agent.delete_port_reference_from_parent(self.device_id,
+ port.get_port())
+
+ # Just updating the port status may be an option as well
+ # port.ofp_port.config = OFPPC_NO_RECV
+ # yield self.adapter_agent.update_logical_port(logical_device_id,
+ # port)
+ # Unregister for proxied message
+ self.adapter_agent.unregister_for_proxied_messages(
+ device.proxy_address)
+
+ # TODO:
+ # 1) Remove all flows from the device
+ # 2) Remove the device from ponsim
+
+ self.log.info('disabled', device_id=device.id)
+
+ def reenable(self):
+ self.log.info('re-enabling', device_id=self.device_id)
+ try:
+ # Get the latest device reference
+ device = self.adapter_agent.get_device(self.device_id)
+ self._cancel_deferred()
+
+ # First we verify that we got parent reference and proxy info
+ assert device.parent_id
+ assert device.proxy_address.device_id
+ # assert device.proxy_address.channel_id
+
+ # Re-register for proxied messages right away
+ self.proxy_address = device.proxy_address
+ self.adapter_agent.register_for_proxied_messages(
+ device.proxy_address)
+
+ # Re-enable the ports on that device
+ self.adapter_agent.enable_all_ports(self.device_id)
+
+ # Refresh the port reference
+ # self.uni_port = self._get_uni_port() deprecated
+
+ # Add the pon port reference to the parent
+ for port in self.pon_ports:
+ # TODO: Send 'enable' to PonPort?
+ self.adapter_agent.add_port_reference_to_parent(device.id,
+ port.get_port())
+
+ # Update the connect status to REACHABLE
+ device.connect_status = ConnectStatus.REACHABLE
+ self.adapter_agent.update_device(device)
+
+ # re-add uni port to logical device
+ parent_device = self.adapter_agent.get_device(device.parent_id)
+ self.logical_device_id = parent_device.parent_id
+ assert self.logical_device_id, 'Invalid logical device ID'
+
+ if self.olt_created:
+ # vlan non-zero if created via legacy method (not xPON)
+ self.uni_port('deprecated').add_logical_port(device.vlan, device.vlan,
+ control_vlan=device.vlan)
+
+ device = self.adapter_agent.get_device(device.id)
+ device.oper_status = OperStatus.ACTIVE
+
+ self.enabled = True
+ self.adapter_agent.update_device(device)
+
+ self.log.info('re-enabled', device_id=device.id)
+        except Exception as e:
+ self.log.exception('error-reenabling', e=e)
+
+ def delete(self):
+ self.log.info('deleting', device_id=self.device_id)
+ # A delete request may be received when an OLT is disabled
+
+ self.enabled = False
+
+ # TODO: Need to implement this
+ # 1) Remove all flows from the device
+
+ self.log.info('deleted', device_id=self.device_id)
+
+ # Drop device ID
+ self.device_id = None
+
+ def _check_for_mock_config(self, data):
+ # Check for MOCK configuration
+ description = data.get('description')
+ if description is not None and description.lower() == 'mock':
+ self._is_mock = True
+
+ def on_ont_ani_create(self, ont_ani):
+ """
+ A new ONT-ani is being created. You can override this method to
+ perform custom operations as needed. If you override this method, you can add
+ additional items to the item dictionary to track additional implementation
+ key/value pairs.
+
+ :param ont_ani: (dict) new ONT-ani
+ :return: (dict) Updated ONT-ani dictionary, None if item should be deleted
+ """
+ self.log.info('ont-ani-create', ont_ani=ont_ani)
+
+ self._check_for_mock_config(ont_ani)
+ self.enabled = ont_ani['enabled']
+
+        return ont_ani  # Implement in your ONU handler, if needed
+
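+    # As the docstring above notes, an adapter-specific subclass could stash
+    # extra implementation state in the returned item dictionary. A purely
+    # hypothetical illustration (no such subclass exists in this change):
+    #
+    #     def on_ont_ani_create(self, ont_ani):
+    #         ont_ani = super(MyOnuHandler, self).on_ont_ani_create(ont_ani)
+    #         ont_ani['private-state'] = 'some-implementation-value'
+    #         return ont_ani
+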
+ def on_ont_ani_modify(self, ont_ani, update, diffs):
+ """
+        An existing ONT-ani is being updated. You can override this method to
+ perform custom operations as needed. If you override this method, you can add
+ additional items to the item dictionary to track additional implementation
+ key/value pairs.
+
+ :param ont_ani: (dict) existing ONT-ani item dictionary
+ :param update: (dict) updated (changed) ONT-ani
+ :param diffs: (dict) collection of items different in the update
+ :return: (dict) Updated ONT-ani dictionary, None if item should be deleted
+ """
+ valid_keys = ['enabled', 'mgnt-gemport-aes'] # Modify of these keys supported
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("ont_ani leaf '{}' is read-only or write-once".format(invalid_key))
+
+ keys = [k for k in diffs.keys() if k in valid_keys]
+
+ for k in keys:
+ if k == 'enabled':
+ self.enabled = update[k]
+
+ elif k == 'mgnt-gemport-aes':
+ self.mgmt_gemport_aes = update[k]
+
+ return update
+
+ def on_ont_ani_delete(self, ont_ani):
+ """
+        An existing ONT-ani is being deleted. You can override this method to
+ perform custom operations as needed. If you override this method, you can add
+ additional items to the item dictionary to track additional implementation
+ key/value pairs.
+
+ :param ont_ani: (dict) ONT-ani to delete
+ :return: (dict) None if item should be deleted
+ """
+ # TODO: Is this ever called or is the iAdapter 'delete' called first?
+        return None  # Implement in your ONU handler, if needed
+
+ def on_vont_ani_create(self, vont_ani):
+ self.log.info('vont-ani-create', vont_ani=vont_ani)
+
+ self._check_for_mock_config(vont_ani)
+ # TODO: look up PON port and update 'upstream-channel-speed'
+        return vont_ani  # Implement in your ONU handler, if needed
+
+ def on_vont_ani_modify(self, vont_ani, update, diffs):
+ valid_keys = ['upstream-channel-speed'] # Modify of these keys supported
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("vont_ani leaf '{}' is read-only or write-once".format(invalid_key))
+
+ keys = [k for k in diffs.keys() if k in valid_keys]
+
+ for k in keys:
+ if k == 'upstream-channel-speed':
+ self.upstream_channel_speed = update[k]
+
+ return update
+
+ def on_vont_ani_delete(self, vont_ani):
+ # TODO: Is this ever called or is the iAdapter 'delete' called first?
+        return None  # Implement in your ONU handler, if needed
+
+ def on_venet_create(self, venet):
+ self.log.info('venet-create', venet=venet)
+
+ self._check_for_mock_config(venet)
+
+ # TODO: This first set is copied over from BroadCOM ONU. For testing, actual work
+ # is the last 7 lines. The 'test' code below assumes we have not registered
+ # any UNI ports during 'activate' but we want to create them as the vEnet
+ # information comes in.
+ # onu_device = self.adapter_agent.get_device(self.device_id)
+ # existing_uni_ports = self.adapter_agent.get_ports(onu_device.parent_id, Port.ETHERNET_UNI)
+ #
+ # parent_port_num = None
+ # for uni in existing_uni_ports:
+ # if uni.label == venet['name']: # TODO: was -> data.interface.name:
+ # parent_port_num = uni.port_no
+ # break
+ #
+ # # Create both the physical and logical ports for the UNI now
+ # parent_device = self.adapter_agent.get_device(onu_device.parent_id)
+ # logical_device_id = parent_device.parent_id
+ # assert logical_device_id, 'Invalid logical device ID'
+ # # self.add_uni_port(onu_device, logical_device_id, venet['name'], parent_port_num)
+ #
+ # pon_ports = self.adapter_agent.get_ports(self.device_id, Port.PON_ONU)
+ # if pon_ports:
+ # # TODO: Assumed only one PON port and UNI port per ONU.
+ # pon_port = pon_ports[0]
+ # else:
+ # self.log.error("No-Pon-port-configured-yet")
+ # return
+ #
+ # self.adapter_agent.delete_port_reference_from_parent(self.device_id, pon_port)
+ # pon_port.peers[0].device_id = onu_device.parent_id
+ # pon_port.peers[0].port_no = parent_port_num
+ # self.adapter_agent.add_port_reference_to_parent(self.device_id, pon_port)
+
+ #################################################################################
+ # Start of actual work (what actually does something)
+ # TODO: Clean this up. Use looked up UNI
+
+ if self._olt_created:
+ uni_port = self.uni_port('deprecated')
+
+ else:
+ # vlan non-zero if created via legacy method (not xPON). Also
+ # Set a random serial number since not xPON based
+
+ device = self.adapter_agent.get_device(self.device_id)
+ ofp_port_no, cntl_vlan = UniPort.decode_openflow_port_and_control_vlan(self, venet)
+
+ uni_port = UniPort.create(self, venet['name'],
+ self._next_port_number,
+ cntl_vlan)
+
+ self._unis[uni_port.port_number] = uni_port
+ self.adapter_agent.add_port(device.id, uni_port.get_port())
+
+ uni_port.add_logical_port(ofp_port_no, control_vlan=cntl_vlan)
+
+ # TODO: Next is just for debugging to see what this call returns after
+ # we add a UNI
+ # existing_uni_ports = self.adapter_agent.get_ports(onu_device.parent_id, Port.ETHERNET_UNI)
+
+ uni_port.enabled = venet['enabled']
+
+ return venet
+
+ def on_venet_modify(self, venet, update, diffs):
+ # Look up the associated UNI port
+
+ if self._olt_created:
+ uni_port = self.uni_port('deprecated')
+ else:
+ uni_port = self.uni_port(venet['name'])
+
+ if uni_port is not None:
+ valid_keys = ['enabled'] # Modify of these keys supported
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("venet leaf '{}' is read-only or write-once".format(invalid_key))
+
+ keys = [k for k in diffs.keys() if k in valid_keys]
+
+ for k in keys:
+ if k == 'enabled':
+ uni_port.enabled = update[k]
+
+ return update
+
+ def on_venet_delete(self, venet):
+ # Look up the associated UNI port
+
+ if self._olt_created:
+ uni_port = self.uni_port('deprecated')
+ else:
+ uni_port = self.uni_port(venet['name'])
+
+ if uni_port is not None:
+ port_no = uni_port.port_number
+ del self._unis[port_no]
+ uni_port.delete()
+ self._release_port_number(port_no)
+
+ return None
+
+ def on_tcont_create(self, tcont):
+ from onu_tcont import OnuTCont
+
+ self.log.info('create-tcont')
+
+ td = self.traffic_descriptors.get(tcont.get('td-ref'))
+ traffic_descriptor = td['object'] if td is not None else None
+ tcont['object'] = OnuTCont.create(self, tcont, traffic_descriptor)
+
+ # Look up any PON port # TODO: Add the vont-ani 'name' to the PON Port and look up that way
+ pon_port = self.pon_ports[0]
+
+ if pon_port is not None:
+ pon_port.add_tcont(tcont['object'])
+
+ return tcont
+
+ def on_tcont_modify(self, tcont, update, diffs):
+ valid_keys = ['td-ref'] # Modify of these keys supported
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("TCONT leaf '{}' is read-only or write-once".format(invalid_key))
+
+ tc = tcont.get('object')
+ assert tc is not None, 'TCONT not found'
+
+ update['object'] = tc
+
+ # Look up any PON port # TODO: Add the vont-ani 'name' to the PON Port and look up that way
+ pon_port = self.pon_ports[0]
+
+ if pon_port is not None:
+ keys = [k for k in diffs.keys() if k in valid_keys]
+
+ for k in keys:
+ if k == 'td-ref':
+ td = self.traffic_descriptors.get(update['td-ref'])
+ if td is not None:
+ pon_port.update_tcont_td(tcont['alloc-id'], td)
+
+ return update
+
+ def on_tcont_delete(self, tcont):
+ pon_port = self.pon_ports[0] # Look up any PON port # TODO: Add the vont-ani 'name' to the PON Port and look up that way
+
+ if pon_port is not None:
+ pon_port.remove_tcont(tcont['alloc-id'])
+
+ return None
+
+ def on_td_create(self, traffic_disc):
+ from onu_traffic_descriptor import OnuTrafficDescriptor
+
+ traffic_disc['object'] = OnuTrafficDescriptor.create(traffic_disc)
+ return traffic_disc
+
+ def on_td_modify(self, traffic_disc, update, diffs):
+ from onu_traffic_descriptor import OnuTrafficDescriptor
+
+ valid_keys = ['fixed-bandwidth',
+ 'assured-bandwidth',
+ 'maximum-bandwidth',
+ 'priority',
+ 'weight',
+ 'additional-bw-eligibility-indicator']
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("traffic-descriptor leaf '{}' is read-only or write-once".format(invalid_key))
+
+ # New traffic descriptor
+ update['object'] = OnuTrafficDescriptor.create(update)
+
+ td_name = traffic_disc['name']
+ tconts = {key: val for key, val in self.tconts.iteritems()
+ if val['td-ref'] == td_name and td_name is not None}
+
+ for tcont in tconts.itervalues():
+ pon_port = self.pon_ports[0] # Look up any PON port # TODO: Add the vont-ani 'name' to the PON Port and look up that way
+
+ if pon_port is not None:
+ pon_port.update_tcont_td(tcont['alloc-id'], update['object'])
+
+ return update
+
+ def on_td_delete(self, traffic_desc):
+ # TD may be used by more than one TCONT. Only delete if the last one
+
+ td_name = traffic_desc['name']
+ num_tconts = len([val for val in self.tconts.itervalues()
+ if val['td-ref'] == td_name and td_name is not None])
+
+ return None if num_tconts <= 1 else traffic_desc
+
+ def on_gemport_create(self, gem_port):
+ from onu_gem_port import OnuGemPort
+
+ gem_port['object'] = OnuGemPort.create(self, gem_port)
+ # Look up any PON port # TODO: Add the vont-ani 'name' to the PON Port and look up that way
+ pon_port = self.pon_ports[0]
+ if pon_port is not None:
+ pon_port.add_gem_port(gem_port['object'])
+
+ return gem_port
+
+ def on_gemport_modify(self, gem_port, update, diffs):
+ valid_keys = ['encryption',
+ 'traffic-class'] # Modify of these keys supported
+
+ invalid_key = next((key for key in diffs.keys() if key not in valid_keys), None)
+ if invalid_key is not None:
+ raise KeyError("GEM Port leaf '{}' is read-only or write-once".format(invalid_key))
+
+ port = gem_port.get('object')
+ assert port is not None, 'GemPort not found'
+
+ keys = [k for k in diffs.keys() if k in valid_keys]
+ update['object'] = port
+
+ for k in keys:
+ if k == 'encryption':
+ port.encryption = update[k]
+ elif k == 'traffic-class':
+ pass # TODO: Implement
+
+ return update
+
+ def on_gemport_delete(self, gem_port):
+ pon_port = self.pon_ports[0] # Look up any PON port # TODO: Add the vont-ani 'name' to the PON Port and look up that way
+ if pon_port is not None:
+ pon_port.remove_gem_id(gem_port['gemport-id'])
+
+ return None
+
+ def on_mcast_gemport_create(self, mcast_gem_port):
+        return mcast_gem_port  # Implement in your ONU handler, if needed
+
+    def on_mcast_gemport_modify(self, mcast_gem_port, update, diffs):
+        return mcast_gem_port  # Implement in your ONU handler, if needed
+
+    def on_mcast_gemport_delete(self, mcast_gem_port):
+        return None  # Implement in your ONU handler, if needed
+
+    def on_mcast_dist_set_create(self, dist_set):
+        return dist_set  # Implement in your ONU handler, if needed
+
+    def on_mcast_dist_set_modify(self, dist_set, update, diffs):
+        return update  # Implement in your ONU handler, if needed
+
+    def on_mcast_dist_set_delete(self, dist_set):
+        return None  # Implement in your ONU handler, if needed
+
+ def rx_inter_adapter_message(self, msg):
+ if self.enabled and self._event_messages is not None:
+ self._event_messages.put(msg)
+
+ @inlineCallbacks
+ def _handle_onu_events(self):
+ #
+ # TODO: From broadcom ONU. This is from the 'receive_inter_adapter_message()'
+ # method.
+ #
+ event_msg = yield self._event_messages.get()
+
+ if self._event_deferred is None:
+ returnValue('cancelled')
+
+ if event_msg['event'] == 'activation-completed':
+ # if event_msg['event_data']['activation_successful']:
+ # for uni in self.uni_ports:
+ # port_no = self.proxy_address.channel_id + uni
+ # reactor.callLater(1,
+ # self.message_exchange,
+ # self.proxy_address.onu_id,
+ # self.proxy_address.onu_session_id,
+ # port_no)
+ #
+ # device = self.adapter_agent.get_device(self.device_id)
+ # device.oper_status = OperStatus.ACTIVE
+ # self.adapter_agent.update_device(device)
+ #
+ # else:
+ # device = self.adapter_agent.get_device(self.device_id)
+ # device.oper_status = OperStatus.FAILED
+ # self.adapter_agent.update_device(device)
+ pass
+
+ elif event_msg['event'] == 'deactivation-completed':
+ # device = self.adapter_agent.get_device(self.device_id)
+ # device.oper_status = OperStatus.DISCOVERED
+ # self.adapter_agent.update_device(device)
+ pass
+
+ elif event_msg['event'] == 'ranging-completed':
+ # if event_msg['event_data']['ranging_successful']:
+ # device = self.adapter_agent.get_device(self.device_id)
+ # device.oper_status = OperStatus.ACTIVATING
+ # self.adapter_agent.update_device(device)
+ #
+ # else:
+ # device = self.adapter_agent.get_device(self.device_id)
+ # device.oper_status = OperStatus.FAILED
+ # self.adapter_agent.update_device(device)
+ pass
+
+ # Handle next event (self._event_deferred is None if we got stopped)
+
+        self._event_deferred = reactor.callLater(0, self._handle_onu_events)
diff --git a/voltha/adapters/adtran_onu/heartbeat.py b/voltha/adapters/adtran_onu/heartbeat.py
new file mode 100644
index 0000000..02fec22
--- /dev/null
+++ b/voltha/adapters/adtran_onu/heartbeat.py
@@ -0,0 +1,182 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet import reactor
+from voltha.protos.common_pb2 import OperStatus, ConnectStatus
+from omci.omci_me import OntGFrame
+
+class HeartBeat(object):
+ """Wraps health-check support for ONU"""
+ INITIAL_DELAY = 60 # Delay after start until first check
+ TICK_DELAY = 2 # Heartbeat interval
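+
+    # Operational sketch: every heartbeat_interval seconds an OMCI get of the
+    # ONT-G check_item attribute is sent to the ONU. A reply whose value matches
+    # check_value resets the miss counter; heartbeat_failed_limit consecutive
+    # misses raise a HeartbeatAlarm and mark the device UNREACHABLE/FAILED until
+    # a good reply is received again.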
+
+ def __init__(self, handler, device_id):
+ self.log = structlog.get_logger(device_id=device_id)
+ self._enabled = False
+ self._handler = handler
+ self._device_id = device_id
+ self._defer = None
+ self._alarm_active = False
+ self._heartbeat_count = 0
+ self._heartbeat_miss = 0
+ self._alarms_raised_count = 0
+ self.heartbeat_failed_limit = 5
+ self.heartbeat_last_reason = ''
+ self.heartbeat_interval = self.TICK_DELAY
+
+ def __str__(self):
+ return "HeartBeat: count:{}, miss: {}".format(self._heartbeat_count,
+ self._heartbeat_miss)
+
+ @staticmethod
+ def create(handler, device_id):
+ return HeartBeat(handler, device_id)
+
+ def _start(self, delay=INITIAL_DELAY):
+ self._defer = reactor.callLater(delay, self.check_pulse)
+
+ def _stop(self):
+        d, self._defer = self._defer, None
+        if d is not None and not d.called:
+            d.cancel()
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @enabled.setter
+ def enabled(self, value):
+ if self._enabled != value:
+ self._enabled = value
+
+ if value:
+ self._start()
+ else:
+ self._stop()
+
+ @property
+ def check_item(self):
+ # return 'vendor_id' if self._handler.olt_created else 'serial_number'
+ return 'vendor_id' if self._handler.olt_created else 'vendor_id'
+
+ @property
+ def check_value(self):
+ if self._handler.olt_created:
+ return 'ADTN'
+
+ # device = self._handler.adapter_agent.get_device(self._device_id)
+ # return device.serial_number
+ return 'ADTN'
+
+ @property
+ def alarm_active(self):
+ return self._alarm_active
+
+ @property
+ def heartbeat_count(self):
+ return self._heartbeat_count
+
+ @property
+ def heartbeat_miss(self):
+ return self._heartbeat_miss
+
+ @property
+ def alarms_raised_count(self):
+ return self._alarms_raised_count
+
+ def check_pulse(self):
+ if self.enabled:
+ try:
+ self._defer = self._handler.omci.send(OntGFrame(self.check_item).get())
+ self._defer.addCallbacks(self._heartbeat_success, self._heartbeat_fail)
+
+ except Exception as e:
+ self._defer = reactor.callLater(5, self._heartbeat_fail, e)
+
+ def _heartbeat_success(self, results):
+ self.log.debug('heartbeat-success')
+
+ try:
+ omci_response = results.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ value = data[self.check_item]
+
+ if value != self.check_value:
+ self._heartbeat_miss = self.heartbeat_failed_limit
+ self.heartbeat_last_reason = "Invalid {}, got '{}' but expected '{}'".\
+ format(self.check_item, value, self.check_value)
+ else:
+ self._heartbeat_miss = 0
+ self.heartbeat_last_reason = ''
+
+ except Exception as e:
+ self._heartbeat_miss = self.heartbeat_failed_limit
+ self.heartbeat_last_reason = e.message
+
+ self.heartbeat_check_status(results)
+
+ def _heartbeat_fail(self, failure):
+ self._heartbeat_miss += 1
+ self.log.info('heartbeat-miss', failure=failure,
+ count=self._heartbeat_count,
+ miss=self._heartbeat_miss)
+ self.heartbeat_last_reason = 'OMCI connectivity error'
+ self.heartbeat_check_status(None)
+
+ def on_heartbeat_alarm(self, active):
+ # TODO: Do something here ?
+ #
+ # TODO: If failed (active = true) due to bad serial-number shut off the UNI port?
+ pass
+
+ def heartbeat_check_status(self, results):
+ """
+ Check the number of heartbeat failures against the limit and emit an alarm if needed
+ """
+ device = self._handler.adapter_agent.get_device(self._device_id)
+
+ try:
+ from ..adtran_olt.alarms.heartbeat_alarm import HeartbeatAlarm
+
+ if self._heartbeat_miss >= self.heartbeat_failed_limit:
+ if device.connect_status == ConnectStatus.REACHABLE:
+ self.log.warning('heartbeat-failed', count=self._heartbeat_miss)
+ device.connect_status = ConnectStatus.UNREACHABLE
+ device.oper_status = OperStatus.FAILED
+ device.reason = self.heartbeat_last_reason
+ self._handler.adapter_agent.update_device(device)
+ HeartbeatAlarm(self._handler, 'onu', self._heartbeat_miss).raise_alarm()
+ self._alarm_active = True
+ self.on_heartbeat_alarm(True)
+ else:
+ # Update device states
+ if device.connect_status != ConnectStatus.REACHABLE and self._alarm_active:
+ device.connect_status = ConnectStatus.REACHABLE
+ device.oper_status = OperStatus.ACTIVE
+ device.reason = ''
+ self._handler.adapter_agent.update_device(device)
+ HeartbeatAlarm(self._handler, 'onu').clear_alarm()
+
+ self._alarm_active = False
+ self._alarms_raised_count += 1
+ self.on_heartbeat_alarm(False)
+
+ except Exception as e:
+ self.log.exception('heartbeat-check', e=e)
+
+ # Reschedule next heartbeat
+ if self.enabled:
+ self._heartbeat_count += 1
+ self._defer = reactor.callLater(self.heartbeat_interval, self.check_pulse)
diff --git a/voltha/adapters/adtran_onu/omci/README.md b/voltha/adapters/adtran_onu/omci/README.md
new file mode 100644
index 0000000..2d030fe
--- /dev/null
+++ b/voltha/adapters/adtran_onu/omci/README.md
@@ -0,0 +1,12 @@
+# OMCI Support
+
+This directory contains classes to assist in the creation, transmission,
+and reception of OMCI frames on this ONU Adapter.
+
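+A minimal usage sketch (illustrative only; it assumes `handler` is an
+`AdtranOnuHandler` whose `start()` has already created its `OMCISupport`
+instance and registered the ONU's proxy address with the OLT):
+
+```python
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+@inlineCallbacks
+def query_vendor_id(handler):
+    # send_get_OntG() builds an OMCI get of the ONT-G managed entity and
+    # returns a Deferred that fires with the decoded response OmciFrame
+    results = yield handler.omci.send_get_OntG('vendor_id')
+    data = results.getfieldval('omci_message').getfieldval('data')
+    returnValue(data['vendor_id'])
+```
+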
+## Files
+
+*TODO*: Add info on each file and what it is for
+
+## Unit Tests
+
+*TODO*: Add info on how to run unit tests on these files
\ No newline at end of file
diff --git a/voltha/adapters/adtran_onu/omci/__init__.py b/voltha/adapters/adtran_onu/omci/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/voltha/adapters/adtran_onu/omci/__init__.py
diff --git a/voltha/adapters/adtran_onu/omci/omci_cc.py b/voltha/adapters/adtran_onu/omci/omci_cc.py
new file mode 100644
index 0000000..4e08a61
--- /dev/null
+++ b/voltha/adapters/adtran_onu/omci/omci_cc.py
@@ -0,0 +1,689 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+OMCI Message support
+"""
+
+import sys
+import arrow
+from twisted.internet import reactor, defer
+from twisted.internet.defer import DeferredQueue, inlineCallbacks, returnValue, TimeoutError, CancelledError, failure, fail
+from voltha.protos import third_party
+from common.frameio.frameio import hexify
+from voltha.extensions.omci.omci import *
+
+_ = third_party
+
+_MAX_INCOMING_OMCI_MESSAGES = 256
+DEFAULT_OMCI_TIMEOUT = 3 # Seconds
+MAX_OMCI_REQUEST_AGE = 60 # Seconds
+MAX_OMCI_TX_ID = 0xFFFF # 2 Octets max
+
+# abbreviations
+# ECA = EntityClassAttribute
+# AA = AttributeAccess
+OP = EntityOperations
+
+
+class OMCISupport(object):
+ """ Handle OMCI Specifics for Adtran ONUs"""
+
+ def __init__(self, handler, adapter, device_id):
+ self.log = structlog.get_logger(device_id=device_id)
+ self._handler = handler
+ self._adapter = adapter
+ self._device_id = device_id
+ self._proxy_address = None
+ self._tx_tid = 1
+ self._deferred = None # TODO: Remove later if never used
+ self._enabled = False
+ self._requests = dict() # Tx ID -> (timestamp, deferred, tx_frame, timeout)
+ self._onu_messages = DeferredQueue(size=_MAX_INCOMING_OMCI_MESSAGES)
+
+ # Statistics
+ self._tx_frames = 0
+ self._rx_frames = 0
+ self._rx_onu_frames = 0 # Autonomously generated ONU frames
+ self._rx_timeouts = 0
+ self._tx_errors = 0 # Exceptions during tx request
+ self._consecutive_errors = 0 # Rx & Tx errors in a row, good rx resets this to 0
+ self._reply_min = sys.maxint # Fastest successful tx -> rx
+ self._reply_max = 0 # Longest successful tx -> rx
+ self._reply_sum = 0.0 # Total seconds for successful tx->rx (float for average)
+
+ def __str__(self):
+ return "OMCISupport: {}".format(self._device_id)
+
+ def _cancel_deferred(self):
+ d, self._deferred = self._deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @enabled.setter
+ def enabled(self, value):
+ assert isinstance(value, bool), 'enabled is a boolean'
+ if self._enabled != value:
+ self._enabled = value
+ if self._enabled:
+ self.start()
+ else:
+ self.stop()
+
+ @property
+ def tx_frames(self):
+ return self._tx_frames
+
+ @property
+ def rx_frames(self):
+ return self._rx_frames
+
+ @property
+ def rx_onu_frames(self):
+ return self._rx_onu_frames
+
+ @property
+ def rx_timeouts(self):
+ return self._rx_timeouts
+
+ @property
+ def tx_errors(self):
+ return self._tx_errors
+
+ @property
+ def consecutive_errors(self):
+ return self._consecutive_errors
+
+ @property
+ def reply_min(self):
+ return int(round(self._reply_min * 1000.0)) # Milliseconds
+
+ @property
+ def reply_max(self):
+ return int(round(self._reply_max * 1000.0)) # Milliseconds
+
+ @property
+ def reply_average(self):
+ avg = self._reply_sum / self._rx_frames if self._rx_frames > 0 else 0.0
+ return int(round(avg * 1000.0)) # Milliseconds
+
+ @property
+ def get_onu_autonomous_message(self):
+ """
+ Attempt to retrieve and remove an object from the ONU autonomous
+ message queue.
+
+        :return: a Deferred which fires with the next (OmciFrame, timestamp)
+                 tuple available in the queue.
+ """
+ return self._onu_messages.get()
+
+ def start(self):
+ assert self._enabled, 'Start should only be called if enabled'
+ #
+ # TODO: Perform common startup tasks here
+ #
+ self._cancel_deferred()
+ self.flush()
+
+ device = self._adapter.adapter_agent.get_device(self._device_id)
+ self._proxy_address = device.proxy_address
+
+ def stop(self):
+ assert not self._enabled, 'Stop should only be called if disabled'
+ #
+ # TODO: Perform common shutdown tasks here
+ #
+ self._cancel_deferred()
+ self.flush()
+ self._proxy_address = None
+ pass
+
+ def _receive_onu_message(self, rx_frame):
+ """ Autonomously generated ONU frame Rx handler"""
+ self.log.debug('rx-onu-frame', frame=rx_frame)
+
+ self._rx_onu_frames += 1
+ self._onu_messages.put((rx_frame, arrow.utcnow().float_timestamp))
+
+ def receive_message(self, msg):
+ """
+        Receive an OMCI message from the proxy channel to the OLT
+ """
+ if self.enabled:
+ try:
+ now = arrow.utcnow()
+ d = None
+
+ try:
+ rx_frame = OmciFrame(msg)
+ rx_tid = rx_frame.fields['transaction_id']
+
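+                    # A transaction ID of zero identifies an autonomous (unsolicited)
+                    # notification from the ONU rather than a response to a request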
+                    if rx_tid == 0:
+                        return self._receive_onu_message(rx_frame)
+
+ self._rx_frames += 1
+ self._consecutive_errors = 0
+
+ except Exception as e:
+ self.log.exception('frame-decode', e=e)
+ return
+
+ try:
+ (ts, d, _, _) = self._requests.pop(rx_tid)
+
+ ts_diff = now - arrow.Arrow.utcfromtimestamp(ts)
+ secs = ts_diff.total_seconds()
+ self._reply_sum += secs
+
+ if secs < self._reply_min:
+ self._reply_min = secs
+
+ if secs > self._reply_max:
+ self._reply_max = secs
+
+ # TODO: Could also validate response type based on request action
+
+ except KeyError:
+ self.log.warn('message-missing', rx_id=rx_tid)
+ return
+
+ except Exception as e:
+ self.log.exception('frame-decode', e=e)
+ if d is not None:
+ return d.errback(failure.Failure(e))
+ return
+
+ d.callback(rx_frame)
+
+ except Exception as e:
+ self.log.exception('rx-msg', e=e)
+
+ def flush(self, max_age=0):
+ limit = arrow.utcnow().float_timestamp - max_age
+ old = [tid for tid, (ts, _, _, _) in self._requests.iteritems()
+ if ts <= limit]
+
+ for tid in old:
+ (_, d, _, _) = self._requests.pop(tid)
+ if d is not None and not d.called:
+ d.cancel()
+
+        if max_age == 0:
+            # Flush autonomous messages. get() returns an already-fired
+            # Deferred when items are pending, so no yield is needed here
+            # (a stray yield would turn flush() into a generator and skip
+            # the request cleanup above).
+            while self._onu_messages.pending:
+                _ = self._onu_messages.get()
+
+ def _get_tx_tid(self):
+ """
+        Get the next Transaction ID for a tx. Note that 0 is reserved
+        for autonomously generated messages from an ONU
+
+ :return: (int) TID
+ """
+ tx_tid, self._tx_tid = self._tx_tid, self._tx_tid + 1
+ if self._tx_tid > MAX_OMCI_TX_ID:
+ self._tx_tid = 1
+
+ return tx_tid
+
+ def _request_failure(self, value, tx_tid):
+ if tx_tid in self._requests:
+ (_, _, _, timeout) = self._requests.pop(tx_tid)
+ else:
+ # tx_msg = None
+ timeout = 0
+
+ if isinstance(value, failure.Failure):
+ value.trap(CancelledError)
+ self._rx_timeouts += 1
+ self._consecutive_errors += 1
+ self.log.info('timeout', tx_id=tx_tid, timeout=timeout)
+ value = failure.Failure(TimeoutError(timeout, "Deferred"))
+
+ return value
+
+ def send(self, frame, timeout=DEFAULT_OMCI_TIMEOUT):
+ self.flush(max_age=MAX_OMCI_REQUEST_AGE)
+
+ assert timeout <= MAX_OMCI_REQUEST_AGE, \
+ 'Maximum timeout is {} seconds'.format(MAX_OMCI_REQUEST_AGE)
+ assert isinstance(frame, OmciFrame), \
+ "Invalid frame class '{}'".format(type(frame))
+
+ if not self.enabled or self._proxy_address is None:
+ # TODO custom exceptions throughout this code would be helpful
+ return fail(result=failure.Failure(Exception('OMCI is not enabled')))
+
+ try:
+ tx_tid = frame.fields['transaction_id']
+ if tx_tid is None:
+ tx_tid = self._get_tx_tid()
+ frame.fields['transaction_id'] = tx_tid
+
+            assert tx_tid not in self._requests, 'TX TID already exists'
+ assert tx_tid >= 0, 'Invalid Tx TID: {}'.format(tx_tid)
+
+ ts = arrow.utcnow().float_timestamp
+ d = defer.Deferred()
+
+ self._adapter.adapter_agent.send_proxied_message(self._proxy_address,
+ hexify(str(frame)))
+ self._tx_frames += 1
+ self._requests[tx_tid] = (ts, d, frame, timeout)
+
+ d.addErrback(self._request_failure, tx_tid)
+
+ if timeout > 0:
+ d.addTimeout(timeout, reactor)
+
+ except Exception as e:
+ self._tx_errors += 1
+ self._consecutive_errors += 1
+ self.log.exception('send-omci', e=e)
+ return fail(result=failure.Failure(e))
+
+ return d
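+
+    # Illustrative usage (a sketch only): send() returns a Deferred that fires
+    # with the decoded response frame, so callers typically run under
+    # @inlineCallbacks and yield on it, e.g.
+    #
+    #     response = yield omci.send_mib_reset(timeout=3)
+    #
+    # where 'omci' names an instance of this class. A timeout is delivered to
+    # the caller as a TimeoutError failure (see _request_failure above).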
+
+ def send_get_OntG(self, attribute, entity_id=0, timeout=DEFAULT_OMCI_TIMEOUT):
+ self.log.debug('send_get_OntG')
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciGet.message_id,
+ omci_message=OmciGet(
+ entity_class=OntG.class_id,
+ entity_id=entity_id,
+ attributes_mask=OntG.mask_for(attribute)
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_mib_reset(self, entity_id=0, timeout=DEFAULT_OMCI_TIMEOUT):
+ self.log.debug('send_mib_reset')
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciMibReset.message_id,
+ omci_message=OmciMibReset(
+ entity_class=OntData.class_id,
+ entity_id=entity_id
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_set_tcont(self, entity_id, alloc_id, timeout=DEFAULT_OMCI_TIMEOUT):
+ data = dict(
+ alloc_id=alloc_id
+ )
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=Tcont.class_id,
+ entity_id=entity_id,
+ attributes_mask=Tcont.mask_for(*data.keys()),
+ data=data
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_gem_port_network_ctp(self, entity_id, port_id,
+ tcont_id, direction, tm,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+
+ _directions = {"upstream": 1, "downstream": 2, "bi-directional": 3}
+
+        if direction in _directions:
+ _direction = _directions[direction]
+ else:
+ self.log.error('invalid-gem-port-direction', direction=direction)
+ raise ValueError('Invalid GEM port direction: {_dir}'.format(_dir=direction))
+
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=GemPortNetworkCtp.class_id,
+ entity_id=entity_id,
+ data=dict(
+ port_id=port_id,
+ tcont_pointer=tcont_id,
+ direction=_direction,
+ traffic_management_pointer_upstream=tm
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_set_8021p_mapper_service_profile(self, entity_id,
+ interwork_tp_id,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ data = dict(
+ interwork_tp_pointer_for_p_bit_priority_0=interwork_tp_id,
+ interwork_tp_pointer_for_p_bit_priority_1=interwork_tp_id,
+ interwork_tp_pointer_for_p_bit_priority_2=interwork_tp_id,
+ interwork_tp_pointer_for_p_bit_priority_3=interwork_tp_id,
+ interwork_tp_pointer_for_p_bit_priority_4=interwork_tp_id,
+ interwork_tp_pointer_for_p_bit_priority_5=interwork_tp_id,
+ interwork_tp_pointer_for_p_bit_priority_6=interwork_tp_id,
+ interwork_tp_pointer_for_p_bit_priority_7=interwork_tp_id
+ )
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=Ieee8021pMapperServiceProfile.class_id,
+ entity_id=entity_id,
+ attributes_mask=Ieee8021pMapperServiceProfile.mask_for(
+ *data.keys()),
+ data=data
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_8021p_mapper_service_profile(self, entity_id, timeout=DEFAULT_OMCI_TIMEOUT):
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=Ieee8021pMapperServiceProfile.class_id,
+ entity_id=entity_id,
+ data=dict(
+ tp_pointer=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_0=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_1=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_2=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_3=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_4=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_5=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_6=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_7=OmciNullPointer
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_mac_bridge_service_profile(self, entity_id, timeout=DEFAULT_OMCI_TIMEOUT):
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=MacBridgeServiceProfile.class_id,
+ entity_id=entity_id,
+ data=dict(
+ spanning_tree_ind=False,
+ learning_ind=True,
+ priority=0x8000,
+ max_age=20 * 256,
+ hello_time=2 * 256,
+ forward_delay=15 * 256,
+ unknown_mac_address_discard=True
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_gal_ethernet_profile(self, entity_id, max_gem_payload_size,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=GalEthernetProfile.class_id,
+ entity_id=entity_id,
+ data=dict(
+ max_gem_payload_size=max_gem_payload_size
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_gem_inteworking_tp(self, entity_id, gem_port_net_ctp_id,
+ service_profile_id, timeout=DEFAULT_OMCI_TIMEOUT):
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=GemInterworkingTp.class_id,
+ entity_id=entity_id,
+ data=dict(
+ gem_port_network_ctp_pointer=gem_port_net_ctp_id,
+ interworking_option=5,
+ service_profile_pointer=service_profile_id,
+ interworking_tp_pointer=0x0,
+ gal_profile_pointer=0x1
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_mac_bridge_port_configuration_data(self, entity_id, bridge_id,
+ port_id, tp_type, tp_id,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=MacBridgePortConfigurationData.class_id,
+ entity_id=entity_id,
+ data=dict(
+ bridge_id_pointer=bridge_id,
+ port_num=port_id,
+ tp_type=tp_type,
+ tp_pointer=tp_id
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_vlan_tagging_filter_data(self, entity_id, vlan_id,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=VlanTaggingFilterData.class_id,
+ entity_id=entity_id,
+ data=dict(
+ vlan_filter_0=vlan_id,
+ forward_operation=0x10,
+ number_of_entries=1
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ # def send_get_device_info(self, attribute, entity_id=0, timeout=DEFAULT_OMCI_TIMEOUT):
+ # # TODO: Can this be combined with send_get_circuit_pack above?
+ # frame = OmciFrame(
+ # transaction_id=self._get_tx_tid(),
+ # message_type=OmciGet.message_id,
+ # omci_message=OmciGet(
+ # entity_class=CircuitPack.class_id,
+ # entity_id=entity_id,
+ # attributes_mask=CircuitPack.mask_for(attribute)
+ # )
+ # )
+ # return self.send(frame, timeout)
+
+ def send_set_adminState(self, entity_id, timeout=DEFAULT_OMCI_TIMEOUT):
+ self.log.debug('send_set_AdminState')
+ data = dict(
+ administrative_state=0
+ )
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=PptpEthernetUni.class_id,
+ entity_id=entity_id,
+ attributes_mask=PptpEthernetUni.mask_for(*data.keys()),
+ data=data
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_get_SoftwareImage(self, attribute, entity_id=0, timeout=DEFAULT_OMCI_TIMEOUT):
+ self.log.debug('send_get_SoftwareImage')
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciGet.message_id,
+ omci_message=OmciGet(
+ entity_class=SoftwareImage.class_id,
+ entity_id=entity_id,
+ attributes_mask=SoftwareImage.mask_for(attribute)
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_create_extended_vlan_tagging_operation_configuration_data(self,
+ entity_id,
+ assoc_type,
+ assoc_me,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=
+ ExtendedVlanTaggingOperationConfigurationData.class_id,
+ entity_id=entity_id,
+ data=dict(
+ association_type=assoc_type,
+ associated_me_pointer=assoc_me
+ )
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_set_extended_vlan_tagging_operation_tpid_configuration_data(self,
+ entity_id,
+ input_tpid,
+ output_tpid,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ data = dict(
+ input_tpid=input_tpid,
+ output_tpid=output_tpid,
+ downstream_mode=0, # inverse of upstream
+ )
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=
+ ExtendedVlanTaggingOperationConfigurationData.class_id,
+ entity_id=entity_id,
+ attributes_mask=
+ ExtendedVlanTaggingOperationConfigurationData.mask_for(
+ *data.keys()),
+ data=data
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_set_extended_vlan_tagging_operation_vlan_configuration_data_untagged(self,
+ entity_id,
+ filter_inner_vid,
+ treatment_inner_vid,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ data = dict(
+ received_frame_vlan_tagging_operation_table=
+ VlanTaggingOperation(
+ filter_outer_priority=15,
+ filter_outer_vid=4096,
+ filter_outer_tpid_de=0,
+
+ filter_inner_priority=15,
+ filter_inner_vid=filter_inner_vid,
+ filter_inner_tpid_de=0,
+ filter_ether_type=0,
+
+ treatment_tags_to_remove=0,
+ treatment_outer_priority=15,
+ treatment_outer_vid=0,
+ treatment_outer_tpid_de=0,
+
+ treatment_inner_priority=0,
+ treatment_inner_vid=treatment_inner_vid,
+ treatment_inner_tpid_de=4
+ )
+ )
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=
+ ExtendedVlanTaggingOperationConfigurationData.class_id,
+ entity_id=entity_id,
+ attributes_mask=
+ ExtendedVlanTaggingOperationConfigurationData.mask_for(
+ *data.keys()),
+ data=data
+ )
+ )
+ return self.send(frame, timeout)
+
+ def send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(self,
+ entity_id,
+ filter_inner_priority,
+ filter_inner_vid,
+ filter_inner_tpid_de,
+ treatment_tags_to_remove,
+ treatment_inner_priority,
+ treatment_inner_vid,
+ timeout=DEFAULT_OMCI_TIMEOUT):
+ data = dict(
+ received_frame_vlan_tagging_operation_table=
+ VlanTaggingOperation(
+ filter_outer_priority=15,
+ filter_outer_vid=4096,
+ filter_outer_tpid_de=0,
+ filter_inner_priority=filter_inner_priority,
+ filter_inner_vid=filter_inner_vid,
+ filter_inner_tpid_de=filter_inner_tpid_de,
+ filter_ether_type=0,
+ treatment_tags_to_remove=treatment_tags_to_remove,
+ treatment_outer_priority=15,
+ treatment_outer_vid=0,
+ treatment_outer_tpid_de=0,
+ treatment_inner_priority=treatment_inner_priority,
+ treatment_inner_vid=treatment_inner_vid,
+ treatment_inner_tpid_de=4
+ )
+ )
+ frame = OmciFrame(
+ transaction_id=self._get_tx_tid(),
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=
+ ExtendedVlanTaggingOperationConfigurationData.class_id,
+ entity_id=entity_id,
+ attributes_mask=
+ ExtendedVlanTaggingOperationConfigurationData.mask_for(
+ *data.keys()),
+ data=data
+ )
+ )
+ return self.send(frame, timeout)
diff --git a/voltha/adapters/adtran_onu/omci/omci_defs.py b/voltha/adapters/adtran_onu/omci/omci_defs.py
new file mode 100644
index 0000000..5b3f5a8
--- /dev/null
+++ b/voltha/adapters/adtran_onu/omci/omci_defs.py
@@ -0,0 +1,32 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+""" Additional definitions not found in OMCI library"""
+
+from enum import Enum
+
+
+class ReasonCodes(Enum):
+ # OMCI Result and reason codes
+
+    Success = 0            # Command processed successfully
+    ProcessingError = 1    # Command processing error
+    NotSupported = 2       # Command not supported
+    ParameterError = 3     # Parameter error
+    UnknownEntity = 4      # Unknown managed entity
+    UnknownInstance = 5    # Unknown managed entity instance
+    DeviceBusy = 6         # Device busy
+    InstanceExists = 7     # Instance Exists
+    AttributeFailure = 9   # Attribute(s) failed or unknown
diff --git a/voltha/adapters/adtran_onu/omci/omci_me.py b/voltha/adapters/adtran_onu/omci/omci_me.py
new file mode 100644
index 0000000..f51bbd8
--- /dev/null
+++ b/voltha/adapters/adtran_onu/omci/omci_me.py
@@ -0,0 +1,742 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+OMCI Message support
+"""
+from voltha.extensions.omci.omci import *
+
+# abbreviations
+OP = EntityOperations
+
+
+class MEFrame(object):
+ """Base class to help simplify Frame Creation"""
+ def __init__(self, entity_class, entity_id, data):
+ assert issubclass(entity_class, EntityClass), \
+            "'{}' must be a subclass of EntityClass".format(entity_class)
+ self.check_type(entity_id, int)
+
+ if not 0 <= entity_id <= 0xFFFF:
+ raise ValueError('entity_id should be 0..65535')
+
+ self._class = entity_class
+ self._entity_id = entity_id
+ self.data = data
+
+ # TODO: add a required attributes check list for various operations
+ # that each derive class can set as required. Then check these
+ # in the appropriate operation method in this class and assert
+ # if something is missing
+
+ def __str__(self):
+ return '{}: Entity_ID: {}, Data: {}'.\
+ format(type(self.entity_class), self._entity_id, self.data)
+
+ @staticmethod
+ def check_type(param, types):
+ if not isinstance(param, types):
+ raise TypeError("param '{}' should be a {}".format(param, types))
+
+ @property
+ def entity_class(self):
+ """
+ The Entity Class for this ME
+ :return: (EntityClass) Entity class
+ """
+ return self._class
+
+ @property
+ def entity_id(self):
+ """
+ The Entity ID for this ME frame
+ :return: (int) Entity ID (0..0xFFFF)
+ """
+ return self._entity_id
+
+ def create(self):
+ """
+ Create a Create request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert OP.Create in self.entity_class.mandatory_operations, \
+            "Create not allowed for '{}'".format(self.entity_class)
+ assert hasattr(self.entity_class, 'class_id'), 'class_id required for Create actions'
+ assert hasattr(self, 'entity_id'), 'entity_id required for Create actions'
+ assert hasattr(self, 'data'), 'data required for Create actions'
+
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, dict)
+ assert len(data) > 0, 'No attributes supplied'
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciCreate.message_id,
+ omci_message=OmciCreate(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ data=data
+ ))
+
+ def delete(self):
+ """
+ Create a Delete request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert OP.Delete in self.entity_class.mandatory_operations, \
+ "Delete not allowed for '{}'".format(self.entity_class)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciDelete.message_id,
+            omci_message=OmciDelete(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id')
+            ))
+
+ def set(self):
+ """
+ Create a Set request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert OP.Set in self.entity_class.mandatory_operations, \
+ "Set not allowed for '{}'".format(self.entity_class)
+ assert hasattr(self, 'data'), 'data required for Set actions'
+
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, dict)
+ assert len(data) > 0, 'No attributes supplied'
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciSet.message_id,
+ omci_message=OmciSet(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ attributes_mask=self.entity_class.mask_for(*data.keys()),
+ data=data
+ ))
+
+ def get(self):
+ """
+ Create a Get request frame for this ME
+ :return: (OmciFrame) OMCI Frame
+ """
+ assert OP.Get in self.entity_class.mandatory_operations, \
+ "Get not allowed for '{}'".format(self.entity_class)
+ assert hasattr(self, 'data'), 'data required for Get actions'
+
+ data = getattr(self, 'data')
+ MEFrame.check_type(data, (list, set, dict))
+ assert len(data) > 0, 'No attributes supplied'
+
+ mask_set = data.keys() if isinstance(data, dict) else data
+
+ return OmciFrame(
+ transaction_id=None,
+ message_type=OmciGet.message_id,
+ omci_message=OmciGet(
+ entity_class=getattr(self.entity_class, 'class_id'),
+ entity_id=getattr(self, 'entity_id'),
+ attributes_mask=self.entity_class.mask_for(*mask_set)
+ ))
+
+ @staticmethod
+ def _attr_to_data(attributes):
+ """
+ Convert an object into the 'data' set or dictionary for get/set/create/delete
+ requests.
+
+ This method takes a 'string', 'list', or 'set' for get requests and
+ converts it to a 'set' of attributes.
+
+ For create/set requests a dictionary of attribute/value pairs is required
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, set, or dict can be provided. For create/set
+ operations, a dictionary should be provided. For delete
+ the attributes may be None since they are ignored.
+
+ :return: (set, dict) set for get/deletes, dict for create/set
+ """
+ if isinstance(attributes, basestring):
+ # data = [str(attributes)]
+ data = set()
+ data.add(str(attributes))
+
+ elif isinstance(attributes, list):
+ assert all(isinstance(attr, basestring) for attr in attributes),\
+ 'attribute list must be strings'
+ data = {str(attr) for attr in attributes}
+ assert len(data) == len(attributes), 'Attributes were not unique'
+
+ elif isinstance(attributes, set):
+ assert all(isinstance(attr, basestring) for attr in attributes),\
+ 'attribute set must be strings'
+ data = {str(attr) for attr in attributes}
+
+ elif isinstance(attributes, (dict, type(None))):
+ data = attributes
+
+ else:
+ raise TypeError("Unsupported attributes type '{}'".format(type(attributes)))
+
+ return data
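+
+    # Illustrative examples of the conversion above (the attribute names are
+    # placeholders chosen only for documentation):
+    #
+    #     MEFrame._attr_to_data('vendor_id')                  -> {'vendor_id'}
+    #     MEFrame._attr_to_data(['vendor_id', 'version'])     -> {'vendor_id', 'version'}
+    #     MEFrame._attr_to_data({'administrative_state': 0})  -> {'administrative_state': 0}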
+
+
+class CardholderFrame(MEFrame):
+ """
+ This managed entity represents fixed equipment slot configuration
+ for the ONU
+ """
+ def __init__(self, single, slot_number, attributes):
+ """
+ :param single:(bool) True if the ONU is a single piece of integrated equipment,
+ False if the ONU contains pluggable equipment modules
+ :param slot_number: (int) slot number (0..254)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ # Validate
+ MEFrame.check_type(single, bool)
+ MEFrame.check_type(slot_number, int)
+ if not 0 <= slot_number <= 254:
+ raise ValueError('slot_number should be 0..254')
+
+ entity_id = 256 + slot_number if single else slot_number
+
+ super(CardholderFrame, self).__init__(Cardholder, entity_id,
+ MEFrame._attr_to_data(attributes))
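+
+    # Illustrative usage (a sketch; the attribute name is an assumption, not
+    # taken from this change): read the plug-in unit type of slot 0 on an
+    # integrated ONU, which maps to entity ID 256 + 0 per the rule above:
+    #
+    #     frame = CardholderFrame(True, 0, 'actual_plugin_unit_type').get()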
+
+
+class CircuitPackFrame(MEFrame):
+ """
+ This managed entity models a real or virtual circuit pack that is equipped in
+ a real or virtual ONU slot.
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. Its value is the same as that
+ of the cardholder managed entity containing this
+ circuit pack instance. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(CircuitPackFrame, self).__init__(CircuitPack, entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class IpHostConfigDataFrame(MEFrame):
+ """
+ The IP host config data configures IPv4 based services offered on the ONU.
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(IpHostConfigDataFrame, self).__init__(IpHostConfigData,
+ entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class GemInterworkingTpFrame(MEFrame):
+ """
+ An instance of this managed entity represents a point in the ONU where the
+ interworking of a bearer service (usually Ethernet) to the GEM layer takes
+ place.
+ """
+ def __init__(self, entity_id,
+ gem_port_network_ctp_pointer=None,
+ interworking_option=None,
+ service_profile_pointer=None,
+ interworking_tp_pointer=None,
+ gal_profile_pointer=None,
+ attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param gem_port_network_ctp_pointer: (int) This attribute points to an instance of
+ the GEM port network CTP. (0..65535)
+
+ :param interworking_option: (int) This attribute identifies the type
+ of non-GEM function that is being interworked.
+ The options are:
+ 0 Circuit-emulated TDM
+ 1 MAC bridged LAN
+ 2 Reserved
+ 3 Reserved
+ 4 Video return path
+ 5 IEEE 802.1p mapper
+ 6 Downstream broadcast
+ 7 MPLS PW TDM service
+
+ :param service_profile_pointer: (int) This attribute points to an instance of
+ a service profile.
+ CES service profile if interworking option = 0
+ MAC bridge service profile if interworking option = 1
+ Video return path service profile if interworking option = 4
+ IEEE 802.1p mapper service profile if interworking option = 5
+ Null pointer if interworking option = 6
+ CES service profile if interworking option = 7
+
+ :param interworking_tp_pointer: (int) This attribute is used for the circuit
+ emulation service and IEEE 802.1p mapper
+ service without a MAC bridge.
+
+ :param gal_profile_pointer: (int) This attribute points to an instance of
+ a service profile.
+
+        :param attributes: (basestring, list, set, dict) additional ME attributes
+                           not specifically specified as a parameter. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided; for
+                           deletes None may be specified.
+ """
+ # Validate
+        self.check_type(gem_port_network_ctp_pointer, (int, type(None)))
+        self.check_type(interworking_option, (int, type(None)))
+        self.check_type(service_profile_pointer, (int, type(None)))
+        self.check_type(interworking_tp_pointer, (int, type(None)))
+        self.check_type(gal_profile_pointer, (int, type(None)))
+
+ if gem_port_network_ctp_pointer is not None and not 0 <= gem_port_network_ctp_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('gem_port_network_ctp_pointer should be 0..0xFFFE')
+
+ if interworking_option is not None and not 0 <= interworking_option <= 7:
+ raise ValueError('interworking_option should be 0..7')
+
+ if service_profile_pointer is not None and not 0 <= service_profile_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('service_profile_pointer should be 0..0xFFFE')
+
+ if interworking_tp_pointer is not None and not 0 <= interworking_tp_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('interworking_tp_pointer should be 0..0xFFFE')
+
+ if gal_profile_pointer is not None and not 0 <= gal_profile_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('gal_profile_pointer should be 0..0xFFFE')
+
+ data = MEFrame._attr_to_data(attributes)
+
+ if gem_port_network_ctp_pointer is not None or \
+ interworking_option is not None or \
+ service_profile_pointer is not None or \
+ interworking_tp_pointer is not None or \
+ gal_profile_pointer is not None:
+
+ data = data or dict()
+
+            if gem_port_network_ctp_pointer is not None:
+                data['gem_port_network_ctp_pointer'] = gem_port_network_ctp_pointer
+
+            if interworking_option is not None:
+                data['interworking_option'] = interworking_option
+
+            if service_profile_pointer is not None:
+                data['service_profile_pointer'] = service_profile_pointer
+
+            if interworking_tp_pointer is not None:
+                data['interworking_tp_pointer'] = interworking_tp_pointer
+
+            if gal_profile_pointer is not None:
+                data['gal_profile_pointer'] = gal_profile_pointer
+
+ super(GemInterworkingTpFrame, self).__init__(GemInterworkingTp,
+ entity_id,
+ data)
+
+
+class GemPortNetworkCtpFrame(MEFrame):
+ """
+ This managed entity represents the termination of a GEM port on an ONU.
+ """
+ def __init__(self, entity_id, port_id=None, tcont_id=None,
+ direction=None, upstream_tm=None, attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param port_id: (int) This attribute is the port-ID of the GEM port associated
+ with this CTP
+
+ :param tcont_id: (int) This attribute points to a T-CONT instance
+
+ :param direction: (string) Data direction. Valid values are:
+ 'upstream' - UNI-to-ANI
+ 'downstream' - ANI-to-UNI
+ 'bi-directional' - guess :-)
+
+ :param upstream_tm: (int) If the traffic management option attribute in
+ the ONU-G ME is 0 (priority controlled) or 2
+ (priority and rate controlled), this pointer
+ specifies the priority queue ME serving this GEM
+ port network CTP. If the traffic management
+ option attribute is 1 (rate controlled), this
+ attribute redundantly points to the T-CONT serving
+ this GEM port network CTP.
+
+        :param attributes: (basestring, list, set, dict) additional ME attributes
+                           not specifically specified as a parameter. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ _directions = {"upstream": 1, "downstream": 2, "bi-directional": 3}
+
+ # Validate
+        self.check_type(port_id, (int, type(None)))
+        self.check_type(tcont_id, (int, type(None)))
+        self.check_type(direction, (basestring, type(None)))
+        self.check_type(upstream_tm, (int, type(None)))
+
+ if port_id is not None and not 0 <= port_id <= 0xFFFE: # TODO: Verify max
+ raise ValueError('port_id should be 0..0xFFFE')
+
+ if tcont_id is not None and not 0 <= tcont_id <= 0xFFFE: # TODO: Verify max
+ raise ValueError('tcont_id should be 0..0xFFFE')
+
+ if direction is not None and str(direction).lower() not in _directions:
+            raise ValueError('direction should be one of {}'.format(_directions.keys()))
+
+ if upstream_tm is not None and not 0 <= upstream_tm <= 0xFFFE: # TODO: Verify max
+ raise ValueError('upstream_tm should be 0..0xFFFE')
+
+ data = MEFrame._attr_to_data(attributes)
+
+ if port_id is not None or tcont_id is not None or\
+ direction is not None or upstream_tm is not None:
+
+ data = data or dict()
+
+            if port_id is not None:
+                data['port_id'] = port_id
+            if tcont_id is not None:
+                data['tcont_pointer'] = tcont_id
+            if direction is not None:
+                data['direction'] = _directions[str(direction).lower()]
+            if upstream_tm is not None:
+                data['traffic_management_pointer_upstream'] = upstream_tm
+
+ super(GemPortNetworkCtpFrame, self).__init__(GemPortNetworkCtp,
+ entity_id,
+ data)
+
+
+class Ieee8021pMapperServiceProfileFrame(MEFrame):
+ """
+ This managed entity associates the priorities of IEEE 802.1p [IEEE
+ 802.1D] priority tagged frames with specific connections.
+ """
+ def __init__(self, entity_id, tp_pointer=None, interwork_tp_pointers=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param tp_pointer: (int) This attribute points to an instance of the
+ associated termination point. (0..65535)
+
+ :param interwork_tp_pointers: (list) List of 1 to 8 interworking termination
+                                      point IDs. The first entry is assigned
+                                      to p-bit priority 0. If fewer than 8 IDs
+                                      are provided, the last ID is used for
+                                      the remaining items.
+ """
+ if tp_pointer is None and interwork_tp_pointers is None:
+ data = dict(
+ tp_pointer=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_0=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_1=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_2=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_3=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_4=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_5=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_6=OmciNullPointer,
+ interwork_tp_pointer_for_p_bit_priority_7=OmciNullPointer
+ )
+ else:
+            self.check_type(tp_pointer, (int, type(None)))
+            self.check_type(interwork_tp_pointers, (list, type(None)))
+
+ data = dict()
+
+            if tp_pointer is not None:
+                data['tp_pointer'] = tp_pointer
+
+ if interwork_tp_pointers is not None:
+ assert all(isinstance(tp, int) and 0 <= tp <= 0xFFFF
+ for tp in interwork_tp_pointers),\
+ 'Interworking TP IDs must be 0..0xFFFF'
+ assert 1 <= len(interwork_tp_pointers) <= 8, \
+ 'Invalid number of Interworking TP IDs. Must be 1..8'
+
+                for pbit in range(0, len(interwork_tp_pointers)):
+                    data['interwork_tp_pointer_for_p_bit_priority_{}'.format(pbit)] = \
+                        interwork_tp_pointers[pbit]
+
+                for pbit in range(len(interwork_tp_pointers), 8):
+                    data['interwork_tp_pointer_for_p_bit_priority_{}'.format(pbit)] = \
+                        interwork_tp_pointers[len(interwork_tp_pointers) - 1]
+
+ super(Ieee8021pMapperServiceProfileFrame, self).__init__(Ieee8021pMapperServiceProfile,
+ entity_id,
+ data)
+
+
+class MacBridgePortConfigurationDataFrame(MEFrame):
+ """
+    This managed entity models a port on a MAC bridge.
+ """
+ def __init__(self, entity_id, bridge_id_pointer=None, port_num=None,
+ tp_type=None, tp_pointer=None, attributes=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param bridge_id_pointer: (int) This attribute points to an instance of the
+ MAC bridge service profile. (0..65535)
+
+ :param port_num: (int) This attribute is the bridge port number. (0..255)
+
+ :param tp_type: (int) This attribute identifies the type of termination point
+ associated with this MAC bridge port. Valid values are:
+ 1 Physical path termination point Ethernet UNI
+ 2 Interworking VCC termination point
+ 3 IEEE 802.1p mapper service profile
+ 4 IP host config data or IPv6 host config data
+ 5 GEM interworking termination point
+ 6 Multicast GEM interworking termination point
+ 7 Physical path termination point xDSL UNI part 1
+ 8 Physical path termination point VDSL UNI
+ 9 Ethernet flow termination point
+ 10 Reserved
+ 11 Virtual Ethernet interface point
+ 12 Physical path termination point MoCA UNI
+
+ :param tp_pointer: (int) This attribute points to the termination point
+                           associated with this MAC bridge port. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) additional ME attributes
+                           not specifically specified as a parameter. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ # Validate
+        self.check_type(bridge_id_pointer, (int, type(None)))
+        self.check_type(port_num, (int, type(None)))
+        self.check_type(tp_type, (int, type(None)))
+        self.check_type(tp_pointer, (int, type(None)))
+
+ if bridge_id_pointer is not None and not 0 <= bridge_id_pointer <= 0xFFFE: # TODO: Verify max
+ raise ValueError('bridge_id_pointer should be 0..0xFFFE')
+
+ if port_num is not None and not 0 <= port_num <= 255:
+ raise ValueError('port_num should be 0..255') # TODO: Verify min,max
+
+        if tp_type is not None and not 1 <= tp_type <= 12:
+            raise ValueError('tp_type should be 1..12')
+
+        if tp_pointer is not None and not 0 <= tp_pointer <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('tp_pointer should be 0..0xFFFE')
+
+ data = MEFrame._attr_to_data(attributes)
+
+ if bridge_id_pointer is not None or \
+ port_num is not None or \
+ tp_type is not None or \
+ tp_pointer is not None:
+
+ data = data or dict()
+
+            if bridge_id_pointer is not None:
+                data['bridge_id_pointer'] = bridge_id_pointer
+
+            if port_num is not None:
+                data['port_num'] = port_num
+
+            if tp_type is not None:
+                data['tp_type'] = tp_type
+
+            if tp_pointer is not None:
+                data['tp_pointer'] = tp_pointer
+
+ super(MacBridgePortConfigurationDataFrame, self).\
+ __init__(MacBridgePortConfigurationData, entity_id, data)
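+
+    # Illustrative usage (entity IDs and pointers are assumed values): attach
+    # an IEEE 802.1p mapper (tp_type=3) as port 1 of an existing bridge:
+    #
+    #     frame = MacBridgePortConfigurationDataFrame(
+    #         0x201, bridge_id_pointer=0x201, port_num=1,
+    #         tp_type=3, tp_pointer=0x8001).create()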
+
+
+class OntGFrame(MEFrame):
+ """
+ This managed entity represents the ONU as equipment.
+ """
+ def __init__(self, attributes):
+ """
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(OntGFrame, self).__init__(OntG, 0,
+ MEFrame._attr_to_data(attributes))
+
+
+class Ont2GFrame(MEFrame):
+ """
+ This managed entity contains additional attributes associated with a PON ONU.
+ """
+ def __init__(self, attributes):
+ """
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ # Only one managed entity instance (Entity ID=0)
+ super(Ont2GFrame, self).__init__(Ont2G, 0,
+ MEFrame._attr_to_data(attributes))
+
+
+class PptpEthernetUniFrame(MEFrame):
+ """
+ This managed entity represents the point at an Ethernet UNI where the physical path
+ terminates and Ethernet physical level functions are performed.
+ """
+ def __init__(self, entity_id, attributes):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param attributes: (basestring, list, set, dict) attributes. For gets
+ a string, list, or set can be provided. For create/set
+ operations, a dictionary should be provided, for
+ deletes None may be specified.
+ """
+ super(PptpEthernetUniFrame, self).__init__(PptpEthernetUni, entity_id,
+ MEFrame._attr_to_data(attributes))
+
+
+class SoftwareImageFrame(MEFrame):
+ """
+ This managed entity models an executable software image stored in the ONU.
+ """
+ def __init__(self, entity_id):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+ """
+ super(SoftwareImageFrame, self).__init__(SoftwareImage, entity_id, None)
+
+
+class TcontFrame(MEFrame):
+ """
+ An instance of the traffic container managed entity T-CONT represents a
+ logical connection group associated with a G-PON PLOAM layer alloc-ID.
+ """
+ def __init__(self, entity_id, alloc_id=None, policy=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param alloc_id: (int) This attribute links the T-CONT with the alloc-ID
+ assigned by the OLT in the assign_alloc-ID PLOAM
+ message (0..0xFFF)
+
+ :param policy: (int) This attribute indicates the T-CONT's traffic scheduling
+ policy. Valid values:
+ 0 - Null
+ 1 - Strict priority
+ 2 - WRR - Weighted round robin
+ """
+ # Validate
+        self.check_type(alloc_id, (int, type(None)))
+        self.check_type(policy, (int, type(None)))
+
+ if alloc_id is not None and not 0 <= alloc_id <= 0xFFF:
+ raise ValueError('alloc_id should be 0..0xFFF')
+
+ if policy is not None and not 0 <= policy <= 2:
+ raise ValueError('policy should be 0..2')
+
+ if alloc_id is None and policy is None:
+ data = None
+ else:
+ data = dict()
+
+            if alloc_id is not None:
+                data['alloc_id'] = alloc_id
+
+            if policy is not None:
+                data['policy'] = policy
+
+ super(TcontFrame, self).__init__(Tcont, entity_id, data)
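+
+    # Illustrative usage (values assumed for documentation only): link
+    # alloc-ID 0x400 to the T-CONT instance at entity ID 0x8001:
+    #
+    #     frame = TcontFrame(0x8001, alloc_id=0x400).set()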
+
+
+class VlanTaggingFilterDataFrame(MEFrame):
+ """
+    An instance of this managed entity organizes data associated with VLAN
+    tagging filtering on a MAC bridge port. Instances of this managed entity
+    are created and deleted by the OLT.
+ """
+ def __init__(self, entity_id, vlan_tcis=None, forward_operation=None):
+ """
+ :param entity_id: (int) This attribute uniquely identifies each instance of
+ this managed entity. (0..65535)
+
+ :param vlan_tcis: (list) This attribute is a list of provisioned TCI values
+ for the bridge port. (0..0xFFFF)
+
+ :param forward_operation: (int) What to do. See ITU spec for more information
+
+ """
+ # Validate
+        self.check_type(vlan_tcis, (list, type(None)))
+        self.check_type(forward_operation, (int, type(None)))
+
+ if forward_operation is not None and not 0 <= forward_operation <= 0x21:
+ raise ValueError('forward_operation should be 0..0x21')
+
+ if vlan_tcis is None and forward_operation is None:
+ data = None
+ else:
+ data = dict()
+
+ if vlan_tcis is not None:
+ assert all(isinstance(tci, int) and 0 <= tci <= 0xFFFF
+ for tci in vlan_tcis), "VLAN TCI's are 0..0xFFFF"
+ for index in range(0, len(vlan_tcis)):
+ data['vlan_filter_{}'.format(index)] = vlan_tcis[index]
+ data['number_of_entries'] = len(vlan_tcis)
+
+            if forward_operation is not None:
+                data['forward_operation'] = forward_operation
+
+ super(VlanTaggingFilterDataFrame, self).__init__(VlanTaggingFilterData,
+ entity_id,
+ data)
+
+
+# TODO: Wednesday - Start with send_create_extended_vlan_tagging_operation_configuration_data
diff --git a/voltha/adapters/adtran_onu/onu_gem_port.py b/voltha/adapters/adtran_onu/onu_gem_port.py
new file mode 100644
index 0000000..b0aecd1
--- /dev/null
+++ b/voltha/adapters/adtran_onu/onu_gem_port.py
@@ -0,0 +1,155 @@
+
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from voltha.adapters.adtran_olt.xpon.gem_port import GemPort
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+from omci.omci_me import GemPortNetworkCtpFrame
+
+
+class OnuGemPort(GemPort):
+ """
+ Adtran ONU specific implementation
+ """
+ def __init__(self, gem_id, alloc_id,
+ encryption=False,
+ omci_transport=False,
+ multicast=False,
+ tcont_ref=None,
+ traffic_class=None,
+ intf_ref=None,
+ exception=False, # FIXED_ONU
+ name=None,
+ handler=None,
+ is_mock=False):
+ super(OnuGemPort, self).__init__(gem_id, alloc_id,
+ encryption=encryption,
+ omci_transport=omci_transport,
+ multicast=multicast,
+ tcont_ref=tcont_ref,
+ traffic_class=traffic_class,
+ intf_ref=intf_ref,
+ exception=exception,
+ name=name,
+ handler=handler)
+ self._is_mock = is_mock
+ self.log = structlog.get_logger(device_id=handler.device_id, gem_id=gem_id)
+
+ @property
+ def encryption(self):
+ return self._encryption
+
+ @encryption.setter
+ def encryption(self, value):
+ assert isinstance(value, bool), 'encryption is a boolean'
+
+ if self._encryption != value:
+ self._encryption = value
+ omci = None # TODO: Get from handler
+
+ @staticmethod
+ def create(handler, gem_port):
+ return OnuGemPort(gem_port['gemport-id'],
+ None,
+ encryption=gem_port['encryption'], # aes_indicator,
+ tcont_ref=gem_port['tcont-ref'],
+ name=gem_port['name'],
+ traffic_class=gem_port['traffic-class'],
+ handler=handler)
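+
+    # Illustrative input (keys match the lookups above; the values are
+    # assumptions for documentation only):
+    #
+    #     OnuGemPort.create(handler, {'gemport-id': 2176, 'encryption': False,
+    #                                 'tcont-ref': 'tcont-1', 'name': 'gem-2176',
+    #                                 'traffic-class': 0})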
+
+ @inlineCallbacks
+ def add_to_hardware(self, omci):
+ if self._is_mock:
+ returnValue('mock')
+
+ omci = self._handler.omci
+ tcont = self.tcont
+ assert omci is not None, 'No OMCI engine'
+ assert tcont is not None, 'No TCONT'
+ assert tcont.entity_id == 0x8001, 'Hardcoded Entity ID NOT FOUND'
+
+ try:
+ direction = "downstream" if self.multicast else "bi-directional"
+ assert not self.multicast, 'MCAST is not supported yet'
+
+ # TODO: For TCONT ID, get the TCONT's entity ID that you programmed
+ # TODO: For TM, is this the entity ID for a traffic descriptor?
+ results = yield omci.send_create_gem_port_network_ctp(self.gem_id, # Entity ID
+ self.gem_id, # Port ID
+ tcont.entity_id, # TCONT ID
+ direction, # Direction
+ 0x100) # TM
+
+ # results = yield omci.send(GemPortNetworkCtpFrame(self.gem_id, # Entity ID
+ # self.gem_id, # Port ID
+ # tcont.entity_id, # TCONT ID
+ # direction, # Direction
+ # 0x100).create() # TM
+
+ except Exception as e:
+ self.log.exception('gemport-create', e=e)
+ raise
+
+ try:
+ # GEM Interworking config
+ # TODO: For service mapper ID, always hardcoded or does it come from somewhere else
+ # It is probably the TCONT entity ID
+ results = yield omci.send_create_gem_inteworking_tp(self.gem_id, # Entity ID
+ self.gem_id, # GEMPort NET CTP ID
+ tcont.entity_id) # Service Mapper Profile ID
+ except Exception as e:
+ self.log.exception('interworking-create', e=e)
+ raise
+
+ try:
+ # Mapper Service Profile config
+ # TODO: All p-bits currently go to the one and only GEMPORT ID for now
+ # TODO: The entity ID is probably the TCONT entity ID
+            results = yield omci.send_set_8021p_mapper_service_profile(tcont.entity_id,  # Entity ID
+                                                                       self.gem_id)      # Interworking TP ID
+ except Exception as e:
+ self.log.exception('mapper-set', e=e)
+ raise
+
+ returnValue(results)
+
+ @inlineCallbacks
+ def remove_from_hardware(self, omci):
+ if self._is_mock:
+ returnValue('mock')
+
+ omci = self._handler.omci
+ assert omci is not None, 'No OMCI engine'
+
+ results = succeed('TODO: Implement me')
+
+ # uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(pon_id, onu_id, self.gem_id)
+ # name = 'gem-port-delete-{}-{}: {}'.format(pon_id, onu_id, self.gem_id)
+ # return session.request('DELETE', uri, name=name)
+ returnValue(results)
+
+ def set_config(self, omci, value, leaf):
+ if self._is_mock:
+ return
+
+ # from ..adtran_olt_handler import AdtranOltHandler
+ #
+ # data = json.dumps({leaf: value})
+ # uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(self.pon_id,
+ # self.onu_id,
+ # self.gem_id)
+ # name = 'onu-set-config-{}-{}-{}'.format(self._pon_id, leaf, str(value))
+ # return session.request('PATCH', uri, data=data, name=name)
+ pass # TODO: Implement me
\ No newline at end of file
diff --git a/voltha/adapters/adtran_onu/onu_pm_metrics.py b/voltha/adapters/adtran_onu/onu_pm_metrics.py
new file mode 100644
index 0000000..c4078bf
--- /dev/null
+++ b/voltha/adapters/adtran_onu/onu_pm_metrics.py
@@ -0,0 +1,142 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig
+from ..adtran_olt.pki.adapter_pm_metrics import AdapterPmMetrics
+
+
+class OnuPmMetrics(AdapterPmMetrics):
+ def __init__(self, handler, device, grouped=False, freq_override=False):
+ super(OnuPmMetrics, self).__init__(handler, device,
+ grouped=grouped, freq_override=freq_override)
+
+ # PM Config Types are COUNTER, GUAGE, and STATE # Note: GAUGE is misspelled in device.proto
+ self.omci_pm_names = {
+ ('enabled', PmConfig.STATE),
+ ('tx_frames', PmConfig.COUNTER),
+ ('tx_errors', PmConfig.COUNTER),
+ ('rx_frames', PmConfig.COUNTER),
+ ('rx_onu_frames', PmConfig.COUNTER),
+ ('rx_timeouts', PmConfig.COUNTER),
+ ('consecutive_errors', PmConfig.COUNTER),
+ ('reply_min', PmConfig.GUAGE), # Milliseconds
+ ('reply_max', PmConfig.GUAGE), # Milliseconds
+ ('reply_average', PmConfig.GUAGE), # Milliseconds
+ }
+ self.health_pm_names = {
+ ('enabled', PmConfig.STATE),
+ ('alarm_active', PmConfig.STATE),
+ ('heartbeat_count', PmConfig.COUNTER),
+ ('heartbeat_miss', PmConfig.COUNTER),
+ ('alarms_raised_count', PmConfig.COUNTER),
+ ('heartbeat_failed_limit', PmConfig.COUNTER),
+ ('heartbeat_interval', PmConfig.COUNTER),
+ }
+ # TODO Add PON Port PM
+ # TODO Add UNI Port PM
+
+ self.omci_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+ for (m, t) in self.omci_pm_names}
+ self.health_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+ for (m, t) in self.health_pm_names}
+
+ def update(self, pm_config):
+ # TODO: Test both 'group' and 'non-group' functionality
+ # TODO: Test frequency override capability for a particular group
+ if self.default_freq != pm_config.default_freq:
+ # Update the callback to the new frequency.
+ self.default_freq = pm_config.default_freq
+ self.lc.stop()
+ self.lc.start(interval=self.default_freq / 10)
+
+        if pm_config.grouped is True:
+            for m in pm_config.groups:
+                pass
+                # self.pm_group_metrics[m.group_name].config.enabled = m.enabled
+                # if m.enabled is True:
+                #     self.enable_pm_collection(m.group_name, remote)
+                # else:
+                #     self.disable_pm_collection(m.group_name, remote)
+ else:
+ for m in pm_config.metrics:
+ self.omci_metrics_config[m.name].enabled = m.enabled
+ self.health_metrics_config[m.name].enabled = m.enabled
+
+ def make_proto(self):
+ pm_config = PmConfigs(id=self.id, default_freq=self.default_freq,
+ grouped=self.grouped,
+ freq_override=self.freq_override)
+ metrics = set()
+
+ if self.grouped:
+ pm_omci_stats = PmGroupConfig(group_name='OMCI',
+ group_freq=self.default_freq,
+ enabled=True)
+
+ pm_health_stats = PmGroupConfig(group_name='Heartbeat',
+ group_freq=self.default_freq,
+ enabled=True)
+ # TODO Add PON Port PM
+ # TODO Add UNI Port PM
+ else:
+ pm_omci_stats = pm_config
+ pm_health_stats = pm_config
+ # TODO Add PON Port PM
+ # TODO Add UNI Port PM
+
+ for m in sorted(self.omci_metrics_config):
+ pm = self.omci_metrics_config[m]
+ if not self.grouped:
+ if pm.name in metrics:
+ continue
+ metrics.add(pm.name)
+
+ pm_omci_stats.metrics.extend([PmConfig(name=pm.name,
+ type=pm.type,
+ enabled=pm.enabled)])
+
+ for m in sorted(self.health_metrics_config):
+ pm = self.health_metrics_config[m]
+ if not self.grouped:
+ if pm.name in metrics:
+ continue
+ metrics.add(pm.name)
+
+ pm_health_stats.metrics.extend([PmConfig(name=pm.name,
+ type=pm.type,
+ enabled=pm.enabled)])
+
+ return pm_config
+
+ def collect_port_metrics(self):
+ metrics = dict()
+ metrics['omci'] = self.collect_metrics(self.handler.omci,
+ self.omci_pm_names,
+ self.omci_metrics_config)
+
+ metrics['heartbeat'] = self.collect_metrics(self.handler.heartbeat,
+ self.health_pm_names,
+ self.health_metrics_config)
+
+ # TODO Add PON Port PM
+ # TODO Add UNI Port PM
+
+ return metrics
+
+
+
+
+
diff --git a/voltha/adapters/adtran_onu/onu_tcont.py b/voltha/adapters/adtran_onu/onu_tcont.py
new file mode 100644
index 0000000..ec51246
--- /dev/null
+++ b/voltha/adapters/adtran_onu/onu_tcont.py
@@ -0,0 +1,94 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+from voltha.adapters.adtran_olt.xpon.tcont import TCont
+from voltha.adapters.adtran_olt.xpon.traffic_descriptor import TrafficDescriptor
+from omci.omci_me import TcontFrame
+
+
+class OnuTCont(TCont):
+ """
+ Adtran ONU specific implementation
+ """
+ def __init__(self, handler, alloc_id, traffic_descriptor, entity_id,
+ name=None, vont_ani=None, is_mock=False):
+ super(OnuTCont, self).__init__(alloc_id, traffic_descriptor,
+ name=name, vont_ani=vont_ani)
+ self._handler = handler
+ self._is_mock = is_mock
+ self._entity_id = entity_id
+ self.log = structlog.get_logger(device_id=handler.device_id, alloc_id=alloc_id)
+
+ @property
+ def entity_id(self):
+ return self._entity_id
+
+ @staticmethod
+ def create(handler, tcont, td):
+ assert isinstance(tcont, dict), 'TCONT should be a dictionary'
+ assert isinstance(td, TrafficDescriptor), 'Invalid Traffic Descriptor data type'
+
+ # TODO: Pass in a unique TCONT Entity ID from the ONU's PON Object
+ entity_id = 0x8001
+
+ return OnuTCont(handler,
+ tcont['alloc-id'],
+ td,
+ entity_id,
+ name=tcont['name'],
+ vont_ani=tcont['vont-ani'])
+
+ @inlineCallbacks
+ def add_to_hardware(self, omci):
+ if self._is_mock:
+ returnValue('mock')
+
+ try:
+ # TODO: What is a valid Entity ID (compute and save if needed)
+ #
+ # NOTE: Entity ID should be computed. For NGPON2, they were starting
+ # at 256 and incrementing.
+ results = yield self._handler.omci.send_set_tcont(self._entity_id, # Entity ID
+ self.alloc_id) # Alloc ID
+
+ # response = yield omci.send(TcontFrame(self._entity_id,
+ # alloc_id=self.alloc_id).get())
+
+ except Exception as e:
+ self.log.exception('tcont-set', e=e)
+ raise
+
+ returnValue(results)
+
+ @inlineCallbacks
+ def remove_from_hardware(self, omci):
+ if self._is_mock:
+ returnValue('mock')
+
+ results = yield omci.send(TcontFrame(self._entity_id).delete())
+ returnValue(results)
+
+
+
+
+
+
+
+
+
+
+
diff --git a/voltha/adapters/adtran_onu/onu_traffic_descriptor.py b/voltha/adapters/adtran_onu/onu_traffic_descriptor.py
new file mode 100644
index 0000000..d24de7b
--- /dev/null
+++ b/voltha/adapters/adtran_onu/onu_traffic_descriptor.py
@@ -0,0 +1,86 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.adapters.adtran_olt.xpon.traffic_descriptor import TrafficDescriptor
+from voltha.adapters.adtran_olt.xpon.best_effort import BestEffort
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+
+class OnuTrafficDescriptor(TrafficDescriptor):
+ """
+ Adtran ONU specific implementation
+ """
+ def __init__(self, fixed, assured, maximum,
+ additional=TrafficDescriptor.AdditionalBwEligibility.DEFAULT,
+ best_effort=None,
+ name=None,
+ is_mock=False):
+ super(OnuTrafficDescriptor, self).__init__(fixed, assured, maximum,
+ additional=additional,
+ best_effort=best_effort,
+ name=name)
+ self._is_mock = is_mock
+
+ @staticmethod
+ def create(traffic_disc):
+ assert isinstance(traffic_disc, dict), 'Traffic Descriptor should be a dictionary'
+
+ additional = TrafficDescriptor.AdditionalBwEligibility.from_value(
+ traffic_disc['additional-bw-eligibility-indicator'])
+
+ if additional == TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
+ best_effort = BestEffort(traffic_disc['maximum-bandwidth'],
+ traffic_disc['priority'],
+ traffic_disc['weight'])
+ else:
+ best_effort = None
+
+        return OnuTrafficDescriptor(traffic_disc['fixed-bandwidth'],
+                                    traffic_disc['assured-bandwidth'],
+                                    traffic_disc['maximum-bandwidth'],
+                                    name=traffic_disc['name'],
+                                    best_effort=best_effort,
+                                    additional=additional)
+
+ @inlineCallbacks
+ def add_to_hardware(self, omci):
+
+ if self._is_mock:
+ returnValue('mock')
+
+ results = succeed('TODO: Implement me')
+ # from ..adtran_olt_handler import AdtranOltHandler
+ #
+ # uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, alloc_id)
+ # data = json.dumps({'traffic-descriptor': self.to_dict()})
+ # name = 'tcont-td-{}-{}: {}'.format(pon_id, onu_id, alloc_id)
+ # try:
+ # results = yield session.request('PATCH', uri, data=data, name=name)
+ #
+ # except Exception as e:
+ # log.exception('traffic-descriptor', td=self, e=e)
+ # raise
+ #
+ # if self.additional_bandwidth_eligibility == \
+ # TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
+ # if self.best_effort is None:
+ # raise ValueError('TCONT is best-effort but does not define best effort sharing')
+ #
+ # try:
+ # results = yield self.best_effort.add_to_hardware(session, pon_id, onu_id, alloc_id)
+ #
+ # except Exception as e:
+ # log.exception('best-effort', best_effort=self.best_effort, e=e)
+ # raise
+ returnValue(results)
diff --git a/voltha/adapters/adtran_onu/pon_port.py b/voltha/adapters/adtran_onu/pon_port.py
new file mode 100644
index 0000000..3bbbe67
--- /dev/null
+++ b/voltha/adapters/adtran_onu/pon_port.py
@@ -0,0 +1,671 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed, TimeoutError
+from twisted.internet import reactor
+
+from voltha.protos.common_pb2 import AdminState
+from voltha.protos.device_pb2 import Port
+
+from voltha.protos.common_pb2 import OperStatus, ConnectStatus
+
+from omci.omci_me import *
+
+_STARTUP_RETRY_WAIT = 5
+BRDCM_DEFAULT_VLAN = 4091 # TODO: Deprecate later...
+
+# abbreviations
+OP = EntityOperations
+
+
+class PonPort(object):
+ """Wraps northbound-port / vlan bridge support for ONU"""
+
+ def __init__(self, handler, port_no):
+ self._enabled = False
+ self._valid = True
+ self._handler = handler
+ self._deferred = None
+ self._port = None
+ self._port_number = port_no
+ self._bridge_initialized = False
+ self.log = structlog.get_logger(device_id=handler.device_id, port_no=port_no)
+
+ self._admin_state = AdminState.ENABLED
+ self._oper_status = OperStatus.ACTIVE
+
+ self._gem_ports = {} # gem-id -> GemPort
+ self._tconts = {} # alloc-id -> TCont
+
+ # TODO: Add stats, alarm reference, ...
+
+ pass
+
+ def __str__(self):
+ return "PonPort" # TODO: Encode current state
+
+ @staticmethod
+ def create(handler, port_no):
+ port = PonPort(handler, port_no)
+ return port
+
+ def _start(self):
+ self._cancel_deferred()
+
+ self._admin_state = AdminState.ENABLED
+ self._oper_status = OperStatus.ACTIVE
+ self._update_adapter_agent()
+
+ # Begin ONU Activation sequence
+ self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT, self.message_exchange)
+
+ # TODO: start h/w sync
+ pass
+
+ def _stop(self):
+ self._cancel_deferred()
+
+ self._bridge_initialized = False
+ self._admin_state = AdminState.DISABLED
+ self._oper_status = OperStatus.UNKNOWN
+ self._update_adapter_agent()
+ # TODO: stop h/w sync
+ pass
+
+ def _cancel_deferred(self):
+ d, self._deferred = self._deferred, None
+
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+        except Exception:
+ pass
+
+ def delete(self):
+ self.enabled = False
+ self._valid = False
+ self._handler = None
+ # TODO: anything else
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @enabled.setter
+ def enabled(self, value):
+ if self._enabled != value:
+ self._enabled = value
+
+ if value:
+ self._start()
+ else:
+ self._stop()
+
+ @property
+ def bridge_initialized(self):
+ return self._bridge_initialized
+
+ @property
+ def port_number(self):
+ return self._port_number
+
+ def get_port(self):
+ """
+ Get the VOLTHA PORT object for this port
+ :return: VOLTHA Port object
+ """
+ if self._port is None:
+ device = self._handler.adapter_agent.get_device(self._handler.device_id)
+
+ self._port = Port(port_no=self.port_number,
+ label='PON port',
+ type=Port.PON_ONU,
+ admin_state=self._admin_state,
+ oper_status=self._oper_status,
+                              peers=[Port.PeerPort(device_id=device.parent_id,
+                                                   port_no=device.parent_port_no)])
+ return self._port
+
+ def _update_adapter_agent(self):
+        # TODO: Does the adapter_agent currently allow an 'update' of port status?
+ # self.adapter_agent.update_port(self.olt.device_id, self.get_port())
+ pass
+
+ @inlineCallbacks
+ def message_exchange(self):
+ self.log.info('message-exchange')
+ self._deferred = None
+
+        if self._handler.device_id is None:
+            returnValue('deleted')
+
+        if not self.enabled:
+            # Wait until enabled
+            returnValue('not-enabled')
+
+ omci = self._handler.omci
+
+ try:
+ # reset incoming message queue
+ omci.flush()
+
+ ####################################################
+ # Start by getting some useful device information
+
+ device = self._handler.adapter_agent.get_device(self._handler.device_id)
+ device.oper_status = OperStatus.ACTIVATING
+ device.connect_status = ConnectStatus.UNREACHABLE
+
+ except Exception as e:
+ self.log.exception('top-of-msg-exch', e=e)
+ device = None
+
+ if device is None:
+ # Wait until enabled
+ returnValue('no-device')
+
+ try:
+            # May time out if the ONU is not yet fully discovered (can happen in
+            # the xPON case) or on other errors.
+            # Decode fields in the response and update the device info
+
+ response = yield omci.send_get_OntG('vendor_id')
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ vendor = data["vendor_id"]
+ assert vendor == 'ADTN', \
+ "Invalid Device/Wrong device adapter assigned: '{}'".format(vendor)
+
+ response = yield omci.send(OntGFrame('vendor_id').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ vendor = data["vendor_id"]
+ assert vendor == 'ADTN', \
+ "Invalid Device/Wrong device adapter assigned: '{}'".format(vendor)
+
+ # TODO: Get serial number and validate!
+
+ # Mark as reachable if at least first message gets through
+ device.connect_status = ConnectStatus.REACHABLE
+ self._handler.adapter_agent.update_device(device)
+
+ # response = yield omci.send_get_cardHolder('actual_plugin_unit_type', 257)
+ response = yield omci.send(CardholderFrame(True, 1,
+ 'actual_plugin_unit_type').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ # device.type = str(data["actual_plugin_unit_type"])
+
+ # response = yield omci.send_get_circuit_pack('number_of_ports', 257)
+ response = yield omci.send(CircuitPackFrame(257, 'number_of_ports').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ num_ports = data["number_of_ports"]
+ assert num_ports == 1, 'Invalid number of ports: {}'.format(num_ports)
+
+ # response = yield omci.send_get_IpHostConfigData('mac_address', 515)
+ response = yield omci.send(IpHostConfigDataFrame(515, 'mac_address').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ device.mac_address = str(data["mac_address"])
+
+ # response = yield omci.send_get_Ont2G('equipment_id', 0)
+ response = yield omci.send(Ont2GFrame('equipment_id').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ eqptId_bootVersion = str(data["equipment_id"])
+            eqptId = eqptId_bootVersion[0:10]          # e.g. BVMDZ10DRA
+            bootVersion = eqptId_bootVersion[12:20]    # e.g. CML.D55~
+
+ # response = yield omci.send_get_Ont2G('omcc_version', 0)
+ response = yield omci.send(Ont2GFrame('omcc_version').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ # decimal version
+ omciVersion = str(data["omcc_version"])
+
+ # response = yield omci.send_get_Ont2G('vendor_product_code', 0)
+ response = yield omci.send(Ont2GFrame('vendor_product_code').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ # decimal value
+ vendorProductCode = str(data["vendor_product_code"])
+
+ response = yield omci.send(OntGFrame('version').get())
+
+ omci_response = response.getfieldval("omci_message")
+ data = omci_response.getfieldval("data")
+ device.model = str(data["version"]) # such as 1287800F1
+
+            # Possible bug in ONT firmware; uncomment this code after it is fixed.
+ # response = yield omci.send_get_SoftwareImage('version', 0)
+ #
+ # omci_response = response.getfieldval("omci_message")
+ # data = omci_response.getfieldval("data")
+ # device.firmware_version = str(data["version"])
+ # is_committed = data["is_committed"]
+ # is_active = data["is_active"]
+ # is_valid = data["is_valid"]
+
+ # TODO: May have some issue with the next one...
+ # response = yield omci.send_set_adminState(257)
+
+ # device.hardware_version = 'TODO: to be filled'
+ # TODO: Support more versions as needed
+ # images = Image(version=results.get('software_version', 'unknown'))
+ # device.images.image.extend([images])
+
+ # self.adapter_agent.update_device(device)
+ device.oper_status = OperStatus.ACTIVE
+ device.connect_status = ConnectStatus.REACHABLE
+ self._handler.adapter_agent.update_device(device)
+
+ # Start up non-critical message exchange
+ self._deferred = reactor.callLater(0, self.message_exchange_part_2)
+ self.log.info('onu-activated')
+
+ # These exceptions are not recoverable
+ except (AssertionError, TypeError, ValueError, AttributeError) as e:
+ self.log.exception('Failed', e=e)
+ device.oper_status = OperStatus.FAILED
+ device.reason = e.message
+ self._handler.adapter_agent.update_device(device)
+
+ except TimeoutError as e:
+ self.log.debug('Failed', e=e)
+ self._handler.adapter_agent.update_device(device)
+ # Try again later. May not have been discovered
+ self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT,
+ self.message_exchange)
+
+ except Exception as e:
+ self.log.exception('Failed', e=e)
+ self._handler.adapter_agent.update_device(device)
+ # Try again later. May not have been discovered
+ self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT,
+ self.message_exchange)
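+
+    # The attribute-read pattern above (send a frame, pull 'omci_message', then
+    # 'data') repeats for every field. A hedged helper sketch, not part of this
+    # module, using only the omci.send() call and frame classes already imported:
+    #
+    #   @inlineCallbacks
+    #   def _get_attribute(self, frame, field_name):
+    #       response = yield self._handler.omci.send(frame.get())
+    #       data = response.getfieldval("omci_message").getfieldval("data")
+    #       returnValue(data[field_name])
+    #
+    #   # e.g. vendor = yield self._get_attribute(OntGFrame('vendor_id'), 'vendor_id')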
+
+ @inlineCallbacks
+ def message_exchange_part_2(self):
+ """ Called after basic OMCI message startup/exchange """
+
+ self.log.info('message-exchange-part-2')
+ self._deferred = None
+
+ if self._handler.device_id is None or not self.enabled:
+ returnValue('not-enabled')
+
+ omci = self._handler.omci
+
+ try:
+ # reset incoming message queue
+ omci.flush()
+ device = self._handler.adapter_agent.get_device(self._handler.device_id)
+
+ except Exception as e:
+ self.log.exception('top-of-msg-exch', e=e)
+ device = None
+
+ if not self.enabled or device is None:
+ returnValue('not-enabled')
+
+ try:
+ cvid = BRDCM_DEFAULT_VLAN # TODO: What should this be?
+
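+            # Provisioning sequence below: GAL Ethernet Profile, Extended VLAN
+            # Tagging config and TPIDs, MAC Bridge Service Profile, MAC Bridge
+            # Port toward the UNI, 802.1p Mapper Service Profile, MAC Bridge
+            # Port toward the mapper, VLAN Tagging Filter, and finally the
+            # untagged-frame VLAN treatment rule.
+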
+ # construct message
+ # MIB Reset - OntData - 0
+ # results = yield omci.send_mib_reset()
+
+ # Create AR - GalEthernetProfile - 1
+ results = yield omci.send_create_gal_ethernet_profile(
+ 1, # Entity ID
+ 48) # Max GEM Payload size
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ # Port 2
+ # Extended VLAN Tagging Operation config
+ # TODO: add entry here for additional UNI interfaces
+ results = yield omci.send_create_extended_vlan_tagging_operation_configuration_data(
+ 0x202, # Entity ID
+ 2, # Assoc Type
+ 0x102) # Assoc ME
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ # Set AR - ExtendedVlanTaggingOperationConfigData - 514 - 8100 - 8100
+ results = yield omci.send_set_extended_vlan_tagging_operation_tpid_configuration_data(
+ 0x202, # Entity ID
+ 0x8100, # input TPID
+ 0x8100) # output TPID
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # unsupported_mask = results.fields['omci_message'].fields['unsupported_attributes_mask']
+ # failed_mask = results.fields['omci_message'].fields['failed_attributes_mask']
+
+ # MAC Bridge Service config
+ results = yield omci.send_create_mac_bridge_service_profile(0x201) # Entity ID
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ # Create AR - MacBridgePortConfigData
+ results = yield omci.send_create_mac_bridge_port_configuration_data(
+ 0x201, # Entity ID
+ 0x201, # Bridge ID
+ 2, # Port ID
+ 1, # TP Type
+ 0x102) # TP ID
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ # Mapper Service config
+ results = yield omci.send_create_8021p_mapper_service_profile(0x8001) # Entity ID
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ # MAC Bridge Port config
+ results = yield omci.send_create_mac_bridge_port_configuration_data(
+ 0x2102, # Entity ID
+ 0x201, # Bridge ID
+ 3, # Port ID
+ 3, # TP Type
+ 0x8001) # TP ID
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ # VLAN Tagging Filter config
+ # TODO: Probably need to get VLAN ID from device.vlan
+ results = yield omci.send_create_vlan_tagging_filter_data(0x2102, # Entity ID
+ cvid) # VLAN ID
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ # Set AR - ExtendedVlanTaggingOperationConfigData
+ # 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to priority tagged pkts - c-vid
+ # results = yield omci.send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(
+ # 0x202, # Entity ID
+ # 8, # Filter Inner Priority
+ # 0, # Filter Inner VID
+ # 0, # Filter Inner TPID DE
+ # 1, # Treatment tags to remove
+ # 8, # Treatment inner priority
+ # cvid) # Treatment inner VID
+ #
+ # Set AR - ExtendedVlanTaggingOperationConfigData
+ # 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to untagged pkts - c-vid
+ results = yield omci.send_set_extended_vlan_tagging_operation_vlan_configuration_data_untagged(
+ 0x202, # Entity ID
+ 0x1000, # Filter Inner VID
+ cvid) # Treatment inner VID
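+            # (In G.988 extended VLAN tagging rules, a VID filter value of
+            # 0x1000/4096 means "do not filter on VID"; the rule above adds the
+            # c-vid to untagged upstream frames.)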
+
+ # success = results.fields['omci_message'].fields['success_code'] == 0
+ # error_mask = results.fields['omci_message'].fields['parameter_error_attributes_mask']
+
+ ######################################################################
+ # If here, we can add TCONTs/GEM Ports as needed
+
+ self._bridge_initialized = True
+            # Sync up any TCONTs/GEM ports that xPON may have already sent us
+            self._deferred = reactor.callLater(0, self.sync_existing_xpon)
+
+ # ###############################################################################
+ # # Multicast related MEs
+ # # Set AR - MulticastOperationsProfile - Dynamic Access Control List table
+ # # Create AR - MacBridgePortConfigData - 9000 - 513 - 6 - 6 - 6
+ # results = yield omci.send_create_mac_bridge_port_configuration_data(
+ # 0x2328,
+ # 0x201,
+ # 6,
+ # 6,
+ # 6)
+ #
+ # # Multicast Operation Profile config
+ # # Create AR - MulticastOperationsProfile
+ # results = yield omci.send_create_multicast_operations_profile(
+ # 0x201,
+ # 3)
+ #
+ # # Multicast Subscriber config
+ # # Create AR - MulticastSubscriberConfigInfo
+ # results = yield omci.send_create_multicast_subscriber_config_info(
+ # 0x201,
+ # 0,
+ # 0x201)
+ #
+ # # Create AR - GemPortNetworkCtp - 260 - 4000 - 0 Multicast
+ # results = yield omci.send_create_gem_port_network_ctp(
+ # 0x104,
+ # 0x0FA0,
+ # 0,
+ # "downstream",
+ # 0)
+ #
+ # # Multicast GEM Interworking config Multicast
+ # # Create AR - MulticastGemInterworkingTp - 6 - 260
+ # results = yield omci.send_create_multicast_gem_interworking_tp(0x6, 0x104)
+ #
+ # results = yield omci.send_set_multicast_operations_profile_acl_row0(
+ # 0x201,
+ # 'dynamic',
+ # 0,
+ # 0x0fa0,
+ # 0x0fa0,
+ # '0.0.0.0',
+ # '224.0.0.0',
+ # '239.255.255.255')
+ #
+ # # Multicast Operation Profile config
+ # # Set AR - MulticastOperationsProfile - Downstream IGMP Multicast TCI
+ # results = yield omci.send_set_multicast_operations_profile_ds_igmp_mcast_tci(
+ # 0x201,
+ # 4,
+ # cvid)
+
+ except AssertionError as e:
+ self.log.exception('Failed', e=e)
+            # TODO: get message and report back
+
+ except Exception as e:
+ self.log.debug('Failed', e=e)
+ self._handler.adapter_agent.update_device(device)
+            # Try again later. TODO: Do we want to restart at part 1 here?
+ self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT,
+ self.message_exchange_part_2)
+
+ @inlineCallbacks
+ def sync_existing_xpon(self):
+ """
+ Run through existing TCONT and GEM Ports and push into hardware
+ """
+ for tcont in self._tconts.itervalues():
+ try:
+ yield self.add_tcont(tcont, reflow=True)
+ except Exception as e:
+ self.log.exception('tcont-reflow', e=e, tcont=tcont)
+
+ for gem_port in self._gem_ports.itervalues():
+ try:
+ yield self.add_gem_port(gem_port, reflow=True)
+
+ except Exception as e:
+ self.log.exception('gem-port-reflow', e=e, gem_port=gem_port)
+
+ returnValue('Done')
+
+ @inlineCallbacks
+ def add_tcont(self, tcont, reflow=False):
+ """
+        Creates a T-CONT with the given alloc-id
+
+ :param tcont: (TCont) Object that maintains the TCONT properties
+ :param reflow: (boolean) If true, force add (used during h/w resync)
+ :return: (deferred)
+ """
+ if not self._valid:
+ returnValue('Deleting')
+
+ if not reflow and tcont.alloc_id in self._tconts:
+ returnValue('already created')
+
+ self.log.info('add', tcont=tcont, reflow=reflow)
+ self._tconts[tcont.alloc_id] = tcont
+
+ if not self.bridge_initialized:
+ returnValue('Bridge Not Initialized')
+
+ try:
+ results = yield tcont.add_to_hardware(self._handler.omci)
+
+ except Exception as e:
+ self.log.exception('tcont', tcont=tcont, reflow=reflow, e=e)
+ # May occur with xPON provisioning, use hw-resync to recover
+ results = 'resync needed'
+
+ returnValue(results)
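+
+    # Note: reflow=True is passed by sync_existing_xpon() above to force a
+    # re-push of TCONTs/GEM ports that were provisioned before the OMCI MAC
+    # bridge finished initializing.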
+
+ @inlineCallbacks
+ def update_tcont_td(self, alloc_id, new_td):
+ tcont = self._tconts.get(alloc_id)
+
+ if tcont is None:
+ returnValue('not-found')
+
+ tcont.traffic_descriptor = new_td
+
+ if not self.bridge_initialized:
+ returnValue('Bridge Not Initialized')
+
+ try:
+ results = yield tcont.add_to_hardware(self._handler.omci)
+
+ except Exception as e:
+ self.log.exception('tcont', tcont=tcont, e=e)
+ # May occur with xPON provisioning, use hw-resync to recover
+ results = 'resync needed'
+
+ returnValue(results)
+
+ @inlineCallbacks
+ def remove_tcont(self, alloc_id):
+ tcont = self._tconts.get(alloc_id)
+
+ if tcont is None:
+ returnValue('nop')
+
+ del self._tconts[alloc_id]
+
+ if not self.bridge_initialized:
+ returnValue('Bridge Not Initialized')
+
+ try:
+ results = yield tcont.remove_from_hardware(self._handler.omci)
+
+ except Exception as e:
+ self.log.exception('delete', e=e)
+ results = e
+ # raise
+
+ returnValue(results)
+
+ def gem_port(self, gem_id):
+ return self._gem_ports.get(gem_id)
+
+ @property
+ def gem_ids(self):
+ """Get all GEM Port IDs used by this ONU"""
+ return sorted([gem_id for gem_id, gem in self._gem_ports.items()])
+
+ @inlineCallbacks
+ def add_gem_port(self, gem_port, reflow=False):
+ """
+ Add a GEM Port to this ONU
+
+ :param gem_port: (GemPort) GEM Port to add
+ :param reflow: (boolean) If true, force add (used during h/w resync)
+ :return: (deferred)
+ """
+ if not self._valid:
+ returnValue('Deleting')
+
+ if not reflow and gem_port.gem_id in self._gem_ports:
+ returnValue('nop')
+
+ self.log.info('add', gem_port=gem_port, reflow=reflow)
+ self._gem_ports[gem_port.gem_id] = gem_port
+
+ if not self.bridge_initialized:
+ returnValue('Bridge Not Initialized')
+
+ try:
+ results = yield gem_port.add_to_hardware(self._handler.omci)
+ # TODO: Are flows affected by this change?
+
+ except Exception as e:
+ self.log.exception('gem-port', gem_port=gem_port, reflow=reflow, e=e)
+ # This can happen with xPON if the ONU has been provisioned, but the PON Discovery
+ # has not occurred for the ONU. Rely on hw sync to recover
+ results = 'resync needed'
+
+ returnValue(results)
+
+ @inlineCallbacks
+ def remove_gem_id(self, gem_id):
+ gem_port = self._gem_ports.get(gem_id)
+
+ if gem_port is None:
+ returnValue('nop')
+
+ del self._gem_ports[gem_id]
+
+ if not self.bridge_initialized:
+ returnValue('Bridge Not Initialized')
+
+ try:
+ results = yield gem_port.remove_from_hardware(self._handler.omci)
+ # TODO: Are flows affected by this change?
+
+ except Exception as ex:
+ self.log.exception('gem-port-delete', e=ex)
+ raise
+
+ returnValue(results)
diff --git a/voltha/adapters/adtran_onu/uni_port.py b/voltha/adapters/adtran_onu/uni_port.py
new file mode 100644
index 0000000..b93bbb9
--- /dev/null
+++ b/voltha/adapters/adtran_onu/uni_port.py
@@ -0,0 +1,188 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from voltha.protos.common_pb2 import OperStatus, AdminState
+from voltha.protos.device_pb2 import Port
+from voltha.protos.openflow_13_pb2 import OFPPF_10GB_FD
+from voltha.core.logical_device_agent import mac_str_to_tuple
+from voltha.protos.logical_device_pb2 import LogicalPort
+from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER
+from voltha.protos.openflow_13_pb2 import ofp_port
+
+
+class UniPort(object):
+ """Wraps southbound-port(s) support for ONU"""
+
+ def __init__(self, handler, name, port_no, control_vlan=None):
+ self.log = structlog.get_logger(device_id=handler.device_id,
+ port_no=port_no)
+ self._enabled = False
+ self._handler = handler
+ self._name = name
+ self._port = None
+ self._port_number = port_no
+ self._logical_port_number = None
+ self._control_vlan = control_vlan
+
+ self._admin_state = AdminState.ENABLED
+ self._oper_status = OperStatus.ACTIVE
+ # TODO Add state, stats, alarm reference, ...
+
+ pass
+
+ def __str__(self):
+ return "UniPort: {}:{}".format(self.name, self.port_number)
+
+ @staticmethod
+ def create(handler, name, port_no, control_vlan):
+ port = UniPort(handler, name, port_no, control_vlan)
+ return port
+
+ def _start(self):
+ self._cancel_deferred()
+
+ self._admin_state = AdminState.ENABLED
+ self._oper_status = OperStatus.ACTIVE
+ self._update_adapter_agent()
+ # TODO: start h/w sync
+ # TODO: Enable the actual physical port?
+ pass
+
+ def _stop(self):
+ self._cancel_deferred()
+
+ self._admin_state = AdminState.DISABLED
+ self._oper_status = OperStatus.UNKNOWN
+ self._update_adapter_agent()
+ # TODO: Disable/power-down the actual physical port?
+ pass
+
+ def delete(self):
+ self.enabled = False
+ self._handler = None
+ # TODO: anything else
+
+ def _cancel_deferred(self):
+ pass
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @enabled.setter
+ def enabled(self, value):
+ if self._enabled != value:
+ self._enabled = value
+
+ if value:
+ self._start()
+ else:
+ self._stop()
+
+ @property
+ def port_number(self):
+ """
+ Physical device port number
+ :return: (int) port number
+ """
+ return self._port_number
+
+ @property
+ def logical_port_number(self):
+ """
+ Logical device port number (used as OpenFlow port for UNI)
+ :return: (int) port number
+ """
+ return self._logical_port_number
+
+ def _update_adapter_agent(self):
+        # TODO: Does the adapter_agent currently allow an 'update' of port status?
+ # self.adapter_agent.update_port(self.olt.device_id, self.get_port())
+ pass
+
+    def decode_openflow_port_and_control_vlan(self, venet_info):
+ try:
+ # Allow spaces or dashes as separator, select last as
+ # the port number
+
+            port_no = int(venet_info['name'].replace(' ', '-').split('-')[-1])
+ cntl_vlan = port_no
+
+ return port_no, cntl_vlan
+
+ except ValueError:
+ self.log.error('invalid-uni-port-name', name=venet_info['name'])
+ except KeyError:
+ self.log.error('invalid-venet-data', data=venet_info)
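+
+    # For example, a hypothetical v-enet named 'ADTN ONU 1 UNI - 101' decodes
+    # to port_no 101 and control VLAN 101 with the parsing above.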
+
+ def get_port(self):
+ """
+ Get the VOLTHA PORT object for this port
+ :return: VOLTHA Port object
+ """
+ if self._port is None:
+ device = self._handler.adapter_agent.get_device(self._handler.device_id)
+
+ self._port = Port(port_no=self.port_number,
+ label='Ethernet port',
+ type=Port.ETHERNET_UNI,
+ admin_state=self._admin_state,
+ oper_status=self._oper_status,
+ peers=[Port.PeerPort(device_id=device.parent_id,
+ port_no=device.parent_port_no)])
+ return self._port
+
+ def add_logical_port(self, openflow_port_no, control_vlan=None,
+ capabilities=OFPPF_10GB_FD | OFPPF_FIBER,
+ speed=OFPPF_10GB_FD):
+
+ if self._logical_port_number is None:
+ self._logical_port_number = openflow_port_no
+ self._control_vlan = control_vlan
+
+ device = self._handler.adapter_agent.get_device(self._handler.device_id)
+
+ if control_vlan is not None and device.vlan != control_vlan:
+ device.vlan = control_vlan
+ self._handler.adapter_agent.update_device(device)
+
+ openflow_port = ofp_port(
+ port_no=openflow_port_no,
+ hw_addr=mac_str_to_tuple('08:00:%02x:%02x:%02x:%02x' %
+ ((device.parent_port_no >> 8 & 0xff),
+ device.parent_port_no & 0xff,
+ (openflow_port_no >> 8) & 0xff,
+ openflow_port_no & 0xff)),
+ name='uni-{}'.format(openflow_port_no),
+ config=0,
+ state=OFPPS_LIVE,
+ curr=capabilities,
+ advertised=capabilities,
+ peer=capabilities,
+ curr_speed=speed,
+ max_speed=speed
+ )
+        self._handler.adapter_agent.add_logical_port(
+            self._handler.logical_device_id,
+            LogicalPort(id='uni-{}'.format(openflow_port_no),
+                        ofp_port=openflow_port,
+                        device_id=device.id,
+                        device_port_no=self._port_number))
+ # TODO: Should we use the UNI object 'name' as the id for OpenFlow?
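+
+        # The hw_addr above packs the low 16 bits of the OLT parent port number
+        # and the low 16 bits of the OpenFlow port number under a fixed 08:00
+        # prefix, giving each UNI a distinct, repeatable MAC address.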