VOL-1397: Adtran-OLT - Initial containerization commit
- Need to move VERSION to base directory
Change-Id: I9d62d0607a011ce642e379fd92b35ec48b300070
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..3d3cc8f
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,6 @@
+# virtualenv dirs
+venv-darwin
+venv-linux
+
+# Markdown
+**/*.md
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..13af726
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,181 @@
+# Created by .ignore support plugin (hsz.mobi)
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+### Python template
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..3c81fc2
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,6 @@
+[gerrit]
+host=gerrit.opencord.org
+port=29418
+project=voltha-adtran-adapter.git
+defaultremote=origin
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6833f4b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,159 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifeq ($(TAG),)
+TAG := latest
+endif
+
+ifeq ($(TARGET_TAG),)
+TARGET_TAG := latest
+endif
+
+ifneq ($(http_proxy)$(https_proxy),)
+# Include proxies from the environment
+DOCKER_PROXY_ARGS = \
+ --build-arg http_proxy=$(http_proxy) \
+ --build-arg https_proxy=$(https_proxy) \
+ --build-arg ftp_proxy=$(ftp_proxy) \
+ --build-arg no_proxy=$(no_proxy) \
+ --build-arg HTTP_PROXY=$(HTTP_PROXY) \
+ --build-arg HTTPS_PROXY=$(HTTPS_PROXY) \
+ --build-arg FTP_PROXY=$(FTP_PROXY) \
+ --build-arg NO_PROXY=$(NO_PROXY)
+endif
+
+DOCKER_BUILD_ARGS = \
+ --build-arg TAG=$(TAG) \
+ --build-arg REGISTRY=$(REGISTRY) \
+ --build-arg REPOSITORY=$(REPOSITORY) \
+ $(DOCKER_PROXY_ARGS) $(DOCKER_CACHE_ARG) \
+ --rm --force-rm \
+ $(DOCKER_BUILD_EXTRA_ARGS)
+
+VENVDIR := venv-$(shell uname -s | tr '[:upper:]' '[:lower:]')
+VENV_BIN ?= virtualenv
+VENV_OPTS ?=
+
+PYVOLTHA_DIR ?= ../pyvoltha
+
+DOCKER_IMAGE_LIST = \
+ voltha-adtran-base \
+ voltha-adapter-adtran-onu \
+ voltha-adapter-adtran-olt
+
+.PHONY: base adapter_adtran_olt adapter_adtran_onu tag push pull build containers clean distclean help
+
+# This should be the first and default target in this Makefile
+help:
+ @echo "Usage: make [<target>]"
+ @echo "where available targets are:"
+ @echo
+	@echo "build                : Build the Adapter and docker images."
+	@echo "                       If this is the first time you are building, choose \"make build\" option."
+ @echo "clean : Remove files created by the build and tests"
+ @echo "distclean : Remove venv directory"
+ @echo "fetch : Pre-fetch artifacts for subsequent local builds"
+ @echo "help : Print this help"
+ @echo "rebuild-venv : Rebuild local Python virtualenv from scratch"
+ @echo "venv : Build local Python virtualenv if did not exist yet"
+ @echo "containers : Build all the docker containers"
+ @echo "base : Build the base docker container used by all other dockers"
+ @echo "adapter_adtran_olt : Build the ADTRAN olt adapter docker container"
+	@echo "adapter_adtran_onu   : Build the ADTRAN onu adapter docker container"
+ @echo "tag : Tag a set of images"
+ @echo "push : Push the docker images to an external repository"
+ @echo "pull : Pull the docker images from a repository"
+ @echo
+
+build: containers
+
+containers: base adapter_adtran_olt adapter_adtran_onu
+
+base:
+ifdef LOCAL_PYVOLTHA
+ @rm -f pyvoltha/dist/*
+ @mkdir -p pyvoltha/dist
+ cp $(PYVOLTHA_DIR)/dist/*.tar.gz pyvoltha/dist/
+ docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adtran-base-local:${TAG} -f docker/Dockerfile.base_local .
+else
+ docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adtran-base:${TAG} -f docker/Dockerfile.base .
+endif
+
+adapter_adtran_olt: base
+ifdef PYVOLTHA_BASE_IMAGE
+ docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-adtran-olt:${TAG} -f docker/Dockerfile.adapter_adtran_olt_pyvoltha .
+else
+ docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-adtran-olt:${TAG} -f docker/Dockerfile.adapter_adtran_olt .
+endif
+
+adapter_adtran_onu: base
+ifdef PYVOLTHA_BASE_IMAGE
+ docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-adtran-onu:${TAG} -f docker/Dockerfile.adapter_adtran_onu_pyvoltha .
+else
+ docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-adtran-onu:${TAG} -f docker/Dockerfile.adapter_adtran_onu .
+endif
+
+tag: $(patsubst %,%.tag,$(DOCKER_IMAGE_LIST))
+
+push: tag $(patsubst %,%.push,$(DOCKER_IMAGE_LIST))
+
+pull: $(patsubst %,%.pull,$(DOCKER_IMAGE_LIST))
+
+%.tag:
+ docker tag ${REGISTRY}${REPOSITORY}voltha-$(subst .tag,,$@):${TAG} ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .tag,,$@):${TARGET_TAG}
+
+%.push:
+ docker push ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .push,,$@):${TARGET_TAG}
+
+%.pull:
+ docker pull ${REGISTRY}${REPOSITORY}voltha-$(subst .pull,,$@):${TAG}
+
+clean:
+ find . -name '*.pyc' | xargs rm -f
+ rm -rf pyvoltha
+
+distclean: clean
+ rm -rf ${VENVDIR}
+
+purge-venv:
+ rm -fr ${VENVDIR}
+
+rebuild-venv: purge-venv venv
+
+venv: ${VENVDIR}/.built
+
+${VENVDIR}/.built:
+ @ $(VENV_BIN) ${VENV_OPTS} ${VENVDIR}
+ @ $(VENV_BIN) ${VENV_OPTS} --relocatable ${VENVDIR}
+ @ . ${VENVDIR}/bin/activate && \
+ pip install --upgrade pip; \
+ if ! pip install -r requirements.txt; \
+ then \
+ echo "On MAC OS X, if the installation failed with an error \n'<openssl/opensslv.h>': file not found,"; \
+ echo "see the BUILD.md file for a workaround"; \
+ else \
+ uname -s > ${VENVDIR}/.built; \
+ fi
+ @ $(VENV_BIN) ${VENV_OPTS} --relocatable ${VENVDIR}
+
+ifdef LOCAL_PYVOLTHA
+ mkdir -p pyvoltha/dist
+ cp ../../pyvoltha/dist/*.tar.gz pyvoltha/dist/
+ @ . ${VENVDIR}/bin/activate && \
+ pip install pyvoltha/dist/*.tar.gz
+endif
+
+# end file
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..1e4ec5e
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+2.0.1-dev
diff --git a/adapters/__init__.py b/adapters/__init__.py
new file mode 100644
index 0000000..d67fcf2
--- /dev/null
+++ b/adapters/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2019-present ADTRAN, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_common/__init__.py b/adapters/adtran_common/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/adtran_common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_common/adtran_device_handler.py b/adapters/adtran_common/adtran_device_handler.py
new file mode 100644
index 0000000..79877b7
--- /dev/null
+++ b/adapters/adtran_common/adtran_device_handler.py
@@ -0,0 +1,1438 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Adtran generic VOLTHA device handler
+"""
+import argparse
+import datetime
+import shlex
+import time
+
+import structlog
+from twisted.internet import reactor, defer
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.failure import Failure
+
+from adapters.adtran_common.net.adtran_netconf import AdtranNetconfClient
+from adapters.adtran_common.net.adtran_rest import AdtranRestClient
+from pyvoltha.protos import third_party
+from pyvoltha.protos.common_pb2 import OperStatus, AdminState, ConnectStatus
+from pyvoltha.protos.logical_device_pb2 import LogicalDevice
+from pyvoltha.protos.openflow_13_pb2 import ofp_desc, ofp_switch_features, OFPC_PORT_STATS, \
+ OFPC_GROUP_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS
+from pyvoltha.adapters.extensions.alarms.adapter_alarms import AdapterAlarms
+from pyvoltha.adapters.extensions.kpi.olt.olt_pm_metrics import OltPmMetrics
+from pyvoltha.common.utils.asleep import asleep
+from flow.flow_tables import DeviceFlows, DownstreamFlows
+
+_ = third_party
+
+DEFAULT_MULTICAST_VLAN = 4000
+BROADCOM_UNTAGGED_VLAN = 4091
+DEFAULT_UTILITY_VLAN = BROADCOM_UNTAGGED_VLAN
+
+_DEFAULT_RESTCONF_USERNAME = ""
+_DEFAULT_RESTCONF_PASSWORD = ""
+_DEFAULT_RESTCONF_PORT = 8081
+
+_DEFAULT_NETCONF_USERNAME = ""
+_DEFAULT_NETCONF_PASSWORD = ""
+_DEFAULT_NETCONF_PORT = 830
+
+_STARTUP_RETRY_TIMEOUT = 5 # 5 seconds delay after activate failed before we
+_DEFAULT_RESOURCE_MGR_KEY = "adtran"
+
+
+class AdtranDeviceHandler(object):
+ """
+ A device that supports the ADTRAN RESTCONF protocol for communications
+ with a VOLTHA/VANILLA managed device.
+ Port numbering guidelines for Adtran OLT devices. Derived classes may augment
+ the numbering scheme below as needed.
+
+ - Reserve port 0 for the CPU capture port. All ports to/from this port should
+ be related to messages destined to/from the OpenFlow controller.
+
+ - Begin numbering northbound ports (network facing) at port 1 contiguously.
+ Consider the northbound ports to typically be the highest speed uplinks.
+ If these ports are removable or provided by one or more slots in a chassis
+ subsystem, still reserve the appropriate amount of port numbers whether they
+ are populated or not.
+
+ - Number southbound ports (customer facing) ports next starting at the next
+ available port number. If chassis based, follow the same rules as northbound
+ ports and reserve enough port numbers.
+
+ - Number any out-of-band management ports (if any) last. It will be up to the
+ Device Adapter developer whether to expose these to openflow or not. If you do
+ not expose them, but do have the ports, still reserve the appropriate number of
+ port numbers just in case.
+ """
+ # HTTP shortcuts
+ HELLO_URI = '/restconf/adtran-hello:hello'
+
+ # RPC XML shortcuts
+ RESTART_RPC = '<system-restart xmlns="urn:ietf:params:xml:ns:yang:ietf-system"/>'
+
+ def __init__(self, **kwargs):
+ from net.pio_zmq import DEFAULT_PIO_TCP_PORT
+ from net.pon_zmq import DEFAULT_PON_AGENT_TCP_PORT
+
+ super(AdtranDeviceHandler, self).__init__()
+
+ adapter = kwargs['adapter']
+ device_id = kwargs['device-id']
+ timeout = kwargs.get('timeout', 20)
+
+ self.adapter = adapter
+ self.adapter_agent = adapter.adapter_agent
+ self.device_id = device_id
+ self.log = structlog.get_logger(device_id=device_id)
+ self.startup = None # Startup/reboot deferred
+ self.channel = None # Proxy messaging channel with 'send' method
+ self.logical_device_id = None
+ self.pm_metrics = None
+ self.alarms = None
+ self.multicast_vlans = [DEFAULT_MULTICAST_VLAN]
+ self.utility_vlan = DEFAULT_UTILITY_VLAN
+ self.mac_address = '00:13:95:00:00:00'
+ self._rest_support = None
+ self._initial_enable_complete = False
+ self.resource_mgr = None
+ self.tech_profiles = None # dict(): intf_id -> ResourceMgr.TechProfile
+
+ # Northbound and Southbound ports
+ self.northbound_ports = {} # port number -> Port
+ self.southbound_ports = {} # port number -> Port (For PON, use pon-id as key)
+ # self.management_ports = {} # port number -> Port TODO: Not currently supported
+
+ self.num_northbound_ports = None
+ self.num_southbound_ports = None
+ # self.num_management_ports = None
+
+ self.ip_address = None
+ self.host_and_port = None
+ self.timeout = timeout
+ self.restart_failure_timeout = 5 * 60 # 5 Minute timeout
+
+ # REST Client
+ self.rest_port = _DEFAULT_RESTCONF_PORT
+ self.rest_username = _DEFAULT_RESTCONF_USERNAME
+ self.rest_password = _DEFAULT_RESTCONF_PASSWORD
+ self._rest_client = None
+
+ # NETCONF Client
+ self.netconf_port = _DEFAULT_NETCONF_PORT
+ self.netconf_username = _DEFAULT_NETCONF_USERNAME
+ self.netconf_password = _DEFAULT_NETCONF_PASSWORD
+ self._netconf_client = None
+
+ # Flow entries
+ self.upstream_flows = DeviceFlows()
+ self.downstream_flows = DownstreamFlows()
+
+        self.max_nni_ports = 1  # TODO: This is a VOLTHA imposed limit in 'flow_decomposer.py'
+ # and logical_device_agent.py
+
+ self.resource_manager_key = _DEFAULT_RESOURCE_MGR_KEY
+ # OMCI ZMQ Channel
+ self.pon_agent_port = DEFAULT_PON_AGENT_TCP_PORT
+ self.pio_port = DEFAULT_PIO_TCP_PORT
+
+ # Heartbeat support
+ self.heartbeat_count = 0
+ self.heartbeat_miss = 0
+ self.heartbeat_interval = 2 # TODO: Decrease before release or any scale testing
+ self.heartbeat_failed_limit = 3
+ self.heartbeat_timeout = 5
+ self.heartbeat = None
+ self.heartbeat_last_reason = ''
+
+ # Virtualized OLT Support
+ self.is_virtual_olt = False
+
+ # Installed flows
+ self._evcs = {} # Flow ID/name -> FlowEntry
+
+ def _delete_logical_device(self):
+ ldi, self.logical_device_id = self.logical_device_id, None
+
+ if ldi is None:
+ return
+
+ self.log.debug('delete-logical-device', ldi=ldi)
+
+ logical_device = self.adapter_agent.get_logical_device(ldi)
+ self.adapter_agent.delete_logical_device(logical_device)
+
+ device = self.adapter_agent.get_device(self.device_id)
+ device.parent_id = ''
+
+ # Update the logical device mapping
+ if ldi in self.adapter.logical_device_id_to_root_device_id:
+ del self.adapter.logical_device_id_to_root_device_id[ldi]
+
+ def __del__(self):
+ # Kill any startup or heartbeat defers
+
+ d, self.startup = self.startup, None
+ h, self.heartbeat = self.heartbeat, None
+
+ if d is not None and not d.called:
+ d.cancel()
+
+ if h is not None and not h.called:
+ h.cancel()
+
+ # Remove the logical device
+ self._delete_logical_device()
+
+ self.northbound_ports.clear()
+ self.southbound_ports.clear()
+
+ def __str__(self):
+ return "AdtranDeviceHandler: {}".format(self.ip_address)
+
+ @property
+ def netconf_client(self):
+ return self._netconf_client
+
+ @property
+ def rest_client(self):
+ return self._rest_client
+
+ @property
+ def evcs(self):
+ return list(self._evcs.values())
+
+ def add_evc(self, evc):
+ if self._evcs is not None and evc.name not in self._evcs:
+ self._evcs[evc.name] = evc
+
+ def remove_evc(self, evc):
+ if self._evcs is not None and evc.name in self._evcs:
+ del self._evcs[evc.name]
+
+ def parse_provisioning_options(self, device):
+ if device.ipv4_address:
+ self.ip_address = device.ipv4_address
+ self.host_and_port = '{}:{}'.format(self.ip_address,
+ self.netconf_port)
+ elif device.host_and_port:
+ self.host_and_port = device.host_and_port.split(":")
+ self.ip_address = self.host_and_port[0]
+ self.netconf_port = int(self.host_and_port[1])
+ self.adapter_agent.update_device(device)
+
+ else:
+ self.activate_failed(device, 'No IP_address field provided')
+
+ #############################################################
+ # Now optional parameters
+ def check_tcp_port(value):
+ ivalue = int(value)
+ if ivalue <= 0 or ivalue > 65535:
+ raise argparse.ArgumentTypeError("%s is a not a valid port number" % value)
+ return ivalue
+
+ def check_vid(value):
+ ivalue = int(value)
+ if ivalue < 1 or ivalue > 4094:
+ raise argparse.ArgumentTypeError("Valid VLANs are 1..4094")
+ return ivalue
+
+ parser = argparse.ArgumentParser(description='Adtran Device Adapter')
+ parser.add_argument('--nc_username', '-u', action='store', default=_DEFAULT_NETCONF_USERNAME,
+ help='NETCONF username')
+ parser.add_argument('--nc_password', '-p', action='store', default=_DEFAULT_NETCONF_PASSWORD,
+ help='NETCONF Password')
+ parser.add_argument('--nc_port', '-t', action='store', default=_DEFAULT_NETCONF_PORT,
+ type=check_tcp_port, help='NETCONF TCP Port')
+ parser.add_argument('--rc_username', '-U', action='store', default=_DEFAULT_RESTCONF_USERNAME,
+ help='REST username')
+ parser.add_argument('--rc_password', '-P', action='store', default=_DEFAULT_RESTCONF_PASSWORD,
+ help='REST Password')
+ parser.add_argument('--rc_port', '-T', action='store', default=_DEFAULT_RESTCONF_PORT,
+ type=check_tcp_port, help='RESTCONF TCP Port')
+ parser.add_argument('--zmq_port', '-z', action='store', default=DEFAULT_PON_AGENT_TCP_PORT,
+ type=check_tcp_port, help='PON Agent ZeroMQ Port')
+ parser.add_argument('--pio_port', '-Z', action='store', default=DEFAULT_PIO_TCP_PORT,
+ type=check_tcp_port, help='PIO Service ZeroMQ Port')
+ parser.add_argument('--multicast_vlan', '-M', action='store',
+ default='{}'.format(DEFAULT_MULTICAST_VLAN),
+ help='Multicast VLAN'),
+ parser.add_argument('--utility_vlan', '-B', action='store',
+ default='{}'.format(DEFAULT_UTILITY_VLAN),
+ type=check_vid, help='VLAN for Controller based upstream flows from ONUs')
+ parser.add_argument('--resource_mgr_key', '-o', action='store',
+ default=_DEFAULT_RESOURCE_MGR_KEY,
+ help='OLT Type to look up associated resource manager configuration')
+ try:
+ args = parser.parse_args(shlex.split(device.extra_args))
+
+ # May have multiple multicast VLANs
+ self.multicast_vlans = [int(vid.strip()) for vid in args.multicast_vlan.split(',')]
+
+ self.netconf_username = args.nc_username
+ self.netconf_password = args.nc_password
+ self.netconf_port = args.nc_port
+
+ self.rest_username = args.rc_username
+ self.rest_password = args.rc_password
+ self.rest_port = args.rc_port
+
+ self.pon_agent_port = args.zmq_port
+ self.pio_port = args.pio_port
+ self.resource_manager_key = args.resource_mgr_key
+
+ if not self.rest_username:
+ self.rest_username = 'NDE0NDRkNDk0ZQ==\n'. \
+ decode('base64').decode('hex')
+ if not self.rest_password:
+ self.rest_password = 'NTA0MTUzNTM1NzRmNTI0NA==\n'. \
+ decode('base64').decode('hex')
+ if not self.netconf_username:
+ self.netconf_username = 'Njg3Mzc2NzI2ZjZmNzQ=\n'. \
+ decode('base64').decode('hex')
+ if not self.netconf_password:
+ self.netconf_password = 'NDI0ZjUzNDM0Zg==\n'. \
+ decode('base64').decode('hex')
+
+ except argparse.ArgumentError as e:
+ self.activate_failed(device,
+ 'Invalid arguments: {}'.format(e.message),
+ reachable=False)
+ except Exception as e:
+ self.log.exception('option_parsing_error: {}'.format(e.message))
+
+ @inlineCallbacks
+ def activate(self, done_deferred, reconciling):
+ """
+ Activate the OLT device
+
+ :param done_deferred: (Deferred) Deferred to fire when done
+ :param reconciling: If True, this adapter is taking over for a previous adapter
+ for an existing OLT
+ """
+ self.log.info('AdtranDeviceHandler.activating', reconciling=reconciling)
+
+ if self.logical_device_id is None:
+ device = self.adapter_agent.get_device(self.device_id)
+
+ try:
+ # Parse our command line options for this device
+ self.parse_provisioning_options(device)
+
+ ############################################################################
+ # Currently, only virtual OLT (pizzabox) is supported
+ # self.is_virtual_olt = Add test for MOCK Device if we want to support it
+
+ ############################################################################
+ # Start initial discovery of NETCONF support (if any)
+ try:
+ device.reason = 'establishing NETCONF connection'
+ self.adapter_agent.update_device(device)
+
+ self.startup = self.make_netconf_connection()
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('netconf-connection', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ ############################################################################
+ # Update access information on network device for full protocol support
+ try:
+ device.reason = 'device networking validation'
+ self.adapter_agent.update_device(device)
+ self.startup = self.ready_network_access()
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('network-setup', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ ############################################################################
+ # Restconf setup
+ try:
+ device.reason = 'establishing RESTConf connections'
+ self.adapter_agent.update_device(device)
+ self.startup = self.make_restconf_connection()
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('restconf-setup', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ ############################################################################
+ # Get the device Information
+ if reconciling:
+ device.connect_status = ConnectStatus.REACHABLE
+ self.adapter_agent.update_device(device)
+ else:
+ try:
+ device.reason = 'retrieving device information'
+ self.adapter_agent.update_device(device)
+ self.startup = self.get_device_info(device)
+ results = yield self.startup
+
+ device.model = results.get('model', 'unknown')
+ device.hardware_version = results.get('hardware_version', 'unknown')
+ device.firmware_version = results.get('firmware_version', 'unknown')
+ device.serial_number = results.get('serial_number', 'unknown')
+ device.images.image.extend(results.get('software-images', []))
+
+ device.root = True
+ device.vendor = results.get('vendor', 'Adtran Inc.')
+ device.connect_status = ConnectStatus.REACHABLE
+ self.adapter_agent.update_device(device)
+
+ except Exception as e:
+ self.log.exception('device-info', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ try:
+ # Enumerate and create Northbound NNI interfaces
+ device.reason = 'enumerating northbound interfaces'
+ self.adapter_agent.update_device(device)
+ self.startup = self.enumerate_northbound_ports(device)
+ results = yield self.startup
+
+ self.startup = self.process_northbound_ports(device, results)
+ yield self.startup
+
+ device.reason = 'adding northbound interfaces to adapter'
+ self.adapter_agent.update_device(device)
+
+ if not reconciling:
+ for port in self.northbound_ports.itervalues():
+ self.adapter_agent.add_port(device.id, port.get_port())
+
+ except Exception as e:
+ self.log.exception('NNI-enumeration', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ try:
+ # Enumerate and create southbound interfaces
+ device.reason = 'enumerating southbound interfaces'
+ self.adapter_agent.update_device(device)
+ self.startup = self.enumerate_southbound_ports(device)
+ results = yield self.startup
+
+ self.startup = self.process_southbound_ports(device, results)
+ yield self.startup
+
+ device.reason = 'adding southbound interfaces to adapter'
+ self.adapter_agent.update_device(device)
+
+ if not reconciling:
+ for port in self.southbound_ports.itervalues():
+ self.adapter_agent.add_port(device.id, port.get_port())
+
+ except Exception as e:
+ self.log.exception('PON_enumeration', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ # Initialize resource manager
+ self.initialize_resource_manager()
+
+ if reconciling:
+ if device.admin_state == AdminState.ENABLED:
+ if device.parent_id:
+ self.logical_device_id = device.parent_id
+ self.adapter_agent.reconcile_logical_device(device.parent_id)
+ else:
+ self.log.info('no-logical-device-set')
+
+ # Reconcile child devices
+ self.adapter_agent.reconcile_child_devices(device.id)
+ ld_initialized = self.adapter_agent.get_logical_device()
+ assert device.parent_id == ld_initialized.id, \
+ 'parent ID not Logical device ID'
+
+ else:
+ # Complete activation by setting up logical device for this OLT and saving
+ # off the devices parent_id
+ ld_initialized = self.create_logical_device(device)
+
+ ############################################################################
+ # Setup PM configuration for this device
+ if self.pm_metrics is None:
+ try:
+ device.reason = 'setting up Performance Monitoring configuration'
+ self.adapter_agent.update_device(device)
+
+ kwargs = {
+ 'nni-ports': self.northbound_ports.values(),
+ 'pon-ports': self.southbound_ports.values()
+ }
+ self.pm_metrics = OltPmMetrics(self.adapter_agent, self.device_id,
+ ld_initialized.id, grouped=True,
+ freq_override=False, **kwargs)
+
+ pm_config = self.pm_metrics.make_proto()
+ self.log.debug("initial-pm-config", pm_config=pm_config)
+ self.adapter_agent.update_device_pm_config(pm_config, init=True)
+
+ except Exception as e:
+ self.log.exception('pm-setup', e=e)
+ self.activate_failed(device, e.message, reachable=False)
+
+ ############################################################################
+ # Set the ports in a known good initial state
+ if not reconciling:
+ device.reason = 'setting device to a known initial state'
+ self.adapter_agent.update_device(device)
+ try:
+ for port in self.northbound_ports.itervalues():
+ self.startup = yield port.reset()
+
+ for port in self.southbound_ports.itervalues():
+ self.startup = yield port.reset()
+
+ except Exception as e:
+ self.log.exception('port-reset', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ ############################################################################
+ # Create logical ports for all southbound and northbound interfaces
+ try:
+ device.reason = 'creating logical ports'
+ self.adapter_agent.update_device(device)
+ self.startup = self.create_logical_ports(device, ld_initialized, reconciling)
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('logical-port', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ ############################################################################
+ # Setup Alarm handler
+ device.reason = 'setting up adapter alarms'
+ self.adapter_agent.update_device(device)
+
+ self.alarms = AdapterAlarms(self.adapter_agent, device.id, ld_initialized.id)
+
+ ############################################################################
+ # Register for ONU detection
+ # self.adapter_agent.register_for_onu_detect_state(device.id)
+ # Complete device specific steps
+ try:
+ self.log.debug('device-activation-procedures')
+ device.reason = 'performing model specific activation procedures'
+ self.adapter_agent.update_device(device)
+ self.startup = self.complete_device_specific_activation(device, reconciling)
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('device-activation-procedures', e=e)
+ returnValue(self.restart_activate(done_deferred, reconciling))
+
+ # Schedule the heartbeat for the device
+ self.log.debug('starting-heartbeat')
+ self.start_heartbeat(delay=10)
+
+ device = self.adapter_agent.get_device(device.id)
+ device.parent_id = ld_initialized.id
+ device.oper_status = OperStatus.ACTIVE
+ device.reason = ''
+ self.adapter_agent.update_device(device)
+ self.logical_device_id = ld_initialized.id
+
+ # Start collecting stats from the device after a brief pause
+ reactor.callLater(10, self.pm_metrics.start_collector)
+
+ # Signal completion
+ self._initial_enable_complete = True
+ self.log.info('activated')
+
+ except Exception as e:
+ self.log.exception('activate', e=e)
+ if done_deferred is not None:
+ done_deferred.errback(e)
+
+ if done_deferred is not None:
+ done_deferred.callback('activated')
+
+ returnValue('activated')
+
+ def restart_activate(self, done_deferred, reconciling):
+ """
+ Startup activation failed, pause a short period of time and retry
+
+ :param done_deferred: (deferred) Deferred to fire upon completion of activation
+ :param reconciling: (bool) If true, we are reconciling after moving to a new vCore
+ """
+ d, self.startup = self.startup, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+ device = self.adapter_agent.get_device(self.device_id)
+ device.reason = 'Failed during {}, retrying'.format(device.reason)
+ self.adapter_agent.update_device(device)
+ self.startup = reactor.callLater(_STARTUP_RETRY_TIMEOUT, self.activate,
+ done_deferred, reconciling)
+ return 'retrying'
+
+ @inlineCallbacks
+ def ready_network_access(self):
+ # Override in device specific class if needed
+ returnValue('nop')
+
+ def activate_failed(self, device, reason, reachable=True):
+ """
+ Activation process (adopt_device) has failed.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions. Such extensions shall be described as part of
+ the device type specification returned by device_types().
+ :param reason: (string) failure reason
+ :param reachable: (boolean) Flag indicating if device may be reachable
+ via RESTConf or NETConf even after this failure.
+ """
+ device.oper_status = OperStatus.FAILED
+ if not reachable:
+ device.connect_status = ConnectStatus.UNREACHABLE
+
+ device.reason = reason
+ self.adapter_agent.update_device(device)
+ raise Exception('Failed to activate OLT: {}'.format(device.reason))
+
+ @inlineCallbacks
+ def make_netconf_connection(self, connect_timeout=None,
+ close_existing_client=False):
+ """
+ Obtain a connected NETCONF client for this OLT, creating one if needed.
+
+ Uses AdtranNetconfClient for real hardware and MockNetconfClient when
+ self.is_virtual_olt is set. The connected client is cached on
+ self._netconf_client.
+
+ :param connect_timeout: (int/None) connect timeout; defaults to self.timeout
+ :param close_existing_client: (bool) close and discard any cached client first
+ :return: (Deferred) fires True if a cached client was already connected,
+ otherwise with the results of the client's connect() request
+ :raises Exception: if the NETCONF connect attempt fails
+ """
+ if close_existing_client and self._netconf_client is not None:
+ try:
+ yield self._netconf_client.close()
+ except:
+ # best-effort close; the client is dropped either way
+ pass
+ self._netconf_client = None
+
+ client = self._netconf_client
+
+ if client is None:
+ if not self.is_virtual_olt:
+ client = AdtranNetconfClient(self.ip_address,
+ self.netconf_port,
+ self.netconf_username,
+ self.netconf_password,
+ self.timeout)
+ else:
+ # Import deferred to avoid pulling in the mock in production paths
+ from python.adapters.adtran.adtran_common.net.mock_netconf_client import MockNetconfClient
+ client = MockNetconfClient(self.ip_address,
+ self.netconf_port,
+ self.netconf_username,
+ self.netconf_password,
+ self.timeout)
+ # A cached client that is still connected can be reused as-is
+ if client.connected:
+ self._netconf_client = client
+ returnValue(True)
+
+ timeout = connect_timeout or self.timeout
+
+ try:
+ request = client.connect(timeout)
+ results = yield request
+ # Cache only after a successful connect
+ self._netconf_client = client
+ returnValue(results)
+
+ except Exception as e:
+ self.log.exception('Failed to create NETCONF Client', e=e)
+ self._netconf_client = None
+ raise
+
+ @inlineCallbacks
+ def make_restconf_connection(self, get_timeout=None):
+ """
+ Obtain a RESTCONF client for this OLT and verify it with a 'hello' GET.
+
+ The client is cached on self._rest_client only after the hello request
+ returns a dict containing 'module-info'.
+
+ :param get_timeout: (int/None) GET timeout; defaults to self.timeout
+ :return: (Deferred) fires with the hello results dict
+ :raises ConnectError: if the hello response has an unexpected shape
+ """
+ client = self._rest_client
+
+ if client is None:
+ client = AdtranRestClient(self.ip_address,
+ self.rest_port,
+ self.rest_username,
+ self.rest_password,
+ self.timeout)
+
+ timeout = get_timeout or self.timeout
+
+ try:
+ request = client.request('GET', self.HELLO_URI, name='hello', timeout=timeout)
+ results = yield request
+ # Only a well-formed hello response validates the client for caching
+ if isinstance(results, dict) and 'module-info' in results:
+ self._rest_client = client
+ returnValue(results)
+ else:
+ from twisted.internet.error import ConnectError
+ self._rest_client = None
+ raise ConnectError(string='Results received but unexpected data type or contents')
+ except Exception:
+ self._rest_client = None
+ raise
+
+ def create_logical_device(self, device):
+ """
+ Create the VOLTHA logical device (OpenFlow switch) for this OLT.
+
+ :param device: (Device) VOLTHA device; hardware_version, serial_number and
+ the first software image's version populate the OpenFlow description
+ :return: the logical device created by the adapter agent
+ """
+ # Software version is taken from the first image entry
+ version = device.images.image[0].version
+
+ ld = LogicalDevice(
+ # NOTE: not setting id and datapath_id will let the adapter agent pick id
+ desc=ofp_desc(mfr_desc='VOLTHA Project',
+ hw_desc=device.hardware_version,
+ sw_desc=version,
+ serial_num=device.serial_number,
+ dp_desc='n/a'),
+ switch_features=ofp_switch_features(n_buffers=256,
+ n_tables=2,
+ capabilities=(
+ OFPC_FLOW_STATS |
+ OFPC_TABLE_STATS |
+ OFPC_GROUP_STATS |
+ OFPC_PORT_STATS)),
+ root_device_id=device.id)
+
+ # The datapath ID is derived from the OLT's MAC address
+ ld_initialized = self.adapter_agent.create_logical_device(ld,
+ dpid=self.mac_address)
+ return ld_initialized
+
+ @inlineCallbacks
+ def create_logical_ports(self, device, ld_initialized, reconciling):
+ """
+ Populate the logical device with ports, clear stale flow state from the
+ hardware, and start/stop the physical interfaces.
+
+ :param device: (Device) VOLTHA device
+ :param ld_initialized: logical device returned by create_logical_device()
+ :param reconciling: (bool) True when re-syncing after a vCore move; logical
+ ports already exist in that case and are not re-added
+ :return: (Deferred) fires with the gathered port start/stop results
+ """
+ if not reconciling:
+ # Add the ports to the logical device
+
+ for port in self.northbound_ports.itervalues():
+ lp = port.get_logical_port()
+ if lp is not None:
+ self.adapter_agent.add_logical_port(ld_initialized.id, lp)
+
+ for port in self.southbound_ports.itervalues():
+ lp = port.get_logical_port()
+ if lp is not None:
+ self.adapter_agent.add_logical_port(ld_initialized.id, lp)
+
+ # Clean up all EVCs, EVC maps and ACLs (exceptions are ok)
+ # Cleanup failures are logged but do not abort port creation
+ try:
+ from flow.evc import EVC
+ self.startup = yield EVC.remove_all(self.netconf_client)
+ from flow.utility_evc import UtilityEVC
+ self.startup = yield UtilityEVC.remove_all(self.netconf_client)
+
+ except Exception as e:
+ self.log.exception('evc-cleanup', e=e)
+
+ try:
+ from flow.evc_map import EVCMap
+ self.startup = yield EVCMap.remove_all(self.netconf_client)
+
+ except Exception as e:
+ self.log.exception('evc-map-cleanup', e=e)
+
+ from flow.acl import ACL
+ ACL.clear_all(device.id)
+ try:
+ self.startup = yield ACL.remove_all(self.netconf_client)
+
+ except Exception as e:
+ self.log.exception('acl-cleanup', e=e)
+
+ # Drop any cached flow-entry and download state for this handler
+ from flow.flow_entry import FlowEntry
+ FlowEntry.clear_all(self)
+
+ from download import Download
+ Download.clear_all(self.netconf_client)
+
+ # Start/stop the interfaces as needed. These are deferred calls
+
+ dl = []
+ for port in self.northbound_ports.itervalues():
+ try:
+ dl.append(port.start())
+ except Exception as e:
+ self.log.exception('northbound-port-startup', e=e)
+
+ # Southbound ports are only started if administratively enabled
+ for port in self.southbound_ports.itervalues():
+ try:
+ dl.append(port.start() if port.admin_state == AdminState.ENABLED else port.stop())
+
+ except Exception as e:
+ self.log.exception('southbound-port-startup', e=e)
+
+ # consumeErrors=True: one failed port does not mask the others' results
+ results = yield defer.gatherResults(dl, consumeErrors=True)
+
+ returnValue(results)
+
+ @inlineCallbacks
+ def device_information(self, device):
+ """
+ Examine the various managment models and extract device information for
+ VOLTHA use
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :return: (Deferred or None).
+ """
+ yield defer.Deferred(lambda c: c.callback("Not Required"))
+
+ @inlineCallbacks
+ def enumerate_northbound_ports(self, device):
+ """
+ Enumerate all northbound ports of a device. You should override
+ a non-recoverable error, throw an appropriate exception.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :return: (Deferred or None).
+ """
+ yield defer.Deferred(lambda c: c.callback("Not Required"))
+
+ @inlineCallbacks
+ def process_northbound_ports(self, device, results):
+ """
+ Process the results from the 'enumerate_northbound_ports' method.
+ You should override this method in your derived class as necessary and
+ create an NNI Port object (of your own choosing) that supports a 'get_port'
+ method. Once created, insert it into this base class's northbound_ports
+ collection.
+
+ Should you encounter a non-recoverable error, throw an appropriate exception.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :param results: Results from the 'enumerate_northbound_ports' method that
+ you implemented. The type and contents are up to you to
+ :return:
+ """
+ yield defer.Deferred(lambda c: c.callback("Not Required"))
+
+ @inlineCallbacks
+ def enumerate_southbound_ports(self, device):
+ """
+ Enumerate all southbound ports of a device. You should override
+ this method in your derived class as necessary. Should you encounter
+ a non-recoverable error, throw an appropriate exception.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :return: (Deferred or None).
+ """
+ yield defer.Deferred(lambda c: c.callback("Not Required"))
+
+ @inlineCallbacks
+ def process_southbound_ports(self, device, results):
+ """
+ Process the results from the 'enumerate_southbound_ports' method.
+ You should override this method in your derived class as necessary and
+ create an Port object (of your own choosing) that supports a 'get_port'
+ method. Once created, insert it into this base class's southbound_ports
+ collection.
+
+ Should you encounter a non-recoverable error, throw an appropriate exception.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :param results: Results from the 'enumerate_southbound_ports' method that
+ you implemented. The type and contents are up to you to
+ :return:
+ """
+ yield defer.Deferred(lambda c: c.callback("Not Required"))
+
+ # TODO: Move some of the items below from here and the EVC to a utility class
+
+ def is_nni_port(self, port):
+ return port in self.northbound_ports
+
+ def is_uni_port(self, port):
+ # Subclasses must report whether 'port' is a UNI (user-facing) port
+ raise NotImplementedError('implement in derived class')
+
+ def is_pon_port(self, port):
+ # Subclasses must report whether 'port' is a PON (southbound optical) port
+ raise NotImplementedError('implement in derived class')
+
+ def is_logical_port(self, port):
+ return not self.is_nni_port(port) and not self.is_uni_port(port) and not self.is_pon_port(port)
+
+ def get_port_name(self, port):
+ # Subclasses must map a port number to its platform-specific name
+ raise NotImplementedError('implement in derived class')
+
+ def initialize_resource_manager(self):
+ raise NotImplementedError('implement in derived class')
+
+ @inlineCallbacks
+ def complete_device_specific_activation(self, _device, _reconciling):
+ # NOTE: Override this in your derived class for any device startup completion
+ return defer.succeed('NOP')
+
+ @inlineCallbacks
+ def disable(self):
+ """
+ This is called when a previously enabled device needs to be disabled based on a NBI call.
+
+ Cancels outstanding work, marks the device/children disabled, tears down
+ the logical device and all ports, and drops the NETCONF/REST clients.
+ :return: (Deferred) fires with None when the disable completes
+ """
+ self.log.info('disabling', device_id=self.device_id)
+
+ # Cancel any running enable/disable/... in progress
+ d, self.startup = self.startup, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ # Get the latest device reference
+ device = self.adapter_agent.get_device(self.device_id)
+ device.reason = 'Disabling'
+ self.adapter_agent.update_device(device)
+
+ # Drop registration for ONU detection
+ # self.adapter_agent.unregister_for_onu_detect_state(self.device.id)
+ # Suspend any active healthchecks / pings
+
+ # Cancel the scheduled heartbeat poll, if one is pending
+ h, self.heartbeat = self.heartbeat, None
+ try:
+ if h is not None and not h.called:
+ h.cancel()
+ except:
+ pass
+ # Update the operational status to UNKNOWN
+
+ device.oper_status = OperStatus.UNKNOWN
+ device.connect_status = ConnectStatus.UNREACHABLE
+ self.adapter_agent.update_device(device)
+
+ # Disable all child devices first
+ self.adapter_agent.update_child_devices_state(self.device_id,
+ admin_state=AdminState.DISABLED)
+
+ # Remove the peer references from this device
+ self.adapter_agent.delete_all_peer_references(self.device_id)
+
+ # Remove the logical device to clear out logical device ports for any
+ # previously activated ONUs
+ self._delete_logical_device()
+
+ # Set all ports to disabled
+ self.adapter_agent.disable_all_ports(self.device_id)
+
+ # Stop all physical ports; collect the deferreds so we can wait on them
+ dl = []
+ for port in self.northbound_ports.itervalues():
+ dl.append(port.stop())
+
+ for port in self.southbound_ports.itervalues():
+ dl.append(port.stop())
+
+ # NOTE: Flows removed before this method is called
+ # Wait for completion
+
+ # consumeErrors=True: a single port failure does not abort the disable
+ self.startup = defer.gatherResults(dl, consumeErrors=True)
+ yield self.startup
+
+ if self.netconf_client:
+ self.netconf_client.close()
+
+ self._netconf_client = None
+ self._rest_client = None
+
+ device.reason = ''
+ self.adapter_agent.update_device(device)
+ self.log.info('disabled', device_id=device.id)
+ returnValue(None)
+
+ @inlineCallbacks
+ def reenable(self, done_deferred=None):
+ """
+ This is called when a previously disabled device needs to be enabled based on a NBI call.
+
+ If the initial activation never completed, this defers to activate()
+ instead. Connection failures during re-enable are logged but not fatal.
+ :param done_deferred: (Deferred) Deferred to fire when done
+ :return: (Deferred) fires with 'activating' or 'reenabled'
+ """
+ self.log.info('re-enabling', device_id=self.device_id)
+
+ # Cancel any running enable/disable/... in progress
+ d, self.startup = self.startup, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ if not self._initial_enable_complete:
+ # Never contacted the device on the initial startup, do 'activate' steps instead
+ self.startup = reactor.callLater(0, self.activate, done_deferred, False)
+ returnValue('activating')
+
+ # Get the latest device reference
+ device = self.adapter_agent.get_device(self.device_id)
+
+ # Update the connect status to REACHABLE
+ device.connect_status = ConnectStatus.REACHABLE
+ device.oper_status = OperStatus.ACTIVATING
+ self.adapter_agent.update_device(device)
+
+ # Reenable any previously configured southbound ports
+ for port in self.southbound_ports.itervalues():
+ self.log.debug('reenable-pon-port', pon_id=port.pon_id)
+ port.enabled = True
+
+ # Flows should not exist on re-enable. They are re-pushed
+ if len(self._evcs):
+ self.log.warn('evcs-found', evcs=self._evcs)
+ self._evcs.clear()
+
+ # Re-establish management connectivity; failures here are non-fatal
+ try:
+ yield self.make_restconf_connection()
+
+ except Exception as e:
+ self.log.exception('adtran-hello-reconnect', e=e)
+
+ try:
+ yield self.make_netconf_connection()
+
+ except Exception as e:
+ self.log.exception('NETCONF-re-connection', e=e)
+
+ # Recreate the logical device
+ # NOTE: This causes a flow update event
+ ld_initialized = self.create_logical_device(device)
+
+ # Create logical ports for all southbound and northbound interfaces
+ try:
+ self.startup = self.create_logical_ports(device, ld_initialized, False)
+ yield self.startup
+
+ except Exception as e:
+ self.log.exception('logical-port-creation', e=e)
+
+ device = self.adapter_agent.get_device(device.id)
+ device.parent_id = ld_initialized.id
+ device.oper_status = OperStatus.ACTIVE
+ device.reason = ''
+ self.logical_device_id = ld_initialized.id
+
+ # update device active status now
+ self.adapter_agent.update_device(device)
+
+ # Reenable all child devices
+ self.adapter_agent.update_child_devices_state(device.id,
+ admin_state=AdminState.ENABLED)
+ # Schedule the heartbeat for the device
+ self.log.debug('starting-heartbeat')
+ self.start_heartbeat(delay=5)
+
+ self.log.info('re-enabled', device_id=device.id)
+
+ if done_deferred is not None:
+ done_deferred.callback('Done')
+
+ returnValue('reenabled')
+
+ @inlineCallbacks
+ def reboot(self):
+ """
+ This is called to reboot a device based on a NBI call. The admin state of the device
+ will not change after the reboot.
+
+ Issues the restart RPC over NETCONF, marks the device and its children
+ unreachable, closes the clients, then schedules _finish_reboot() to poll
+ for the device coming back.
+ :return: (Deferred) 'failed' if activation never completed, a failure on
+ RPC error, or the scheduled _finish_reboot call handle
+ """
+ self.log.debug('reboot')
+
+ if not self._initial_enable_complete:
+ # Never contacted the device on the initial startup, do 'activate' steps instead
+ returnValue('failed')
+
+ # Cancel any running enable/disable/... in progress
+ d, self.startup = self.startup, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+ # Issue reboot command
+
+ if not self.is_virtual_olt:
+ try:
+ yield self.netconf_client.rpc(AdtranDeviceHandler.RESTART_RPC)
+
+ except Exception as e:
+ self.log.exception('NETCONF-shutdown', e=e)
+ # Failure() inside an except block captures the active exception
+ returnValue(defer.fail(Failure()))
+
+ # self.adapter_agent.unregister_for_onu_detect_state(self.device.id)
+
+ # Update the operational status to ACTIVATING and connect status to
+ # UNREACHABLE
+
+ # Remember previous states so _finish_reboot can restore them
+ device = self.adapter_agent.get_device(self.device_id)
+ previous_oper_status = device.oper_status
+ previous_conn_status = device.connect_status
+ device.oper_status = OperStatus.ACTIVATING
+ device.connect_status = ConnectStatus.UNREACHABLE
+ self.adapter_agent.update_device(device)
+
+ # Update the child devices connect state to UNREACHABLE
+ self.adapter_agent.update_child_devices_state(self.device_id,
+ connect_status=ConnectStatus.UNREACHABLE)
+
+ # Shutdown communications with OLT. Typically it takes about 2 seconds
+ # or so after the reply before the restart actually occurs
+
+ try:
+ response = yield self.netconf_client.close()
+ self.log.debug('Restart response XML was: {}'.format('ok' if response.ok else 'bad'))
+
+ except Exception as e:
+ self.log.exception('NETCONF-client-shutdown', e=e)
+
+ # Clear off clients
+
+ self._netconf_client = None
+ self._rest_client = None
+
+ # Run remainder of reboot process as a new task. The OLT then may be up in a
+ # few moments or may take 3 minutes or more depending on any self tests enabled
+
+ current_time = time.time()
+ timeout = current_time + self.restart_failure_timeout
+
+ self.startup = reactor.callLater(10, self._finish_reboot, timeout,
+ previous_oper_status,
+ previous_conn_status)
+ returnValue(self.startup)
+
+ @inlineCallbacks
+ def _finish_reboot(self, timeout, previous_oper_status, previous_conn_status):
+ """
+ Poll until REST and NETCONF connectivity are re-established after a
+ reboot (rescheduling itself every 5s until 'timeout'), then restore
+ device/child states, restart ports, and reflow any EVCs.
+
+ :param timeout: (float) absolute time.time() deadline for reconnection
+ :param previous_oper_status: oper status to restore on the device
+ :param previous_conn_status: connect status to restore on the device
+ """
+ # Now wait until REST & NETCONF are re-established or we timeout
+
+ self.log.info('Resuming-activity',
+ remaining=timeout - time.time(), timeout=timeout, current=time.time())
+
+ if self.rest_client is None:
+ try:
+ yield self.make_restconf_connection(get_timeout=10)
+
+ except Exception:
+ self.log.debug('No RESTCONF connection yet')
+ self._rest_client = None
+
+ if self.netconf_client is None:
+ try:
+ yield self.make_netconf_connection(connect_timeout=10)
+
+ except Exception as e:
+ # NOTE(review): the inner 'except' rebinds 'e', shadowing the
+ # outer connect error, which is never logged
+ try:
+ if self.netconf_client is not None:
+ yield self.netconf_client.close()
+ except Exception as e:
+ self.log.exception(e.message)
+ finally:
+ self._netconf_client = None
+
+ # Not connected yet: reschedule ourselves until the deadline passes
+ if (self.netconf_client is None and not self.is_virtual_olt) or self.rest_client is None:
+ current_time = time.time()
+ if current_time < timeout:
+ self.startup = reactor.callLater(5, self._finish_reboot, timeout,
+ previous_oper_status,
+ previous_conn_status)
+ returnValue(self.startup)
+
+ if self.netconf_client is None and not self.is_virtual_olt:
+ self.log.error('NETCONF-restore-failure')
+ pass # TODO: What is best course of action if cannot get clients back?
+
+ if self.rest_client is None:
+ self.log.error('RESTCONF-restore-failure')
+ pass # TODO: What is best course of action if cannot get clients back?
+
+ # Pause additional 5 seconds to let allow OLT microservices to complete some more initialization
+ yield asleep(5)
+ # TODO: Update device info. The software images may have changed...
+ # Get the latest device reference
+
+ device = self.adapter_agent.get_device(self.device_id)
+ device.oper_status = previous_oper_status
+ device.connect_status = previous_conn_status
+ self.adapter_agent.update_device(device)
+
+ # Update the child devices connect state to REACHABLE
+ self.adapter_agent.update_child_devices_state(self.device_id,
+ connect_status=ConnectStatus.REACHABLE)
+ # Restart ports to previous state
+ dl = []
+
+ for port in self.northbound_ports.itervalues():
+ dl.append(port.restart())
+
+ for port in self.southbound_ports.itervalues():
+ dl.append(port.restart())
+
+ try:
+ yield defer.gatherResults(dl, consumeErrors=True)
+
+ except Exception as e:
+ self.log.exception('port-restart', e=e)
+
+ # Re-subscribe for ONU detection
+ # self.adapter_agent.register_for_onu_detect_state(self.device.id)
+ # Request reflow of any EVC/EVC-MAPs
+ # NOTE(review): the guard checks self._evcs but iterates self.evcs -
+ # presumably the same collection via a property; confirm
+ if len(self._evcs) > 0:
+ dl = []
+ for evc in self.evcs:
+ dl.append(evc.reflow())
+
+ try:
+ yield defer.gatherResults(dl)
+ except Exception as e:
+ self.log.exception('flow-restart', e=e)
+
+ self.log.info('rebooted', device_id=self.device_id)
+ returnValue('Rebooted')
+
+ @inlineCallbacks
+ def delete(self):
+ """
+ This is called to delete a device from the PON based on a NBI call.
+ If the device is an OLT then the whole PON will be deleted.
+
+ Cancels outstanding work and the heartbeat, removes flows/EVCs, child
+ devices, the logical device, all ports, and closes the management clients.
+ """
+ self.log.info('deleting', device_id=self.device_id)
+
+ # Cancel any outstanding tasks
+
+ d, self.startup = self.startup, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+ h, self.heartbeat = self.heartbeat, None
+ try:
+ if h is not None and not h.called:
+ h.cancel()
+ except:
+ pass
+
+ # Get the latest device reference
+ device = self.adapter_agent.get_device(self.device_id)
+ device.reason = 'Deleting'
+ self.adapter_agent.update_device(device)
+
+ # self.adapter_agent.unregister_for_onu_detect_state(self.device.id)
+
+ # Remove all flows from the device
+ # TODO: Create a bulk remove-all by device-id
+
+ # Snapshot and clear the EVC set before deleting each entry
+ evcs = self._evcs
+ self._evcs.clear()
+
+ for evc in evcs:
+ evc.delete() # TODO: implement bulk-flow procedures
+
+ # Remove all child devices
+ self.adapter_agent.delete_all_child_devices(self.device_id)
+
+ # Remove the logical device (should already be gone if disable came first)
+ self._delete_logical_device()
+
+ # Remove the peer references from this device
+ self.adapter_agent.delete_all_peer_references(self.device_id)
+
+ # Tell all ports to stop any background processing
+
+ for port in self.northbound_ports.itervalues():
+ port.delete()
+
+ for port in self.southbound_ports.itervalues():
+ port.delete()
+
+ self.northbound_ports.clear()
+ self.southbound_ports.clear()
+
+ # Shutdown communications with OLT
+
+ if self.netconf_client is not None:
+ try:
+ yield self.netconf_client.close()
+ except Exception as e:
+ self.log.exception('NETCONF-shutdown', e=e)
+
+ self._netconf_client = None
+
+ self._rest_client = None
+ # NOTE(review): 'del mgr' only drops the local reference; any real
+ # cleanup belongs in the resource manager itself
+ mgr, self.resource_mgr = self.resource_mgr, None
+ if mgr is not None:
+ del mgr
+
+ self.log.info('deleted', device_id=self.device_id)
+
+ def delete_child_device(self, proxy_address):
+ self.log.debug('sending-deactivate-onu',
+ olt_device_id=self.device_id,
+ proxy_address=proxy_address)
+ try:
+ children = self.adapter_agent.get_child_devices(self.device_id)
+ for child in children:
+ if child.proxy_address.onu_id == proxy_address.onu_id and \
+ child.proxy_address.channel_id == proxy_address.channel_id:
+ self.adapter_agent.delete_child_device(self.device_id,
+ child.id,
+ onu_device=child)
+ break
+
+ except Exception as e:
+ self.log.error('adapter_agent error', error=e)
+
+ def packet_out(self, egress_port, msg):
+ # Subclasses must transmit 'msg' out of the given egress port
+ raise NotImplementedError('Overload in a derived class')
+
+ def update_pm_config(self, device, pm_config):
+ # Apply a new performance-monitoring configuration to the metrics collector
+ # TODO: This has not been tested
+ self.log.info('update_pm_config', pm_config=pm_config)
+ self.pm_metrics.update(pm_config)
+
+ @inlineCallbacks
+ def get_device_info(self, device):
+ """
+ Perform an initial network operation to discover the device hardware
+ and software version. Serial Number would be helpful as well.
+
+ Upon successfully retrieving the information, remember to call the
+ 'start_heartbeat' method to keep in contact with the device being managed
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions. Such extensions shall be described as part of
+ the device type specification returned by device_types().
+ """
+ # Base class performs no discovery and returns an empty dict;
+ # subclasses are expected to override
+ device = {}
+ returnValue(device)
+
+ def start_heartbeat(self, delay=10):
+ assert delay > 1, 'Minimum heartbeat is 1 second'
+ self.log.info('Starting-Device-Heartbeat ***')
+ self.heartbeat = reactor.callLater(delay, self.check_pulse)
+ return self.heartbeat
+
+ def check_pulse(self):
+ """
+ Periodic heartbeat: issue a RESTCONF 'hello' GET and route the result
+ to the success/fail handlers. Only runs while a logical device exists.
+ """
+ if self.logical_device_id is not None:
+ try:
+ self.heartbeat = self.rest_client.request('GET', self.HELLO_URI,
+ name='hello', timeout=5)
+ self.heartbeat.addCallbacks(self._heartbeat_success, self._heartbeat_fail)
+
+ except Exception as e:
+ # Any synchronous failure (e.g. no REST client yet) counts as a miss
+ self.heartbeat = reactor.callLater(5, self._heartbeat_fail, e)
+
+ def on_heatbeat_alarm(self, active):
+ if active and self.netconf_client is None or not self.netconf_client.connected:
+ self.make_netconf_connection(close_existing_client=True)
+
+ def heartbeat_check_status(self, _):
+ """
+ Check the number of heartbeat failures against the limit and emit an alarm if needed
+ """
+ device = self.adapter_agent.get_device(self.device_id)
+
+ try:
+ from pyvoltha.adapters.extensions.alarms.heartbeat_alarm import HeartbeatAlarm
+
+ if self.heartbeat_miss >= self.heartbeat_failed_limit:
+ # Too many consecutive misses: declare the device unreachable once
+ if device.connect_status == ConnectStatus.REACHABLE:
+ self.log.warning('heartbeat-failed', count=self.heartbeat_miss)
+ device.connect_status = ConnectStatus.UNREACHABLE
+ device.oper_status = OperStatus.FAILED
+ device.reason = self.heartbeat_last_reason
+ self.adapter_agent.update_device(device)
+ HeartbeatAlarm(self.alarms, 'olt', self.heartbeat_miss).raise_alarm()
+ self.on_heatbeat_alarm(True)
+ else:
+ # Update device states
+ if device.connect_status != ConnectStatus.REACHABLE:
+ device.connect_status = ConnectStatus.REACHABLE
+ device.oper_status = OperStatus.ACTIVE
+ device.reason = ''
+ self.adapter_agent.update_device(device)
+ HeartbeatAlarm(self.alarms, 'olt').clear_alarm()
+ self.on_heatbeat_alarm(False)
+
+ # NOTE(review): make_netconf_connection returns a Deferred that is
+ # dropped here - the reconnect is fire-and-forget
+ if self.netconf_client is None or not self.netconf_client.connected:
+ self.make_netconf_connection(close_existing_client=True)
+
+ except Exception as e:
+ self.log.exception('heartbeat-check', e=e)
+
+ # Reschedule next heartbeat
+ if self.logical_device_id is not None:
+ self.heartbeat_count += 1
+ self.heartbeat = reactor.callLater(self.heartbeat_interval, self.check_pulse)
+
+ def _heartbeat_success(self, results):
+ # REST 'hello' succeeded: reset the miss counter and re-evaluate alarm state
+ self.log.debug('heartbeat-success')
+ self.heartbeat_miss = 0
+ self.heartbeat_last_reason = ''
+ self.heartbeat_check_status(results)
+
+ def _heartbeat_fail(self, failure):
+ # REST 'hello' failed: count the miss and re-evaluate alarm state
+ self.heartbeat_miss += 1
+ self.log.info('heartbeat-miss', failure=failure,
+ count=self.heartbeat_count,
+ miss=self.heartbeat_miss)
+ self.heartbeat_last_reason = 'RESTCONF connectivity error'
+ self.heartbeat_check_status(None)
+
+ @staticmethod
+ def parse_module_revision(revision):
+ try:
+ return datetime.datetime.strptime(revision, '%Y-%m-%d')
+ except Exception:
+ return None
+
+ def remove_from_flow_table(self, _flows):
+ """
+ Remove flows from the device
+ :param _flows: (list) Flows
+ """
+ # Platform-specific; must be implemented by the derived class
+ raise NotImplementedError()
+
+ def add_to_flow_table(self, _flows):
+ """
+ Add flows to the device
+ :param _flows: (list) Flows
+ """
+ # Platform-specific; must be implemented by the derived class
+ raise NotImplementedError()
+
+ def process_inter_adapter_message(self, msg):
+ """
+ Called when the adapter receives a message that was sent to it directly
+ from another adapter. An adapter is automatically registered for these
+ messages when creating the inter-container kafka proxy. Note that it is
+ the responsibility of the sending and receiving adapters to properly encode
+ and decode the message.
+ :param msg: Proto Message (any)
+ :return: Proto Message Response
+ """
+ # Must be implemented by the platform-specific subclass
+ raise NotImplementedError()
+
+ def get_ofp_device_info(self, device):
+ """
+ Retrieve the OLT device info. This includes the ofp_desc and
+ ofp_switch_features. The existing ofp structures can be used,
+ or all the attributes get added to the Device definition or a new proto
+ definition gets created. This API will allow the Core to create a
+ LogicalDevice associated with this device (OLT only).
+ :param device: device
+ :return: Proto Message (TBD)
+ """
+ # Must be implemented by the platform-specific subclass
+ raise NotImplementedError()
+
+ def get_ofp_port_info(self, device, port_no):
+ """
+ Retrieve the port info. This includes the ofp_port. The existing ofp
+ structure can be used, or all the attributes get added to the Port
+ definitions or a new proto definition gets created. This API will allow
+ the Core to create a LogicalPort associated with this device.
+ :param device: device
+ :param port_no: port number
+ :return: Proto Message (TBD)
+ """
+ # Must be implemented by the platform-specific subclass
+ raise NotImplementedError()
diff --git a/adapters/adtran_common/download.py b/adapters/adtran_common/download.py
new file mode 100644
index 0000000..8207a99
--- /dev/null
+++ b/adapters/adtran_common/download.py
@@ -0,0 +1,523 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import xmltodict
+from twisted.internet import reactor
+from twisted.internet.defer import returnValue, inlineCallbacks
+from pyvoltha.protos.device_pb2 import ImageDownload
+from pyvoltha.protos.common_pb2 import AdminState
+
+log = structlog.get_logger()
+
+# TODO: Following two would be good provisionable parameters
+DEFAULT_AUTO_AGE_MINUTES = 10
+DEFAULT_MAX_JOB_RUN_SECONDS = 3600 * 4 # Some OLT files are 250MB+
+
+
+class Download(object):
+ """Class to wrap an image download"""
+
+ def __init__(self, handler, request, protocols):
+ self._handler = handler
+ self._deferred = None
+ self.device_id = request.id
+ self._name = request.name
+ self._url = request.url
+ self._crc = request.crc
+ self._version = request.image_version
+ self._local = request.local_dir
+ self._save_config = request.save_config
+ self._supported_protocols = protocols
+
+ self._download_state = ImageDownload.DOWNLOAD_UNKNOWN
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._image_state = ImageDownload.IMAGE_UNKNOWN
+ self._additional_info = ''
+ self._downloaded_octets = 0
+
+ # Server profile info
+ self._server_profile_name = None
+ self._scheme = None
+ self._host = ''
+ self._port = None
+ self._path = ''
+ self._auth = None
+
+ # Download job info
+ self._download_job_name = None
+
+ self._age_out_period = DEFAULT_AUTO_AGE_MINUTES
+ self._max_execution = DEFAULT_MAX_JOB_RUN_SECONDS
+
+ def __str__(self):
+ return "ImageDownload: {}".format(self.name)
+
+ @staticmethod
+ def create(handler, request, supported_protocols):
+ """
+ Create and start a new image download
+
+ :param handler: (AdtranDeviceHandler) Device download is for
+ :param request: (ImageDownload) Request
+ :param supported_protocols: (list) download methods allowed (http, tftp, ...)
+ """
+ download = Download(handler, request, supported_protocols)
+ download._deferred = reactor.callLater(0, download.start_download)
+
+ return download
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def download_state(self):
+ return self._download_state
+
+ @property
+ def failure_reason(self):
+ return self._failure_reason
+
+ @property
+ def image_state(self):
+ return self._image_state
+
+ @property
+ def additional_info(self):
+ return self._additional_info
+
+ @property
+ def downloaded_bytes(self):
+ return self._downloaded_octets
+
+ @property
+ def profile_name(self):
+ return self._server_profile_name
+
+ def _cancel_deferred(self):
+ d, self._deferred = self._deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except Exception as e:
+ pass
+
+ @inlineCallbacks
+ def start_download(self):
+ import uuid
+ log.info('download-start', name=self.name)
+ if not self.parse_url():
+ self._download_failed()
+ returnValue('failed url parsing')
+
+ self._download_state = ImageDownload.DOWNLOAD_STARTED
+ self._failure_reason = ImageDownload.NO_ERROR
+
+ ##############################################################
+ # Configure the file server profile
+ try:
+ self._additional_info = 'Configuring Download Server profile'
+ self._server_profile_name = 'VOLTHA.download.{}'.format(uuid.uuid4())
+ profile = self.server_profile_xml
+ yield self._handler.netconf_client.edit_config(profile)
+
+ except Exception as e:
+ log.exception('server-profile', e=e)
+ self._server_profile_name = None
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info += ': Failure: {}'.format(e.message)
+ self._download_failed()
+ raise
+
+ ##############################################################
+ # Configure the software download maintenance job
+ try:
+ self._additional_info = 'Configuring Image Download Job'
+ self._download_job_name = 'VOLTHA.download.{}'.format(uuid.uuid4())
+ job = self.download_job_xml
+ yield self._handler.netconf_client.edit_config(job)
+
+ except Exception as e:
+ log.exception('server-profile', e=e)
+ self._download_job_name = None
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info += ': Failure: {}'.format(e.message)
+ self._download_failed()
+ raise
+
+ ##############################################################
+ # Schedule a task to monitor the download
+ try:
+ self._additional_info = 'Monitoring download status'
+ self._deferred = reactor.callLater(0.5, self.monitor_download_status)
+
+ except Exception as e:
+ log.exception('server-profile', e=e)
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info += ': Failure: {}'.format(e.message)
+ self._download_failed()
+ raise
+
+ returnValue('started')
+
+ def parse_url(self):
+ from urllib3 import util, exceptions
+ try:
+ results = util.parse_url(self._url)
+
+ # Server info
+ self._scheme = results.scheme.lower()
+ if self._scheme not in self._supported_protocols:
+ self._failure_reason = ImageDownload.INVALID_URL
+ self._additional_info = "Unsupported file transfer protocol: {}".format(results.scheme)
+ return False
+
+ self._host = results.host
+ self._port = results.port
+ self._path = results.path
+ self._auth = results.auth
+ return True
+
+ except exceptions.LocationValueError as e:
+ self._failure_reason = ImageDownload.INVALID_URL
+ self._additional_info = e.message
+ return False
+
+ except Exception as e:
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info = e.message
+ return False
+
+ @property
+ def server_profile_xml(self):
+ assert self._scheme in ['http', 'https', 'ftp', 'sftp', 'tftp'], 'Invalid protocol'
+
+ xml = """
+ <file-servers xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">
+ <profiles>
+ <profile>"""
+
+ xml += '<name>{}</name>'.format(self._server_profile_name)
+ xml += '<connection-profile>'
+ xml += ' <host>{}</host>'.format(self._host)
+ xml += ' <port>{}</port>'.format(self._port) if self._port is not None else '<use-standard-port/>'
+
+ if self._scheme in ['http', 'https']:
+ xml += ' <protocol '
+ xml += 'xmlns:adtn-file-srv-https="http://www.adtran.com/ns/yang/adtran-file-servers-https">' +\
+ 'adtn-file-srv-https:{}'.format(self._scheme)
+ xml += ' </protocol>'
+
+ elif self._scheme == 'sftp':
+ xml += ' <protocol '
+ xml += 'xmlns:adtn-file-srv-sftp="http://www.adtran.com/ns/yang/adtran-file-servers-sftp">' +\
+ 'adtn-file-srv-sftp:sftp'
+ xml += ' </protocol>'
+
+ elif self._scheme in ['ftp', 'tftp']:
+ xml += '<protocol>adtn-file-srv:{}</protocol>'.format(self._scheme)
+
+ if self._auth is not None:
+ user_pass = self._auth.split(':')
+ xml += '<username>{}</username>'.format(user_pass[0])
+ xml += '<password>$0${}</password>'.format("".join(user_pass[1:]))
+ # And the trailer
+ xml += """
+ </connection-profile>
+ </profile>
+ </profiles>
+ </file-servers>
+ """
+ return xml
+
+ @property
+ def download_job_xml(self):
+ # TODO: May want to support notifications
+ # TODO: Not sure about this name for the entity
+ entity = 'main 0'
+ xml = """
+ <maintenance-jobs xmlns="http://www.adtran.com/ns/yang/adtran-maintenance-jobs" xmlns:adtn-phys-sw-mnt="http://www.adtran.com/ns/yang/adtran-physical-software-maintenance">
+ <maintenance-job>
+ <name>{}</name>
+ <enabled>true</enabled>
+ <notify-enabled>false</notify-enabled>
+ <maximum-execution-time>{}</maximum-execution-time>
+ <run-once>true</run-once>
+ <adtn-phys-sw-mnt:download-software>
+ <adtn-phys-sw-mnt:physical-entity>{}</adtn-phys-sw-mnt:physical-entity>
+ <adtn-phys-sw-mnt:software-name>software</adtn-phys-sw-mnt:software-name>
+ <adtn-phys-sw-mnt:remote-file>
+ <adtn-phys-sw-mnt:file-server-profile>{}</adtn-phys-sw-mnt:file-server-profile>
+ <adtn-phys-sw-mnt:filename>{}</adtn-phys-sw-mnt:filename>
+ """.format(self._download_job_name, self._max_execution, entity,
+ self._server_profile_name, self._name)
+
+ if self._path is not None:
+ xml += """
+ <adtn-phys-sw-mnt:filepath>{}</adtn-phys-sw-mnt:filepath>
+ """.format(self._path)
+
+ xml += """
+ </adtn-phys-sw-mnt:remote-file>
+ </adtn-phys-sw-mnt:download-software>
+ </maintenance-job>
+ </maintenance-jobs>
+ """
+ return xml
+
+ @property
+ def download_status_xml(self):
+ xml = """
+ <filter>
+ <maintenance-jobs-state xmlns="http://www.adtran.com/ns/yang/adtran-maintenance-jobs">
+ <maintenance-job>
+ <name>{}</name>
+ </maintenance-job>
+ </maintenance-jobs-state>
+ </filter>
+ """.format(self._download_job_name)
+ return xml
+
+ @property
+ def delete_server_profile_xml(self):
+ xml = """
+ <file-servers xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">
+ <profiles operation="delete">
+ <profile>
+ <name>{}</name>
+ </profile>
+ </profiles>
+ </file-servers>
+ """.format(self._server_profile_name)
+ return xml
+
+ @property
+ def delete_download_job_xml(self):
+ xml = """
+ <maintenance-jobs xmlns="http://www.adtran.com/ns/yang/adtran-maintenance-jobs">
+ <maintenance-job operation="delete">
+ <name>{}</name>
+ </maintenance-job>
+ </maintenance-jobs>
+ """.format(self._download_job_name)
+ return xml
+
+ @inlineCallbacks
+ def monitor_download_status(self):
+ log.debug('monitor-download', name=self.name)
+ try:
+ results = yield self._handler.netconf_client.get(self.download_status_xml)
+
+ result_dict = xmltodict.parse(results.data_xml)
+ entries = result_dict['data']['maintenance-jobs-state']['maintenance-job']
+
+ name = entries.get('name')
+ assert name == self._download_job_name, 'The job status name does not match. {} != {}'.format(name, self.name)
+ self._download_state = self.monitor_state_to_download_state(entries['state']['#text'])
+
+ completed = entries['timestamps'].get('completed-timestamp')
+ started = entries['timestamps'].get('start-timestamp')
+
+ if self._download_state == ImageDownload.DOWNLOAD_FAILED:
+ self._failure_reason = ImageDownload.UNKNOWN_ERROR
+ self._additional_info = entries['error'].get('error-message')
+
+ elif self._download_state == ImageDownload.INSUFFICIENT_SPACE:
+ self._failure_reason = ImageDownload.INSUFFICIENT_SPACE
+ self._additional_info = entries['error'].get('error-message')
+
+ elif self._download_state == ImageDownload.DOWNLOAD_STARTED:
+ self._failure_reason = ImageDownload.NO_ERROR
+ self._additional_info = 'Download started at {}'.format(started)
+
+ elif self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ self._failure_reason = ImageDownload.NO_ERROR
+ self._additional_info = 'Download completed at {}'.format(completed)
+ else:
+ raise NotImplementedError('Unsupported state')
+
+ done = self._download_state in [ImageDownload.DOWNLOAD_FAILED,
+ ImageDownload.DOWNLOAD_SUCCEEDED,
+ ImageDownload.INSUFFICIENT_SPACE]
+
+ except Exception as e:
+ log.exception('protocols', e=e)
+ done = False
+
+ if not done:
+ self._deferred = reactor.callLater(1, self.monitor_download_status)
+
+ returnValue('done' if done else 'not-done-yet')
+
+ def _download_failed(self):
+ log.info('download-failed', name=self.name)
+
+ self._cancel_deferred()
+ self._download_state = ImageDownload.DOWNLOAD_FAILED
+
+ # Cleanup NETCONF
+ reactor.callLater(0, self._cleanup_download_job, 20)
+ reactor.callLater(0, self._cleanup_server_profile, 20)
+ # TODO: Do we signal any completion due to failure?
+
+ def _download_complete(self):
+ log.info('download-completed', name=self.name)
+
+ self._cancel_deferred()
+ self._download_state = ImageDownload.DOWNLOAD_SUCCEEDED
+ self._downloaded_octets = 123456
+ self._failure_reason = ImageDownload.NO_ERROR
+
+ reactor.callLater(0, self._cleanup_download_job, 20)
+ reactor.callLater(0, self._cleanup_server_profile, 20)
+ # TODO: How do we signal completion?
+
+ device = self._handler.adapter_agent.get_device(self.device_id)
+ if device is not None:
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self._handler.adapter_agent.update_device(device)
+
+ def cancel_download(self, request):
+ log.info('cancel-sw-download', name=self.name)
+
+ self._cancel_deferred()
+
+ try:
+ # initiate cancelling software download to device at success
+ # delete image download record
+
+ self._handler.adapter_agent.delete_image_download(request)
+
+ device = self._handler.adapter_agent.get_device(self.device_id)
+ if device is not None:
+ # restore admin state to enabled
+ device.admin_state = AdminState.ENABLED
+ self._handler.adapter_agent.update_device(device)
+
+ except Exception as e:
+ log.exception(e.message)
+
+ reactor.callLater(0, self._cleanup_download_job, 20)
+ reactor.callLater(0, self._cleanup_server_profile, 20)
+
+ @inlineCallbacks
+ def _cleanup_server_profile(self, retries, attempt=1):
+ log.info('cleanup-server', name=self.name,
+ profile=self._server_profile_name,
+ attempt=attempt, remaining=retries)
+
+ if self._server_profile_name is not None:
+ try:
+ profile = self.delete_server_profile_xml
+ yield self._handler.netconf_client.edit_config(profile)
+ self._server_profile_name = None
+
+ except Exception as e:
+ log.exception(e.message)
+ if retries > 0:
+ reactor.callLater(attempt * 60, self._cleanup_download_job,
+ retries - 1, attempt + 1)
+
+ @inlineCallbacks
+ def _cleanup_download_job(self, retries, attempt=1):
+ log.info('cleanup-download', name=self.name,
+ profile=self._download_job_name,
+ attempt=attempt, remaining=retries)
+
+ if self._download_job_name is not None:
+ try:
+ job = self.delete_download_job_xml
+ yield self._handler.netconf_client.edit_config(job)
+ self._download_job_name = None
+
+ except Exception as e:
+ log.exception(e.message)
+ if retries > 0:
+ reactor.callLater(attempt * 60, self._cleanup_download_job,
+ retries - 1, attempt + 1)
+
+ @inlineCallbacks
+ def activate_image(self):
+ log.info('download-activate', name=self.name)
+
+ if self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ pass # TODO: Implement
+ self._image_state = ImageDownload.IMAGE_ACTIVE
+
+ returnValue('TODO: Implement this')
+
+ @inlineCallbacks
+ def revert_image(self):
+ log.info('download-revert', name=self.name)
+
+ if self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
+ pass # TODO: Implement
+ self._image_state = ImageDownload.IMAGE_INACTIVE
+
+ returnValue('TODO: Implement this')
+
+ def monitor_state_to_download_state(self, state):
+ if ':' in state:
+ state = state.split(':')[-1]
+ result = {
+ 'downloading-software': ImageDownload.DOWNLOAD_STARTED, # currently downloading software
+ 'storing-software': ImageDownload.DOWNLOAD_STARTED, # successfully downloaded the required software and is storing it to memory
+ 'software-stored': ImageDownload.DOWNLOAD_SUCCEEDED, # successfully downloaded the required software and has stored it successfully to memory
+ 'software-download-failed': ImageDownload.DOWNLOAD_FAILED, # unsuccessfully attempted to download the required software
+ 'invalid-software': ImageDownload.DOWNLOAD_FAILED, # successfully downloaded the required software but the software was determined to be invalid
+ 'software-storage-failed': ImageDownload.INSUFFICIENT_SPACE, # successfully downloaded the required software but was unable to successfully stored it to memory
+ }.get(state.lower(), None)
+ log.info('download-software-state', result=result, state=state, name=self.name)
+ assert result is not None, 'Invalid state'
+ return result
+
+ def monitor_state_to_activate_state(self, state):
+ if ':' in state:
+ state = state.split(':')[-1]
+ result = {
+ 'enabling-software': ImageDownload.IMAGE_ACTIVATE, # currently enabling the software
+ 'software-enabled': ImageDownload.IMAGE_ACTIVE, # successfully enabled the required software
+ 'enable-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to enable the required software revision
+ 'activating-software': ImageDownload.IMAGE_ACTIVATE, # currently activating the software
+ 'software-activated': ImageDownload.IMAGE_ACTIVE, # successfully activated the required software. The job terminated successfully
+ 'activate-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to activate the required software revision
+ 'committing-software': ImageDownload.IMAGE_ACTIVATE, # currently committing the software
+ 'software-committed': ImageDownload.IMAGE_ACTIVATE, # successfully committed the required software. The job terminated successfully
+ 'commit-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to commit the required software revision
+ }.get(state.lower(), None)
+ log.info('download-activate-state', result=result, state=state, name=self.name)
+ assert result is not None, 'Invalid state'
+ return result
+
+ @staticmethod
+ def clear_all(client):
+ """
+ Remove all file server profiles and download jobs
+ :param client: (ncclient) NETCONF Client to use
+ """
+ from twisted.internet import defer
+ del_fs_xml = """
+ <file-servers xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">
+ <profiles operation="delete"/>
+ </file-servers>
+ """
+ del_job_xml = """
+ <maintenance-jobs operation="delete" xmlns="http://www.adtran.com/ns/yang/adtran-maintenance-jobs"/>
+ """
+ dl = [client.edit_config(del_fs_xml, ignore_delete_error=True),
+ client.edit_config(del_job_xml, ignore_delete_error=True)]
+
+ return defer.gatherResults(dl, consumeErrors=True)
diff --git a/adapters/adtran_common/flow/__init__.py b/adapters/adtran_common/flow/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/adtran_common/flow/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_common/flow/acl.py b/adapters/adtran_common/flow/acl.py
new file mode 100644
index 0000000..67f8c08
--- /dev/null
+++ b/adapters/adtran_common/flow/acl.py
@@ -0,0 +1,385 @@
+#
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import xmltodict
+import re
+import structlog
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+log = structlog.get_logger()
+
+_acl_list = {} # Key -> device-id -> Name: List of encoded EVCs
+
+ACL_NAME_FORMAT = 'VOLTHA-ACL-{}-{}' # format(flow_entry.flow_id, flow-entry-hash)
+ACL_NAME_REGEX_ALL = 'VOLTHA-ACL-*'
+ACE_NAME_FORMAT = 'VOLTHA-ACE-{}' # format(flow_entry.flow_id)
+
+
+class ACL(object):
+ """
+ Class to wrap Trap-to-Controller functionality
+ """
+ def __init__(self, flow_entry):
+ self._installed = False
+ self._status_message = None
+ self._parent = flow_entry # FlowEntry parent
+ self._flow = flow_entry.flow
+ self._handler = flow_entry.handler
+ self._name = ACL.flow_to_name(flow_entry)
+ self._rule_name = ACL.flow_to_ace_name(flow_entry)
+ self._eth_type = flow_entry.eth_type
+ self._ip_protocol = flow_entry.ip_protocol
+ self._ipv4_dst = flow_entry.ipv4_dst
+ self._src_port = flow_entry.udp_src
+ self._dst_port = flow_entry.udp_dst
+ self._exception = False
+ self._enabled = True
+ self._valid = self._decode()
+
+ def __str__(self):
+ return 'ACL: {}, Installed: {}, L2: {}, L3/4: {}'.\
+ format(self.name, self._installed, self.is_l2_exception,
+ self.is_l3_l4_exception)
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def installed(self):
+ return self._installed
+
+ @property
+ def is_l2_exception(self):
+ from flow_entry import FlowEntry
+ return self._eth_type not in (None,
+ FlowEntry.EtherType.IPv4,
+ FlowEntry.EtherType.IPv6)
+
+ @property
+ def is_l3_l4_exception(self):
+ return not self.is_l2_exception and self._ip_protocol is not None
+
+ @staticmethod
+ def _xml_header(operation=None):
+ return '<access-lists xmlns="http://www.adtran.com/ns/yang/adtran-ietf-access-control-list"\
+ xmlns:adtn-ietf-ns-acl="http://www.adtran.com/ns/yang/adtran-ietf-ns-access-control-list"><acl{}>'.\
+ format('' if operation is None else ' xc:operation="{}"'.format(operation))
+
+ @staticmethod
+ def _xml_trailer():
+ return '</acl></access-lists>'
+
+ def _xml_action(self):
+ xml = '<actions>'
+ if self._exception:
+ xml += '<adtn-ietf-ns-acl:exception-to-cpu/>'
+ else:
+ xml += '<permit/>'
+ xml += '</actions>'
+ return xml
+
+ def _ace_l2(self):
+ xml = '<ace>'
+ xml += '<rule-name>{}</rule-name>'.format(self._rule_name)
+ xml += '<matches><l2-acl><ether-type>{:04x}</ether-type></l2-acl></matches>'.format(self._eth_type)
+ xml += self._xml_action()
+ xml += '</ace>'
+ return xml
+
+ def _ace_l2_l3_ipv4(self):
+ xml = '<ace>'
+ xml += '<rule-name>{}</rule-name>'.format(self._rule_name)
+ xml += '<matches><l2-l3-ipv4-acl>'
+ xml += '<ether-type>{:04X}</ether-type>'.format(self._eth_type)
+
+ if self._ip_protocol is not None:
+ xml += '<protocol>{}</protocol>'.format(self._ip_protocol)
+ if self._ipv4_dst is not None:
+ xml += '<destination-ipv4-network>{}/32</destination-ipv4-network>'.format(self._ipv4_dst)
+ if self._src_port is not None:
+ xml += '<source-port-range><lower-port>{}</lower-port><operation>eq</operation></source-port-range>'.\
+ format(self._src_port)
+ if self._dst_port is not None:
+ xml += '<destination-port-range><lower-port>' + \
+ '{}</lower-port><operation>eq</operation></destination-port-range>'.format(self._dst_port)
+
+ xml += '</l2-l3-ipv4-acl></matches>'
+ xml += self._xml_action()
+ xml += '</ace>'
+ return xml
+
+ def _ace_any(self):
+ xml = '<ace>'
+ xml += '<rule-name>{}</rule-name>'.format(self._rule_name)
+ xml += '<matches><any-acl/></matches>'
+ xml += self._xml_action()
+ xml += '</ace>'
+ return xml
+
+ def _acl_eth(self):
+ xml = '<acl-type>eth-acl</acl-type>'
+ xml += '<acl-name>{}</acl-name>'.format(self._name)
+ return xml
+
+ def _acl_l4(self):
+ xml = '<acl-type>mixed-l2-l3-ipv4-acl</acl-type>'
+ xml += '<acl-name>{}</acl-name>'.format(self._name)
+ return xml
+
+ def _acl_any(self):
+ xml = '<acl-type>any-acl</acl-type>'
+ xml += '<acl-name>{}</acl-name>'.format(self._name)
+ return xml
+
+ def _install_xml(self):
+ xml = ACL._xml_header('create')
+ if self.is_l2_exception:
+ xml += self._acl_eth()
+ xml += '<aces>{}</aces>'.format(self._ace_l2())
+ elif self.is_l3_l4_exception:
+ xml += self._acl_l4()
+ xml += '<aces>{}</aces>'.format(self._ace_l2_l3_ipv4())
+ else:
+ xml += self._acl_any()
+ xml += '<aces>{}</aces>'.format(self._ace_any())
+
+ xml += ACL._xml_trailer()
+ return xml
+
+ def _remove_xml(self):
+ xml = ACL._xml_header('delete')
+ if self.is_l2_exception:
+ xml += self._acl_eth()
+ elif self.is_l3_l4_exception:
+ xml += self._acl_l4()
+ else:
+ xml += self._acl_any()
+ xml += ACL._xml_trailer()
+ return xml
+
+ def evc_map_ingress_xml(self):
+ """ Individual ACL specific XML for the EVC MAP """
+
+ xml = '<adtn-evc-map-acl:acl-type '
+ fmt = 'xmlns:adtn-ietf-acl="http://www.adtran.com/ns/yang/adtran-ietf-access-control-list">adtn-ietf-acl:{}'\
+ '</adtn-evc-map-acl:acl-type>'
+
+ if self.is_l2_exception:
+ xml += fmt.format('eth-acl')
+
+ elif self.is_l3_l4_exception:
+ xml += fmt.format('mixed-l2-l3-ipv4-acl')
+
+ else:
+ xml += fmt.format('any-acl')
+
+ xml += '<adtn-evc-map-acl:acl-name>{}</adtn-evc-map-acl:acl-name>'.format(self.name)
+ return xml
+
+ @staticmethod
+ def create(flow_entry):
+ acl = ACL(flow_entry)
+
+ # Already created and installed, return that one
+ acls_installed = _acl_list.get(flow_entry.handler.device_id)
+ if acls_installed is not None:
+ entry = acls_installed.get(acl._name)
+ if entry is not None:
+ return entry
+
+ return acl
+
+ @staticmethod
+ def flow_to_name(flow_entry):
+ return ACL_NAME_FORMAT.format(flow_entry.flow_id, ACL.acl_hash(flow_entry))
+
+ @staticmethod
+ def flow_to_ace_name(flow_entry):
+ return ACE_NAME_FORMAT.format(flow_entry.flow_id)
+
+ @staticmethod
+ def acl_hash(flow_entry):
+ from hashlib import md5
+ in_port = flow_entry.in_port or 0
+ eth_type = flow_entry.eth_type or 0
+ ip_protocol = flow_entry.ip_protocol or 0
+ ipv4_dst = flow_entry.ipv4_dst or 0
+ src_port = flow_entry.udp_src or 0
+ dst_port = flow_entry.udp_dst or 0
+ hex_string = md5('{},{},{},{},{},{}'.format(in_port, eth_type, ip_protocol,
+ ipv4_dst, src_port, dst_port)).hexdigest()
+ return hex_string
+
+ @property
+ def valid(self):
+ return self._valid
+
+ @property
+ def installed(self):
+ return self._installed
+
+ @property
+ def status(self):
+ return self._status_message
+
+ @inlineCallbacks
+ def install(self):
+ log.debug('installing-acl', installed=self._installed)
+
+ if not self._installed and self._enabled:
+ if self._handler.device_id not in _acl_list:
+ _acl_list[self._handler.device_id] = {}
+
+ acls_installed = _acl_list[self._handler.device_id]
+ if self._name in acls_installed:
+ # Return OK
+ returnValue(self._enabled)
+
+ try:
+ acl_xml = self._install_xml()
+ log.debug('install-xml', xml=acl_xml, name=self._name)
+
+ results = yield self._handler.netconf_client.edit_config(acl_xml)
+ self._installed = results.ok
+ self._status_message = '' if results.ok else results.error
+
+ if self._installed:
+ acls_installed[self._name] = self
+
+ except Exception as e:
+ log.exception('install-failure', name=self._name, e=e)
+ raise
+
+ returnValue(self._installed and self._enabled)
+
+ @inlineCallbacks
+ def remove(self):
+ log.debug('removing-acl', installed=self._installed)
+
+ if self._installed:
+ acl_xml = self._remove_xml()
+ log.info('remove-xml', xml=acl_xml, name=self._name)
+
+ results = yield self._handler.netconf_client.edit_config(acl_xml)
+ self._installed = not results.ok
+ self._status_message = '' if results.ok else results.error
+
+ if not self._installed:
+ acls_installed = _acl_list.get(self._handler.device_id)
+ if acls_installed is not None and self._name in acls_installed:
+ del acls_installed[self._name]
+
+ returnValue(not self._installed)
+
+ def enable(self):
+ if not self._enabled:
+ self._enabled = True
+ raise NotImplementedError("TODO: Implement this")
+
+ def disable(self):
+ if self._enabled:
+ self._enabled = False
+ raise NotImplementedError("TODO: Implement this")
+
+ def _decode(self):
+ """
+ Examine the field settings and set ACL up for requested fields
+ """
+ # If EtherType is not None and not IP, this is an L2 exception
+ self._exception = self.is_l2_exception or self.is_l3_l4_exception
+ return True
+
+ # BULK operations
+
+ @staticmethod
+ def enable_all():
+ raise NotImplementedError("TODO: Implement this")
+
+ @staticmethod
+ def disable_all():
+ raise NotImplementedError("TODO: Implement this")
+
+ @staticmethod
+ def clear_all(device_id):
+ """
+ Clear all acls for this device id from the list
+ :param device_id: id of the device
+ """
+ if device_id in _acl_list:
+ del _acl_list[device_id]
+
+ @staticmethod
+ def remove_all(client, regex_=ACL_NAME_REGEX_ALL):
+ """
+ Remove all matching ACLs from hardware
+ :param client: (ncclient) NETCONF Client to use
+ :param regex_: (String) Regular expression for name matching
+ :return: (deferred)
+ """
+ # Do a 'get' on the evc config and you should get the names
+ get_xml = """
+ <filter>
+ <access-lists xmlns="http://www.adtran.com/ns/yang/adtran-ietf-access-control-list">
+ <acl><acl-type/><acl-name/></acl>
+ </access-lists>
+ </filter>
+ """
+ log.debug('query', xml=get_xml, regex=regex_)
+
+ def request_failed(results, operation):
+ log.error('{}-failed'.format(operation), results=results)
+
+ def delete_complete(results):
+ log.debug('delete-complete', results=results)
+
+ def do_delete(rpc_reply, regexpr):
+ log.debug('query-complete', rpc_reply=rpc_reply)
+
+ if rpc_reply.ok:
+ result_dict = xmltodict.parse(rpc_reply.data_xml)
+ entries = result_dict['data']['access-lists'] if 'access-lists' in result_dict['data'] else {}
+
+ if 'acl' in entries:
+ p = re.compile(regexpr)
+
+ pairs = []
+ if isinstance(entries['acl'], list):
+ pairs = {(entry['acl-type'], entry['acl-name']) for entry in entries['acl']
+ if 'acl-name' in entry and 'acl-type' in entry and p.match(entry['acl-name'])}
+ else:
+ if 'acl' in entries:
+ entry = entries['acl']
+ if 'acl-name' in entry and 'acl-type' in entry and p.match(entry['acl-name']):
+ pairs = [(entry['acl-type'], entry['acl-name'])]
+
+ if len(pairs) > 0:
+ del_xml = '<access-lists xmlns="http://www.adtran.com/ns/yang/adtran-ietf-access-control-list">'
+ for pair in pairs:
+ del_xml += '<acl xc:operation = "delete">'
+ del_xml += '<acl-type>{}</acl-type>'.format(pair[0])
+ del_xml += '<acl-name>{}</acl-name>'.format(pair[1])
+ del_xml += '</acl>'
+ del_xml += '</access-lists>'
+ log.debug('removing', xml=del_xml)
+
+ return client.edit_config(del_xml)
+
+ return succeed('no entries')
+
+ d = client.get(get_xml)
+ d.addCallbacks(do_delete, request_failed, callbackArgs=[regex_], errbackArgs=['get'])
+ d.addCallbacks(delete_complete, request_failed, errbackArgs=['edit-config'])
+ return d
diff --git a/adapters/adtran_common/flow/evc.py b/adapters/adtran_common/flow/evc.py
new file mode 100644
index 0000000..5e00bca
--- /dev/null
+++ b/adapters/adtran_common/flow/evc.py
@@ -0,0 +1,479 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import xmltodict
+import re
+import structlog
+from enum import IntEnum
+from twisted.internet import reactor, defer
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+log = structlog.get_logger()
+
+EVC_NAME_FORMAT = 'VOLTHA-{}' # format(flow.id)
+EVC_NAME_REGEX_ALL = EVC_NAME_FORMAT.format('*')
+DEFAULT_STPID = 0x8100
+
+
+class EVC(object):
+ """
+ Class to wrap EVC functionality
+ """
+ class SwitchingMethod(IntEnum):
+ SINGLE_TAGGED = 1
+ DOUBLE_TAGGED = 2
+ MAC_SWITCHED = 3
+ DOUBLE_TAGGED_MAC_SWITCHED = 4
+ DEFAULT = SINGLE_TAGGED
+
+ @staticmethod
+ def xml(value):
+ if value is None:
+ value = EVC.SwitchingMethod.DEFAULT
+ if value == EVC.SwitchingMethod.SINGLE_TAGGED:
+ return '<single-tag-switched/>'
+ elif value == EVC.SwitchingMethod.DOUBLE_TAGGED:
+ return '<double-tag-switched/>'
+ elif value == EVC.SwitchingMethod.MAC_SWITCHED:
+ return '<mac-switched/>'
+ elif value == EVC.SwitchingMethod.DOUBLE_TAGGED_MAC_SWITCHED:
+ return '<double-tag-mac-switched/>'
+ raise ValueError('Invalid SwitchingMethod enumeration')
+
+ class Men2UniManipulation(IntEnum):
+ SYMMETRIC = 1
+ POP_OUT_TAG_ONLY = 2
+ DEFAULT = SYMMETRIC
+
+ @staticmethod
+ def xml(value):
+ if value is None:
+ value = EVC.Men2UniManipulation.DEFAULT
+ fmt = '<men-to-uni-tag-manipulation>{}</men-to-uni-tag-manipulation>'
+ if value == EVC.Men2UniManipulation.SYMMETRIC:
+ return fmt.format('<symmetric/>')
+ elif value == EVC.Men2UniManipulation.POP_OUT_TAG_ONLY:
+ return fmt.format('<pop-outer-tag-only/>')
+ raise ValueError('Invalid Men2UniManipulation enumeration')
+
+ class ElineFlowType(IntEnum):
+ NNI_TO_UNI = 1
+ UNI_TO_NNI = 2
+ NNI_TO_NNI = 3
+ UNI_TO_UNI = 4
+ ACL_FILTER = 5
+ UNKNOWN = 6
+ UNSUPPORTED = 7 # Or Invalid
+
+ def __init__(self, flow_entry):
+ self._installed = False
+ self._status_message = None
+ self._flow = flow_entry
+ self._name = self._create_name()
+ self._deferred = None
+ self._evc_maps = {} # Map Name -> evc-map
+
+ self._flow_type = EVC.ElineFlowType.UNKNOWN
+
+ # EVC related properties
+ self._enabled = True
+ self._men_ports = []
+ self._s_tag = None
+ self._stpid = None
+ self._switching_method = None
+ self.service_evc = False
+
+ self._ce_vlan_preservation = None
+ self._men_to_uni_tag_manipulation = None
+
+ try:
+ self._valid = self._decode()
+
+ except Exception as e:
+ log.exception('Failure during EVC decode', e=e)
+ self._valid = False
+
+ def __str__(self):
+ return "EVC-{}: MEN: {}, S-Tag: {}".format(self._name, self._men_ports, self._s_tag)
+
+ def _create_name(self):
+ #
+ # TODO: Take into account selection criteria and output to make the name
+ #
+ return EVC_NAME_FORMAT.format(self._flow.flow_id)
+
+ def _cancel_deferred(self):
+ d, self._deferred = self._deferred, None
+
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+
+ except Exception as e:
+ pass
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def valid(self):
+ return self._valid
+
+ @property
+ def installed(self):
+ return self._installed
+
+ @installed.setter
+ def installed(self, value):
+ assert not value, 'EVC Install can only be reset'
+ self._installed = False
+
+ @property
+ def status(self):
+ return self._status_message
+
+ @status.setter
+ def status(self, value):
+ self._status_message = value
+
+ @property
+ def s_tag(self):
+ return self._s_tag
+
+ @property
+ def stpid(self):
+ return self._stpid
+
+ @stpid.setter
+ def stpid(self, value):
+ assert self._stpid is None or self._stpid == value, 'STPID can only be set once'
+ self._stpid = value
+
+ @property
+ def switching_method(self):
+ return self._switching_method
+
+ @switching_method.setter
+ def switching_method(self, value):
+ assert self._switching_method is None or self._switching_method == value,\
+ 'Switching Method can only be set once. EVC: {}'.format(self.name)
+ self._switching_method = value
+
+ @property
+ def ce_vlan_preservation(self):
+ return self._ce_vlan_preservation
+
+ @ce_vlan_preservation.setter
+ def ce_vlan_preservation(self, value):
+ assert self._ce_vlan_preservation is None or self._ce_vlan_preservation == value,\
+ 'CE VLAN Preservation can only be set once'
+ self._ce_vlan_preservation = value
+
+ @property
+ def men_to_uni_tag_manipulation(self):
+ return self._men_to_uni_tag_manipulation
+
+ @men_to_uni_tag_manipulation.setter
+ def men_to_uni_tag_manipulation(self, value):
+ assert self._men_to_uni_tag_manipulation is None or self._men_to_uni_tag_manipulation == value, \
+ 'MEN-to-UNI tag manipulation can only be set once'
+ self._men_to_uni_tag_manipulation = value
+
+ @property
+ def flow_entry(self):
+ # Note that the first flow used to create the EVC is saved and it may
+ # eventually get deleted while others still use the EVC. This should
+ # be okay as the downstream flow/signature table is used to maintain
+ # the lifetime on this EVC object.
+ return self._flow
+
+ @flow_entry.setter
+ def flow_entry(self, value):
+ self._flow = value
+
+ @property
+ def evc_maps(self):
+ """
+ Get all EVC Maps that reference this EVC
+ :return: list of EVCMap
+ """
+ return list(self._evc_maps.values()) if self._evc_maps is not None else []
+
+ @property
+ def evc_map_names(self):
+ """
+ Get all EVC Map names that reference this EVC
+ :return: list of EVCMap names
+ """
+ return list(self._evc_maps.keys()) if self._evc_maps is not None else []
+
+ def add_evc_map(self, evc_map):
+ if self._evc_maps is None:
+ self._evc_maps = dict()
+
+ if evc_map.name not in self._evc_maps:
+ self._evc_maps[evc_map.name] = evc_map
+
+ def remove_evc_map(self, evc_map):
+ if self._evc_maps is not None and evc_map.name in self._evc_maps:
+ del self._evc_maps[evc_map.name]
+
+ def schedule_install(self, delay=0):
+ """
+ Try to install EVC and all MAPs in a single operational sequence.
+ The delay parameter is used during recovery to allow multiple associated
+ EVC maps to be updated/modified independently before the parent EVC
+ is installed.
+
+ :param delay: (int) Seconds to delay before install
+ """
+ self._cancel_deferred()
+
+ self._deferred = reactor.callLater(delay, self._do_install) \
+ if self._valid else succeed('Not VALID')
+
+ return self._deferred
+
+ @staticmethod
+ def _xml_header(operation=None):
+ return '<evcs xmlns="http://www.adtran.com/ns/yang/adtran-evcs"{}><evc>'.\
+ format('' if operation is None else ' xc:operation="{}"'.format(operation))
+
+ @staticmethod
+ def _xml_trailer():
+ return '</evc></evcs>'
+
+ @inlineCallbacks
+ def _do_install(self):
+ # Install the EVC if needed
+ log.debug('do-install', valid=self._valid, installed=self._installed)
+
+ if self._valid and not self._installed:
+ # TODO: Currently install EVC and then MAPs. Can do it all in a single edit-config operation
+
+ xml = EVC._xml_header()
+ xml += '<name>{}</name>'.format(self.name)
+ xml += '<enabled>{}</enabled>'.format('true' if self._enabled else 'false')
+
+ if self._ce_vlan_preservation is not None:
+ xml += '<ce-vlan-preservation>{}</ce-vlan-preservation>'.format('false')
+
+ if self._s_tag is not None:
+ xml += '<stag>{}</stag>'.format(self._s_tag)
+ xml += '<stag-tpid>{}</stag-tpid>'.format(self._stpid or DEFAULT_STPID)
+ else:
+ xml += '<no-stag/>'
+
+ for port in self._men_ports:
+ xml += '<men-ports>{}</men-ports>'.format(port)
+
+ # xml += EVC.Men2UniManipulation.xml(self._men_to_uni_tag_manipulation)
+ # xml += EVC.SwitchingMethod.xml(self._switching_method)
+ xml += EVC._xml_trailer()
+
+ log.debug('create-evc', name=self.name, xml=xml)
+ try:
+ # Set installed to true while request is in progress
+ self._installed = True
+ results = yield self._flow.handler.netconf_client.edit_config(xml)
+ self._installed = results.ok
+ self.status = '' if results.ok else results.error
+
+ except Exception as e:
+ log.exception('install-failed', name=self.name, e=e)
+ raise
+
+ # Install any associated EVC Maps
+
+ if self._installed:
+ for evc_map in self.evc_maps:
+ try:
+ yield evc_map.install()
+
+ except Exception as e:
+ evc_map.status = 'Exception during EVC-MAP Install: {}'.format(e.message)
+ log.exception('evc-map-install-failed', e=e)
+
+ returnValue(self._installed and self._valid)
+
+ def remove(self, remove_maps=True):
+ """
+ Remove EVC (and optional associated EVC-MAPs) from hardware
+ :param remove_maps: (boolean)
+ :return: (deferred)
+ """
+ if not self.installed:
+ return succeed('Not installed')
+
+ log.info('removing', evc=self, remove_maps=remove_maps)
+ dl = []
+
+ def _success(rpc_reply):
+ log.debug('remove-success', rpc_reply=rpc_reply)
+ self._installed = False
+
+ def _failure(results):
+ log.error('remove-failed', results=results)
+ self._installed = False
+
+ xml = EVC._xml_header('delete') + '<name>{}</name>'.format(self.name) + EVC._xml_trailer()
+ d = self._flow.handler.netconf_client.edit_config(xml)
+ d.addCallbacks(_success, _failure)
+ dl.append(d)
+
+ if remove_maps:
+ for evc_map in self.evc_maps:
+ dl.append(evc_map.remove())
+
+ return defer.gatherResults(dl, consumeErrors=True)
+
+ @inlineCallbacks
+ def delete(self, delete_maps=True):
+ """
+ Remove from hardware and delete/clean-up EVC Object
+ """
+ log.info('deleting', evc=self, delete_maps=delete_maps)
+
+ assert self._flow, 'Delete EVC must have flow reference'
+ try:
+ dl = [self.remove()]
+ self._valid = False
+
+ if delete_maps:
+ for evc_map in self.evc_maps:
+ dl.append(evc_map.delete(None)) # TODO: implement bulk-flow procedures
+
+ yield defer.gatherResults(dl, consumeErrors=True)
+
+ except Exception as e:
+ log.exception('removal', e=e)
+
+ self._evc_maps = None
+ f, self._flow = self._flow, None
+ if f is not None and f.handler is not None:
+ f.handler.remove_evc(self)
+
+ returnValue('Done')
+
+ def reflow(self, reflow_maps=True):
+ """
+ Attempt to install/re-install a flow
+ :param reflow_maps: (boolean) Flag indication if EVC-MAPs should be reflowed as well
+ :return: (deferred)
+ """
+ self._installed = False
+
+ if reflow_maps:
+ for evc_map in self.evc_maps:
+ evc_map.installed = False
+
+ return self.schedule_install()
+
+ def _decode(self):
+ """
+ Examine flow rules and extract appropriate settings for this EVC
+ """
+ if self._flow.handler.is_nni_port(self._flow.in_port):
+ self._men_ports.append(self._flow.handler.get_port_name(self._flow.in_port))
+ else:
+ self._status_message = 'EVCs with UNI ports are not supported'
+ return False # UNI Ports handled in the EVC Maps
+
+ self._s_tag = self._flow.vlan_id
+
+ if self._flow.inner_vid is not None:
+ self._switching_method = EVC.SwitchingMethod.DOUBLE_TAGGED
+
+ # For the Utility VLAN, multiple ingress ACLs (different GEMs) will need to
+ # be trapped on this EVC. Since these are usually untagged, we have to force
+ # the EVC to preserve CE VLAN tags.
+
+ if self._s_tag == self._flow.handler.utility_vlan:
+ self._ce_vlan_preservation = True
+
+ # Note: The following fields may get set when the first EVC-MAP
+ # is associated with this object. Once set, they cannot be changed to
+ # another value.
+ # self._stpid
+ # self._switching_method
+ # self._ce_vlan_preservation
+ # self._men_to_uni_tag_manipulation
+ return True
+
+ # BULK operations
+
+ @staticmethod
+ def remove_all(client, regex_=EVC_NAME_REGEX_ALL):
+ """
+ Remove all matching EVCs from hardware
+ :param client: (ncclient) NETCONF Client to use
+ :param regex_: (String) Regular expression for name matching
+ :return: (deferred)
+ """
+ # Do a 'get' on the evc config and you should get the names
+ get_xml = """
+ <filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <evcs xmlns="http://www.adtran.com/ns/yang/adtran-evcs">
+ <evc><name/></evc>
+ </evcs>
+ </filter>
+ """
+ log.debug('query', xml=get_xml, regex=regex_)
+
+ def request_failed(results, operation):
+ log.error('{}-failed'.format(operation), results=results)
+ # No further actions. Periodic poll later on will scrub any old EVCs if needed
+
+ def delete_complete(results):
+ log.debug('delete-complete', results=results)
+
+ def do_delete(rpc_reply, regexpr):
+ log.debug('query-complete', rpc_reply=rpc_reply)
+
+ if rpc_reply.ok:
+ result_dict = xmltodict.parse(rpc_reply.data_xml)
+ entries = result_dict['data']['evcs'] if 'evcs' in result_dict['data'] else {}
+
+ if 'evc' in entries:
+ p = re.compile(regexpr)
+
+ if isinstance(entries['evc'], list):
+ names = {entry['name'] for entry in entries['evc'] if 'name' in entry
+ and p.match(entry['name'])}
+ else:
+ names = set()
+ for item in entries['evc'].items():
+ if isinstance(item, tuple) and item[0] == 'name':
+ names.add(item[1])
+ break
+
+ if len(names) > 0:
+ del_xml = '<evcs xmlns="http://www.adtran.com/ns/yang/adtran-evcs"' + \
+ ' xc:operation = "delete">'
+ for name in names:
+ del_xml += '<evc>'
+ del_xml += '<name>{}</name>'.format(name)
+ del_xml += '</evc>'
+ del_xml += '</evcs>'
+ log.debug('removing', xml=del_xml)
+
+ return client.edit_config(del_xml)
+
+ return succeed('no entries')
+
+ d = client.get(get_xml)
+ d.addCallbacks(do_delete, request_failed, callbackArgs=[regex_], errbackArgs=['get'])
+ d.addCallbacks(delete_complete, request_failed, errbackArgs=['edit-config'])
+ return d
diff --git a/adapters/adtran_common/flow/evc_map.py b/adapters/adtran_common/flow/evc_map.py
new file mode 100644
index 0000000..688124a
--- /dev/null
+++ b/adapters/adtran_common/flow/evc_map.py
@@ -0,0 +1,1015 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import xmltodict
+import re
+import structlog
+from enum import Enum
+from acl import ACL
+from twisted.internet import defer, reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+from ncclient.operations.rpc import RPCError
+
+
+log = structlog.get_logger()
+
+# NOTE: For the EVC Map name, the ingress-port number is the VOLTHA port number (not pon-id since
+# it covers NNI ports as well in order to handle the NNI-NNI case. For flows that
+# cover an entire pon, the name will have the ONU ID and GEM ID appended to it upon
+# installation with a period as a separator.
+
+EVC_MAP_NAME_FORMAT = 'VOLTHA-{}-{}' # format(logical-ingress-port-number, flow-id)
+EVC_MAP_NAME_REGEX_ALL = 'VOLTHA-*'
+
+
+class EVCMap(object):
+ """
+ Class to wrap EVC functionality
+ """
+ class EvcConnection(Enum):
+ NO_EVC_CONNECTION = 0
+ EVC = 1
+ DISCARD = 2
+ DEFAULT = NO_EVC_CONNECTION
+
+ @staticmethod
+ def xml(value):
+ # Note we do not have XML for 'EVC' enumeration.
+ if value is None:
+ value = EVCMap.EvcConnection.DEFAULT
+ if value == EVCMap.EvcConnection.NO_EVC_CONNECTION:
+ return '<no-evc-connection/>'
+ elif value == EVCMap.EvcConnection.DISCARD:
+ return '<discard/>'
+ raise ValueError('Invalid EvcConnection enumeration')
+
+ class PriorityOption(Enum):
+ INHERIT_PRIORITY = 0
+ EXPLICIT_PRIORITY = 1
+ DEFAULT = INHERIT_PRIORITY
+
+ @staticmethod
+ def xml(value):
+ if value is None:
+ value = EVCMap.PriorityOption.DEFAULT
+ if value == EVCMap.PriorityOption.INHERIT_PRIORITY:
+ return '<inherit-pri/>'
+ elif value == EVCMap.PriorityOption.EXPLICIT_PRIORITY:
+ return '<explicit-pri/>'
+ raise ValueError('Invalid PriorityOption enumeration')
+
+ def __init__(self, flow, evc, is_ingress_map):
+ self._handler = flow.handler # Same for all Flows attached to this EVC MAP
+ self._flows = {flow.flow_id: flow}
+ self._evc = None
+ self._new_acls = dict() # ACL Name -> ACL Object (To be installed into h/w)
+ self._existing_acls = dict() # ACL Name -> ACL Object (Already in H/w)
+ self._is_ingress_map = is_ingress_map
+ self._pon_id = None
+ self._onu_id = None # Remains None if associated with a multicast flow
+ self._installed = False
+ self._needs_update = False
+ self._status_message = None
+ self._deferred = None
+ self._name = None
+ self._enabled = True
+ self._uni_port = None
+ self._evc_connection = EVCMap.EvcConnection.DEFAULT
+ self._men_priority = EVCMap.PriorityOption.DEFAULT
+ self._men_pri = 0 # If Explicit Priority
+
+ self._c_tag = None
+ self._men_ctag_priority = EVCMap.PriorityOption.DEFAULT
+ self._men_ctag_pri = 0 # If Explicit Priority
+ self._match_ce_vlan_id = None
+ self._match_untagged = False
+ self._match_destination_mac_address = None
+ self._match_l2cp = False
+ self._match_broadcast = False
+ self._match_multicast = False
+ self._match_unicast = False
+ self._match_igmp = False
+
+ from common.tech_profile.tech_profile import DEFAULT_TECH_PROFILE_TABLE_ID
+ self._tech_profile_id = DEFAULT_TECH_PROFILE_TABLE_ID
+ self._gem_ids_and_vid = None # { key -> onu-id, value -> tuple(sorted GEM Port IDs, onu_vid) }
+ self._upstream_bandwidth = None
+ self._shaper_name = None
+
+ # ACL logic
+ self._eth_type = None
+ self._ip_protocol = None
+ self._ipv4_dst = None
+ self._udp_dst = None
+ self._udp_src = None
+
+ try:
+ self._valid = self._decode(evc)
+
+ except Exception as e:
+ log.exception('decode', e=e)
+ self._valid = False
+
+ def __str__(self):
+ return "EVCMap-{}: UNI: {}, hasACL: {}".format(self._name, self._uni_port,
+ self._needs_acl_support)
+
+ @staticmethod
+ def create_ingress_map(flow, evc, dry_run=False):
+ evc_map = EVCMap(flow, evc, True)
+
+ if evc_map._valid and not dry_run:
+ evc.add_evc_map(evc_map)
+ evc_map._evc = evc
+
+ return evc_map
+
+ @staticmethod
+ def create_egress_map(flow, evc, dry_run=False):
+ evc_map = EVCMap(flow, evc, False)
+
+ if evc_map._valid and not dry_run:
+ evc.add_evc_map(evc_map)
+ evc_map._evc = evc
+
+ return evc_map
+
+ @property
+ def valid(self):
+ return self._valid
+
+ @property
+ def installed(self):
+ return self._installed
+
+ @property
+ def needs_update(self):
+ """ True if a parameter/ACL/... needs update or the map needs to be reflowed after a failure"""
+ return self._needs_update
+
+ @needs_update.setter
+ def needs_update(self, value):
+ assert not value, 'needs update can only be reset' # Can only reset
+ self._needs_update = False
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def status(self):
+ return self._status_message
+
+ @status.setter
+ def status(self, value):
+ self._status_message = value
+
+ @property
+ def evc(self):
+ return self._evc
+
+ @property
+ def _needs_acl_support(self):
+ if self._ipv4_dst is not None: # In case MCAST downstream has ACL on it
+ return False
+
+ return self._eth_type is not None or self._ip_protocol is not None or\
+ self._udp_dst is not None or self._udp_src is not None
+
+ @property
+ def pon_id(self):
+ return self._pon_id # May be None
+
+ @property
+ def onu_id(self):
+ return self._onu_id # May be None if associated with a multicast flow
+
+ # @property
+ # def onu_ids(self):
+ # return self._gem_ids_and_vid.keys()
+
+ @property
+ def gem_ids_and_vid(self):
+ return self._gem_ids_and_vid.copy()
+
+ @staticmethod
+ def _xml_header(operation=None):
+ return '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"{}><evc-map>'.\
+ format('' if operation is None else ' xc:operation="{}"'.format(operation))
+
+ @staticmethod
+ def _xml_trailer():
+ return '</evc-map></evc-maps>'
+
+ def get_evcmap_name(self, onu_id, gem_id):
+ return'{}.{}.{}.{}'.format(self.name, self.pon_id, onu_id, gem_id)
+
+ def _common_install_xml(self):
+ xml = '<enabled>{}</enabled>'.format('true' if self._enabled else 'false')
+ xml += '<uni>{}</uni>'.format(self._uni_port)
+
+ evc_name = self._evc.name if self._evc is not None else None
+ if evc_name is not None:
+ xml += '<evc>{}</evc>'.format(evc_name)
+ else:
+ xml += EVCMap.EvcConnection.xml(self._evc_connection)
+
+ xml += '<match-untagged>{}</match-untagged>'.format('true'
+ if self._match_untagged
+ else 'false')
+
+ # TODO: The following is not yet supported (and in some cases, not decoded)
+ # self._men_priority = EVCMap.PriorityOption.INHERIT_PRIORITY
+ # self._men_pri = 0 # If Explicit Priority
+ #
+ # self._men_ctag_priority = EVCMap.PriorityOption.INHERIT_PRIORITY
+ # self._men_ctag_pri = 0 # If Explicit Priority
+ #
+ # self._match_ce_vlan_id = None
+ # self._match_untagged = True
+ # self._match_destination_mac_address = None
+ return xml
+
+ def _ingress_install_xml(self, onu_s_gem_ids_and_vid, acl_list, create):
+ from ..onu import Onu
+
+ if len(acl_list):
+ xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"' +\
+ ' xmlns:adtn-evc-map-acl="http://www.adtran.com/ns/yang/adtran-evc-map-access-control-list">'
+ else:
+ xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps">'
+
+ for onu_or_vlan_id, gem_ids_and_vid in onu_s_gem_ids_and_vid.iteritems():
+ first_gem_id = True
+ gem_ids = gem_ids_and_vid[0]
+ vid = gem_ids_and_vid[1]
+ ident = '{}.{}'.format(self._pon_id, onu_or_vlan_id) if vid is None \
+ else onu_or_vlan_id
+
+ for gem_id in gem_ids:
+ xml += '<evc-map{}>'.format('' if not create else ' xc:operation="create"')
+ xml += '<name>{}.{}.{}</name>'.format(self.name, ident, gem_id)
+ xml += '<ce-vlan-id>{}</ce-vlan-id>'.format(Onu.gem_id_to_gvid(gem_id))
+
+ # GEM-IDs are a sorted list (ascending). First gemport handles downstream traffic
+ if first_gem_id and (self._c_tag is not None or vid is not None):
+ first_gem_id = False
+ vlan = vid or self._c_tag
+ xml += '<network-ingress-filter>'
+ xml += '<men-ctag>{}</men-ctag>'.format(vlan) # Added in August 2017 model
+ xml += '</network-ingress-filter>'
+
+ if len(acl_list):
+ xml += '<adtn-evc-map-acl:access-lists>'
+ for acl in acl_list:
+ xml += ' <adtn-evc-map-acl:ingress-acl>'
+ xml += acl.evc_map_ingress_xml()
+ xml += ' </adtn-evc-map-acl:ingress-acl>'
+ xml += '</adtn-evc-map-acl:access-lists>'
+ xml += self._common_install_xml()
+ xml += '</evc-map>'
+ xml += '</evc-maps>'
+ return xml
+
+ def _egress_install_xml(self):
+ xml = EVCMap._xml_header()
+ xml += '<name>{}</name>'.format(self.name)
+ xml += self._common_install_xml()
+ xml += EVCMap._xml_trailer()
+ return xml
+
+ def _ingress_remove_acl_xml(self, onu_s_gem_ids_and_vid, acl):
+ xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"' +\
+ ' xmlns:adtn-evc-map-acl="http://www.adtran.com/ns/yang/adtran-evc-map-access-control-list">'
+ for onu_or_vlan_id, gem_ids_and_vid in onu_s_gem_ids_and_vid.iteritems():
+ first_gem_id = True
+ vid = gem_ids_and_vid[1]
+ ident = '{}.{}'.format(self._pon_id, onu_or_vlan_id) if vid is None \
+ else onu_or_vlan_id
+
+ for gem_id in gem_ids_and_vid[0]:
+ xml += '<evc-map>'
+ xml += '<name>{}.{}.{}</name>'.format(self.name, ident, gem_id)
+ xml += '<adtn-evc-map-acl:access-lists>'
+ xml += ' <adtn-evc-map-acl:ingress-acl xc:operation="delete">'
+ xml += acl.evc_map_ingress_xml()
+ xml += ' </adtn-evc-map-acl:ingress-acl>'
+ xml += '</adtn-evc-map-acl:access-lists>'
+ xml += '</evc-map>'
+ xml += '</evc-maps>'
+ return xml
+
+ @inlineCallbacks
+ def install(self):
+ def gem_ports():
+ ports = []
+ for gems_and_vids in self._gem_ids_and_vid.itervalues():
+ ports.extend(gems_and_vids[0])
+ return ports
+
+ log.debug('install-evc-map', valid=self._valid, gem_ports=gem_ports())
+
+ if self._valid and len(gem_ports()) > 0:
+ # Install ACLs first (if not yet installed)
+ work_acls = self._new_acls.copy()
+ self._new_acls = dict()
+
+ log.debug('install-evc-map-acls', install_acls=len(work_acls))
+ for acl in work_acls.itervalues():
+ try:
+ yield acl.install()
+
+ except Exception as e:
+ log.exception('acl-install-failed', name=self.name, e=e)
+ self._new_acls.update(work_acls)
+ raise
+
+ # Any user-data flows attached to this map ?
+ c_tag = None
+ for flow_id, flow in self._flows.items():
+ c_tag = flow.inner_vid or flow.vlan_id or c_tag
+
+ self._c_tag = c_tag
+
+ # Now EVC-MAP
+ if not self._installed or self._needs_update:
+ log.debug('needs-install-or-update', installed=self._installed, update=self._needs_update)
+ is_installed = self._installed
+ self._installed = True
+ try:
+ self._cancel_deferred()
+
+ log.info('upstream-bandwidth')
+ try:
+ yield self.update_upstream_flow_bandwidth()
+
+ except Exception as e:
+ log.exception('upstream-bandwidth-failed', name=self.name, e=e)
+ raise
+
+ map_xml = self._ingress_install_xml(self._gem_ids_and_vid, work_acls.values(),
+ not is_installed) \
+ if self._is_ingress_map else self._egress_install_xml()
+
+ log.debug('install', xml=map_xml, name=self.name)
+ results = yield self._handler.netconf_client.edit_config(map_xml)
+ self._installed = results.ok
+ self._needs_update = results.ok
+ self._status_message = '' if results.ok else results.error
+
+ if results.ok:
+ self._existing_acls.update(work_acls)
+ else:
+ self._new_acls.update(work_acls)
+
+ except RPCError as rpc_err:
+ if rpc_err.tag == 'data-exists': # Known race due to bulk-flow operation
+ pass
+
+ except Exception as e:
+ log.exception('evc-map-install-failed', name=self.name, e=e)
+ self._installed = is_installed
+ self._new_acls.update(work_acls)
+ raise
+
+ # Install any needed shapers
+ if self._installed:
+ try:
+ yield self.update_downstream_flow_bandwidth()
+
+ except Exception as e:
+ log.exception('shaper-install-failed', name=self.name, e=e)
+ raise
+
+ returnValue(self._installed and self._valid)
+
+ def _ingress_remove_xml(self, onus_gem_ids_and_vid):
+ xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"' + \
+ ' xc:operation="delete">'
+
+ for onu_id, gem_ids_and_vid in onus_gem_ids_and_vid.iteritems():
+ for gem_id in gem_ids_and_vid[0]:
+ xml += '<evc-map>'
+ xml += '<name>{}.{}.{}</name>'.format(self.name, onu_id, gem_id)
+ xml += '</evc-map>'
+ xml += '</evc-maps>'
+ return xml
+
+ def _egress_remove_xml(self):
+ return EVCMap._xml_header('delete') + \
+ '<name>{}</name>'.format(self.name) + EVCMap._xml_trailer()
+
+ def _remove(self):
+ if not self.installed:
+ returnValue('Not installed')
+
+ log.info('removing', evc_map=self)
+
+ def _success(rpc_reply):
+ log.debug('remove-success', rpc_reply=rpc_reply)
+ self._installed = False
+
+ def _failure(failure):
+ log.error('remove-failed', failure=failure)
+ self._installed = False
+
+ def _remove_acls(_):
+ acls, self._new_acls = self._new_acls, dict()
+ existing, self._existing_acls = self._existing_acls, dict()
+ acls.update(existing)
+
+ dl = []
+ for acl in acls.itervalues():
+ dl.append(acl.remove())
+
+ if len(dl) > 0:
+ defer.gatherResults(dl, consumeErrors=True)
+
+ def _remove_shaper(_):
+ if self._shaper_name is not None:
+ self.update_downstream_flow_bandwidth(remove=True)
+
+ map_xml = self._ingress_remove_xml(self._gem_ids_and_vid) if self._is_ingress_map \
+ else self._egress_remove_xml()
+
+ d = self._handler.netconf_client.edit_config(map_xml)
+ d.addCallbacks(_success, _failure)
+ d.addBoth(_remove_acls)
+ d.addBoth(_remove_shaper)
+ return d
+
+ @inlineCallbacks
+ def delete(self, flow):
+ """
+ Remove from hardware and delete/clean-up EVC-MAP Object
+
+ :param flow: (FlowEntry) Specific flow to remove from the MAP or None if all
+ flows should be removed
+ :return:
+ """
+ flows = [flow] if flow is not None else list(self._flows.values())
+ removing_all = len(flows) == len(self._flows)
+
+ log.debug('delete', removing_all=removing_all)
+ if not removing_all:
+ for f in flows:
+ self._remove_flow(f)
+
+ else:
+ if self._evc is not None:
+ self._evc.remove_evc_map(self)
+ self._evc = None
+
+ self._valid = False
+ self._cancel_deferred()
+ try:
+ yield self._remove()
+
+ except Exception as e:
+ log.exception('removal', e=e)
+
+ returnValue('Done')
+
+ def reflow_needed(self):
+ log.debug('reflow-needed', installed=self.installed, needs_update=self.needs_update)
+ reflow = not self.installed or self.needs_update
+
+ if not reflow:
+ pass # TODO: implement retrieve & compare of EVC Map parameters
+
+ return reflow
+
+ @staticmethod
+ def find_matching_ingress_flow(flow, upstream_flow_table):
+ """
+ Look for an existing EVC-MAP that may match this flow. Called when upstream signature
+ for a flow does not find a match. This can happen if an ACL flow is added and only a User
+ Data flow exists, or if only an ACL flow exists.
+
+ :param flow: (FlowEntry) flow to add
+ :param upstream_flow_table: (dict of FlowEntry) Existing upstream flows for this device,
+ including the flow we are looking to add
+ :return: (EVCMap) if appropriate one is found, else None
+ """
+ # A User Data flow will have:
+ # signature: <dev>.1.5.2.242
+ # down-sig: <dev>.1.*.2.*
+ # logical-port: 66
+ # is-acl-flow: False
+ #
+ # An ACL flow will have:
+ # signature: <dev>.1.5.[4092 or 4094].None (untagged VLAN == utility VLAN case)
+ # down-sig: <dev>.1.*.[4092 or 4094].*
+ # logical-port: 66
+ # is-acl-flow: True
+ #
+ # Reduce the upstream flow table to only those that match the ingress,
+ # and logical-ports match (and is not this flow) and have a map
+
+ log.debug('find-matching-ingress-flow', logical_port=flow.logical_port, flow=flow.output)
+ candidate_flows = [f for f in upstream_flow_table.itervalues() if
+ f.in_port == flow.in_port and
+ f.logical_port == flow.logical_port and
+ f.output == flow.output and
+ f.evc_map is not None] # This weeds out this flow
+
+ log.debug('find-matching-ingress-flow', candidate_flows=candidate_flows)
+ return candidate_flows[0].evc_map if len(candidate_flows) > 0 else None
+
+ def add_flow(self, flow, evc):
+ """
+ Add a new flow to an existing EVC-MAP. This can be called to add:
+ o an ACL flow to an existing utility EVC, or
+ o an ACL flow to an existing User Data Flow, or
+ o a User Data Flow to an existing ACL flow (and this needs the EVC updated
+ as well).
+
+ Note that the Downstream EVC provided is the one that matches this flow. If
+ this is adding an ACL to an existing User data flow, we DO NOT want to
+ change the EVC Map's EVC
+
+ :param flow: (FlowEntry) New flow
+ :param evc: (EVC) Matching EVC for downstream flow
+ """
+ from flow_entry import FlowEntry
+ # Create temporary EVC-MAP
+ assert flow.flow_direction in FlowEntry.upstream_flow_types, \
+ 'Only Upstream flows additions are supported at this time'
+
+ log.debug('add-flow-to-evc', flow=flow, evc=evc)
+
+ tmp_map = EVCMap.create_ingress_map(flow, evc, dry_run=True) \
+ if flow.flow_direction in FlowEntry.upstream_flow_types \
+ else EVCMap.create_egress_map(flow, evc, dry_run=True)
+
+ if tmp_map is None or not tmp_map.valid:
+ return None
+
+ self._flows[flow.flow_id] = flow
+ self._needs_update = True
+
+ # Are there ACLs to add to any existing (or empty) ACLs
+ if len(tmp_map._new_acls) > 0:
+ self._new_acls.update(tmp_map._new_acls) # New ACL flow
+ log.debug('add-acl-flows', map=str(self), new=tmp_map._new_acls)
+
+ # Look up existing EVC for this flow. If it is a service EVC for
+ # Packet In/Out, and this is a regular flow, migrate to the newer EVC
+ if self._evc.service_evc and not evc.service_evc:
+ log.info('new-evc-for-map', old=self._evc.name, new=evc.name)
+ self._evc.remove_evc_map(self)
+ evc.add_evc_map(self)
+ self._evc = evc
+
+ return self
+
+ @inlineCallbacks
+ def _remove_flow(self, flow):
+ """
+ Remove a specific flow from an EVC_MAP. This includes removing any
+ ACL entries associated with the flow and could result in moving the
+ EVC-MAP over to another EVC.
+
+ :param flow: (FlowEntry) Flow to remove
+ """
+ try:
+ del self._flows[flow.flow_id]
+
+ log('remove-flow-to-evc', flow=flow)
+ # Remove any ACLs
+ acl_name = ACL.flow_to_name(flow)
+ acl = None
+
+ # if not yet installed just remove it from list
+ if acl_name in self._new_acls:
+ del self._new_acls[acl_name]
+ else:
+ acl = self._existing_acls[acl_name]
+ if acl is not None:
+ # Remove ACL from EVC-MAP entry
+
+ try:
+ map_xml = self._ingress_remove_acl_xml(self._gem_ids_and_vid, acl)
+ log.debug('remove', xml=map_xml, name=acl.name)
+ results = yield self._handler.netconf_client.edit_config(map_xml)
+ if results.ok:
+ del self._existing_acls[acl.name]
+
+ # Scan EVC to see if it needs to move back to the Utility
+ # or Untagged EVC from a user data EVC
+ if self._evc and not self._evc.service_evc and\
+ len(self._flows) > 0 and\
+ all(f.is_acl_flow for f in self._flows.itervalues()):
+
+ self._evc.remove_evc_map(self)
+ first_flow = self._flows.itervalues().next()
+ self._evc = first_flow.get_utility_evc(True)
+ self._evc.add_evc_map(self)
+ log.debug('moved-acl-flows-to-utility-evc', newevcname=self._evc.name)
+
+ self._needs_update = True
+ self._evc.schedule_install()
+
+ except Exception as e:
+ log.exception('acl-remove-from-evc', e=e)
+
+ # Remove ACL itself
+ try:
+ yield acl.remove()
+
+ except Exception as e:
+ log.exception('acl-remove', e=e)
+
+ except Exception as e:
+ log.exception('remove-failed', e=e)
+
+ @staticmethod
+ def create_evc_map_name(flow):
+ """Build the base EVC-MAP name from the flow's logical port and flow-id."""
+ # Note: When actually installed into the OLT, the .onu_id.gem_port is
+ # appended to the name
+ return EVC_MAP_NAME_FORMAT.format(flow.logical_port, flow.flow_id)
+
+ @staticmethod
+ def decode_evc_map_name(name):
+ """
+ Reverse engineer EVC-MAP name parameters. Helpful in quick packet-in
+ processing
+
+ :param name: (str) EVC Map Name
+ :return: (dict) Logical Ingress Port, OpenFlow Flow-ID
+ """
+ items = name.split('-') if name is not None else dict()
+
+ # Note: When actually installed into the OLT, the .onu_id.gem_port is
+ # appended to the name
+ return {'ingress-port': items[1],
+ 'flow-id': items[2].split('.')[0]} if len(items) > 2 else dict()
+
+ @inlineCallbacks
+ def update_upstream_flow_bandwidth(self):
+ """
+ Upstream flow bandwidth comes from the flow_entry related to this EVC-MAP
+ and if no bandwidth property is found, allow full bandwidth
+ """
+ # all flows should should be on the same PON
+ flow = self._flows.itervalues().next()
+ is_pon = flow.handler.is_pon_port(flow.in_port)
+
+ if self._is_ingress_map and is_pon:
+ pon_port = flow.handler.get_southbound_port(flow.in_port)
+ if pon_port is None:
+ returnValue('no PON')
+
+ session = self._handler.rest_client
+ # TODO: Refactor with tech profiles
+ # NOTE(review): both values are stubbed to None pending the tech-profile
+ # refactor, so this method currently always exits at the check below
+ tconts = None # pon_port.tconts
+ traffic_descriptors = None # pon_port.traffic_descriptors
+
+ if traffic_descriptors is None or tconts is None:
+ returnValue('no TDs on PON')
+
+ # Default to 10 Gbps when no upstream bandwidth was decoded from the flow
+ bandwidth = self._upstream_bandwidth or 10000000000
+
+ if self.pon_id is not None and self.onu_id is not None:
+ name = 'tcont-{}-{}-data'.format(self.pon_id, self.onu_id)
+ td = traffic_descriptors.get(name)
+ tcont = tconts.get(name)
+
+ if td is not None and tcont is not None:
+ # NOTE(review): alloc_id is assigned but never used below — confirm
+ # whether it was meant to be part of the hardware update
+ alloc_id = tcont.alloc_id
+ td.maximum_bandwidth = bandwidth
+ try:
+ results = yield td.add_to_hardware(session)
+ log.debug('td-modify-results', results=results)
+
+ except Exception as _e:
+ # Best-effort: hardware TD update failures are intentionally
+ # ignored here — TODO confirm this should not be logged
+ pass
+
+ @inlineCallbacks
+ def update_downstream_flow_bandwidth(self, remove=False):
+ """
+ Downstream flow bandwidth is extracted from the related EVC flow_entry
+ bandwidth property. It is written to this EVC-MAP only if it is found
+
+ :param remove: (bool) If True, remove any previously-installed shaper
+ instead of installing one
+ :return: (deferred) NETCONF edit-config results, or None if no edit was needed
+ """
+ xml = None
+ results = None
+
+ if remove:
+ # Clear the saved shaper name first so a failure cannot leave a stale reference
+ name, self._shaper_name = self._shaper_name, None
+ if name is not None:
+ xml = self._shaper_remove_xml(name)
+ else:
+ if self._evc is not None and self._evc.flow_entry is not None \
+ and self._evc.flow_entry.bandwidth is not None:
+ self._shaper_name = self._name
+ xml = self._shaper_install_xml(self._shaper_name,
+ self._evc.flow_entry.bandwidth * 1000) # kbps
+ if xml is not None:
+ try:
+ log.info('downstream-bandwidth', xml=xml, name=self.name, remove=remove)
+ results = yield self._handler.netconf_client.edit_config(xml)
+
+ except RPCError as rpc_err:
+ # 'data-exists' means the shaper is already installed — treat as success
+ if rpc_err.tag == 'data-exists':
+ pass
+
+ except Exception as e:
+ log.exception('downstream-bandwidth', name=self.name, remove=remove, e=e)
+ raise
+
+ returnValue(results)
+
+ def _shaper_install_xml(self, name, bandwidth):
+ """
+ Build NETCONF XML to merge one traffic shaper per ONU/GEM-port of this map.
+
+ :param name: (str) Base shaper name (suffixed with .onu_id.gem_id per entry)
+ :param bandwidth: (int) Shaper rate — caller passes kbps
+ :return: (str) XML document for edit-config
+ """
+ xml = '<adtn-shaper:shapers xmlns:adtn-shaper="http://www.adtran.com/ns/yang/adtran-traffic-shapers" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="merge">'
+ for onu_id, gem_ids_and_vid in self._gem_ids_and_vid.iteritems():
+ # gem_ids_and_vid is a (gem_id_list, vlan_id) tuple; only the GEM IDs matter here
+ for gem_id in gem_ids_and_vid[0]:
+ xml += ' <adtn-shaper:shaper>'
+ xml += ' <adtn-shaper:name>{}.{}.{}</adtn-shaper:name>'.format(name, onu_id, gem_id)
+ xml += ' <adtn-shaper:enabled>true</adtn-shaper:enabled>'
+ xml += ' <adtn-shaper:rate>{}</adtn-shaper:rate>'.format(bandwidth)
+ xml += ' <adtn-shaper-evc-map:evc-map xmlns:adtn-shaper-evc-map="http://www.adtran.com/ns/yang/adtran-traffic-shaper-evc-maps">{}.{}.{}</adtn-shaper-evc-map:evc-map>'.format(self.name, onu_id, gem_id)
+ xml += ' </adtn-shaper:shaper>'
+ xml += '</adtn-shaper:shapers>'
+ return xml
+
+ def _shaper_remove_xml(self, name):
+ """
+ Build NETCONF XML to delete the shapers installed by _shaper_install_xml.
+
+ :param name: (str) Base shaper name used at install time
+ :return: (str) XML document for edit-config
+ """
+ xml = '<adtn-shaper:shapers xmlns:adtn-shaper="http://www.adtran.com/ns/yang/adtran-traffic-shapers" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete">'
+ for onu_id, gem_ids_and_vid in self._gem_ids_and_vid.iteritems():
+ for gem_id in gem_ids_and_vid[0]:
+ xml += ' <adtn-shaper:shaper >'
+ xml += ' <adtn-shaper:name>{}.{}.{}</adtn-shaper:name>'.format(name, onu_id, gem_id)
+ xml += ' </adtn-shaper:shaper>'
+ xml += '</adtn-shaper:shapers>'
+ return xml
+
+ def _setup_tech_profiles(self):
+ """
+ Identify or allocate the TCONT/GEM ports for this EVC-MAP's ONU and record
+ the resulting GEM IDs (and VLAN) in self._gem_ids_and_vid. Also persists
+ the allocations to the KV store and kicks off ONU tech-profile setup.
+ Only applies to ingress maps whose in_port is a PON port.
+ """
+ # Set up the TCONT / GEM Ports for this connection (Downstream only of course)
+ # all flows should have same GEM port setup
+ flow = self._flows.itervalues().next()
+ is_pon = flow.handler.is_pon_port(flow.in_port)
+
+ if self._is_ingress_map and is_pon:
+ pon_port = flow.handler.get_southbound_port(flow.in_port)
+
+ if pon_port is None:
+ return
+
+ # Locate the ONU on this PON whose UNI matches the flow's logical port
+ onu = next((onu for onu in pon_port.onus if onu.logical_port == flow.logical_port), None)
+
+ if onu is None: # TODO: Add multicast support later (self.onu_id == None)
+ return
+
+ self._pon_id = pon_port.pon_id
+ self._onu_id = onu.onu_id
+
+ # Identify or allocate TCONT and GEM Ports. If the ONU has been informed of the
+ # GEM PORTs that belong to it, the tech profiles were already set up by a previous
+ # flows
+ onu_gems = onu.gem_ids(self._tech_profile_id)
+
+ if len(onu_gems) > 0:
+ self._gem_ids_and_vid[onu.onu_id] = (onu_gems, flow.vlan_id)
+ return
+
+ uni_id = self._handler.platform.uni_id_from_uni_port(flow.logical_port)
+ pon_profile = self._handler.tech_profiles[self.pon_id]
+ alloc_id = None
+
+ try:
+ (ofp_port_name, ofp_port_no) = self._handler.get_ofp_port_name(self.pon_id,
+ self.onu_id,
+ flow.logical_port)
+ if ofp_port_name is None:
+ log.error("port-name-not-found")
+ return
+
+ # Check tech profile instance already exists for derived port name
+ tech_profile = pon_profile.get_tech_profile_instance(self._tech_profile_id,
+ ofp_port_name)
+ log.debug('Get-tech-profile-instance-status',
+ tech_profile_instance=tech_profile)
+
+ if tech_profile is None:
+ # create tech profile instance
+ tech_profile = pon_profile.create_tech_profile_instance(self._tech_profile_id,
+ ofp_port_name,
+ self.pon_id)
+ if tech_profile is None:
+ raise Exception('Tech-profile-instance-creation-failed')
+ else:
+ log.debug('Tech-profile-instance-already-exist-for-given port-name',
+ ofp_port_name=ofp_port_name)
+
+ # upstream scheduler
+ us_scheduler = pon_profile.get_us_scheduler(tech_profile)
+
+ # downstream scheduler
+ ds_scheduler = pon_profile.get_ds_scheduler(tech_profile)
+
+ # create Tcont protobuf
+ pb_tconts = pon_profile.get_tconts(tech_profile, us_scheduler, ds_scheduler)
+
+ # create TCONTs & GEM Ports locally
+ for pb_tcont in pb_tconts:
+ from ..xpon.olt_tcont import OltTCont
+ tcont = OltTCont.create(pb_tcont,
+ self.pon_id,
+ self.onu_id,
+ self._tech_profile_id,
+ uni_id,
+ ofp_port_no)
+ if tcont is not None:
+ onu.add_tcont(tcont)
+
+ # Fetch alloc id and gemports from tech profile instance
+ alloc_id = tech_profile.us_scheduler.alloc_id
+
+ onu_gems = [gem.gemport_id for gem in tech_profile.upstream_gem_port_attribute_list]
+
+ for gem in tech_profile.upstream_gem_port_attribute_list:
+ from ..xpon.olt_gem_port import OltGemPort
+ gem_port = OltGemPort.create(self._handler,
+ gem,
+ tech_profile.us_scheduler.alloc_id,
+ self._tech_profile_id,
+ self.pon_id,
+ self.onu_id,
+ uni_id,
+ ofp_port_no)
+ if gem_port is not None:
+ onu.add_gem_port(gem_port)
+
+ self._gem_ids_and_vid = {onu.onu_id: (onu_gems, flow.vlan_id)}
+
+ # Send technology profile information to ONU
+ reactor.callLater(0, self._handler.setup_onu_tech_profile, self._pon_id,
+ self.onu_id, flow.logical_port)
+
+ except BaseException as e:
+ log.exception(exception=e)
+
+ # Update the allocated alloc_id and gem_port_id for the ONU/UNI to KV store
+ # NOTE(review): this runs even after an exception above, in which case
+ # alloc_id may still be None — confirm the KV update is intended then
+ pon_intf_onu_id = (self.pon_id, self.onu_id, uni_id)
+ resource_manager = self._handler.resource_mgr.resource_managers[self.pon_id]
+
+ resource_manager.update_alloc_ids_for_onu(pon_intf_onu_id, list([alloc_id]))
+ resource_manager.update_gemport_ids_for_onu(pon_intf_onu_id, onu_gems)
+
+ self._handler.resource_mgr.update_gemports_ponport_to_onu_map_on_kv_store(onu_gems,
+ self.pon_id,
+ self.onu_id,
+ uni_id)
+
+ def _decode(self, evc):
+ """
+ Decode this map's (single, initial) flow into EVC-MAP settings, including
+ ACL-related match fields, and adjust the supplied EVC's tag-manipulation
+ and switching method to suit.
+
+ :param evc: (EVC) EVC this map belongs to; required
+ :return: (bool) True on success, False (with _status_message set) on failure
+ """
+ from evc import EVC
+ from flow_entry import FlowEntry
+
+ # Only called from initializer, so first flow is only flow
+ flow = self._flows.itervalues().next()
+
+ self._name = EVCMap.create_evc_map_name(flow)
+
+ if evc:
+ self._evc_connection = EVCMap.EvcConnection.EVC
+ else:
+ self._status_message = 'Can only create EVC-MAP if EVC supplied'
+ return False
+
+ is_pon = flow.handler.is_pon_port(flow.in_port)
+ is_uni = flow.handler.is_uni_port(flow.in_port)
+
+ # Flow bandwidth is in Mbps; store as bps
+ if flow.bandwidth is not None:
+ self._upstream_bandwidth = flow.bandwidth * 1000000
+
+ if is_pon or is_uni:
+ # Preserve CE VLAN tag only if utility VLAN/EVC
+ self._uni_port = flow.handler.get_port_name(flow.in_port)
+ evc.ce_vlan_preservation = evc.ce_vlan_preservation or False
+ else:
+ self._status_message = 'EVC-MAPS without UNI or PON ports are not supported'
+ return False # UNI Ports handled in the EVC Maps
+
+ # ACL logic
+ self._eth_type = flow.eth_type
+
+ if self._eth_type == FlowEntry.EtherType.IPv4:
+ self._ip_protocol = flow.ip_protocol
+ self._ipv4_dst = flow.ipv4_dst
+
+ if self._ip_protocol == FlowEntry.IpProtocol.UDP:
+ self._udp_dst = flow.udp_dst
+ self._udp_src = flow.udp_src
+
+ # If no match of VLAN this may be for untagged traffic or upstream and needs to
+ # match the gem-port vid
+
+ self._setup_tech_profiles()
+
+ # self._match_untagged = flow.vlan_id is None and flow.inner_vid is None
+ self._c_tag = flow.inner_vid or flow.vlan_id
+
+ # If a push of a single VLAN is present with a POP of the VLAN in the EVC's
+ # flow, then this is a traditional EVC flow
+
+ evc.men_to_uni_tag_manipulation = EVC.Men2UniManipulation.POP_OUT_TAG_ONLY
+ evc.switching_method = EVC.SwitchingMethod.DOUBLE_TAGGED \
+ if self._c_tag is not None else EVC.SwitchingMethod.SINGLE_TAGGED
+
+ try:
+ # Register the flow's ACL (if any) for installation on the next update
+ acl = ACL.create(flow)
+ if acl.name not in self._new_acls:
+ self._new_acls[acl.name] = acl
+
+ except Exception as e:
+ log.exception('ACL-decoding', e=e)
+ return False
+
+ return True
+
+ # Bulk operations
+
+ @staticmethod
+ def remove_all(client, regex_=EVC_MAP_NAME_REGEX_ALL):
+ """
+ Remove all matching EVC Maps from hardware
+
+ :param client: (ncclient) NETCONF Client to use
+ :param regex_: (String) Regular expression for name matching
+ :return: (deferred)
+ """
+ # Do a 'get' on the evc-map config an you should get the names
+ get_xml = """
+ <filter>
+ <evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps">
+ <evc-map>
+ <name/>
+ </evc-map>
+ </evc-maps>
+ </filter>
+ """
+ log.debug('query', xml=get_xml, regex=regex_)
+
+ def request_failed(results, operation):
+ log.error('{}-failed'.format(operation), results=results)
+ # No further actions. Periodic poll later on will scrub any old EVC-Maps if needed
+
+ def delete_complete(results):
+ log.debug('delete-complete', results=results)
+
+ def do_delete(rpc_reply, regexpr):
+ # Parse the get reply, collect names matching the regex, and issue
+ # a bulk delete for them
+ log.debug('query-complete', rpc_reply=rpc_reply)
+
+ if rpc_reply.ok:
+ result_dict = xmltodict.parse(rpc_reply.data_xml)
+ entries = result_dict['data']['evc-maps'] if 'evc-maps' in result_dict['data'] else {}
+
+ if 'evc-map' in entries:
+ p = re.compile(regexpr)
+
+ if isinstance(entries['evc-map'], list):
+ names = {entry['name'] for entry in entries['evc-map']
+ if 'name' in entry and p.match(entry['name'])}
+ else:
+ # Single entry was returned as a dict rather than a list.
+ # NOTE(review): the 'break' stops at the first item and no
+ # regex match is applied here — confirm both are intended
+ names = set()
+ for item in entries['evc-map'].items():
+ if isinstance(item, tuple) and item[0] == 'name':
+ names.add(item[1])
+ break
+
+ if len(names) > 0:
+ # NOTE(review): the 'xc' namespace prefix is not declared in
+ # this fragment — presumably supplied by the NETCONF client
+ # wrapper; verify
+ del_xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"' + \
+ ' xc:operation = "delete">'
+ for name in names:
+ del_xml += '<evc-map>'
+ del_xml += '<name>{}</name>'.format(name)
+ del_xml += '</evc-map>'
+ del_xml += '</evc-maps>'
+ log.debug('removing', xml=del_xml)
+
+ return client.edit_config(del_xml)
+
+ return succeed('no entries')
+
+ d = client.get(get_xml)
+ d.addCallbacks(do_delete, request_failed, callbackArgs=[regex_], errbackArgs=['get'])
+ d.addCallbacks(delete_complete, request_failed, errbackArgs=['edit-config'])
+ return d
+
+ def _cancel_deferred(self):
+ """Cancel any outstanding deferred operation and forget the reference."""
+ d, self._deferred = self._deferred, None
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ # Ignore errors cancelling an already-completed/failed deferred
+ pass
diff --git a/adapters/adtran_common/flow/flow_entry.py b/adapters/adtran_common/flow/flow_entry.py
new file mode 100644
index 0000000..cb8dd4a
--- /dev/null
+++ b/adapters/adtran_common/flow/flow_entry.py
@@ -0,0 +1,821 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from evc import EVC
+from evc_map import EVCMap
+from enum import IntEnum
+from utility_evc import UtilityEVC
+import pyvoltha.common.openflow.utils as fd
+from pyvoltha.protos.openflow_13_pb2 import OFPP_MAX, OFPP_CONTROLLER, OFPVID_PRESENT, OFPXMC_OPENFLOW_BASIC
+from twisted.internet.defer import returnValue, inlineCallbacks, gatherResults
+
+log = structlog.get_logger()
+
+# IP Protocol numbers
+# Flows matching an IP protocol outside this list are rejected during
+# traffic-selector decode (see FlowEntry._decode_traffic_selector)
+_supported_ip_protocols = [
+ 1, # ICMP
+ 2, # IGMP
+ 6, # TCP
+ 17, # UDP
+]
+
+
+class FlowEntry(object):
+ """
+ Provide a class that wraps the flow rule and also provides state/status for a FlowEntry.
+
+ When a new flow is sent, it is first decoded to check for any potential errors. If None are
+ found, the entry is created and it is analyzed to see if it can be combined to with any other flows
+ to create or modify an existing EVC.
+
+ Note: Since only E-LINE is supported, modification of an existing EVC is not performed.
+ """
+ class PortType(IntEnum):
+ NNI = 0 # NNI Port
+ UNI = 1 # UNI Port
+ PON = 2 # PON Port (all UNIs on PON)
+ CONTROLLER = 3 # Controller port (packet in/out)
+
+ class FlowDirection(IntEnum):
+ UPSTREAM = 0 # UNI port to NNI Port
+ DOWNSTREAM = 1 # NNI port to UNI Port
+ CONTROLLER_UNI = 2 # Trap packet on UNI and send to controller
+ NNI_PON = 3 # NNI port to PON Port (all UNIs) - Utility VLAN & multicast
+
+ # The following are not yet supported
+ CONTROLLER_NNI = 4 # Trap packet on NNI and send to controller
+ CONTROLLER_PON = 5 # Trap packet on all UNIs of a PON and send to controller
+ NNI_NNI = 6 # NNI port to NNI Port
+ UNI_UNI = 7 # UNI port to UNI Port
+ OTHER = 9 # Unable to determine
+
+ # Direction groupings used throughout flow decode and EVC-MAP creation
+ upstream_flow_types = {FlowDirection.UPSTREAM, FlowDirection.CONTROLLER_UNI}
+ downstream_flow_types = {FlowDirection.DOWNSTREAM, FlowDirection.NNI_PON}
+
+ # Old control VLAN (4000) historically used to attach EAPOL and similar traps
+ LEGACY_CONTROL_VLAN = 4000
+
+ # Well known EtherTypes
+ class EtherType(IntEnum):
+ EAPOL = 0x888E
+ IPv4 = 0x0800
+ IPv6 = 0x86DD
+ ARP = 0x0806
+ LLDP = 0x88CC
+
+ # Well known IP Protocols
+ class IpProtocol(IntEnum):
+ IGMP = 2
+ UDP = 17
+
+ def __init__(self, flow, handler):
+ """
+ :param flow: (Flow) VOLTHA flow message to wrap
+ :param handler: (AdtranDeviceHandler) handler for the device
+ """
+ self._flow = flow
+ self._handler = handler
+ self.flow_id = flow.id
+ self.evc = None # EVC this flow is part of
+ self.evc_map = None # EVC-MAP this flow is part of
+ self._flow_direction = FlowEntry.FlowDirection.OTHER
+ self._logical_port = None # Currently ONU VID is logical port if not doing xPON
+ self._is_multicast = False
+ self._is_acl_flow = False
+ self._bandwidth = None # Mbps, set during decode if present
+
+ # A value used to locate possible related flow entries
+ self.signature = None
+ self.downstream_signature = None # Valid for upstream EVC-MAP Flows
+
+ # Selection properties (populated by _decode_traffic_selector)
+ self.in_port = None
+ self.vlan_id = None
+ self.pcp = None
+ self.eth_type = None
+ self.ip_protocol = None
+ self.ipv4_dst = None
+ self.udp_dst = None # UDP Port #
+ self.udp_src = None # UDP Port #
+ self.inner_vid = None
+
+ # Actions (populated by _decode_traffic_treatment)
+ self.output = None
+ self.pop_vlan = False
+ self.push_vlan_tpid = None
+ self.push_vlan_id = None
+
+ self._name = self.create_flow_name()
+
+ def __str__(self):
+ # Compact one-line summary of the decoded match/action fields
+ return 'flow_entry: {}, in: {}, out: {}, vid: {}, inner:{}, eth: {}, IP: {}'.format(
+ self.name, self.in_port, self.output, self.vlan_id, self.inner_vid,
+ self.eth_type, self.ip_protocol)
+
+ def __repr__(self):
+ return str(self)
+
+ @property
+ def name(self):
+ return self._name # TODO: Is a name really needed in production?
+
+ def create_flow_name(self):
+ # Unique per device/flow-id name used for logging and lookups
+ return 'flow-{}-{}'.format(self.device_id, self.flow_id)
+
+ @property
+ def flow(self):
+ # The wrapped VOLTHA flow message
+ return self._flow
+
+ @property
+ def handler(self):
+ # The owning device handler
+ return self._handler
+
+ @property
+ def device_id(self):
+ return self.handler.device_id
+
+ @property
+ def bandwidth(self):
+ """ Bandwidth in Mbps (if any) """
+ return self._bandwidth
+
+ @property
+ def flow_direction(self):
+ # (FlowDirection) determined during decode; OTHER if undecoded/unsupported
+ return self._flow_direction
+
+ @property
+ def is_multicast_flow(self):
+ return self._is_multicast
+
+ @property
+ def is_acl_flow(self):
+ # True when the flow was decoded as an ACL trap, or matches fields
+ # (eth-type/IP-proto/UDP ports) that require ACL support
+ return self._is_acl_flow or self._needs_acl_support
+
+ @property
+ def logical_port(self):
+ return self._logical_port # NNI or UNI Logical Port
+
+ @staticmethod
+ def create(flow, handler):
+ """
+ Create the appropriate FlowEntry wrapper for the flow. This method returns a two
+ results.
+
+ The first result is the flow entry that was created. This could be a match to an
+ existing flow since it is a bulk update. None is returned only if no match to
+ an existing entry is found and decode failed (unsupported field)
+
+ The second result is the EVC this flow should be added to. This could be an
+ existing flow (so your adding another EVC-MAP) or a brand new EVC (no existing
+ EVC-MAPs). None is returned if there are not a valid EVC that can be created YET.
+
+ :param flow: (Flow) Flow entry passed to VOLTHA adapter
+ :param handler: (AdtranDeviceHandler) handler for the device
+ :return: (FlowEntry, EVC)
+ """
+ # Exit early if it already exists
+ try:
+ flow_entry = FlowEntry(flow, handler)
+
+ ######################################################################
+ # Decode the flow entry
+ if not flow_entry._decode(flow):
+ # TODO: When we support individual flow mods, we will need to return
+ # this flow back always
+ return None, None
+
+ ######################################################################
+ # Initialize flow_entry database (dicts) if needed and determine if
+ # the flows have already been handled.
+ downstream_sig_table = handler.downstream_flows
+ upstream_flow_table = handler.upstream_flows
+
+ log.debug('flow-entry-decoded', flow=flow_entry, signature=flow_entry.signature,
+ downstream_signature=flow_entry.downstream_signature)
+
+ if flow_entry.flow_direction in FlowEntry.upstream_flow_types and\
+ flow_entry.flow_id in upstream_flow_table:
+ log.debug('flow-entry-upstream-exists', flow=flow_entry)
+ return flow_entry, None
+
+ if flow_entry.flow_direction in FlowEntry.downstream_flow_types:
+ sig_table = downstream_sig_table.get(flow_entry.signature)
+ if sig_table is not None and flow_entry in sig_table.flows:
+ log.debug('flow-entry-downstream-exists', flow=flow_entry)
+ return flow_entry, None
+
+ ######################################################################
+ # Look for any matching flows in the other direction that might help
+ # make an EVC and then save it off in the device specific flow table
+ #
+ # TODO: For now, only support for E-LINE services between NNI and UNI
+ downstream_flow = None
+ upstream_flows = None
+ downstream_sig = None
+
+ if flow_entry._is_multicast: # Uni-directional flow
+ assert flow_entry._flow_direction in FlowEntry.downstream_flow_types, \
+ 'Only downstream Multicast supported'
+ downstream_flow = flow_entry
+ downstream_sig = flow_entry.signature
+ upstream_flows = []
+
+ elif flow_entry.flow_direction in FlowEntry.downstream_flow_types:
+ downstream_flow = flow_entry
+ downstream_sig = flow_entry.signature
+
+ elif flow_entry.flow_direction in FlowEntry.upstream_flow_types:
+ downstream_sig = flow_entry.downstream_signature
+
+ if downstream_sig is None:
+ # TODO: When we support individual flow mods, we will need to return
+ # this flow back always
+ log.debug('flow-entry-empty-downstream', flow=flow_entry)
+ return None, None
+
+ # Make sure a slot exists for the downstream signature and get its flow table
+ downstream_sig_table = downstream_sig_table.add(downstream_sig)
+ evc = downstream_sig_table.evc
+
+ # Save the new flow_entry to proper flow table
+ if flow_entry.flow_direction in FlowEntry.upstream_flow_types:
+ upstream_flow_table.add(flow_entry)
+ # Use the EVC's own flow if known, otherwise any FlowEntry already
+ # recorded under the downstream signature
+ downstream_flow = evc.flow_entry if evc is not None else \
+ next((_flow for _flow in downstream_sig_table.flows.itervalues()
+ if isinstance(_flow, FlowEntry)), None)
+
+ elif flow_entry.flow_direction in FlowEntry.downstream_flow_types:
+ downstream_sig_table.flows.add(flow_entry)
+
+ # Now find all the upstream flows
+ if downstream_flow is not None:
+ upstream_flows = [_flow for _flow in upstream_flow_table.itervalues()
+ if _flow.downstream_signature == downstream_flow.signature]
+ if len(upstream_flows) == 0 and not downstream_flow.is_multicast_flow:
+ upstream_flows = None
+
+ log.debug('flow-entry-search-results', flow=flow_entry,
+ downstream_flow=downstream_flow, upstream_flows=upstream_flows)
+
+ ######################################################################
+ # Compute EVC and and maps
+ evc = FlowEntry._create_evc_and_maps(evc, downstream_flow, upstream_flows)
+
+ # Save off EVC (if we have one) for this flow if it is new
+ if evc is not None and evc.valid and downstream_sig_table.evc is None:
+ downstream_sig_table.evc = evc
+
+ return flow_entry, evc
+
+ except Exception as e:
+ log.exception('flow-entry-processing', e=e)
+ return None, None
+
+ @staticmethod
+ def _create_evc_and_maps(evc, downstream_flow, upstream_flows):
+ """
+ Give a set of flows, find (or create) the EVC and any needed EVC-MAPs
+
+ :param evc: (EVC) Existing EVC for downstream flow. May be null if not created
+ :param downstream_flow: (FlowEntry) NNI -> UNI flow (provides much of the EVC values)
+ :param upstream_flows: (list of FlowEntry) UNI -> NNI flows (provides much of the EVC-MAP values)
+
+ :return: EVC object
+ """
+ log.debug('flow-evc-and-maps', downstream_flow=downstream_flow,
+ upstream_flows=upstream_flows)
+
+ # NOTE(review): this guard only returns when BOTH evc and downstream_flow
+ # are None; a None downstream_flow with a non-None evc would fail on the
+ # attribute access below — confirm callers never pass that combination
+ if (evc is None and downstream_flow is None) or upstream_flows is None:
+ return None
+
+ # Get any existing EVC if a flow is already created
+ if downstream_flow.evc is None:
+ if evc is not None:
+ downstream_flow.evc = evc
+
+ elif downstream_flow.is_multicast_flow:
+ from mcast import MCastEVC
+ downstream_flow.evc = MCastEVC.create(downstream_flow)
+
+ elif downstream_flow.is_acl_flow:
+ downstream_flow.evc = downstream_flow.get_utility_evc()
+ else:
+ downstream_flow.evc = EVC(downstream_flow)
+
+ if not downstream_flow.evc.valid:
+ log.debug('flow-evc-and-maps-downstream-invalid',
+ downstream_flow=downstream_flow,
+ upstream_flows=upstream_flows)
+ return None
+
+ # Create EVC-MAPs. Note upstream_flows is empty list for multicast
+ # For Packet In/Out support. The upstream flows for will have matching
+ # signatures. So the first one to get created should create the EVC and
+ # if it needs and ACL, do so then. The second one should just reference
+ # the first map.
+ #
+ # If the second has and ACL, then it should add it to the map.
+ # TODO: What to do if the second (or third, ...) is the data one.
+ # What should it do then?
+ sig_map_map = {f.signature: f.evc_map for f in upstream_flows
+ if f.evc_map is not None}
+
+ for flow in upstream_flows:
+ if flow.evc_map is None:
+ if flow.signature in sig_map_map:
+ # Found an explicitly matching existing EVC-MAP. Add flow to this EVC-MAP
+ flow.evc_map = sig_map_map[flow.signature].add_flow(flow, downstream_flow.evc)
+ else:
+ # May need to create a MAP or search for an existing ACL/user EVC-Map
+ # upstream_flow_table = _existing_upstream_flow_entries[flow.device_id]
+ upstream_flow_table = flow.handler.upstream_flows
+ existing_flow = EVCMap.find_matching_ingress_flow(flow, upstream_flow_table)
+
+ if existing_flow is None:
+ flow.evc_map = EVCMap.create_ingress_map(flow, downstream_flow.evc)
+ else:
+ flow.evc_map = existing_flow.add_flow(flow, downstream_flow.evc)
+
+ # Multicast has no upstream maps, so it is valid by construction
+ all_maps_valid = all(flow.evc_map.valid for flow in upstream_flows) \
+ or downstream_flow.is_multicast_flow
+
+ log.debug('flow-evc-and-maps-downstream',
+ downstream_flow=downstream_flow,
+ upstream_flows=upstream_flows, all_valid=all_maps_valid)
+
+ return downstream_flow.evc if all_maps_valid else None
+
+ def get_utility_evc(self, use_default_vlan_id=False):
+ """Return (creating if needed) the utility EVC for this ACL flow."""
+ assert self.is_acl_flow, 'Utility evcs are for acl flows only'
+ return UtilityEVC.create(self, use_default_vlan_id)
+
+ @property
+ def _needs_acl_support(self):
+ if self.ipv4_dst is not None: # In case MCAST downstream has ACL on it
+ return False
+
+ return self.eth_type is not None or self.ip_protocol is not None or\
+ self.ipv4_dst is not None or self.udp_dst is not None or self.udp_src is not None
+
+ def _decode(self, flow):
+ """
+ Examine flow rules and extract appropriate settings
+
+ :param flow: (Flow) VOLTHA flow message being decoded
+ :return: (bool) True if the flow decoded successfully and signatures were built
+ """
+ log.debug('start-decode')
+ status = self._decode_traffic_selector(flow) and self._decode_traffic_treatment(flow)
+
+ # Determine direction of the flow and apply appropriate modifications
+ # to the decoded flows
+ if status:
+ if not self._decode_flow_direction():
+ return False
+
+ if self._flow_direction in FlowEntry.downstream_flow_types:
+ status = self._apply_downstream_mods()
+
+ elif self._flow_direction in FlowEntry.upstream_flow_types:
+ status = self._apply_upstream_mods()
+
+ else:
+ # TODO: Need to code this - Perhaps this is an NNI_PON for Multicast support?
+ log.error('unsupported-flow-direction')
+ status = False
+
+ log.debug('flow-evc-decode', direction=self._flow_direction, is_acl=self._is_acl_flow,
+ inner_vid=self.inner_vid, vlan_id=self.vlan_id, pop_vlan=self.pop_vlan,
+ push_vid=self.push_vlan_id, status=status)
+
+ # Create a signature that will help locate related flow entries on a device.
+ if status:
+ # These are not exact, just ones that may be put together to make an EVC. The
+ # basic rules are:
+ #
+ # 1 - Port numbers in increasing order
+ ports = [self.in_port, self.output]
+ ports.sort()
+ assert len(ports) == 2, 'Invalid port count: {}'.format(len(ports))
+
+ # 2 - The outer VID
+ # 3 - The inner VID. Wildcard if downstream
+ if self.push_vlan_id is None:
+ outer = self.vlan_id
+ inner = self.inner_vid
+ else:
+ outer = self.push_vlan_id
+ inner = self.vlan_id
+
+ # Downstream signature wildcards the non-NNI port and the inner VID so
+ # upstream flows can be matched to it
+ upstream_sig = '{}'.format(ports[0])
+ downstream_sig = '{}'.format(ports[0])
+ upstream_sig += '.{}'.format(ports[1])
+ downstream_sig += '.{}'.format(ports[1] if self.handler.is_nni_port(ports[1]) else '*')
+
+ upstream_sig += '.{}.{}'.format(outer, inner)
+ downstream_sig += '.{}.*'.format(outer)
+
+ if self._flow_direction in FlowEntry.downstream_flow_types:
+ self.signature = downstream_sig
+
+ elif self._flow_direction in FlowEntry.upstream_flow_types:
+ self.signature = upstream_sig
+ self.downstream_signature = downstream_sig
+
+ else:
+ log.error('unsupported-flow')
+ status = False
+
+ log.debug('flow-evc-decode', upstream_sig=self.signature, downstream_sig=self.downstream_signature)
+ return status
+
+ def _decode_traffic_selector(self, flow):
+ """
+ Extract EVC related traffic selection settings
+
+ :param flow: (Flow) VOLTHA flow message
+ :return: (bool) True if every match field was recognized and supported
+ """
+ self.in_port = fd.get_in_port(flow)
+
+ if self.in_port > OFPP_MAX:
+ log.warn('logical-input-ports-not-supported', in_port=self.in_port)
+ return False
+
+ for field in fd.get_ofb_fields(flow):
+ if field.type == fd.IN_PORT:
+ if self._handler.is_nni_port(self.in_port) or self._handler.is_uni_port(self.in_port):
+ self._logical_port = self.in_port
+
+ elif field.type == fd.VLAN_VID:
+ if field.vlan_vid >= OFPVID_PRESENT + 4095:
+ self.vlan_id = None # pre-ONOS v1.13.5 or old EAPOL Rule
+ else:
+ # Strip the OFPVID_PRESENT bit to get the raw 12-bit VID
+ self.vlan_id = field.vlan_vid & 0xfff
+
+ log.debug('*** field.type == VLAN_VID', value=field.vlan_vid, vlan_id=self.vlan_id)
+
+ elif field.type == fd.VLAN_PCP:
+ log.debug('*** field.type == VLAN_PCP', value=field.vlan_pcp)
+ self.pcp = field.vlan_pcp
+
+ elif field.type == fd.ETH_TYPE:
+ log.debug('*** field.type == ETH_TYPE', value=field.eth_type)
+ self.eth_type = field.eth_type
+
+ elif field.type == fd.IP_PROTO:
+ log.debug('*** field.type == IP_PROTO', value=field.ip_proto)
+ self.ip_protocol = field.ip_proto
+
+ if self.ip_protocol not in _supported_ip_protocols:
+ log.error('Unsupported IP Protocol', protocol=self.ip_protocol)
+ return False
+
+ elif field.type == fd.IPV4_DST:
+ log.debug('*** field.type == IPV4_DST', value=field.ipv4_dst)
+ self.ipv4_dst = field.ipv4_dst
+
+ elif field.type == fd.UDP_DST:
+ log.debug('*** field.type == UDP_DST', value=field.udp_dst)
+ self.udp_dst = field.udp_dst
+
+ elif field.type == fd.UDP_SRC:
+ log.debug('*** field.type == UDP_SRC', value=field.udp_src)
+ self.udp_src = field.udp_src
+
+ elif field.type == fd.METADATA:
+ if self._handler.is_nni_port(self.in_port):
+ # Downstream flow
+ log.debug('*** field.type == METADATA', value=field.table_metadata)
+
+ if field.table_metadata > 4095:
+ # ONOS v1.13.5 or later. c-vid in upper 32-bits
+ vid = field.table_metadata & 0x0FFF
+ if vid > 0:
+ self.inner_vid = vid # CTag is never '0'
+
+ elif field.table_metadata > 0:
+ # Pre-ONOS v1.13.5 (vid without the 4096 offset)
+ self.inner_vid = field.table_metadata
+
+ else:
+ # Upstream flow
+ pass # Not used upstream at this time
+
+ log.debug('*** field.type == METADATA', value=field.table_metadata,
+ inner_vid=self.inner_vid)
+ else:
+ log.warn('unsupported-selection-field', type=field.type)
+ self._status_message = 'Unsupported field.type={}'.format(field.type)
+ return False
+
+ return True
+
+ def _decode_traffic_treatment(self, flow):
+ """
+ Extract output port and VLAN push/pop actions from the flow's treatment.
+
+ :param flow: (Flow) VOLTHA flow message
+ :return: (bool) True if every action was recognized and supported
+ """
+ # Loop through traffic treatment
+ for act in fd.get_actions(flow):
+ if act.type == fd.OUTPUT:
+ self.output = act.output.port
+
+ elif act.type == fd.POP_VLAN:
+ log.debug('*** action.type == POP_VLAN')
+ self.pop_vlan = True
+
+ elif act.type == fd.PUSH_VLAN:
+ log.debug('*** action.type == PUSH_VLAN', value=act.push)
+ tpid = act.push.ethertype
+ self.push_vlan_tpid = tpid
+
+ elif act.type == fd.SET_FIELD:
+ log.debug('*** action.type == SET_FIELD', value=act.set_field.field)
+ assert (act.set_field.field.oxm_class == OFPXMC_OPENFLOW_BASIC)
+ field = act.set_field.field.ofb_field
+
+ if field.type == fd.VLAN_VID:
+ # The VID being pushed (strip OFPVID_PRESENT bit)
+ self.push_vlan_id = field.vlan_vid & 0xfff
+ else:
+ log.debug('unsupported-set-field')
+ else:
+ log.warn('unsupported-action', action=act)
+ self._status_message = 'Unsupported action.type={}'.format(act.type)
+ return False
+
+ return True
+
+    def _decode_flow_direction(self):
+        """
+        Determine this flow's direction from the classified types of its
+        ingress (in_port) and egress (output) ports.
+
+        :return: (boolean) True if the (ingress, egress) pair mapped to a
+                 supported FlowDirection; False if it resolved to OTHER
+        """
+        # Determine direction of the flow
+        def port_type(port_number):
+            # Classify a port number against the handler's known port maps
+            if port_number in self._handler.northbound_ports:
+                return FlowEntry.PortType.NNI
+
+            elif port_number in self._handler.southbound_ports:
+                return FlowEntry.PortType.PON
+
+            elif port_number <= OFPP_MAX:
+                return FlowEntry.PortType.UNI
+
+            elif port_number in {OFPP_CONTROLLER, 0xFFFFFFFD}:  # OFPP_CONTROLLER is wrong in proto-file
+                return FlowEntry.PortType.CONTROLLER
+
+            return FlowEntry.PortType.OTHER
+
+        flow_dir_map = {
+            (FlowEntry.PortType.UNI, FlowEntry.PortType.NNI): FlowEntry.FlowDirection.UPSTREAM,
+            (FlowEntry.PortType.NNI, FlowEntry.PortType.UNI): FlowEntry.FlowDirection.DOWNSTREAM,
+            (FlowEntry.PortType.UNI, FlowEntry.PortType.CONTROLLER): FlowEntry.FlowDirection.CONTROLLER_UNI,
+            (FlowEntry.PortType.NNI, FlowEntry.PortType.PON): FlowEntry.FlowDirection.NNI_PON,
+            # The following are not yet supported
+            # (FlowEntry.PortType.NNI, FlowEntry.PortType.CONTROLLER): FlowEntry.FlowDirection.CONTROLLER_NNI,
+            # (FlowEntry.PortType.PON, FlowEntry.PortType.CONTROLLER): FlowEntry.FlowDirection.CONTROLLER_PON,
+            # (FlowEntry.PortType.NNI, FlowEntry.PortType.NNI): FlowEntry.FlowDirection.NNI_NNI,
+            # (FlowEntry.PortType.UNI, FlowEntry.PortType.UNI): FlowEntry.FlowDirection.UNI_UNI,
+        }
+        self._flow_direction = flow_dir_map.get((port_type(self.in_port), port_type(self.output)),
+                                                FlowEntry.FlowDirection.OTHER)
+        return self._flow_direction != FlowEntry.FlowDirection.OTHER
+
+    def _apply_downstream_mods(self):
+        """
+        Apply downstream-specific classification to this flow entry.
+
+        :return: (boolean) False if the flow should not be installed at all
+                 (legacy control-VLAN case), True otherwise
+        """
+        # This is a downstream flow. It could be any one of the following:
+        #
+        # Legacy control VLAN:
+        #     This is the old VLAN 4000 that was used to attach EAPOL and other
+        #     controller flows to. Eventually these will change to CONTROLLER_UNI
+        #     flows. For these, use the 'utility' VLAN instead so 4000 is available
+        #     for other uses (AT&T uses it for downstream multicast video).
+        #
+        # Multicast VLAN:
+        #     This is downstream multicast data.
+        #     TODO: Test this to see if this needs to be in a separate NNI_PON mod-method
+        #
+        # User Data flow:
+        #     This is for user data. Eventually we may need to support ACLs?
+        #
+        # May be for to controller flow downstream (no ethType)
+        if self.vlan_id == FlowEntry.LEGACY_CONTROL_VLAN and self.eth_type is None and self.pcp == 0:
+            return False  # Do not install this flow. Utility VLAN is in charge
+
+        elif self.flow_direction == FlowEntry.FlowDirection.NNI_PON and \
+                self.vlan_id == self.handler.utility_vlan:
+            # Utility VLAN downstream flow/EVC
+            self._is_acl_flow = True
+
+        elif self.vlan_id in self._handler.multicast_vlans:
+            # multicast (ethType = IP)   # TODO: May need to be an NNI_PON flow
+            self._is_multicast = True
+            self._is_acl_flow = True
+
+        else:
+            # Currently do not support ACLs on user data flows downstream
+            assert not self._needs_acl_support  # User data, no special modifications needed at this time
+
+        return True
+
+    def _apply_upstream_mods(self):
+        """
+        Apply upstream-specific classification/fix-ups to this flow entry.
+
+        :return: (boolean) True on success, False if the fix-up failed
+        """
+        #
+        # This is an upstream flow. It could be any of the following
+        #
+        # ACL/Packet capture:
+        #     This is either a legacy (FlowDirection.UPSTREAM) or a new one
+        #     that specifies an output port of controller (FlowDirection.CONTROLLER_UNI).
+        #     Either way, these need to be placed on the Utility VLAN if the ONU attached
+        #     does not have a user-data flow (C-Tag). If there is a C-Tag available,
+        #     then place it on that VLAN.
+        #
+        #     Once a user-data flow is established, move any of the ONUs ACL flows
+        #     over to that VLAN (this is handled elsewhere).
+        #
+        # User Data flows:
+        #     No special modifications are needed
+        #
+        try:
+            # Do not handle PON level ACLs in this method
+            # NOTE(review): an AssertionError here is caught by the except
+            # below and reported as a failed fix-up rather than propagating
+            assert(self._flow_direction != FlowEntry.FlowDirection.CONTROLLER_PON)
+
+            # Is this a legacy (VLAN 4000) upstream to-controller flow
+            if self._needs_acl_support and FlowEntry.LEGACY_CONTROL_VLAN == self.push_vlan_id:
+                # Re-classify as a controller flow and steer it onto the
+                # handler's utility VLAN
+                self._flow_direction = FlowEntry.FlowDirection.CONTROLLER_UNI
+                self._is_acl_flow = True
+                self.push_vlan_id = self.handler.utility_vlan
+
+            return True
+
+        except Exception as e:
+            # TODO: Need to support flow retry if the ONU is not yet activated !!!!
+            log.exception('tag-fixup', e=e)
+            return False
+
+ @staticmethod
+ def drop_missing_flows(handler, valid_flow_ids):
+ dl = []
+ try:
+ flow_table = handler.upstream_flows
+ flows_to_drop = [flow for flow_id, flow in flow_table.items()
+ if flow_id not in valid_flow_ids]
+ dl.extend([flow.remove() for flow in flows_to_drop])
+
+ for sig_table in handler.downstream_flows.itervalues():
+ flows_to_drop = [flow for flow_id, flow in sig_table.flows.items()
+ if isinstance(flow, FlowEntry) and flow_id not in valid_flow_ids]
+ dl.extend([flow.remove() for flow in flows_to_drop])
+
+ except Exception as _e:
+ pass
+
+ return gatherResults(dl, consumeErrors=True) if len(dl) > 0 else returnValue('no-flows-to-drop')
+
+    @inlineCallbacks
+    def remove(self):
+        """
+        Remove this flow entry from the list of existing entries and drop EVC
+        if needed
+
+        :return: (Deferred) fires with 'NOP' if the flow was not tracked,
+                 otherwise 'Done' once hardware removal has been attempted
+        """
+        # Remove from exiting table list
+        flow_id = self.flow_id
+        flow_table = None
+
+        # Pick the owning table based on the flow's direction
+        if self.flow_direction in FlowEntry.upstream_flow_types:
+            flow_table = self._handler.upstream_flows
+
+        elif self.flow_direction in FlowEntry.downstream_flow_types:
+            sig_table = self._handler.downstream_flows.get(self.signature)
+            flow_table = sig_table.flows if sig_table is not None else None
+
+        if flow_table is None or flow_id not in flow_table.keys():
+            returnValue('NOP')
+
+        # Remove from flow table and clean up flow table if empty
+        flow_table.remove(flow_id)
+        evc_map, self.evc_map = self.evc_map, None
+        evc = None
+
+        if self.flow_direction in FlowEntry.downstream_flow_types:
+            sig_table = self._handler.downstream_flows.get(self.signature)
+            if len(flow_table) == 0:  # Only 'evc' entry present
+                evc = sig_table.evc
+            else:
+                assert sig_table.evc is not None, 'EVC flow re-assignment error'
+
+        # Remove flow from the hardware
+        try:
+            dl = []
+            if evc_map is not None:
+                dl.append(evc_map.delete(self))
+
+            if evc is not None:
+                dl.append(evc.delete())
+
+            yield gatherResults(dl, consumeErrors=True)
+
+        except Exception as e:
+            log.exception('removal', e=e)
+
+        if self.flow_direction in FlowEntry.downstream_flow_types:
+            # If this flow owns the EVC, assign it to a remaining flow
+            sig_table = self._handler.downstream_flows.get(self.signature)
+            flow_evc = sig_table.evc
+
+            if flow_evc is not None and flow_evc.flow_entry is not None and flow_id == flow_evc.flow_entry.flow_id:
+                flow_evc.flow_entry = next((_flow for _flow in flow_table.itervalues()
+                                            if isinstance(_flow, FlowEntry)
+                                            and _flow.flow_id != flow_id), None)
+
+        # If evc was deleted, remove the signature table since no flows exist with
+        # that signature
+        if evc is not None:
+            self._handler.downstream_flows.remove(self.signature)
+
+        self.evc = None
+        returnValue('Done')
+
+    @staticmethod
+    def find_evc_map_flows(onu):
+        """
+        For a given OLT, find all the EVC Maps for a specific ONU
+        :param onu: (Onu) onu
+        :return: (list) of matching, valid EVC Maps whose flow ingress is one
+                 of the ONU's UNI ports
+        """
+        # EVCs are only in the downstream table, EVC Maps are in upstream
+        onu_ports = onu.uni_ports
+
+        all_flow_entries = onu.olt.upstream_flows
+        evc_maps = [flow_entry.evc_map for flow_entry in all_flow_entries.itervalues()
+                    if flow_entry.in_port in onu_ports
+                    and flow_entry.evc_map is not None
+                    and flow_entry.evc_map.valid]
+
+        return evc_maps
+
+    @staticmethod
+    def sync_flows_by_onu(onu, reflow=False):
+        """
+        Check status of all flows on a per-ONU basis. Called when values
+        within the ONU are modified that may affect traffic.
+
+        :param onu: (Onu) ONU to examine
+        :param reflow: (boolean) Flag, if True, requests that the flow be sent to
+                                 hardware even if the values in hardware are
+                                 consistent with the current flow settings
+        """
+        evc_maps = FlowEntry.find_evc_map_flows(onu)
+        evcs = {}  # EVC name -> EVC needing re-install (de-duplicated by name)
+
+        for evc_map in evc_maps:
+            if reflow or evc_map.reflow_needed():
+                evc_map.needs_update = False
+
+                # If the map is not installed, its owning EVC must be
+                # (re)scheduled for install as well
+                if not evc_map.installed:
+                    evc = evc_map.evc
+                    if evc is not None:
+                        evcs[evc.name] = evc
+
+        for evc in evcs.itervalues():
+            evc.installed = False
+            evc.schedule_install(delay=2)
+
+ ######################################################
+ # Bulk operations
+
+    @staticmethod
+    def clear_all(handler):
+        """
+        Remove all flows for the device.
+
+        This clears only the local (software) flow tables; it does not issue
+        per-flow hardware removals.
+
+        :param handler: voltha adapter device handler
+        """
+        handler.downstream_flows.clear_all()
+        handler.upstream_flows.clear_all()
+
+    @staticmethod
+    def get_packetout_info(handler, logical_port):
+        """
+        Find parameters needed to send packet out successfully to the OLT.
+
+        :param handler: voltha adapter device handler
+        :param logical_port: (int) logical port number for packet to go out.
+
+        :return: physical port number, ctag, stag, evcmap name
+                 (or (None, None, None, None) when no matching flow is found)
+        """
+        from adapters.adtran_olt.onu import Onu
+
+        for flow_entry in handler.upstream_flows.itervalues():
+            log.debug('get-packetout-info', flow_entry=flow_entry)
+
+            # match logical port
+            if flow_entry.evc_map is not None and flow_entry.evc_map.valid and \
+                    flow_entry.logical_port == logical_port:
+                evc_map = flow_entry.evc_map
+                gem_ids_and_vid = evc_map.gem_ids_and_vid
+
+                # must have valid gem id
+                if len(gem_ids_and_vid) > 0:
+                    for onu_id, gem_ids_with_vid in gem_ids_and_vid.iteritems():
+                        log.debug('get-packetout-info', onu_id=onu_id,
+                                  gem_ids_with_vid=gem_ids_with_vid)
+                        # assumes gem_ids_with_vid is (gem-id-list, ctag) --
+                        # TODO confirm against EVCMap.gem_ids_and_vid
+                        if len(gem_ids_with_vid) > 0:
+                            gem_ids = gem_ids_with_vid[0]
+                            ctag = gem_ids_with_vid[1]
+                            gem_id = gem_ids[0]  # TODO: always grab first in list
+                            return flow_entry.in_port, ctag, Onu.gem_id_to_gvid(gem_id), \
+                                evc_map.get_evcmap_name(onu_id, gem_id)
+        return None, None, None, None
diff --git a/adapters/adtran_common/flow/flow_tables.py b/adapters/adtran_common/flow/flow_tables.py
new file mode 100644
index 0000000..48e2e7e
--- /dev/null
+++ b/adapters/adtran_common/flow/flow_tables.py
@@ -0,0 +1,163 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from flow_entry import FlowEntry
+from evc import EVC
+
+
+class DeviceFlows(object):
+ """ Tracks existing flows on the device """
+
+ def __init__(self):
+ self._flow_table = dict() # Key = (str)Flow ID, Value = FlowEntry
+
+ def __getitem__(self, item):
+ flow_id = item.flow_id if isinstance(item, FlowEntry) else item
+ return self._flow_table[flow_id]
+
+ def __iter__(self):
+ for _flow_id, _flow in self._flow_table.items():
+ yield _flow_id, _flow
+
+ def itervalues(self):
+ for _flow in self._flow_table.values():
+ yield _flow
+
+ def iterkeys(self):
+ for _id in self._flow_table.keys():
+ yield _id
+
+ def items(self):
+ return self._flow_table.items()
+
+ def values(self):
+ return self._flow_table.values()
+
+ def keys(self):
+ return self._flow_table.keys()
+
+ def __len__(self):
+ return len(self._flow_table)
+
+ def add(self, flow):
+ assert isinstance(flow, FlowEntry)
+ if flow.flow_id not in self._flow_table:
+ self._flow_table[flow.flow_id] = flow
+ return flow
+
+ def get(self, item):
+ flow_id = item.flow_id if isinstance(item, FlowEntry) else item
+ return self._flow_table.get(flow_id)
+
+ def remove(self, item):
+ flow_id = item.flow_id if isinstance(item, FlowEntry) else item
+ return self._flow_table.pop(flow_id, None)
+
+ def clear_all(self):
+ self._flow_table = dict()
+
+
+class DownstreamFlows(object):
+ """
+ Tracks existing flows that are downstream (NNI as source port)
+
+ The downstream table is slightly different than the base DeviceFlows
+ table as it is used to track flows that will become EVCs. The base
+ table tracks flows that will be EVC-maps (or related to them).
+
+ The downstream table is also indexed by a downstream signature that
+ is composed as follows:
+
+ <dev-id>.<ingress-port-number>.<s-tag>.*
+
+ In comparison, the upstream flows is similar, but instead of '*' it has the
+ c-tag (if any).
+
+ TODO: Drop device ID from signatures once flow tables are unique to a device handler
+ """
+ def __init__(self):
+ self._signature_table = dict() # Key = (str)Downstream signature
+ # |
+ # +-> downstream-signature
+ # |
+ # +-> 'evc' -> EVC
+ # |
+ # +-> flow-ids -> flow-entries...
+
+ def __getitem__(self, signature):
+ assert isinstance(signature, str)
+ return self._signature_table[signature]
+
+ def __iter__(self):
+ for _flow_id, _flow in self._signature_table.items():
+ yield _flow_id, _flow
+
+ def itervalues(self):
+ for _flow in self._signature_table.values():
+ yield _flow
+
+ def iterkeys(self):
+ for _id in self._signature_table.keys():
+ yield _id
+
+ def items(self):
+ return self._signature_table.items()
+
+ def values(self):
+ return self._signature_table.values()
+
+ def keys(self):
+ return self._signature_table.keys()
+
+ def __len__(self):
+ return len(self._signature_table)
+
+ def get(self, signature):
+ assert isinstance(signature, str)
+ return self._signature_table.get(signature)
+
+ def add(self, signature):
+ assert isinstance(signature, str)
+ """
+ Can be called by upstream flow to reserve a slot
+ """
+ if signature not in self._signature_table:
+ self._signature_table[signature] = DownstreamFlows.SignatureTableEntry(signature)
+ return self._signature_table[signature]
+
+ def remove(self, signature):
+ assert isinstance(signature, str)
+ return self._signature_table.pop(signature)
+
+ def clear_all(self):
+ self._signature_table = dict()
+
+ class SignatureTableEntry(object):
+ def __init__(self, signature):
+ self._signature = signature
+ self._evc = None
+ self._flow_table = DeviceFlows()
+
+ @property
+ def evc(self):
+ return self._evc
+
+ @evc.setter
+ def evc(self, evc):
+ assert isinstance(evc, (EVC, type(None)))
+ self._evc = evc
+
+ @property
+ def flows(self):
+ return self._flow_table
diff --git a/adapters/adtran_common/flow/mcast.py b/adapters/adtran_common/flow/mcast.py
new file mode 100644
index 0000000..54bf24f
--- /dev/null
+++ b/adapters/adtran_common/flow/mcast.py
@@ -0,0 +1,183 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pyvoltha.common.openflow.utils import *
+from evc import EVC
+from flow_entry import FlowEntry
+from twisted.internet import defer
+from twisted.internet.defer import returnValue, inlineCallbacks
+
+log = structlog.get_logger()
+
+EVC_NAME_FORMAT = 'VOLTHA-MCAST-{}' # format(flow.vlan_id)
+EVC_NAME_REGEX_ALL = EVC_NAME_FORMAT.format('*')
+
+
+_mcast_evcs = {} # device-id -> flow dictionary
+ # |
+ # +-> vlan-id -> evcs
+
+
+class MCastEVC(EVC):
+ """
+ Class to wrap Multicast EVC and EVC-MAP functionality
+ """
+ def __init__(self, flow_entry):
+ super(MCastEVC, self).__init__(flow_entry)
+ self._downstream_flows = {flow_entry.flow_id} # Matching Downstream Flow IDs
+
+ def __str__(self):
+ return "MCAST-{}: MEN: {}, VLAN: {}".format(self._name, self._men_ports, self._s_tag)
+
+ def _create_name(self):
+ #
+ # TODO: Take into account selection criteria and output to make the name
+ #
+ return EVC_NAME_FORMAT.format(self._flow.vlan_id)
+
+ def _create_evc_map(self, flow_entry):
+ from evc_map import EVCMap
+ flow = FakeUpstreamFlow(flow_entry.flow, flow_entry.handler)
+ return EVCMap.create_ingress_map(flow, self)
+
+ @staticmethod
+ def create(flow_entry):
+ from evc_map import EVCMap
+
+ device_id = flow_entry.device_id
+ if device_id not in _mcast_evcs:
+ _mcast_evcs[device_id] = {}
+
+ evc_table = _mcast_evcs[device_id]
+
+ try:
+ evc = evc_table.get(flow_entry.vlan_id)
+
+ if evc is None:
+ # Create EVC and initial EVC Map
+ evc = MCastEVC(flow_entry)
+ evc_table[flow_entry.vlan_id] = evc
+ else:
+ if flow_entry.flow_id in evc.downstream_flows: # TODO: Debug only to see if flow_ids are unique
+ pass
+ else:
+ evc.add_downstream_flows(flow_entry.flow_id)
+
+ fake_flow = FakeUpstreamFlow(flow_entry.flow, flow_entry.handler)
+ evc_map_name = EVCMap.create_evc_map_name(fake_flow)
+
+ if evc_map_name not in evc.evc_map_names:
+ EVCMap.create_ingress_map(fake_flow, evc)
+
+ return evc
+
+ except Exception as e:
+ log.exception('mcast-create', e=e)
+ return None
+
+ @property
+ def flow_entry(self):
+ return self._flow
+
+ @property
+ def downstream_flows(self):
+ return frozenset(self._downstream_flows)
+
+ def add_downstream_flows(self, flow_id):
+ self._downstream_flows.add(flow_id)
+
+ def remove_downstream_flows(self, flow_id):
+ self._downstream_flows.discard(flow_id)
+
+ @inlineCallbacks
+ def remove(self, remove_maps=True):
+ """
+ Remove EVC (and optional associated EVC-MAPs) from hardware
+ :param remove_maps: (boolean)
+ :return: (deferred)
+ """
+ log.info('removing', evc=self, remove_maps=remove_maps)
+
+ device_id = self._handler.device_id
+ flow_id = self._flow.id
+ evc_table = _mcast_evcs.get(device_id)
+
+ if evc_table is None or flow_id not in evc_table:
+ returnValue('NOP')
+
+ # Remove flow reference
+ if self._flow.flow_id in self._downstream_flows:
+ del self._downstream_flows[self._flow.flow_id]
+
+ if len(self._downstream_flows) == 0:
+ # Use base class to clean up
+ returnValue(super(MCastEVC, self).remove(remove_maps=True))
+
+ returnValue('More references')
+
+ @inlineCallbacks
+ def delete(self, delete_maps=True):
+ """
+ Remove from hardware and delete/clean-up EVC Object
+ """
+ log.info('deleting', evc=self, delete_maps=delete_maps)
+
+ try:
+ dl = [self.remove()]
+ if delete_maps:
+ for evc_map in self.evc_maps:
+ dl.append(evc_map.delete(self)) # TODO: implement bulk-flow procedures
+
+ yield defer.gatherResults(dl, consumeErrors=True)
+
+ except Exception as e:
+ log.exception('removal', e=e)
+
+ self._evc_maps = None
+ f, self._flow = self._flow, None
+ if f is not None and f.handler is not None:
+ f.handler.remove_evc(self)
+
+ def reflow(self, reflow_maps=True):
+ pass # TODO: Implement or use base class?
+
+ @staticmethod
+ def remove_all(client, regex_=EVC_NAME_REGEX_ALL):
+ """
+ Remove all matching EVCs from hardware
+ :param client: (ncclient) NETCONF Client to use
+ :param regex_: (String) Regular expression for name matching
+ :return: (deferred)
+ """
+ pass # TODO: ???
+
+
+class FakeUpstreamFlow(FlowEntry):
+    """
+    Synthesizes an upstream-looking FlowEntry from a downstream multicast
+    flow so that an ingress EVC-MAP can be created for it.
+
+    NOTE: the assignment order in __init__ matters -- fields derived from
+    vlan_id must be captured before vlan_id is cleared.
+    """
+    def __init__(self, flow, handler):
+        super(FakeUpstreamFlow, self).__init__(flow, handler)
+        self._decode()
+        # Change name that the base class set
+        self._name = self.create_flow_name()
+        self._flow_direction = FlowEntry.FlowDirection.UPSTREAM
+        # Swap ingress/egress so the downstream flow reads as upstream
+        self.in_port, self.output = self.output, self.in_port
+        self.flow_id = '{}-MCAST'.format(self.vlan_id)
+        self._logical_port = self.vlan_id
+        self.push_vlan_id = self.vlan_id
+        self.vlan_id = None
+        self.signature = None
+        self.inner_vid = None
+        self.pop_vlan = False
+
+    def create_flow_name(self):
+        # Unique name keyed by device and multicast VLAN
+        return 'flow-{}-{}-MCAST'.format(self.device_id, self.vlan_id)
diff --git a/adapters/adtran_common/flow/utility_evc.py b/adapters/adtran_common/flow/utility_evc.py
new file mode 100644
index 0000000..fc7fd0b
--- /dev/null
+++ b/adapters/adtran_common/flow/utility_evc.py
@@ -0,0 +1,158 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pyvoltha.common.openflow.utils import *
+from evc import EVC
+from twisted.internet import defer
+from twisted.internet.defer import returnValue, inlineCallbacks
+
+log = structlog.get_logger()
+
+EVC_NAME_FORMAT = 'VOLTHA-UTILITY-{}' # format(flow.vlan_id)
+EVC_NAME_REGEX_ALL = EVC_NAME_FORMAT.format('*')
+
+
+_utility_evcs = {} # device-id -> flow dictionary
+ # |
+ # +-> utility-vlan-id -> evcs
+
+
+class UtilityEVC(EVC):
+ """
+ Class to wrap orphan ingress ACLs EVC functionality
+ """
+ def __init__(self, flow_entry):
+ super(UtilityEVC, self).__init__(flow_entry)
+ self._downstream_flows = {flow_entry.flow_id} # Matching Downstream Flow IDs
+ self.service_evc = True
+
+ def __str__(self):
+ return "VOLTHA-UTILITY-{}: MEN: {}, VLAN: {}".format(self._name, self._men_ports, self._s_tag)
+
+ def _create_name(self, vlan_id=None):
+ #
+ # TODO: Take into account selection criteria and output to make the name
+ #
+ return EVC_NAME_FORMAT.format(self._flow.vlan_id if vlan_id is None else vlan_id)
+
+ @staticmethod
+ def create(flow_entry, use_default_vlan_id=False):
+ device_id = flow_entry.device_id
+ vlan_id = flow_entry.vlan_id if not use_default_vlan_id else flow_entry.handler.utility_vlan
+ evc_table = _utility_evcs.get(device_id)
+
+ if evc_table is None:
+ _utility_evcs[device_id] = dict()
+ evc_table = _utility_evcs[device_id]
+
+ try:
+ evc = evc_table.get(vlan_id)
+
+ if evc is None:
+ # Create EVC and initial EVC Map
+ evc = UtilityEVC(flow_entry)
+
+ # reapply the stag and name if forced vlan id
+ if use_default_vlan_id:
+ evc._s_tag = vlan_id
+ evc._name = evc._create_name(vlan_id)
+
+ evc_table[vlan_id] = evc
+ else:
+ if flow_entry.flow_id in evc.downstream_flows: # TODO: Debug only to see if flow_ids are unique
+ pass
+ else:
+ evc.add_downstream_flows(flow_entry.flow_id)
+
+ return evc
+
+ except Exception as e:
+ log.exception('utility-create', e=e)
+ return None
+
+ @property
+ def downstream_flows(self):
+ return frozenset(self._downstream_flows)
+
+ def add_downstream_flows(self, flow_id):
+ self._downstream_flows.add(flow_id)
+
+ def remove_downstream_flows(self, flow_id):
+ self._downstream_flows.discard(flow_id)
+
+ def remove(self, remove_maps=True):
+ """
+ Remove EVC (and optional associated EVC-MAPs) from hardware
+ :param remove_maps: (boolean)
+ :return: (deferred)
+ """
+ log.info('removing', evc=self, remove_maps=remove_maps)
+
+ device_id = self._flow.handler.device_id
+ flow_id = self._flow.flow_id
+ evc_table = _utility_evcs.get(device_id)
+
+ if evc_table is None:
+ return defer.succeed('NOP')
+
+ # Remove flow reference
+ if self._flow.flow_id in self._downstream_flows:
+ self._downstream_flows.discard(self._flow.flow_id)
+
+ if len(self._downstream_flows) == 0:
+ # Use base class to clean up
+ return super(UtilityEVC, self).remove(remove_maps=True)
+
+ return defer.succeed('More references')
+
+ @inlineCallbacks
+ def delete(self, delete_maps=True):
+ """
+ Remove from hardware and delete/clean-up EVC Object
+ :return: (deferred)
+ """
+ log.info('deleting', evc=self, delete_maps=delete_maps)
+
+ assert self._flow, 'Delete EVC must have flow reference'
+ try:
+ dl = [self.remove()]
+ if delete_maps:
+ for evc_map in self.evc_maps:
+ dl.append(evc_map.delete(None)) # TODO: implement bulk-flow procedures
+
+ yield defer.gatherResults(dl, consumeErrors=True)
+
+ self._evc_maps = None
+ f, self._flow = self._flow, None
+ if f is not None and f.handler is not None:
+ f.handler.remove_evc(self)
+
+ except Exception as e:
+ log.exception('removal', e=e)
+
+ returnValue('Done')
+
+ def reflow(self, reflow_maps=True):
+ pass # TODO: Implement or use base class?
+
+ @staticmethod
+ def remove_all(client, regex_=EVC_NAME_REGEX_ALL):
+ """
+ Remove all matching EVCs from hardware
+ :param client: (ncclient) NETCONF Client to use
+ :param regex_: (String) Regular expression for name matching
+ :return: (deferred)
+ """
+ _utility_evcs.clear()
+ EVC.remove_all(client, regex_)
\ No newline at end of file
diff --git a/adapters/adtran_common/net/__init__.py b/adapters/adtran_common/net/__init__.py
new file mode 100644
index 0000000..d67fcf2
--- /dev/null
+++ b/adapters/adtran_common/net/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2019-present ADTRAN, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_common/net/adtran_netconf.py b/adapters/adtran_common/net/adtran_netconf.py
new file mode 100644
index 0000000..4e39a6a
--- /dev/null
+++ b/adapters/adtran_common/net/adtran_netconf.py
@@ -0,0 +1,373 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from lxml import etree
+from ncclient import manager
+from ncclient.operations import RPCError
+from ncclient.transport.errors import SSHError
+from twisted.internet import defer, threads
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+log = structlog.get_logger('ncclient')
+
+ADTRAN_NS = 'http://www.adtran.com/ns/yang'
+
+
+def adtran_module_url(module):
+ return '{}/{}'.format(ADTRAN_NS, module)
+
+
+def phys_entities_rpc():
+    """
+    Build the NETCONF <get> filter XML that selects all physical entities
+    from the adtran-physical-entities YANG module.
+    """
+    return """
+    <filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+      <physical-entities-state xmlns="{}">
+        <physical-entity/>
+      </physical-entities-state>
+    </filter>
+    """.format(adtran_module_url('adtran-physical-entities'))
+
+
+class AdtranNetconfClient(object):
+ """
+ Performs NETCONF requests
+ """
+    def __init__(self, host_ip, port=830, username='', password='', timeout=10):
+        """
+        :param host_ip: (str) IP address of the NETCONF server
+        :param port: (int) NETCONF SSH port (default 830)
+        :param username: (str) login user name
+        :param password: (str) login password
+        :param timeout: (int) default connect/operation timeout in seconds
+        """
+        self._ip = host_ip
+        self._port = port
+        self._username = username
+        self._password = password
+        self._timeout = timeout
+        self._session = None  # ncclient Manager once connected
+
+    def __str__(self):
+        return "AdtranNetconfClient {}@{}:{}".format(self._username, self._ip, self._port)
+
+    @property
+    def capabilities(self):
+        """
+        Get the server's NETCONF capabilities
+
+        :return: (ncclient.capabilities.Capabilities) object representing the
+                 server's capabilities, or None if no session is established.
+        """
+        return self._session.server_capabilities if self._session else None
+
+    @property
+    def connected(self):
+        """
+        Is this client connected to a NETCONF server
+        :return: (boolean) True if connected
+        """
+        return self._session is not None and self._session.connected
+
+    def connect(self, connect_timeout=None):
+        """
+        Connect to the NETCONF server
+
+          o To disable attempting publickey authentication altogether, call with
+            allow_agent and look_for_keys as False.
+
+          o hostkey_verify enables hostkey verification from ~/.ssh/known_hosts
+
+        :param connect_timeout: (int) optional timeout override in seconds;
+                                defaults to the timeout given at construction
+        :return: (deferred) Deferred request
+        """
+        timeout = connect_timeout or self._timeout
+
+        # Connect on a worker thread; manager.connect() blocks
+        return threads.deferToThread(self._do_connect, timeout)
+
+    def _do_connect(self, timeout):
+        """
+        Blocking connect; runs on a twisted thread-pool thread.
+
+        :param timeout: (int) SSH connect timeout in seconds
+        :return: (ncclient Manager) the established session
+        :raises SSHError: on SSH transport failures (logged then re-raised)
+        """
+        try:
+            self._session = manager.connect(host=self._ip,
+                                            port=self._port,
+                                            username=self._username,
+                                            password=self._password,
+                                            allow_agent=False,
+                                            look_for_keys=False,
+                                            hostkey_verify=False,
+                                            timeout=timeout)
+
+        except SSHError as e:
+            # Log and rethrow exception so any errBack is called
+            log.warn('SSHError-during-connect', e=e)
+            raise e
+
+        except Exception as e:
+            # Log and rethrow exception so any errBack is called
+            # NOTE(review): the '{}' placeholder in this message is never
+            # filled in -- it is emitted verbatim
+            log.exception('Connect-failed: {}', e=e)
+            raise e
+
+        # If debug logging is enabled, decrease the level, DEBUG is a significant
+        # performance hit during response XML decode
+
+        # NOTE(review): isEnabledFor()/setLevel() with string arguments depends
+        # on the logger wrapper in use (stdlib logging expects integer levels)
+        # -- confirm this works with the configured structlog setup
+        if log.isEnabledFor('DEBUG'):
+            log.setLevel('INFO')
+
+        # TODO: ncclient also supports RaiseMode:NONE to limit exceptions. To set use:
+        #
+        #  self._session.raise_mode = RaiseMode:NONE
+        #
+        # and the when you get a response back, you can check 'response.ok' to
+        # see if it is 'True' if it is not, you can enumerate the 'response.errors'
+        # list for more information
+
+        return self._session
+
+ def close(self):
+ """
+ Close the connection to the NETCONF server
+ :return: (deferred) Deferred request
+ """
+ s, self._session = self._session, None
+
+ if s is None or not s.connected:
+ return defer.returnValue(True)
+
+ return threads.deferToThread(self._do_close, s)
+
+ def _do_close(self, old_session):
+ return old_session.close_session()
+
+    @inlineCallbacks
+    def _reconnect(self):
+        """
+        Best-effort close followed by re-connect.  All errors are deliberately
+        swallowed; callers treat this as advisory.
+        """
+        try:
+            yield self.close()
+        except:
+            pass
+
+        try:
+            yield self.connect()
+        except:
+            pass
+
+ def get_config(self, source='running'):
+ """
+ Get the configuration from the specified source
+
+ :param source: (string) Configuration source, 'running', 'candidate', ...
+
+ :return: (deferred) Deferred request that wraps the GetReply class
+ """
+ if not self._session:
+ raise NotImplemented('No SSH Session')
+
+ if not self._session.connected:
+ self._reconnect()
+
+ return threads.deferToThread(self._do_get_config, source)
+
+ def _do_get_config(self, source):
+ """
+ Get the configuration from the specified source
+
+ :param source: (string) Configuration source, 'running', 'candidate', ...
+
+ :return: (GetReply) The configuration.
+ """
+ return self._session.get_config(source)
+
+ def get(self, payload):
+ """
+ Get the requested data from the server
+
+ :param payload: Payload/filter
+ :return: (deferred) for GetReply
+ """
+ log.debug('get', filter=payload)
+
+ if not self._session:
+ raise NotImplemented('No SSH Session')
+
+ if not self._session.connected:
+ self._reconnect()
+
+ return threads.deferToThread(self._do_get, payload)
+
+ def _do_get(self, payload):
+ """
+ Get the requested data from the server
+
+ :param payload: Payload/filter
+ :return: (GetReply) response
+ """
+ try:
+ log.debug('get', payload=payload)
+ response = self._session.get(payload)
+ # To get XML, use response.xml
+ log.debug('response', response=response)
+
+ except RPCError as e:
+ log.exception('get', e=e)
+ raise
+
+ return response
+
+ def lock(self, source, lock_timeout):
+ """
+ Lock the configuration system
+ :return: (deferred) for RpcReply
+ """
+ log.info('lock', source=source, timeout=lock_timeout)
+
+ if not self._session or not self._session.connected:
+ raise NotImplemented('TODO: Support auto-connect if needed')
+
+ return threads.deferToThread(self._do_lock, source, lock_timeout)
+
+ def _do_lock(self, source, lock_timeout):
+ """
+ Lock the configuration system
+ """
+ try:
+ response = self._session.lock(source, timeout=lock_timeout)
+ # To get XML, use response.xml
+
+ except RPCError as e:
+ log.exception('lock', e=e)
+ raise
+
+ return response
+
+ def unlock(self, source):
+ """
+ Unlock the configuration system
+ :param source: name of the configuration datastore to unlock
+
+ :return: (deferred) for RpcReply
+ """
+ log.info('unlock', source=source)
+
+ if not self._session or not self._session.connected:
+ raise NotImplemented('TODO: Support auto-connect if needed')
+
+ return threads.deferToThread(self._do_unlock, source)
+
+ def _do_unlock(self, source):
+ """
+ Unlock the configuration system
+ """
+ try:
+ response = self._session.unlock(source)
+ # To get XML, use response.xml
+
+ except RPCError as e:
+ log.exception('unlock', e=e)
+ raise
+
+ return response
+
+ @inlineCallbacks
+ def edit_config(self, config, target='running', default_operation='none',
+ test_option=None, error_option=None, ignore_delete_error=False):
+ """
+ Loads all or part of the specified config to the target configuration datastore
+ with the ability to lock the datastore during the edit.
+
+ :param config is the configuration, which must be rooted in the config element.
+ It can be specified either as a string or an Element.format="xml"
+ :param target is the name of the configuration datastore being edited
+ :param default_operation if specified must be one of { 'merge', 'replace', or 'none' }
+ :param test_option if specified must be one of { 'test_then_set', 'set' }
+ :param error_option if specified must be one of { 'stop-on-error',
+ 'continue-on-error', 'rollback-on-error' } The
+ 'rollback-on-error' error_option depends on the
+ :rollback-on-error capability.
+ :param ignore_delete_error: (bool) For some startup deletes/clean-ups, we do a
+ delete high up in the config to get whole lists. If
+ these lists are empty, this helps suppress any error
+ message from NETConf on failure to delete an empty list
+
+ :return: (deferred) for RpcReply
+ """
+ if not self._session:
+ raise NotImplemented('No SSH Session')
+
+ if not self._session.connected:
+ try:
+ yield self._reconnect()
+
+ except Exception as e:
+ log.exception('edit-config-connect', e=e)
+
+ try:
+ if config[:7] != '<config':
+ config = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0"' + \
+ ' xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">' + \
+ config + '</config>'
+
+ log.debug('netconf-request', config=config, target=target,
+ default_operation=default_operation)
+
+ rpc_reply = yield threads.deferToThread(self._do_edit_config, target,
+ config, default_operation,
+ test_option, error_option)
+ except Exception as e:
+ if ignore_delete_error and 'operation="delete"' in config.lower():
+ returnValue('ignoring-delete-error')
+ log.exception('edit_config', e=e, config=config, target=target)
+ raise
+
+ returnValue(rpc_reply)
+
+ def _do_edit_config(self, target, config, default_operation, test_option, error_option,
+ ignore_delete_error=False):
+ """
+ Perform actual edit-config operation
+ """
+ try:
+ log.debug('edit-config', target=target, config=config)
+
+ response = self._session.edit_config(target=target, config=config
+ # TODO: Support additional options later
+ # ,default_operation=default_operation,
+ # test_option=test_option,
+ # error_option=error_option
+ )
+
+ log.debug('netconf-response', response=response)
+ # To get XML, use response.xml
+ # To check status, use response.ok (boolean)
+
+ except RPCError as e:
+ if not ignore_delete_error or 'operation="delete"' not in config.lower():
+ log.exception('do_edit_config', e=e, config=config, target=target)
+ raise
+
+ return response
+
+ def rpc(self, rpc_string):
+ """
+ Custom RPC request
+ :param rpc_string: (string) RPC request
+ :return: (deferred) for GetReply
+ """
+ log.debug('rpc', rpc=rpc_string)
+
+ if not self._session:
+ raise NotImplemented('No SSH Session')
+
+ if not self._session.connected:
+ self._reconnect()
+
+ return threads.deferToThread(self._do_rpc, rpc_string)
+
+ def _do_rpc(self, rpc_string):
+ try:
+ response = self._session.dispatch(etree.fromstring(rpc_string))
+ # To get XML, use response.xml
+
+ except RPCError as e:
+ log.exception('rpc', e=e)
+ raise
+
+ return response
diff --git a/adapters/adtran_common/net/adtran_rest.py b/adapters/adtran_common/net/adtran_rest.py
new file mode 100644
index 0000000..9020e82
--- /dev/null
+++ b/adapters/adtran_common/net/adtran_rest.py
@@ -0,0 +1,189 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import structlog
+import treq
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.error import ConnectionClosed, ConnectionDone, ConnectionLost
+
+log = structlog.get_logger()
+
+
+class RestInvalidResponseCode(Exception):
+ def __init__(self, message, url, code):
+ super(RestInvalidResponseCode, self).__init__(message)
+ self.url = url
+ self.code = code
+
+
+class AdtranRestClient(object):
+ """
+ Performs Adtran RESTCONF requests
+ """
+ # HTTP shortcuts
+ HELLO_URI = '/restconf/adtran-hello:hello'
+
+ REST_GET_REQUEST_HEADER = {'User-Agent': 'Adtran RESTConf',
+ 'Accept': ['application/json']}
+
+ REST_POST_REQUEST_HEADER = {'User-Agent': 'Adtran RESTConf',
+ 'Content-Type': 'application/json',
+ 'Accept': ['application/json']}
+
+ REST_PATCH_REQUEST_HEADER = REST_POST_REQUEST_HEADER
+ REST_PUT_REQUEST_HEADER = REST_POST_REQUEST_HEADER
+ REST_DELETE_REQUEST_HEADER = REST_GET_REQUEST_HEADER
+
+ HTTP_OK = 200
+ HTTP_CREATED = 201
+ HTTP_ACCEPTED = 202
+ HTTP_NON_AUTHORITATIVE_INFORMATION = 203
+ HTTP_NO_CONTENT = 204
+ HTTP_RESET_CONTENT = 205
+ HTTP_PARTIAL_CONTENT = 206
+ HTTP_NOT_FOUND = 404
+
+ _valid_methods = {'GET', 'POST', 'PATCH', 'DELETE'}
+ _valid_results = {'GET': [HTTP_OK, HTTP_NO_CONTENT],
+ 'POST': [HTTP_OK, HTTP_CREATED, HTTP_NO_CONTENT],
+ 'PUT': [HTTP_OK, HTTP_CREATED, HTTP_NO_CONTENT],
+ 'PATCH': [HTTP_OK],
+ 'DELETE': [HTTP_OK, HTTP_ACCEPTED, HTTP_NO_CONTENT, HTTP_NOT_FOUND]
+ }
+
+ for _method in _valid_methods:
+ assert _method in _valid_results # Make sure we have a results entry for each supported method
+
+ def __init__(self, host_ip, port, username='', password='', timeout=10):
+ """
+ REST Client initialization
+
+ :param host_ip: (string) IP Address of Adtran Device
+ :param port: (int) Port number
+ :param username: (string) Username for credentials
+ :param password: (string) Password for credentials
+ :param timeout: (int) Number of seconds to wait for a response before timing out
+ """
+ self._ip = host_ip
+ self._port = port
+ self._username = username
+ self._password = password
+ self._timeout = timeout
+
+ def __str__(self):
+ return "AdtranRestClient {}@{}:{}".format(self._username, self._ip, self._port)
+
+ @inlineCallbacks
+ def request(self, method, uri, data=None, name='', timeout=None, is_retry=False,
+ suppress_error=False):
+ """
+ Send a REST request to the Adtran device
+
+ :param method: (string) HTTP method
+ :param uri: (string) fully URL to perform method on
+ :param data: (string) optional data for the request body
+ :param name: (string) optional name of the request, useful for logging purposes
+ :param timeout: (int) Number of seconds to wait for a response before timing out
+ :param is_retry: (boolean) True if this method called recursively in order to recover
+ from a connection loss. Can happen sometimes in debug sessions
+ and in the real world.
+ :param suppress_error: (boolean) If true, do not output ERROR message on REST request failure
+ :return: (dict) On success with the proper results
+ """
+ log.debug('request', method=method, uri=uri, data=data, retry=is_retry)
+
+ if method.upper() not in self._valid_methods:
+ raise NotImplementedError("REST method '{}' is not supported".format(method))
+
+ url = 'http://{}:{}{}{}'.format(self._ip, self._port,
+ '/' if uri[0] != '/' else '',
+ uri)
+ response = None
+ timeout = timeout or self._timeout
+
+ try:
+ if method.upper() == 'GET':
+ response = yield treq.get(url,
+ auth=(self._username, self._password),
+ timeout=timeout,
+ headers=self.REST_GET_REQUEST_HEADER)
+ elif method.upper() == 'POST' or method.upper() == 'PUT':
+ response = yield treq.post(url,
+ data=data,
+ auth=(self._username, self._password),
+ timeout=timeout,
+ headers=self.REST_POST_REQUEST_HEADER)
+ elif method.upper() == 'PATCH':
+ response = yield treq.patch(url,
+ data=data,
+ auth=(self._username, self._password),
+ timeout=timeout,
+ headers=self.REST_PATCH_REQUEST_HEADER)
+ elif method.upper() == 'DELETE':
+ response = yield treq.delete(url,
+ auth=(self._username, self._password),
+ timeout=timeout,
+ headers=self.REST_DELETE_REQUEST_HEADER)
+ else:
+ raise NotImplementedError("REST method '{}' is not supported".format(method))
+
+ except NotImplementedError:
+ raise
+
+ except (ConnectionDone, ConnectionLost) as e:
+ if is_retry:
+ raise
+ returnValue(self.request(method, uri, data=data, name=name,
+ timeout=timeout, is_retry=True))
+
+ except ConnectionClosed:
+ returnValue(ConnectionClosed)
+
+ except Exception as e:
+ log.exception("rest-request", method=method, url=url, name=name, e=e)
+ raise
+
+ if response.code not in self._valid_results[method.upper()]:
+ message = "REST {} '{}' request to '{}' failed with status code {}".format(method, name,
+ url, response.code)
+ if not suppress_error:
+ log.error(message)
+ raise RestInvalidResponseCode(message, url, response.code)
+
+ if response.code in {self.HTTP_NO_CONTENT, self.HTTP_NOT_FOUND}:
+ returnValue(None)
+
+ else:
+ # TODO: May want to support multiple body encodings in the future
+
+ headers = response.headers
+ type_key = 'content-type'
+ type_val = 'application/json'
+
+ if not headers.hasHeader(type_key) or type_val not in headers.getRawHeaders(type_key, []):
+ raise Exception("REST {} '{}' request response from '{}' was not JSON",
+ method, name, url)
+
+ content = yield response.content()
+ try:
+ result = json.loads(content)
+
+ except Exception as e:
+ log.exception("json-decode", method=method, url=url, name=name,
+ content=content, e=e)
+ raise
+
+ returnValue(result)
diff --git a/adapters/adtran_common/net/adtran_zmq.py b/adapters/adtran_common/net/adtran_zmq.py
new file mode 100644
index 0000000..1d1341c
--- /dev/null
+++ b/adapters/adtran_common/net/adtran_zmq.py
@@ -0,0 +1,379 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import structlog
+
+from twisted.internet.defer import succeed
+from twisted.internet import threads
+
+from txzmq import ZmqEndpoint, ZmqFactory
+from txzmq.connection import ZmqConnection
+
+import zmq
+from zmq import constants
+from zmq.utils import jsonapi
+from zmq.utils.strtypes import b, u
+from zmq.auth.base import Authenticator
+
+from threading import Thread, Event
+
+zmq_factory = ZmqFactory()
+
+
+class AdtranZmqClient(object):
+ """
+ Adtran ZeroMQ Client for PON Agent and/or packet in/out service
+ """
+ def __init__(self, ip_address, rx_callback, port):
+ self.log = structlog.get_logger()
+
+ external_conn = 'tcp://{}:{}'.format(ip_address, port)
+
+ self.zmq_endpoint = ZmqEndpoint('connect', external_conn)
+ self._socket = ZmqPairConnection(zmq_factory, self.zmq_endpoint)
+ self._socket.onReceive = rx_callback or AdtranZmqClient.rx_nop
+ self.auth = None
+
+ def send(self, data):
+ try:
+ self._socket.send(data)
+
+ except Exception as e:
+ self.log.exception('send', e=e)
+
+ def shutdown(self):
+ self._socket.onReceive = AdtranZmqClient.rx_nop
+ self._socket.shutdown()
+
+ @property
+ def socket(self):
+ return self._socket
+
+ @staticmethod
+ def rx_nop(_):
+ pass
+
+ def setup_plain_security(self, username, password):
+ self.log.debug('setup-plain-security')
+
+ def configure_plain(_):
+ self.log.debug('plain-security', username=username,
+ password=password)
+
+ self.auth.configure_plain(domain='*', passwords={username: password})
+ self._socket.socket.plain_username = username
+ self._socket.socket.plain_password = password
+
+ def add_endoints(_results):
+ self._socket.addEndpoints([self.zmq_endpoint])
+
+ def config_failure(_results):
+ raise Exception('Failed to configure plain-text security')
+
+ def endpoint_failure(_results):
+ raise Exception('Failed to complete endpoint setup')
+
+ self.auth = TwistedZmqAuthenticator()
+
+ d = self.auth.start()
+ d.addCallbacks(configure_plain, config_failure)
+ d.addCallbacks(add_endoints, endpoint_failure)
+
+ return d
+
+ def setup_curve_security(self):
+ self.log.debug('setup-curve-security')
+ raise NotImplementedError('TODO: curve transport security is not yet supported')
+
+
+class ZmqPairConnection(ZmqConnection):
+ """
+ Bidirectional messages to/from the socket.
+
+ Wrapper around ZeroMQ PUSH socket.
+ """
+ socketType = constants.PAIR
+
+ def messageReceived(self, message):
+ """
+ Called on incoming message from ZeroMQ.
+
+ :param message: message data
+ """
+ self.onReceive(message)
+
+ def onReceive(self, message):
+ """
+ Called on incoming message received from other end of the pair.
+
+ :param message: message data
+ """
+ raise NotImplementedError(self)
+
+ def send(self, message):
+ """
+ Send message via ZeroMQ socket.
+
+ Sending is performed directly to ZeroMQ without queueing. If HWM is
+ reached on ZeroMQ side, sending operation is aborted with exception
+ from ZeroMQ (EAGAIN).
+
+ After writing read is scheduled as ZeroMQ may not signal incoming
+ messages after we touched socket with write request.
+
+ :param message: message data, could be either list of str (multipart
+ message) or just str
+ :type message: str or list of str
+ """
+ from txzmq.compat import is_nonstr_iter
+ from twisted.internet import reactor
+
+ if not is_nonstr_iter(message):
+ self.socket.send(message, constants.NOBLOCK)
+ else:
+ # for m in message[:-1]:
+ # self.socket.send(m, constants.NOBLOCK | constants.SNDMORE)
+ # self.socket.send(message[-1], constants.NOBLOCK)
+ self.socket.send_multipart(message, flags=constants.NOBLOCK)
+
+ if self.read_scheduled is None:
+ self.read_scheduled = reactor.callLater(0, self.doRead)
+
+###############################################################################################
+###############################################################################################
+###############################################################################################
+###############################################################################################
+
+
+def _inherit_docstrings(cls):
+ """inherit docstrings from Authenticator, so we don't duplicate them"""
+ for name, method in cls.__dict__.items():
+ if name.startswith('_'):
+ continue
+ upstream_method = getattr(Authenticator, name, None)
+ if not method.__doc__:
+ method.__doc__ = upstream_method.__doc__
+ return cls
+
+
+@_inherit_docstrings
+class TwistedZmqAuthenticator(object):
+ """Run ZAP authentication in a background thread but communicate via Twisted ZMQ"""
+
+ def __init__(self, encoding='utf-8'):
+ self.log = structlog.get_logger()
+ self.context = zmq_factory.context
+ self.encoding = encoding
+ self.pipe = None
+ self.pipe_endpoint = "inproc://{0}.inproc".format(id(self))
+ self.thread = None
+
+ def allow(self, *addresses):
+ try:
+ self.pipe.send([b'ALLOW'] + [b(a, self.encoding) for a in addresses])
+
+ except Exception as e:
+ self.log.exception('allow', e=e)
+
+ def deny(self, *addresses):
+ try:
+ self.pipe.send([b'DENY'] + [b(a, self.encoding) for a in addresses])
+
+ except Exception as e:
+ self.log.exception('deny', e=e)
+
+ def configure_plain(self, domain='*', passwords=None):
+ try:
+ self.pipe.send([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})])
+
+ except Exception as e:
+ self.log.exception('configure-plain', e=e)
+
+ def configure_curve(self, domain='*', location=''):
+ try:
+ domain = b(domain, self.encoding)
+ location = b(location, self.encoding)
+ self.pipe.send([b'CURVE', domain, location])
+
+ except Exception as e:
+ self.log.exception('configure-curve', e=e)
+
+ def start(self, rx_callback=AdtranZmqClient.rx_nop):
+ """Start the authentication thread"""
+ try:
+ # create a socket to communicate with auth thread.
+
+ endpoint = ZmqEndpoint('bind', self.pipe_endpoint) # We are server, thread will be client
+ self.pipe = ZmqPairConnection(zmq_factory, endpoint)
+ self.pipe.onReceive = rx_callback
+
+ self.thread = LocalAuthenticationThread(self.context,
+ self.pipe_endpoint,
+ encoding=self.encoding)
+
+ return threads.deferToThread(TwistedZmqAuthenticator._do_thread_start,
+ self.thread, timeout=10)
+
+ except Exception as e:
+ self.log.exception('start', e=e)
+
+ @staticmethod
+ def _do_thread_start(thread, timeout=10):
+ thread.start()
+
+ # Event.wait: changed in version 2.7; previously the method always returned None.
+ if sys.version_info < (2, 7):
+ thread.started.wait(timeout=timeout)
+
+ elif not thread.started.wait(timeout=timeout):
+ raise RuntimeError("Authenticator thread failed to start")
+
+ def stop(self):
+ """Stop the authentication thread"""
+ pipe, self.pipe = self.pipe, None
+ thread, self.thread = self.thread, None
+
+ if pipe:
+ pipe.send(b'TERMINATE')
+ pipe.onReceive = AdtranZmqClient.rx_nop
+ pipe.shutdown()
+
+ if thread.is_alive():
+ return threads.deferToThread(TwistedZmqAuthenticator._do_thread_join,
+ thread)
+ return succeed('done')
+
+ @staticmethod
+ def _do_thread_join(thread, timeout=1):
+ thread.join(timeout)
+ pass
+
+ def is_alive(self):
+ """Is the ZAP thread currently running?"""
+ return self.thread and self.thread.is_alive()
+
+ def __del__(self):
+ self.stop()
+
+
+# NOTE: The following is duplicated from zmq code since the class was not exported
+class LocalAuthenticationThread(Thread):
+ """A Thread for running a zmq Authenticator
+
+ This is run in the background by ThreadedAuthenticator
+ """
+
+ def __init__(self, context, endpoint, encoding='utf-8', authenticator=None):
+ super(LocalAuthenticationThread, self).__init__(name='0mq Authenticator')
+ self.log = structlog.get_logger()
+ self.context = context or zmq.Context.instance()
+ self.encoding = encoding
+ self.started = Event()
+ self.authenticator = authenticator or Authenticator(context, encoding=encoding)
+
+ # create a socket to communicate back to main thread.
+ self.pipe = context.socket(zmq.PAIR)
+ self.pipe.linger = 1
+ self.pipe.connect(endpoint)
+
+ def run(self):
+ """Start the Authentication Agent thread task"""
+ try:
+ self.authenticator.start()
+ self.started.set()
+ zap = self.authenticator.zap_socket
+ poller = zmq.Poller()
+ poller.register(self.pipe, zmq.POLLIN)
+ poller.register(zap, zmq.POLLIN)
+ while True:
+ try:
+ socks = dict(poller.poll())
+ except zmq.ZMQError:
+ break # interrupted
+
+ if self.pipe in socks and socks[self.pipe] == zmq.POLLIN:
+ terminate = self._handle_pipe()
+ if terminate:
+ break
+
+ if zap in socks and socks[zap] == zmq.POLLIN:
+ self._handle_zap()
+
+ self.pipe.close()
+ self.authenticator.stop()
+
+ except Exception as e:
+ self.log.exception("run", e=e)
+
+ def _handle_zap(self):
+ """
+ Handle a message from the ZAP socket.
+ """
+ msg = self.authenticator.zap_socket.recv_multipart()
+ if not msg:
+ return
+ self.authenticator.handle_zap_message(msg)
+
+ def _handle_pipe(self):
+ """
+ Handle a message from front-end API.
+ """
+ terminate = False
+
+ # Get the whole message off the pipe in one go
+ msg = self.pipe.recv_multipart()
+
+ if msg is None:
+ terminate = True
+ return terminate
+
+ command = msg[0]
+ self.log.debug("auth received API command", command=command)
+
+ if command == b'ALLOW':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.allow(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to allow", addresses=addresses, e=e)
+
+ elif command == b'DENY':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.deny(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to deny", addresses=addresses, e=e)
+
+ elif command == b'PLAIN':
+ domain = u(msg[1], self.encoding)
+ json_passwords = msg[2]
+ self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords))
+
+ elif command == b'CURVE':
+ # For now we don't do anything with domains
+ domain = u(msg[1], self.encoding)
+
+ # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise
+ # treat location as a directory that holds the certificates.
+ location = u(msg[2], self.encoding)
+ self.authenticator.configure_curve(domain, location)
+
+ elif command == b'TERMINATE':
+ terminate = True
+
+ else:
+ self.log.error("Invalid auth command from API", command=command)
+
+ return terminate
diff --git a/adapters/adtran_common/net/mock_netconf_client.py b/adapters/adtran_common/net/mock_netconf_client.py
new file mode 100644
index 0000000..314f2a0
--- /dev/null
+++ b/adapters/adtran_common/net/mock_netconf_client.py
@@ -0,0 +1,199 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import random
+import time
+from adtran_netconf import AdtranNetconfClient
+from pyvoltha.common.utils.asleep import asleep
+from ncclient.operations.rpc import RPCReply, RPCError
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+log = structlog.get_logger()
+
+_dummy_xml = '<rpc-reply message-id="br-549" ' + \
+ 'xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" ' + \
+ 'xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">' + \
+ '<data/>' + \
+ '</rpc-reply>'
+
+
+class MockNetconfClient(AdtranNetconfClient):
+ """
+ Performs NETCONF requests
+ """
+ def __init__(self, host_ip, port=830, username='', password='', timeout=20):
+ super(MockNetconfClient, self).__init__(host_ip, port=port, username=username,
+ password=password, timeout=timeout)
+ self._connected = False
+ self._locked = {}
+
+ def __str__(self):
+ return "MockNetconfClient {}@{}:{}".format(self._username, self._ip, self._port)
+
+ @property
+ def capabilities(self):
+ """
+ Get the server's NETCONF capabilities
+
+ :return: (ncclient.capabilities.Capabilities) object representing the server's capabilities.
+ """
+ return None
+
+ @property
+ def connected(self):
+ """
+ Is this client connected to a NETCONF server
+ :return: (boolean) True if connected
+ """
+ return self._connected
+
+ @inlineCallbacks
+ def connect(self, connect_timeout=None):
+ """
+ Connect to the NETCONF server
+ o To disable attempting publickey authentication altogether, call with
+ allow_agent and look_for_keys as False.
+
+ o hostkey_verify enables hostkey verification from ~/.ssh/known_hosts
+
+ :return: (deferred) Deferred request
+ """
+ yield asleep(random.uniform(0.1, 5.0)) # Simulate NETCONF request delay
+ self._connected = True
+ self._locked = {}
+ returnValue(True)
+
+ @inlineCallbacks
+ def close(self):
+ """
+ Close the connection to the NETCONF server
+ :return: (deferred) Deferred request
+ """
+ yield asleep(random.uniform(0.1, 0.5)) # Simulate NETCONF request delay
+ self._connected = False
+ self._locked = {}
+ returnValue(True)
+
+ @inlineCallbacks
+ def get_config(self, source='running'):
+ """
+ Get the configuration from the specified source
+
+ :param source: (string) Configuration source, 'running', 'candidate', ...
+ :return: (deferred) Deferred request that wraps the GetReply class
+ """
+ yield asleep(random.uniform(0.1, 4.0)) # Simulate NETCONF request delay
+
+ # TODO: Customize if needed...
+ xml = _dummy_xml
+ returnValue(RPCReply(xml))
+
+ @inlineCallbacks
+ def get(self, payload):
+ """
+ Get the requested data from the server
+
+ :param payload: Payload/filter
+ :return: (deferred) for GetReply
+ """
+ yield asleep(random.uniform(0.1, 3.0)) # Simulate NETCONF request delay
+
+ # TODO: Customize if needed...
+ xml = _dummy_xml
+ returnValue(RPCReply(xml))
+
+ @inlineCallbacks
+ def lock(self, source, lock_timeout):
+ """
+ Lock the configuration system
+ :param source: is the name of the configuration datastore accessed
+ :param lock_timeout: timeout in seconds for holding the lock
+ :return: (deferred) for RpcReply
+ """
+ expire_time = time.time() + lock_timeout
+
+ if source not in self._locked:
+ self._locked[source] = None
+
+ while self._locked[source] is not None:
+ # Watch for lock timeout
+ if time.time() >= self._locked[source]:
+ self._locked[source] = None
+ break
+ yield asleep(0.1)
+
+ if time.time() < expire_time:
+ yield asleep(random.uniform(0.1, 0.5)) # Simulate NETCONF request delay
+ self._locked[source] = expire_time
+
+ returnValue(RPCReply(_dummy_xml) if expire_time > time.time() else RPCError('TODO'))
+
+ @inlineCallbacks
+ def unlock(self, source):
+ """
+ Unlock the configuration system
+
+ :param source: is the name of the configuration datastore accessed
+ :return: (deferred) for RpcReply
+ """
+ if source not in self._locked:
+ self._locked[source] = None
+
+ if self._locked[source] is not None:
+ yield asleep(random.uniform(0.1, 0.5)) # Simulate NETCONF request delay
+
+ self._locked[source] = None
+ returnValue(RPCReply(_dummy_xml))
+
+ @inlineCallbacks
+ def edit_config(self, config, target='running', default_operation='merge',
+ test_option=None, error_option=None):
+ """
+ Loads all or part of the specified config to the target configuration datastore with the ability to lock
+ the datastore during the edit.
+
+ :param config is the configuration, which must be rooted in the config element. It can be specified
+ either as a string or an Element.format="xml"
+ :param target is the name of the configuration datastore being edited
+ :param default_operation if specified must be one of { 'merge', 'replace', or 'none' }
+ :param test_option if specified must be one of { 'test_then_set', 'set' }
+ :param error_option if specified must be one of { 'stop-on-error', 'continue-on-error', 'rollback-on-error' }
+ The 'rollback-on-error' error_option depends on the :rollback-on-error capability.
+
+ :return: (deferred) for RpcReply
+ """
+ try:
+ yield asleep(random.uniform(0.1, 2.0)) # Simulate NETCONF request delay
+
+ except Exception as e:
+ log.exception('edit_config', e=e)
+ raise
+
+ # TODO: Customize if needed...
+ xml = _dummy_xml
+ returnValue(RPCReply(xml))
+
+ @inlineCallbacks
+ def rpc(self, rpc_string):
+ """
+ Custom RPC request
+ :param rpc_string: (string) RPC request
+ :return: (deferred) for GetReply
+ """
+ yield asleep(random.uniform(0.1, 2.0)) # Simulate NETCONF request delay
+
+ # TODO: Customize if needed...
+ xml = _dummy_xml
+ returnValue(RPCReply(xml))
diff --git a/adapters/adtran_common/net/rcmd.py b/adapters/adtran_common/net/rcmd.py
new file mode 100644
index 0000000..3062b4c
--- /dev/null
+++ b/adapters/adtran_common/net/rcmd.py
@@ -0,0 +1,112 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.defer import Deferred, succeed
+from twisted.internet.protocol import Factory, Protocol
+from twisted.conch.client.knownhosts import ConsoleUI, KnownHostsFile
+from twisted.conch.endpoints import SSHCommandClientEndpoint
+from twisted.internet import reactor
+
+log = structlog.get_logger()
+_open = open
+
+
+class RCmd(object):
+ """
+ Execute a one-time remote command via SSH
+ """
+ def __init__(self, host, username, password,
+ command,
+ port=None,
+ keys=None,
+ known_hosts=None,
+ agent=None):
+ self.reactor = reactor
+ self.host = host
+ self.port = port
+ self.username = username
+ self.password = password
+ self.keys = keys
+ # self.knownHosts = known_hosts
+ self.knownHosts = known_hosts
+ self.agent = agent
+ self.command = command
+ self.ui = RCmd.FixedResponseUI(True)
+
+ class NoiseProtocol(Protocol):
+ def __init__(self):
+ self.finished = Deferred()
+ self.strings = ["bif", "pow", "zot"]
+
+ def connectionMade(self):
+ log.debug('connection-made')
+ self._send_noise()
+
+ def _send_noise(self):
+ if self.strings:
+ self.transport.write(self.strings.pop(0) + "\n")
+ else:
+ self.transport.loseConnection()
+
+ def dataReceived(self, data):
+ log.debug('rx', data=data)
+ if self.finished is not None and not self.finished.called:
+ self.finished.callback(data)
+ self._send_noise()
+
+ def connectionLost(self, reason):
+ log.debug('connection-lost')
+ if not self.finished.called:
+ self.finished.callback(reason)
+
+ class PermissiveKnownHosts(KnownHostsFile):
+ def verifyHostKey(self, ui, hostname, ip, key):
+ log.debug('verifyHostKey')
+ return True
+
+ class FixedResponseUI(ConsoleUI):
+ def __init__(self, result):
+ super(RCmd.FixedResponseUI, self).__init__(lambda: _open("/dev/null",
+ "r+b",
+ buffering=0))
+ self.result = result
+
+ def prompt(self, _):
+ log.debug('prompt')
+ return succeed(True)
+
+ def warn(self, text):
+ log.debug('warn')
+ pass
+
+ def _endpoint_for_command(self, command):
+ return SSHCommandClientEndpoint.newConnection(
+ self.reactor, command, self.username, self.host,
+ port=self.port,
+ password=self.password,
+ keys=self.keys,
+ agentEndpoint=self.agent,
+ knownHosts=self.knownHosts,
+ ui=self.ui
+ )
+
+ def execute(self):
+ endpoint = self._endpoint_for_command(self.command)
+ factory = Factory()
+ factory.protocol = RCmd.NoiseProtocol
+
+ d = endpoint.connect(factory)
+ d.addCallback(lambda proto: proto.finished)
+ return d
diff --git a/adapters/adtran_common/port.py b/adapters/adtran_common/port.py
new file mode 100644
index 0000000..0fc49dc
--- /dev/null
+++ b/adapters/adtran_common/port.py
@@ -0,0 +1,251 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from enum import Enum
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+from pyvoltha.protos.common_pb2 import OperStatus, AdminState
+
+
+class AdtnPort(object):
+ """
+ A class similar to the 'Port' class in the VOLTHA
+ """
+ class State(Enum):
+ INITIAL = 0 # Created and initialization in progress
+ RUNNING = 1 # PON port contacted, ONU discovery active
+ STOPPED = 2 # Disabled
+ DELETING = 3 # Cleanup
+
+ def __init__(self, parent, **kwargs):
+ assert parent, 'parent is None'
+ assert 'port_no' in kwargs, 'Port number not found'
+
+ self.log = structlog.get_logger(device_id=parent.device_id)
+
+ self._parent = parent
+ self._port_no = kwargs.get('port_no')
+
+ # Set the following in your derived class. These names are used in
+ # various ways. Typically, the physical port name will be used during
+ # device handler conversations with the hardware (REST, NETCONF, ...)
+ # while the logical port name is what the outside world (ONOS, SEBA, ...)
+ # uses. All ports have a physical port name, but only ports exposed through
+ # VOLTHA as a logical port will have a logical port name
+
+ self._physical_port_name = None
+ self._logical_port_name = None
+ self._label = None
+ self._port = None
+
+ self.sync_tick = 20.0
+ self.sync_deferred = None # For sync of PON config to hardware
+
+ # TODO: Deprecate 'enabled' and use admin_state instead may want initial to always be
+ # disabled and then in derived classes, set it in the 'reset' method called on startup.
+ self._enabled = True
+ self._admin_state = AdminState.ENABLED
+
+ self._oper_status = OperStatus.DISCOVERED
+ self._state = AdtnPort.State.INITIAL
+
+ self.deferred = None # General purpose
+
+ # Statistics
+ self.rx_packets = 0
+ self.rx_bytes = 0
+ self.tx_packets = 0
+ self.tx_bytes = 0
+ self.timestamp = 0 # UTC when KPI items last updated
+
+ def __del__(self):
+ self.stop()
+
+ def get_port(self):
+ """
+ Get the VOLTHA PORT object for this port
+ :return: VOLTHA Port object
+ """
+ raise NotImplementedError('Add to your derived class')
+
+ @property
+ def port_no(self):
+ return self._port_no
+
+ @property
+ def intf_id(self):
+ return self.port_no
+
+ @property
+ def physical_port_name(self):
+ return self._physical_port_name
+
+ @property
+ def logical_port_name(self):
+ return self._logical_port_name
+
+ @property # For backwards compatibility
+ def name(self):
+ return self._logical_port_name
+
+ @property
+ def state(self):
+ return self._state
+
+ @state.setter
+ def state(self, value):
+ self._state = value
+
+ @property
+ def olt(self):
+ return self._parent
+
+ @property
+ def admin_state(self):
+ return self._admin_state
+
+ @admin_state.setter
+ def admin_state(self, value):
+ if self._admin_state != value:
+ self._admin_state = value
+ if self._admin_state == AdminState.ENABLED:
+ self.start()
+ else:
+ self.stop()
+ @property
+ def enabled(self):
+ return self._admin_state == AdminState.ENABLED
+
+ @enabled.setter
+ def enabled(self, value):
+ assert isinstance(value, bool), 'enabled is a boolean'
+ self.admin_state = AdminState.ENABLED if value else AdminState.DISABLED
+
+ @property
+ def oper_status(self):
+ return self._oper_status
+
+ @property
+ def adapter_agent(self):
+ return self.olt.adapter_agent
+
+ def get_logical_port(self):
+ """
+ Get the VOLTHA logical port for this port. For PON ports, a logical port
+ is not currently created, so always return None
+
+ :return: VOLTHA logical port or None if not supported
+ """
+ return None
+
+ def cancel_deferred(self):
+ d1, self.deferred = self.deferred, None
+ d2, self.sync_deferred = self.sync_deferred, None
+
+ for d in [d1, d2]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except Exception:
+ pass
+
+ def _update_adapter_agent(self):
+ raise NotImplementedError('Add to your derived class')
+
+ def start(self):
+ """
+ Start/enable this PON and start ONU discover
+ """
+ if self.state == AdtnPort.State.RUNNING:
+ return succeed('Running')
+
+ self.log.info('start-port')
+
+ self.cancel_deferred()
+ self.state = AdtnPort.State.INITIAL
+ self._oper_status = OperStatus.ACTIVATING
+ self._enabled = True
+
+ # Do the rest of the startup in an async method
+ self.deferred = reactor.callLater(0.5, self.finish_startup)
+ self._update_adapter_agent()
+
+ return succeed('Scheduled')
+
+ def finish_startup(self):
+ if self.state == AdtnPort.State.INITIAL:
+ self.log.debug('final-startup')
+
+ # If here, initial settings were successfully written to hardware
+
+ self._enabled = True
+ self._admin_state = AdminState.ENABLED
+ self._oper_status = OperStatus.ACTIVE # TODO: is this correct, how do we tell GRPC
+ self.state = AdtnPort.State.RUNNING
+
+ self.sync_deferred = reactor.callLater(self.sync_tick,
+ self.sync_hardware)
+ self._update_adapter_agent()
+
+ @inlineCallbacks
+ def stop(self):
+ if self.state == AdtnPort.State.STOPPED:
+ self.log.debug('already stopped')
+ returnValue('Stopped')
+
+ self.log.info('stopping')
+ try:
+ self.cancel_deferred()
+ self._enabled = False
+ self._admin_state = AdminState.DISABLED
+ self._oper_status = OperStatus.UNKNOWN
+ self._update_adapter_agent()
+
+ self.state = AdtnPort.State.STOPPED
+
+ self.deferred = self.finish_stop()
+ yield self.deferred
+
+ except Exception as e:
+ self.log.exception('stop-failed', e=e)
+
+ returnValue('Stopped')
+
+ @inlineCallbacks
+ def finish_stop(self):
+ pass # Add to your derived class if needed
+ returnValue(None)
+
+ def restart(self):
+ if self.state == AdtnPort.State.RUNNING or self.state == AdtnPort.State.STOPPED:
+ start_it = (self.state == AdtnPort.State.RUNNING)
+ self.state = AdtnPort.State.INITIAL
+ return self.start() if start_it else self.stop()
+ return succeed('nop')
+
+ def delete(self):
+ """
+ Parent device is being deleted. Do not change any config but
+ stop all polling
+ """
+ self.log.info('Deleting')
+ self.state = AdtnPort.State.DELETING
+ self.cancel_deferred()
+
+ def sync_hardware(self):
+ raise NotImplementedError('Add to your derived class')
+
+# TODO: Continue to consolidate port functionality
diff --git a/adapters/adtran_common/xpon/__init__.py b/adapters/adtran_common/xpon/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/adtran_common/xpon/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_common/xpon/best_effort.py b/adapters/adtran_common/xpon/best_effort.py
new file mode 100644
index 0000000..99622af
--- /dev/null
+++ b/adapters/adtran_common/xpon/best_effort.py
@@ -0,0 +1,47 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+
+log = structlog.get_logger()
+
+
+class BestEffort(object):
+ def __init__(self, bandwidth, priority, weight):
+ self.bandwidth = bandwidth # bps
+        self.priority = priority        # 0..255
+ self.weight = weight # 0..100
+
+ def __str__(self):
+ return "BestEffort: {}/p-{}/w-{}".format(self.bandwidth,
+ self.priority,
+ self.weight)
+
+ def to_dict(self):
+ val = {
+ 'bandwidth': self.bandwidth,
+ 'priority': self.priority,
+ 'weight': self.weight
+ }
+ return val
+
+ def add_to_hardware(self, session, pon_id, onu_id, alloc_id, best_effort):
+ from ..adtran_olt_handler import AdtranOltHandler
+
+ uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(pon_id, onu_id, alloc_id)
+ data = json.dumps({'best-effort': best_effort.to_dict()})
+ name = 'tcont-best-effort-{}-{}: {}'.format(pon_id, onu_id, alloc_id)
+
+ return session.request('PATCH', uri, data=data, name=name)
diff --git a/adapters/adtran_common/xpon/gem_port.py b/adapters/adtran_common/xpon/gem_port.py
new file mode 100644
index 0000000..14dccb1
--- /dev/null
+++ b/adapters/adtran_common/xpon/gem_port.py
@@ -0,0 +1,63 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class GemPort(object):
+ """
+ Class to wrap TCont capabilities
+ """
+ def __init__(self, gem_id, alloc_id, uni_id, tech_profile_id,
+ encryption=False,
+ multicast=False,
+ traffic_class=None,
+ handler=None,
+ is_mock=False):
+
+ self.gem_id = gem_id
+ self._alloc_id = alloc_id
+ self.uni_id = uni_id
+ self.tech_profile_id = tech_profile_id
+ self.traffic_class = traffic_class
+ self._encryption = encryption
+ self.multicast = multicast
+ self._handler = handler
+ self._is_mock = is_mock
+ self.tech_profile_id = None # TODO: Make property and clean up object once tech profiles fully supported
+
+ # Statistics
+ self.rx_packets = 0
+ self.rx_bytes = 0
+ self.tx_packets = 0
+ self.tx_bytes = 0
+
+ def __str__(self):
+ return "GemPort: alloc-id: {}, gem-id: {}, uni-id: {}".format(self.alloc_id,
+ self.gem_id,
+ self.uni_id)
+
+ @property
+ def alloc_id(self):
+ return self._alloc_id
+
+ @property
+ def encryption(self):
+ return self._encryption
+
+ def to_dict(self):
+ return {
+ 'port-id': self.gem_id,
+ 'alloc-id': self.alloc_id,
+ 'encryption': self._encryption,
+ 'omci-transport': False
+ }
diff --git a/adapters/adtran_common/xpon/tcont.py b/adapters/adtran_common/xpon/tcont.py
new file mode 100644
index 0000000..79d94fa
--- /dev/null
+++ b/adapters/adtran_common/xpon/tcont.py
@@ -0,0 +1,29 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class TCont(object):
+ """
+ Class to wrap TCont capabilities
+ """
+ def __init__(self, alloc_id, tech_profile_id, traffic_descriptor, uni_id, is_mock=False):
+ self.alloc_id = alloc_id
+ self.traffic_descriptor = traffic_descriptor
+ self._is_mock = is_mock
+ self.tech_profile_id = tech_profile_id
+ self.uni_id = uni_id
+
+ def __str__(self):
+ return "TCont: alloc-id: {}, uni-id: {}".format(self.alloc_id,
+ self.uni_id)
diff --git a/adapters/adtran_common/xpon/traffic_descriptor.py b/adapters/adtran_common/xpon/traffic_descriptor.py
new file mode 100644
index 0000000..230605b
--- /dev/null
+++ b/adapters/adtran_common/xpon/traffic_descriptor.py
@@ -0,0 +1,75 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+
+
+class TrafficDescriptor(object):
+ """
+ Class to wrap the uplink traffic descriptor.
+ """
+ class AdditionalBwEligibility(Enum):
+ NONE = 0
+ BEST_EFFORT_SHARING = 1
+ NON_ASSURED_SHARING = 2 # Should match xpon.py values
+ DEFAULT = NONE
+
+ @staticmethod
+ def to_string(value):
+ return {
+ TrafficDescriptor.AdditionalBwEligibility.NON_ASSURED_SHARING: "non-assured-sharing",
+ TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING: "best-effort-sharing",
+ TrafficDescriptor.AdditionalBwEligibility.NONE: "none"
+ }.get(value, "unknown")
+
+ @staticmethod
+ def from_value(value):
+ """
+ Matches both Adtran and xPON values
+ :param value:
+ :return:
+ """
+ return {
+ 0: TrafficDescriptor.AdditionalBwEligibility.NONE,
+ 1: TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING,
+ 2: TrafficDescriptor.AdditionalBwEligibility.NON_ASSURED_SHARING,
+ }.get(value, TrafficDescriptor.AdditionalBwEligibility.DEFAULT)
+
+ def __init__(self, fixed, assured, maximum,
+ additional=AdditionalBwEligibility.DEFAULT,
+ best_effort=None):
+ self.fixed_bandwidth = fixed # bps
+ self.assured_bandwidth = assured # bps
+ self.maximum_bandwidth = maximum # bps
+ self.additional_bandwidth_eligibility = additional
+ self.best_effort = best_effort\
+ if additional == TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING\
+ else None
+
+ def __str__(self):
+ return "TrafficDescriptor: {}/{}/{}".format(self.fixed_bandwidth,
+ self.assured_bandwidth,
+ self.maximum_bandwidth)
+
+ def to_dict(self):
+ val = {
+ 'fixed-bandwidth': self.fixed_bandwidth,
+ 'assured-bandwidth': self.assured_bandwidth,
+ 'maximum-bandwidth': self.maximum_bandwidth,
+ 'additional-bandwidth-eligibility':
+ TrafficDescriptor.AdditionalBwEligibility.to_string(
+ self.additional_bandwidth_eligibility)
+ }
+ return val
+
diff --git a/adapters/adtran_olt/README.md b/adapters/adtran_olt/README.md
new file mode 100644
index 0000000..737b73b
--- /dev/null
+++ b/adapters/adtran_olt/README.md
@@ -0,0 +1,176 @@
+# Adtran OLT Device Adapter
+To preprovision an Adtran OLT, you will need to provide the IP Address and
+the NETCONF/REST credentials for the device. The NETCONF/REST credentials are an
+extension of the existing **preprovision_olt** command and these are placed after
+entering two dashes '_--_'. The full syntax to use is:
+
+| Short | Long | Default | Notes |
+| :---: | :----------------: | :--------: | ----- |
+| -u | --nc_username | '' | NETCONF Username |
+| -p | --nc_password | '' | NETCONF Password |
+| -t | --nc_port | 830 | NETCONF TCP Port |
+| -U | --rc_username | '' | REST Username |
+| -P | --rc_password | '' | REST Password |
+| -T | --rc_port | 8081 | REST TCP Port |
+| -z | --zmq_port | 5656 | ZeroMQ OMCI Proxy Port |
+| -M | --multicast_vlan | 4000 | Multicast VLANs (comma-delimited) |
+| -Z | --pio_port | 5657 | PIO Service ZeroMQ Port |
+| -o | --resource_mgr_key | adtran_olt | OLT Type to look up associated resource manager configuration |
+
+For example, if your Adtran OLT is address 10.17.174.193 with the default TCP ports and
+NETCONF credentials of admin/admin and REST credentials of ADMIN/ADMIN, the command line
+would be:
+
+```bash
+ preprovision_olt -t adtran_olt -i 10.17.174.193 -- -u admin -p admin -U ADMIN -P ADMIN
+```
+or
+```bash
+ preprovision_olt -t adtran_olt -i 10.17.174.193 -- --nc_username admin --nc_password admin --rc_username ADMIN --rc_password ADMIN
+```
+
+In addition to specifying the Adtran OLT by a single IP address, the host & port provisioning option
+is also supported. This allows you to configure the address of the Adtran OLT with the same command line
+option as the OpenOLT device adapter. For the port number, just specify the netconf port (default 830)
+as in:
+
+```bash
+ preprovision_olt -t adtran_olt -H 10.17.174.193:830
+```
+or
+```bash
+ preprovision_olt -t adtran_olt --host_and_port 10.17.174.193:830
+```
+
+## Resource Manager Provisioning Support
+Starting in Fall of 2018, Resource Manager Support was added as the default provisioning mechanism
+for the Adtran OLT as the xPON provisioning support will be deprecated by the v2.0 release in
+late-2018/early-2019.
+
+The Resource Manager is used to manage device PON resource pool and allocate PON resources from
+such pools. Resource Manager module currently manages assignment of ONU-ID, ALLOC-ID and
+GEM-PORT ID. The Resource Manager uses the KV store to back-up all the resource pool allocation data.
+
+The Adtran OLT adapter interacts with Resource Manager module for PON resource assignments. The
+adtranolt_resource_manager module is responsible for interfacing with the Resource Manager.
+
+The Resource Manager optionally uses olt_vendor_type specific resource ranges to initialize the
+PON resource pools. In order to utilize this option, create an entry for olt_vendor_type specific
+PON resource ranges on the KV store. Please make sure to use the same KV store used by the VOLTHA core.
+
+### For example
+To specify **ADTRAN OLT** device specific resource ranges, first create a JSON file
+_adtran_olt_resource_range.json_ with the following entry
+
+{
+ "onu_start_idx": 0,
+ "onu_end_idx": 127,
+ "alloc_id_start_idx": 1024,
+ "alloc_id_end_idx": 4222,
+ "gem_port_id_start_idx": 2176,
+ "gem_port_id_end_idx": 16383,
+ "num_of_pon_port": 16
+}
+This data should be put on the KV store location _resource_manager/xgspon/resource_ranges/adtran_olt_
+
+The format of the KV store location is resource_manager/<technology>/resource_ranges/<resource_mgr_key>
+
+In the below example the KV store is assumed to be Consul. However the same is applicable to be
+etcd or any other KV store. Please make sure to use the same KV store used by the VOLTHA core.
+
+```bash
+curl -X PUT -H "Content-Type: application/json" \
+ http://127.0.0.1:8500/v1/kv/resource_manager/xgspon/resource_ranges/adtran_olt \
+ -d @./adtran_olt_resource_range.json
+```
+The olt_vendor_type should be referred to during the preprovisioning step as shown below. The
+olt_vendor_type is an extra option and should be specified after --. The -o specifies the resource_mgr_key.
+
+ (voltha) preprovision_olt -t adtran -H 192.168.1.100:830 -- -o adtran_olt
+Once the OLT device is enabled, any further PON Resource assignments will happen within the PON Resource ranges defined in adtran_olt_resource_range.json and placed on the KV store.
+
+Additional Notes
+If a default resource range profile should be used with all olt_vendor_types, then place such Resource Range profile at the below path on the KV store.
+
+resource_manager/xgspon/resource_ranges/default
+
+## xPON Provisioning Support
+
+Currently the Adtran Device Adapter supports xPON provisioning. To enable PON ports, or activate ONUs, you
+must use the appropriate commands. In the VOLTHA v2.0 release (Q4 2018?), the xPON provisioning will be removed
+from VOLTHA and replaced with Technology Profiles. _By default, this provisioning is now disabled and you should
+use the '-X' extra-arguments provisioning command switch if you wish to use it_.
+
+### REST Based xPON Pre-Provisioning
+In addition to CLI provisioning, the Adtran OLT Device Adapter can also be provisioned though the
+VOLTHA Northbound REST API. The following examples show curl commands when running with the **_Consul_**
+key-value store. Similar curl commands can be used when **_etcd_** is used as the key value store
+
+```bash
+VOLTHA_IP=localhost
+OLT_IP=10.17.174.228
+REST_PORT=`curl -s http://localhost:8500/v1/catalog/service/voltha-envoy-8443 | jq -r '.[0].ServicePort'`
+
+curl -k -s -X POST https://${VOLTHA_IP}:${REST_PORT}/api/v1/devices \
+ --header 'Content-Type: application/json' --header 'Accept: application/json' \
+ -d "{\"type\": \"adtran_olt\",\"ipv4_address\": \"${OLT_IP}\",\"extra_args\": \"-u admin -p admin -U ADMIN -P ADMIN\"}" \
+| jq '.' | tee /tmp/adtn-olt.json
+```
+This will not only pre-provision the OLT, but it will also return the created VOLTHA Device ID for use in other commands.
+The output is also shown on the console as well:
+
+```bash
+curl -k -s -X POST https://${VOLTHA_IP}:${REST_PORT}/api/v1/devices \
+ --header 'Content-Type: application/json' --header 'Accept: application/json' \
+ -d "{\"type\": \"adtran_olt\",\"ipv4_address\": \"${OLT_IP}\",\"extra_args\": \"-u admin -p admin -U ADMIN -P ADMIN\"}" \
+| jq '.' | tee /tmp/adtn-olt.json
+{
+ "extra_args": "-u admin -p admin -U ADMIN -P ADMIN",
+ "vendor": "",
+ "channel_terminations": [],
+ "parent_port_no": 0,
+ "connect_status": "UNKNOWN",
+ "root": false,
+ "adapter": "adtran_olt",
+ "vlan": 0,
+ "hardware_version": "",
+ "ports": [],
+ "ipv4_address": "10.17.174.228",
+ "parent_id": "",
+ "oper_status": "UNKNOWN",
+ "admin_state": "PREPROVISIONED",
+ "reason": "",
+ "serial_number": "",
+ "model": "",
+ "type": "adtran_olt",
+ "id": "00017cbb382b9260",
+ "firmware_version": ""
+}
+```
+Besides specifying the "ipv4_address" leaf, you can alternatively use the "host_and_port" leaf to
+provide the IP Host address and the NetCONF port as in "10.17.174.228:830"
+
+### Enabling the Pre-Provisioned OLT
+To enable the OLT, you need to retrieve the OLT Device ID and issue a POST request to the proper URL as in:
+```bash
+DEVICE_ID=$(jq .id /tmp/adtn-olt.json | sed 's/"//g')
+
+curl -k -s -X POST https://${VOLTHA_IP}:${REST_PORT}/api/v1/local/devices/${DEVICE_ID}/enable
+```
+#### Other REST APIs
+To list out any devices, you can use the following command:
+
+```bash
+curl -k -s https://${VOLTHA_IP}:${REST_PORT}/api/v1/devices | json_pp
+```
+
+Other API endpoints (beyond the /v1/ field above) can be listed with the following command
+
+```bash
+curl -k -s https://${VOLTHA_IP}:${REST_PORT}/api/v1 | json_pp
+```
+
+# Tested OLT Device Driver versions
+
+The minimum version number for the OLT software is: *_11971320F1-ML-3309_* or later
+
diff --git a/adapters/adtran_olt/__init__.py b/adapters/adtran_olt/__init__.py
new file mode 100644
index 0000000..d67fcf2
--- /dev/null
+++ b/adapters/adtran_olt/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2019-present ADTRAN, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_olt/adtran_olt.py b/adapters/adtran_olt/adtran_olt.py
new file mode 100644
index 0000000..c052b78
--- /dev/null
+++ b/adapters/adtran_olt/adtran_olt.py
@@ -0,0 +1,222 @@
+#
+# Copyright 2019-present ADTRAN, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+ADTRAN OLT Adapter.
+"""
+import structlog
+from twisted.internet import reactor, defer
+
+from pyvoltha.adapters.iadapter import OltAdapter
+from pyvoltha.protos import third_party
+from pyvoltha.protos.common_pb2 import AdminState
+
+from adtran_olt_handler import AdtranOltHandler
+
+
+_ = third_party
+log = structlog.get_logger()
+
+
+class AdtranOltAdapter(OltAdapter):
+ name = 'adtran_olt'
+
+ def __init__(self, core_proxy, adapter_proxy, config):
+ super(AdtranOltAdapter, self).__init__(core_proxy=core_proxy,
+ adapter_proxy=adapter_proxy,
+ config=config,
+ device_handler_class=AdtranOltHandler,
+ name=AdtranOltAdapter.name,
+ vendor='ADTRAN, Inc.',
+ version='2.0.0',
+ device_type=AdtranOltAdapter.name,
+ accepts_bulk_flow_update=True,
+ accepts_add_remove_flow_updates=False) # TODO: Implement me
+
+ log.debug('adtran_olt.__init__')
+
+ def health(self):
+ """
+ Return a 3-state health status using the voltha.HealthStatus message.
+
+ :return: Deferred or direct return with voltha.HealthStatus message
+ """
+ # TODO: Currently this is always healthy for every adapter.
+ # If we decide not to modify this, delete this method and use base class method
+ from pyvoltha.protos.health_pb2 import HealthStatus
+ return HealthStatus(state=HealthStatus.HEALTHY)
+
+ def abandon_device(self, device):
+ """
+ Make sure the adapter no longer looks after device. This is called
+ if device ownership is taken over by another Voltha instance.
+
+ :param device: A Voltha.Device object
+ :return: (Deferred) Shall be fired to acknowledge abandonment.
+ """
+ log.info('abandon-device', device=device)
+ raise NotImplementedError()
+
+ def adopt_device(self, device):
+ """
+ Make sure the adapter looks after given device. Called when a device
+ is provisioned top-down and needs to be activated by the adapter.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions. Such extensions shall be described as part of
+ the device type specification returned by device_types().
+ :return: (Deferred) Shall be fired to acknowledge device ownership.
+ """
+ log.info('adopt-device', device=device)
+ kwargs = {
+ 'adapter': self,
+ 'device-id': device.id
+ }
+ self.devices_handlers[device.id] = self.device_handler_class(**kwargs)
+ d = defer.Deferred()
+ reactor.callLater(0, self.devices_handlers[device.id].activate, d, False)
+ return d
+
+ def reconcile_device(self, device):
+ try:
+ self.devices_handlers[device.id] = self.device_handler_class(self,
+ device.id)
+ # Work only required for devices that are in ENABLED state
+ if device.admin_state == AdminState.ENABLED:
+
+ kwargs = {
+ 'adapter': self,
+ 'device-id': device.id
+ }
+ self.devices_handlers[device.id] =self.device_handler_class(**kwargs)
+ d = defer.Deferred()
+ reactor.callLater(0, self.devices_handlers[device.id].activate, d, True)
+
+ else:
+ # Invoke the children reconciliation which would setup the
+ # basic children data structures
+ self.core_proxy.reconcile_child_devices(device.id)
+ return device
+
+ except Exception, e:
+ log.exception('Exception', e=e)
+
+    def self_test_device(self, device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: Will return result of self test
+        """
+        log.info('self-test-device', device=device.id)
+        # TODO: Support self test?
+        from pyvoltha.protos.voltha_pb2 import SelfTestResponse
+        return SelfTestResponse(result=SelfTestResponse.NOT_SUPPORTED)
+
+ def delete_device(self, device):
+ """
+ This is called to delete a device from the PON based on a NBI call.
+ If the device is an OLT then the whole PON will be deleted.
+
+ :param device: A Voltha.Device object.
+ :return: (Deferred) Shall be fired to acknowledge the deletion.
+ """
+ log.info('delete-device', device=device)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ reactor.callLater(0, handler.delete)
+ del self.device_handlers[device.id]
+ del self.logical_device_id_to_root_device_id[device.parent_id]
+
+ return device
+
+ def download_image(self, device, request):
+ """
+ This is called to request downloading a specified image into the standby partition
+ of a device based on a NBI call.
+
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :return: (Deferred) Shall be fired to acknowledge the download.
+ """
+ log.info('image_download', device=device, request=request)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.start_download(device, request, defer.Deferred())
+
+ def get_image_download_status(self, device, request):
+ """
+ This is called to inquire about a requested image download status based
+ on a NBI call. The adapter is expected to update the DownloadImage DB object
+ with the query result
+
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :return: (Deferred) Shall be fired to acknowledge
+ """
+ log.info('get_image_download', device=device, request=request)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.download_status(device, request, defer.Deferred())
+
+ def cancel_image_download(self, device, request):
+ """
+ This is called to cancel a requested image download
+ based on a NBI call. The admin state of the device will not
+ change after the download.
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :return: (Deferred) Shall be fired to acknowledge
+ """
+ log.info('cancel_image_download', device=device)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.cancel_download(device, request, defer.Deferred())
+
+ def activate_image_update(self, device, request):
+ """
+ This is called to activate a downloaded image from
+ a standby partition into active partition.
+ Depending on the device implementation, this call
+ may or may not cause device reboot.
+ If no reboot, then a reboot is required to make the
+ activated image running on device
+ This call is expected to be non-blocking.
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :return: (Deferred) OperationResponse object.
+ """
+ log.info('activate_image_update', device=device, request=request)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.activate_image(device, request, defer.Deferred())
+
+ def revert_image_update(self, device, request):
+ """
+ This is called to deactivate the specified image at
+ active partition, and revert to previous image at
+ standby partition.
+ Depending on the device implementation, this call
+ may or may not cause device reboot.
+ If no reboot, then a reboot is required to make the
+ previous image running on device
+ This call is expected to be non-blocking.
+ :param device: A Voltha.Device object.
+ :param request: A Voltha.ImageDownload object.
+ :return: (Deferred) OperationResponse object.
+ """
+ log.info('revert_image_update', device=device, request=request)
+ handler = self.devices_handlers.get(device.id)
+ if handler is not None:
+ return handler.revert_image(device, request, defer.Deferred())
diff --git a/adapters/adtran_olt/adtran_olt.yml b/adapters/adtran_olt/adtran_olt.yml
new file mode 100644
index 0000000..8dc42a3
--- /dev/null
+++ b/adapters/adtran_olt/adtran_olt.yml
@@ -0,0 +1,67 @@
+---
+# Copyright 2019-present ADTRAN, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+logging:
+ version: 1
+
+ formatters:
+ brief:
+ format: '%(message)s'
+ default:
+ format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+ datefmt: '%Y%m%dT%H%M%S'
+
+ handlers:
+ console:
+ class : logging.StreamHandler
+ level: DEBUG
+ formatter: default
+ stream: ext://sys.stdout
+ localRotatingFile:
+ class: logging.handlers.RotatingFileHandler
+ filename: adtran_olt.log
+ formatter: default
+ maxBytes: 2097152
+ backupCount: 10
+ level: DEBUG
+ null:
+ class: logging.NullHandler
+
+ loggers:
+ amqp:
+ handlers: [null]
+ propagate: False
+ conf:
+ propagate: False
+ '': # root logger
+ handlers: [console, localRotatingFile]
+ level: DEBUG # this can be bumped up/down by -q and -v command line
+ # options
+ propagate: False
+
+
+kafka-cluster-proxy:
+ event_bus_publisher:
+ topic_mappings:
+ 'model-change-events':
+ kafka_topic: 'voltha.events'
+ filters: [null]
+ 'alarms':
+ kafka_topic: 'voltha.alarms'
+ filters: [null]
+ 'kpis':
+ kafka_topic: 'voltha.kpis'
+ filters: [null]
+
diff --git a/adapters/adtran_olt/adtran_olt_handler.py b/adapters/adtran_olt/adtran_olt_handler.py
new file mode 100644
index 0000000..ad32b84
--- /dev/null
+++ b/adapters/adtran_olt/adtran_olt_handler.py
@@ -0,0 +1,1400 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import random
+import xmltodict
+
+from twisted.internet import reactor
+from twisted.internet.defer import returnValue, inlineCallbacks, succeed
+
+from codec.olt_state import OltState
+from adapters.adtran_common.download import Download
+from adapters.adtran_common.flow.flow_entry import FlowEntry
+from net.pio_zmq import PioClient
+from net.pon_zmq import PonClient
+from resources.adtran_olt_resource_manager import AdtranOltResourceMgr
+from adapters.adtran_common.adtran_device_handler import AdtranDeviceHandler
+from resources import adtranolt_platform as platform
+from adapters.adtran_common.net.rcmd import RCmd
+
+from pyvoltha.common.tech_profile.tech_profile import *
+from pyvoltha.common.openflow.utils import ofp, mk_flow_stat, in_port, output, vlan_vid
+from pyvoltha.adapters.common.frameio.frameio import hexify
+from pyvoltha.adapters.extensions.omci.omci import *
+from pyvoltha.protos.voltha_pb2 import Device
+from pyvoltha.protos.common_pb2 import AdminState, OperStatus
+from pyvoltha.protos.device_pb2 import ImageDownload, Image, Port
+from pyvoltha.protos.openflow_13_pb2 import OFPP_MAX, OFPC_GROUP_STATS, OFPC_PORT_STATS, \
+ OFPC_TABLE_STATS, OFPC_FLOW_STATS, ofp_switch_features, ofp_desc
+
+
+class AdtranOltHandler(AdtranDeviceHandler):
+ """
+ The OLT Handler is used to wrap a single instance of a 10G OLT 1-U pizza-box
+ """
+ MIN_OLT_HW_VERSION = datetime.datetime(2017, 1, 5)
+
+ # Full table output
+
+ GPON_OLT_HW_URI = '/restconf/data/gpon-olt-hw'
+ GPON_OLT_HW_STATE_URI = GPON_OLT_HW_URI + ':olt-state'
+ GPON_OLT_HW_CONFIG_URI = GPON_OLT_HW_URI + ':olt'
+ GPON_PON_CONFIG_LIST_URI = GPON_OLT_HW_CONFIG_URI + '/pon'
+
+ # Per-PON info
+
+ GPON_PON_STATE_URI = GPON_OLT_HW_STATE_URI + '/pon={}' # .format(pon-id)
+ GPON_PON_CONFIG_URI = GPON_PON_CONFIG_LIST_URI + '={}' # .format(pon-id)
+
+ GPON_ONU_CONFIG_LIST_URI = GPON_PON_CONFIG_URI + '/onus/onu' # .format(pon-id)
+ GPON_ONU_CONFIG_URI = GPON_ONU_CONFIG_LIST_URI + '={}' # .format(pon-id,onu-id)
+
+ GPON_TCONT_CONFIG_LIST_URI = GPON_ONU_CONFIG_URI + '/t-conts/t-cont' # .format(pon-id,onu-id)
+ GPON_TCONT_CONFIG_URI = GPON_TCONT_CONFIG_LIST_URI + '={}' # .format(pon-id,onu-id,alloc-id)
+
+ GPON_GEM_CONFIG_LIST_URI = GPON_ONU_CONFIG_URI + '/gem-ports/gem-port' # .format(pon-id,onu-id)
+ GPON_GEM_CONFIG_URI = GPON_GEM_CONFIG_LIST_URI + '={}' # .format(pon-id,onu-id,gem-id)
+
+ GPON_PON_DISCOVER_ONU = '/restconf/operations/gpon-olt-hw:discover-onu'
+
+ BASE_ONU_OFFSET = 64
+
+    def __init__(self, **kwargs):
+        """
+        Create the handler for one physical Adtran OLT.
+
+        :param kwargs: forwarded unchanged to AdtranDeviceHandler.__init__
+        """
+        super(AdtranOltHandler, self).__init__(**kwargs)
+
+        # Periodic PON status poll (see poll_for_status); the skew is used to
+        # randomize the reschedule delay so polls do not run in lock-step.
+        self.status_poll = None
+        self.status_poll_interval = 5.0
+        self.status_poll_skew = self.status_poll_interval / 10
+        # ZeroMQ clients toward the OLT's PON agent and packet-I/O agent
+        # (created in _zmq_startup, torn down in _zmq_shutdown).
+        self._pon_agent = None
+        self._pio_agent = None
+        self._ssh_deferred = None
+        self._system_id = None
+        # Image-download support; the protocol list is fetched asynchronously
+        # by _get_download_protocols.
+        self._download_protocols = None
+        self._download_deferred = None
+        self._downloads = {}  # name -> Download obj
+        # Latest EVC-MAP exception map reported by the OLT (see rx_pio_packet)
+        self._pio_exception_map = []
+
+        self.downstream_shapping_supported = True  # 1971320F1-ML-4154 and later
+
+        # FIXME: Remove once we containerize. Only exists to keep BroadCom OpenOMCI ONU Happy
+        # when it asks this handler for a UNI port number
+        self.platform_class = None
+
+        # To keep broadcom ONU happy
+        self.platform = platform()  # TODO: Remove once tech-profiles & containerization are done !!!
+
+ def __del__(self):
+ # OLT Specific things here.
+ #
+ # If you receive this during 'enable' of the object, you probably threw an
+ # uncaught exception which triggered an errback in the VOLTHA core.
+ d, self.status_poll = self.status_poll, None
+
+ # Clean up base class as well
+ AdtranDeviceHandler.__del__(self)
+
+ def _cancel_deferred(self):
+ d1, self.status_poll = self.status_poll, None
+ d2, self._ssh_deferred = self._ssh_deferred, None
+ d3, self._download_deferred = self._download_deferred, None
+
+ for d in [d1, d2, d3]:
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except:
+ pass
+
+ def __str__(self):
+ return "AdtranOltHandler: {}".format(self.ip_address)
+
+ @property
+ def system_id(self):
+ return self._system_id
+
+    @system_id.setter
+    def system_id(self, value):
+        # Only push a change to the hardware when the value actually differs.
+        if self._system_id != value:
+            self._system_id = value
+
+            # PATCH the olt-id leaf via RESTCONF. NOTE(review): the returned
+            # Deferred is discarded (fire-and-forget), so request failures are
+            # not observed here.
+            data = json.dumps({'olt-id': str(value)})
+            uri = AdtranOltHandler.GPON_OLT_HW_CONFIG_URI
+            self.rest_client.request('PATCH', uri, data=data, name='olt-system-id')
+
+ @inlineCallbacks
+ def get_device_info(self, device):
+ """
+ Perform an initial network operation to discover the device hardware
+ and software version. Serial Number would be helpful as well.
+
+ Upon successfully retrieving the information, remember to call the
+ 'start_heartbeat' method to keep in contact with the device being managed
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions. Such extensions shall be described as part of
+ the device type specification returned by device_types().
+ """
+ from codec.physical_entities_state import PhysicalEntitiesState
+ # TODO: After a CLI 'reboot' command, the device info may get messed up (prints labels and not values)
+ # # Enter device and type 'show'
+ device = {
+ 'model': 'n/a',
+ 'hardware_version': 'unknown',
+ 'serial_number': 'unknown',
+ 'vendor': 'ADTRAN, Inc.',
+ 'firmware_version': 'unknown',
+ 'running-revision': 'unknown',
+ 'candidate-revision': 'unknown',
+ 'startup-revision': 'unknown',
+ 'software-images': []
+ }
+ if self.is_virtual_olt:
+ returnValue(device)
+
+ try:
+ pe_state = PhysicalEntitiesState(self.netconf_client)
+ self.startup = pe_state.get_state()
+ results = yield self.startup
+
+ if results.ok:
+ modules = pe_state.get_physical_entities('adtn-phys-mod:module')
+
+ if isinstance(modules, list):
+ module = modules[0]
+
+ name = str(module.get('model-name', 'n/a')).translate(None, '?')
+ model = str(module.get('model-number', 'n/a')).translate(None, '?')
+
+ device['model'] = '{} - {}'.format(name, model) if len(name) > 0 else \
+ module.get('parent-entity', 'n/a')
+ device['hardware_version'] = str(module.get('hardware-revision',
+ 'n/a')).translate(None, '?')
+ device['serial_number'] = str(module.get('serial-number',
+ 'n/a')).translate(None, '?')
+ if 'software' in module:
+ if 'software' in module['software']:
+ software = module['software']['software']
+ if isinstance(software, dict):
+ device['running-revision'] = str(software.get('running-revision',
+ 'n/a')).translate(None, '?')
+ device['candidate-revision'] = str(software.get('candidate-revision',
+ 'n/a')).translate(None, '?')
+ device['startup-revision'] = str(software.get('startup-revision',
+ 'n/a')).translate(None, '?')
+ elif isinstance(software, list):
+ for sw_item in software:
+ sw_type = sw_item.get('name', '').lower()
+ if sw_type == 'firmware':
+ device['firmware_version'] = str(sw_item.get('running-revision',
+ 'unknown')).translate(None, '?')
+ elif sw_type == 'software':
+ for rev_type in ['startup-revision',
+ 'running-revision',
+ 'candidate-revision']:
+ if rev_type in sw_item:
+ image = Image(name=rev_type,
+ version=sw_item[rev_type],
+ is_active=(rev_type == 'running-revision'),
+ is_committed=True,
+ is_valid=True,
+ install_datetime='Not Available',
+ hash='Not Available')
+ device['software-images'].append(image)
+
+ # Update features based on version
+ # Format expected to be similar to: 1971320F1-ML-4154
+
+ running_version = next((image.version for image in device.get('software-images', list())
+ if image.is_active), '').split('-')
+ if len(running_version) > 2:
+ try:
+ self.downstream_shapping_supported = int(running_version[-1]) >= 4154
+ except ValueError:
+ pass
+
+ except Exception as e:
+ self.log.exception('dev-info-failure', e=e)
+ raise
+
+ returnValue(device)
+
+ def initialize_resource_manager(self):
+ # Initialize the resource and tech profile managers
+ extra_args = '--olt_model {}'.format(self.resource_manager_key)
+ self.resource_mgr = AdtranOltResourceMgr(self.device_id,
+ self.host_and_port,
+ extra_args,
+ self.default_resource_mgr_device_info)
+ self._populate_tech_profile_per_pon_port()
+
+ @property
+ def default_resource_mgr_device_info(self):
+ class AdtranOltDevInfo(object):
+ def __init__(self, pon_ports):
+ self.technology = "xgspon"
+ self.onu_id_start = 0
+ self.onu_id_end = platform.MAX_ONUS_PER_PON
+ self.alloc_id_start = platform.MIN_TCONT_ALLOC_ID
+ self.alloc_id_end = platform.MAX_TCONT_ALLOC_ID
+ self.gemport_id_start = platform.MIN_GEM_PORT_ID
+ self.gemport_id_end = platform.MAX_GEM_PORT_ID
+ self.pon_ports = len(pon_ports)
+ self.max_tconts = platform.MAX_TCONTS_PER_ONU
+ self.max_gem_ports = platform.MAX_GEM_PORTS_PER_ONU
+ self.intf_ids = pon_ports.keys() # PON IDs
+
+ return AdtranOltDevInfo(self.southbound_ports)
+
+ def _populate_tech_profile_per_pon_port(self):
+ self.tech_profiles = {intf_id: self.resource_mgr.resource_managers[intf_id].tech_profile
+ for intf_id in self.resource_mgr.device_info.intf_ids}
+
+ # Make sure we have as many tech_profiles as there are pon ports on
+ # the device
+ assert len(self.tech_profiles) == self.resource_mgr.device_info.pon_ports
+
+ def get_tp_path(self, intf_id, ofp_port_name):
+ # TODO: Should get Table id form the flow, as of now hardcoded to DEFAULT_TECH_PROFILE_TABLE_ID (64)
+ # 'tp_path' contains the suffix part of the tech_profile_instance path.
+ # The prefix to the 'tp_path' should be set to \
+ # TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX by the ONU adapter.
+ return self.tech_profiles[intf_id].get_tp_path(DEFAULT_TECH_PROFILE_TABLE_ID,
+ ofp_port_name)
+
+ def delete_tech_profile_instance(self, intf_id, onu_id, logical_port):
+ # Remove the TP instance associated with the ONU
+ ofp_port_name = self.get_ofp_port_name(intf_id, onu_id, logical_port)
+ tp_path = self.get_tp_path(intf_id, ofp_port_name)
+ return self.tech_profiles[intf_id].delete_tech_profile_instance(tp_path)
+
+ def get_ofp_port_name(self, pon_id, onu_id, logical_port_number):
+ parent_port_no = self.pon_id_to_port_number(pon_id)
+ child_device = self.adapter_agent.get_child_device(self.device_id,
+ parent_port_no=parent_port_no, onu_id=onu_id)
+ if child_device is None:
+ self.log.error("could-not-find-child-device", parent_port_no=pon_id, onu_id=onu_id)
+ return None, None
+
+ ports = self.adapter_agent.get_ports(child_device.id, Port.ETHERNET_UNI)
+ port = next((port for port in ports if port.port_no == logical_port_number), None)
+ logical_port = self.adapter_agent.get_logical_port(self.logical_device_id,
+ port.label)
+ ofp_port_name = (logical_port.ofp_port.name, logical_port.ofp_port.port_no)
+
+ return ofp_port_name
+
+ @inlineCallbacks
+ def enumerate_northbound_ports(self, device):
+ """
+ Enumerate all northbound ports of this device.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :return: (Deferred or None).
+ """
+ try:
+ # Also get the MAC Address for the OLT
+ command = "ip link | grep -A1 eth0 | sed -n -e 's/^.*ether //p' | awk '{ print $1 }'"
+ rcmd = RCmd(self.ip_address, self.netconf_username, self.netconf_password,
+ command)
+ address = yield rcmd.execute()
+ self.mac_address = address.replace('\n', '')
+ self.log.info("mac-addr", mac_addr=self.mac_address)
+
+ except Exception as e:
+ log.exception('mac-address', e=e)
+ raise
+
+ try:
+ from codec.ietf_interfaces import IetfInterfacesState
+ from nni_port import MockNniPort
+
+ ietf_interfaces = IetfInterfacesState(self.netconf_client)
+
+ if self.is_virtual_olt:
+ results = MockNniPort.get_nni_port_state_results()
+ else:
+ self.startup = ietf_interfaces.get_state()
+ results = yield self.startup
+
+ ports = ietf_interfaces.get_port_entries(results, 'ethernet')
+ returnValue(ports)
+
+ except Exception as e:
+ log.exception('enumerate_northbound_ports', e=e)
+ raise
+
+    def process_northbound_ports(self, device, results):
+        """
+        Process the results from the 'enumerate_northbound_ports' method.
+
+        :param device: A voltha.Device object, with possible device-type
+                       specific extensions.
+        :param results: Results from the 'enumerate_northbound_ports' method that
+                        you implemented. The type and contents are up to you to
+        :return: (Deferred or None).
+        """
+        from nni_port import NniPort, MockNniPort
+
+        for port in results.itervalues():
+            port_no = port.get('port_no')
+            assert port_no, 'Port number not found'
+
+            # May already exist if device was not fully reachable when first enabled
+            if port_no not in self.northbound_ports:
+                # NOTE(review): 'name=port['port_no']' logs the port number twice;
+                # presumably 'port['name']' was intended -- confirm before changing.
+                self.log.info('processing-nni', port_no=port_no, name=port['port_no'])
+                self.northbound_ports[port_no] = NniPort(self, **port) if not self.is_virtual_olt \
+                    else MockNniPort(self, **port)
+
+            if len(self.northbound_ports) >= self.max_nni_ports:  # TODO: For now, limit number of NNI ports to make debugging easier
+                break
+
+        self.num_northbound_ports = len(self.northbound_ports)
+
+ def _olt_version(self):
+ # Version
+ # 0 Unknown
+ # 1 V1 OMCI format
+ # 2 V2 OMCI format
+ # 3 2018-01-11 or later
+ version = 0
+ info = self._rest_support.get('module-info', [dict()])
+ hw_mod_ver_str = next((mod.get('revision') for mod in info
+ if mod.get('module-name', '').lower() == 'gpon-olt-hw'), None)
+
+ if hw_mod_ver_str is not None:
+ try:
+ from datetime import datetime
+ hw_mod_dt = datetime.strptime(hw_mod_ver_str, '%Y-%m-%d')
+ version = 2 if hw_mod_dt >= datetime(2017, 9, 21) else 2
+
+ except Exception as e:
+ self.log.exception('ver-str-check', e=e)
+
+ return version
+
+ @inlineCallbacks
+ def enumerate_southbound_ports(self, device):
+ """
+ Enumerate all southbound ports of this device.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :return: (Deferred or None).
+ """
+ ###############################################################################
+ # Determine number of southbound ports. We know it is 16, but this keeps this
+ # device adapter generic for our other OLTs up to this point.
+
+ self.startup = self.rest_client.request('GET', self.GPON_PON_CONFIG_LIST_URI,
+ 'pon-config')
+ try:
+ from codec.ietf_interfaces import IetfInterfacesState
+ from nni_port import MockNniPort
+
+ results = yield self.startup
+
+ ietf_interfaces = IetfInterfacesState(self.netconf_client)
+
+ if self.is_virtual_olt:
+ nc_results = MockNniPort.get_pon_port_state_results()
+ else:
+ self.startup = ietf_interfaces.get_state()
+ nc_results = yield self.startup
+
+ ports = ietf_interfaces.get_port_entries(nc_results, 'xpon')
+ if len(ports) == 0:
+ ports = ietf_interfaces.get_port_entries(nc_results,
+ 'channel-termination')
+ for data in results:
+ pon_id = data['pon-id']
+ port = ports[pon_id + 1]
+ port['pon-id'] = pon_id
+ port['admin_state'] = AdminState.ENABLED \
+ if data.get('enabled', True)\
+ else AdminState.DISABLED
+
+ except Exception as e:
+ log.exception('enumerate_southbound_ports', e=e)
+ raise
+
+ returnValue(ports)
+
+ def process_southbound_ports(self, device, results):
+ """
+ Process the results from the 'enumerate_southbound_ports' method.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions.
+ :param results: Results from the 'enumerate_southbound_ports' method that
+ you implemented. The type and contents are up to you to
+ :return: (Deferred or None).
+ """
+ from pon_port import PonPort
+
+ for pon in results.itervalues():
+ pon_id = pon.get('pon-id')
+ assert pon_id is not None, 'PON ID not found'
+ if pon['ifIndex'] is None:
+ pon['port_no'] = self.pon_id_to_port_number(pon_id)
+ else:
+ pass # Need to adjust ONU numbering !!!!
+
+ # May already exist if device was not fully reachable when first enabled
+ if pon_id not in self.southbound_ports:
+ self.southbound_ports[pon_id] = PonPort(self, **pon)
+
+ self.num_southbound_ports = len(self.southbound_ports)
+
+    def pon(self, pon_id):
+        """Return the southbound PonPort for *pon_id*, or None if unknown."""
+        return self.southbound_ports.get(pon_id)
+
+ def complete_device_specific_activation(self, device, reconciling):
+ """
+ Perform an initial network operation to discover the device hardware
+ and software version. Serial Number would be helpful as well.
+
+ This method is called from within the base class's activate generator.
+
+ :param device: A voltha.Device object, with possible device-type
+ specific extensions. Such extensions shall be described as part of
+ the device type specification returned by device_types().
+
+ :param reconciling: (boolean) True if taking over for another VOLTHA
+ """
+ # ZeroMQ clients
+ self._zmq_startup()
+
+ # Download support
+ self._download_deferred = reactor.callLater(0, self._get_download_protocols)
+
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+
+ # PON Status
+ self.status_poll = reactor.callLater(5, self.poll_for_status)
+ return succeed('Done')
+
+    def on_heatbeat_alarm(self, active):
+        # NOTE(review): method name is misspelled ('heatbeat' vs 'heartbeat')
+        # but is presumably invoked by name from the heartbeat machinery in the
+        # base class -- confirm the caller before renaming.
+        # When the heartbeat alarm clears (active == False), re-verify that the
+        # OLT's PON agent is reachable/listening.
+        if not active:
+            self.ready_network_access()
+
+ @inlineCallbacks
+ def _get_download_protocols(self):
+ if self._download_protocols is None:
+ try:
+ config = '<filter>' + \
+ '<file-servers-state xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">' + \
+ '<profiles>' + \
+ '<supported-protocol/>' + \
+ '</profiles>' + \
+ '</file-servers-state>' + \
+ '</filter>'
+
+ results = yield self.netconf_client.get(config)
+
+ result_dict = xmltodict.parse(results.data_xml)
+ entries = result_dict['data']['file-servers-state']['profiles']['supported-protocol']
+ self._download_protocols = [entry['#text'].split(':')[-1] for entry in entries
+ if '#text' in entry]
+
+ except Exception as e:
+ self.log.exception('protocols', e=e)
+ self._download_protocols = None
+ self._download_deferred = reactor.callLater(10, self._get_download_protocols)
+
+ @inlineCallbacks
+ def ready_network_access(self):
+ # Check for port status
+ command = 'netstat -pan | grep -i 0.0.0.0:{} | wc -l'.format(self.pon_agent_port)
+ rcmd = RCmd(self.ip_address, self.netconf_username, self.netconf_password, command)
+
+ try:
+ self.log.debug('check-request', command=command)
+ results = yield rcmd.execute()
+ self.log.info('check-results', results=results, result_type=type(results))
+ create_it = int(results) != 1
+
+ except Exception as e:
+ self.log.exception('find', e=e)
+ create_it = True
+
+ if create_it:
+ def v1_method():
+ command = 'mkdir -p /etc/pon_agent; touch /etc/pon_agent/debug.conf; '
+ command += 'ps -ae | grep -i ngpon2_agent; '
+ command += 'service_supervisor stop ngpon2_agent; service_supervisor start ngpon2_agent; '
+ command += 'ps -ae | grep -i ngpon2_agent'
+
+ self.log.debug('create-request', command=command)
+ return RCmd(self.ip_address, self.netconf_username, self.netconf_password, command)
+
+ def v2_v3_method():
+ # Old V2 method
+ # For V2 images, want -> export ZMQ_LISTEN_ON_ANY_ADDRESS=1
+ # For V3+ images, want -> export AGENT_LISTEN_ON_ANY_ADDRESS=1
+
+ # V3 unifies listening port, compatible with v2
+ cmd = "sed --in-place '/add feature/aexport ZMQ_LISTEN_ON_ANY_ADDRESS=1' " \
+ "/etc/ngpon2_agent/ngpon2_agent_feature_flags; "
+ cmd += "sed --in-place '/add feature/aexport AGENT_LISTEN_ON_ANY_ADDRESS=1' " \
+ "/etc/ngpon2_agent/ngpon2_agent_feature_flags; "
+
+ # Note: 'ps' commands are to help decorate the logfile with useful info
+ cmd += 'ps -ae | grep -i ngpon2_agent; '
+ cmd += 'service_supervisor stop ngpon2_agent; service_supervisor start ngpon2_agent; '
+ cmd += 'ps -ae | grep -i ngpon2_agent'
+
+ self.log.debug('create-request', command=cmd)
+ return RCmd(self.ip_address, self.netconf_username, self.netconf_password, cmd)
+
+ # Look for version
+ next_run = 15
+ version = v2_v3_method # NOTE: Only v2 or later supported.
+
+ if version is not None:
+ try:
+ rcmd = version()
+ results = yield rcmd.execute()
+ self.log.info('create-results', results=results, result_type=type(results))
+
+ except Exception as e:
+ self.log.exception('mkdir-and-restart', e=e)
+ else:
+ next_run = 0
+
+ if next_run > 0:
+ self._ssh_deferred = reactor.callLater(next_run, self.ready_network_access)
+
+ returnValue('retrying' if next_run > 0 else 'ready')
+
+ def _zmq_startup(self):
+ # ZeroMQ clients
+ self._pon_agent = PonClient(self.ip_address,
+ port=self.pon_agent_port,
+ rx_callback=self.rx_pa_packet)
+
+ try:
+ self._pio_agent = PioClient(self.ip_address,
+ port=self.pio_port,
+ rx_callback=self.rx_pio_packet)
+ except Exception as e:
+ self._pio_agent = None
+ self.log.exception('pio-agent', e=e)
+
+ def _zmq_shutdown(self):
+ pon, self._pon_agent = self._pon_agent, None
+ pio, self._pio_agent = self._pio_agent, None
+
+ for c in [pon, pio]:
+ if c is not None:
+ try:
+ c.shutdown()
+ except:
+ pass
+
+ def _unregister_for_inter_adapter_messages(self):
+ try:
+ self.adapter_agent.unregister_for_inter_adapter_messages()
+ except:
+ pass
+
+ def disable(self):
+ self._cancel_deferred()
+
+ # Drop registration for adapter messages
+ self._unregister_for_inter_adapter_messages()
+ self._zmq_shutdown()
+ self._pio_exception_map = []
+
+ super(AdtranOltHandler, self).disable()
+
+ def reenable(self, done_deferred=None):
+ super(AdtranOltHandler, self).reenable(done_deferred=done_deferred)
+
+ # Only do the re-enable if we fully came up on the very first enable attempt.
+ # If we had not, the base class will have initiated the 'activate' for us
+
+ if self._initial_enable_complete:
+ self._zmq_startup()
+ self.adapter_agent.register_for_inter_adapter_messages()
+ self.status_poll = reactor.callLater(1, self.poll_for_status)
+
+ def reboot(self):
+ if not self._initial_enable_complete:
+ # Never contacted the device on the initial startup, do 'activate' steps instead
+ return
+
+ self._cancel_deferred()
+
+ # Drop registration for adapter messages
+ self._unregister_for_inter_adapter_messages()
+ self._zmq_shutdown()
+
+ # Download supported protocols may change (if new image gets activated)
+ self._download_protocols = None
+
+ super(AdtranOltHandler, self).reboot()
+
+ def _finish_reboot(self, timeout, previous_oper_status, previous_conn_status):
+ super(AdtranOltHandler, self)._finish_reboot(timeout, previous_oper_status, previous_conn_status)
+
+ self.ready_network_access()
+
+ # Download support
+ self._download_deferred = reactor.callLater(0, self._get_download_protocols)
+
+ # Register for adapter messages
+ self.adapter_agent.register_for_inter_adapter_messages()
+ self._zmq_startup()
+
+ self.status_poll = reactor.callLater(5, self.poll_for_status)
+
+ def delete(self):
+ self._cancel_deferred()
+
+ # Drop registration for adapter messages
+ self._unregister_for_inter_adapter_messages()
+ self._zmq_shutdown()
+
+ super(AdtranOltHandler, self).delete()
+
+ def rx_pa_packet(self, packets):
+ if self._pon_agent is not None:
+ for packet in packets:
+ try:
+ pon_id, onu_id, msg_bytes, is_omci = self._pon_agent.decode_packet(packet)
+
+ if is_omci:
+ proxy_address = self._pon_onu_id_to_proxy_address(pon_id, onu_id)
+
+ if proxy_address is not None:
+ self.adapter_agent.receive_proxied_message(proxy_address, msg_bytes)
+
+ except Exception as e:
+ self.log.exception('rx-pon-agent-packet', e=e)
+
+ def _compute_logical_port_no(self, port_no, evc_map, packet):
+ logical_port_no = None
+
+ # Upstream direction?
+ if self.is_pon_port(port_no):
+ #TODO: Validate the evc-map name
+ from python.adapters.adtran.adtran_common.flow.evc_map import EVCMap
+ map_info = EVCMap.decode_evc_map_name(evc_map)
+ logical_port_no = int(map_info.get('ingress-port'))
+
+ if logical_port_no is None:
+ # Get PON
+ pon = self.get_southbound_port(port_no)
+
+ # Examine Packet and decode gvid
+ if packet is not None:
+ pass
+
+ elif self.is_nni_port(port_no):
+ nni = self.get_northbound_port(port_no)
+ logical_port = nni.get_logical_port() if nni is not None else None
+ logical_port_no = logical_port.ofp_port.port_no if logical_port is not None else None
+
+ # TODO: Need to decode base on port_no & evc_map
+ return logical_port_no
+
+ def rx_pio_packet(self, packets):
+ self.log.debug('rx-packet-in', type=type(packets), data=packets)
+ assert isinstance(packets, list), 'Expected a list of packets'
+
+ # TODO self._pio_agent.socket.socket.closed might be a good check here as well
+ if self.logical_device_id is not None and self._pio_agent is not None:
+ for packet in packets:
+ url_type = self._pio_agent.get_url_type(packet)
+ if url_type == PioClient.UrlType.EVCMAPS_RESPONSE:
+ exception_map = self._pio_agent.decode_query_response_packet(packet)
+ self.log.debug('rx-pio-packet', exception_map=exception_map)
+ # update latest pio exception map
+ self._pio_exception_map = exception_map
+
+ elif url_type == PioClient.UrlType.PACKET_IN:
+ try:
+ from scapy.layers.l2 import Ether, Dot1Q
+ ifindex, evc_map, packet = self._pio_agent.decode_packet(packet)
+
+ # convert ifindex to physical port number
+ # pon port numbers start at 60001 and end at 600016 (16 pons)
+ if ifindex > 60000 and ifindex < 60017:
+ port_no = (ifindex - 60000) + 4
+ # nni port numbers start at 1401 and end at 1404 (16 nnis)
+ elif ifindex > 1400 and ifindex < 1405:
+ port_no = ifindex - 1400
+ else:
+ raise ValueError('Unknown physical port. ifindex: {}'.format(ifindex))
+
+ logical_port_no = self._compute_logical_port_no(port_no, evc_map, packet)
+
+ if logical_port_no is not None:
+ if self.is_pon_port(port_no) and packet.haslayer(Dot1Q):
+ # Scrub g-vid
+ inner_pkt = packet.getlayer(Dot1Q)
+ assert inner_pkt.haslayer(Dot1Q), 'Expected a C-Tag'
+ packet = Ether(src=packet.src, dst=packet.dst, type=inner_pkt.type)\
+ / inner_pkt.payload
+
+ self.adapter_agent.send_packet_in(logical_device_id=self.logical_device_id,
+ logical_port_no=logical_port_no,
+ packet=str(packet))
+ else:
+ self.log.warn('logical-port-not-found', port_no=port_no, evc_map=evc_map)
+
+ except Exception as e:
+ self.log.exception('rx-pio-packet', e=e)
+
+ else:
+ self.log.warn('packet-in-unknown-url-type', url_type=url_type)
+
+    def packet_out(self, egress_port, msg):
+        """
+        Pass a packet_out message content to adapter so that it can forward it
+        out to the device. This is only called on root devices.
+
+        :param egress_port: egress logical port number
+        :param msg: actual message
+        :return: None """
+
+        if self.pio_port is not None:
+            from scapy.layers.l2 import Ether, Dot1Q
+            from scapy.layers.inet import UDP
+
+            self.log.debug('sending-packet-out', egress_port=egress_port,
+                           msg=hexify(msg))
+            pkt = Ether(msg)
+
+            # Remove any extra tags
+            # Strips one 802.1Q tag per iteration by deleting 4 bytes (8 hex
+            # chars) at the tag offset of the raw hex dump, then re-parsing.
+            while pkt.type == 0x8100:
+                msg_hex = hexify(msg)
+                msg_hex = msg_hex[:24] + msg_hex[32:]
+                bytes = []
+                msg_hex = ''.join(msg_hex.split(" "))
+                for i in range(0, len(msg_hex), 2):
+                    bytes.append(chr(int(msg_hex[i:i+2], 16)))
+
+                msg = ''.join(bytes)
+                pkt = Ether(msg)
+
+            if self._pio_agent is not None:
+                # Look up the packet-out flow info for this egress logical port
+                port, ctag, vlan_id, evcmapname = FlowEntry.get_packetout_info(self, egress_port)
+                exceptiontype = None
+                if pkt.type == FlowEntry.EtherType.EAPOL:
+                    exceptiontype = 'eapol'
+                    ctag = self.utility_vlan
+                elif pkt.type == 2:
+                    # NOTE(review): EtherType 2 is not a standard IGMP match
+                    # (IGMP is IP protocol 2, not an EtherType) -- confirm this
+                    # branch is ever taken / intended.
+                    exceptiontype = 'igmp'
+                elif pkt.type == FlowEntry.EtherType.IPv4:
+                    # DHCP server->client (sport 67 -> dport 68)
+                    if UDP in pkt and pkt[UDP].sport == 67 and pkt[UDP].dport == 68:
+                        exceptiontype = 'dhcp'
+
+                if exceptiontype is None:
+                    self.log.warn('packet-out-exceptiontype-unknown', eEtherType=pkt.type)
+
+                elif port is not None and ctag is not None and vlan_id is not None and \
+                        evcmapname is not None and self.pio_exception_exists(evcmapname, exceptiontype):
+
+                    self.log.debug('sending-pio-packet-out', port=port, ctag=ctag, vlan_id=vlan_id,
+                                   evcmapname=evcmapname, exceptiontype=exceptiontype)
+                    # Re-tag as S-tag(vlan_id)/C-tag(ctag) before handing to the
+                    # packet-I/O agent
+                    out_pkt = (
+                        Ether(src=pkt.src, dst=pkt.dst) /
+                        Dot1Q(vlan=vlan_id) /
+                        Dot1Q(vlan=ctag, type=pkt.type) /
+                        pkt.payload
+                    )
+                    data = self._pio_agent.encode_packet(port, str(out_pkt), evcmapname, exceptiontype)
+                    self.log.debug('pio-packet-out', message=data)
+                    try:
+                        self._pio_agent.send(data)
+
+                    except Exception as e:
+                        self.log.exception('pio-send', egress_port=egress_port, e=e)
+                else:
+                    self.log.warn('packet-out-flow-not-found', egress_port=egress_port)
+
+ def pio_exception_exists(self, name, exp):
+ # verify exception is in the OLT's reported exception map for this evcmap name
+ if exp is None:
+ return False
+ entry = next((entry for entry in self._pio_exception_map if entry['evc-map-name'] == name), None)
+ if entry is None:
+ return False
+ if exp not in entry['exception-types']:
+ return False
+ return True
+
+ def send_packet_exceptions_request(self):
+ if self._pio_agent is not None:
+ request = self._pio_agent.query_request_packet()
+ try:
+ self._pio_agent.send(request)
+
+ except Exception as e:
+ self.log.exception('pio-send', e=e)
+
+    def poll_for_status(self):
+        """
+        Kick off one PON status poll cycle.
+
+        Issues a RESTCONF GET of the OLT hardware state when the device is
+        enabled, past activation, and a REST client exists; otherwise arranges
+        for status_poll_complete('inactive') so the poll loop keeps running.
+        """
+        self.log.debug('Initiating-status-poll')
+
+        device = self.adapter_agent.get_device(self.device_id)
+
+        if device.admin_state == AdminState.ENABLED and\
+                device.oper_status != OperStatus.ACTIVATING and\
+                self.rest_client is not None:
+            uri = AdtranOltHandler.GPON_OLT_HW_STATE_URI
+            name = 'pon-status-poll'
+            self.status_poll = self.rest_client.request('GET', uri, name=name)
+            # addBoth: status_poll_complete handles success and failure alike
+            self.status_poll.addBoth(self.status_poll_complete)
+        else:
+            self.status_poll = reactor.callLater(0, self.status_poll_complete, 'inactive')
+
+    def status_poll_complete(self, results):
+        """
+        Results of the status poll
+
+        Because poll_for_status attaches this via addBoth, *results* may be a
+        decoded state dict, the string 'inactive', or a twisted Failure; the
+        isinstance check below processes only real state dicts. Always
+        reschedules the next poll.
+
+        :param results:
+        """
+        from pon_port import PonPort
+
+        if isinstance(results, dict) and 'pon' in results:
+            try:
+                self.log.debug('status-success')
+                for pon_id, pon in OltState(results).pons.iteritems():
+                    pon_port = self.southbound_ports.get(pon_id, None)
+
+                    # Only feed status into ports that are fully running
+                    if pon_port is not None and pon_port.state == PonPort.State.RUNNING:
+                        pon_port.process_status_poll(pon)
+
+            except Exception as e:
+                self.log.exception('PON-status-poll', e=e)
+
+        # Reschedule
+
+        # +/-10% jitter keeps multiple OLT handlers from polling in lock-step
+        delay = self.status_poll_interval
+        delay += random.uniform(-delay / 10, delay / 10)
+
+        self.status_poll = reactor.callLater(delay, self.poll_for_status)
+
def _create_utility_flow(self):
    # Build the special downstream flow that forwards utility-VLAN traffic
    # (exception/helper packets) from the NNI port to the PON port.
    # NOTE(review): assumes NNI port 1 and PON port 0 exist -- confirm
    nni_port = self.northbound_ports.get(1).port_no
    pon_port = self.southbound_ports.get(0).port_no

    return mk_flow_stat(
        priority=200,
        match_fields=[
            in_port(nni_port),
            # OFPVID_PRESENT bit must be set when matching a VLAN ID (OF 1.3)
            vlan_vid(ofp.OFPVID_PRESENT + self.utility_vlan)
        ],
        actions=[output(pon_port)]
    )
+
@inlineCallbacks
def update_flow_table(self, flows, device):
    """
    Update the flow table on the OLT. If an existing flow is not in the list, it needs
    to be removed from the device.

    :param flows: List of flows that should be installed upon completion of this function
    :param device: A voltha.Device object, with possible device-type
                   specific extensions.
    """
    self.log.debug('bulk-flow-update', num_flows=len(flows),
                   device_id=device.id, flows=flows)

    # Flow IDs that survive this update; anything not listed here is
    # dropped from the device at the end
    valid_flows = []

    if flows:
        # Special helper egress Packet In/Out flows
        special_flow = self._create_utility_flow()
        valid_flow, evc = FlowEntry.create(special_flow, self)

        if valid_flow is not None:
            valid_flows.append(valid_flow.flow_id)

        if evc is not None:
            try:
                evc.schedule_install()
                self.add_evc(evc)

            except Exception as e:
                # NOTE: e.message is Python 2 only
                evc.status = 'EVC Install Exception: {}'.format(e.message)
                self.log.exception('EVC-install', e=e)

        # verify exception flows were installed by OLT PET process
        reactor.callLater(5, self.send_packet_exceptions_request)

        # Now process bulk flows
        for flow in flows:
            try:
                # Try to create an EVC.
                #
                # The first result is the flow entry that was created. This could be a match to an
                # existing flow since it is a bulk update. None is returned only if no match to
                # an existing entry is found and decode failed (unsupported field)
                #
                # The second result is the EVC this flow should be added to. This could be an
                # existing flow (so your adding another EVC-MAP) or a brand new EVC (no existing
                # EVC-MAPs). None is returned if there are not a valid EVC that can be created YET.

                valid_flow, evc = FlowEntry.create(flow, self)

                if valid_flow is not None:
                    valid_flows.append(valid_flow.flow_id)

                if evc is not None:
                    try:
                        evc.schedule_install()
                        self.add_evc(evc)

                    except Exception as e:
                        evc.status = 'EVC Install Exception: {}'.format(e.message)
                        self.log.exception('EVC-install', e=e)

            except Exception as e:
                # Keep going: one bad flow must not abort the bulk update
                self.log.exception('bulk-flow-update-add', e=e)

    # Now drop all flows from this device that were not in this bulk update
    try:
        yield FlowEntry.drop_missing_flows(self, valid_flows)

    except Exception as e:
        self.log.exception('bulk-flow-update-remove', e=e)
+
def remove_from_flow_table(self, _flows):
    """
    Remove flows from the device

    Incremental flow removal is not supported by this handler; flow
    changes arrive via bulk updates (update_flow_table).

    :param _flows: (list) Flows
    :raises NotImplementedError: always
    """
    raise NotImplementedError
+
def add_to_flow_table(self, _flows):
    """
    Add flows to the device

    (Docstring previously said "Remove flows" -- copy/paste error.)
    Incremental flow addition is not supported by this handler; flow
    changes arrive via bulk updates (update_flow_table).

    :param _flows: (list) Flows
    :raises NotImplementedError: always
    """
    raise NotImplementedError
+
def get_ofp_device_info(self, device):
    """
    Retrieve the OLT device info. This includes the ofp_desc and
    ofp_switch_features. The existing ofp structures can be used,
    or all the attributes get added to the Device definition or a new proto
    definition gets created. This API will allow the Core to create a
    LogicalDevice associated with this device (OLT only).
    :param device: device
    :return: Proto Message (TBD)
    """
    from pyvoltha.protos.inter_container_pb2 import SwitchCapability
    # Advertise the version of the first software image as the switch version
    version = device.images.image[0].version

    return SwitchCapability(
        desc=ofp_desc(mfr_desc='VOLTHA Project',
                      hw_desc=device.hardware_version,
                      sw_desc=version,
                      serial_num=device.serial_number,
                      dp_desc='n/a'),
        switch_features=ofp_switch_features(n_buffers=256,  # TODO fake for now
                                            n_tables=2,  # TODO ditto
                                            capabilities=(  # TODO and ditto
                                                OFPC_FLOW_STATS |
                                                OFPC_TABLE_STATS |
                                                OFPC_PORT_STATS |
                                                OFPC_GROUP_STATS))
    )
+
def get_ofp_port_info(self, device, port_no):
    """
    Retrieve the port info. This includes the ofp_port. The existing ofp
    structure can be used, or all the attributes get added to the Port
    definitions or a new proto definition gets created. This API will allow
    the Core to create a LogicalPort associated with this device.
    :param device: device
    :param port_no: port number
    :return: Proto Message (TBD); None when port_no is not a known
             northbound port or that port has no logical port yet
    """
    from pyvoltha.protos.inter_container_pb2 import PortCapability
    # Since the adapter created the device port then it has the reference of the port to
    # return the capability. TODO: Do a lookup on the NNI port number and return the
    # appropriate attributes
    self.log.info('get_ofp_port_info', port_no=port_no,
                  info=self.ofp_port_no, device_id=device.id)

    nni = self.get_northbound_port(port_no)
    if nni is not None:
        lp = nni.get_logical_port()
        if lp is not None:
            return PortCapability(port=lp)
+
# @inlineCallbacks
def send_proxied_message(self, proxy_address, msg):
    """
    Forward an OMCI message to an ONU through the PON agent.

    :param proxy_address: (ProxyAddress) identifies the target PON/ONU
    :param msg: (scapy Packet or str) OMCI message; Packets are serialized
    """
    self.log.debug('sending-proxied-message', msg=msg)

    if isinstance(msg, Packet):
        msg = str(msg)

    if self._pon_agent is not None:
        pon_id, onu_id = self._proxy_address_to_pon_onu_id(proxy_address)

        pon = self.southbound_ports.get(pon_id)

        # Only deliver if both the PON and the ONU are known and enabled
        if pon is not None and pon.enabled:
            onu = pon.onu(onu_id)

            if onu is not None and onu.enabled:
                data = self._pon_agent.encode_omci_packet(msg, pon_id, onu_id)
                try:
                    self._pon_agent.send(data)

                except Exception as e:
                    self.log.exception('pon-agent-send', pon_id=pon_id, onu_id=onu_id, e=e)
            else:
                self.log.debug('onu-invalid-or-disabled', pon_id=pon_id, onu_id=onu_id)
        else:
            self.log.debug('pon-invalid-or-disabled', pon_id=pon_id)
+
def _onu_offset(self, onu_id):
    # Map an ONU ID to its logical port offset.
    # Start ONU's just past the southbound PON port numbers. Since ONU ID's start
    # at zero, add one
    # assert AdtranOltHandler.BASE_ONU_OFFSET > (self.num_northbound_ports + self.num_southbound_ports + 1)
    assert AdtranOltHandler.BASE_ONU_OFFSET > (4 + self.num_southbound_ports + 1)  # Skip over uninitialized ports
    return AdtranOltHandler.BASE_ONU_OFFSET + onu_id
+
+ def _pon_onu_id_to_proxy_address(self, pon_id, onu_id):
+ if pon_id in self.southbound_ports:
+ pon = self.southbound_ports[pon_id]
+ onu = pon.onu(onu_id)
+ proxy_address = onu.proxy_address if onu is not None else None
+
+ else:
+ proxy_address = None
+
+ return proxy_address
+
def _proxy_address_to_pon_onu_id(self, proxy_address):
    """
    Convert the proxy address to the PON-ID and ONU-ID
    :param proxy_address: (ProxyAddress)
    :return: (tuple) pon-id, onu-id
    """
    onu_id = proxy_address.onu_id
    # channel_id carries the OLT port number (see add_onu_device); map it
    # back to the 0-based PON ID
    pon_id = self._port_number_to_pon_id(proxy_address.channel_id)

    return pon_id, onu_id
+
def pon_id_to_port_number(self, pon_id):
    # Map a 0-based PON ID to its OLT port number; inverse of
    # _port_number_to_pon_id().  PON 0 -> port 5, PON 1 -> port 6, ...
    return pon_id + 1 + 4  # Skip over uninitialized ports
+
def _port_number_to_pon_id(self, port):
    # Map an OLT port number (or a UNI logical port) back to its 0-based
    # PON ID; inverse of pon_id_to_port_number().
    if self.is_uni_port(port):
        # Convert to OLT device port
        port = platform.intf_id_from_uni_port_num(port)

    return port - 1 - 4  # Skip over uninitialized ports
+
def is_pon_port(self, port):
    # A port is a PON port iff its derived PON ID is a known southbound port
    return self._port_number_to_pon_id(port) in self.southbound_ports
+
def is_uni_port(self, port):
    # UNI (ONU-side) logical port numbers occupy (5 << 11) .. OFPP_MAX.
    # NOTE(review): (5 << 11) presumably encodes intf_id 5 in the platform
    # port-numbering scheme -- confirm against platform.intf_id_from_uni_port_num
    return OFPP_MAX >= port >= (5 << 11)
+
def get_southbound_port(self, port):
    # Resolve a port number to its southbound (PON) port object, or None
    pon_id = self._port_number_to_pon_id(port)
    return self.southbound_ports.get(pon_id, None)
+
def get_northbound_port(self, port):
    # Resolve a port number to its northbound (NNI) port object, or None
    return self.northbound_ports.get(port, None)
+
def get_port_name(self, port, logical_name=False):
    """
    Get the name for a port

    Port names are used in various ways within and outside of VOLTHA.
    Typically, the physical port name will be used during device handler conversations
    with the hardware (REST, NETCONF, ...) while the logical port name is what the
    outside world (ONOS, SEBA, ...) uses.

    All ports have a physical port name, but only ports exposed through VOLTHA
    as a logical port will have a logical port name

    :param port: (int) port number
    :param logical_name: (bool) select the logical name instead of the physical one
    :return: (str) port name, or None if the port matches no known category
    :raises NotImplementedError: for logical OpenFlow ports
    """
    if self.is_nni_port(port):
        port = self.get_northbound_port(port)
        return port.logical_port_name if logical_name else port.physical_port_name

    if self.is_pon_port(port):
        port = self.get_southbound_port(port)
        return port.logical_port_name if logical_name else port.physical_port_name

    if self.is_uni_port(port):
        return 'uni-{}'.format(port)

    if self.is_logical_port(port):
        # Fix: previously raised the NotImplemented singleton, which is not
        # an exception type (raising it produces a TypeError instead)
        raise NotImplementedError('Logical OpenFlow ports are not supported')
+
def _update_download_status(self, request, download):
    """
    Copy the current state of a tracked Download into an ImageDownload
    message and push it to the adapter agent.

    :param request: (ImageDownload) status message updated in place
    :param download: (Download) tracked download, or None if unknown
    """
    if download is not None:
        request.state = download.download_state
        request.reason = download.failure_reason
        request.image_state = download.image_state
        request.additional_info = download.additional_info
        request.downloaded_bytes = download.downloaded_bytes
    else:
        # No such download: report an explicit 'unknown' status
        request.state = ImageDownload.DOWNLOAD_UNKNOWN
        request.reason = ImageDownload.UNKNOWN_ERROR
        request.image_state = ImageDownload.IMAGE_UNKNOWN
        request.additional_info = "Download request '{}' not found".format(request.name)
        request.downloaded_bytes = 0

    self.adapter_agent.update_image_download(request)
+
def start_download(self, device, request, done):
    """
    This is called to request downloading a specified image into
    the standby partition of a device based on a NBI call.

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done
    :return: (Deferred) Shall be fired to acknowledge the download.
    """
    log.info('image_download', request=request)

    try:
        if not self._initial_enable_complete:
            # Never contacted the device on the initial startup, do 'activate' steps instead
            raise Exception('Device has not finished initial activation')

        if request.name in self._downloads:
            raise Exception("Download request with name '{}' already exists".
                            format(request.name))
        try:
            download = Download.create(self, request, self._download_protocols)

        except Exception:
            request.additional_info = 'Download request creation failed due to exception'
            raise

        try:
            self._downloads[download.name] = download
            self._update_download_status(request, download)
            done.callback('started')
            return done

        except Exception:
            # Roll back the registration so a retry with the same name works
            request.additional_info = 'Download request startup failed due to exception'
            del self._downloads[download.name]
            download.cancel_download(request)
            raise

    except Exception as e:
        self.log.exception('create', e=e)

        # DEVICE_BUSY while still in initial activation; otherwise unknown
        request.reason = ImageDownload.UNKNOWN_ERROR if self._initial_enable_complete\
            else ImageDownload.DEVICE_BUSY
        request.state = ImageDownload.DOWNLOAD_FAILED
        if not request.additional_info:
            request.additional_info = e.message  # NOTE: e.message is Python 2 only

        self.adapter_agent.update_image_download(request)

        # restore admin state to enabled
        device.admin_state = AdminState.ENABLED
        self.adapter_agent.update_device(device)
        raise
+
def download_status(self, device, request, done):
    """
    This is called to inquire about a requested image download status based
    on a NBI call.

    The adapter is expected to update the DownloadImage DB object with the
    query result

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done

    :return: (Deferred) Shall be fired to acknowledge
    """
    log.info('download_status', request=request)
    download = self._downloads.get(request.name)

    self._update_download_status(request, download)

    if request.state not in [ImageDownload.DOWNLOAD_STARTED,
                             ImageDownload.DOWNLOAD_SUCCEEDED,
                             ImageDownload.DOWNLOAD_FAILED]:
        # No download in progress or completed for this request;
        # restore admin state to enabled
        device.admin_state = AdminState.ENABLED
        self.adapter_agent.update_device(device)

    done.callback(request.state)
    return done
+
def cancel_download(self, device, request, done):
    """
    This is called to cancel a requested image download based on a NBI
    call. The admin state of the device will not change after the
    download.

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done

    :return: (Deferred) Shall be fired to acknowledge
    """
    log.info('cancel_download', request=request)

    download = self._downloads.get(request.name)

    if download is not None:
        # Cancellation removes the tracked entry before reporting status
        del self._downloads[request.name]
        result = download.cancel_download(request)
        self._update_download_status(request, download)
        done.callback(result)
    else:
        # Unknown request: publish 'not found' status and fail the deferred
        self._update_download_status(request, download)
        done.errback(KeyError('Download request not found'))

    # Restore the admin state if a download had marked the device busy
    if device.admin_state == AdminState.DOWNLOADING_IMAGE:
        device.admin_state = AdminState.ENABLED
        self.adapter_agent.update_device(device)

    return done
+
def activate_image(self, device, request, done):
    """
    This is called to activate a downloaded image from a standby partition
    into active partition.

    Depending on the device implementation, this call may or may not
    cause device reboot. If no reboot, then a reboot is required to make
    the activated image running on device

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done

    :return: (Deferred) OperationResponse object.
    """
    log.info('activate_image', request=request)

    download = self._downloads.get(request.name)
    if download is not None:
        # Activation consumes the tracked download entry
        del self._downloads[request.name]
        result = download.activate_image()
        self._update_download_status(request, download)
        done.callback(result)
    else:
        # Unknown request: publish 'not found' status and fail the deferred
        self._update_download_status(request, download)
        done.errback(KeyError('Download request not found'))

    # restore admin state to enabled
    device.admin_state = AdminState.ENABLED
    self.adapter_agent.update_device(device)
    return done
+
def revert_image(self, device, request, done):
    """
    This is called to deactivate the specified image at active partition,
    and revert to previous image at standby partition.

    Depending on the device implementation, this call may or may not
    cause device reboot. If no reboot, then a reboot is required to
    make the previous image running on device

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done

    :return: (Deferred) OperationResponse object.
    """
    log.info('revert_image', request=request)

    download = self._downloads.get(request.name)
    if download is not None:
        # Reverting consumes the tracked download entry
        del self._downloads[request.name]
        result = download.revert_image()
        self._update_download_status(request, download)
        done.callback(result)
    else:
        # Unknown request: publish 'not found' status and fail the deferred
        self._update_download_status(request, download)
        done.errback(KeyError('Download request not found'))

    # restore admin state to enabled
    device.admin_state = AdminState.ENABLED
    self.adapter_agent.update_device(device)
    return done
+
def add_onu_device(self, pon_id, onu_id, serial_number):
    """
    Register a discovered ONU as a child device, unless one with the same
    serial number already exists.

    :param pon_id: (int) PON ID the ONU was discovered on
    :param onu_id: (int) ONU ID on that PON
    :param serial_number: (str) ONU serial number (first 4 chars = vendor ID)
    :return: existing child device if one matches the serial number;
             None after requesting creation of a new one, or on failure
    """
    onu_device = self.adapter_agent.get_child_device(self.device_id,
                                                     serial_number=serial_number)
    if onu_device is not None:
        return onu_device

    try:
        # NOTE - channel_id of onu is set to pon_id
        pon_port = self.pon_id_to_port_number(pon_id)
        proxy_address = Device.ProxyAddress(device_id=self.device_id,
                                            channel_id=pon_port,
                                            onu_id=onu_id,
                                            onu_session_id=onu_id)

        self.log.debug("added-onu", port_no=pon_id,
                       onu_id=onu_id, serial_number=serial_number,
                       proxy_address=proxy_address)

        self.adapter_agent.add_onu_device(
            parent_device_id=self.device_id,
            parent_port_no=pon_port,
            vendor_id=serial_number[:4],
            proxy_address=proxy_address,
            root=True,
            serial_number=serial_number,
            admin_state=AdminState.ENABLED,
        )
        # NOTE(review): nothing is returned on the successful-creation path;
        # callers appear to re-query by serial number -- confirm

    except Exception as e:
        self.log.exception('onu-activation-failed', e=e)
        return None
+
def setup_onu_tech_profile(self, pon_id, onu_id, logical_port_number):
    """
    Push tech-profile information for an ONU/UNI to its ONU adapter via
    an inter-adapter message.

    :param pon_id: (int) PON ID of the ONU
    :param onu_id: (int) ONU ID on that PON
    :param logical_port_number: (int) UNI logical port number
    """
    # Send ONU Adapter related tech profile information.
    self.log.debug('add-tech-profile-info')

    uni_id = self.platform.uni_id_from_uni_port(logical_port_number)
    parent_port_no = self.pon_id_to_port_number(pon_id)
    onu_device = self.adapter_agent.get_child_device(self.device_id,
                                                     onu_id=onu_id,
                                                     parent_port_no=parent_port_no)

    ofp_port_name, ofp_port_no = self.get_ofp_port_name(pon_id, onu_id,
                                                        logical_port_number)
    if ofp_port_name is None:
        self.log.error("port-name-not-found")
        return

    # KV-store path where the tech profile for this ONU/UNI is published
    tp_path = self.get_tp_path(pon_id, ofp_port_name)

    self.log.debug('Load-tech-profile-request-to-onu-handler', tp_path=tp_path)

    msg = {'proxy_address': onu_device.proxy_address, 'uni_id': uni_id,
           'event': 'download_tech_profile', 'event_data': tp_path}

    # Send the event message to the ONU adapter
    self.adapter_agent.publish_inter_adapter_message(onu_device.id, msg)
diff --git a/adapters/adtran_olt/codec/__init__.py b/adapters/adtran_olt/codec/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/adtran_olt/codec/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_olt/codec/ietf_interfaces.py b/adapters/adtran_olt/codec/ietf_interfaces.py
new file mode 100644
index 0000000..0bdf691
--- /dev/null
+++ b/adapters/adtran_olt/codec/ietf_interfaces.py
@@ -0,0 +1,328 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+import xmltodict
+import structlog
+from pyvoltha.protos.openflow_13_pb2 import OFPPF_1GB_FD, OFPPF_10GB_FD, OFPPF_40GB_FD, OFPPF_100GB_FD
+from pyvoltha.protos.openflow_13_pb2 import OFPPF_FIBER, OFPPF_COPPER
+from pyvoltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPC_PORT_DOWN, OFPPS_LINK_DOWN, OFPPF_OTHER
+from pyvoltha.protos.common_pb2 import OperStatus, AdminState
+
+log = structlog.get_logger()
+
# NETCONF <get-config> filter selecting all ietf-interfaces configuration
_ietf_interfaces_config_rpc = """
    <filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
        <interface/>
      </interfaces>
    </filter>
"""

# NETCONF <get> filter selecting the operational state of all interfaces
_ietf_interfaces_state_rpc = """
    <filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
        <interface>
          <name/>
          <type/>
          <admin-status/>
          <oper-status/>
          <last-change/>
          <phys-address/>
          <speed/>
        </interface>
      </interfaces-state>
    </filter>
"""

# Legal values for the RFC 6243 'with-defaults' retrieval mode
_allowed_with_default_types = ['report-all', 'report-all-tagged', 'trim', 'explicit']
+
+# TODO: Centralize the item below as a function in a core util module
+
+
+def _with_defaults(default_type=None):
+ if default_type is None:
+ return ""
+
+ assert(default_type in _allowed_with_default_types)
+ return """
+ <with-defaults xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults">
+ {}</with-defaults>""".format(default_type)
+
+
class IetfInterfacesConfig(object):
    """Retrieval and decode of ietf-interfaces configuration over NETCONF"""
    def __init__(self, session):
        self._session = session

    @inlineCallbacks
    def get_config(self, source='running', with_defaults=None):
        # Issue a NETCONF <get> for the ietf-interfaces subtree of the
        # requested datastore, optionally qualified by a with-defaults mode
        flt = _ietf_interfaces_config_rpc + _with_defaults(with_defaults)

        rpc_reply = yield self._session.get(source, filter=flt)
        returnValue(rpc_reply)

    def get_interfaces(self, rpc_reply, interface_type=None):
        """
        Get the physical entities of a particular type
        :param rpc_reply: Reply from previous get or request
        :param interface_type: (String or List) The type of interface (case-insensitive)
        :return: (list) of OrderDict interface entries
        """
        def _type_matches(entry, wanted):
            # Compare against the '#text' of the entry's 'type' element
            if 'type' not in entry or '#text' not in entry['type']:
                return False
            text_val = entry['type']['#text'].lower()
            if isinstance(wanted, list):
                return any(v.lower() in text_val for v in wanted)
            return wanted.lower() in text_val

        entries = xmltodict.parse(rpc_reply.data_xml)['data']['interfaces']

        if interface_type is None:
            return entries

        return [entry for entry in entries if _type_matches(entry, interface_type)]
+
+
class IetfInterfacesState(object):
    """
    Retrieval and decode of ietf-interfaces operational (interfaces-state)
    data over NETCONF, plus helpers mapping entries to VOLTHA/OpenFlow
    port attributes.
    """
    def __init__(self, session):
        self._session = session

    @inlineCallbacks
    def get_state(self):
        """
        Issue a NETCONF <get> of the interfaces-state subtree
        :return: (Deferred) fires with the RPC reply
        """
        try:
            request = self._session.get(_ietf_interfaces_state_rpc)
            rpc_reply = yield request
            returnValue(rpc_reply)

        except Exception as e:
            log.exception('get_state', e=e)
            raise

    @staticmethod
    def get_interfaces(rpc_reply, key='type', key_value=None):
        """
        Get the interface state entries, optionally filtered on one key.

        Fixes: was declared @staticmethod while also taking a 'self'
        parameter, shifting every argument on any natural call; also removed
        a leftover debug loop that pretty-printed every entry at info level.

        :param rpc_reply: Reply from previous get or request
        :param key: (str) entry key to filter on (default: 'type')
        :param key_value: (String or List) value(s) to match (case-insensitive)
        :return: (list) of OrderedDict interface entries
        """
        result_dict = xmltodict.parse(rpc_reply.data_xml)
        entries = result_dict['data']['interfaces-state']['interface']

        # xmltodict yields a single OrderedDict (not a list) when exactly one
        # interface is present; normalize as get_port_entries() already does
        if not isinstance(entries, list):
            entries = [entries]

        if key_value is None:
            return entries

        def _matches(entry, key, value):
            # Compare against the '#text' of the entry's requested element
            if key in entry and '#text' in entry[key]:
                text_val = entry[key]['#text'].lower()
                if isinstance(value, list):
                    return any(v.lower() in text_val for v in value)
                return value.lower() in text_val
            return False

        return [entry for entry in entries if _matches(entry, key, key_value)]

    @staticmethod
    def _get_admin_state(entry):
        # Map ietf-interfaces admin-status to the VOLTHA AdminState enum
        state_map = {
            'up': AdminState.ENABLED,
            'down': AdminState.DISABLED,
            'testing': AdminState.DISABLED
        }
        return state_map.get(entry.get('admin-status', 'down'),
                             AdminState.UNKNOWN)

    @staticmethod
    def _get_oper_status(entry):
        # Map ietf-interfaces oper-status to the VOLTHA OperStatus enum
        state_map = {
            'up': OperStatus.ACTIVE,
            'down': OperStatus.FAILED,
            'testing': OperStatus.TESTING,
            'unknown': OperStatus.UNKNOWN,
            'dormant': OperStatus.DISCOVERED,
            'not-present': OperStatus.UNKNOWN,
            'lower-layer-down': OperStatus.FAILED
        }
        return state_map.get(entry.get('oper-status', 'down'),
                             OperStatus.UNKNOWN)

    @staticmethod
    def _get_mac_addr(entry):
        # Use the reported phys-address when available; otherwise fabricate
        # a placeholder MAC (hardware does not always report one)
        mac_addr = entry.get('phys-address', None)
        if mac_addr is None:
            import random
            # TODO: Get with qumram team about phys addr
            mac_addr = '08:00:{}{}:{}{}:{}{}:00'.format(random.randint(0, 9),
                                                        random.randint(0, 9),
                                                        random.randint(0, 9),
                                                        random.randint(0, 9),
                                                        random.randint(0, 9),
                                                        random.randint(0, 9))
        return mac_addr

    @staticmethod
    def _get_speed_value(entry):
        # Speed in bits/second: prefer the reported value, otherwise derive
        # it from the interface name
        speed = entry.get('speed') or IetfInterfacesState._get_speed_via_name(entry.get('name'))
        if isinstance(speed, str):
            return long(speed)  # Python 2 codebase
        return speed

    @staticmethod
    def _get_speed_via_name(name):
        """
        Derive bits/second from well-known interface-name fragments.

        Fix: previously iterated a plain dict, so 'hundred-gigabit…' names
        could nondeterministically match the 'gigabit' substring first;
        now most-specific fragments are tried in a fixed order.
        """
        speed_list = [
            ('terabit', 1000000000000),
            ('hundred-gigabit', 100000000000),
            ('fourty-gigabit', 40000000000),
            ('ten-gigabit', 10000000000),
            ('gigabit', 1000000000),
        ]
        for fragment, value in speed_list:
            if fragment in name.lower():
                return value
        return 0

    @staticmethod
    def _get_of_state(entry):
        # If port up and ready: OFPPS_LIVE
        # If port config bit is down: OFPPC_PORT_DOWN
        # If port state bit is down: OFPPS_LINK_DOWN
        # TODO: Update of openflow port state is not supported, so always say we are alive
        return OFPPS_LIVE

    @staticmethod
    def _get_of_capabilities(entry):
        # OpenFlow ofp_port_features bitmap; medium is assumed to be fiber.
        # TODO: Look into adtran-physical-entities and decode xSFP type and other settings
        return IetfInterfacesState._get_of_speed(entry) | OFPPF_FIBER

    @staticmethod
    def _get_of_speed(entry):
        # OpenFlow port-speed capability bit for this interface
        speed = IetfInterfacesState._get_speed_value(entry)
        speed_map = {
            1000000000: OFPPF_1GB_FD,
            10000000000: OFPPF_10GB_FD,
            40000000000: OFPPF_40GB_FD,
            100000000000: OFPPF_100GB_FD,
        }
        # return speed_map.get(speed, OFPPF_OTHER)
        # TODO: For now, force 100 GB
        return OFPPF_100GB_FD

    @staticmethod
    def _get_port_number(name, if_index):
        """
        Extract the port number from an interface name.

        :param name: (str) interface name as reported by the OLT
        :param if_index: unused; kept for interface compatibility
        :return: (int) last number in the name, or None if no format matched
        """
        import re

        # NOTE(review): in these patterns '{1,2}' quantifies the preceding
        # '/' or ' ' character, not the digit; '\d{1,2}' was probably
        # intended -- behavior deliberately kept as-is
        formats = [
            'xpon \d/{1,2}\d',                         # OLT version 3 (Feb 2018++)
            'Hundred-Gigabit-Ethernet \d/\d/{1,2}\d',  # OLT version 2
            'XPON \d/\d/{1,2}\d',                      # OLT version 2
            'hundred-gigabit-ethernet \d/{1,2}\d',     # OLT version 1
            'channel-termination {1,2}\d',             # OLT version 1
        ]
        p2 = re.compile('\d+')

        for regex in formats:
            p = re.compile(regex, re.IGNORECASE)
            match = p.match(name)
            if match is not None:
                # Port number is the last run of digits in the name
                return int(p2.findall(name)[-1])

    @staticmethod
    def get_port_entries(rpc_reply, port_type):
        """
        Get the port entries that make up the northbound and
        southbound interfaces

        :param rpc_reply: Reply from a previous interfaces-state get
        :param port_type: (str) name fragment selecting the port class
        :return: (dict) port_no -> port attribute dict
        """
        ports = dict()
        result_dict = xmltodict.parse(rpc_reply.data_xml)
        entries = result_dict['data']['interfaces-state']['interface']
        # Normalize single-interface replies (xmltodict returns a dict)
        if not isinstance(entries, list):
            entries = [entries]
        port_entries = [entry for entry in entries if 'name' in entry and
                        port_type.lower() in entry['name'].lower()]

        for entry in port_entries:
            port = {
                'port_no': IetfInterfacesState._get_port_number(entry.get('name'),
                                                                entry.get('ifindex')),
                'name': entry.get('name', 'unknown'),
                # NOTE(review): lookup key 'ifIndex' differs in case from the
                # 'ifindex' used above -- confirm which one the reply uses
                'ifIndex': entry.get('ifIndex'),
                # 'label': None,
                'mac_address': IetfInterfacesState._get_mac_addr(entry),
                'admin_state': IetfInterfacesState._get_admin_state(entry),
                'oper_status': IetfInterfacesState._get_oper_status(entry),
                'ofp_state': IetfInterfacesState._get_of_state(entry),
                'ofp_capabilities': IetfInterfacesState._get_of_capabilities(entry),
                'current_speed': IetfInterfacesState._get_of_speed(entry),
                'max_speed': IetfInterfacesState._get_of_speed(entry),
            }
            port_no = port['port_no']
            if port_no not in ports:
                ports[port_no] = port
            else:
                # Merge duplicate reports for the same port number
                ports[port_no].update(port)

        return ports
diff --git a/adapters/adtran_olt/codec/olt_config.py b/adapters/adtran_olt/codec/olt_config.py
new file mode 100644
index 0000000..473e2f6
--- /dev/null
+++ b/adapters/adtran_olt/codec/olt_config.py
@@ -0,0 +1,329 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+
+log = structlog.get_logger()
+
+
class OltConfig(object):
    """
    Class to wrap decode of olt container (config) from the ADTRAN
    gpon-olt-hw.yang YANG model
    """
    def __init__(self, packet):
        """
        :param packet: (dict) decoded 'olt' config container
        """
        self._packet = packet
        self._pons = None       # lazily decoded {pon-id -> Pon}

    def __str__(self):
        return "OltConfig: {}".format(self.olt_id)

    @property
    def olt_id(self):
        """Unique OLT identifier"""
        return self._packet.get('olt-id', '')

    @property
    def debug_output(self):
        """least important level that will output everything"""
        return self._packet.get('debug-output', 'warning')

    @property
    def pons(self):
        """(dict) pon-id -> OltConfig.Pon, decoded on first access"""
        if self._pons is None:
            self._pons = OltConfig.Pon.decode(self._packet.get('pon', None))
        return self._pons

    class Pon(object):
        """
        Provides decode of PON list from within
        """
        def __init__(self, packet):
            assert 'pon-id' in packet, 'pon-id not found'
            self._packet = packet
            self._onus = None   # lazily decoded {onu-id -> Onu}

        def __str__(self):
            return "OltConfig.Pon: pon-id: {}".format(self.pon_id)

        @staticmethod
        def decode(pon_list):
            """
            :param pon_list: (list/None) raw PON config entries
            :return: (dict) pon-id -> Pon (empty when pon_list is None)
            """
            pons = {}

            if pon_list is not None:
                for pon_data in pon_list:
                    pon = OltConfig.Pon(pon_data)
                    assert pon.pon_id not in pons
                    pons[pon.pon_id] = pon

            return pons

        @property
        def pon_id(self):
            """PON identifier"""
            return self._packet['pon-id']

        @property
        def enabled(self):
            """The desired state of the interface"""
            return self._packet.get('enabled', False)

        @property
        def downstream_fec_enable(self):
            """Enables downstream Forward Error Correction"""
            return self._packet.get('downstream-fec-enable', False)

        @property
        def upstream_fec_enable(self):
            """Enables upstream Forward Error Correction"""
            return self._packet.get('upstream-fec-enable', False)

        @property
        def deployment_range(self):
            """Maximum deployment distance (meters)"""
            return self._packet.get('deployment-range', 25000)

        @property
        def onus(self):
            """(dict) onu-id -> OltConfig.Pon.Onu, decoded on first access"""
            if self._onus is None:
                self._onus = OltConfig.Pon.Onu.decode(self._packet.get('onus', None))
            return self._onus

        class Onu(object):
            """
            Provides decode of onu list for a PON port
            """
            def __init__(self, packet):
                assert 'onu-id' in packet, 'onu-id not found'
                self._packet = packet
                self._tconts = None         # lazily decoded {alloc-id -> TCont}
                self._tconts_dict = None
                self._gem_ports = None      # lazily decoded {port-id -> GemPort}
                self._gem_ports_dict = None

            def __str__(self):
                return "OltConfig.Pon.Onu: onu-id: {}".format(self.onu_id)

            @staticmethod
            def decode(onu_dict):
                """
                :param onu_dict: (dict/list/None) raw ONU container; either a
                       dict holding an 'onu' list, or a single-entry list
                       (xmltodict collapses single elements)
                :return: (dict) onu-id -> Onu
                """
                onus = {}

                if onu_dict is not None:
                    if 'onu' in onu_dict:
                        for onu_data in onu_dict['onu']:
                            onu = OltConfig.Pon.Onu(onu_data)
                            assert onu.onu_id not in onus
                            onus[onu.onu_id] = onu
                    elif len(onu_dict) > 0 and 'onu-id' in onu_dict[0]:
                        # NOTE(review): only the first element is decoded on
                        # this path -- verify multi-element case cannot occur
                        onu = OltConfig.Pon.Onu(onu_dict[0])
                        assert onu.onu_id not in onus
                        onus[onu.onu_id] = onu

                return onus

            @property
            def onu_id(self):
                """The ID used to identify the ONU"""
                return self._packet['onu-id']

            @property
            def serial_number_64(self):
                """The serial number (base-64) is unique for each ONU"""
                return self._packet.get('serial-number', '')

            @property
            def password(self):
                """ONU Password"""
                return self._packet.get('password', bytes(0))

            @property
            def enable(self):
                """If true, places the ONU in service"""
                return self._packet.get('enable', False)

            @property
            def tconts(self):
                """(dict) alloc-id -> TCont, decoded on first access"""
                if self._tconts is None:
                    self._tconts = OltConfig.Pon.Onu.TCont.decode(self._packet.get('t-conts', None))
                return self._tconts

            @property
            def tconts_dict(self):              # TODO: Remove if not used
                if self._tconts_dict is None:
                    self._tconts_dict = {tcont.alloc_id: tcont for tcont in self.tconts}
                return self._tconts_dict

            @property
            def gem_ports(self):
                """(dict) port-id -> GemPort, decoded on first access"""
                if self._gem_ports is None:
                    self._gem_ports = OltConfig.Pon.Onu.GemPort.decode(self._packet.get('gem-ports', None))
                return self._gem_ports

            @property
            def gem_ports_dict(self):           # TODO: Remove if not used
                if self._gem_ports_dict is None:
                    self._gem_ports_dict = {gem.gem_id: gem for gem in self.gem_ports}
                return self._gem_ports_dict

            class TCont(object):
                """
                Provides decode of onu list for the T-CONT container
                """
                def __init__(self, packet):
                    assert 'alloc-id' in packet, 'alloc-id not found'
                    self._packet = packet
                    self._traffic_descriptor = None
                    self._best_effort = None

                def __str__(self):
                    return "OltConfig.Pon.Onu.TCont: alloc-id: {}".format(self.alloc_id)

                @staticmethod
                def decode(tcont_container):
                    """
                    :param tcont_container: (dict/None) container with a
                           't-cont' list
                    :return: (dict) alloc-id -> TCont
                    """
                    tconts = {}

                    if tcont_container is not None:
                        for tcont_data in tcont_container.get('t-cont', []):
                            tcont = OltConfig.Pon.Onu.TCont(tcont_data)
                            assert tcont.alloc_id not in tconts
                            tconts[tcont.alloc_id] = tcont

                    return tconts

                @property
                def alloc_id(self):
                    """The ID used to identify the T-CONT"""
                    return self._packet['alloc-id']

                @property
                def traffic_descriptor(self):
                    """
                    Each Alloc-ID is provisioned with a traffic descriptor that specifies
                    the three bandwidth component parameters: fixed bandwidth, assured
                    bandwidth, and maximum bandwidth, as well as the ternary eligibility
                    indicator for additional bandwidth assignment
                    """
                    if self._traffic_descriptor is None and 'traffic-descriptor' in self._packet:
                        self._traffic_descriptor = OltConfig.Pon.Onu.TCont.\
                            TrafficDescriptor(self._packet['traffic-descriptor'])
                    return self._traffic_descriptor

                class TrafficDescriptor(object):
                    """Bandwidth parameters for one Alloc-ID"""
                    def __init__(self, packet):
                        self._packet = packet

                    def __str__(self):
                        return "OltConfig.Pon.Onu.TCont.TrafficDescriptor: {}/{}/{}".\
                            format(self.fixed_bandwidth, self.assured_bandwidth,
                                   self.maximum_bandwidth)

                    @property
                    def fixed_bandwidth(self):
                        # Narrowed from a bare 'except' so that SystemExit /
                        # KeyboardInterrupt are no longer swallowed; only
                        # conversion failures default to 0
                        try:
                            return int(self._packet.get('fixed-bandwidth', 0))
                        except (ValueError, TypeError):
                            return 0

                    @property
                    def assured_bandwidth(self):
                        try:
                            return int(self._packet.get('assured-bandwidth', 0))
                        except (ValueError, TypeError):
                            return 0

                    @property
                    def maximum_bandwidth(self):
                        try:
                            return int(self._packet.get('maximum-bandwidth', 0))
                        except (ValueError, TypeError):
                            return 0

                    @property
                    def additional_bandwidth_eligibility(self):
                        return self._packet.get('additional-bandwidth-eligibility', 'none')

                @property
                def best_effort(self):
                    """BestEffort settings, or None if not provisioned"""
                    if self._best_effort is None:
                        self._best_effort = OltConfig.Pon.Onu.TCont.BestEffort.decode(
                            self._packet.get('best-effort', None))
                    return self._best_effort

                class BestEffort(object):
                    """Best-effort bandwidth settings for one T-CONT"""
                    def __init__(self, packet):
                        self._packet = packet

                    def __str__(self):
                        return "OltConfig.Pon.Onu.TCont.BestEffort: {}".format(self.bandwidth)

                    @staticmethod
                    def decode(best_effort):
                        """
                        Added: referenced by TCont.best_effort but previously
                        undefined (accessing best_effort raised AttributeError)

                        :param best_effort: (dict/None) raw best-effort entry
                        :return: (BestEffort) or None when absent
                        """
                        return OltConfig.Pon.Onu.TCont.BestEffort(best_effort) \
                            if best_effort is not None else None

                    @property
                    def bandwidth(self):
                        return self._packet['bandwidth']

                    @property
                    def priority(self):
                        return self._packet['priority']

                    @property
                    def weight(self):
                        return self._packet['weight']

            class GemPort(object):
                """
                Provides decode of onu list for the gem-ports container
                """
                def __init__(self, packet):
                    assert 'port-id' in packet, 'port-id not found'
                    self._packet = packet

                def __str__(self):
                    return "OltConfig.Pon.Onu.GemPort: port-id: {}/{}".\
                        format(self.port_id, self.alloc_id)

                @staticmethod
                def decode(gem_port_container):
                    """
                    :param gem_port_container: (dict/None) container with a
                           'gem-port' list
                    :return: (dict) port-id -> GemPort
                    """
                    gem_ports = {}

                    if gem_port_container is not None:
                        for gem_port_data in gem_port_container.get('gem-port', []):
                            gem_port = OltConfig.Pon.Onu.GemPort(gem_port_data)
                            assert gem_port.port_id not in gem_ports
                            gem_ports[gem_port.port_id] = gem_port

                    return gem_ports

                @property
                def port_id(self):
                    """The ID used to identify the GEM Port"""
                    return self._packet['port-id']

                @property
                def gem_id(self):
                    """The ID used to identify the GEM Port"""
                    return self.port_id

                @property
                def alloc_id(self):
                    """The Alloc-ID of the T-CONT to which this GEM port is mapped"""
                    return self._packet['alloc-id']

                @property
                def omci_transport(self):
                    """If true, this GEM port is used to transport the OMCI virtual connection"""
                    return self._packet.get('omci-transport', False)

                @property
                def encryption(self):
                    """If true, enable encryption using the advanced encryption standard(AES)"""
                    return self._packet.get('encryption', False)
diff --git a/adapters/adtran_olt/codec/olt_state.py b/adapters/adtran_olt/codec/olt_state.py
new file mode 100644
index 0000000..74413a4
--- /dev/null
+++ b/adapters/adtran_olt/codec/olt_state.py
@@ -0,0 +1,290 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+
+log = structlog.get_logger()
+
+
class OltState(object):
    """
    Class to wrap decode of olt-state container from the ADTRAN
    gpon-olt-hw.yang YANG model
    """

    def __init__(self, packet):
        """
        :param packet: (dict) decoded 'olt-state' container
        """
        self._packet = packet
        self._pons = None       # lazily decoded {pon-id -> Pon}

    def __str__(self):
        return "OltState: {}".format(self.software_version)

    @property
    def software_version(self):
        """The software version of olt driver"""
        return self._packet.get('software-version', '')

    @property
    def pons(self):
        """(dict) pon-id -> OltState.Pon, decoded on first access"""
        if self._pons is None:
            self._pons = OltState.Pon.decode(self._packet.get('pon', None))
        return self._pons

    #############################################################
    # Act like a container for simple access into PON list

    def __len__(self):
        return len(self.pons)

    def __getitem__(self, key):
        if not isinstance(key, int):
            raise TypeError('Key should be of type int')
        if key not in self.pons:
            raise KeyError("key '{}' not found".format(key))

        return self.pons[key]

    def __iter__(self):
        raise NotImplementedError('TODO: Not yet implemented')

    def __contains__(self, item):
        if not isinstance(item, int):
            raise TypeError('Item should be of type int')
        return item in self.pons

    # TODO: Look at generator support and if it is useful

    class Pon(object):
        """
        Provides decode of PON list from within
        """
        def __init__(self, packet):
            assert 'pon-id' in packet
            self._packet = packet
            self._onus = None   # lazily decoded {onu-id -> Onu}
            self._gems = None   # lazily decoded {gem-id -> Gem}

        def __str__(self):
            return "OltState.Pon: pon-id: {}".format(self.pon_id)

        @staticmethod
        def decode(pon_list):
            """
            :param pon_list: (list/None) raw PON state entries
            :return: (dict) pon-id -> Pon (empty when pon_list is None)
            """
            pons = {}
            # Guard added: a missing 'pon' subtree previously raised
            # TypeError here (OltConfig.Pon.decode already guards None)
            if pon_list is not None:
                for pon_data in pon_list:
                    pon = OltState.Pon(pon_data)
                    assert pon.pon_id not in pons
                    pons[pon.pon_id] = pon

            return pons

        @property
        def pon_id(self):
            """PON identifier"""
            return self._packet['pon-id']

        @property
        def downstream_wavelength(self):
            """The wavelength, in nanometers, being used in the downstream direction"""
            return self._packet.get('downstream-wavelength', 0)

        @property
        def upstream_wavelength(self):
            """The wavelength, in nanometers, being used in the upstream direction"""
            return self._packet.get('upstream-wavelength', 0)

        @property
        def downstream_channel_id(self):
            """Downstream wavelength channel identifier associated with this PON."""
            return self._packet.get('downstream-channel-id', 0)

        @property
        def rx_packets(self):
            """Sum all of the RX Packets of GEM ports that are not base TCONT's"""
            return int(self._packet.get('rx-packets', 0))

        @property
        def tx_packets(self):
            """Sum all of the TX Packets of GEM ports that are not base TCONT's"""
            return int(self._packet.get('tx-packets', 0))

        @property
        def rx_bytes(self):
            """Sum all of the RX Octets of GEM ports that are not base TCONT's"""
            return int(self._packet.get('rx-bytes', 0))

        @property
        def tx_bytes(self):
            """Sum all of the TX Octets of GEM ports that are not base TCONT's"""
            return int(self._packet.get('tx-bytes', 0))

        @property
        def tx_bip_errors(self):
            """Sum the TX ONU bip errors to get TX BIP's per PON"""
            return int(self._packet.get('tx-bip-errors', 0))

        @property
        def wm_tuned_out_onus(self):
            """
            bit array indicates the list of tuned out ONU's that are in wavelength
            mobility protecting state.
                onu-bit-octects:
                  type binary { length "4 .. 1024"; }
                  description each bit position indicates corresponding ONU's status
                              (true or false) whether that ONU's is in
                              wavelength mobility protecting state or not
                              For 128 ONTs per PON, the size of this
                              array will be 16. onu-bit-octects[0] and MSB bit in that byte
                              represents ONU 0 etc.
            """
            return self._packet.get('wm-tuned-out-onus', bytes(0))

        @property
        def ont_los(self):
            """List of configured ONTs that have been previously discovered and are in a los of signal state"""
            return self._packet.get('ont-los', [])

        @property
        def discovered_onu(self):
            """
            Immutable Set of each Optical Network Unit(ONU) that has been activated via discovery
                key/value: serial-number (string)
            """
            # 'AAAAAAAAAAA=' is the all-zeros (empty) serial and is filtered out
            return frozenset([sn['serial-number'] for sn in self._packet.get('discovered-onu', [])
                              if 'serial-number' in sn and sn['serial-number'] != 'AAAAAAAAAAA='])

        @property
        def gems(self):
            """This list is not in the proposed BBF model, the stats are part of ietf-interfaces"""
            if self._gems is None:
                self._gems = OltState.Pon.Gem.decode(self._packet.get('gem', []))
            return self._gems

        @property
        def onus(self):
            """
            The map of each Optical Network Unit(ONU).  Key: ONU ID (int)
            """
            if self._onus is None:
                self._onus = OltState.Pon.Onu.decode(self._packet.get('onu', []))
            return self._onus

        class Onu(object):
            """
            Provides decode of onu list for a PON port
            """
            def __init__(self, packet):
                assert 'onu-id' in packet, 'onu-id not found in packet'
                self._packet = packet

            def __str__(self):
                return "OltState.Pon.Onu: onu-id: {}".format(self.onu_id)

            @staticmethod
            def decode(onu_list):
                """
                :param onu_list: (list) raw ONU state entries
                :return: (dict) onu-id -> Onu
                """
                onus = {}
                for onu_data in onu_list:
                    onu = OltState.Pon.Onu(onu_data)
                    assert onu.onu_id not in onus
                    onus[onu.onu_id] = onu

                return onus

            @property
            def onu_id(self):
                """The ID used to identify the ONU"""
                return self._packet['onu-id']

            @property
            def oper_status(self):
                """The operational state of each ONU"""
                return self._packet.get('oper-status', 'unknown')

            @property
            def reported_password(self):
                """The password reported by the ONU (binary)"""
                return self._packet.get('reported-password', bytes(0))

            @property
            def rssi(self):
                """The received signal strength indication of the ONU"""
                return self._packet.get('rssi', -9999)

            @property
            def equalization_delay(self):
                """Equalization delay (bits)"""
                return self._packet.get('equalization-delay', 0)

            @property
            def fiber_length(self):
                """Distance to ONU"""
                return self._packet.get('fiber-length', 0)

        class Gem(object):
            """
            Provides decode of the GEM port list for a PON port
            """
            def __init__(self, packet):
                assert 'onu-id' in packet, 'onu-id not found in packet'
                assert 'port-id' in packet, 'port-id not found in packet'
                assert 'alloc-id' in packet, 'alloc-id not found in packet'
                self._packet = packet

            def __str__(self):
                # Fixed: format string previously had only one '{}' for two
                # arguments, so the gem-id was never printed
                return "OltState.Pon.Gem: onu-id: {}, gem-id: {}".\
                    format(self.onu_id, self.gem_id)

            @staticmethod
            def decode(gem_list):
                """
                :param gem_list: (list) raw GEM port state entries
                :return: (dict) gem-id (port-id) -> Gem
                """
                gems = {}
                for gem_data in gem_list:
                    gem = OltState.Pon.Gem(gem_data)
                    assert gem.gem_id not in gems
                    gems[gem.gem_id] = gem

                return gems

            @property
            def onu_id(self):
                """The ID used to identify the ONU"""
                return self._packet['onu-id']

            @property
            def alloc_id(self):
                return self._packet['alloc-id']

            @property
            def gem_id(self):
                return self._packet['port-id']

            @property
            def tx_packets(self):
                return int(self._packet.get('tx-packets', 0))

            @property
            def tx_bytes(self):
                return int(self._packet.get('tx-bytes', 0))

            @property
            def rx_packets(self):
                return int(self._packet.get('rx-packets', 0))

            @property
            def rx_bytes(self):
                return int(self._packet.get('rx-bytes', 0))
diff --git a/adapters/adtran_olt/codec/physical_entities_state.py b/adapters/adtran_olt/codec/physical_entities_state.py
new file mode 100644
index 0000000..47187c9
--- /dev/null
+++ b/adapters/adtran_olt/codec/physical_entities_state.py
@@ -0,0 +1,80 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from adapters.adtran_common.net.adtran_netconf import adtran_module_url
+from twisted.internet.defer import inlineCallbacks, returnValue
+import xmltodict
+import structlog
+
+log = structlog.get_logger()
+
# NETCONF <get> filter selecting the adtran-physical-entities subtree
# (all <physical-entity> elements) from the device datastore
_phys_entities_rpc = """
    <filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <physical-entities-state xmlns="{}">
        <physical-entity/>
      </physical-entities-state>
    </filter>
    """.format(adtran_module_url('adtran-physical-entities'))
+
+
class PhysicalEntitiesState(object):
    """
    Retrieves and decodes the 'physical-entities-state' subtree from an
    Adtran device over NETCONF.
    """
    def __init__(self, session):
        # session: NETCONF client session whose 'get' returns a deferred
        self._session = session
        self._rpc_reply = None      # last raw RPC reply; None until get_state()

    @inlineCallbacks
    def get_state(self):
        """
        Issue the NETCONF <get> for physical-entities state.

        :return: (Deferred) fires with the raw RPC reply; the reply is also
                 cached on the instance for the properties below
        """
        self._rpc_reply = None
        request = self._session.get(_phys_entities_rpc)
        self._rpc_reply = yield request
        returnValue(self._rpc_reply)

    @property
    def physical_entities(self):
        """
        :return: (list) of OrderDict physical entities, or None if get_state()
                 has not completed yet
        """
        if self._rpc_reply is None:
            # TODO: Support auto-get?
            return None

        result_dict = xmltodict.parse(self._rpc_reply.data_xml)
        return result_dict['data']['physical-entities-state']['physical-entity']

    def get_physical_entities(self, classification=None):
        """
        Get the physical entities of a particular type
        :param classification: (String or List) The classification or general hardware type of the
                               component identified by this physical entity
                               (case-insensitive)
        :return: (list) of OrderDict physical entities
        """
        entries = self.physical_entities

        if classification is None:
            return entries

        # for entry in entries:
        #     import pprint
        #     log.info(pprint.PrettyPrinter(indent=2).pformat(entry))

        def _matches(entry, value):
            # An entity matches when its classification text contains the
            # requested value (case-insensitive substring test); a list of
            # values matches if any one of them does
            if 'classification' in entry and '#text' in entry['classification']:
                text_val = entry['classification']['#text'].lower()
                if isinstance(value, list):
                    return any(v.lower() in text_val for v in value)
                return value.lower() in text_val
            return False

        return [entry for entry in entries if _matches(entry, classification)]
diff --git a/adapters/adtran_olt/main.py b/adapters/adtran_olt/main.py
new file mode 100755
index 0000000..07bcc07
--- /dev/null
+++ b/adapters/adtran_olt/main.py
@@ -0,0 +1,558 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Adtran OLT Adapter main entry point"""
+
+import argparse
+import os
+import time
+
+import arrow
+import yaml
+from packaging.version import Version
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+
+from pyvoltha.common.structlog_setup import setup_logging, update_logging
+from pyvoltha.common.utils.asleep import asleep
+from pyvoltha.common.utils.deferred_utils import TimeOutError
+from pyvoltha.common.utils.dockerhelpers import get_my_containers_name
+from pyvoltha.common.utils.nethelpers import get_my_primary_local_ipv4, \
+ get_my_primary_interface
+from pyvoltha.common.utils.registry import registry, IComponent
+from pyvoltha.adapters.kafka.adapter_proxy import AdapterProxy
+from pyvoltha.adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from pyvoltha.adapters.kafka.core_proxy import CoreProxy
+from pyvoltha.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+ get_messaging_proxy
+from pyvoltha.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from adtran_olt import AdtranOltAdapter
+from pyvoltha.protos import third_party
+from pyvoltha.protos.adapter_pb2 import AdapterConfig
+
+_ = third_party
+
+
# Default runtime settings.  Most entries may be overridden first by
# environment variables (here) and then by the command-line flags defined
# in parse_args().
defs = dict(
    version_file='./VERSION',       # resolved relative to this file's directory
    config=os.environ.get('CONFIG', './adapters-adtran_olt.yml'),
    # Regex used to extract the container number from a docker container name
    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR', '^.*\.(['
                                        '0-9]+)\..*$'),
    consul=os.environ.get('CONSUL', 'localhost:8500'),
    name=os.environ.get('NAME', 'adtran_olt'),
    vendor=os.environ.get('VENDOR', 'Voltha Project'),
    device_type=os.environ.get('DEVICE_TYPE', 'adtran_olt'),
    # NOTE(review): when supplied via the environment these two are strings,
    # not booleans -- consumers should not rely on the type
    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
    etcd=os.environ.get('ETCD', 'localhost:2379'),
    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '172.20.10.3:9092'),
    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '172.20.10.3:9092'),
    backend=os.environ.get('BACKEND', 'none'),
    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),

    # Following are for debugging
    debug_enabled=True,
    debug_host='work.bcsw.net',
    # debug_host='10.0.2.15',
    debug_port=8765,
)
+
+
def parse_args():
    """
    Build and evaluate the command-line parser for the Adtran OLT adapter.

    Defaults come from the module-level 'defs' dict (which itself honors
    environment variables).

    :return: (argparse.Namespace) parsed arguments, with instance_id
             replaced by the docker container name when requested
    """
    parser = argparse.ArgumentParser()

    _help = ('Path to adapters-adtran_olt.yml config file (default: %s). '
             'If relative, it is relative to main.py of Adtran OLT adapter.'
             % defs['config'])
    parser.add_argument('-c', '--config',
                        dest='config',
                        action='store',
                        default=defs['config'],
                        help=_help)

    _help = 'Regular expression for extracting container number from ' \
            'container name (default: %s)' % defs['container_name_regex']
    parser.add_argument('-X', '--container-number-extractor',
                        dest='container_name_regex',
                        action='store',
                        default=defs['container_name_regex'],
                        help=_help)

    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
    parser.add_argument('-C', '--consul',
                        dest='consul',
                        action='store',
                        default=defs['consul'],
                        help=_help)

    _help = 'name of this adapter (default: %s)' % defs['name']
    parser.add_argument('-na', '--name',
                        dest='name',
                        action='store',
                        default=defs['name'],
                        help=_help)

    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
    parser.add_argument('-ven', '--vendor',
                        dest='vendor',
                        action='store',
                        default=defs['vendor'],
                        help=_help)

    _help = 'supported device type of this adapter (default: %s)' % defs[
        'device_type']
    parser.add_argument('-dt', '--device_type',
                        dest='device_type',
                        action='store',
                        default=defs['device_type'],
                        help=_help)

    _help = 'specifies whether the device type accepts bulk flow updates ' \
            'adapter (default: %s)' % defs['accept_bulk_flow']
    parser.add_argument('-abf', '--accept_bulk_flow',
                        dest='accept_bulk_flow',
                        action='store',
                        default=defs['accept_bulk_flow'],
                        help=_help)

    _help = 'specifies whether the device type accepts add/remove flow ' \
            '(default: %s)' % defs['accept_atomic_flow']
    parser.add_argument('-aaf', '--accept_atomic_flow',
                        dest='accept_atomic_flow',
                        action='store',
                        default=defs['accept_atomic_flow'],
                        help=_help)

    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
    parser.add_argument('-e', '--etcd',
                        dest='etcd',
                        action='store',
                        default=defs['etcd'],
                        help=_help)

    _help = ('unique string id of this container instance (default: %s)'
             % defs['instance_id'])
    parser.add_argument('-i', '--instance-id',
                        dest='instance_id',
                        action='store',
                        default=defs['instance_id'],
                        help=_help)

    # Fixed typo in user-facing help text: 'recieve' -> 'receive'
    _help = 'ETH interface to receive (default: %s)' % defs['interface']
    parser.add_argument('-I', '--interface',
                        dest='interface',
                        action='store',
                        default=defs['interface'],
                        help=_help)

    _help = 'omit startup banner log lines'
    parser.add_argument('-n', '--no-banner',
                        dest='no_banner',
                        action='store_true',
                        default=False,
                        help=_help)

    _help = 'do not emit periodic heartbeat log messages'
    parser.add_argument('-N', '--no-heartbeat',
                        dest='no_heartbeat',
                        action='store_true',
                        default=False,
                        help=_help)

    _help = "suppress debug and info logs"
    parser.add_argument('-q', '--quiet',
                        dest='quiet',
                        action='count',
                        help=_help)

    _help = 'enable verbose logging'
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='count',
                        help=_help)

    _help = ('use docker container name as container instance id'
             ' (overrides -i/--instance-id option)')
    parser.add_argument('--instance-id-is-container-name',
                        dest='instance_id_is_container_name',
                        action='store_true',
                        default=False,
                        help=_help)

    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). ('
             'If not '
             'specified (None), the address from the config file is used'
             % defs['kafka_adapter'])
    parser.add_argument('-KA', '--kafka_adapter',
                        dest='kafka_adapter',
                        action='store',
                        default=defs['kafka_adapter'],
                        help=_help)

    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). ('
             'If not '
             'specified (None), the address from the config file is used'
             % defs['kafka_cluster'])
    parser.add_argument('-KC', '--kafka_cluster',
                        dest='kafka_cluster',
                        action='store',
                        default=defs['kafka_cluster'],
                        help=_help)

    _help = 'backend to use for config persistence'
    parser.add_argument('-b', '--backend',
                        default=defs['backend'],
                        choices=['none', 'consul', 'etcd'],
                        help=_help)

    _help = 'topic of core on the kafka bus'
    parser.add_argument('-ct', '--core_topic',
                        dest='core_topic',
                        action='store',
                        default=defs['core_topic'],
                        help=_help)

    _help = 'Enable remote python debug'
    parser.add_argument('-de', '--debug_enabled',
                        dest='debug_enabled',
                        action='store_true',
                        default=defs['debug_enabled'],
                        help=_help)

    _help = 'remote debug hostname or IP address'
    parser.add_argument('-dh', '--debug_host',
                        dest='debug_host',
                        action='store',
                        default=defs['debug_host'],
                        help=_help)

    _help = 'remote debug port number'
    # type=int added: without it a CLI-supplied port reached pydevd as a
    # string while the default remained an int
    parser.add_argument('-dp', '--debug_port',
                        dest='debug_port',
                        action='store',
                        type=int,
                        default=defs['debug_port'],
                        help=_help)

    args = parser.parse_args()

    # post-processing

    if args.instance_id_is_container_name:
        args.instance_id = get_my_containers_name()

    return args
+
+
def setup_remote_debug(host, port, logger):
    """
    Attempt to attach to a remote PyCharm (pydevd) debug server.

    Every failure mode is logged and swallowed -- remote debugging must
    never prevent the adapter from starting.

    :param host: (str) debug server hostname or IP address
    :param port: (int) debug server port
    :param logger: structlog logger used for error reporting
    """
    try:
        import sys
        sys.path.append('/voltha/pydevd/pycharm-debug.egg')
        import pydevd
        # Initial breakpoint

        pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True, suspend=False)

    except ImportError:
        logger.error('Error importing pydevd package')
        logger.error('REMOTE DEBUGGING will not be supported in this run...')
        # Continue on, you do not want to completely kill VOLTHA, you just need to fix it.

    except AttributeError:
        # Fixed garbled message ('...and rlogger.errorun again?')
        logger.error('Attribute error. Perhaps try to explicitly set PYTHONPATH to'
                     ' the pydevd directory and run again?')
        logger.error('REMOTE DEBUGGING will not be supported in this run...')
        # Continue on, you do not want to completely kill VOLTHA, you just need to fix it.

    except:
        # Deliberate catch-all: any other pydevd startup failure is logged
        # (now via the logger instead of a bare print) and ignored
        import sys
        logger.error("pydevd startup exception: %s" % sys.exc_info()[0])
        logger.error('REMOTE DEBUGGING will not be supported in this run...')
+
+
def load_config(args):
    """
    Load the adapter's YAML configuration file.

    :param args: parsed command-line namespace; args.config holds the path,
                 resolved relative to this file's directory when it starts
                 with '.'
    :return: (dict) parsed configuration
    """
    path = args.config
    if path.startswith('.'):
        # Avoid shadowing the 'dir' builtin (was: dir = os.path.dirname(...))
        base_dir = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(base_dir, path)
    path = os.path.abspath(path)
    with open(path) as fd:
        # safe_load: plain yaml.load without a Loader can construct
        # arbitrary Python objects and is deprecated in PyYAML >= 5.1
        config = yaml.safe_load(fd)
    return config
+
+
def print_banner(log):
    """Emit the ASCII-art startup banner, one log line per row"""
    log.info(" _____ _______ _____ _ _ ____ _ _______ ")
    log.info(" /\ | __ \__ __| __ \ /\ | \ | | / __ \| | |__ __|")
    log.info(" / \ | | | | | | | |__) | / \ | \| | | | | | | | | ")
    log.info(" / /\ \ | | | | | | | _ / / /\ \ | . ` | | | | | | | | ")
    log.info(" / ____ \| |__| | | | | | \ \ / ____ \| |\ | | |__| | |____| | ")
    log.info(" /_/ \_\_____/ |_| |_| _\_\/_/ \_\_| \_| \____/|______|_| ")
    log.info(" /\ | | | | ")
    log.info(" / \ __| | __ _ _ __ | |_ ___ _ __ ")
    log.info(" / /\ \ / _` |/ _` | '_ \| __/ _ \ '__| ")
    log.info(" / ____ \ (_| | (_| | |_) | || __/ | ")
    log.info(" /_/ \_\__,_|\__,_| .__/ \__\___|_| ")
    log.info(" | | ")
    log.info(" |_| ")
    log.info("")
    log.info('(to stop: press Ctrl-C)')
+
+
+@implementer(IComponent)
+class Main(object):
+
    def __init__(self):
        """
        Parse arguments, load configuration, set up logging, then start all
        adapter components and (optionally) the heartbeat loops.
        """
        self.args = args = parse_args()
        self.config = load_config(args)

        # Net verbosity: each -v raises the log level, each -q lowers it
        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
        self.log = setup_logging(self.config.get('logging', {}),
                                 args.instance_id,
                                 verbosity_adjust=verbosity_adjust)
        self.log.info('container-number-extractor',
                      regex=args.container_name_regex)

        if args.debug_enabled:
            setup_remote_debug(args.debug_host, args.debug_port, self.log)

        self.adtran_olt_adapter_version = self.get_version()
        self.log.info('ADTRAN-OLT-Adapter-Version', version=self.adtran_olt_adapter_version)

        if not args.no_banner:
            print_banner(self.log)

        # Populated later by startup_components()
        self.adapter = None
        self.core_proxy = None
        self.adapter_proxy = None

        # Create a unique instance id using the passed-in instance id and
        # UTC timestamp
        # NOTE(review): 'timestamp' as an attribute is removed in arrow >= 1.0
        # (became a method) -- confirm the pinned arrow version
        current_time = arrow.utcnow().timestamp
        self.instance_id = self.args.instance_id + '_' + str(current_time)

        self.core_topic = args.core_topic
        self.listening_topic = args.name
        self.startup_components()

        if not args.no_heartbeat:
            self.start_heartbeat()
            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+ def get_version(self):
+ path = defs['version_file']
+ if not path.startswith('/'):
+ dir = os.path.dirname(os.path.abspath(__file__))
+ path = os.path.join(dir, path)
+
+ path = os.path.abspath(path)
+ version_file = open(path, 'r')
+ v = version_file.read()
+
+ # Use Version to validate the version string - exception will be raised
+ # if the version is invalid
+ Version(v)
+
+ version_file.close()
+ return v
+
    def start(self):
        """Run the reactor; only returns on keyboard interrupt"""
        self.start_reactor()  # will not return except Keyboard interrupt

    def stop(self):
        # Nothing to do here; component teardown happens in
        # shutdown_components() just before the reactor stops
        pass

    def get_args(self):
        """Allow access to command line args"""
        return self.args

    def get_config(self):
        """Allow access to content of config file"""
        return self.config

    def _get_adapter_config(self):
        # Returns a default/empty AdapterConfig protobuf; placeholder for
        # future adapter-specific configuration
        cfg = AdapterConfig()
        return cfg
+
+ @inlineCallbacks
+ def startup_components(self):
+ try:
+ self.log.info('starting-internal-components',
+ consul=self.args.consul,
+ etcd=self.args.etcd)
+
+ registry.register('main', self)
+
+ # Update the logger to output the vcore id.
+ self.log = update_logging(instance_id=self.instance_id,
+ vcore_id=None)
+
+ yield registry.register(
+ 'kafka_cluster_proxy',
+ KafkaProxy(
+ self.args.consul,
+ self.args.kafka_cluster,
+ config=self.config.get('kafka-cluster-proxy', {})
+ )
+ ).start()
+
+ config = self._get_adapter_config()
+
+ self.core_proxy = CoreProxy(
+ kafka_proxy=None,
+ core_topic=self.core_topic,
+ my_listening_topic=self.listening_topic)
+
+ self.adapter_proxy = AdapterProxy(
+ kafka_proxy=None,
+ core_topic=self.core_topic,
+ my_listening_topic=self.listening_topic)
+
+ self.adapter = AdtranOltAdapter(core_proxy=self.core_proxy,
+ adapter_proxy=self.adapter_proxy,
+ config=config)
+
+ adtran_request_handler = AdapterRequestFacade(adapter=self.adapter)
+
+ yield registry.register(
+ 'kafka_adapter_proxy',
+ IKafkaMessagingProxy(
+ kafka_host_port=self.args.kafka_adapter,
+ # TODO: Add KV Store object reference
+ kv_store=self.args.backend,
+ default_topic=self.args.name,
+ group_id_prefix=self.args.instance_id,
+ target_cls=adtran_request_handler
+ )
+ ).start()
+
+ self.core_proxy.kafka_proxy = get_messaging_proxy()
+ self.adapter_proxy.kafka_proxy = get_messaging_proxy()
+
+ # retry for ever
+ res = yield self._register_with_core(-1)
+
+ self.log.info('started-internal-services')
+
+ except Exception as e:
+ self.log.exception('Failure-to-start-all-components', e=e)
+
+ @inlineCallbacks
+ def shutdown_components(self):
+ """Execute before the reactor is shut down"""
+ self.log.info('exiting-on-keyboard-interrupt')
+ for component in reversed(registry.iterate()):
+ yield component.stop()
+
+ import threading
+ self.log.info('THREADS:')
+ main_thread = threading.current_thread()
+ for t in threading.enumerate():
+ if t is main_thread:
+ continue
+ if not t.isDaemon():
+ continue
+ self.log.info('joining thread {} {}'.format(
+ t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
+ t.join()
+
+ def start_reactor(self):
+ from twisted.internet import reactor
+ reactor.callWhenRunning(
+ lambda: self.log.info('twisted-reactor-started'))
+ reactor.addSystemEventTrigger('before', 'shutdown',
+ self.shutdown_components)
+ reactor.run()
+
+ @inlineCallbacks
+ def _register_with_core(self, retries):
+ while 1:
+ try:
+ resp = yield self.core_proxy.register(
+ self.adapter.adapter_descriptor(),
+ self.adapter.device_types())
+ if resp:
+ self.log.info('registered-with-core',
+ coreId=resp.instance_id)
+ returnValue(resp)
+ except TimeOutError as e:
+ self.log.warn("timeout-when-registering-with-core", e=e)
+ if retries == 0:
+ self.log.exception("no-more-retries", e=e)
+ raise
+ else:
+ retries = retries if retries < 0 else retries - 1
+ yield asleep(defs['retry_interval'])
+ except Exception as e:
+ self.log.exception("failed-registration", e=e)
+ raise
+
+ def start_heartbeat(self):
+
+ t0 = time.time()
+ t0s = time.ctime(t0)
+
+ def heartbeat():
+ self.log.debug(status='up', since=t0s, uptime=time.time() - t0)
+
+ lc = LoopingCall(heartbeat)
+ lc.start(10)
+
+ # Temporary function to send a heartbeat message to the external kafka
+ # broker
+ def start_kafka_cluster_heartbeat(self, instance_id):
+ # For heartbeat we will send a message to a specific "voltha-heartbeat"
+ # topic. The message is a protocol buf
+ # message
+ message = dict(
+ type='heartbeat',
+ adapter=self.args.name,
+ instance=instance_id,
+ ip=get_my_primary_local_ipv4()
+ )
+ topic = defs['heartbeat_topic']
+
+ def send_msg(start_time):
+ try:
+ kafka_cluster_proxy = get_kafka_proxy()
+ if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+ # self.log.debug('kafka-proxy-available')
+ message['ts'] = arrow.utcnow().timestamp
+ message['uptime'] = time.time() - start_time
+ # self.log.debug('start-kafka-heartbeat')
+ kafka_cluster_proxy.send_message(topic, dumps(message))
+ else:
+ self.log.error('kafka-proxy-unavailable')
+ except Exception, err:
+ self.log.exception('failed-sending-message-heartbeat', e=err)
+
+ try:
+ t0 = time.time()
+ lc = LoopingCall(send_msg, t0)
+ lc.start(10)
+ except Exception, e:
+ self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+# Script entry point: build the Main component (parses args, wires all
+# internal components) and run the Twisted reactor until interrupted.
+if __name__ == '__main__':
+    Main().start()
diff --git a/adapters/adtran_olt/net/__init__.py b/adapters/adtran_olt/net/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/adtran_olt/net/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_olt/net/pio_zmq.py b/adapters/adtran_olt/net/pio_zmq.py
new file mode 100644
index 0000000..d50b686
--- /dev/null
+++ b/adapters/adtran_olt/net/pio_zmq.py
@@ -0,0 +1,126 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import random
+from adapters.adtran_common.net.adtran_zmq import AdtranZmqClient
+from enum import IntEnum
+
+DEFAULT_PIO_TCP_PORT = 5555
+#DEFAULT_PIO_TCP_PORT = 5657
+
+
+class PioClient(AdtranZmqClient):
+ """
+ Adtran ZeroMQ Client for packet in/out service
+ """
+ def __init__(self, ip_address, rx_callback, port):
+ super(PioClient, self).__init__(ip_address, rx_callback, port)
+ self._seq_number = random.randint(1, 2**32)
+
+ class UrlType(IntEnum):
+ PACKET_IN = 0 # Packet In
+ PACKET_OUT = 1 # Packet Out
+ EVCMAPS_REQUEST = 2 # EVC-MAPs request
+ EVCMAPS_RESPONSE = 3 # EVC-MAPs response
+ UNKNOWN = 4 # UNKNOWN URL
+
+ def get_url_type(self, packet):
+ url_type = PioClient.UrlType.UNKNOWN
+ message = json.loads(packet)
+ if 'url' in message:
+ if message['url'] == 'adtran-olt-of-control/packet-in':
+ url_type = PioClient.UrlType.PACKET_IN
+ elif message['url'] == 'adtran-olt-of-control/packet-out':
+ url_type = PioClient.UrlType.PACKET_OUT
+ elif message['url'] == 'adtran-olt-of-control/evc-map-response':
+ url_type = PioClient.UrlType.EVCMAPS_RESPONSE
+ elif message['url'] == 'adtran-olt-of-control/evc-map-request':
+ url_type = PioClient.UrlType.EVCMAPS_REQUEST
+ return url_type
+
+ def decode_packet(self, packet):
+ from scapy.layers.l2 import Ether
+ try:
+ message = json.loads(packet)
+ self.log.debug('message', message=message)
+
+ for field in ['url', 'evc-map-name', 'total-len', 'port-number', 'message-contents']:
+ assert field in message, "Missing field '{}' in received packet".format(field)
+
+ decoded = message['message-contents'].decode('base64')
+
+ assert len(decoded.encode('hex'))/2 == message['total-len'], \
+ 'Decoded length ({}) != Message Encoded length ({})'.\
+ format(len(decoded.encode('hex')), message['total-len'])
+
+ return int(message['port-number']), message['evc-map-name'], Ether(decoded)
+
+ except Exception as e:
+ self.log.exception('decode', e=e)
+ raise
+
+ @property
+ def sequence_number(self):
+ if self._seq_number >= 2**32:
+ self._seq_number = 0
+ else:
+ self._seq_number += 1
+
+ return self._seq_number
+
+ def encode_packet(self, egress_port, packet, map_name='TODO', exception_type=''):
+ """
+ Encode a message for transmission as a Packet Out
+ :param egress_port: (int) egress physical port number
+ :param packet: (str) actual message
+ :param map_name: (str) EVC-MAP Name
+ :param exception_type: (str) Type of exception
+ """
+ return json.dumps({
+ 'url': 'adtran-olt-of-control/packet-out',
+ 'buffer-id': self.sequence_number,
+ 'total-len': len(packet),
+ 'evc-map-name': map_name,
+ 'exception-type': exception_type,
+ 'port-number': egress_port,
+ 'message-contents': packet.encode('base64')
+ })
+
+ def query_request_packet(self):
+ """
+ Create query-request to get all installed exceptions
+ :return: Request string
+ """
+ return json.dumps({
+ 'url': 'adtran-olt-of-control/evc-map-request'
+ })
+
+ def decode_query_response_packet(self, packet, map_name=None):
+ """
+ Create query-request to get all installed exceptions
+ :param map_name: (str) EVC-MAP Name (None=all)
+ :param packet: returned query response packet
+ :return: list of evcmaps and associated exceptions
+ """
+ from scapy.layers.l2 import Ether
+ message = json.loads(packet)
+ self.log.debug('message', message=message)
+
+ if 'url' in message and message['url'] == 'adtran-olt-of-control/evc-map-response':
+ maps=message['evc-map-list']
+ if maps is not None:
+ self.log.debug('evc-maps-query-response', maps=maps)
+ return maps
+ return []
diff --git a/adapters/adtran_olt/net/pon_zmq.py b/adapters/adtran_olt/net/pon_zmq.py
new file mode 100644
index 0000000..aa42917
--- /dev/null
+++ b/adapters/adtran_olt/net/pon_zmq.py
@@ -0,0 +1,61 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import struct
+import binascii
+from adapters.adtran_common.net.adtran_zmq import AdtranZmqClient
+
+DEFAULT_PON_AGENT_TCP_PORT = 5656
+
+
+class PonClient(AdtranZmqClient):
+    """
+    Adtran ZeroMQ Client for PON Agent service
+
+    Wire format is JSON; OMCI frame contents travel hex/base64 encoded
+    using Python 2 str codecs.
+    """
+    def __init__(self, ip_address, rx_callback, port):
+        super(PonClient, self).__init__(ip_address, rx_callback, port)
+
+    def encode_omci_packet(self, msg, pon_index, onu_id):
+        """
+        Create an OMCI Tx Packet for the specified ONU
+
+        :param msg: (str) OMCI message to send, as a hex string
+        :param pon_index: (unsigned int) PON Port index
+        :param onu_id: (unsigned int) ONU ID
+
+        :return: (bytes) octet string to send
+        """
+        # hex string -> raw bytes -> base64 for JSON transport
+        # (Python 2 string codecs).
+        return json.dumps({"operation": "NOTIFY",
+                           "url": "adtran-olt-pon-control/omci-message",
+                           "pon-id": pon_index,
+                           "onu-id": onu_id,
+                           "message-contents": msg.decode("hex").encode("base64")
+                           })
+
+    def decode_packet(self, packet):
+        """
+        Decode the PON-Agent packet provided by the ZMQ client
+
+        :param packet: (bytes) Packet
+        :return: (long, long, bytes, boolean) PON Index, ONU ID, Frame Contents (OMCI or Ethernet),\
+                 and a flag indicating if it is OMCI
+        """
+        msg = json.loads(packet)
+        pon_id = msg['pon-id']
+        onu_id = msg['onu-id']
+        msg_data = msg['message-contents'].decode("base64")
+        # OMCI frames are NOTIFY operations on an omci-message URL.
+        is_omci = msg['operation'] == "NOTIFY" and 'omci-message' in msg['url']
+
+        return pon_id, onu_id, msg_data, is_omci
diff --git a/adapters/adtran_olt/nni_port.py b/adapters/adtran_olt/nni_port.py
new file mode 100644
index 0000000..af11e9b
--- /dev/null
+++ b/adapters/adtran_olt/nni_port.py
@@ -0,0 +1,457 @@
+#
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import random
+import arrow
+
+import structlog
+import xmltodict
+from adapters.adtran_common.port import AdtnPort
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed, fail
+from twisted.python.failure import Failure
+from pyvoltha.protos.common_pb2 import OperStatus, AdminState
+from pyvoltha.protos.device_pb2 import Port
+from pyvoltha.protos.logical_device_pb2 import LogicalPort
+from pyvoltha.protos.openflow_13_pb2 import OFPPF_100GB_FD, OFPPF_FIBER, OFPPS_LIVE, ofp_port
+
+
+class NniPort(AdtnPort):
+ """
+ Northbound network port, often Ethernet-based
+ """
+ def __init__(self, parent, **kwargs):
+ super(NniPort, self).__init__(parent, **kwargs)
+
+ # TODO: Weed out those properties supported by common 'Port' object
+
+ self.log = structlog.get_logger(port_no=kwargs.get('port_no'))
+ self.log.info('creating')
+
+ # ONOS/SEBA wants 'nni-<port>' for port names, OLT NETCONF wants their
+ # name (something like hundred-gigabit-ethernet 0/1) which is reported
+ # when we enumerated the ports
+ self._physical_port_name = kwargs.get('name', 'nni-{}'.format(self._port_no))
+ self._logical_port_name = 'nni-{}'.format(self._port_no)
+
+ self._logical_port = None
+
+ self.sync_tick = 10.0
+
+ self._stats_tick = 5.0
+ self._stats_deferred = None
+
+ # Local cache of NNI configuration
+ self._ianatype = '<type xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">ianaift:ethernetCsmacd</type>'
+
+ # And optional parameters
+ # TODO: Currently cannot update admin/oper status, so create this enabled and active
+ # self._admin_state = kwargs.pop('admin_state', AdminState.UNKNOWN)
+ # self._oper_status = kwargs.pop('oper_status', OperStatus.UNKNOWN)
+ self._enabled = True
+ self._admin_state = AdminState.ENABLED
+ self._oper_status = OperStatus.ACTIVE
+
+ self._label = self._physical_port_name
+ self._mac_address = kwargs.pop('mac_address', '00:00:00:00:00:00')
+ # TODO: Get with JOT and find out how to pull out MAC Address via NETCONF
+ # TODO: May need to refine capabilities into current, advertised, and peer
+
+ self._ofp_capabilities = kwargs.pop('ofp_capabilities', OFPPF_100GB_FD | OFPPF_FIBER)
+ self._ofp_state = kwargs.pop('ofp_state', OFPPS_LIVE)
+ self._current_speed = kwargs.pop('current_speed', OFPPF_100GB_FD)
+ self._max_speed = kwargs.pop('max_speed', OFPPF_100GB_FD)
+ self._device_port_no = kwargs.pop('device_port_no', self._port_no)
+
+ # Statistics
+ self.rx_dropped = 0
+ self.rx_error_packets = 0
+ self.rx_ucast_packets = 0
+ self.rx_bcast_packets = 0
+ self.rx_mcast_packets = 0
+ self.tx_dropped = 0
+ self.rx_ucast_packets = 0
+ self.tx_bcast_packets = 0
+ self.tx_mcast_packets = 0
+
+ def __str__(self):
+ return "NniPort-{}: Admin: {}, Oper: {}, parent: {}".format(self._port_no,
+ self._admin_state,
+ self._oper_status,
+ self._parent)
+
+ def get_port(self):
+ """
+ Get the VOLTHA PORT object for this port
+ :return: VOLTHA Port object
+ """
+ self.log.debug('get-port-status-update', port=self._port_no,
+ label=self._label)
+ if self._port is None:
+ self._port = Port(port_no=self._port_no,
+ label=self._label,
+ type=Port.ETHERNET_NNI,
+ admin_state=self._admin_state,
+ oper_status=self._oper_status)
+
+ if self._port.admin_state != self._admin_state or\
+ self._port.oper_status != self._oper_status:
+
+ self.log.debug('get-port-status-update', admin_state=self._admin_state,
+ oper_status=self._oper_status)
+ self._port.admin_state = self._admin_state
+ self._port.oper_status = self._oper_status
+
+ return self._port
+
+ @property
+ def iana_type(self):
+ return self._ianatype
+
+ def cancel_deferred(self):
+ super(NniPort, self).cancel_deferred()
+
+ d, self._stats_deferred = self._stats_deferred, None
+ try:
+ if d is not None and d.called:
+ d.cancel()
+ except:
+ pass
+
+ def _update_adapter_agent(self):
+ # adapter_agent add_port also does an update of port status
+ self.log.debug('update-adapter-agent', admin_state=self._admin_state,
+ oper_status=self._oper_status)
+ self.adapter_agent.add_port(self.olt.device_id, self.get_port())
+
+ def get_logical_port(self):
+ """
+ Get the VOLTHA logical port for this port
+ :return: VOLTHA logical port or None if not supported
+ """
+ def mac_str_to_tuple(mac):
+ """
+ Convert 'xx:xx:xx:xx:xx:xx' MAC address string to a tuple of integers.
+ Example: mac_str_to_tuple('00:01:02:03:04:05') == (0, 1, 2, 3, 4, 5)
+ """
+ return tuple(int(d, 16) for d in mac.split(':'))
+
+ if self._logical_port is None:
+ openflow_port = ofp_port(port_no=self._port_no,
+ hw_addr=mac_str_to_tuple(self._mac_address),
+ name=self._logical_port_name,
+ config=0,
+ state=self._ofp_state,
+ curr=self._ofp_capabilities,
+ advertised=self._ofp_capabilities,
+ peer=self._ofp_capabilities,
+ curr_speed=self._current_speed,
+ max_speed=self._max_speed)
+
+ self._logical_port = LogicalPort(id=self._logical_port_name,
+ ofp_port=openflow_port,
+ device_id=self._parent.device_id,
+ device_port_no=self._device_port_no,
+ root_port=True)
+ return self._logical_port
+
+ @inlineCallbacks
+ def finish_startup(self):
+
+ if self.state != AdtnPort.State.INITIAL:
+ returnValue('Done')
+
+ self.log.debug('final-startup')
+ # TODO: Start status polling of NNI interfaces
+ self.deferred = None # = reactor.callLater(3, self.do_stuff)
+
+ # Begin statistics sync
+ self._stats_deferred = reactor.callLater(self._stats_tick * 2, self._update_statistics)
+
+ try:
+ yield self.set_config('enabled', True)
+
+ super(NniPort, self).finish_startup()
+
+ except Exception as e:
+ self.log.exception('nni-start', e=e)
+ self._oper_status = OperStatus.UNKNOWN
+ self._update_adapter_agent()
+
+ returnValue('Enabled')
+
+ def finish_stop(self):
+
+ # NOTE: Leave all NNI ports active (may have inband management)
+ # TODO: Revisit leaving NNI Ports active on disable
+
+ return self.set_config('enabled', False)
+
+ @inlineCallbacks
+ def reset(self):
+ """
+ Set the NNI Port to a known good state on initial port startup. Actual
+ NNI 'Start' is done elsewhere
+ """
+ # if self.state != AdtnPort.State.INITIAL:
+ # self.log.error('reset-ignored', state=self.state)
+ # returnValue('Ignored')
+
+ self.log.info('resetting', label=self._label)
+
+ # Always enable our NNI ports
+
+ try:
+ results = yield self.set_config('enabled', True)
+ self._admin_state = AdminState.ENABLED
+ self._enabled = True
+ returnValue(results)
+
+ except Exception as e:
+ self.log.exception('reset', e=e)
+ self._admin_state = AdminState.UNKNOWN
+ raise
+
+ @inlineCallbacks
+ def set_config(self, leaf, value):
+ if isinstance(value, bool):
+ value = 'true' if value else 'false'
+
+ config = '<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">' + \
+ ' <interface>' + \
+ ' <name>{}</name>'.format(self._physical_port_name) + \
+ ' {}'.format(self._ianatype) + \
+ ' <{}>{}</{}>'.format(leaf, value, leaf) + \
+ ' </interface>' + \
+ '</interfaces>'
+ try:
+ results = yield self._parent.netconf_client.edit_config(config)
+ returnValue(results)
+
+ except Exception as e:
+ self.log.exception('set', leaf=leaf, value=value, e=e)
+ raise
+
+ def get_nni_config(self):
+ config = '<filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">' + \
+ ' <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">' + \
+ ' <interface>' + \
+ ' <name>{}</name>'.format(self._physical_port_name) + \
+ ' <enabled/>' + \
+ ' </interface>' + \
+ ' </interfaces>' + \
+ '</filter>'
+ return self._parent.netconf_client.get(config)
+
+ def get_nni_statistics(self):
+ state = '<filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">' + \
+ ' <interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">' + \
+ ' <interface>' + \
+ ' <name>{}</name>'.format(self._physical_port_name) + \
+ ' <admin-status/>' + \
+ ' <oper-status/>' + \
+ ' <statistics/>' + \
+ ' </interface>' + \
+ ' </interfaces>' + \
+ '</filter>'
+ return self._parent.netconf_client.get(state)
+
+ def sync_hardware(self):
+ if self.state == AdtnPort.State.RUNNING or self.state == AdtnPort.State.STOPPED:
+ def read_config(results):
+ #self.log.debug('read-config', results=results)
+ try:
+ result_dict = xmltodict.parse(results.data_xml)
+ interfaces = result_dict['data']['interfaces']
+ if 'if:interface' in interfaces:
+ entries = interfaces['if:interface']
+ else:
+ entries = interfaces['interface']
+
+ enabled = entries.get('enabled',
+ str(not self.enabled).lower()) == 'true'
+
+ if self.enabled == enabled:
+ return succeed('in-sync')
+
+ self.set_config('enabled', self.enabled)
+ self._oper_status = OperStatus.ACTIVE
+ self._update_adapter_agent()
+
+ except Exception as e:
+ self.log.exception('read-config', e=e)
+ return fail(Failure())
+
+ def failure(reason):
+ self.log.error('hardware-sync-failed', reason=reason)
+
+ def reschedule(_):
+ delay = self.sync_tick
+ delay += random.uniform(-delay / 10, delay / 10)
+ self.sync_deferred = reactor.callLater(delay, self.sync_hardware)
+
+ self.sync_deferred = self.get_nni_config()
+ self.sync_deferred.addCallbacks(read_config, failure)
+ self.sync_deferred.addBoth(reschedule)
+
+ def _decode_nni_statistics(self, entry):
+ # admin_status = entry.get('admin-status')
+ # oper_status = entry.get('oper-status')
+ # admin_status = entry.get('admin-status')
+ # phys_address = entry.get('phys-address')
+
+ stats = entry.get('statistics')
+ if stats is not None:
+ self.timestamp = arrow.utcnow().float_timestamp
+ self.rx_bytes = int(stats.get('in-octets', 0))
+ self.rx_ucast_packets = int(stats.get('in-unicast-pkts', 0))
+ self.rx_bcast_packets = int(stats.get('in-broadcast-pkts', 0))
+ self.rx_mcast_packets = int(stats.get('in-multicast-pkts', 0))
+ self.rx_error_packets = int(stats.get('in-errors', 0)) + int(stats.get('in-discards', 0))
+
+ self.tx_bytes = int(stats.get('out-octets', 0))
+ self.tx_ucast_packets = int(stats.get('out-unicast-pkts', 0))
+ self.tx_bcast_packets = int(stats.get('out-broadcast-pkts', 0))
+ self.tx_mcasy_packets = int(stats.get('out-multicast-pkts', 0))
+ self.tx_error_packets = int(stats.get('out-errors', 0)) + int(stats.get('out-discards', 0))
+
+ self.rx_packets = self.rx_ucast_packets + self.rx_mcast_packets + self.rx_bcast_packets
+ self.tx_packets = self.tx_ucast_packets + self.tx_mcast_packets + self.tx_bcast_packets
+ # No support for rx_crc_errors or bip_errors
+
+ def _update_statistics(self):
+ if self.state == AdtnPort.State.RUNNING:
+ def read_state(results):
+ # self.log.debug('read-state', results=results)
+ try:
+ result_dict = xmltodict.parse(results.data_xml)
+ entry = result_dict['data']['interfaces-state']['interface']
+ self._decode_nni_statistics(entry)
+ return succeed('done')
+
+ except Exception as e:
+ self.log.exception('read-state', e=e)
+ return fail(Failure())
+
+ def failure(reason):
+ self.log.error('update-stats-failed', reason=reason)
+
+ def reschedule(_):
+ delay = self._stats_tick
+ delay += random.uniform(-delay / 10, delay / 10)
+ self._stats_deferred = reactor.callLater(delay, self._update_statistics)
+
+ try:
+ self._stats_deferred = self.get_nni_statistics()
+ self._stats_deferred.addCallbacks(read_state, failure)
+ self._stats_deferred.addBoth(reschedule)
+
+ except Exception as e:
+ self.log.exception('nni-sync', port=self.name, e=e)
+ self._stats_deferred = reactor.callLater(self._stats_tick, self._update_statistics)
+
+
+class MockNniPort(NniPort):
+ """
+ A class similar to the 'Port' class in the VOLTHA but for a non-existent (virtual OLT)
+
+ TODO: Merge this with the Port class or cleanup where possible
+ so we do not duplicate fields/properties/methods
+ """
+
+ def __init__(self, parent, **kwargs):
+ super(MockNniPort, self).__init__(parent, **kwargs)
+
+ def __str__(self):
+ return "NniPort-mock-{}: Admin: {}, Oper: {}, parent: {}".format(self._port_no,
+ self._admin_state,
+ self._oper_status,
+ self._parent)
+
+ @staticmethod
+ def get_nni_port_state_results():
+ from ncclient.operations.retrieve import GetReply
+ raw = """
+ <?xml version="1.0" encoding="UTF-8"?>
+ <rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
+ xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0"
+ message-id="urn:uuid:59e71979-01bb-462f-b17a-b3a45e1889ac">
+ <data>
+ <interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
+ <interface><name>hundred-gigabit-ethernet 0/1</name></interface>
+ </interfaces-state>
+ </data>
+ </rpc-reply>
+ """
+ return GetReply(raw)
+
+ @staticmethod
+ def get_pon_port_state_results():
+ from ncclient.operations.retrieve import GetReply
+ raw = """
+ <?xml version="1.0" encoding="UTF-8"?>
+ <rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
+ xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0"
+ message-id="urn:uuid:59e71979-01bb-462f-b17a-b3a45e1889ac">
+ <data>
+ <interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
+ <interface><name>XPON 0/1</name></interface>
+ <interface><name>XPON 0/2</name></interface>
+ <interface><name>XPON 0/3</name></interface>
+ <interface><name>XPON 0/4</name></interface>
+ <interface><name>XPON 0/5</name></interface>
+ <interface><name>XPON 0/6</name></interface>
+ <interface><name>XPON 0/7</name></interface>
+ <interface><name>XPON 0/8</name></interface>
+ <interface><name>XPON 0/9</name></interface>
+ <interface><name>XPON 0/10</name></interface>
+ <interface><name>XPON 0/11</name></interface>
+ <interface><name>XPON 0/12</name></interface>
+ <interface><name>XPON 0/13</name></interface>
+ <interface><name>XPON 0/14</name></interface>
+ <interface><name>XPON 0/15</name></interface>
+ <interface><name>XPON 0/16</name></interface>
+ </interfaces-state>
+ </data>
+ </rpc-reply>
+ """
+ return GetReply(raw)
+
+ def reset(self):
+ """
+ Set the NNI Port to a known good state on initial port startup. Actual
+ NNI 'Start' is done elsewhere
+ """
+ if self.state != AdtnPort.State.INITIAL:
+ self.log.error('reset-ignored', state=self.state)
+ return fail()
+
+ self.log.info('resetting', label=self._label)
+
+ # Always enable our NNI ports
+
+ self._enabled = True
+ self._admin_state = AdminState.ENABLED
+ return succeed('Enabled')
+
+ def set_config(self, leaf, value):
+
+ if leaf == 'enabled':
+ self._enabled = value
+ else:
+ raise NotImplemented("Leaf '{}' is not supported".format(leaf))
+
+ return succeed('Success')
diff --git a/adapters/adtran_olt/onu.py b/adapters/adtran_olt/onu.py
new file mode 100644
index 0000000..648d33e
--- /dev/null
+++ b/adapters/adtran_olt/onu.py
@@ -0,0 +1,701 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import binascii
+import json
+import structlog
+from twisted.internet import reactor, defer
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+from pyvoltha.common.tech_profile.tech_profile import DEFAULT_TECH_PROFILE_TABLE_ID
+from pyvoltha.protos.device_pb2 import Device
+
+from adtran_olt_handler import AdtranOltHandler
+from adapters.adtran_common.net.adtran_rest import RestInvalidResponseCode
+
+_MAX_EXPEDITE_COUNT = 5
+_EXPEDITE_SECS = 2
+_HW_SYNC_SECS = 60
+
+
+class Onu(object):
+ """
+ Wraps an ONU
+ """
+ DEFAULT_PASSWORD = ''
+
    def __init__(self, onu_info):
        """
        :param onu_info: (dict) ONU creation parameters. Required keys:
                         'onu-id', 'pon', 'serial-number', 'device-id',
                         'password', 'uni-ports', 'enabled'. Optional:
                         'upstream-fec'.
        :raises ValueError: if no ONU ID was supplied
        """
        self._onu_id = onu_info['onu-id']
        if self._onu_id is None:
            raise ValueError('No ONU ID available')

        pon = onu_info['pon']
        self._olt = pon.olt
        self._pon_id = pon.pon_id
        self._name = '{}@{}'.format(pon.physical_port_name, self._onu_id)
        self.log = structlog.get_logger(pon_id=self._pon_id, onu_id=self._onu_id)

        self._valid = True  # Set false during delete/cleanup
        self._serial_number_base64 = Onu.string_to_serial_number(onu_info['serial-number'])
        self._serial_number_string = onu_info['serial-number']
        self._device_id = onu_info['device-id']
        self._password = onu_info['password']
        self._created = False
        # Proxy address lets the core route OMCI traffic for this ONU via the OLT
        self._proxy_address = Device.ProxyAddress(device_id=self.olt.device_id,
                                                  channel_id=self.olt.pon_id_to_port_number(self._pon_id),
                                                  onu_id=self._onu_id,
                                                  onu_session_id=self._onu_id)
        self._sync_tick = _HW_SYNC_SECS     # Hardware-sync period (seconds)
        self._expedite_sync = False         # Request a faster next sync pass
        self._expedite_count = 0            # Consecutive expedited passes so far
        self._resync_flows = False          # Flows need re-evaluation on next sync
        self._sync_deferred = None  # For sync of ONT config to hardware

        self._gem_ports = {}  # gem-id -> GemPort
        self._tconts = {}  # alloc-id -> TCont
        self._uni_ports = onu_info['uni-ports']

        # Provisionable items
        self._enabled = onu_info['enabled']
        self._upstream_fec_enable = onu_info.get('upstream-fec')

        # KPI related items
        self._rssi = -9999
        self._equalization_delay = 0
        self._fiber_length = 0
        self._timestamp = None  # Last time the KPI items were updated
+
+ def __str__(self):
+ return "ONU-{}:{}, SN: {}/{}".format(self._pon_id, self._onu_id,
+ self._serial_number_string, self._serial_number_base64)
+
    @staticmethod
    def serial_number_to_string(value):
        """
        Convert a base64-encoded ONU serial number into its printable form:
        4 ASCII vendor bytes followed by 8 uppercase hex digits.

        NOTE: Python 2 only (base64.decodestring and str.encode('hex')).

        :param value: (str) base64-encoded serial number
        :return: (str) vendor id + hex-encoded vendor-specific portion, upper-cased
        """
        sval = base64.decodestring(value)
        unique = [elem.encode("hex") for elem in sval[4:8]]
        return '{}{}{}{}{}'.format(sval[:4], unique[0], unique[1], unique[2], unique[3]).upper()
+
    @staticmethod
    def string_to_serial_number(value):
        """
        Convert a printable serial number (4 vendor characters + 8 hex digits)
        into its base64-encoded binary form. Inverse of serial_number_to_string.

        NOTE: Python 2 only (xrange, byte-string join).

        :param value: (str) e.g. 'ADTN12345678'
        :return: (str) base64 encoding of the 8-byte binary serial number
        """
        bvendor = [octet for octet in value[:4]]
        # Each pair of hex chars in positions 4..11 becomes one binary byte
        bunique = [binascii.a2b_hex(value[offset:offset + 2]) for offset in xrange(4, 12, 2)]
        bvalue = ''.join(bvendor + bunique)
        return base64.b64encode(bvalue)
+
    @property
    def olt(self):
        """The OLT handler that owns this ONU's PON"""
        return self._olt

    @property
    def pon(self):
        """(PonPort) the PON port this ONU is attached to"""
        return self.olt.southbound_ports[self._pon_id]

    @property
    def intf_id(self):
        """Interface ID of the parent PON"""
        return self.pon.intf_id

    @property
    def pon_id(self):
        """(int) PON index this ONU lives on"""
        return self._pon_id

    @property
    def onu_id(self):
        """(int) ONU ID on the PON"""
        return self._onu_id

    @property
    def device_id(self):
        """VOLTHA device ID of the child ONU device"""
        return self._device_id

    @property
    def name(self):
        """(str) '<physical-port-name>@<onu-id>' label for this ONU"""
        return self._name
+
    @property
    def upstream_fec_enable(self):
        """(bool) upstream FEC provisioned for this ONU"""
        return self._upstream_fec_enable

    @upstream_fec_enable.setter
    def upstream_fec_enable(self, value):
        assert isinstance(value, bool), 'upstream FEC enabled is a boolean'
        if self._upstream_fec_enable != value:
            self._upstream_fec_enable = value

            # Recalculate PON upstream FEC
            self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
+
    @property
    def password(self):
        """
        Get password. Base 64 format
        """
        return self._password

    @password.setter
    def password(self, value):
        """
        Set the password
        :param value: (str) base 64 encoded value
        """
        # Latched in only once: ignored after the first non-None assignment
        if self._password is None and value is not None:
            self._password = value
            reg_id = (value.decode('base64')).rstrip('\00').lstrip('\00')
            # Must remove any non-printable characters
            reg_id = ''.join([i if 127 > ord(i) > 31 else '_' for i in reg_id])
            # Generate alarm here for regID
            # NOTE(review): this 'voltha.extensions...' import path differs from
            # the 'pyvoltha.adapters.extensions...' paths used elsewhere in this
            # commit -- confirm it resolves in the containerized build.
            from voltha.extensions.alarms.onu.onu_active_alarm import OnuActiveAlarm
            self.log.info('onu-Active-Alarm', serial_number=self._serial_number_string)
            device = self._olt.adapter_agent.get_device(self._olt.device_id)

            OnuActiveAlarm(self._olt.alarms, self._olt.device_id, self._pon_id,
                           self._serial_number_string, reg_id, device.serial_number,
                           ipv4_address=device.ipv4_address).raise_alarm()
+
    @property
    def enabled(self):
        """(bool) ONU administrative enable state"""
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        if self._enabled != value:
            self._enabled = value
            self._resync_flows = True  # Flows must be re-evaluated on state change

            # Push new state to hardware (fire-and-forget deferred)
            self.set_config('enable', self._enabled)

            if self._enabled:
                self.start()
            else:
                self.stop()

            # Recalculate PON upstream FEC
            self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
+
    @property
    def uni_ports(self):
        """(list) UNI ports provisioned on this ONU"""
        return self._uni_ports

    @property
    def logical_port(self):
        """Return the logical PORT number of this ONU's UNI"""
        # TODO: once we support multiple UNIs, this needs to be revisited
        return self._uni_ports[0]

    @property
    def gem_ports(self):
        """(list) GemPort objects currently provisioned on this ONU"""
        return self._gem_ports.values()

    @property
    def proxy_address(self):
        """(Device.ProxyAddress) routing info for OMCI messages to this ONU"""
        return self._proxy_address

    @property
    def serial_number_64(self):
        """(str) serial number in base64 form (as sent to hardware)"""
        return self._serial_number_base64

    @property
    def serial_number(self):
        """(str) serial number in printable string form"""
        return self._serial_number_string

    @property
    def timestamp(self):
        # Last time the KPI items were updated
        return self._timestamp

    @timestamp.setter
    def timestamp(self, value):
        self._timestamp = value
+
    @property
    def rssi(self):
        """The received signal strength indication of the ONU"""
        return self._rssi

    @rssi.setter
    def rssi(self, value):
        if self._rssi != value:
            self._rssi = value
            # TODO: Notify anyone?

    @property
    def equalization_delay(self):
        """Equalization delay (bits)"""
        return self._equalization_delay

    @equalization_delay.setter
    def equalization_delay(self, value):
        if self._equalization_delay != value:
            self._equalization_delay = value
            # TODO: Notify anyone?

    @property
    def fiber_length(self):
        """Distance to ONU"""
        return self._fiber_length

    @fiber_length.setter
    def fiber_length(self, value):
        if self._fiber_length != value:
            self._fiber_length = value
            # TODO: Notify anyone?
+
+ def _cancel_deferred(self):
+ d, self._sync_deferred = self._sync_deferred, None
+
+ if d is not None and not d.called:
+ try:
+ d.cancel()
+ except Exception:
+ pass
+
    @inlineCallbacks
    def create(self, reflow=False):
        """
        Create (or reflow) this ONU to hardware
        :param reflow: (boolean) Flag, if True, indicating if this is a reflow ONU
                       information after an unmanaged OLT hardware reboot
        """
        self.log.debug('create', reflow=reflow)
        self._cancel_deferred()

        data = json.dumps({'onu-id': self._onu_id,
                           'serial-number': self._serial_number_base64,
                           'enable': self._enabled})
        uri = AdtranOltHandler.GPON_ONU_CONFIG_LIST_URI.format(self._pon_id)
        name = 'onu-create-{}-{}-{}: {}'.format(self._pon_id, self._onu_id,
                                                self._serial_number_base64, self._enabled)

        # First hardware sync happens quickly (5s) on initial creation,
        # otherwise on the normal sync tick
        first_sync = self._sync_tick if self._created else 5

        if not self._created or reflow:
            try:
                yield self.olt.rest_client.request('POST', uri, data=data, name=name)
                self._created = True

            except Exception as e:
                self.log.exception('onu-create', e=e)
                # See if it failed due to already being configured
                url = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
                url += '/serial-number'

                # NOTE(review): 'url' above is built but never used -- the GET
                # below re-requests 'uri' (the ONU list). Also the '!=' marks
                # the ONU as created when the reported serial number does NOT
                # match ours, which looks inverted. Confirm intended behavior.
                try:
                    results = yield self.olt.rest_client.request('GET', uri, name=name)
                    self.log.debug('onu-create-check', results=results)
                    if len(results) == 1 and results[0].get('serial-number', '') != self._serial_number_base64:
                        self._created = True

                except Exception as _e:
                    self.log.warn('onu-exists-check', pon_id=self.pon_id, onu_id=self.onu_id,
                                  serial_number=self.serial_number)

        self._sync_deferred = reactor.callLater(first_sync, self._sync_hardware)

        # Recalculate PON upstream FEC
        self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
        returnValue('created')
+
    @inlineCallbacks
    def delete(self):
        """
        Clean up ONU (gems/tconts). ONU removal from OLT h/w done by PonPort
        :return: (deferred)
        """
        self._valid = False  # Reject further operations on this ONU
        self._cancel_deferred()

        # Remove from H/W
        gem_ids = self._gem_ports.keys()
        alloc_ids = self._tconts.keys()

        dl = []
        for gem_id in gem_ids:
            dl.append(self.remove_gem_id(gem_id))

        try:
            yield defer.gatherResults(dl, consumeErrors=True)
        except Exception as _e:
            pass  # Best effort: continue cleanup even if GEM removal fails

        dl = []
        for alloc_id in alloc_ids:
            dl.append(self.remove_tcont(alloc_id))

        try:
            yield defer.gatherResults(dl, consumeErrors=True)
        except Exception as _e:
            pass  # Best effort: continue cleanup even if TCONT removal fails

        self._gem_ports.clear()
        self._tconts.clear()
        # Drop the OLT back-reference; keep a local handle for the calls below
        olt, self._olt = self._olt, None

        uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
        name = 'onu-delete-{}-{}-{}: {}'.format(self._pon_id, self._onu_id,
                                                self._serial_number_base64, self._enabled)
        try:
            yield olt.rest_client.request('DELETE', uri, name=name)

        except RestInvalidResponseCode as e:
            if e.code != 404:  # Already gone is not an error
                self.log.exception('onu-delete', e=e)

        except Exception as e:
            self.log.exception('onu-delete', e=e)

        # Release resource manager resources for this ONU
        pon_intf_id_onu_id = (self.pon_id, self.onu_id)
        olt.resource_mgr.free_pon_resources_for_onu(pon_intf_id_onu_id)

        returnValue('deleted')
+
    def start(self):
        """Kick off the periodic hardware-sync task immediately."""
        self._cancel_deferred()
        self._sync_deferred = reactor.callLater(0, self._sync_hardware)

    def stop(self):
        """Stop the periodic hardware-sync task."""
        self._cancel_deferred()

    def restart(self):
        """
        Re-create this ONU in hardware and restart hardware sync.

        :return: (deferred) result of create(), or an immediate success if
                 the ONU is being deleted
        """
        if not self._valid:
            return succeed('Deleting')

        self._cancel_deferred()
        self._sync_deferred = reactor.callLater(0, self._sync_hardware)

        return self.create()
+
    def _sync_hardware(self):
        """
        Periodic task that reconciles this ONU's provisioned state (enable,
        serial number, TCONTs, GEM ports, flows) with what the OLT hardware
        reports, re-flowing differences, then reschedules itself with jitter.
        """
        from codec.olt_config import OltConfig
        self.log.debug('sync-hardware')

        def read_config(results):
            # Compare hardware-reported config against our provisioned state,
            # queueing a deferred for each difference found
            self.log.debug('read-config', results=results)

            dl = []

            try:
                config = OltConfig.Pon.Onu.decode([results])
                assert self.onu_id in config, 'sync-onu-not-found-{}'.format(self.onu_id)
                config = config[self.onu_id]

                if self._enabled != config.enable:
                    dl.append(self.set_config('enable', self._enabled))

                if self.serial_number_64 != config.serial_number_64:
                    dl.append(self.set_config('serial-number', self.serial_number_64))

                if self._enabled:
                    # Sync TCONTs if everything else in sync
                    if len(dl) == 0:
                        dl.extend(sync_tconts(config.tconts))

                    # Sync GEM Ports if everything else in sync

                    if len(dl) == 0:
                        dl.extend(sync_gem_ports(config.gem_ports))

                    if len(dl) == 0:
                        sync_flows()

            except Exception as e:
                self.log.exception('hw-sync-read-config', e=e)

            # Run h/w sync again a bit faster if we had to sync anything
            self._expedite_sync = len(dl) > 0

            # TODO: do checks
            return defer.gatherResults(dl, consumeErrors=True)

        def sync_tconts(hw_tconts):
            # Delete extra, add missing, and re-flow mismatched TCONTs
            hw_alloc_ids = frozenset(hw_tconts.iterkeys())
            my_alloc_ids = frozenset(self._tconts.iterkeys())
            dl = []

            try:
                extra_alloc_ids = hw_alloc_ids - my_alloc_ids
                dl.extend(sync_delete_extra_tconts(extra_alloc_ids))

                missing_alloc_ids = my_alloc_ids - hw_alloc_ids
                dl.extend(sync_add_missing_tconts(missing_alloc_ids))

                matching_alloc_ids = my_alloc_ids & hw_alloc_ids
                matching_hw_tconts = {alloc_id: tcont
                                      for alloc_id, tcont in hw_tconts.iteritems()
                                      if alloc_id in matching_alloc_ids}
                dl.extend(sync_matching_tconts(matching_hw_tconts))

            except Exception as e2:
                self.log.exception('hw-sync-tconts', e=e2)

            return dl

        def sync_delete_extra_tconts(alloc_ids):
            # TCONTs present in hardware but not locally provisioned
            return [self.remove_tcont(alloc_id) for alloc_id in alloc_ids]

        def sync_add_missing_tconts(alloc_ids):
            # TCONTs provisioned locally but missing from hardware
            return [self.add_tcont(self._tconts[alloc_id], reflow=True) for alloc_id in alloc_ids]

        def sync_matching_tconts(hw_tconts):
            # Re-flow any TCONT whose traffic descriptor differs from hardware
            from xpon.traffic_descriptor import TrafficDescriptor

            dl = []
            # TODO: sync TD & Best Effort. Only other TCONT leaf is the key
            for alloc_id, hw_tcont in hw_tconts.iteritems():
                my_tcont = self._tconts[alloc_id]
                my_td = my_tcont.traffic_descriptor
                hw_td = hw_tcont.traffic_descriptor
                if my_td is None:
                    continue

                my_additional = TrafficDescriptor.AdditionalBwEligibility.\
                    to_string(my_td.additional_bandwidth_eligibility)

                # Bandwidth or eligibility mismatch forces a re-flow
                reflow = hw_td is None or \
                    my_td.fixed_bandwidth != hw_td.fixed_bandwidth or \
                    my_td.assured_bandwidth != hw_td.assured_bandwidth or \
                    my_td.maximum_bandwidth != hw_td.maximum_bandwidth or \
                    my_additional != hw_td.additional_bandwidth_eligibility

                # Best-effort sub-leaves only matter in BEST_EFFORT_SHARING mode
                if not reflow and \
                        my_td.additional_bandwidth_eligibility == \
                        TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING and \
                        my_td.best_effort is not None:

                    hw_be = hw_td.best_effort
                    my_be = my_td.best_effort

                    reflow = hw_be is None or \
                        my_be.bandwidth != hw_be.bandwidth or \
                        my_be.priority != hw_be.priority or \
                        my_be.weight != hw_be.weight

                if reflow:
                    dl.append(my_tcont.add_to_hardware(self.olt.rest_client))
            return dl

        def sync_gem_ports(hw_gem_ports):
            # Delete extra, add missing, and patch mismatched GEM ports
            hw_gems_ids = frozenset(hw_gem_ports.iterkeys())
            my_gems_ids = frozenset(self._gem_ports.iterkeys())
            dl = []

            try:
                extra_gems_ids = hw_gems_ids - my_gems_ids
                dl.extend(sync_delete_extra_gem_ports(extra_gems_ids))

                missing_gem_ids = my_gems_ids - hw_gems_ids
                dl.extend(sync_add_missing_gem_ports(missing_gem_ids))

                matching_gem_ids = my_gems_ids & hw_gems_ids
                matching_hw_gem_ports = {gem_id: gem_port
                                         for gem_id, gem_port in hw_gem_ports.iteritems()
                                         if gem_id in matching_gem_ids}

                dl.extend(sync_matching_gem_ports(matching_hw_gem_ports))
                # Any GEM change means flows must be re-evaluated next pass
                self._resync_flows |= len(dl) > 0

            except Exception as ex:
                self.log.exception('hw-sync-gem-ports', e=ex)

            return dl

        def sync_delete_extra_gem_ports(gem_ids):
            # GEM ports present in hardware but not locally provisioned
            return [self.remove_gem_id(gem_id) for gem_id in gem_ids]

        def sync_add_missing_gem_ports(gem_ids):
            # GEM ports provisioned locally but missing from hardware
            return [self.add_gem_port(self._gem_ports[gem_id], reflow=True)
                    for gem_id in gem_ids]

        def sync_matching_gem_ports(hw_gem_ports):
            # PATCH any GEM port whose key attributes differ from hardware
            dl = []
            for gem_id, hw_gem_port in hw_gem_ports.iteritems():
                gem_port = self._gem_ports[gem_id]

                if gem_port.alloc_id != hw_gem_port.alloc_id or\
                   gem_port.encryption != hw_gem_port.encryption or\
                   gem_port.omci_transport != hw_gem_port.omci_transport:
                    dl.append(gem_port.add_to_hardware(self.olt.rest_client,
                                                       operation='PATCH'))
            return dl

        def sync_flows():
            # Re-evaluate flows for this ONU, forcing reflow if requested
            from flow.flow_entry import FlowEntry

            reflow, self._resync_flows = self._resync_flows, False
            return FlowEntry.sync_flows_by_onu(self, reflow=reflow)

        def failure(_reason):
            # Read failures are expected while the ONU comes up; stay quiet
            # self.log.error('hardware-sync-get-config-failed', reason=_reason)
            pass

        def reschedule(_):
            # Next sync delay: slower when disabled, expedited (a bounded
            # number of consecutive times) when the last pass was out of sync
            import random
            delay = self._sync_tick if self._enabled else 5 * self._sync_tick

            # Speed up sequential resync a limited number of times if out of sync
            # With 60 second initial an typical worst case resync of 4 times, this
            # should resync an ONU and all it's gem-ports and tconts within <90 seconds
            if self._expedite_sync and self._enabled:
                self._expedite_count += 1
                if self._expedite_count < _MAX_EXPEDITE_COUNT:
                    delay = _EXPEDITE_SECS
            else:
                self._expedite_count = 0

            # +/-10% jitter avoids synchronized load across many ONUs
            delay += random.uniform(-delay / 10, delay / 10)
            self._sync_deferred = reactor.callLater(delay, self._sync_hardware)
            self._expedite_sync = False

        # If PON is not enabled, skip hw-sync. If ONU not enabled, do it but less
        # frequently
        if not self.pon.enabled:
            return reschedule('not-enabled')

        try:
            self._sync_deferred = self._get_config()
            self._sync_deferred.addCallbacks(read_config, failure)
            self._sync_deferred.addBoth(reschedule)

        except Exception as e:
            self.log.exception('hw-sync-main', e=e)
            return reschedule('sync-exception')
+
+ def _get_config(self):
+ uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self.onu_id)
+ name = 'pon-get-onu_config-{}-{}'.format(self._pon_id, self.onu_id)
+ return self.olt.rest_client.request('GET', uri, name=name)
+
+ def set_config(self, leaf, value):
+ self.log.debug('set-config', leaf=leaf, value=value)
+ data = json.dumps({leaf: value})
+ uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
+ name = 'onu-set-config-{}-{}-{}: {}'.format(self._pon_id, self._onu_id, leaf, value)
+ return self.olt.rest_client.request('PATCH', uri, data=data, name=name)
+
    @property
    def alloc_ids(self):
        """
        Get alloc-id's of all T-CONTs

        :return: (frozenset) alloc-ids currently provisioned on this ONU
        """
        return frozenset(self._tconts.keys())
+
    @inlineCallbacks
    def add_tcont(self, tcont, reflow=False):
        """
        Creates/ a T-CONT with the given alloc-id

        :param tcont: (TCont) Object that maintains the TCONT properties
        :param reflow: (boolean) If true, force add (used during h/w resync)
        :return: (deferred) hardware result, 'already created', 'Deleting', or
                 'resync needed' if the hardware write failed (next sync pass
                 will retry)
        """
        if not self._valid:
            returnValue('Deleting')

        if not reflow and tcont.alloc_id in self._tconts:
            returnValue('already created')

        self.log.info('add', tcont=tcont, reflow=reflow)
        # Record locally first; hardware failure is repaired by the sync task
        self._tconts[tcont.alloc_id] = tcont

        try:
            results = yield tcont.add_to_hardware(self.olt.rest_client)

        except Exception as e:
            self.log.exception('tcont', tcont=tcont, reflow=reflow, e=e)
            results = 'resync needed'

        returnValue(results)
+
    @inlineCallbacks
    def remove_tcont(self, alloc_id):
        """
        Remove a T-CONT from local state and from hardware.

        :param alloc_id: (int) alloc-id of the T-CONT to remove
        :return: (deferred) hardware result; 'nop' if not provisioned
        :raises Exception: re-raises unexpected hardware errors (404 is
                           treated as already-removed)
        """
        tcont = self._tconts.get(alloc_id)

        if tcont is None:
            returnValue('nop')

        del self._tconts[alloc_id]
        try:
            results = yield tcont.remove_from_hardware(self.olt.rest_client)

        except RestInvalidResponseCode as e:
            results = None
            if e.code != 404:  # Already gone is not an error
                self.log.exception('tcont-delete', e=e)

        except Exception as e:
            self.log.exception('delete', e=e)
            raise

        returnValue(results)
+
    def gem_port(self, gem_id):
        """Look up a GemPort by GEM id; None if not provisioned"""
        return self._gem_ports.get(gem_id)

    def gem_ids(self, tech_profile_id):
        """Get all GEM Port IDs used by this ONU"""
        # Multicast GEM ports are excluded; only unicast ports for the given
        # tech profile are returned, in ascending order
        assert tech_profile_id >= DEFAULT_TECH_PROFILE_TABLE_ID
        return sorted([gem_id for gem_id, gem in self._gem_ports.items()
                       if not gem.multicast and
                       tech_profile_id == gem.tech_profile_id])
+
    @inlineCallbacks
    def add_gem_port(self, gem_port, reflow=False):
        """
        Add a GEM Port to this ONU

        :param gem_port: (GemPort) GEM Port to add
        :param reflow: (boolean) If true, force add (used during h/w resync)
        :return: (deferred) hardware result, 'nop', 'Deleting', or
                 'resync needed' if the hardware write failed (next sync pass
                 will retry)
        """
        if not self._valid:
            returnValue('Deleting')

        if not reflow and gem_port.gem_id in self._gem_ports:
            returnValue('nop')

        self.log.info('add', gem_port=gem_port, reflow=reflow)
        # Record locally first; hardware failure is repaired by the sync task
        self._gem_ports[gem_port.gem_id] = gem_port

        try:
            results = yield gem_port.add_to_hardware(self.olt.rest_client)

        except Exception as e:
            self.log.exception('gem-port', gem_port=gem_port, reflow=reflow, e=e)
            results = 'resync needed'

        returnValue(results)
+
    @inlineCallbacks
    def remove_gem_id(self, gem_id):
        """
        Remove a GEM port from local state and from hardware.

        :param gem_id: (int) GEM port id to remove
        :return: (deferred) 'done' on success; 'nop' if not provisioned
        :raises Exception: re-raises unexpected hardware errors (404 is
                           treated as already-removed)
        """
        gem_port = self._gem_ports.get(gem_id)

        if gem_port is None:
            returnValue('nop')

        del self._gem_ports[gem_id]
        try:
            yield gem_port.remove_from_hardware(self.olt.rest_client)

        except RestInvalidResponseCode as e:
            if e.code != 404:  # Already gone is not an error
                self.log.exception('onu-delete', e=e)

        except Exception as ex:
            self.log.exception('gem-port-delete', e=ex)
            raise

        returnValue('done')
+
+ @staticmethod
+ def gem_id_to_gvid(gem_id):
+ """Calculate GEM VID (gvid) for a given GEM port id"""
+ return gem_id - 2048
diff --git a/adapters/adtran_olt/pon_port.py b/adapters/adtran_olt/pon_port.py
new file mode 100644
index 0000000..70ec564
--- /dev/null
+++ b/adapters/adtran_olt/pon_port.py
@@ -0,0 +1,886 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import random
+import arrow
+
+import structlog
+from adapters.adtran_common.port import AdtnPort
+from twisted.internet import reactor, defer
+from twisted.internet.defer import inlineCallbacks, returnValue
+from adtran_olt_handler import AdtranOltHandler
+from adapters.adtran_common.net.adtran_rest import RestInvalidResponseCode
+from codec.olt_config import OltConfig
+from onu import Onu
+from pyvoltha.adapters.extensions.alarms.onu.onu_los_alarm import OnuLosAlarm
+from pyvoltha.adapters.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
+from pyvoltha.protos.common_pb2 import AdminState
+from pyvoltha.protos.device_pb2 import Port
+import resources.adtranolt_platform as platform
+
+
+class PonPort(AdtnPort):
+ """
+ GPON Port
+ """
+ MAX_ONUS_SUPPORTED = 128
+ MAX_DEPLOYMENT_RANGE = 25000 # Meters (OLT-PB maximum)
+
+ _MCAST_ONU_ID = 253
+ _MCAST_ALLOC_BASE = 0x500
+
+ # AutoActivate should be used if xPON configuration is not supported
+ _SUPPORTED_ACTIVATION_METHODS = ['autodiscovery', 'autoactivate']
+ _SUPPORTED_AUTHENTICATION_METHODS = ['serial-number']
+
    def __init__(self, parent, **kwargs):
        """
        :param parent: OLT device handler that owns this PON port
        :param kwargs: must contain 'pon-id' (int, zero-based) and 'port_no';
                       remaining keys are consumed by the AdtnPort base class
        """
        super(PonPort, self).__init__(parent, **kwargs)
        assert 'pon-id' in kwargs, 'PON ID not found'

        self._parent = parent
        self._pon_id = kwargs['pon-id']
        self.log = structlog.get_logger(device_id=parent.device_id, pon_id=self._pon_id)
        self._port_no = kwargs['port_no']
        # Hardware port names are 1-based ('xpon 0/1' is pon-id 0)
        self._physical_port_name = 'xpon 0/{}'.format(self._pon_id+1)
        self._label = 'pon-{}'.format(self._pon_id)

        self._in_sync = False
        self._expedite_sync = False
        self._expedite_count = 0

        self._discovery_tick = 20.0                             # ONU discovery poll (seconds)
        self._no_onu_discover_tick = self._discovery_tick / 2   # Faster poll while no ONUs found
        self._discovered_onus = []  # List of serial numbers
        self._discovery_deferred = None  # Specifically for ONU discovery

        self._onus = {}  # serial_number-base64 -> ONU
        self._onu_by_id = {}  # onu-id -> ONU
        self._mcast_gem_ports = {}  # VLAN -> GemPort

        self._active_los_alarms = set()  # ONU-ID

        # xPON configuration
        self._activation_method = 'autoactivate'

        self._downstream_fec_enable = True
        self._upstream_fec_enable = True
        self._deployment_range = 25000          # Meters
        self._authentication_method = 'serial-number'
        self._mcast_aes = False

        # Statistics
        self.tx_bip_errors = 0
+
+ def __str__(self):
+ return "PonPort-{}: Admin: {}, Oper: {}, OLT: {}".format(self._label,
+ self._admin_state,
+ self._oper_status,
+ self.olt)
+
    def get_port(self):
        """
        Get the VOLTHA PORT object for this port
        :return: VOLTHA Port object
        """
        # Lazily construct and cache the Port protobuf
        if self._port is None:
            self._port = Port(port_no=self._port_no,
                              label=self._label,
                              type=Port.PON_OLT,
                              admin_state=self._admin_state,
                              oper_status=self._oper_status)

        return self._port
+
    @property
    def pon_id(self):
        """(int) zero-based PON index on the OLT"""
        return self._pon_id

    @property
    def onus(self):
        """
        Get a set of all ONUs. While the set is immutable, do not use this method
        to get a collection that you will iterate through that my yield the CPU
        such as inline callback. ONUs may be deleted at any time and they will
        set some references to other objects to NULL during the 'delete' call.
        Instead, get a list of ONU-IDs and iterate on these and call the 'onu'
        method below (which will return 'None' if the ONU has been deleted.

        :return: (frozenset) collection of ONU objects on this PON
        """
        return frozenset(self._onus.values())

    @property
    def onu_ids(self):
        """(frozenset) IDs of all ONUs currently on this PON"""
        return frozenset(self._onu_by_id.keys())

    def onu(self, onu_id):
        """Look up an ONU by ID; None if not present (e.g. deleted)"""
        return self._onu_by_id.get(onu_id)

    @property
    def in_service_onus(self):
        """(int) count of ONUs without an active LOS alarm"""
        return len({onu.onu_id for onu in self.onus
                    if onu.onu_id not in self._active_los_alarms})

    @property
    def closest_onu_distance(self):
        """(int) smallest reported fiber length of any ONU; -1 if none"""
        distance = -1
        for onu in self.onus:
            if onu.fiber_length < distance or distance == -1:
                distance = onu.fiber_length
        return distance
+
    @property
    def downstream_fec_enable(self):
        """(bool) downstream FEC provisioned on this PON"""
        return self._downstream_fec_enable

    @downstream_fec_enable.setter
    def downstream_fec_enable(self, value):
        assert isinstance(value, bool), 'downstream FEC enabled is a boolean'

        if self._downstream_fec_enable != value:
            self._downstream_fec_enable = value
            # Only push to hardware once the port is up and running
            if self.state == AdtnPort.State.RUNNING:
                self.deferred = self._set_pon_config("downstream-fec-enable", value)

    @property
    def upstream_fec_enable(self):
        """(bool) upstream FEC provisioned on this PON"""
        return self._upstream_fec_enable

    @upstream_fec_enable.setter
    def upstream_fec_enable(self, value):
        assert isinstance(value, bool), 'upstream FEC enabled is a boolean'
        if self._upstream_fec_enable != value:
            self._upstream_fec_enable = value
            # Only push to hardware once the port is up and running
            if self.state == AdtnPort.State.RUNNING:
                self.deferred = self._set_pon_config("upstream-fec-enable", value)

    @property
    def any_upstream_fec_enabled(self):
        """(bool) True if any enabled ONU on this PON requests upstream FEC"""
        for onu in self.onus:
            if onu.upstream_fec_enable and onu.enabled:
                return True
        return False
+
    @property
    def mcast_aes(self):
        """(bool) AES encryption for multicast GEM ports"""
        return self._mcast_aes

    @mcast_aes.setter
    def mcast_aes(self, value):
        assert isinstance(value, bool), 'MCAST AES is a boolean'
        if self._mcast_aes != value:
            self._mcast_aes = value
            if self.state == AdtnPort.State.RUNNING:
                pass  # TODO

    @property
    def deployment_range(self):
        """Maximum deployment range (in meters)"""
        return self._deployment_range

    @deployment_range.setter
    def deployment_range(self, value):
        """Maximum deployment range (in meters)"""
        if not 0 <= value <= PonPort.MAX_DEPLOYMENT_RANGE:
            raise ValueError('Deployment range should be 0..{} meters'.
                             format(PonPort.MAX_DEPLOYMENT_RANGE))
        if self._deployment_range != value:
            self._deployment_range = value
            # Only push to hardware once the port is up and running
            if self.state == AdtnPort.State.RUNNING:
                self.deferred = self._set_pon_config("deployment-range", value)
+
+ @property
+ def discovery_tick(self):
+ return self._discovery_tick * 10
+
+ @discovery_tick.setter
+ def discovery_tick(self, value):
+ if value < 0:
+ raise ValueError("Polling interval must be >= 0")
+
+ if self.discovery_tick != value:
+ self._discovery_tick = value / 10
+
+ try:
+ if self._discovery_deferred is not None and \
+ not self._discovery_deferred.called:
+ self._discovery_deferred.cancel()
+ except:
+ pass
+ self._discovery_deferred = None
+
+ if self._discovery_tick > 0:
+ self._discovery_deferred = reactor.callLater(self._discovery_tick,
+ self._discover_onus)
+
    @property
    def activation_method(self):
        """(str) ONU activation method; one of _SUPPORTED_ACTIVATION_METHODS"""
        return self._activation_method

    @activation_method.setter
    def activation_method(self, value):
        value = value.lower()
        if value not in PonPort._SUPPORTED_ACTIVATION_METHODS:
            raise ValueError('Invalid ONU activation method')

        self._activation_method = value

    @property
    def authentication_method(self):
        """(str) ONU authentication method; one of _SUPPORTED_AUTHENTICATION_METHODS"""
        return self._authentication_method

    @authentication_method.setter
    def authentication_method(self, value):
        value = value.lower()
        if value not in PonPort._SUPPORTED_AUTHENTICATION_METHODS:
            raise ValueError('Invalid ONU authentication method')
        self._authentication_method = value
+
+ def cancel_deferred(self):
+ super(PonPort, self).cancel_deferred()
+
+ d, self._discovery_deferred = self._discovery_deferred, None
+
+ try:
+ if d is not None and not d.called:
+ d.cancel()
+ except Exception as e:
+ pass
+
    def _update_adapter_agent(self):
        """
        Update the port status and state in the core
        """
        self.log.debug('update-adapter-agent', admin_state=self._admin_state,
                       oper_status=self._oper_status)

        # because the core does not provide methods for updating admin
        # and oper status per port, we need to copy any existing port
        # info so that we don't wipe out the peers
        if self._port is not None:
            agent_ports = self.adapter_agent.get_ports(self.olt.device_id, Port.PON_OLT)

            agent_port = next((ap for ap in agent_ports if ap.port_no == self._port_no), None)

            # copy current Port info
            if agent_port is not None:
                self._port = agent_port

        # set new states
        self._port.admin_state = self._admin_state
        self._port.oper_status = self._oper_status

        # adapter_agent add_port also does an update of existing port
        self.adapter_agent.add_port(self.olt.device_id, self.get_port())
+
    @inlineCallbacks
    def finish_startup(self):
        """
        Do all startup offline since REST may fail

        Reads the PON config from hardware, pushes any local settings that
        differ (enable, FEC, deployment range), restarts provisioned ONUs,
        then kicks off ONU discovery. On REST failure, reschedules itself.
        """
        if self.state != AdtnPort.State.INITIAL:
            returnValue('Done')

        self.log.debug('final-startup')
        results = None

        try:
            self.deferred = self._get_pon_config()
            results = yield self.deferred

        except Exception as e:
            # Retry the whole startup in 5 seconds if the initial read fails
            self.log.exception('initial-GET', e=e)
            self.deferred = reactor.callLater(5, self.finish_startup)
            returnValue(self.deferred)

        # Load config from hardware

        enabled = results.get('enabled', False)
        downstream_fec_enable = results.get('downstream-fec-enable', False)
        upstream_fec_enable = results.get('upstream-fec-enable', False)
        deployment_range = results.get('deployment-range', 25000)
        self._in_sync = True

        if enabled != self._enabled:
            try:
                self.deferred = self._set_pon_config("enabled", True)
                yield self.deferred

            except Exception as e:
                # Enable failure is fatal to startup -- retry in 3 seconds
                self.log.exception('final-startup-enable', e=e)
                self.deferred = reactor.callLater(3, self.finish_startup)
                returnValue(self.deferred)

        if downstream_fec_enable != self._downstream_fec_enable:
            try:
                self.deferred = self._set_pon_config("downstream-fec-enable",
                                                     self._downstream_fec_enable)
                yield self.deferred

            except Exception as e:
                self.log.warning('final-startup-downstream-FEC', e=e)
                self._in_sync = False
                # Non-fatal. May have failed due to no SFQ in slot

        if upstream_fec_enable != self._upstream_fec_enable:
            try:
                self.deferred = self._set_pon_config("upstream-fec-enable",
                                                     self._upstream_fec_enable)
                yield self.deferred

            except Exception as e:
                self.log.warning('final-startup-upstream-FEC', e=e)
                self._in_sync = False
                # Non-fatal. May have failed due to no SFQ in slot

        if deployment_range != self._deployment_range:
            try:
                self.deferred = self._set_pon_config("deployment-range",
                                                     self._deployment_range)
                yield self.deferred

            except Exception as e:
                self.log.warning('final-startup-deployment-range', e=e)
                self._in_sync = False
                # Non-fatal. May have failed due to no SFQ in slot

        if len(self._onus) > 0:
            # Restart all provisioned ONUs (re-create in hardware)
            dl = []
            for onu_id in self.onu_ids:
                onu = self.onu(onu_id)
                if onu is not None:
                    dl.append(onu.restart())
            yield defer.gatherResults(dl, consumeErrors=True)

        # Begin to ONU discovery and hardware sync

        self._discovery_deferred = reactor.callLater(5, self._discover_onus)

        # If here, initial settings were successfully written to hardware

        super(PonPort, self).finish_startup()
        returnValue('Enabled')
+
    @inlineCallbacks
    def finish_stop(self):
        """
        Shut the PON down: delete all ONUs and disable the port in hardware.

        :return: (deferred) gathered results of ONU deletions + port disable
        """
        # Remove all existing ONUs. They will need to be re-discovered
        dl = []
        onu_ids = frozenset(self._onu_by_id.keys())
        for onu_id in onu_ids:
            try:
                dl.append(self.delete_onu(onu_id))

            except Exception as e:
                # Best effort: keep deleting the remaining ONUs
                self.log.exception('onu-cleanup', onu_id=onu_id, e=e)

        dl.append(self._set_pon_config("enabled", False))
        results = yield defer.gatherResults(dl, consumeErrors=True)
        returnValue(results)
+
    @inlineCallbacks
    def reset(self):
        """
        Set the PON Port to a known good state on initial port startup. Actual
        PON 'Start' is done elsewhere

        Reads the current enable state from hardware, forces it to the desired
        initial state, then deletes any ONUs already provisioned in hardware
        so they can be rediscovered cleanly.
        """
        initial_port_state = AdminState.ENABLED
        self.log.info('reset', initial_state=initial_port_state)

        try:
            self.deferred = self._get_pon_config()
            results = yield self.deferred
            enabled = results.get('enabled', False)

        except Exception as e:
            self.log.exception('get-config', e=e)
            enabled = False

        enable = initial_port_state == AdminState.ENABLED

        if enable != enabled:
            try:
                self.deferred = yield self._set_pon_config("enabled", enable)
            except Exception as e:
                self.log.exception('reset-enabled', e=e, enabled=enabled)

        # TODO: Move to 'set_pon_config' method and also make sure GRPC/Port is ok
        self._admin_state = AdminState.ENABLED if enable else AdminState.DISABLED

        try:
            # Walk the provisioned ONU list and disable any existing ONUs
            results = yield self._get_onu_config()

            if isinstance(results, list) and len(results) > 0:
                onu_configs = OltConfig.Pon.Onu.decode(results)
                dl = []
                for onu_id in onu_configs.iterkeys():
                    dl.append(self.delete_onu(onu_id))

                try:
                    if len(dl) > 0:
                        yield defer.gatherResults(dl, consumeErrors=True)

                except Exception as e:
                    self.log.exception('rest-ONU-delete', e=e)
                    pass  # Non-fatal

        except Exception as e:
            self.log.exception('onu-delete', e=e)

        returnValue('Reset complete')
+
+ def gem_ids(self, logical_port, flow_vlan, multicast_gems=False):
+ """
+ Get all GEM Port IDs used on a given PON
+
+ :param logical_port: (int) Logical port number of ONU. None if for all ONUs
+ on PON, if Multicast, VID for Multicast, or None for all
+ Multicast GEMPorts
+ :param flow_vlan: (int) If not None, this is the ingress tag (c-tag)
+ :param multicast_gems: (boolean) Select from available Multicast GEM Ports
+ :return: (dict) data_gem -> key -> onu-id, value -> tuple(sorted list of GEM Port IDs, onu_vid)
+ mcast_gem-> key -> mcast-vid, value -> GEM Port IDs
+ """
+ gem_ids = {}
+
+ if multicast_gems:
+ # Multicast GEMs belong to the PON, but we may need to register them on
+ # all ONUs. TODO: Rework when BBF MCAST is addressed in VOLTHA v2.O+
+ for vlan, gem_port in self._mcast_gem_ports.iteritems():
+ if logical_port is None or (logical_port == vlan and logical_port in self.olt.multicast_vlans):
+ gem_ids[vlan] = ([gem_port.gem_id], None)
+ else:
+ raise NotImplemented('TODO: This is deprecated')
+ # for onu_id, onu in self._onu_by_id.iteritems():
+ # if logical_port is None or logical_port == onu.logical_port:
+ # gem_ids[onu_id] = (onu.gem_ids(), flow_vlan)
+ return gem_ids
+
+    def _get_pon_config(self):
+        # Issue a RESTCONF GET for this PON's configuration subtree.
+        # Returns a deferred firing with the decoded response body.
+        uri = AdtranOltHandler.GPON_PON_CONFIG_URI.format(self._pon_id)
+        name = 'pon-get-config-{}'.format(self._pon_id)
+        return self._parent.rest_client.request('GET', uri, name=name)
+
+    def _get_onu_config(self, onu_id=None):
+        # RESTCONF GET of ONU configuration.  With onu_id=None the whole
+        # ONU list for this PON is fetched, otherwise just the single ONU.
+        if onu_id is None:
+            uri = AdtranOltHandler.GPON_ONU_CONFIG_LIST_URI.format(self._pon_id)
+        else:
+            uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, onu_id)
+
+        name = 'pon-get-onu_config-{}-{}'.format(self._pon_id, onu_id)
+        return self._parent.rest_client.request('GET', uri, name=name)
+
+    def _set_pon_config(self, leaf, value):
+        # PATCH a single leaf of this PON's configuration.
+        # Returns a deferred firing when the REST request completes.
+        data = json.dumps({leaf: value})
+        uri = AdtranOltHandler.GPON_PON_CONFIG_URI.format(self._pon_id)
+        name = 'pon-set-config-{}-{}-{}'.format(self._pon_id, leaf, str(value))
+        # If no optics on PON, then PON config fails with status 400, suppress this
+        suppress_error = len(self.onu_ids) == 0
+        return self._parent.rest_client.request('PATCH', uri, data=data, name=name,
+                                                suppress_error=suppress_error)
+
+ def _discover_onus(self):
+ self.log.debug('discovery', state=self._admin_state, in_sync=self._in_sync)
+ if self._admin_state == AdminState.ENABLED:
+ if self._in_sync:
+ data = json.dumps({'pon-id': self._pon_id})
+ uri = AdtranOltHandler.GPON_PON_DISCOVER_ONU
+ name = 'pon-discover-onu-{}'.format(self._pon_id)
+
+ self._discovery_deferred = self._parent.rest_client.request('POST', uri, data, name=name)
+ self._discovery_deferred.addBoth(self._onu_discovery_init_complete)
+ else:
+ self.discovery_deferred = reactor.callLater(0,
+ self._onu_discovery_init_complete,
+ None)
+
+    def _onu_discovery_init_complete(self, _result):
+        """
+        This method is called after the REST POST to request ONU discovery is
+        completed. The results (body) of the post is always empty / 204 NO CONTENT
+
+        Schedules the next discovery cycle: a slower tick while no ONUs are
+        present, with +/-10% random jitter so multiple PONs do not poll in
+        lock-step.
+        """
+        delay = self._no_onu_discover_tick if len(self._onus) == 0 else self._discovery_tick
+        delay += random.uniform(-delay / 10, delay / 10)
+        self._discovery_deferred = reactor.callLater(delay, self._discover_onus)
+
+    def sync_hardware(self):
+        # Periodic hardware audit: read the PON config from the OLT, push
+        # corrections for any drift, reconcile the ONU list, then reschedule
+        # itself.  Runs only while the port is RUNNING or STOPPED.
+        if self.state == AdtnPort.State.RUNNING or self.state == AdtnPort.State.STOPPED:
+            def read_config(results):
+                # Compare hardware config against our desired state and queue
+                # corrective PATCHes for any mismatches.
+                self.log.debug('read-config', results=results)
+                config = OltConfig.Pon.decode([results])
+                assert self.pon_id in config, 'sync-pon-not-found-{}'.format(self.pon_id)
+                config = config[self.pon_id]
+                self._in_sync = True
+
+                dl = []
+
+                if self.enabled != config.enabled:
+                    self._in_sync = False
+                    self._expedite_sync = True
+                    dl.append(self._set_pon_config("enabled", self.enabled))
+
+                elif self.state == AdtnPort.State.RUNNING:
+                    if self.deployment_range != config.deployment_range:
+                        self._in_sync = False
+                        self._expedite_sync = True
+                        dl.append(self._set_pon_config("deployment-range",
+                                                       self.deployment_range))
+
+                    # A little side note: FEC enable/disable cannot be changed and
+                    # will remain in the previous status until an optical module
+                    # is plugged in.
+                    if self.downstream_fec_enable != config.downstream_fec_enable:
+                        self._in_sync = False
+                        dl.append(self._set_pon_config("downstream-fec-enable",
+                                                       self.downstream_fec_enable))
+
+                    if self.upstream_fec_enable != config.upstream_fec_enable:
+                        self._in_sync = False
+                        self._expedite_sync = True
+                        dl.append(self._set_pon_config("upstream-fec-enable",
+                                                       self.upstream_fec_enable))
+                # NOTE(review): the gathered deferred is intentionally not
+                # returned/awaited (fire-and-forget corrections); the next sync
+                # pass verifies the result.  Confirm this is intended.
+                defer.gatherResults(dl, consumeErrors=True)
+                return config.onus
+
+            def sync_onus(hw_onus):
+                # Delete ONUs present in hardware but unknown to us; when
+                # auto-activating, recreate ONUs we know about but hardware lost.
+                if self.state == AdtnPort.State.RUNNING:
+                    self.log.debug('sync-pon-onu-results', config=hw_onus)
+
+                    # ONU's have their own sync task, extra (should be deleted) are
+                    # handled here.
+                    hw_onu_ids = frozenset(hw_onus.keys())
+                    my_onu_ids = frozenset(self._onu_by_id.keys())
+
+                    extra_onus = hw_onu_ids - my_onu_ids
+                    dl = [self.delete_onu(onu_id, hw_only=True) for onu_id in extra_onus]
+
+                    if self.activation_method == "autoactivate":
+                        # Autoactivation of ONUs requires missing ONU detection. If
+                        # not found, create them here but let the TCont/GEM-Port restore
+                        # be handle by ONU H/w sync logic.
+                        for onu in [self._onu_by_id[onu_id] for onu_id in my_onu_ids - hw_onu_ids
+                                    if self._onu_by_id.get(onu_id) is not None]:
+                            dl.append(onu.create(reflow=True))
+
+                    return defer.gatherResults(dl, consumeErrors=True)
+
+            def failure(reason, what):
+                # Any step failing marks us out-of-sync; normal (non-expedited)
+                # rescheduling still happens via addBoth(reschedule) below.
+                self.log.error('hardware-sync-{}-failed'.format(what), reason=reason)
+                self._in_sync = False
+                self._expedite_sync = False
+
+            def reschedule(_):
+                # Speed up sequential resync a limited number of times if out of sync.
+
+                delay = self.sync_tick
+
+                if self._expedite_sync:
+                    self._expedite_count += 1
+                    if self._expedite_count < 5:
+                        delay = 1
+                else:
+                    self._expedite_count = 0
+
+                # Jitter to avoid synchronized polling across PONs
+                delay += random.uniform(-delay / 10, delay / 10)
+                self.sync_deferred = reactor.callLater(delay, self.sync_hardware)
+
+            self.sync_deferred = self._get_pon_config()
+            self.sync_deferred.addCallbacks(read_config, failure, errbackArgs=['get-config'])
+            self.sync_deferred.addCallbacks(sync_onus, failure, errbackArgs=['pon-sync'])
+            self.sync_deferred.addBoth(reschedule)
+
+    def process_status_poll(self, status):
+        """
+        Process PON status poll request
+
+        Handles, in order: LOS alarms, newly discovered / rediscovered ONUs,
+        PON-level statistics, per-ONU status, and per-GEM-port status.
+        No-op unless the port is administratively enabled.
+
+        :param status: (OltState.Pon object) results from RESTCONF GET
+        """
+        self.log.debug('process-status-poll', status=status)
+
+        if self._admin_state != AdminState.ENABLED:
+            return
+
+        # Process LOS list
+        self._process_los_alarms(frozenset(status.ont_los))
+
+        # Get new/missing from the discovered ONU leaf. Stale ONUs from previous
+        # configs are now cleaned up during h/w re-sync/reflow.
+        new, rediscovered_onus = self._process_status_onu_discovered_list(status.discovered_onu)
+
+        # Process newly discovered ONU list and rediscovered ONUs
+        for serial_number in new | rediscovered_onus:
+            reactor.callLater(0, self.add_onu, serial_number, status)
+
+        # PON Statistics
+        timestamp = arrow.utcnow().float_timestamp
+        self._process_statistics(status, timestamp)
+
+        # Process ONU info. Note that newly added ONUs will not be processed
+        # until the next pass
+        self._update_onu_status(status.onus, timestamp)
+
+        # Process GEM Port information
+        self._update_gem_status(status.gems, timestamp)
+
+    def _process_statistics(self, status, timestamp):
+        # Copy PON-level packet/byte/BIP-error counters from the poll result
+        # into this port's attributes, stamped with the poll time.
+        self.timestamp = timestamp
+        self.rx_packets = status.rx_packets
+        self.rx_bytes = status.rx_bytes
+        self.tx_packets = status.tx_packets
+        self.tx_bytes = status.tx_bytes
+        self.tx_bip_errors = status.tx_bip_errors
+
+ def _update_onu_status(self, onus, timestamp):
+ """
+ Process ONU status for this PON
+ :param onus: (dict) onu_id: ONU State
+ """
+ for onu_id, onu_status in onus.iteritems():
+ if onu_id in self._onu_by_id:
+ onu = self._onu_by_id[onu_id]
+ onu.timestamp = timestamp
+ onu.rssi = onu_status.rssi
+ onu.equalization_delay = onu_status.equalization_delay
+ onu.equalization_delay = onu_status.equalization_delay
+ onu.fiber_length = onu_status.fiber_length
+ onu.password = onu_status.reported_password
+
+    def _update_gem_status(self, gems, timestamp):
+        # Copy per-GEM-port counters from the poll result into the matching
+        # GemPort objects.  GEMs whose ONU (or GEM ID) is unknown are skipped.
+        for gem_id, gem_status in gems.iteritems():
+            onu = self._onu_by_id.get(gem_status.onu_id)
+            if onu is not None:
+                gem_port = onu.gem_port(gem_status.gem_id)
+                if gem_port is not None:
+                    gem_port.timestamp = timestamp
+                    gem_port.rx_packets = gem_status.rx_packets
+                    gem_port.rx_bytes = gem_status.rx_bytes
+                    gem_port.tx_packets = gem_status.tx_packets
+                    gem_port.tx_bytes = gem_status.tx_bytes
+
+    def _process_los_alarms(self, ont_los):
+        """
+        Walk current LOS and set/clear LOS as appropriate
+
+        Diffs the reported LOS set against our active set: clears alarms for
+        ONUs no longer in LOS, raises alarms (and schedules deletion) for
+        newly-LOS ONUs.
+
+        :param ont_los: (frozenset) ONU IDs of ONUs in LOS alarm state
+        """
+        cleared_alarms = self._active_los_alarms - ont_los
+        new_alarms = ont_los - self._active_los_alarms
+
+        if len(cleared_alarms) > 0 or len(new_alarms) > 0:
+            self.log.info('onu-los', cleared=cleared_alarms, new=new_alarms)
+
+        for onu_id in cleared_alarms:
+            self._active_los_alarms.remove(onu_id)
+            OnuLosAlarm(self.olt.alarms, onu_id, self.port_no).clear_alarm()
+
+        for onu_id in new_alarms:
+            self._active_los_alarms.add(onu_id)
+            OnuLosAlarm(self.olt.alarms, onu_id, self.port_no).raise_alarm()
+            # An ONU in LOS is removed; rediscovery will re-add it when it returns
+            reactor.callLater(0, self.delete_onu, onu_id)
+
+    def _process_status_onu_discovered_list(self, discovered_onus):
+        """
+        Look for new ONUs
+
+        :param discovered_onus: (frozenset) Set of ONUs currently discovered
+        :return: (tuple of sets) (new serial numbers, rediscovered serial numbers);
+                 both empty if the activation method does not use discovery
+        """
+        self.log.debug('discovered-ONUs', list=discovered_onus)
+
+        # Only request discovery if activation is auto-discovery or auto-activate
+        continue_discovery = ['autodiscovery', 'autoactivate']
+
+        if self._activation_method not in continue_discovery:
+            return set(), set()
+
+        # Keys of self._onus are base-64 serial numbers, matching the
+        # representation in the discovered list
+        my_onus = frozenset(self._onus.keys())
+
+        new_onus = discovered_onus - my_onus
+        rediscovered_onus = my_onus & discovered_onus
+
+        return new_onus, rediscovered_onus
+
+ def _get_onu_info(self, serial_number):
+ """
+ Parse through available xPON information for ONU configuration settings
+
+ :param serial_number: (string) Decoded (not base64) serial number string
+ :return: (dict) onu config data or None on lookup failure
+ """
+ try:
+ if self.activation_method == "autodiscovery":
+ # if self.authentication_method == 'serial-number':
+ raise NotImplemented('autodiscovery: Not supported at this time')
+
+ elif self.activation_method == "autoactivate":
+ onu_id = self.get_next_onu_id
+ enabled = True
+ upstream_fec_enabled = True
+
+ else:
+ self.log.error('unsupported-activation-method', method=self.activation_method)
+ return None
+
+ onu_info = {
+ 'device-id': self.olt.device_id,
+ 'serial-number': serial_number,
+ 'pon': self,
+ 'onu-id': onu_id,
+ 'enabled': enabled,
+ 'upstream-fec': upstream_fec_enabled,
+ 'password': Onu.DEFAULT_PASSWORD,
+ }
+ pon_id = self.olt.pon_id_to_port_number(self._pon_id)
+
+ # TODO: Currently only one UNI port and it is hardcoded to port 0
+ onu_info['uni-ports'] = [platform.mk_uni_port_num(pon_id, onu_id)]
+
+ # return onu_info
+ return onu_info
+
+ except Exception as e:
+ self.log.exception('get-onu-info-tech-profiles', e=e)
+ return None
+
+ @inlineCallbacks
+ def add_onu(self, serial_number_64, status):
+ """
+ Add an ONU to the PON
+
+ :param serial_number_64: (str) base-64 encoded serial number
+ :param status: (dict) OLT PON status. Used to detect if ONU is already provisioned
+ """
+ serial_number = Onu.serial_number_to_string(serial_number_64)
+ self.log.info('add-onu', serial_number=serial_number,
+ serial_number_64=serial_number_64, status=status)
+
+ # It takes a little while for a new ONU to be removed from the discovery
+ # list. Return early here so extra ONU IDs are not allocated
+ if serial_number_64 in self._onus:
+ returnValue('wait-for-fpga')
+
+ if serial_number_64 in status.onus:
+ # Handles fast entry into this task before FPGA can clear results of ONU delete
+ returnValue('sticky-onu')
+
+ # At our limit? TODO: Retrieve from device resource manager if available
+ if len(self._onus) >= self.MAX_ONUS_SUPPORTED:
+ self.log.warning('max-onus-provisioned', count=len(self._onus))
+ returnValue('max-onus-reached')
+
+ onu_info = self._get_onu_info(serial_number)
+ onu_id = onu_info['onu-id']
+
+ if onu_id is None:
+ self.log.warning('no-onu-ids-available', serial_number=serial_number,
+ serial_number_64=serial_number_64)
+ returnValue('no-ids-available')
+
+ # TODO: Is the best before or after creation in parent device?
+ alarm = OnuDiscoveryAlarm(self.olt.alarms, self.pon_id, serial_number)
+ reactor.callLater(0, alarm.raise_alarm)
+
+ # Have the core create the ONU device
+ self._parent.add_onu_device(self.pon_id, onu_id, serial_number)
+
+ try:
+ onu = Onu(onu_info)
+ self._onus[serial_number_64] = onu
+ self._onu_by_id[onu.onu_id] = onu
+
+ # Add Multicast to PON on a per-ONU basis
+ #
+ # for id_or_vid, gem_port in gem_ports.iteritems():
+ # try:
+ # if gem_port.multicast:
+ # self.log.debug('id-or-vid', id_or_vid=id_or_vid)
+ # vid = self.olt.multicast_vlans[0] if len(self.olt.multicast_vlans) else None
+ # if vid is not None:
+ # self.add_mcast_gem_port(gem_port, vid)
+ #
+ # except Exception as e:
+ # self.log.exception('id-or-vid', e=e)
+
+ _results = yield onu.create()
+
+ except Exception as e:
+ self.log.warning('add-onu', serial_number=serial_number_64, e=e)
+ # allowable exception. H/w re-sync will recover/fix any issues
+
+    @property
+    def get_next_onu_id(self):
+        # Allocate the next free ONU ID for this PON from the resource manager.
+        # NOTE(review): a property with side effects (allocation) — each read
+        # consumes an ID; callers must pair with release_onu_id().
+        return self._parent.resource_mgr.get_onu_id(self._pon_id)
+
+    def release_onu_id(self, onu_id):
+        # Return a previously-allocated ONU ID to the resource manager pool.
+        self._parent.resource_mgr.free_onu_id(self._pon_id, onu_id)
+
+    @inlineCallbacks
+    def _remove_from_hardware(self, onu_id):
+        """
+        Delete an ONU's configuration from OLT hardware via RESTCONF.
+        A 404 response means the ONU was already gone and is not an error.
+
+        :param onu_id: (int) ONU ID to remove
+        """
+        uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, onu_id)
+        name = 'pon-delete-onu-{}-{}'.format(self._pon_id, onu_id)
+
+        try:
+            yield self._parent.rest_client.request('DELETE', uri, name=name)
+
+        except RestInvalidResponseCode as e:
+            if e.code != 404:
+                self.log.exception('onu-delete', e=e)
+
+        except Exception as e:
+            self.log.exception('onu-hw-delete', onu_id=onu_id, e=e)
+
+    @inlineCallbacks
+    def delete_onu(self, onu_id, hw_only=False):
+        """
+        Delete an ONU from this PON: local dictionaries, hardware, and
+        (unless hw_only) the VOLTHA adapter agent.
+
+        :param onu_id: (int) ID of the ONU to delete
+        :param hw_only: (bool) when True, do not remove the child device
+                        from the VOLTHA core (hardware cleanup only)
+        """
+        onu = self._onu_by_id.get(onu_id)
+
+        # Remove from any local dictionary
+        if onu_id in self._onu_by_id:
+            del self._onu_by_id[onu_id]
+
+        if onu is not None:
+            if onu.serial_number_64 in self._onus:
+                del self._onus[onu.serial_number_64]
+            try:
+                proxy_address = onu.proxy_address
+                # NOTE(review): onu.delete() is not yielded — if it returns a
+                # deferred, failures surface asynchronously.  Confirm intended.
+                onu.delete()                       # Remove from hardware
+
+                # And removal from VOLTHA adapter agent
+                if not hw_only:
+                    self._parent.delete_child_device(proxy_address)
+
+            except Exception as e:
+                self.log.exception('onu-delete', serial_number=onu.serial_number, e=e)
+        else:
+            # Unknown locally: still attempt a hardware-side cleanup
+            try:
+                yield self._remove_from_hardware(onu_id)
+
+            except Exception as e:
+                self.log.debug('onu-remove', serial_number=onu.serial_number, e=e)
+
+        # Remove from LOS list if needed  TODO: Should a 'clear' alarm be sent as well ?
+        if onu is not None and onu.id in self._active_los_alarms:
+            self._active_los_alarms.remove(onu.id)
+
+    def add_mcast_gem_port(self, mcast_gem, vlan):
+        """
+        Add any new Multicast GEM Ports to the PON
+
+        Idempotent per VLAN: if the VLAN is already registered, this is a no-op.
+        Current implementation supports exactly one multicast GEM/VLAN until
+        BBF multicast support lands.
+
+        :param mcast_gem: (GemPort)
+        :param vlan: (int) Multicast VLAN ID (1..4095)
+        """
+        if vlan in self._mcast_gem_ports:
+            return
+
+        assert len(self._mcast_gem_ports) == 0, 'Only 1 MCAST GEMPort until BBF Support'
+        assert 1 <= vlan <= 4095, 'Invalid Multicast VLAN ID'
+        assert len(self.olt.multicast_vlans) == 1, 'Only support 1 MCAST VLAN until BBF Support'
+
+        self._mcast_gem_ports[vlan] = mcast_gem
diff --git a/adapters/adtran_olt/resources/__init__.py b/adapters/adtran_olt/resources/__init__.py
new file mode 100644
index 0000000..9c454e3
--- /dev/null
+++ b/adapters/adtran_olt/resources/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2018-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/adapters/adtran_olt/resources/adtran_olt_resource_manager.py b/adapters/adtran_olt/resources/adtran_olt_resource_manager.py
new file mode 100644
index 0000000..caf5a46
--- /dev/null
+++ b/adapters/adtran_olt/resources/adtran_olt_resource_manager.py
@@ -0,0 +1,295 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import structlog
+
+from pyvoltha.adapters.common.pon_resource_manager.resource_manager import PONResourceManager
+from pyvoltha.common.utils.registry import registry
+# from voltha.core.config.config_backend import ConsulStore
+# from voltha.core.config.config_backend import EtcdStore
+from pyvoltha.adapters.common.kvstore.kvstore import create_kv_client
+from adtran_resource_manager import AdtranPONResourceManager
+
+
+class AdtranOltResourceMgr(object):
+
+    # KV-store sub-key names for per-ONU resource maps
+    GEMPORT_IDS = "gemport_ids"
+    ALLOC_IDS = "alloc_ids"
+    BASE_PATH_KV_STORE = "adtran_olt/{}"    # adtran_olt/<device_id>
+
+    def __init__(self, device_id, host_and_port, extra_args, device_info):
+        """
+        Per-OLT resource manager: wraps an AdtranPONResourceManager and a KV
+        client for ONU/alloc/GEM-port ID bookkeeping.
+
+        :param device_id: (str) VOLTHA device ID of this OLT
+        :param host_and_port: (str) OLT address (used for logging context)
+        :param extra_args: adapter-specific arguments passed to the PON
+                           resource manager
+        :param device_info: device capability object; must provide technology,
+                            intf_ids and the resource range fields used in
+                            initialize_device_resource_range_and_pool()
+        :raises Exception: if the configured KV backend is not etcd or consul
+        """
+        self.log = structlog.get_logger(id=device_id,
+                                        ip=host_and_port)
+        self.device_id = device_id
+        self.host_and_port = host_and_port
+        self.extra_args = extra_args
+        self.device_info = device_info
+        self.args = registry('main').get_args()
+        self._path_prefix = AdtranOltResourceMgr.BASE_PATH_KV_STORE.format(device_id)
+
+        # KV store's IP Address and PORT
+        # host, port = '127.0.0.1', 8500
+        if self.args.backend == 'etcd':
+            host, port = self.args.etcd.split(':', 1)
+            self.kv_store = create_kv_client('etcd', host, port)
+            # self.kv_store = EtcdStore(host, port,
+            #                           AdtranOltResourceMgr.BASE_PATH_KV_STORE.format(device_id))
+        elif self.args.backend == 'consul':
+            host, port = self.args.consul.split(':', 1)
+            self.kv_store = create_kv_client('consul', host, port)
+            # self.kv_store = ConsulStore(host, port,
+            #                             AdtranOltResourceMgr.BASE_PATH_KV_STORE.format(device_id))
+        else:
+            self.log.error('Invalid-backend')
+            raise Exception("Invalid-backend-for-kv-store")
+
+        self.resource_mgr = AdtranPONResourceManager(
+            self.device_info.technology,
+            self.extra_args,
+            self.device_id, self.args.backend,
+            host, port
+        )
+        # Tech profiles uses this resource manager to retrieve information on a per-interface
+        # basis.  Every interface maps to the same single manager instance.
+        self.resource_managers = {intf_id: self.resource_mgr for intf_id in device_info.intf_ids}
+
+        # Flag to indicate whether information fetched from device should
+        # be used to initialize PON Resource Ranges
+        self.use_device_info = False
+
+        self.initialize_device_resource_range_and_pool()
+
+ def __del__(self):
+ self.log.info("clearing-device-resource-pool")
+ for key, resource_mgr in self.resource_mgrs.iteritems():
+ resource_mgr.clear_device_resource_pool()
+
+    def get_onu_id(self, pon_intf_id):
+        # Allocate one ONU ID on the given PON and, on success, initialize
+        # the (pon, onu) resource map used for alloc/GEM bookkeeping.
+        # Returns None if the pool is exhausted.
+        onu_id = self.resource_mgr.get_resource_id(pon_intf_id,
+                                                   PONResourceManager.ONU_ID,
+                                                   onu_id=None,
+                                                   num_of_id=1)
+        if onu_id is not None:
+            pon_intf_onu_id = (pon_intf_id, onu_id)
+            self.resource_mgr.init_resource_map(pon_intf_onu_id)
+
+        return onu_id
+
+    def free_onu_id(self, pon_intf_id, onu_id):
+        # Return the ONU ID to the pool and drop its (pon, onu) resource map.
+        self.resource_mgr.free_resource_id(pon_intf_id,
+                                           PONResourceManager.ONU_ID,
+                                           onu_id)
+        pon_intf_onu_id = (pon_intf_id, onu_id)
+        self.resource_mgr.remove_resource_map(pon_intf_onu_id)
+
+ def get_alloc_id(self, pon_intf_onu_id):
+ # Derive the pon_intf from the pon_intf_onu_id tuple
+ pon_intf = pon_intf_onu_id[0]
+ onu_id = pon_intf_onu_id[1]
+ alloc_id_list = self.resource_mgr.get_current_alloc_ids_for_onu(pon_intf_onu_id)
+
+ if alloc_id_list and len(alloc_id_list) > 0:
+ # Since we support only one alloc_id for the ONU at the moment,
+ # return the first alloc_id in the list, if available, for that
+ # ONU.
+ return alloc_id_list[0]
+
+ alloc_id_list = self.resource_mgr.get_resource_id(pon_intf,
+ PONResourceManager.ALLOC_ID,
+ onu_id=onu_id,
+ num_of_id=1)
+ if alloc_id_list and len(alloc_id_list) == 0:
+ self.log.error("no-alloc-id-available")
+ return None
+
+ # update the resource map on KV store with the list of alloc_id
+ # allocated for the pon_intf_onu_id tuple
+ self.resource_mgr.update_alloc_ids_for_onu(pon_intf_onu_id,
+ alloc_id_list)
+
+ # Since we request only one alloc id, we refer the 0th
+ # index
+ alloc_id = alloc_id_list[0]
+
+ return alloc_id
+
+    def get_gemport_id(self, pon_intf_onu_id, num_of_id=1):
+        # TODO: Remove this if never used
+        # Dead code: intentionally disabled via the assert below; the original
+        # implementation is retained (commented) for reference only.
+        # Derive the pon_intf and onu_id from the pon_intf_onu_id tuple
+        pon_intf = pon_intf_onu_id[0]
+        onu_id = pon_intf_onu_id[1]
+        uni_id = pon_intf_onu_id[2]
+        assert False, 'unused function'
+
+        # gemport_id_list = self.resource_managers[pon_intf].get_current_gemport_ids_for_onu(
+        #     pon_intf_onu_id)
+        # if gemport_id_list and len(gemport_id_list) > 0:
+        #     return gemport_id_list
+        #
+        # gemport_id_list = self.resource_mgrs[pon_intf].get_resource_id(
+        #     pon_intf_id=pon_intf,
+        #     resource_type=PONResourceManager.GEMPORT_ID,
+        #     num_of_id=num_of_id
+        # )
+        #
+        # if gemport_id_list and len(gemport_id_list) == 0:
+        #     self.log.error("no-gemport-id-available")
+        #     return None
+        #
+        # # update the resource map on KV store with the list of gemport_id
+        # # allocated for the pon_intf_onu_id tuple
+        # self.resource_managers[pon_intf].update_gemport_ids_for_onu(pon_intf_onu_id,
+        #                                                             gemport_id_list)
+        #
+        # self.update_gemports_ponport_to_onu_map_on_kv_store(gemport_id_list,
+        #                                                     pon_intf, onu_id, uni_id)
+        # return gemport_id_list
+
+ def free_pon_resources_for_onu(self, pon_intf_id_onu_id):
+ """ Typically called on ONU delete """
+
+ pon_intf_id = pon_intf_id_onu_id[0]
+ onu_id = pon_intf_id_onu_id[1]
+ try:
+ alloc_ids = self.resource_mgr.get_current_alloc_ids_for_onu(pon_intf_id_onu_id)
+ if alloc_ids is not None:
+ self.resource_mgr.free_resource_id(pon_intf_id,
+ PONResourceManager.ALLOC_ID,
+ alloc_ids, onu_id=onu_id)
+ except:
+ pass
+
+ try:
+ gemport_ids = self.resource_mgr.get_current_gemport_ids_for_onu(pon_intf_id_onu_id)
+ if gemport_ids is not None:
+ self.resource_mgr.free_resource_id(pon_intf_id,
+ PONResourceManager.GEMPORT_ID,
+ gemport_ids)
+ except:
+ pass
+
+ try:
+ self.resource_mgr.free_resource_id(pon_intf_id,
+ PONResourceManager.ONU_ID,
+ onu_id)
+ except:
+ pass
+
+ # Clear resource map associated with (pon_intf_id, gemport_id) tuple.
+ self.resource_mgr.remove_resource_map(pon_intf_id_onu_id)
+
+ # Clear the ONU Id associated with the (pon_intf_id, gemport_id) tuple.
+ if gemport_ids is not None:
+ for gemport_id in gemport_ids:
+ try:
+ self.kv_store.delete(self._make_path(str((pon_intf_id, gemport_id))))
+ # del self.kv_store[str((pon_intf_id, gemport_id))]
+ except:
+ pass
+
+    def initialize_device_resource_range_and_pool(self):
+        # Establish PON resource ID ranges: prefer ranges persisted in the KV
+        # store; fall back to the ranges advertised by the device itself.
+        if not self.use_device_info:
+            status = self.resource_mgr.init_resource_ranges_from_kv_store()
+            if not status:
+                self.log.error("failed-to-load-resource-range-from-kv-store")
+                # When we have failed to read the PON Resource ranges from KV
+                # store, use the information selected as the default.
+                self.use_device_info = True
+
+        if self.use_device_info:
+            self.log.info("using-device-info-to-init-pon-resource-ranges")
+            self.resource_mgr.init_default_pon_resource_ranges(
+                onu_id_start_idx=self.device_info.onu_id_start,
+                onu_id_end_idx=self.device_info.onu_id_end,
+                alloc_id_start_idx=self.device_info.alloc_id_start,
+                alloc_id_end_idx=self.device_info.alloc_id_end,
+                gemport_id_start_idx=self.device_info.gemport_id_start,
+                gemport_id_end_idx=self.device_info.gemport_id_end,
+                num_of_pon_ports=self.device_info.pon_ports,
+                intf_ids=self.device_info.intf_ids
+            )
+
+        # After we have initialized resource ranges, initialize the
+        # resource pools accordingly.
+        self.resource_mgr.init_device_resource_pool()
+
+    def get_current_gemport_ids_for_onu(self, pon_intf_onu_id):
+        # Delegate to the per-interface resource manager (pon id is element 0).
+        pon_intf_id = pon_intf_onu_id[0]
+        return self.resource_managers[pon_intf_id].get_current_gemport_ids_for_onu(pon_intf_onu_id)
+
+    def get_current_alloc_ids_for_onu(self, pon_intf_onu_id):
+        # Return the ONU's single alloc id (first of the stored list),
+        # or None when nothing is recorded for this (pon, onu) tuple.
+        pon_intf_id = pon_intf_onu_id[0]
+        alloc_ids = self.resource_managers[pon_intf_id].get_current_alloc_ids_for_onu(pon_intf_onu_id)
+        if alloc_ids is None:
+            return None
+        # We support only one tcont at the moment
+        return alloc_ids[0]
+
+ def update_gemports_ponport_to_onu_map_on_kv_store(self, gemport_list, pon_port, onu_id, uni_id):
+ for gemport in gemport_list:
+ pon_intf_gemport = (pon_port, gemport)
+ # This information is used when packet_indication is received and
+ # we need to derive the ONU Id for which the packet arrived based
+ # on the pon_intf and gemport available in the packet_indication
+ # self.kv_store[str(pon_intf_gemport)] = ' '.join(map(str, (onu_id, uni_id)))
+ self.kv_store.put(self._make_path(str(pon_intf_gemport)), ' '.join(map(str, (onu_id, uni_id)))
+
+    def get_onu_uni_from_ponport_gemport(self, pon_port, gemport):
+        # Inverse of update_gemports_ponport_to_onu_map_on_kv_store():
+        # look up the stored "onu_id uni_id" string and return it as ints.
+        # NOTE(review): raises if the key is absent — confirm callers only
+        # query GEMs previously recorded.
+        pon_intf_gemport = (pon_port, gemport)
+        # return tuple(map(int, self.kv_store[str(pon_intf_gemport)].split(' ')))
+        return tuple(map(int, self.kv_store.get(self._make_path(str(pon_intf_gemport))).split(' ')))
+
+    def get_flow_id(self, pon_intf_id, onu_id, uni_id, flow_store_cookie, flow_category=None):
+        """
+        Find an existing flow ID for this (pon, onu, uni) by category or
+        store-cookie; allocate a new one if no match is found.
+
+        :param pon_intf_id: (int) PON interface
+        :param onu_id: (int) ONU ID
+        :param uni_id: (int) UNI ID
+        :param flow_store_cookie: cookie identifying the flow in the store
+        :param flow_category: (str) optional category to match before cookie
+        :return: (int) flow id, or None if allocation failed
+        """
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        try:
+            flow_ids = self.resource_managers[pon_intf_id]. \
+                get_current_flow_ids_for_onu(pon_intf_onu_id)
+            if flow_ids is not None:
+                for flow_id in flow_ids:
+                    flows = self.get_flow_id_info(pon_intf_id, onu_id, uni_id, flow_id)
+                    assert (isinstance(flows, list))
+                    for flow in flows:
+                        # Category match takes priority over cookie match
+                        if flow_category is not None and \
+                                'flow_category' in flow and \
+                                flow['flow_category'] == flow_category:
+                            return flow_id
+                        if flow['flow_store_cookie'] == flow_store_cookie:
+                            return flow_id
+        except Exception as e:
+            # Lookup failure is non-fatal; fall through to new allocation
+            self.log.error("error-retrieving-flow-info", e=e)
+
+        flow_id = self.resource_managers[pon_intf_id].get_resource_id(
+            pon_intf_onu_id[0], PONResourceManager.FLOW_ID)
+        if flow_id is not None:
+            self.resource_managers[pon_intf_id].update_flow_id_for_onu(
+                pon_intf_onu_id, flow_id
+            )
+
+        return flow_id
+
+    def get_flow_id_info(self, pon_intf_id, onu_id, uni_id, flow_id):
+        # Fetch the stored flow-data list for a specific flow id.
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        return self.resource_managers[pon_intf_id].get_flow_id_info(pon_intf_onu_id, flow_id)
+
+    def get_current_flow_ids_for_uni(self, pon_intf_id, onu_id, uni_id):
+        # List all flow ids currently recorded for this (pon, onu, uni).
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        return self.resource_managers[pon_intf_id].get_current_flow_ids_for_onu(pon_intf_onu_id)
+
+    def update_flow_id_info_for_uni(self, pon_intf_id, onu_id, uni_id, flow_id, flow_data):
+        # Persist updated flow data for an existing flow id on this
+        # (pon, onu, uni) tuple.
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        return self.resource_managers[pon_intf_id].update_flow_id_info_for_onu(
+            pon_intf_onu_id, flow_id, flow_data)
\ No newline at end of file
diff --git a/adapters/adtran_olt/resources/adtran_resource_manager.py b/adapters/adtran_olt/resources/adtran_resource_manager.py
new file mode 100644
index 0000000..9f2a0a4
--- /dev/null
+++ b/adapters/adtran_olt/resources/adtran_resource_manager.py
@@ -0,0 +1,358 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Resource Manager will be unique for each OLT device.
+
+It exposes APIs to create/free alloc_ids/onu_ids/gemport_ids. Resource Manager
+uses a KV store in backend to ensure resiliency of the data.
+"""
+from bitstring import BitArray
+import json
+from pyvoltha.adapters.common.pon_resource_manager.resource_manager import PONResourceManager
+import adtranolt_platform as platform
+
+
+class AdtranPONResourceManager(PONResourceManager):
+ """Implements APIs to initialize/allocate/release alloc/gemport/onu IDs."""
+
+ # Constants for internal usage.
+ ONU_MAP = 'onu_map'
+
+    def init_device_resource_pool(self):
+        """
+        Initialize ONU-ID, per-ONU alloc-ID and GEM-port pools for every PON port.
+        """
+        for pon_id in self.intf_ids:
+            self.init_resource_id_pool(
+                pon_intf_id=pon_id,
+                resource_type=PONResourceManager.ONU_ID,
+                start_idx=self.pon_resource_ranges[PONResourceManager.ONU_ID_START_IDX],
+                end_idx=self.pon_resource_ranges[PONResourceManager.ONU_ID_END_IDX])
+
+            alloc_id_map = dict()  # ONU ID -> list of the fixed alloc-IDs reserved for that ONU
+            for onu_id in range(platform.MAX_ONUS_PER_PON):
+                alloc_id_map[onu_id] = [platform.mk_alloc_id(pon_id, onu_id, idx)
+                                        for idx in xrange(platform.MAX_TCONTS_PER_ONU)]
+
+            self.init_resource_id_pool(pon_intf_id=pon_id,
+                                       resource_type=PONResourceManager.ALLOC_ID,
+                                       resource_map=alloc_id_map)
+
+            self.init_resource_id_pool(
+                pon_intf_id=pon_id,
+                resource_type=PONResourceManager.GEMPORT_ID,
+                start_idx=self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_START_IDX],
+                end_idx=self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_END_IDX])
+
+    def clear_device_resource_pool(self):
+        """
+        Clear resource pool of all PON ports.
+        """
+        for pon_id in self.intf_ids:
+            self.clear_resource_id_pool(pon_intf_id=pon_id,
+                                        resource_type=PONResourceManager.ONU_ID)
+
+            self.clear_resource_id_pool(
+                pon_intf_id=pon_id,
+                resource_type=PONResourceManager.ALLOC_ID,
+            )
+
+            self.clear_resource_id_pool(
+                pon_intf_id=pon_id,
+                resource_type=PONResourceManager.GEMPORT_ID,
+            )
+            self.clear_resource_id_pool(
+                pon_intf_id=pon_id,
+                resource_type=PONResourceManager.FLOW_ID,  # NOTE(review): not created here; presumably set up by the base class - confirm
+            )
+
+    def init_resource_id_pool(self, pon_intf_id, resource_type, start_idx=None,
+                              end_idx=None, resource_map=None):
+        """
+        Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param start_idx: start index for onu id pool
+        :param end_idx: end index for onu id pool
+        :param resource_map: (dict) Resource map if per-ONU specific
+        :return boolean: True if resource id pool initialized else false
+        """
+        status = False
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return status
+
+        try:
+            # On adapter reboot/reconciliation the pool may already exist in the
+            # KV store; only build and write a fresh pool when it is absent.
+            resource = self._get_resource(path)
+
+            if resource is not None:
+                self._log.info("Resource-already-present-in-store", path=path)
+                status = True
+
+            else:
+                if resource_map is None:
+                    resource = self._format_resource(pon_intf_id, start_idx, end_idx)
+                    self._log.info("Resource-initialized", path=path)
+
+                else:
+                    resource = self._format_map_resource(pon_intf_id, resource_map)
+
+                # Add resource as json in kv store.
+                status = self._kv_store.update_to_kv_store(path, resource)
+
+        except Exception as e:
+            self._log.exception("error-initializing-resource-pool", e=e)
+
+        return status
+
+    def _generate_next_id(self, resource, onu_id=None):
+        """
+        Generate unique id having OFFSET as start index.
+        :param onu_id: if set, allocate from this ONU's per-ONU sub-pool (alloc-IDs)
+        :param resource: resource used to generate ID
+        :return int: generated id
+        """
+        if onu_id is not None:
+            resource = resource[AdtranPONResourceManager.ONU_MAP][str(onu_id)]
+
+        pos = resource[PONResourceManager.POOL].find('0b0')  # NOTE(review): find() returns () on an exhausted pool -> IndexError below; confirm intended
+        resource[PONResourceManager.POOL].set(1, pos)
+        return pos[0] + resource[PONResourceManager.START_IDX]
+
+    def _release_id(self, resource, unique_id, onu_id=None):
+        """
+        Release unique id having OFFSET as start index.
+
+        :param resource: resource used to release ID
+        :param unique_id: id need to be released
+        :param onu_id: ONU ID if unique per ONU
+        """
+        if onu_id is not None:
+            resource = resource[AdtranPONResourceManager.ONU_MAP][str(onu_id)]  # per-ONU sub-pool (alloc-IDs)
+
+        pos = ((int(unique_id)) - resource[PONResourceManager.START_IDX])  # bit offset within the pool
+        resource[PONResourceManager.POOL].set(0, pos)
+
+    def get_resource_id(self, pon_intf_id, resource_type, onu_id=None, num_of_id=1):
+        """
+        Create alloc/gemport/onu id for given OLT PON interface.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param num_of_id: required number of ids
+        :param onu_id: ONU ID if unique per ONU (Used for Alloc IDs)
+        :return list/int/None: list, int or None if resource type is
+                               alloc_id/gemport_id, onu_id or invalid type
+                               respectively
+        """
+        result = None
+
+        if num_of_id < 1:
+            self._log.error("invalid-num-of-resources-requested")
+            return result
+
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return result
+
+        try:
+            resource = self._get_resource(path, onu_id)
+            if resource is not None and \
+                    (resource_type == PONResourceManager.ONU_ID or
+                     resource_type == PONResourceManager.FLOW_ID):
+                result = self._generate_next_id(resource)  # num_of_id not applicable for these types
+
+            elif resource is not None and \
+                    resource_type == PONResourceManager.GEMPORT_ID:
+                if num_of_id == 1:
+                    result = self._generate_next_id(resource)  # GEM-port pool is per-PON
+                else:
+                    result = [self._generate_next_id(resource) for _ in range(num_of_id)]
+
+            elif resource is not None and \
+                    resource_type == PONResourceManager.ALLOC_ID:
+                if num_of_id == 1:
+                    result = self._generate_next_id(resource, onu_id)  # alloc-ID pool is per-ONU
+                else:
+                    result = [self._generate_next_id(resource, onu_id) for _ in range(num_of_id)]
+            else:
+                raise Exception("get-resource-failed")
+
+            self._log.debug("Get-" + resource_type + "-success", result=result,
+                            path=path)
+            # Update resource in kv store
+            self._update_resource(path, resource, onu_id=onu_id)
+
+        except Exception as e:
+            self._log.exception("Get-" + resource_type + "-id-failed",
+                                path=path, e=e)
+        return result
+
+ def free_resource_id(self, pon_intf_id, resource_type, release_content, onu_id=None):
+ """
+ Release alloc/gemport/onu id for given OLT PON interface.
+
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :param release_content: required number of ids
+ :param onu_id: ONU ID if unique per ONU
+ :return boolean: True if all IDs in given release_content released
+ else False
+ """
+ status = False
+
+ path = self._get_path(pon_intf_id, resource_type)
+ if path is None:
+ return status
+
+ try:
+ resource = self._get_resource(path, onu_id=onu_id)
+ if resource is None:
+ raise Exception("get-resource-for-free-failed")
+
+ if resource_type == PONResourceManager.ONU_ID:
+ self._release_id(resource, release_content)
+
+ elif resource_type == PONResourceManager.ALLOC_ID:
+ for content in release_content:
+ self._release_id(resource, content)
+
+ elif resource_type == PONResourceManager.GEMPORT_ID:
+ for content in release_content:
+ self._release_id(resource, content, onu_id)
+ else:
+ raise Exception("get-resource-for-free-failed")
+
+ self._log.debug("Free-" + resource_type + "-success", path=path)
+
+ # Update resource in kv store
+ status = self._update_resource(path, resource, onu_id=onu_id)
+
+ except Exception as e:
+ self._log.exception("Free-" + resource_type + "-failed",
+ path=path, e=e)
+ return status
+
+    def _update_resource(self, path, resource, onu_id=None):
+        """
+        Update resource in resource kv store.
+        :param onu_id: ONU ID (required when 'path' is a per-ONU alloc-ID pool)
+        :param path: path to update resource
+        :param resource: resource need to be updated
+        :return boolean: True if resource updated in kv store else False
+        """
+        if 'alloc_id' in path.lower():
+            assert onu_id is not None
+            poolResource = resource[AdtranPONResourceManager.ONU_MAP][str(onu_id)]
+            poolResource[PONResourceManager.POOL] = \
+                poolResource[PONResourceManager.POOL].bin  # BitArray -> binary string for JSON
+        else:
+            resource[PONResourceManager.POOL] = \
+                resource[PONResourceManager.POOL].bin  # BitArray -> binary string for JSON
+
+        return self._kv_store.update_to_kv_store(path, json.dumps(resource))
+
+ def _get_resource(self, path, onu_id=None):
+ """
+ Get resource from kv store.
+
+ :param path: path to get resource
+ :return: resource if resource present in kv store else None
+ """
+ # get resource from kv store
+ result = self._kv_store.get_from_kv_store(path)
+ if result is None:
+ return result
+
+ self._log.info("dumping-resource", result=result)
+ resource = result
+
+ if resource is not None:
+ # decode resource fetched from backend store to dictionary
+ resource = json.loads(resource)
+
+ if 'alloc_id' in path.lower():
+ assert onu_id is not None
+ poolResource = resource[AdtranPONResourceManager.ONU_MAP][str(onu_id)]
+ poolResource[PONResourceManager.POOL] = \
+ BitArray('0b' + poolResource[PONResourceManager.POOL])
+ else:
+ # resource pool in backend store stored as binary string whereas to
+ # access the pool to generate/release IDs it need to be converted
+ # as BitArray
+ resource[PONResourceManager.POOL] = \
+ BitArray('0b' + resource[PONResourceManager.POOL])
+
+ return resource
+
+    def _format_resource(self, pon_intf_id, start_idx, end_idx):
+        """
+        Format resource as json.
+
+        :param pon_intf_id: OLT PON interface id
+        :param start_idx: start index for id pool
+        :param end_idx: end index for id pool
+        :return str: JSON-encoded resource (a string, not a dict)
+        """
+        # Format resource as json to be stored in backend store
+        resource = dict()
+        resource[PONResourceManager.PON_INTF_ID] = pon_intf_id
+        resource[PONResourceManager.START_IDX] = start_idx
+        resource[PONResourceManager.END_IDX] = end_idx
+
+        # resource pool stored in backend store as binary string
+        resource[PONResourceManager.POOL] = BitArray(end_idx-start_idx).bin  # all-zero bits = all IDs free
+
+        return json.dumps(resource)
+
+ def _format_map_resource(self, pon_intf_id, resource_map):
+ """
+ Format resource as json.
+ # TODO: Refactor the resource BitArray to be just a list of the resources.
+ # This is used to store available alloc-id's on a per-onu/pon basis
+ # which in BitArray string form, is a 768 byte string for just 4 possible
+ # alloc-IDs. This equates to 1.57 MB of storage when you take into
+ # account 128 ONUs and 16 PONs pre-provisioneed
+ :param pon_intf_id: OLT PON interface id
+ :param resource_map: (dict) ONU ID -> Scattered list of IDs
+ :return dictionary: resource formatted as dictionary
+ """
+ # Format resource as json to be stored in backend store
+ resource = dict()
+ resource[PONResourceManager.PON_INTF_ID] = pon_intf_id
+
+ onu_dict = dict()
+ for onu_id, resources in resource_map.items():
+ start_idx = min(resources)
+ end_idx = max(resources) + 1
+
+ onu_dict[onu_id] = {
+ PONResourceManager.START_IDX: start_idx,
+ PONResourceManager.END_IDX: end_idx,
+ }
+ # Set non-allowed values as taken
+ resource_map = BitArray(end_idx - start_idx)
+ not_available = {pos for pos in xrange(end_idx-start_idx)
+ if pos + start_idx not in resources}
+ resource_map.set(True, not_available)
+ onu_dict[onu_id][PONResourceManager.POOL] = resource_map.bin
+
+ resource[AdtranPONResourceManager.ONU_MAP] = onu_dict
+ return json.dumps(resource)
diff --git a/adapters/adtran_olt/resources/adtranolt_platform.py b/adapters/adtran_olt/resources/adtranolt_platform.py
new file mode 100644
index 0000000..3ec7b81
--- /dev/null
+++ b/adapters/adtran_olt/resources/adtranolt_platform.py
@@ -0,0 +1,182 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from pyvoltha.protos.device_pb2 import Port
+import pyvoltha.protos.device_pb2 as dev_pb2
+
+#######################################################################
+#
+# This is a copy of the OpenOLT file of a similar name and is used
+# when running in non-xPON (OpenOLT/SEBA) mode. We need to closely
+# watch for changes in the OpenOLT and eventually work together to
+# have a better way to do things (and more ONUs than 112)
+#
+# TODO: These duplicate some methods in the OLT Handler. Clean up
+# and use a separate file and include it into OLT Handler object
+# as something it derives from.
+#
+#######################################################################
+"""
+Encoding of identifiers
+=======================
+
+Alloc ID
+
+ Uniquely identifies a T-CONT
+ Ranges from 1024..16383 per ITU Standard
+ For Adtran, 1024..1919
+ Unique per PON interface
+
+ 9 8 7 0
+ +-----+----------+
+ | idx | onu_id | + (Min Alloc ID)
+ +-----+----------+
+
+ onu id = 8 bit
+ Alloc index = 2 bits (max 4 TCONTs/ONU)
+
+Flow id
+
+ Identifies a flow within a single OLT
+ Flow Id is unique per OLT
+ Multiple GEM ports can map to same flow id
+
+ 13 11 4 0
+ +--------+--------------+------+
+ | pon id | onu id | Flow |
+ | | | idx |
+ +--------+--------------+------+
+
+ 14 bits = 16384 flows (per OLT).
+
+ pon id = 4 bits = 16 PON ports
+    onu id = 7 bits = 128 ONUs per PON port
+ Flow index = 3 bits = 4 bi-directional flows per ONU
+ = 8 uni-directional flows per ONU
+
+
+Logical (OF) UNI port number
+
+ OpenFlow port number corresponding to PON UNI
+
+ 15 11 4 0
+ +--+--------+--------------+------+
+ |0 | pon id | onu id | 0 |
+ +--+--------+--------------+------+
+
+ pon id = 4 bits = 16 PON ports
+ onu id = 7 bits = 128 ONUs per PON port
+
+
+PON OLT (OF) port number
+
+ OpenFlow port number corresponding to PON OLT ports
+
+ 31 28 0
+ +--------+------------------------~~~------+
+ | 0x2 | pon intf id |
+ +--------+------------------------~~~------+
+
+"""
+
+MIN_TCONT_ALLOC_ID = 1024 # 1024..16383
+MAX_TCONT_ALLOC_ID = 16383
+
+MIN_GEM_PORT_ID = 2176 # 2176..4222
+MAX_GEM_PORT_ID = MIN_GEM_PORT_ID + 2046
+
+MAX_ONUS_PER_PON = 128
+MAX_TCONTS_PER_ONU = 4
+MAX_GEM_PORTS_PER_ONU = 16 # Hardware can handle more
+
+
+class adtran_platform(object):
+    def __init__(self):
+        pass
+
+    def mk_uni_port_num(self, intf_id, onu_id, uni_id=0):
+        return intf_id << 11 | onu_id << 4 | uni_id  # same encoding as the module-level mk_uni_port_num()
+
+    def uni_id_from_uni_port(self, uni_port):
+        return uni_port & 0xF  # UNI index occupies the low 4 bits
+
+
+def mk_uni_port_num(intf_id, onu_id, uni_id=0):
+    """
+    Create a unique virtual UNI port number based upon PON and ONU ID
+    :param intf_id: (int) PON interface (port) ID, bits 14..11
+    :param onu_id: (int) ONU ID (0..max); uni_id: (int) UNI index (low 4 bits)
+    :return: (int) UNI Port number
+    """
+    return intf_id << 11 | onu_id << 4 | uni_id
+
+
+def uni_id_from_uni_port(uni_port):
+    return uni_port & 0xF  # UNI index occupies the low 4 bits of the port number
+
+
+def intf_id_from_uni_port_num(port_num):
+    """
+    Extract the PON device port number from a virtual UNI Port number
+
+    :param port_num: (int) virtual UNI / vENET port number on OLT PON
+    :return: (int) PON Port number (note, this is not the PON ID)
+    """
+    return (port_num >> 11) & 0xF  # PON id occupies bits 14..11 (see encoding notes above)
+
+
+def mk_alloc_id(_, onu_id, idx=0):
+ """
+ Allocate a TCONT Alloc-ID. This is only called by the OLT
+
+ :param _: (int) PON ID (not used)
+ :param onu_id: (int) ONU ID (0..MAX_ONUS_PER_PON-1)
+ :param idx: (int) TCONT Index (0..7)
+ """
+ assert 0 <= onu_id < MAX_ONUS_PER_PON, 'Invalid ONU ID. Expect 0..{}'.format(MAX_ONUS_PER_PON-1)
+ assert 0 <= idx <= MAX_TCONTS_PER_ONU, 'Invalid TCONT instance. Expect 0..{}'.format(MAX_TCONTS_PER_ONU)
+ alloc_id = MIN_TCONT_ALLOC_ID + (idx << 8) + onu_id
+ return alloc_id
+
+
+def intf_id_from_nni_port_num(port_num):
+    # OpenOLT starts at 128. We start at 1 (one-to-one mapping)
+    # return port_num - 128
+    return port_num  # identity mapping on Adtran
+
+
+def intf_id_to_intf_type(intf_id):
+    # if (2 << 28 ^ intf_id) < 16:
+    #     return Port.PON_OLT
+    # elif 128 <= intf_id <= 132:
+    #     return Port.ETHERNET_NNI
+    if 5 <= intf_id <= 20:  # 16 PON interfaces
+        return Port.PON_OLT
+    elif 1 <= intf_id <= 4:  # 4 NNI interfaces
+        return Port.ETHERNET_NNI
+    else:
+        raise Exception('Invalid intf_id value')
+
+
+def is_upstream(in_port, out_port):
+    # FIXME
+    # if out_port in [128, 129, 130, 131, 0xfffd, 0xfffffffd]:
+    # Not sure what fffd and the other is
+    return out_port in [1, 2, 3, 4, 0xfffd, 0xfffffffd]  # NNI ports 1..4 plus two reserved port values (see FIXME)
+
+
+def is_downstream(in_port, out_port):
+    return not is_upstream(in_port, out_port)  # strictly the complement of is_upstream()
diff --git a/adapters/adtran_olt/xpon/__init__.py b/adapters/adtran_olt/xpon/__init__.py
new file mode 100644
index 0000000..d67fcf2
--- /dev/null
+++ b/adapters/adtran_olt/xpon/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2019-present ADTRAN, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/adtran_olt/xpon/olt_gem_port.py b/adapters/adtran_olt/xpon/olt_gem_port.py
new file mode 100644
index 0000000..9159262
--- /dev/null
+++ b/adapters/adtran_olt/xpon/olt_gem_port.py
@@ -0,0 +1,126 @@
+#
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+
+from adapters.adtran_common.xpon.gem_port import GemPort
+from twisted.internet.defer import inlineCallbacks, returnValue
+from ..adtran_olt_handler import AdtranOltHandler
+
+log = structlog.get_logger()
+
+
+class OltGemPort(GemPort):
+ """
+ Adtran OLT specific implementation
+ """
+    def __init__(self, gem_id, alloc_id, tech_profile_id, pon_id, onu_id, uni_id,
+                 encryption=False,
+                 multicast=False,
+                 traffic_class=None,
+                 handler=None,
+                 is_mock=False):
+        super(OltGemPort, self).__init__(gem_id, alloc_id, uni_id, tech_profile_id,
+                                         encryption=encryption,
+                                         multicast=multicast,
+                                         traffic_class=traffic_class,
+                                         handler=handler,
+                                         is_mock=is_mock)
+        self._timestamp = None  # set later via the 'timestamp' property
+        self._pon_id = pon_id
+        self._onu_id = onu_id   # None if this is a multicast GEM Port
+
+    def __str__(self):  # e.g. "GemPort: pon/onu/uni, alloc-id: n, gem-id: n"
+        return "GemPort: {}/{}/{}, alloc-id: {}, gem-id: {}".format(self.pon_id, self.onu_id,
+                                                                    self.uni_id, self.alloc_id,
+                                                                    self.gem_id)
+
+    @staticmethod
+    def create(handler, gem, alloc_id, tech_profile_id, pon_id, onu_id, uni_id, _ofp_port_no):
+        return OltGemPort(gem.gemport_id,
+                          alloc_id,
+                          tech_profile_id,
+                          pon_id, onu_id, uni_id,
+                          encryption=gem.aes_encryption.lower() == 'true',  # 'aes_encryption' arrives as a string flag
+                          handler=handler,
+                          multicast=False)
+
+    @property
+    def pon_id(self):
+        return self._pon_id
+
+    @property
+    def onu_id(self):
+        return self._onu_id  # None for multicast GEM ports (see __init__)
+
+    @property
+    def timestamp(self):
+        return self._timestamp
+
+    @timestamp.setter
+    def timestamp(self, value):
+        self._timestamp = value
+
+    @property
+    def encryption(self):
+        return self._encryption
+
+    @encryption.setter
+    def encryption(self, value):
+        assert isinstance(value, bool), 'encryption is a boolean'
+
+        if self._encryption != value:
+            self._encryption = value
+            self.set_config(self._handler.rest_client, 'encryption', value)  # push the change to hardware immediately
+
+ @inlineCallbacks
+ def add_to_hardware(self, session, operation='POST'):
+ if self._is_mock:
+ returnValue('mock')
+
+ uri = AdtranOltHandler.GPON_GEM_CONFIG_LIST_URI.format(self.pon_id, self.onu_id)
+ data = json.dumps(self.to_dict())
+ name = 'gem-port-create-{}-{}: {}/{}'.format(self.pon_id, self.onu_id,
+ self.gem_id,
+ self.alloc_id)
+ try:
+ results = yield session.request(operation, uri, data=data, name=name)
+ returnValue(results)
+
+ except Exception as e:
+ if operation == 'POST':
+ returnValue(self.add_to_hardware(session, operation='PATCH'))
+ else:
+ log.exception('add-2-hw', gem=self, e=e)
+ raise
+
+ def remove_from_hardware(self, session):
+ if self._is_mock:
+ returnValue('mock')
+
+ uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(self.pon_id, self.onu_id, self.gem_id)
+ name = 'gem-port-delete-{}-{}: {}'.format(self.pon_id, self.onu_id, self.gem_id)
+ return session.request('DELETE', uri, name=name)
+
+    def set_config(self, session, leaf, value):
+        from ..adtran_olt_handler import AdtranOltHandler  # NOTE(review): redundant - already imported at module scope
+
+        data = json.dumps({leaf: value})
+        uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(self.pon_id,
+                                                          self.onu_id,
+                                                          self.gem_id)
+        name = 'onu-set-config-{}-{}-{}'.format(self._pon_id, leaf, str(value))
+        return session.request('PATCH', uri, data=data, name=name)
diff --git a/adapters/adtran_olt/xpon/olt_tcont.py b/adapters/adtran_olt/xpon/olt_tcont.py
new file mode 100644
index 0000000..db31543
--- /dev/null
+++ b/adapters/adtran_olt/xpon/olt_tcont.py
@@ -0,0 +1,90 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+from twisted.internet.defer import inlineCallbacks, returnValue
+from adapters.adtran_common.xpon.tcont import TCont
+# from python.adapters.openolt.protos import openolt_pb2
+from olt_traffic_descriptor import OltTrafficDescriptor
+from ..adtran_olt_handler import AdtranOltHandler
+
+log = structlog.get_logger()
+
+
+class OltTCont(TCont):
+ """
+ Adtran OLT specific implementation
+ """
+    def __init__(self, alloc_id, tech_profile_id, traffic_descriptor, pon_id, onu_id, uni_id, is_mock=False):
+        super(OltTCont, self).__init__(alloc_id, tech_profile_id, traffic_descriptor, uni_id, is_mock=is_mock)
+        self.pon_id = pon_id
+        self.onu_id = onu_id
+
+    def __str__(self):  # e.g. "TCont: pon/onu/uni, alloc-id: n"
+        return "TCont: {}/{}/{}, alloc-id: {}".format(self.pon_id, self.onu_id,
+                                                      self.uni_id, self.alloc_id)
+
+    @staticmethod
+    def create(tcont, pon_id, onu_id, tech_profile_id, uni_id, ofp_port_no):
+        # Only valid information in the upstream tcont of a tech profile
+        if tcont.direction != openolt_pb2.UPSTREAM:  # NOTE(review): the openolt_pb2 import above is commented out -> NameError at runtime; restore/confirm the intended import
+            return None
+
+        td = OltTrafficDescriptor.create(tcont, pon_id, onu_id, uni_id, ofp_port_no)
+        return OltTCont(tcont.alloc_id, tech_profile_id, td, pon_id, onu_id, uni_id)
+
+    @inlineCallbacks
+    def add_to_hardware(self, session):
+        if self._is_mock:
+            returnValue('mock')
+
+        uri = AdtranOltHandler.GPON_TCONT_CONFIG_LIST_URI.format(self.pon_id, self.onu_id)
+        data = json.dumps({'alloc-id': self.alloc_id})
+        name = 'tcont-create-{}-{}: {}'.format(self.pon_id, self.onu_id, self.alloc_id)
+
+        # For TCONT, only leaf is the key. So only post needed
+        try:
+            results = yield session.request('POST', uri, data=data, name=name,
+                                            suppress_error=False)
+        except Exception as _e:
+            results = None  # best-effort: tolerate create failure (e.g. TCONT already exists) and continue
+
+        if self.traffic_descriptor is not None:
+            try:
+                results = yield self.traffic_descriptor.add_to_hardware(session)
+
+            except Exception as e:
+                log.exception('traffic-descriptor', tcont=self,
+                              td=self.traffic_descriptor, e=e)
+                raise
+
+        returnValue(results)  # may be None when the POST failed and no traffic descriptor was applied
+
+ def remove_from_hardware(self, session):
+ if self._is_mock:
+ returnValue('mock')
+
+ uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(self.pon_id, self.onu_id, self.alloc_id)
+ name = 'tcont-delete-{}-{}: {}'.format(self.pon_id, self.onu_id, self.alloc_id)
+ return session.request('DELETE', uri, name=name)
+
+
+
+
+
+
+
+
+
diff --git a/adapters/adtran_olt/xpon/olt_traffic_descriptor.py b/adapters/adtran_olt/xpon/olt_traffic_descriptor.py
new file mode 100644
index 0000000..c6d90cf
--- /dev/null
+++ b/adapters/adtran_olt/xpon/olt_traffic_descriptor.py
@@ -0,0 +1,98 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import json
+from adapters.adtran_common.xpon.traffic_descriptor import TrafficDescriptor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from ..adtran_olt_handler import AdtranOltHandler
+
+log = structlog.get_logger()
+
+
+class OltTrafficDescriptor(TrafficDescriptor):
+ """
+    Adtran OLT specific implementation
+ """
+    def __init__(self, pon_id, onu_id, alloc_id, fixed, assured, maximum,
+                 additional=TrafficDescriptor.AdditionalBwEligibility.DEFAULT,
+                 best_effort=None,
+                 is_mock=False):
+        super(OltTrafficDescriptor, self).__init__(fixed, assured, maximum,
+                                                   additional=additional,
+                                                   best_effort=best_effort)
+        self.pon_id = pon_id
+        self.onu_id = onu_id
+        self.alloc_id = alloc_id  # TCONT this descriptor is PATCHed onto (see add_to_hardware)
+        self._is_mock = is_mock
+
+    @staticmethod
+    def create(tcont, pon_id, onu_id, _uni_id, _ofp_port_no):
+        alloc_id = tcont.alloc_id
+        shaping_info = tcont.traffic_shaping_info
+        fixed = shaping_info.cir  # NOTE(review): CIR mapped to 'fixed' while assured=0 - confirm intended BW mapping
+        assured = 0
+        maximum = shaping_info.pir
+
+        best_effort = None
+        # if shaping_info.add_bw_ind == openolt_pb2.InferredAdditionBWIndication_Assured:
+        #     pass
+        # TODO: Support additional BW decode
+        # elif shaping_info.add_bw_ind == openolt_pb2.InferredAdditionBWIndication_BestEffort:
+        #     pass
+        # additional = TrafficDescriptor.AdditionalBwEligibility.from_value(
+        #     traffic_disc['additional-bw-eligibility-indicator'])
+        #
+        # if additional == TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
+        #     best_effort = BestEffort(traffic_disc['maximum-bandwidth'],
+        #                              traffic_disc['priority'],
+        #                              traffic_disc['weight'])
+        # else:
+        #     best_effort = None
+
+        return OltTrafficDescriptor(pon_id, onu_id, alloc_id,
+                                    fixed, assured, maximum, best_effort=best_effort)
+
+    @inlineCallbacks
+    def add_to_hardware(self, session):
+        # TODO: Traffic descriptors are no longer shared, save pon and onu ID to base class
+        if self._is_mock:
+            returnValue('mock')
+
+        uri = AdtranOltHandler.GPON_TCONT_CONFIG_URI.format(self.pon_id,
+                                                            self.onu_id,
+                                                            self.alloc_id)
+        data = json.dumps({'traffic-descriptor': self.to_dict()})
+        name = 'tcont-td-{}-{}: {}'.format(self.pon_id, self.onu_id, self.alloc_id)
+        try:
+            results = yield session.request('PATCH', uri, data=data, name=name)
+
+        except Exception as e:
+            log.exception('traffic-descriptor', td=self, e=e)
+            raise
+
+        # TODO: Add support for best-effort sharing
+        # if self.additional_bandwidth_eligibility == \
+        #         TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING:
+        #     if self.best_effort is None:
+        #         raise ValueError('TCONT is best-effort but does not define best effort sharing')
+        #
+        #     try:
+        #         results = yield self.best_effort.add_to_hardware(session)
+        #
+        #     except Exception as e:
+        #         log.exception('best-effort', best_effort=self.best_effort, e=e)
+        #         raise
+
+        returnValue(results)  # always bound here: the except clause above re-raises
diff --git a/compose/adapters-adtran_olt.yml b/compose/adapters-adtran_olt.yml
new file mode 100644
index 0000000..6489c20
--- /dev/null
+++ b/compose/adapters-adtran_olt.yml
@@ -0,0 +1,41 @@
+---
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: '2'
+services:
+  ponsim_olt:  # NOTE(review): service name and entrypoint args appear copied from the ponsim template - verify for the adtran-olt adapter
+ image: "${REGISTRY}${REPOSITORY}voltha-adapter-adtran-olt${TAG}"
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+ entrypoint:
+ - /app/adtran_olt
+ - -device_type
+ - "OLT"
+ - -internal_if
+ - "eth0"
+ - -external_if
+ - "eth0"
+ - -verbose
+ ports:
+ - "50060:50060"
+ networks:
+ - default
+
+networks:
+ default:
+ driver: bridge
diff --git a/docker/Dockerfile.adapter_adtran_olt b/docker/Dockerfile.adapter_adtran_olt
new file mode 100644
index 0000000..638b5b7
--- /dev/null
+++ b/docker/Dockerfile.adapter_adtran_olt
@@ -0,0 +1,26 @@
+# Copyright 2019 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-adtran-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Adtran specific
+COPY adapters/adtran_olt /voltha/adapters/adtran_olt
+
+# Exposing process and default entry point
+CMD ["python", "/voltha/adapters/adtran_olt/main.py"]
diff --git a/docker/Dockerfile.adapter_adtran_olt_pyvoltha b/docker/Dockerfile.adapter_adtran_olt_pyvoltha
new file mode 100644
index 0000000..369ca8c
--- /dev/null
+++ b/docker/Dockerfile.adapter_adtran_olt_pyvoltha
@@ -0,0 +1,28 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-adtran-base-local:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Adtran specific
+COPY adapters/adtran_olt /voltha/adapters/adtran_olt
+
+# Exposing process and default entry point
+CMD ["python", "/voltha/adapters/adtran_olt/main.py"]
+
+
diff --git a/docker/Dockerfile.base b/docker/Dockerfile.base
new file mode 100644
index 0000000..b65e77b
--- /dev/null
+++ b/docker/Dockerfile.base
@@ -0,0 +1,41 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:xenial
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Update package index and install required build/runtime packages
+RUN apt-get update && \
+ apt-get install -y python openssl iproute2 libpcap-dev wget build-essential git binutils python-dev libffi-dev libssl-dev
+
+# Install current version of pip rather than outdated pip from apt
+RUN wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
+RUN python /tmp/get-pip.py
+
+# Install adapter requirements.
+COPY requirements.txt /tmp/requirements.txt
+RUN pip install -r /tmp/requirements.txt
+
+# Bundle app source
+RUN mkdir /voltha && touch /voltha/__init__.py
+RUN mkdir /voltha/adapters && touch /voltha/adapters/__init__.py
+ENV PYTHONPATH=/voltha
+
+COPY pki /voltha/pki/
+
+# Adtran specific
+COPY adapters/adtran_common /voltha/adapters/adtran_common
+COPY pydevd /voltha/pydevd
+
diff --git a/docker/Dockerfile.base_local b/docker/Dockerfile.base_local
new file mode 100644
index 0000000..802e93f
--- /dev/null
+++ b/docker/Dockerfile.base_local
@@ -0,0 +1,45 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:xenial
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Update package index and install required build/runtime packages
+RUN apt-get update && \
+ apt-get install -y python openssl iproute2 libpcap-dev wget build-essential git binutils python-dev libffi-dev libssl-dev
+
+# Install current version of pip rather than outdated pip from apt
+RUN wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
+RUN python /tmp/get-pip.py
+
+# Install adapter requirements.
+COPY requirements.txt /tmp/requirements.txt
+RUN pip install -r /tmp/requirements.txt
+
+# Bundle app source
+RUN mkdir /voltha && touch /voltha/__init__.py
+RUN mkdir /voltha/adapters && touch /voltha/adapters/__init__.py
+ENV PYTHONPATH=/voltha
+
+COPY pki /voltha/pki/
+
+# Adtran specific
+COPY adapters/adtran_common /voltha/adapters/adtran_common
+COPY pydevd /voltha/pydevd
+
+# Install local pyvoltha
+COPY pyvoltha/dist /pyvoltha/dist
+RUN pip install /pyvoltha/dist/*.tar.gz
+
diff --git a/env.sh b/env.sh
new file mode 100644
index 0000000..ec3b52f
--- /dev/null
+++ b/env.sh
@@ -0,0 +1,29 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# sourcing this file is needed to make local development and integration testing work
+export VOLTHA_BASE=$PWD
+
+# load local python virtualenv if exists, otherwise create it
+VENVDIR="venv-$(uname -s | tr '[:upper:]' '[:lower:]')"
+if [ ! -e "$VENVDIR/.built" ]; then
+ echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+ echo "Initializing OS-appropriate virtual env."
+ echo "This will take a few minutes."
+ echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+ make venv
+fi
+. $VENVDIR/bin/activate
+
+# add top-level voltha dir to pythonpath
+export PYTHONPATH=$VOLTHA_BASE/$VENVDIR/lib/python2.7/site-packages:$PYTHONPATH:$VOLTHA_BASE:$VOLTHA_BASE/cli:$VOLTHA_BASE/protos/third_party
diff --git a/pki/Makefile b/pki/Makefile
new file mode 100644
index 0000000..d3d9a18
--- /dev/null
+++ b/pki/Makefile
@@ -0,0 +1,97 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# VOLTHA pki makefile
+# Configuration is also given in voltha.cnf
+
+SHELL = bash -eu -o pipefail
+
+# parameters
+
+KEY_SIZE ?= 2048
+EXPIRATION_DAYS ?= 366
+
+
+# utility/validation targets
+
+help:
+ @echo "Usually you want to run 'make voltha.crt'"
+
+validate:
+ openssl verify -verbose -purpose sslserver -CAfile voltha-CA.pem voltha.crt
+
+printca: voltha-CA.pem
+ openssl x509 -in voltha-CA.pem -text -noout
+
+printkey: voltha.key
+ openssl rsa -in voltha.key -check
+
+printcsr: voltha.csr
+ openssl req -in voltha.csr -text -noout -verify
+
+printcrt: voltha.crt
+ openssl x509 -in voltha.crt -text -noout
+
+clean:
+ rm -rf root_ca voltha-CA.pem voltha.key voltha.csr voltha.crt
+
+# CA creation
+
+root_ca:
+ mkdir -p root_ca/private root_ca/newcerts
+ chmod 700 root_ca/private
+ echo 1000 > root_ca/serial
+ touch root_ca/index.txt
+
+root_ca/private/ca_root_phrase: root_ca
+ @echo "TestingVOLTHARootCAPassPhrase" > root_ca/private/ca_root_phrase
+
+root_ca/private/ca_key.pem: root_ca root_ca/private/ca_root_phrase
+ @echo "## Creating CA private key"
+ openssl genrsa -aes256 \
+ -passout file:root_ca/private/ca_root_phrase \
+ -out root_ca/private/ca_key.pem $(KEY_SIZE)
+
+voltha-CA.pem: voltha.cnf root_ca/private/ca_key.pem
+ @echo "## Creating self-signed CA public key: voltha-CA.pem"
+ openssl req -config voltha.cnf \
+ -new -x509 -days $(EXPIRATION_DAYS) -sha256 \
+ -extensions v3_ca \
+ -key root_ca/private/ca_key.pem \
+ -passin file:root_ca/private/ca_root_phrase \
+ -subj "/C=US/ST=California/L=Menlo Park/O=ONF/OU=Testing Only/CN=VOLTHA Test Root CA" \
+ -out voltha-CA.pem
+
+# server cert creation
+
+voltha.key:
+ @echo "## Creating server private key: voltha.key"
+ openssl genrsa -out voltha.key $(KEY_SIZE)
+
+voltha.csr: voltha.cnf voltha.key
+ @echo "## Creating signing request voltha.csr from voltha.key"
+ openssl req -config voltha.cnf \
+ -new -sha256 -key voltha.key \
+ -subj "/C=US/ST=California/L=Menlo Park/O=ONF/OU=Testing Only/CN=VOLTHA Server" \
+ -out voltha.csr
+
+voltha.crt: voltha-CA.pem voltha.cnf voltha.key voltha.csr
+ @echo "## Signing voltha.csr to create signed public key: voltha.crt"
+ openssl ca -config voltha.cnf \
+ -batch -days $(EXPIRATION_DAYS) -md sha256 \
+ -passin file:root_ca/private/ca_root_phrase \
+ -extensions server_cert \
+ -in voltha.csr \
+ -out voltha.crt
+
diff --git a/pki/voltha-CA.pem b/pki/voltha-CA.pem
new file mode 100644
index 0000000..a71091b
--- /dev/null
+++ b/pki/voltha-CA.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID2jCCAsKgAwIBAgIJAPp3/HUhTzcGMA0GCSqGSIb3DQEBCwUAMHoxCzAJBgNV
+BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRMwEQYDVQQHDApNZW5sbyBQYXJr
+MQwwCgYDVQQKDANPTkYxFTATBgNVBAsMDFRlc3RpbmcgT25seTEcMBoGA1UEAwwT
+Vk9MVEhBIFRlc3QgUm9vdCBDQTAeFw0xODA3MTAxODQwMDVaFw0xOTA3MTExODQw
+MDVaMHoxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRMwEQYDVQQH
+DApNZW5sbyBQYXJrMQwwCgYDVQQKDANPTkYxFTATBgNVBAsMDFRlc3RpbmcgT25s
+eTEcMBoGA1UEAwwTVk9MVEhBIFRlc3QgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMEawmybBpHIWVLGm9gqnfg4IpNNczCAeOB2w5UqsqIR
+mHMSa/f+wjDRztHhp+6FQfqN1ycWmrUAPyfYn63laRPM2VlnNOa0g8iS0uif2AaY
+3ms7PbjDNug2jtj/P7PNikHrd6cW/lWEXPhgGSWscNtlFvAjVwTs9pO6nELtw6XW
+wEgF40XB8UnwatD3J61G0TfcDlJMg0qMiTsnQzgrb6hUSI7IRSUKypFRii5lXts1
+Zt3VYz2yViMDat18ICz+oiVE3EL6YfTebM27m9UhhQn4BnBxwU18zcACz1SHGOPg
++hGFbO5NsXnVabvyNNuHabb4lDCYwcL8xGaPeqtm3jsCAwEAAaNjMGEwHQYDVR0O
+BBYEFBbNGGwDeW6Zmz9tF/QhGiExBmpnMB8GA1UdIwQYMBaAFBbNGGwDeW6Zmz9t
+F/QhGiExBmpnMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqG
+SIb3DQEBCwUAA4IBAQByQh5RKK5j01jjeBkCV0eonlMMWMKDIemKQLN6zlU3wvTV
+7++F1FT3IOhM3Oe/kS3JF7mG/jIhQuiycbIvmth/eUdPNJpePTHSilYVHLPXVMl2
+YUfkMyj5aRZCzSdzPfWEkJu/PceyBJP7vjnpOYOraqf6lU6sXBuTLVWRZADEQ9b4
+0oKa59pzOxFdtdDU5Pfnj/Vzaxsw8bpt/JINQb6VIqd71TASAdsuoQZXdYy7rvkl
+29M1gv2bTLxU7jE+5jIgfPtOde6cJeeuSNhKqaFJxTrbZFj4ZgQ4zXsr6QzO/hbV
+kLN8QechIcnf6F4tOTWEiPhs3yIE/947tFT3ZLcx
+-----END CERTIFICATE-----
diff --git a/pki/voltha.cnf b/pki/voltha.cnf
new file mode 100644
index 0000000..7552010
--- /dev/null
+++ b/pki/voltha.cnf
@@ -0,0 +1,89 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = ./root_ca
+certs = $dir/certs
+crl_dir = $dir/crl
+new_certs_dir = $dir/newcerts
+database = $dir/index.txt
+serial = $dir/serial
+
+private_key = $dir/private/ca_key.pem
+certificate = voltha-CA.pem
+
+# Make new requests easier to sign - allow two subjects with same name
+# (Or revoke the old certificate first.)
+unique_subject = no
+preserve = no
+
+# for CA that signs client certs
+policy = policy_loose
+
+[ policy_loose ]
+# Allow the CA to sign more types of certs
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+[ req ]
+default_bits = 2048
+default_days = 366
+default_md = sha256
+distinguished_name = req_distinguished_name
+string_mask = utf8only
+x509_extensions = v3_ca
+
+[ req_distinguished_name ]
+# See <https://en.wikipedia.org/wiki/Certificate_signing_request>.
+countryName = Country Name (2 letter code)
+stateOrProvinceName = State or Province Name
+localityName = Locality Name
+0.organizationName = Organization Name
+organizationalUnitName = Organizational Unit Name
+commonName = Common Name
+emailAddress = Email Address
+
+# Defaults DN
+countryName_default = US
+stateOrProvinceName_default = California
+localityName_default = Menlo Park
+0.organizationName_default = ONF
+organizationalUnitName_default = Testing Only
+commonName = VOLTHA Testing
+emailAddress_default = do-not-reply@opencord.org
+
+[ v3_ca ]
+# Extensions for a typical CA (`man x509v3_config`).
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer
+basicConstraints = critical, CA:TRUE
+keyUsage = critical, digitalSignature, cRLSign, keyCertSign
+
+[ server_cert ]
+# Extensions for server certificates (`man x509v3_config`).
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer:always
+basicConstraints = CA:FALSE
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+subjectAltName = 'DNS:voltha.dns'
+
diff --git a/pki/voltha.crt b/pki/voltha.crt
new file mode 100644
index 0000000..efeef03
--- /dev/null
+++ b/pki/voltha.crt
@@ -0,0 +1,92 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 4096 (0x1000)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=US, ST=California, L=Menlo Park, O=ONF, OU=Testing Only, CN=VOLTHA Test Root CA
+ Validity
+ Not Before: Jul 10 18:40:05 2018 GMT
+ Not After : Jul 11 18:40:05 2019 GMT
+ Subject: C=US, ST=California, L=Menlo Park, O=ONF, OU=Testing Only, CN=VOLTHA Server
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:c6:90:b9:38:0e:d5:38:bd:20:54:8f:82:56:2b:
+ 54:da:16:6f:a2:84:63:99:f8:4b:8c:24:be:c6:17:
+ ee:ce:b1:e4:27:4c:4f:e0:7b:b9:1c:0c:a7:9d:45:
+ 37:39:1d:b0:41:fb:96:49:f4:02:1c:66:87:3a:87:
+ e6:59:fc:9d:4d:fb:73:74:50:8a:39:25:5c:7e:8f:
+ b4:de:3e:d5:10:5e:91:53:da:6a:3e:57:db:18:d8:
+ da:c6:33:90:ee:0a:6d:4d:e2:e9:cb:1b:21:c8:59:
+ 3e:e6:b2:bd:ee:d2:95:70:f7:0e:98:4e:bc:04:6b:
+ 5b:4f:63:0b:25:d8:0e:4f:10:f8:30:92:19:a8:1b:
+ a1:3a:be:51:73:24:bc:0f:f0:4c:26:8f:df:2a:a8:
+ cc:d8:38:7e:ad:d0:f5:cc:e9:e9:76:d8:3e:ff:55:
+ 94:23:69:74:8f:d2:00:51:c5:d6:56:61:09:0f:5e:
+ 70:4c:5f:5e:d6:a4:47:58:ff:73:40:c5:5e:e0:14:
+ 73:6c:8b:4d:54:e2:fc:d7:94:60:64:9b:db:2a:d6:
+ 38:a0:d3:ae:2e:47:d3:74:3c:0f:c0:fe:c6:af:af:
+ a0:08:1f:20:a8:3a:a7:74:58:af:94:35:66:4b:7c:
+ 97:26:1b:03:23:0f:3d:0a:9d:ea:9b:06:d4:96:ca:
+ 5c:4d
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Subject Key Identifier:
+ F9:71:CB:9B:DC:B0:AB:C3:70:04:1B:9E:63:D0:21:01:CE:35:FF:19
+ X509v3 Authority Key Identifier:
+ keyid:16:CD:18:6C:03:79:6E:99:9B:3F:6D:17:F4:21:1A:21:31:06:6A:67
+ DirName:/C=US/ST=California/L=Menlo Park/O=ONF/OU=Testing Only/CN=VOLTHA Test Root CA
+ serial:FA:77:FC:75:21:4F:37:06
+
+ X509v3 Basic Constraints:
+ CA:FALSE
+ X509v3 Key Usage: critical
+ Digital Signature, Key Encipherment
+ X509v3 Extended Key Usage:
+ TLS Web Server Authentication
+ X509v3 Subject Alternative Name:
+ DNS:voltha.dns
+ Signature Algorithm: sha256WithRSAEncryption
+ 12:3e:b8:73:d1:ab:77:ec:7a:b0:d8:8e:94:8e:3c:fd:ff:b0:
+ 25:bf:e8:51:d7:b9:ae:55:03:28:cb:a2:9b:fb:86:9c:35:55:
+ 2b:c8:0c:c6:a9:b4:41:a3:12:d3:26:c9:33:93:4a:a1:7c:ad:
+ 06:eb:d5:d0:a4:63:e1:ad:7f:76:d7:7b:2b:44:ab:43:2b:26:
+ 84:a2:d6:5d:68:fc:bb:1b:15:3e:63:32:34:e8:1a:a4:d9:81:
+ 4b:28:17:e8:f7:1d:3a:d5:cb:37:87:77:04:3f:96:6d:17:e6:
+ 1e:90:0e:a8:6c:01:58:84:d4:1a:b4:9f:51:79:9c:03:23:1b:
+ b6:97:0c:28:a4:af:67:0b:da:b2:fa:6e:41:49:00:8a:36:11:
+ f8:80:50:61:03:c3:b5:df:f7:e5:ea:4b:9c:3f:68:68:e0:f8:
+ 78:f1:1d:ff:0b:23:45:2a:d6:19:a8:f6:b9:19:25:e0:46:ce:
+ 8b:56:ca:e5:da:2a:35:65:b8:e2:8d:6d:46:1e:9f:f3:4b:4d:
+ 7a:c0:f5:48:71:42:f6:95:f9:e5:c9:61:8f:7a:96:63:88:64:
+ 68:55:3e:d6:c6:c0:e2:cd:c9:03:93:87:4e:6f:c4:b4:fb:c3:
+ c4:ec:93:ad:88:28:17:fc:77:b8:a2:99:f6:26:ca:6f:36:2d:
+ 26:4f:d3:44
+-----BEGIN CERTIFICATE-----
+MIIEhTCCA22gAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwejELMAkGA1UEBhMCVVMx
+EzARBgNVBAgMCkNhbGlmb3JuaWExEzARBgNVBAcMCk1lbmxvIFBhcmsxDDAKBgNV
+BAoMA09ORjEVMBMGA1UECwwMVGVzdGluZyBPbmx5MRwwGgYDVQQDDBNWT0xUSEEg
+VGVzdCBSb290IENBMB4XDTE4MDcxMDE4NDAwNVoXDTE5MDcxMTE4NDAwNVowdDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEzARBgNVBAcMCk1lbmxv
+IFBhcmsxDDAKBgNVBAoMA09ORjEVMBMGA1UECwwMVGVzdGluZyBPbmx5MRYwFAYD
+VQQDDA1WT0xUSEEgU2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAxpC5OA7VOL0gVI+CVitU2hZvooRjmfhLjCS+xhfuzrHkJ0xP4Hu5HAynnUU3
+OR2wQfuWSfQCHGaHOofmWfydTftzdFCKOSVcfo+03j7VEF6RU9pqPlfbGNjaxjOQ
+7gptTeLpyxshyFk+5rK97tKVcPcOmE68BGtbT2MLJdgOTxD4MJIZqBuhOr5RcyS8
+D/BMJo/fKqjM2Dh+rdD1zOnpdtg+/1WUI2l0j9IAUcXWVmEJD15wTF9e1qRHWP9z
+QMVe4BRzbItNVOL815RgZJvbKtY4oNOuLkfTdDwPwP7Gr6+gCB8gqDqndFivlDVm
+S3yXJhsDIw89Cp3qmwbUlspcTQIDAQABo4IBGTCCARUwHQYDVR0OBBYEFPlxy5vc
+sKvDcAQbnmPQIQHONf8ZMIGsBgNVHSMEgaQwgaGAFBbNGGwDeW6Zmz9tF/QhGiEx
+BmpnoX6kfDB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTETMBEG
+A1UEBwwKTWVubG8gUGFyazEMMAoGA1UECgwDT05GMRUwEwYDVQQLDAxUZXN0aW5n
+IE9ubHkxHDAaBgNVBAMME1ZPTFRIQSBUZXN0IFJvb3QgQ0GCCQD6d/x1IU83BjAJ
+BgNVHRMEAjAAMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAV
+BgNVHREEDjAMggp2b2x0aGEuZG5zMA0GCSqGSIb3DQEBCwUAA4IBAQASPrhz0at3
+7Hqw2I6Ujjz9/7Alv+hR17muVQMoy6Kb+4acNVUryAzGqbRBoxLTJskzk0qhfK0G
+69XQpGPhrX9213srRKtDKyaEotZdaPy7GxU+YzI06Bqk2YFLKBfo9x061cs3h3cE
+P5ZtF+YekA6obAFYhNQatJ9ReZwDIxu2lwwopK9nC9qy+m5BSQCKNhH4gFBhA8O1
+3/fl6kucP2ho4Ph48R3/CyNFKtYZqPa5GSXgRs6LVsrl2io1ZbjijW1GHp/zS016
+wPVIcUL2lfnlyWGPepZjiGRoVT7WxsDizckDk4dOb8S0+8PE7JOtiCgX/He4opn2
+JspvNi0mT9NE
+-----END CERTIFICATE-----
diff --git a/pki/voltha.key b/pki/voltha.key
new file mode 100644
index 0000000..614efa1
--- /dev/null
+++ b/pki/voltha.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEAxpC5OA7VOL0gVI+CVitU2hZvooRjmfhLjCS+xhfuzrHkJ0xP
+4Hu5HAynnUU3OR2wQfuWSfQCHGaHOofmWfydTftzdFCKOSVcfo+03j7VEF6RU9pq
+PlfbGNjaxjOQ7gptTeLpyxshyFk+5rK97tKVcPcOmE68BGtbT2MLJdgOTxD4MJIZ
+qBuhOr5RcyS8D/BMJo/fKqjM2Dh+rdD1zOnpdtg+/1WUI2l0j9IAUcXWVmEJD15w
+TF9e1qRHWP9zQMVe4BRzbItNVOL815RgZJvbKtY4oNOuLkfTdDwPwP7Gr6+gCB8g
+qDqndFivlDVmS3yXJhsDIw89Cp3qmwbUlspcTQIDAQABAoIBAQCjM4YYbhCP7too
+xj8A5eJ60V/1ukxG2430ZKssE7WiyxFTv2QThH0Tcc898wq9dgpNYSQYKmalwxs4
+X0RUB82er6IoIp5I880+9Ixa8leaea+GtQkHrpwUov/FUdK343gNQQiZd/ZfPgL2
+CEkVhp1vWI/1XRkppLdK24PpGdhA4KXOKJTXrNCf4p7oJz+rEQrGuWrmHcoQV6Kc
+TrBMFvRRTaL6C2f+Nww8HMtpJjCCgMYNkHLH4sL2SbPiORwBJdXq+ajYfxqL6Swx
+DGUJqBJs8m1DDIAFDiDGhDiwLDBA/as8Ii1+wpayPfa5ulUvh3EkTyku9SXbJxJg
+7SBrzwVhAoGBAOno5coqNebro27vEKLnZX/Ttw1DWfklNuvF1e+O2nlttDL0K27p
+fMK2SAcIPz0RDFpt0kNuzMTkblvXseKZgnnYdVeDM0jIjfSDYyi6aI62UiTIb+m1
+mHljCBXu/V9kxNgcvt7R4rPStVtzvAI+I4kNxibh5O2XYw1MwsPdIDV1AoGBANlR
+UuZA1SKLgUMV5dFuJ7w31QY93AHq+XYbBbXM85IyQr4u05S6tgEQ0kI4odGP9boU
+GP5qoy3hem2c/K8QbZeGYD83zhsguEkq+FBavtqxCCIFJDvtCu+Hg8uQ4YGxTtdx
+Q9G6XBbL/reJ9o5ptRTm6FO/ya5Q1x5g7okV8bh5AoGBAI+g9MjolmSPOLG7limR
+kN+m7vXz72FvGoQ33J/Wxgxd8pJ/H9RhBrzBFQVMaRhkSYOVf9DsTkxwP9uhXJdZ
+z6Zl5dewtmLw00nbC55MqDtJdLMlaKLHYTLYPnTJZUeYJs7VB9bmZiApODdJn554
+7XUQwiXJ+7pwhN/7zHRcaZSpAoGBALhgghguu2YCvFnhk0m7zuSm7grMowPRi3ND
++/VB/Ql1sSDQc9zFCLQgxHNAvzIglNgaQxZf/TBpFEk5nz0ckA62CKS6QRjNCu2x
+ElqCk1jSSFcsy5A4TkXpUM1+j4VMnNq3E1Y2aflBfEvWNqSfVO519nlPx9ooZks0
+7EzMnHfpAoGBAOWg98M+I/FqziskKlB9QWTnFww/lGTTAGgOBHic0BJElzxiTVrx
+ZtnUCwlPi0fWSdrkWRcNYIMtfcDVA2Ifb9xhDHZLzfxznylhKiKrBSqAnQXjpkF7
+GGJLwMEzAjeb45HxydWoHWa0OaB1T9ZngAJs7mxFWYiPpS9ToO62L/IT
+-----END RSA PRIVATE KEY-----
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..0ccfe39
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+ncclient==0.5.3
+packaging==17.1
+pyvoltha==0.1.7
+lxml==3.6.4
+zmq==0.0.0
+pyzmq==16.0.3
+txZMQ==0.8.0
+xmltodict==0.11.0