VOL-1451 Initial checkin of openonu build

Produced a docker container capable of building and running
openonu/brcm_openomci_onu.  Copied over the current onu code
and resolved all imports by copying the dependencies into the
local source tree.

Change-Id: Ib9785d37afc65b7d32ecf74aee2456352626e2b6
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2843ab3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,64 @@
+# PyCharm
+.idea
+exportToHTML
+
+# Python
+*.pyc
+
+# Emacs
+*~
+.#*
+
+# Vagrant
+.vagrant
+*.box
+
+# venv
+.venv
+
+# Ansible
+ansible/*.retry
+
+# Any vi swap files
+*.swp
+
+# Virtualenv
+venv
+venv-darwin
+venv-linux
+adapters/venv
+adapters/venv-linux
+adapters/venv-darwin
+
+# Protobuf output files
+python/**/*_pb2.py
+python/**/*_pb2_grpc.py
+python/**/*.desc
+protos/**/*.pb.go
+protos/**/*.desc
+protos/voltha.pb
+
+# Editors
+*.bak
+*.project
+*.pydevproject
+
+# Docker
+.docker-base-built
+
+# Mac stuff
+.DS_Store
+**/.DS_Store
+
+# Generated docs
+**/*.pdf
+
+# Vagrant logfile
+*.log
+
+# Files copied over during make
+python/protos/*.proto
+
+# voltha cli history
+python/cli/.voltha_cli_history
+
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/python/Makefile b/python/Makefile
new file mode 100644
index 0000000..ec74b70
--- /dev/null
+++ b/python/Makefile
@@ -0,0 +1,209 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifneq ($(VOLTHA_BUILD),docker)
+ifeq ($(VOLTHA_BASE)_set,_set)
+$(error To get started, please source the env.sh file)
+endif
+endif
+
+ifeq ($(TAG),)
+TAG := latest
+endif
+
+ifeq ($(TARGET_TAG),)
+TARGET_TAG := latest
+endif
+
+# If no DOCKER_HOST_IP is specified grab a v4 IP address associated with
+# the default gateway
+ifeq ($(DOCKER_HOST_IP),)
+DOCKER_HOST_IP := $(shell ifconfig $$(netstat -rn | grep -E '^(default|0.0.0.0)' | head -1 | awk '{print $$NF}') | grep inet | awk '{print $$2}' | sed -e 's/addr://g')
+endif
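+
+# The auto-detection above can be bypassed by setting DOCKER_HOST_IP in the
+# environment; for example (a hypothetical address):
+#   DOCKER_HOST_IP=10.0.2.15 make build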
+
+ifneq ($(http_proxy)$(https_proxy),)
+# Include proxies from the environment
+DOCKER_PROXY_ARGS = \
+       --build-arg http_proxy=$(http_proxy) \
+       --build-arg https_proxy=$(https_proxy) \
+       --build-arg ftp_proxy=$(ftp_proxy) \
+       --build-arg no_proxy=$(no_proxy) \
+       --build-arg HTTP_PROXY=$(HTTP_PROXY) \
+       --build-arg HTTPS_PROXY=$(HTTPS_PROXY) \
+       --build-arg FTP_PROXY=$(FTP_PROXY) \
+       --build-arg NO_PROXY=$(NO_PROXY)
+endif
+
+DOCKER_BUILD_ARGS = \
+	--build-arg TAG=$(TAG) \
+	--build-arg REGISTRY=$(REGISTRY) \
+	--build-arg REPOSITORY=$(REPOSITORY) \
+	$(DOCKER_PROXY_ARGS) $(DOCKER_CACHE_ARG) \
+	 --rm --force-rm \
+	$(DOCKER_BUILD_EXTRA_ARGS)
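+
+# DOCKER_BUILD_EXTRA_ARGS can be used to pass additional flags to docker build,
+# for example (illustrative):
+#   DOCKER_BUILD_EXTRA_ARGS=--no-cache make containers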
+
+VENVDIR := venv-$(shell uname -s | tr '[:upper:]' '[:lower:]')
+
+DOCKER_IMAGE_LIST = \
+	base \
+	adapter-openonu
+
+# The following list was scavenged from the compose / stack files as well as
+# from the Dockerfiles. If nothing else it highlights that VOLTHA is not
+# using consistent versions for some of the containers.
+
+FETCH_BUILD_IMAGE_LIST = \
+       alpine:3.6 \
+       centos:7 \
+       centurylink/ca-certs:latest \
+       grpc/python:latest \
+       ubuntu:xenial
+
+FETCH_COMPOSE_IMAGE_LIST = \
+        wurstmeister/kafka:latest \
+        wurstmeister/zookeeper:latest
+
+# find k8s -type f | xargs grep image: | awk '{print $NF}' | sed -e 's/\"//g' | sed '/:.*$/!s/$/:latest/g' | sort -u | sed -e 's/^/       /g' -e 's/$/ \\/g'
+# Manually remove some images from this list as they don't reflect the new
+# naming conventions for the VOLTHA build
+FETCH_K8S_IMAGE_LIST = \
+       wurstmeister/kafka:1.0.0 \
+       zookeeper:3.4.11
+
+FETCH_IMAGE_LIST = $(shell echo $(FETCH_BUILD_IMAGE_LIST) $(FETCH_COMPOSE_IMAGE_LIST) $(FETCH_K8S_IMAGE_LIST) | tr ' ' '\n' | sort -u)
+
+.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) base protoc protos adapter_openonu
+
+# This should be the first and default target in this Makefile
+help:
+	@echo "Usage: make [<target>]"
+	@echo "where available targets are:"
+	@echo
+	@echo "build        : Build the adapter protos and docker images.\n\
+               If this is the first time you are building, use the \"make build\" target."
+	@echo "clean        : Remove files created by the build and tests"
+	@echo "distclean    : Remove venv directory"
+	@echo "fetch        : Pre-fetch artifacts for subsequent local builds"
+	@echo "help         : Print this help"
+	@echo "protoc       : Build a container with protoc installed"
+	@echo "protos       : Compile all grpc/protobuf files"
+	@echo "rebuild-venv : Rebuild local Python virtualenv from scratch"
+	@echo "venv         : Build local Python virtualenv if it does not exist yet"
+	@echo "containers   : Build all the docker containers"
+	@echo "base         : Build the base docker container used by all other dockers"
+	@echo "adapter_openonu       : Build the openonu openomci adapter docker container"
+	@echo "tag          : Tag a set of images"
+	@echo "push         : Push the docker images to an external repository"
+	@echo "pull         : Pull the docker images from a repository"
+	@echo
+
+## New directories can be added here
+#DIRS:=
+
+## If one directory depends on another directory that
+## dependency can be expressed here
+##
+## For example, if the Tibit directory depended on the eoam
+## directory being built first, then that can be expressed here.
+##  driver/tibit: eoam
+
+# Parallel Build
+$(DIRS):
+	@echo "    MK $@"
+	$(Q)$(MAKE) -C $@
+
+# Parallel Clean
+DIRS_CLEAN = $(addsuffix .clean,$(DIRS))
+$(DIRS_CLEAN):
+	@echo "    CLEAN $(basename $@)"
+	$(Q)$(MAKE) -C $(basename $@) clean
+
+# Parallel Flake8
+DIRS_FLAKE8 = $(addsuffix .flake8,$(DIRS))
+$(DIRS_FLAKE8):
+	@echo "    FLAKE8 $(basename $@)"
+	-$(Q)$(MAKE) -C $(basename $@) flake8
+
+build: protoc protos containers
+
+containers: base adapter_openonu
+
+base:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-base:${TAG} -f docker/Dockerfile.base .
+
+adapter_openonu:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-openonu:${TAG} -f docker/Dockerfile.adapter_openonu .
+
+protoc:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} -f docker/Dockerfile.protoc .
+
+protos:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} -f docker/Dockerfile.protos .
+
+tag: $(patsubst  %,%.tag,$(DOCKER_IMAGE_LIST))
+
+push: tag $(patsubst  %,%.push,$(DOCKER_IMAGE_LIST))
+
+pull: $(patsubst  %,%.pull,$(DOCKER_IMAGE_LIST))
+
+%.tag:
+	docker tag ${REGISTRY}${REPOSITORY}voltha-$(subst .tag,,$@):${TAG} ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .tag,,$@):${TARGET_TAG}
+
+%.push:
+	docker push ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .push,,$@):${TARGET_TAG}
+
+%.pull:
+	docker pull ${REGISTRY}${REPOSITORY}voltha-$(subst .pull,,$@):${TAG}
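+
+# Example (illustrative; the registry values here are hypothetical): build the
+# openonu adapter image, then retag and push it to a private registry:
+#   TAG=dev make adapter_openonu
+#   TAG=dev TARGET_REGISTRY=mirror.example.com:5000/ TARGET_TAG=dev \
+#       make adapter-openonu.tag adapter-openonu.push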
+
+clean:
+	find . -name '*.pyc' | xargs rm -f
+
+distclean: clean
+	rm -rf ${VENVDIR}
+
+fetch:
+	@bash -c ' \
+		for i in $(FETCH_IMAGE_LIST); do \
+			docker pull $$i; \
+		done'
+
+purge-venv:
+	rm -fr ${VENVDIR}
+
+rebuild-venv: purge-venv venv
+
+ifneq ($(VOLTHA_BUILD),docker)
+venv: ${VENVDIR}/.built
+else
+venv:
+endif
+
+${VENVDIR}/.built:
+	@ virtualenv ${VENVDIR}
+	@ . ${VENVDIR}/bin/activate && \
+	    pip install --upgrade pip; \
+	    if ! pip install -r requirements.txt; \
+	    then \
+	        echo "On Mac OS X, if the installation failed with an error \n'<openssl/opensslv.h>': file not found,"; \
+	        echo "see the BUILD.md file for a workaround"; \
+	    else \
+	        uname -s > ${VENVDIR}/.built; \
+	    fi
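+
+# To use the resulting virtualenv for local (non-docker) development, source
+# its activate script, e.g. on Linux:
+#   . venv-linux/bin/activate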
+
+
+flake8: $(DIRS_FLAKE8)
+
+# end file
diff --git a/python/__init__.py b/python/__init__.py
new file mode 100644
index 0000000..cfcdc97
--- /dev/null
+++ b/python/__init__.py
@@ -0,0 +1,15 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
\ No newline at end of file
diff --git a/python/adapters/__init__.py b/python/adapters/__init__.py
new file mode 100644
index 0000000..58aca1e
--- /dev/null
+++ b/python/adapters/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/brcm_openomci_onu/VERSION b/python/adapters/brcm_openomci_onu/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/python/adapters/brcm_openomci_onu/__init__.py b/python/adapters/brcm_openomci_onu/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/brcm_openomci_onu/brcm_openomci_onu.py b/python/adapters/brcm_openomci_onu/brcm_openomci_onu.py
new file mode 100644
index 0000000..ad89dc8
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/brcm_openomci_onu.py
@@ -0,0 +1,331 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Broadcom OpenOMCI OLT/ONU adapter.
+
+This adapter does NOT support XPON
+"""
+
+import structlog
+
+from twisted.internet import reactor, task
+from zope.interface import implementer
+
+from voltha.adapters.brcm_openomci_onu.brcm_openomci_onu_handler import BrcmOpenomciOnuHandler
+from voltha.adapters.interface import IAdapterInterface
+from voltha.protos import third_party
+from voltha.protos.adapter_pb2 import Adapter
+from voltha.protos.adapter_pb2 import AdapterConfig
+from voltha.protos.common_pb2 import LogLevel
+from voltha.protos.device_pb2 import DeviceType, DeviceTypes, Port, Image
+from voltha.protos.health_pb2 import HealthStatus
+
+from common.frameio.frameio import hexify
+from voltha.extensions.omci.openomci_agent import OpenOMCIAgent, OpenOmciAgentDefaults
+from voltha.extensions.omci.omci_me import *
+from voltha.extensions.omci.database.mib_db_dict import MibDbVolatileDict
+from omci.brcm_capabilities_task import BrcmCapabilitiesTask
+from omci.brcm_get_mds_task import BrcmGetMdsTask
+from omci.brcm_mib_sync import BrcmMibSynchronizer
+from copy import deepcopy
+
+
+_ = third_party
+log = structlog.get_logger()
+
+
+@implementer(IAdapterInterface)
+class BrcmOpenomciOnuAdapter(object):
+
+    name = 'brcm_openomci_onu'
+
+    supported_device_types = [
+        DeviceType(
+            id=name,
+            vendor_ids=['OPEN', 'ALCL', 'BRCM', 'TWSH', 'ALPH', 'ISKT', 'SFAA', 'BBSM'],
+            adapter=name,
+            accepts_bulk_flow_update=True
+        )
+    ]
+
+    def __init__(self, adapter_agent, config):
+        log.debug('function-entry', config=config)
+        self.adapter_agent = adapter_agent
+        self.config = config
+        self.descriptor = Adapter(
+            id=self.name,
+            vendor='Voltha project',
+            version='0.50',
+            config=AdapterConfig(log_level=LogLevel.INFO)
+        )
+        self.devices_handlers = dict()
+
+        # Customize OpenOMCI for Broadcom ONUs
+        self.broadcom_omci = deepcopy(OpenOmciAgentDefaults)
+
+        self.broadcom_omci['mib-synchronizer']['state-machine'] = BrcmMibSynchronizer
+        self.broadcom_omci['omci-capabilities']['tasks']['get-capabilities'] = BrcmCapabilitiesTask
+
+        # Defer creation of omci agent to a lazy init that allows subclasses to override support classes
+
+        # register for adapter messages
+        self.adapter_agent.register_for_inter_adapter_messages()
+
+    def custom_me_entities(self):
+        return None
+
+    @property
+    def omci_agent(self):
+        if not hasattr(self, '_omci_agent') or self._omci_agent is None:
+            log.debug('creating-omci-agent')
+            self._omci_agent = OpenOMCIAgent(self.adapter_agent.core,
+                                             support_classes=self.broadcom_omci)
+        return self._omci_agent
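+
+    # Because the agent is created lazily above, a subclass can swap in its own
+    # support classes before first use. An illustrative sketch (MyMibSynchronizer
+    # is a hypothetical class, not part of this change):
+    #
+    #   class MyOnuAdapter(BrcmOpenomciOnuAdapter):
+    #       def __init__(self, adapter_agent, config):
+    #           super(MyOnuAdapter, self).__init__(adapter_agent, config)
+    #           # omci_agent has not been instantiated yet, so this override
+    #           # takes effect on the first access to self.omci_agent
+    #           self.broadcom_omci['mib-synchronizer']['state-machine'] = MyMibSynchronizer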
+
+    def start(self):
+        log.debug('starting')
+        self.omci_agent.start()
+        log.info('started')
+
+    def stop(self):
+        log.debug('stopping')
+
+        omci, self._omci_agent = self._omci_agent, None
+        if omci is not None:
+            omci.stop()
+
+        log.info('stopped')
+
+    def adapter_descriptor(self):
+        return self.descriptor
+
+    def device_types(self):
+        return DeviceTypes(items=self.supported_device_types)
+
+    def health(self):
+        return HealthStatus(state=HealthStatus.HealthState.HEALTHY)
+
+    def change_master_state(self, master):
+        raise NotImplementedError()
+
+    def adopt_device(self, device):
+        log.info('adopt_device', device_id=device.id)
+        self.devices_handlers[device.id] = BrcmOpenomciOnuHandler(self, device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].activate, device)
+        return device
+
+    def reconcile_device(self, device):
+        log.info('reconcile-device', device_id=device.id)
+        self.devices_handlers[device.id] = BrcmOpenomciOnuHandler(self, device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].reconcile, device)
+
+    def abandon_device(self, device):
+        raise NotImplementedError()
+
+    def disable_device(self, device):
+        log.info('disable-onu-device', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.disable(device)
+
+    def reenable_device(self, device):
+        log.info('reenable-onu-device', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.reenable(device)
+
+    def reboot_device(self, device):
+        log.info('reboot-device', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.reboot()
+
+    def download_image(self, device, request):
+        raise NotImplementedError()
+
+    def get_image_download_status(self, device, request):
+        raise NotImplementedError()
+
+    def cancel_image_download(self, device, request):
+        raise NotImplementedError()
+
+    def activate_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def revert_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def self_test_device(self, device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: The result of the self-test
+        """
+        log.info('self-test-device - Not implemented yet', device=device.id)
+        raise NotImplementedError()
+
+    def delete_device(self, device):
+        log.info('delete-device', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.delete(device)
+            del self.devices_handlers[device.id]
+        return
+
+    def get_device_details(self, device):
+        raise NotImplementedError()
+
+    # TODO(smbaker): When BrcmOpenomciOnuAdapter is updated to inherit from OnuAdapter, this function can be deleted
+    def update_pm_config(self, device, pm_config):
+        log.info("adapter-update-pm-config", device=device,
+                 pm_config=pm_config)
+        handler = self.devices_handlers[device.id]
+        handler.update_pm_config(device, pm_config)
+
+    def update_flows_bulk(self, device, flows, groups):
+        # log.info('bulk-flow-update', device_id=device.id,
+        #          flows=flows, groups=groups)
+        assert len(groups.items) == 0
+        handler = self.devices_handlers[device.id]
+        return handler.update_flow_table(device, flows.items)
+
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        raise NotImplementedError()
+
+    def send_proxied_message(self, proxy_address, msg):
+        log.debug('send-proxied-message', proxy_address=proxy_address, msg=msg)
+
+    def receive_proxied_message(self, proxy_address, msg):
+        log.debug('receive-proxied-message', proxy_address=proxy_address,
+                 device_id=proxy_address.device_id, msg=hexify(msg))
+        # The device_id in the proxy_address is the olt device id. We need to
+        # get the onu device id using the port number in the proxy_address
+        device = self.adapter_agent. \
+            get_child_device_with_proxy_address(proxy_address)
+        if device:
+            handler = self.devices_handlers[device.id]
+            handler.receive_message(msg)
+
+    def receive_packet_out(self, logical_device_id, egress_port_no, msg):
+        log.info('packet-out', logical_device_id=logical_device_id,
+                 egress_port_no=egress_port_no, msg_len=len(msg))
+
+    def receive_inter_adapter_message(self, msg):
+        log.debug('receive_inter_adapter_message', msg=msg)
+        proxy_address = msg['proxy_address']
+        assert proxy_address is not None
+        # The device_id in the proxy_address is the olt device id. We need to
+        # get the onu device id using the port number in the proxy_address
+        device = self.adapter_agent. \
+            get_child_device_with_proxy_address(proxy_address)
+        if device:
+            handler = self.devices_handlers[device.id]
+            handler.event_messages.put(msg)
+        else:
+            log.error("device-not-found")
+
+    def create_interface(self, device, data):
+        log.debug('create-interface', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.create_interface(data)
+
+    def update_interface(self, device, data):
+        log.debug('update-interface', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.update_interface(data)
+
+    def remove_interface(self, device, data):
+        log.debug('remove-interface', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.remove_interface(data)
+
+    def receive_onu_detect_state(self, device_id, state):
+        raise NotImplementedError()
+
+    def create_tcont(self, device, tcont_data, traffic_descriptor_data):
+        log.debug('create-tcont', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.create_tcont(tcont_data, traffic_descriptor_data)
+
+    def update_tcont(self, device, tcont_data, traffic_descriptor_data):
+        raise NotImplementedError()
+
+    def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
+        log.debug('remove-tcont', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.remove_tcont(tcont_data, traffic_descriptor_data)
+
+    def create_gemport(self, device, data):
+        log.debug('create-gemport', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.create_gemport(data)
+
+    def update_gemport(self, device, data):
+        raise NotImplementedError()
+
+    def remove_gemport(self, device, data):
+        log.debug('remove-gemport', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.remove_gemport(data)
+
+    def create_multicast_gemport(self, device, data):
+        log.debug('create-multicast-gemport', device_id=device.id)
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                handler.create_multicast_gemport(data)
+
+    def update_multicast_gemport(self, device, data):
+        raise NotImplementedError()
+
+    def remove_multicast_gemport(self, device, data):
+        raise NotImplementedError()
+
+    def create_multicast_distribution_set(self, device, data):
+        raise NotImplementedError()
+
+    def update_multicast_distribution_set(self, device, data):
+        raise NotImplementedError()
+
+    def remove_multicast_distribution_set(self, device, data):
+        raise NotImplementedError()
+
+    def suppress_alarm(self, filter):
+        raise NotImplementedError()
+
+    def unsuppress_alarm(self, filter):
+        raise NotImplementedError()
+
+
diff --git a/python/adapters/brcm_openomci_onu/brcm_openomci_onu_handler.py b/python/adapters/brcm_openomci_onu/brcm_openomci_onu_handler.py
new file mode 100644
index 0000000..42dd0a9
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/brcm_openomci_onu_handler.py
@@ -0,0 +1,1044 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Broadcom OpenOMCI OLT/ONU adapter handler.
+"""
+
+import json
+import ast
+import structlog
+
+from collections import OrderedDict
+
+from twisted.internet import reactor, task
+from twisted.internet.defer import DeferredQueue, inlineCallbacks, returnValue, TimeoutError
+
+from heartbeat import HeartBeat
+from voltha.extensions.kpi.onu.onu_pm_metrics import OnuPmMetrics
+from voltha.extensions.kpi.onu.onu_omci_pm import OnuOmciPmMetrics
+from voltha.extensions.alarms.adapter_alarms import AdapterAlarms
+
+from common.utils.indexpool import IndexPool
+import voltha.core.flow_decomposer as fd
+from voltha.registry import registry
+from voltha.core.config.config_backend import ConsulStore
+from voltha.core.config.config_backend import EtcdStore
+from voltha.protos import third_party
+from voltha.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from voltha.protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC, OFPPS_LINK_DOWN, ofp_port
+from voltha.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
+from voltha.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
+from voltha.extensions.omci.onu_configuration import OMCCVersion
+from voltha.extensions.omci.onu_device_entry import OnuDeviceEvents, \
+    OnuDeviceEntry, IN_SYNC_KEY
+from voltha.adapters.brcm_openomci_onu.omci.brcm_mib_download_task import BrcmMibDownloadTask
+from voltha.adapters.brcm_openomci_onu.omci.brcm_tp_service_specific_task import BrcmTpServiceSpecificTask
+from voltha.adapters.brcm_openomci_onu.omci.brcm_uni_lock_task import BrcmUniLockTask
+from voltha.adapters.brcm_openomci_onu.omci.brcm_vlan_filter_task import BrcmVlanFilterTask
+from voltha.adapters.brcm_openomci_onu.onu_gem_port import *
+from voltha.adapters.brcm_openomci_onu.onu_tcont import *
+from voltha.adapters.brcm_openomci_onu.pon_port import *
+from voltha.adapters.brcm_openomci_onu.uni_port import *
+from voltha.adapters.brcm_openomci_onu.onu_traffic_descriptor import *
+from common.tech_profile.tech_profile import TechProfile
+
+OP = EntityOperations
+RC = ReasonCodes
+
+_ = third_party
+log = structlog.get_logger()
+
+_STARTUP_RETRY_WAIT = 20
+
+
+class BrcmOpenomciOnuHandler(object):
+
+    def __init__(self, adapter, device_id):
+        self.log = structlog.get_logger(device_id=device_id)
+        self.log.debug('function-entry')
+        self.adapter = adapter
+        self.adapter_agent = adapter.adapter_agent
+        self.parent_adapter = None
+        self.parent_id = None
+        self.device_id = device_id
+        self.incoming_messages = DeferredQueue()
+        self.event_messages = DeferredQueue()
+        self.proxy_address = None
+        self.tx_id = 0
+        self._enabled = False
+        self.alarms = None
+        self.pm_metrics = None
+        self._omcc_version = OMCCVersion.Unknown
+        self._total_tcont_count = 0  # From ANI-G ME
+        self._qos_flexibility = 0  # From ONT2_G ME
+
+        self._onu_indication = None
+        self._unis = dict()  # Port # -> UniPort
+
+        self._pon = None
+        # TODO: probably shouldn't be hardcoded; determine from the olt maybe?
+        self._pon_port_number = 100
+        self.logical_device_id = None
+
+        self._heartbeat = HeartBeat.create(self, device_id)
+
+        # Set up OpenOMCI environment
+        self._onu_omci_device = None
+        self._dev_info_loaded = False
+        self._deferred = None
+
+        self._in_sync_subscription = None
+        self._connectivity_subscription = None
+        self._capabilities_subscription = None
+
+        self.mac_bridge_service_profile_entity_id = 0x201
+        self.gal_enet_profile_entity_id = 0x1
+
+        self._tp_service_specific_task = dict()
+        self._tech_profile_download_done = dict()
+
+        # Initialize KV store client
+        self.args = registry('main').get_args()
+        if self.args.backend == 'etcd':
+            host, port = self.args.etcd.split(':', 1)
+            self.kv_client = EtcdStore(host, port,
+                                       TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX)
+        elif self.args.backend == 'consul':
+            host, port = self.args.consul.split(':', 1)
+            self.kv_client = ConsulStore(host, port,
+                                         TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX)
+        else:
+            self.log.error('Invalid-backend')
+            raise Exception("Invalid-backend-for-kv-store")
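+
+        # For example (hypothetical values), with arguments of the form
+        #   --backend=etcd --etcd=127.0.0.1:2379
+        # tech profiles are read from etcd under the
+        # TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX key prefix.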
+
+        # Handle received ONU event messages
+        reactor.callLater(0, self.handle_onu_events)
+
+    @property
+    def enabled(self):
+        return self._enabled
+
+    @enabled.setter
+    def enabled(self, value):
+        if self._enabled != value:
+            self._enabled = value
+
+    @property
+    def omci_agent(self):
+        return self.adapter.omci_agent
+
+    @property
+    def omci_cc(self):
+        return self._onu_omci_device.omci_cc if self._onu_omci_device is not None else None
+
+    @property
+    def heartbeat(self):
+        return self._heartbeat
+
+    @property
+    def uni_ports(self):
+        return self._unis.values()
+
+    def uni_port(self, port_no_or_name):
+        if isinstance(port_no_or_name, (str, unicode)):
+            return next((uni for uni in self.uni_ports
+                         if uni.name == port_no_or_name), None)
+
+        assert isinstance(port_no_or_name, int), 'Invalid parameter type'
+        return next((uni for uni in self.uni_ports
+                    if uni.logical_port_number == port_no_or_name), None)
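+
+    # e.g. both uni_port('uni-100') and uni_port(100) resolve a UNI, by name or
+    # by logical port number respectively (the values are illustrative)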
+
+    @property
+    def pon_port(self):
+        return self._pon
+
+    def receive_message(self, msg):
+        if self.omci_cc is not None:
+            self.omci_cc.receive_message(msg)
+
+    # Called once when the adapter creates the device/onu instance
+    def activate(self, device):
+        self.log.debug('function-entry', device=device)
+
+        # first we verify that we got parent reference and proxy info
+        assert device.parent_id
+        assert device.proxy_address.device_id
+
+        # register for proxied messages right away
+        self.proxy_address = device.proxy_address
+        self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+        self.parent_id = device.parent_id
+        parent_device = self.adapter_agent.get_device(self.parent_id)
+        if parent_device.type == 'openolt':
+            self.parent_adapter = registry('adapter_loader'). \
+                get_agent(parent_device.adapter).adapter
+
+        if self.enabled is not True:
+            self.log.info('activating-new-onu')
+            # populate what we know; the rest comes later after mib sync
+            device.root = True
+            device.vendor = 'Broadcom'
+            device.connect_status = ConnectStatus.REACHABLE
+            device.oper_status = OperStatus.DISCOVERED
+            device.reason = 'activating-onu'
+
+            # pm_metrics requires a logical device id
+            parent_device = self.adapter_agent.get_device(device.parent_id)
+            self.logical_device_id = parent_device.parent_id
+            assert self.logical_device_id, 'Invalid logical device ID'
+
+            self.adapter_agent.update_device(device)
+
+            self.log.debug('set-device-discovered')
+
+            self._init_pon_state(device)
+
+            ############################################################################
+            # Setup PM configuration for this device
+            # Pass in ONU specific options
+            kwargs = {
+                OnuPmMetrics.DEFAULT_FREQUENCY_KEY: OnuPmMetrics.DEFAULT_ONU_COLLECTION_FREQUENCY,
+                'heartbeat': self.heartbeat,
+                OnuOmciPmMetrics.OMCI_DEV_KEY: self._onu_omci_device
+            }
+            self.pm_metrics = OnuPmMetrics(self.adapter_agent, self.device_id,
+                                           self.logical_device_id, grouped=True,
+                                           freq_override=False, **kwargs)
+            pm_config = self.pm_metrics.make_proto()
+            self._onu_omci_device.set_pm_config(self.pm_metrics.omci_pm.openomci_interval_pm)
+            self.log.info("initial-pm-config", pm_config=pm_config)
+            self.adapter_agent.update_device_pm_config(pm_config, init=True)
+
+            ############################################################################
+            # Setup Alarm handler
+            self.alarms = AdapterAlarms(self.adapter_agent, device.id, self.logical_device_id)
+            # Note, ONU ID and UNI intf set in add_uni_port method
+            self._onu_omci_device.alarm_synchronizer.set_alarm_params(mgr=self.alarms,
+                                                                      ani_ports=[self._pon])
+            self.enabled = True
+        else:
+            self.log.info('onu-already-activated')
+
+    # Called once when the adapter needs to re-create the device, usually on vcore restart
+    def reconcile(self, device):
+        self.log.debug('function-entry', device=device)
+
+        # first we verify that we got parent reference and proxy info
+        assert device.parent_id
+        assert device.proxy_address.device_id
+
+        # register for proxied messages right away
+        self.proxy_address = device.proxy_address
+        self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+
+        if self.enabled is not True:
+            self.log.info('reconciling-broadcom-onu-device')
+
+            self._init_pon_state(device)
+
+            # need to restart state machines on vcore restart.  there is no indication to do it for us.
+            self._onu_omci_device.start()
+            device.reason = "restarting-openomci"
+            self.adapter_agent.update_device(device)
+
+            # TODO: this is probably a bit heavy-handed
+            # Force a reboot for now.  We need indications to reflow to reassign tconts and gems given vcore went away
+            # This may not be necessary when mib resync actually works
+            reactor.callLater(1, self.reboot)
+
+            self.enabled = True
+        else:
+            self.log.info('onu-already-activated')
+
+    @inlineCallbacks
+    def handle_onu_events(self):
+        event_msg = yield self.event_messages.get()
+        try:
+            if event_msg['event'] == 'download_tech_profile':
+                tp_path = event_msg['event_data']
+                uni_id = event_msg['uni_id']
+                self.load_and_configure_tech_profile(uni_id, tp_path)
+
+        except Exception as e:
+            self.log.error("exception-handling-onu-event", e=e)
+
+        # Handle next event
+        reactor.callLater(0, self.handle_onu_events)
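+
+    # The olt handler is expected to enqueue event dicts of this shape on
+    # event_messages (values shown are illustrative):
+    #   {'event': 'download_tech_profile',
+    #    'event_data': '<tech-profile-kv-path>',
+    #    'uni_id': 0}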
+
+    def _init_pon_state(self, device):
+        self.log.debug('function-entry', device=device)
+
+        self._pon = PonPort.create(self, self._pon_port_number)
+        self.adapter_agent.add_port(device.id, self._pon.get_port())
+
+        self.log.debug('added-pon-port-to-agent', pon=self._pon)
+
+        parent_device = self.adapter_agent.get_device(device.parent_id)
+        self.logical_device_id = parent_device.parent_id
+
+        self.adapter_agent.update_device(device)
+
+        # Create and start the OpenOMCI ONU Device Entry for this ONU
+        self._onu_omci_device = self.omci_agent.add_device(self.device_id,
+                                                           self.adapter_agent,
+                                                           support_classes=self.adapter.broadcom_omci,
+                                                           custom_me_map=self.adapter.custom_me_entities())
+        # Port startup
+        if self._pon is not None:
+            self._pon.enabled = True
+
+    # TODO: move to UniPort
+    def update_logical_port(self, logical_device_id, port_id, state):
+        try:
+            self.log.info('updating-logical-port', logical_port_id=port_id,
+                          logical_device_id=logical_device_id, state=state)
+            logical_port = self.adapter_agent.get_logical_port(logical_device_id,
+                                                               port_id)
+            logical_port.ofp_port.state = state
+            self.adapter_agent.update_logical_port(logical_device_id,
+                                                   logical_port)
+        except Exception as e:
+            self.log.exception("exception-updating-port", e=e)
+
+    def delete(self, device):
+        self.log.info('delete-onu', device=device)
+        if self.parent_adapter:
+            try:
+                self.parent_adapter.delete_child_device(self.parent_id, device)
+            except AttributeError:
+                self.log.debug('parent-device-delete-child-not-implemented')
+        else:
+            self.log.debug("parent-adapter-not-available")
+
+    def _create_tconts(self, uni_id, us_scheduler):
+        alloc_id = us_scheduler['alloc_id']
+        q_sched_policy = us_scheduler['q_sched_policy']
+        self.log.debug('create-tcont', us_scheduler=us_scheduler)
+
+        tcontdict = dict()
+        tcontdict['alloc-id'] = alloc_id
+        tcontdict['q_sched_policy'] = q_sched_policy
+        tcontdict['uni_id'] = uni_id
+
+        # TODO: Not sure what to do with any of this...
+        tddata = dict()
+        tddata['name'] = 'not-sure-td-profile'
+        tddata['fixed-bandwidth'] = "not-sure-fixed"
+        tddata['assured-bandwidth'] = "not-sure-assured"
+        tddata['maximum-bandwidth'] = "not-sure-max"
+        tddata['additional-bw-eligibility-indicator'] = "not-sure-additional"
+
+        td = OnuTrafficDescriptor.create(tddata)
+        tcont = OnuTCont.create(self, tcont=tcontdict, td=td)
+
+        self._pon.add_tcont(tcont)
+
+        self.log.debug('pon-add-tcont', tcont=tcont)
+
+    # Called when there is an olt up indication, providing the gem port id chosen by the olt handler
+    def _create_gemports(self, uni_id, gem_ports, alloc_id_ref, direction):
+        self.log.debug('create-gemport',
+                       gem_ports=gem_ports, direction=direction)
+
+        for gem_port in gem_ports:
+            gemdict = dict()
+            gemdict['gemport_id'] = gem_port['gemport_id']
+            gemdict['direction'] = direction
+            gemdict['alloc_id_ref'] = alloc_id_ref
+            gemdict['encryption'] = gem_port['aes_encryption']
+            gemdict['discard_config'] = dict()
+            gemdict['discard_config']['max_probability'] = \
+                gem_port['discard_config']['max_probability']
+            gemdict['discard_config']['max_threshold'] = \
+                gem_port['discard_config']['max_threshold']
+            gemdict['discard_config']['min_threshold'] = \
+                gem_port['discard_config']['min_threshold']
+            gemdict['discard_policy'] = gem_port['discard_policy']
+            gemdict['max_q_size'] = gem_port['max_q_size']
+            gemdict['pbit_map'] = gem_port['pbit_map']
+            gemdict['priority_q'] = gem_port['priority_q']
+            gemdict['scheduling_policy'] = gem_port['scheduling_policy']
+            gemdict['weight'] = gem_port['weight']
+            gemdict['uni_id'] = uni_id
+
+            gem_port = OnuGemPort.create(self, gem_port=gemdict)
+
+            self._pon.add_gem_port(gem_port)
+
+            self.log.debug('pon-add-gemport', gem_port=gem_port)
+
+    def _do_tech_profile_configuration(self, uni_id, tp):
+        num_of_tconts = tp['num_of_tconts']
+        us_scheduler = tp['us_scheduler']
+        alloc_id = us_scheduler['alloc_id']
+        self._create_tconts(uni_id, us_scheduler)
+        upstream_gem_port_attribute_list = tp['upstream_gem_port_attribute_list']
+        self._create_gemports(uni_id, upstream_gem_port_attribute_list, alloc_id, "UPSTREAM")
+        downstream_gem_port_attribute_list = tp['downstream_gem_port_attribute_list']
+        self._create_gemports(uni_id, downstream_gem_port_attribute_list, alloc_id, "DOWNSTREAM")
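+
+    # The tp argument mirrors the tech profile instance read from the KV store;
+    # an abridged, illustrative shape:
+    #   {'num_of_tconts': 1,
+    #    'us_scheduler': {'alloc_id': 1024, 'q_sched_policy': 'StrictPriority'},
+    #    'upstream_gem_port_attribute_list': [...],
+    #    'downstream_gem_port_attribute_list': [...]}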
+
+    def load_and_configure_tech_profile(self, uni_id, tp_path):
+        self.log.debug("loading-tech-profile-configuration", uni_id=uni_id, tp_path=tp_path)
+
+        if uni_id not in self._tp_service_specific_task:
+            self._tp_service_specific_task[uni_id] = dict()
+
+        if uni_id not in self._tech_profile_download_done:
+            self._tech_profile_download_done[uni_id] = dict()
+
+        if tp_path not in self._tech_profile_download_done[uni_id]:
+            self._tech_profile_download_done[uni_id][tp_path] = False
+
+        if not self._tech_profile_download_done[uni_id][tp_path]:
+            try:
+                if tp_path in self._tp_service_specific_task[uni_id]:
+                    self.log.info("tech-profile-config-already-in-progress",
+                                   tp_path=tp_path)
+                    return
+
+                tp = self.kv_client[tp_path]
+                tp = ast.literal_eval(tp)
+                self.log.debug("tp-instance", tp=tp)
+                self._do_tech_profile_configuration(uni_id, tp)
+
+                def success(_results):
+                    self.log.info("tech-profile-config-done-successfully")
+                    device = self.adapter_agent.get_device(self.device_id)
+                    device.reason = 'tech-profile-config-download-success'
+                    self.adapter_agent.update_device(device)
+                    if tp_path in self._tp_service_specific_task[uni_id]:
+                        del self._tp_service_specific_task[uni_id][tp_path]
+                    self._tech_profile_download_done[uni_id][tp_path] = True
+
+                def failure(_reason):
+                    self.log.warn('tech-profile-config-failure-retrying',
+                                   _reason=_reason)
+                    device = self.adapter_agent.get_device(self.device_id)
+                    device.reason = 'tech-profile-config-download-failure-retrying'
+                    self.adapter_agent.update_device(device)
+                    if tp_path in self._tp_service_specific_task[uni_id]:
+                        del self._tp_service_specific_task[uni_id][tp_path]
+                    self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT, self.load_and_configure_tech_profile,
+                                                       uni_id, tp_path)
+
+                self.log.info('downloading-tech-profile-configuration')
+                self._tp_service_specific_task[uni_id][tp_path] = \
+                       BrcmTpServiceSpecificTask(self.omci_agent, self, uni_id)
+                self._deferred = \
+                       self._onu_omci_device.task_runner.queue_task(self._tp_service_specific_task[uni_id][tp_path])
+                self._deferred.addCallbacks(success, failure)
+
+            except Exception as e:
+                self.log.exception("error-loading-tech-profile", e=e)
+        else:
+            self.log.info("tech-profile-config-already-done")
+
+    def update_pm_config(self, device, pm_config):
+        # TODO: This has not been tested
+        self.log.info('update_pm_config', pm_config=pm_config)
+        self.pm_metrics.update(pm_config)
+
+    # Calling this assumes the onu is active/ready and has at least an initial mib downloaded.
+    # This gets called from flow decomposition, which ultimately comes from onos
+    def update_flow_table(self, device, flows):
+        self.log.debug('function-entry', device=device, flows=flows)
+
+        #
+        # We need to proxy through the OLT to get to the ONU
+        # Configuration from here should be using OMCI
+        #
+        # self.log.info('bulk-flow-update', device_id=device.id, flows=flows)
+
+        # no point in pushing omci flows if the device isn't reachable
+        if device.connect_status != ConnectStatus.REACHABLE or \
+           device.admin_state != AdminState.ENABLED:
+            self.log.warn("device-disabled-or-offline-skipping-flow-update",
+                          admin=device.admin_state, connect=device.connect_status)
+            return
+
+        def is_downstream(port):
+            return port == self._pon_port_number
+
+        def is_upstream(port):
+            return not is_downstream(port)
+
+        for flow in flows:
+            _type = None
+            _port = None
+            _vlan_vid = None
+            _udp_dst = None
+            _udp_src = None
+            _ipv4_dst = None
+            _ipv4_src = None
+            _metadata = None
+            _output = None
+            _push_tpid = None
+            _field = None
+            _set_vlan_vid = None
+            self.log.debug('bulk-flow-update', device_id=device.id, flow=flow)
+            try:
+                _in_port = fd.get_in_port(flow)
+                assert _in_port is not None
+
+                _out_port = fd.get_out_port(flow)  # may be None
+
+                if is_downstream(_in_port):
+                    self.log.debug('downstream-flow', in_port=_in_port, out_port=_out_port)
+                    uni_port = self.uni_port(_out_port)
+                elif is_upstream(_in_port):
+                    self.log.debug('upstream-flow', in_port=_in_port, out_port=_out_port)
+                    uni_port = self.uni_port(_in_port)
+                else:
+                    raise Exception('port should be 1 or 2 by our convention')
+
+                self.log.debug('flow-ports', in_port=_in_port, out_port=_out_port, uni_port=str(uni_port))
+
+                for field in fd.get_ofb_fields(flow):
+                    if field.type == fd.ETH_TYPE:
+                        _type = field.eth_type
+                        self.log.debug('field-type-eth-type',
+                                       eth_type=_type)
+
+                    elif field.type == fd.IP_PROTO:
+                        _proto = field.ip_proto
+                        self.log.debug('field-type-ip-proto',
+                                       ip_proto=_proto)
+
+                    elif field.type == fd.IN_PORT:
+                        _port = field.port
+                        self.log.debug('field-type-in-port',
+                                       in_port=_port)
+
+                    elif field.type == fd.VLAN_VID:
+                        _vlan_vid = field.vlan_vid & 0xfff
+                        self.log.debug('field-type-vlan-vid',
+                                       vlan=_vlan_vid)
+
+                    elif field.type == fd.VLAN_PCP:
+                        _vlan_pcp = field.vlan_pcp
+                        self.log.debug('field-type-vlan-pcp',
+                                       pcp=_vlan_pcp)
+
+                    elif field.type == fd.UDP_DST:
+                        _udp_dst = field.udp_dst
+                        self.log.debug('field-type-udp-dst',
+                                       udp_dst=_udp_dst)
+
+                    elif field.type == fd.UDP_SRC:
+                        _udp_src = field.udp_src
+                        self.log.debug('field-type-udp-src',
+                                       udp_src=_udp_src)
+
+                    elif field.type == fd.IPV4_DST:
+                        _ipv4_dst = field.ipv4_dst
+                        self.log.debug('field-type-ipv4-dst',
+                                       ipv4_dst=_ipv4_dst)
+
+                    elif field.type == fd.IPV4_SRC:
+                        _ipv4_src = field.ipv4_src
+                        self.log.debug('field-type-ipv4-src',
+                                       ipv4_src=_ipv4_src)
+
+                    elif field.type == fd.METADATA:
+                        _metadata = field.table_metadata
+                        self.log.debug('field-type-metadata',
+                                       metadata=_metadata)
+
+                    else:
+                        raise NotImplementedError('field.type={}'.format(
+                            field.type))
+
+                for action in fd.get_actions(flow):
+
+                    if action.type == fd.OUTPUT:
+                        _output = action.output.port
+                        self.log.debug('action-type-output',
+                                       output=_output, in_port=_in_port)
+
+                    elif action.type == fd.POP_VLAN:
+                        self.log.debug('action-type-pop-vlan',
+                                       in_port=_in_port)
+
+                    elif action.type == fd.PUSH_VLAN:
+                        _push_tpid = action.push.ethertype
+                        self.log.debug('action-type-push-vlan',
+                                       push_tpid=_push_tpid, in_port=_in_port)
+                        if action.push.ethertype != 0x8100:
+                            self.log.error('unhandled-tpid',
+                                           ethertype=action.push.ethertype)
+
+                    elif action.type == fd.SET_FIELD:
+                        _field = action.set_field.field.ofb_field
+                        assert (action.set_field.field.oxm_class ==
+                                OFPXMC_OPENFLOW_BASIC)
+                        self.log.debug('action-type-set-field',
+                                       field=_field, in_port=_in_port)
+                        if _field.type == fd.VLAN_VID:
+                            _set_vlan_vid = _field.vlan_vid & 0xfff
+                            self.log.debug('set-field-type-vlan-vid',
+                                           vlan_vid=_set_vlan_vid)
+                        else:
+                            self.log.error('unsupported-action-set-field-type',
+                                           field_type=_field.type)
+                    else:
+                        self.log.error('unsupported-action-type',
+                                       action_type=action.type, in_port=_in_port)
+
+                # TODO: We only set vlan omci flows.  Handle omci matching ethertypes at some point in another task
+                if _type is not None:
+                    self.log.warn('ignoring-flow-with-ethType', ethType=_type)
+                elif _set_vlan_vid is None or _set_vlan_vid == 0:
+                    self.log.warn('ignoring-flow-that-does-not-set-vlanid')
+                else:
+                    self.log.warn('set-vlanid', uni_id=uni_port.port_number, set_vlan_vid=_set_vlan_vid)
+                    self._add_vlan_filter_task(device, uni_port, _set_vlan_vid)
+
+            except Exception as e:
+                self.log.exception('failed-to-install-flow', e=e, flow=flow)
+
+
+    def _add_vlan_filter_task(self, device, uni_port, _set_vlan_vid):
+        assert uni_port is not None
+
+        def success(_results):
+            self.log.info('vlan-tagging-success', uni_port=uni_port, vlan=_set_vlan_vid)
+            device.reason = 'omci-flows-pushed'
+            self._vlan_filter_task = None
+
+        def failure(_reason):
+            self.log.warn('vlan-tagging-failure', uni_port=uni_port, vlan=_set_vlan_vid)
+            device.reason = 'omci-flows-failed-retrying'
+            self._vlan_filter_task = reactor.callLater(_STARTUP_RETRY_WAIT,
+                                                       self._add_vlan_filter_task, device, uni_port, _set_vlan_vid)
+
+        self.log.info('setting-vlan-tag')
+        self._vlan_filter_task = BrcmVlanFilterTask(self.omci_agent, self.device_id, uni_port, _set_vlan_vid)
+        self._deferred = self._onu_omci_device.task_runner.queue_task(self._vlan_filter_task)
+        self._deferred.addCallbacks(success, failure)
+
+    def get_tx_id(self):
+        self.log.debug('function-entry')
+        self.tx_id += 1
+        return self.tx_id
+
+    # TODO: Actually conform to or create a proper interface.
+    # this and the other functions called from the olt aren't very clear.
+    # Called each time there is an onu "up" indication from the olt handler
+    def create_interface(self, data):
+        self.log.debug('function-entry', data=data)
+        self._onu_indication = data
+
+        onu_device = self.adapter_agent.get_device(self.device_id)
+
+        self.log.debug('starting-openomci-statemachine')
+        self._subscribe_to_events()
+        reactor.callLater(1, self._onu_omci_device.start)
+        onu_device.reason = "starting-openomci"
+        self.adapter_agent.update_device(onu_device)
+        self._heartbeat.enabled = True
+
+    # Currently called each time there is an onu "down" indication from the olt handler
+    # TODO: possibly other reasons to "update" from the olt?
+    def update_interface(self, data):
+        self.log.debug('function-entry', data=data)
+        oper_state = data.get('oper_state', None)
+
+        onu_device = self.adapter_agent.get_device(self.device_id)
+
+        if oper_state == 'down':
+            self.log.debug('stopping-openomci-statemachine')
+            reactor.callLater(0, self._onu_omci_device.stop)
+
+            # Let TP download happen again
+            for uni_id in self._tp_service_specific_task:
+                self._tp_service_specific_task[uni_id].clear()
+            for uni_id in self._tech_profile_download_done:
+                self._tech_profile_download_done[uni_id].clear()
+
+            self.disable_ports(onu_device)
+            onu_device.reason = "stopping-openomci"
+            onu_device.connect_status = ConnectStatus.UNREACHABLE
+            onu_device.oper_status = OperStatus.DISCOVERED
+            self.adapter_agent.update_device(onu_device)
+        else:
+            self.log.debug('not-changing-openomci-statemachine')
+
+    # Not currently called by olt or anything else
+    def remove_interface(self, data):
+        self.log.debug('function-entry', data=data)
+
+        onu_device = self.adapter_agent.get_device(self.device_id)
+
+        self.log.debug('stopping-openomci-statemachine')
+        reactor.callLater(0, self._onu_omci_device.stop)
+
+        # Let TP download happen again
+        for uni_id in self._tp_service_specific_task:
+            self._tp_service_specific_task[uni_id].clear()
+        for uni_id in self._tech_profile_download_done:
+            self._tech_profile_download_done[uni_id].clear()
+
+        self.disable_ports(onu_device)
+        onu_device.reason = "stopping-openomci"
+        self.adapter_agent.update_device(onu_device)
+
+        # TODO: I'm sure there is more to do here
+
+    # Not currently called.  Would be called presumably from the olt handler
+    def remove_gemport(self, data):
+        self.log.debug('remove-gemport', data=data)
+        gem_port = GemportsConfigData()
+        gem_port.CopyFrom(data)
+        device = self.adapter_agent.get_device(self.device_id)
+        if device.connect_status != ConnectStatus.REACHABLE:
+            self.log.error('device-unreachable')
+            return
+
+    # Not currently called.  Would be called presumably from the olt handler
+    def remove_tcont(self, tcont_data, traffic_descriptor_data):
+        self.log.debug('remove-tcont', tcont_data=tcont_data, traffic_descriptor_data=traffic_descriptor_data)
+        device = self.adapter_agent.get_device(self.device_id)
+        if device.connect_status != ConnectStatus.REACHABLE:
+            self.log.error('device-unreachable')
+            return
+
+        # TODO: Create some omci task that encompasses what is intended here
+
+    # Not currently called.  Would be called presumably from the olt handler
+    def create_multicast_gemport(self, data):
+        self.log.debug('function-entry', data=data)
+
+        # TODO: create objects and populate for later omci calls
+
+    def disable(self, device):
+        self.log.debug('function-entry', device=device)
+        try:
+            self.log.info('sending-uni-lock-towards-device', device=device)
+
+            def stop_anyway(reason):
+                # proceed with disable regardless of whether we could reach the onu, for example if the onu is unplugged
+                self.log.debug('stopping-openomci-statemachine')
+                reactor.callLater(0, self._onu_omci_device.stop)
+
+                # Let TP download happen again
+                for uni_id in self._tp_service_specific_task:
+                    self._tp_service_specific_task[uni_id].clear()
+                for uni_id in self._tech_profile_download_done:
+                    self._tech_profile_download_done[uni_id].clear()
+
+                self.disable_ports(device)
+                device.oper_status = OperStatus.UNKNOWN
+                device.reason = "omci-admin-lock"
+                self.adapter_agent.update_device(device)
+
+            # lock all the unis
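+            # Both the callback and the errback are wired to stop_anyway, so the
+            # disable path proceeds whether or not the lock task succeeds.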
+            task = BrcmUniLockTask(self.omci_agent, self.device_id, lock=True)
+            self._deferred = self._onu_omci_device.task_runner.queue_task(task)
+            self._deferred.addCallbacks(stop_anyway, stop_anyway)
+        except Exception as e:
+            self.log.exception('exception-in-onu-disable', exception=e)
+
+    def reenable(self, device):
+        self.log.debug('function-entry', device=device)
+        try:
+            # Start up OpenOMCI state machines for this device
+            # this will ultimately resync the mib and unlock the unis once the mib has been successfully redownloaded
+            self.log.debug('restarting-openomci-statemachine')
+            self._subscribe_to_events()
+            device.reason = "restarting-openomci"
+            self.adapter_agent.update_device(device)
+            reactor.callLater(1, self._onu_omci_device.start)
+            self._heartbeat.enabled = True
+        except Exception as e:
+            self.log.exception('exception-in-onu-reenable', exception=e)
+
+    def reboot(self):
+        self.log.info('reboot-device')
+        device = self.adapter_agent.get_device(self.device_id)
+        if device.connect_status != ConnectStatus.REACHABLE:
+            self.log.error("device-unreachable")
+            return
+
+        def success(_results):
+            self.log.info('reboot-success', _results=_results)
+            self.disable_ports(device)
+            device.connect_status = ConnectStatus.UNREACHABLE
+            device.oper_status = OperStatus.DISCOVERED
+            device.reason = "rebooting"
+            self.adapter_agent.update_device(device)
+
+        def failure(_reason):
+            self.log.info('reboot-failure', _reason=_reason)
+
+        self._deferred = self._onu_omci_device.reboot()
+        self._deferred.addCallbacks(success, failure)
+
+    def disable_ports(self, onu_device):
+        self.log.info('disable-ports', device_id=self.device_id,
+                      onu_device=onu_device)
+
+        # Disable all ports on that device
+        self.adapter_agent.disable_all_ports(self.device_id)
+
+        parent_device = self.adapter_agent.get_device(onu_device.parent_id)
+        assert parent_device
+        logical_device_id = parent_device.parent_id
+        assert logical_device_id
+        ports = self.adapter_agent.get_ports(onu_device.id, Port.ETHERNET_UNI)
+        for port in ports:
+            port_id = 'uni-{}'.format(port.port_no)
+            # TODO: move to UniPort
+            self.update_logical_port(logical_device_id, port_id, OFPPS_LINK_DOWN)
+
+    def enable_ports(self, onu_device):
+        self.log.info('enable-ports', device_id=self.device_id, onu_device=onu_device)
+
+        # Enable all ports on that device
+        self.adapter_agent.enable_all_ports(self.device_id)
+
+        parent_device = self.adapter_agent.get_device(onu_device.parent_id)
+        assert parent_device
+        logical_device_id = parent_device.parent_id
+        assert logical_device_id
+        ports = self.adapter_agent.get_ports(onu_device.id, Port.ETHERNET_UNI)
+        for port in ports:
+            port_id = 'uni-{}'.format(port.port_no)
+            # TODO: move to UniPort
+            self.update_logical_port(logical_device_id, port_id, OFPPS_LIVE)
+
+    # Called just before the openomci state machine is started.  These subscriptions listen for events from selected
+    # state machines, most importantly "mib in sync", which ultimately leads to downloading the mib
+    def _subscribe_to_events(self):
+        self.log.debug('function-entry')
+
+        # OMCI MIB Database sync status
+        bus = self._onu_omci_device.event_bus
+        topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+                                               OnuDeviceEvents.MibDatabaseSyncEvent)
+        self._in_sync_subscription = bus.subscribe(topic, self.in_sync_handler)
+
+        # OMCI Capabilities
+        bus = self._onu_omci_device.event_bus
+        topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+                                               OnuDeviceEvents.OmciCapabilitiesEvent)
+        self._capabilities_subscription = bus.subscribe(topic, self.capabilities_handler)
+
+    # Called when the mib is in sync
+    def in_sync_handler(self, _topic, msg):
+        self.log.debug('function-entry', _topic=_topic, msg=msg)
+        if self._in_sync_subscription is not None:
+            try:
+                in_sync = msg[IN_SYNC_KEY]
+
+                if in_sync:
+                    # Only call this once
+                    bus = self._onu_omci_device.event_bus
+                    bus.unsubscribe(self._in_sync_subscription)
+                    self._in_sync_subscription = None
+
+                    # Start up device_info load
+                    self.log.debug('running-mib-sync')
+                    reactor.callLater(0, self._mib_in_sync)
+
+            except Exception as e:
+                self.log.exception('in-sync', e=e)
+
+    def capabilities_handler(self, _topic, _msg):
+        self.log.debug('function-entry', _topic=_topic, msg=_msg)
+        if self._capabilities_subscription is not None:
+            self.log.debug('capabilities-handler-done')
+
+    # Mib is in sync, we can now query what we learned and actually start pushing ME (download) to the ONU.
+    # Currently uses a basic mib download task that creates a bridge with a single gem port and uni, only allowing EAP
+    # Implement your own MibDownloadTask if you wish to set up something different by default
+    def _mib_in_sync(self):
+        self.log.debug('function-entry')
+
+        omci = self._onu_omci_device
+        in_sync = omci.mib_db_in_sync
+
+        device = self.adapter_agent.get_device(self.device_id)
+        device.reason = 'discovery-mibsync-complete'
+        self.adapter_agent.update_device(device)
+
+        if not self._dev_info_loaded:
+            self.log.info('loading-device-data-from-mib', in_sync=in_sync, already_loaded=self._dev_info_loaded)
+
+            omci_dev = self._onu_omci_device
+            config = omci_dev.configuration
+
+            # TODO: run this sooner somehow. shouldn't have to wait for mib sync to push an initial download
+            # In Sync, we can register logical ports now. Ideally this could occur on
+            # the first time we received a successful (no timeout) OMCI Rx response.
+            try:
+
+                # sort the lists so we get consistent port ordering.
+                ani_list = sorted(config.ani_g_entities) if config.ani_g_entities else []
+                uni_list = sorted(config.uni_g_entities) if config.uni_g_entities else []
+                pptp_list = sorted(config.pptp_entities) if config.pptp_entities else []
+                veip_list = sorted(config.veip_entities) if config.veip_entities else []
+
+                if not ani_list or (not pptp_list and not veip_list):
+                    device.reason = 'onu-missing-required-elements'
+                    self.log.warn("no-ani-or-unis")
+                    self.adapter_agent.update_device(device)
+                    raise Exception("onu-missing-required-elements")
+
+                # The ani, pptp, veip, and uni are currently logged for informational purposes.
+                # Only the veip/pptp are acted upon, as their MEs are the most correct ones to use in later tasks,
+                # and on some ONUs the UNI-G list is incomplete or incorrect...
+                for entity_id in ani_list:
+                    ani_value = config.ani_g_entities[entity_id]
+                    self.log.debug("discovered-ani", entity_id=entity_id, value=ani_value)
+                    # TODO: currently only one OLT PON port/ANI, so this works out.  With NGPON there will be 2..?
+                    self._total_tcont_count = ani_value.get('total-tcont-count')
+                    self.log.debug("set-total-tcont-count", tcont_count=self._total_tcont_count)
+
+                for entity_id in uni_list:
+                    uni_value = config.uni_g_entities[entity_id]
+                    self.log.debug("discovered-uni", entity_id=entity_id, value=uni_value)
+
+                uni_entities = OrderedDict()
+                for entity_id in pptp_list:
+                    pptp_value = config.pptp_entities[entity_id]
+                    self.log.debug("discovered-pptp", entity_id=entity_id, value=pptp_value)
+                    uni_entities[entity_id] = UniType.PPTP
+
+                for entity_id in veip_list:
+                    veip_value = config.veip_entities[entity_id]
+                    self.log.debug("discovered-veip", entity_id=entity_id, value=veip_value)
+                    uni_entities[entity_id] = UniType.VEIP
+
+                uni_id = 0
+                for entity_id, uni_type in uni_entities.iteritems():
+                    try:
+                        self._add_uni_port(entity_id, uni_id, uni_type)
+                        uni_id += 1
+                    except AssertionError as e:
+                        self.log.warn("could not add UNI", entity_id=entity_id, uni_type=uni_type, e=e)
+
+                multi_uni = len(self._unis) > 1
+                for uni_port in self._unis.itervalues():
+                    uni_port.add_logical_port(uni_port.port_number, multi_uni)
+
+                self.adapter_agent.update_device(device)
+
+                self._qos_flexibility = config.qos_configuration_flexibility or 0
+                self._omcc_version = config.omcc_version or OMCCVersion.Unknown
+
+                if self._unis:
+                    self._dev_info_loaded = True
+                else:
+                    device.reason = 'no-usable-unis'
+                    self.adapter_agent.update_device(device)
+                    self.log.warn("no-usable-unis")
+                    raise Exception("no-usable-unis")
+
+            except Exception as e:
+                self.log.exception('device-info-load', e=e)
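+                # Failures here are often transient right after ONU activation,
+                # so retry the whole device-info load after a short delay.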
+                self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT, self._mib_in_sync)
+
+        else:
+            self.log.info('device-info-already-loaded', in_sync=in_sync, already_loaded=self._dev_info_loaded)
+
+        if self._dev_info_loaded:
+            if device.admin_state == AdminState.ENABLED:
+                def success(_results):
+                    self.log.info('mib-download-success', _results=_results)
+                    device = self.adapter_agent.get_device(self.device_id)
+                    device.reason = 'initial-mib-downloaded'
+                    device.oper_status = OperStatus.ACTIVE
+                    device.connect_status = ConnectStatus.REACHABLE
+                    self.enable_ports(device)
+                    self.adapter_agent.update_device(device)
+                    self._mib_download_task = None
+
+                def failure(_reason):
+                    self.log.warn('mib-download-failure-retrying', _reason=_reason)
+                    device.reason = 'initial-mib-download-failure-retrying'
+                    self.adapter_agent.update_device(device)
+                    self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT, self._mib_in_sync)
+
+                # Download an initial mib that creates a simple bridge that can pass EAP.  On success (above) finally set
+                # the device to active/reachable.  This then opens up the handler to openflow pushes from outside
+                self.log.info('downloading-initial-mib-configuration')
+                self._mib_download_task = BrcmMibDownloadTask(self.omci_agent, self)
+                self._deferred = self._onu_omci_device.task_runner.queue_task(self._mib_download_task)
+                self._deferred.addCallbacks(success, failure)
+            else:
+                self.log.info('admin-down-disabling')
+                self.disable(device)
+        else:
+            self.log.info('device-info-not-loaded-skipping-mib-download')
+
+
+    def _add_uni_port(self, entity_id, uni_id, uni_type=UniType.PPTP):
+        self.log.debug('function-entry')
+
+        device = self.adapter_agent.get_device(self.device_id)
+        parent_device = self.adapter_agent.get_device(device.parent_id)
+
+        parent_adapter_agent = registry('adapter_loader').get_agent(parent_device.adapter)
+        if parent_adapter_agent is None:
+            self.log.error('parent-adapter-could-not-be-retrieved')
+
+        # TODO: This knowledge is locked away in openolt, and it assumes one onu equals one uni...
+        parent_adapter = parent_adapter_agent.adapter.devices[parent_device.id]
+        uni_no = parent_adapter.platform.mk_uni_port_num(
+            self._onu_indication.intf_id, self._onu_indication.onu_id, uni_id)
+
+        # TODO: Some or parts of this likely need to move to UniPort. especially the format stuff
+        uni_name = "uni-{}".format(uni_no)
+
+        mac_bridge_port_num = uni_id + 1 # TODO +1 is only to test non-zero index
+
+        self.log.debug('uni-port-inputs', uni_no=uni_no, uni_id=uni_id, uni_name=uni_name, uni_type=uni_type,
+                       entity_id=entity_id, mac_bridge_port_num=mac_bridge_port_num)
+
+        uni_port = UniPort.create(self, uni_name, uni_id, uni_no, uni_name, uni_type)
+        uni_port.entity_id = entity_id
+        uni_port.enabled = True
+        uni_port.mac_bridge_port_num = mac_bridge_port_num
+
+        self.log.debug("created-uni-port", uni=uni_port)
+
+        self.adapter_agent.add_port(device.id, uni_port.get_port())
+        parent_adapter_agent.add_port(device.parent_id, uni_port.get_port())
+
+        self._unis[uni_port.port_number] = uni_port
+
+        self._onu_omci_device.alarm_synchronizer.set_alarm_params(onu_id=self._onu_indication.onu_id,
+                                                                  uni_ports=self._unis.values())
+        # TODO: this should be in the PonPort class
+        pon_port = self._pon.get_port()
+
+        # Delete reference to my own UNI as peer from parent.
+        # TODO why is this here, add_port_reference_to_parent already prunes duplicates
+        me_as_peer = Port.PeerPort(device_id=device.parent_id, port_no=uni_port.port_number)
+        partial_pon_port = Port(port_no=pon_port.port_no, label=pon_port.label,
+                                type=pon_port.type, admin_state=pon_port.admin_state,
+                                oper_status=pon_port.oper_status,
+                                peers=[me_as_peer]) # only list myself as a peer to avoid deleting all other UNIs from parent
+        self.adapter_agent.delete_port_reference_from_parent(self.device_id, partial_pon_port)
+
+        pon_port.peers.extend([me_as_peer])
+
+        self._pon._port = pon_port
+
+        self.adapter_agent.add_port_reference_to_parent(self.device_id,
+                                                        pon_port)
diff --git a/python/adapters/brcm_openomci_onu/heartbeat.py b/python/adapters/brcm_openomci_onu/heartbeat.py
new file mode 100644
index 0000000..4a7ab1f
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/heartbeat.py
@@ -0,0 +1,179 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet import reactor
+from voltha.protos.common_pb2 import OperStatus, ConnectStatus
+from voltha.extensions.omci.omci_me import OntGFrame
+
+
+class HeartBeat(object):
+    """Wraps health-check support for ONU"""
+    INITIAL_DELAY = 60                      # Delay after start until first check
+    TICK_DELAY = 2                          # Heartbeat interval
+
+    def __init__(self, handler, device_id):
+        self.log = structlog.get_logger(device_id=device_id)
+        self._enabled = False
+        self._handler = handler
+        self._device_id = device_id
+        self._defer = None
+        self._alarm_active = False
+        self._heartbeat_count = 0
+        self._heartbeat_miss = 0
+        self._alarms_raised_count = 0
+        self.heartbeat_failed_limit = 5
+        self.heartbeat_last_reason = ''
+        self.heartbeat_interval = self.TICK_DELAY
+
+    def __str__(self):
+        return "HeartBeat: count:{}, miss: {}".format(self._heartbeat_count,
+                                                      self._heartbeat_miss)
+
+    @staticmethod
+    def create(handler, device_id):
+        return HeartBeat(handler, device_id)
+
+    def _start(self, delay=INITIAL_DELAY):
+        self._defer = reactor.callLater(delay, self.check_pulse)
+
+    def _stop(self):
+        d, self._defer = self._defer, None
+        if d is not None and not d.called:
+            d.cancel()
+
+    @property
+    def enabled(self):
+        return self._enabled
+
+    @enabled.setter
+    def enabled(self, value):
+        if self._enabled != value:
+            self._enabled = value
+
+            # if value:
+            #     self._start()
+            # else:
+            #     self._stop()
+
+    @property
+    def check_item(self):
+        return 'vendor_id'
+
+    @property
+    def check_value(self):
+        # device = self._handler.adapter_agent.get_device(self._device_id)
+        # return device.serial_number
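+        # NOTE: 'ADTN' is the Adtran vendor ID; presumably a placeholder until
+        # the serial-number check above is enabled.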
+        return 'ADTN'
+
+    @property
+    def alarm_active(self):
+        return self._alarm_active
+
+    @property
+    def heartbeat_count(self):
+        return self._heartbeat_count
+
+    @property
+    def heartbeat_miss(self):
+        return self._heartbeat_miss
+
+    @property
+    def alarms_raised_count(self):
+        return self._alarms_raised_count
+
+    def check_pulse(self):
+        if self.enabled:
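+            # Probe the ONU by reading check_item (vendor_id) from the ONT-G ME;
+            # the callbacks below maintain the miss counter.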
+            try:
+                self._defer = self._handler.openomci.omci_cc.send(OntGFrame(self.check_item).get())
+                self._defer.addCallbacks(self._heartbeat_success, self._heartbeat_fail)
+
+            except Exception as e:
+                self._defer = reactor.callLater(5, self._heartbeat_fail, e)
+
+    def _heartbeat_success(self, results):
+        self.log.debug('heartbeat-success')
+
+        try:
+            omci_response = results.getfieldval("omci_message")
+            data = omci_response.getfieldval("data")
+            value = data[self.check_item]
+
+            if value != self.check_value:
+                self._heartbeat_miss = self.heartbeat_failed_limit
+                self.heartbeat_last_reason = "Invalid {}, got '{}' but expected '{}'".\
+                    format(self.check_item, value, self.check_value)
+            else:
+                self._heartbeat_miss = 0
+                self.heartbeat_last_reason = ''
+
+        except Exception as e:
+            self._heartbeat_miss = self.heartbeat_failed_limit
+            self.heartbeat_last_reason = str(e)
+
+        self.heartbeat_check_status(results)
+
+    def _heartbeat_fail(self, failure):
+        self._heartbeat_miss += 1
+        self.log.info('heartbeat-miss', failure=failure,
+                      count=self._heartbeat_count,
+                      miss=self._heartbeat_miss)
+        self.heartbeat_last_reason = 'OMCI connectivity error'
+        self.heartbeat_check_status(None)
+
+    def on_heartbeat_alarm(self, active):
+        # TODO: Do something here ?
+        #
+        #  TODO: If failed (active = true) due to bad serial-number shut off the UNI port?
+        pass
+
+    def heartbeat_check_status(self, results):
+        """
+        Check the number of heartbeat failures against the limit and emit an alarm if needed
+        """
+        device = self._handler.adapter_agent.get_device(self._device_id)
+
+        try:
+            from voltha.extensions.alarms.heartbeat_alarm import HeartbeatAlarm
+
+            if self._heartbeat_miss >= self.heartbeat_failed_limit:
+                if device.connect_status == ConnectStatus.REACHABLE:
+                    self.log.warning('heartbeat-failed', count=self._heartbeat_miss)
+                    device.connect_status = ConnectStatus.UNREACHABLE
+                    device.oper_status = OperStatus.FAILED
+                    device.reason = self.heartbeat_last_reason
+                    self._handler.adapter_agent.update_device(device)
+                    HeartbeatAlarm(self._handler.alarms, 'onu', self._heartbeat_miss).raise_alarm()
+                    self._alarm_active = True
+                    self._alarms_raised_count += 1
+                    self.on_heartbeat_alarm(True)
+            else:
+                # Update device states
+                if device.connect_status != ConnectStatus.REACHABLE and self._alarm_active:
+                    device.connect_status = ConnectStatus.REACHABLE
+                    device.oper_status = OperStatus.ACTIVE
+                    device.reason = ''
+                    self._handler.adapter_agent.update_device(device)
+                    HeartbeatAlarm(self._handler.alarms, 'onu').clear_alarm()
+
+                    self._alarm_active = False
+                    self.on_heartbeat_alarm(False)
+
+        except Exception as e:
+            self.log.exception('heartbeat-check', e=e)
+
+        # Reschedule next heartbeat
+        if self.enabled:
+            self._heartbeat_count += 1
+            self._defer = reactor.callLater(self.heartbeat_interval, self.check_pulse)
diff --git a/python/adapters/brcm_openomci_onu/main.py b/python/adapters/brcm_openomci_onu/main.py
new file mode 100755
index 0000000..ed1d15f
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/main.py
@@ -0,0 +1,489 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""OpenONU Adapter main entry point"""
+
+import argparse
+import os
+import time
+
+import arrow
+import yaml
+from packaging.version import Version
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+
+from common.structlog_setup import setup_logging, update_logging
+from common.utils.asleep import asleep
+from common.utils.deferred_utils import TimeOutError
+from common.utils.dockerhelpers import get_my_containers_name
+from common.utils.nethelpers import get_my_primary_local_ipv4, \
+    get_my_primary_interface
+from voltha.core.registry import registry, IComponent
+from kafka.adapter_proxy import AdapterProxy
+from kafka.adapter_request_facade import AdapterRequestFacade
+from kafka.core_proxy import CoreProxy
+from kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+    get_messaging_proxy
+from kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from brcm_openomci_onu import BrcmOpenomciOnuAdapter
+from voltha.protos import third_party
+from voltha.protos.adapter_pb2 import AdapterConfig
+
+_ = third_party
+
+defs = dict(
+    version_file='./VERSION',
+    config=os.environ.get('CONFIG', './openonu.yml'),
+    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR',
+                                        '^.*\.([0-9]+)\..*$'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    name=os.environ.get('NAME', 'openonu'),
+    vendor=os.environ.get('VENDOR', 'Voltha Project'),
+    device_type=os.environ.get('DEVICE_TYPE', 'openonu'),
+    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
+    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
+    etcd=os.environ.get('ETCD', 'localhost:2379'),
+    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
+    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
+    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
+    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '192.168.0.20:9092'),
+    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '10.100.198.220:9092'),
+    backend=os.environ.get('BACKEND', 'none'),
+    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
+    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),
+)
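+# NOTE: os.environ.get() returns strings, so boolean defaults such as
+# accept_bulk_flow are only real booleans when the variable is unset.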
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    _help = ('Path to openonu.yml config file (default: %s). '
+             'If relative, it is relative to main.py of openonu adapter.'
+             % defs['config'])
+    parser.add_argument('-c', '--config',
+                        dest='config',
+                        action='store',
+                        default=defs['config'],
+                        help=_help)
+
+    _help = 'Regular expression for extracting container number from ' \
+            'container name (default: %s)' % defs['container_name_regex']
+    parser.add_argument('-X', '--container-number-extractor',
+                        dest='container_name_regex',
+                        action='store',
+                        default=defs['container_name_regex'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
+    parser.add_argument('-C', '--consul',
+                        dest='consul',
+                        action='store',
+                        default=defs['consul'],
+                        help=_help)
+
+    _help = 'name of this adapter (default: %s)' % defs['name']
+    parser.add_argument('-na', '--name',
+                        dest='name',
+                        action='store',
+                        default=defs['name'],
+                        help=_help)
+
+    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
+    parser.add_argument('-ven', '--vendor',
+                        dest='vendor',
+                        action='store',
+                        default=defs['vendor'],
+                        help=_help)
+
+    _help = 'supported device type of this adapter (default: %s)' % defs[
+        'device_type']
+    parser.add_argument('-dt', '--device_type',
+                        dest='device_type',
+                        action='store',
+                        default=defs['device_type'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts bulk flow updates ' \
+            'adapter (default: %s)' % defs['accept_bulk_flow']
+    parser.add_argument('-abf', '--accept_bulk_flow',
+                        dest='accept_bulk_flow',
+                        action='store',
+                        default=defs['accept_bulk_flow'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts add/remove flow ' \
+            '(default: %s)' % defs['accept_atomic_flow']
+    parser.add_argument('-aaf', '--accept_atomic_flow',
+                        dest='accept_atomic_flow',
+                        action='store',
+                        default=defs['accept_atomic_flow'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
+    parser.add_argument('-e', '--etcd',
+                        dest='etcd',
+                        action='store',
+                        default=defs['etcd'],
+                        help=_help)
+
+    _help = ('unique string id of this container instance (default: %s)'
+             % defs['instance_id'])
+    parser.add_argument('-i', '--instance-id',
+                        dest='instance_id',
+                        action='store',
+                        default=defs['instance_id'],
+                        help=_help)
+
+    _help = 'ETH interface to receive on (default: %s)' % defs['interface']
+    parser.add_argument('-I', '--interface',
+                        dest='interface',
+                        action='store',
+                        default=defs['interface'],
+                        help=_help)
+
+    _help = 'omit startup banner log lines'
+    parser.add_argument('-n', '--no-banner',
+                        dest='no_banner',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = 'do not emit periodic heartbeat log messages'
+    parser.add_argument('-N', '--no-heartbeat',
+                        dest='no_heartbeat',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = "suppress debug and info logs"
+    parser.add_argument('-q', '--quiet',
+                        dest='quiet',
+                        action='count',
+                        help=_help)
+
+    _help = 'enable verbose logging'
+    parser.add_argument('-v', '--verbose',
+                        dest='verbose',
+                        action='count',
+                        help=_help)
+
+    _help = ('use docker container name as container instance id'
+             ' (overrides -i/--instance-id option)')
+    parser.add_argument('--instance-id-is-container-name',
+                        dest='instance_id_is_container_name',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). ('
+             'If not '
+             'specified (None), the address from the config file is used'
+             % defs['kafka_adapter'])
+    parser.add_argument('-KA', '--kafka_adapter',
+                        dest='kafka_adapter',
+                        action='store',
+                        default=defs['kafka_adapter'],
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). ('
+             'If not '
+             'specified (None), the address from the config file is used'
+             % defs['kafka_cluster'])
+    parser.add_argument('-KC', '--kafka_cluster',
+                        dest='kafka_cluster',
+                        action='store',
+                        default=defs['kafka_cluster'],
+                        help=_help)
+
+    _help = 'backend to use for config persistence'
+    parser.add_argument('-b', '--backend',
+                        default=defs['backend'],
+                        choices=['none', 'consul', 'etcd'],
+                        help=_help)
+
+    _help = 'topic of core on the kafka bus'
+    parser.add_argument('-ct', '--core_topic',
+                        dest='core_topic',
+                        action='store',
+                        default=defs['core_topic'],
+                        help=_help)
+
+    args = parser.parse_args()
+
+    # post-processing
+
+    if args.instance_id_is_container_name:
+        args.instance_id = get_my_containers_name()
+
+    return args
+
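+# Example invocation (hypothetical values):
+#   ./main.py --kafka_adapter kafka:9092 --core_topic rwcore --backend etcd -e etcd:2379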
+
+def load_config(args):
+    path = args.config
+    if path.startswith('.'):
+        dir = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(dir, path)
+    path = os.path.abspath(path)
+    with open(path) as fd:
+        config = yaml.load(fd)
+    return config
+
+
+def print_banner(log):
+    log.info('                                                    ')
+    log.info('               OpenOnu Adapter                      ')
+    log.info('                                                    ')
+    log.info('(to stop: press Ctrl-C)')
+
+
+@implementer(IComponent)
+class Main(object):
+
+    def __init__(self):
+
+        self.args = args = parse_args()
+        self.config = load_config(args)
+
+        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
+        self.log = setup_logging(self.config.get('logging', {}),
+                                 args.instance_id,
+                                 verbosity_adjust=verbosity_adjust)
+        self.log.info('container-number-extractor',
+                      regex=args.container_name_regex)
+
+        self.adapter_version = self.get_version()
+        self.log.info('OpenONU-Adapter-Version',
+                      version=self.adapter_version)
+
+        if not args.no_banner:
+            print_banner(self.log)
+
+        self.adapter = None
+        # Create a unique instance id using the passed-in instance id and
+        # UTC timestamp
+        current_time = arrow.utcnow().timestamp
+        self.instance_id = self.args.instance_id + '_' + str(current_time)
+
+        self.core_topic = args.core_topic
+        self.listening_topic = args.name
+        self.startup_components()
+
+        if not args.no_heartbeat:
+            self.start_heartbeat()
+            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+    def get_version(self):
+        path = defs['version_file']
+        if not path.startswith('/'):
+            dir = os.path.dirname(os.path.abspath(__file__))
+            path = os.path.join(dir, path)
+
+        path = os.path.abspath(path)
+        with open(path, 'r') as version_file:
+            v = version_file.read()
+
+        # Use Version to validate the version string - exception will be raised
+        # if the version is invalid
+        Version(v)
+
+        return v
+
+    def start(self):
+        self.start_reactor()  # will not return except Keyboard interrupt
+
+    def stop(self):
+        pass
+
+    def get_args(self):
+        """Allow access to command line args"""
+        return self.args
+
+    def get_config(self):
+        """Allow access to content of config file"""
+        return self.config
+
+    def _get_adapter_config(self):
+        cfg = AdapterConfig()
+        return cfg
+
+    @inlineCallbacks
+    def startup_components(self):
+        try:
+            self.log.info('starting-internal-components',
+                          consul=self.args.consul,
+                          etcd=self.args.etcd)
+
+            registry.register('main', self)
+
+            # Update the logger to output the vcore id.
+            self.log = update_logging(instance_id=self.instance_id,
+                                      vcore_id=None)
+
+            yield registry.register(
+                'kafka_cluster_proxy',
+                KafkaProxy(
+                    self.args.consul,
+                    self.args.kafka_cluster,
+                    config=self.config.get('kafka-cluster-proxy', {})
+                )
+            ).start()
+
+            config = self._get_adapter_config()
+
+            self.core_proxy = CoreProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            self.adapter_proxy = AdapterProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            self.adapter = BrcmOpenomciOnuAdapter(
+                core_proxy=self.core_proxy, adapter_proxy=self.adapter_proxy,
+                config=config)
+            openonu_request_handler = AdapterRequestFacade(
+                adapter=self.adapter)
+
+            yield registry.register(
+                'kafka_adapter_proxy',
+                IKafkaMessagingProxy(
+                    kafka_host_port=self.args.kafka_adapter,
+                    # TODO: Add KV Store object reference
+                    kv_store=self.args.backend,
+                    default_topic=self.args.name,
+                    group_id_prefix=self.args.instance_id,
+                    target_cls=openonu_request_handler
+                )
+            ).start()
+
+            self.core_proxy.kafka_proxy = get_messaging_proxy()
+            self.adapter_proxy.kafka_proxy = get_messaging_proxy()
+
+            # retry forever (-1 means no retry limit)
+            yield self._register_with_core(-1)
+
+            self.log.info('started-internal-services')
+
+        except Exception as e:
+            self.log.exception('failure-to-start-all-components', e=e)
+
+    @inlineCallbacks
+    def shutdown_components(self):
+        """Execute before the reactor is shut down"""
+        self.log.info('exiting-on-keyboard-interrupt')
+        for component in reversed(registry.iterate()):
+            yield component.stop()
+
+        import threading
+        self.log.info('THREADS:')
+        main_thread = threading.current_thread()
+        for t in threading.enumerate():
+            if t is main_thread:
+                continue
+            if not t.isDaemon():
+                continue
+            self.log.info('joining daemon thread {}'.format(t.getName()))
+            t.join()
+
+    def start_reactor(self):
+        from twisted.internet import reactor
+        reactor.callWhenRunning(
+            lambda: self.log.info('twisted-reactor-started'))
+        reactor.addSystemEventTrigger('before', 'shutdown',
+                                      self.shutdown_components)
+        reactor.run()
+
+    @inlineCallbacks
+    def _register_with_core(self, retries):
+        while True:
+            try:
+                resp = yield self.core_proxy.register(
+                    self.adapter.adapter_descriptor(),
+                    self.adapter.device_types())
+                if resp:
+                    self.log.info('registered-with-core',
+                                  coreId=resp.instance_id)
+
+                returnValue(resp)
+            except TimeOutError as e:
+                self.log.warn("timeout-when-registering-with-core", e=e)
+                if retries == 0:
+                    self.log.exception("no-more-retries", e=e)
+                    raise
+                else:
+                    retries = retries if retries < 0 else retries - 1
+                    yield asleep(defs['retry_interval'])
+            except Exception as e:
+                self.log.exception("failed-registration", e=e)
+                raise
+
+    def start_heartbeat(self):
+
+        t0 = time.time()
+        t0s = time.ctime(t0)
+
+        def heartbeat():
+            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)
+
+        lc = LoopingCall(heartbeat)
+        lc.start(10)
+
+    # Temporary function to send a heartbeat message to the external kafka
+    # broker
+    def start_kafka_cluster_heartbeat(self, instance_id):
+        # For the heartbeat we send a JSON-serialized dict (via simplejson) to
+        # the configured heartbeat topic (defs['heartbeat_topic'])
+        message = dict(
+            type='heartbeat',
+            adapter=self.args.name,
+            instance=instance_id,
+            ip=get_my_primary_local_ipv4()
+        )
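+        # Example payload on the wire (illustrative values):
+        #   {"type": "heartbeat", "adapter": "openonu", "instance": "openonu_1",
+        #    "ip": "10.0.2.15", "ts": 1538510000, "uptime": 42.0}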
+        topic = defs['heartbeat_topic']
+
+        def send_msg(start_time):
+            try:
+                kafka_cluster_proxy = get_kafka_proxy()
+                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+                    # self.log.debug('kafka-proxy-available')
+                    message['ts'] = arrow.utcnow().timestamp
+                    message['uptime'] = time.time() - start_time
+                    # self.log.debug('start-kafka-heartbeat')
+                    kafka_cluster_proxy.send_message(topic, dumps(message))
+                else:
+                    self.log.error('kafka-proxy-unavailable')
+            except Exception as e:
+                self.log.exception('failed-sending-message-heartbeat', e=e)
+
+        try:
+            t0 = time.time()
+            lc = LoopingCall(send_msg, t0)
+            lc.start(10)
+        except Exception as e:
+            self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+if __name__ == '__main__':
+    Main().start()
diff --git a/python/adapters/brcm_openomci_onu/omci/__init__.py b/python/adapters/brcm_openomci_onu/omci/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_capabilities_task.py b/python/adapters/brcm_openomci_onu/omci/brcm_capabilities_task.py
new file mode 100644
index 0000000..6bf5b93
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_capabilities_task.py
@@ -0,0 +1,155 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from voltha.extensions.omci.tasks.onu_capabilities_task import OnuCapabilitiesTask
+from twisted.internet.defer import failure
+
+
+class BrcmCapabilitiesTask(OnuCapabilitiesTask):
+    """
+    OpenOMCI MIB Capabilities Task - BROADCOM ONUs
+
+    This task requests information on supported MEs via the OMCI (ME#287)
+    Managed entity.
+
+    This task should be run after MIB Synchronization and before any MIB
+    Downloads to the ONU.
+
+    Upon completion, the Task deferred callback is invoked with a dictionary
+    containing the supported managed entities and message types.
+
+    results = {
+                'supported-managed-entities': {set of supported managed entities},
+                'supported-message-types': {set of supported message types}
+              }
+    """
+    def __init__(self, omci_agent, device_id):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+        self.log.debug('function-entry')
+
+        super(BrcmCapabilitiesTask, self).__init__(omci_agent, device_id)
+        self._omci_managed = False      # TODO: Look up capabilities/model number
+
+    @property
+    def supported_managed_entities(self):
+        """
+        Return a set of the Managed Entity class IDs supported on this ONU
+
+        None is returned if no MEs have been discovered
+
+        :return: (set of ints)
+        """
+        self.log.debug('function-entry')
+
+        if self._omci_managed:
+            return super(BrcmCapabilitiesTask, self).supported_managed_entities
+
+        # TODO: figure out why broadcom won't answer for ME 287 to get this.  otherwise manually fill in
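+        # Hard-coded list of ME class IDs (per ITU-T G.988) known to be
+        # supported on this family of Broadcom-based ONUs.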
+        me_1287800f1 = [
+            2, 5, 6, 7, 11, 24, 45, 46, 47, 48, 49, 50, 51, 52, 78, 79, 84, 89, 130,
+            131, 133, 134, 135, 136, 137, 148, 157, 158, 159, 162, 163, 164, 171, 240,
+            241, 242, 256, 257, 262, 263, 264, 266, 268, 272, 273, 274, 276, 277, 278,
+            279, 280, 281, 287, 296, 297, 298, 307, 308, 309, 310, 311, 312, 321, 322,
+            329, 330, 332, 334, 336, 340, 341, 342, 343, 347, 348, 425, 426
+        ]
+        return frozenset(me_1287800f1)
+
+    @property
+    def supported_message_types(self):
+        """
+        Return a set of the Message Types supported on this ONU
+
+        None is returned if no message types have been discovered
+
+        :return: (set of EntityOperations)
+        """
+        self.log.debug('function-entry')
+
+        if self._omci_managed:
+            return super(BrcmCapabilitiesTask, self).supported_message_types
+
+        # TODO: figure out why broadcom won't answer for ME 287 to get this.  otherwise manually fill in
+        from voltha.extensions.omci.omci_entities import EntityOperations
+        op_11287800f1 = [
+            EntityOperations.Create,
+            EntityOperations.CreateComplete,
+            EntityOperations.Delete,
+            EntityOperations.Set,
+            EntityOperations.Get,
+            EntityOperations.GetComplete,
+            EntityOperations.GetAllAlarms,
+            EntityOperations.GetAllAlarmsNext,
+            EntityOperations.MibUpload,
+            EntityOperations.MibUploadNext,
+            EntityOperations.MibReset,
+            EntityOperations.AlarmNotification,
+            EntityOperations.AttributeValueChange,
+            EntityOperations.Test,
+            EntityOperations.StartSoftwareDownload,
+            EntityOperations.DownloadSection,
+            EntityOperations.EndSoftwareDownload,
+            EntityOperations.ActivateSoftware,
+            EntityOperations.CommitSoftware,
+            EntityOperations.SynchronizeTime,
+            EntityOperations.Reboot,
+            EntityOperations.GetNext,
+        ]
+        return frozenset(op_11287800f1)
+
+    def perform_get_capabilities(self):
+        """
+        Perform the MIB Capabilities sequence.
+
+        The sequence is to perform a Get request with the attribute mask equal
+        to 'me_type_table'.  The response to this request carries the number
+        of get-next sequences required to retrieve the full table.
+
+        Then a loop is entered and get-next commands are sent for each sequence
+        requested.
+        """
+        self.log.debug('function-entry')
+
+        self.log.info('perform-get')
+
+        if self._omci_managed:
+            # Return generator deferred/results
+            return super(BrcmCapabilitiesTask, self).perform_get_capabilities()
+
+        # Fixed values, no need to query
+        try:
+            self._supported_entities = self.supported_managed_entities
+            self._supported_msg_types = self.supported_message_types
+
+            self.log.debug('get-success',
+                           supported_entities=self.supported_managed_entities,
+                           supported_msg_types=self.supported_message_types)
+            results = {
+                'supported-managed-entities': self.supported_managed_entities,
+                'supported-message-types': self.supported_message_types
+            }
+            self.deferred.callback(results)
+
+        except Exception as e:
+            self.log.exception('get-failed', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_get_mds_task.py b/python/adapters/brcm_openomci_onu/omci/brcm_get_mds_task.py
new file mode 100644
index 0000000..eabf356
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_get_mds_task.py
@@ -0,0 +1,61 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from voltha.extensions.omci.tasks.get_mds_task import GetMdsTask
+
+
+class BrcmGetMdsTask(GetMdsTask):
+    """
+    OpenOMCI Get MIB Data Sync value task - Broadcom ONU
+
+    On successful completion, this task will call the 'callback' method of the
+    deferred returned by the start method and return the value of the MIB
+    Data Sync attribute of the ONT Data ME
+    """
+    name = "BRCM: Get MDS Task"
+
+    def __init__(self, omci_agent, device_id):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+        self.log.debug('function-entry')
+
+        super(BrcmGetMdsTask, self).__init__(omci_agent, device_id)
+
+        self.name = BrcmGetMdsTask.name
+        self._device = omci_agent.get_device(device_id)
+        self._omci_managed = False      # TODO: Look up capabilities/model number/check handler
+
+    def perform_get_mds(self):
+        """
+        Get the 'mib_data_sync' attribute of the ONU
+        """
+        self.log.debug('function-entry')
+        self.log.info('perform-get-mds')
+
+        if self._omci_managed:
+            return super(BrcmGetMdsTask, self).perform_get_mds()
+
+        # Non-OMCI managed BRCM ONUs always return 0 for MDS, use the MIB
+        # sync value and depend on an accelerated mib resync to do the
+        # proper comparison
+
+        self.deferred.callback(self._device.mib_synchronizer.mib_data_sync)
+
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_mib_download_task.py b/python/adapters/brcm_openomci_onu/omci/brcm_mib_download_task.py
new file mode 100644
index 0000000..3341219
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_mib_download_task.py
@@ -0,0 +1,449 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from common.frameio.frameio import hexify
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, TimeoutError, failure
+from voltha.extensions.omci.omci_me import *
+from voltha.extensions.omci.tasks.task import Task
+from voltha.extensions.omci.omci_defs import *
+from voltha.adapters.brcm_openomci_onu.uni_port import *
+from voltha.adapters.brcm_openomci_onu.pon_port \
+    import BRDCM_DEFAULT_VLAN, TASK_PRIORITY, DEFAULT_TPID, DEFAULT_GEM_PAYLOAD
+
+OP = EntityOperations
+RC = ReasonCodes
+
+
+class MibDownloadFailure(Exception):
+    """
+    This error is raised by default when the download fails
+    """
+
+
+class MibResourcesFailure(Exception):
+    """
+    This error is raised when one or more required resources are not available
+    """
+
+
+class BrcmMibDownloadTask(Task):
+    """
+    OpenOMCI MIB Download Example
+
+    This task takes the legacy OMCI 'script' for provisioning the Broadcom ONU
+    and converts it to run as a Task on the OpenOMCI Task runner.  This is
+    in order to begin to decompose service instantiation in preparation for
+    Technology Profile work.
+
+    Once technology profiles are ready, some of this task may remain here or be
+    moved into OpenOMCI if there are settings/configs common to every profile
+    provided in the v2.0 release.
+
+    Currently, the only service tech profiles expected by v2.0 will be for AT&T
+    residential data service and DT residential data service.
+    """
+
+    name = "Broadcom MIB Download Example Task"
+
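+    # Typical usage (see the handler's _mib_in_sync): the task is queued on the
+    # OpenOMCI task runner and success/failure callbacks are attached to the
+    # returned deferred.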
+    def __init__(self, omci_agent, handler):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param handler: ONU device handler that owns this task
+        """
+
+        self.log = structlog.get_logger(device_id=handler.device_id)
+        self.log.debug('function-entry')
+
+        super(BrcmMibDownloadTask, self).__init__(BrcmMibDownloadTask.name,
+                                                  omci_agent,
+                                                  handler.device_id,
+                                                  priority=TASK_PRIORITY)
+        self._handler = handler
+        self._onu_device = omci_agent.get_device(handler.device_id)
+        self._local_deferred = None
+
+        # Frame size
+        self._max_gem_payload = DEFAULT_GEM_PAYLOAD
+
+        self._pon = handler.pon_port
+
+        # Defaults
+        self._input_tpid = DEFAULT_TPID
+        self._output_tpid = DEFAULT_TPID
+
+        self._vlan_tcis_1 = BRDCM_DEFAULT_VLAN
+        self._cvid = BRDCM_DEFAULT_VLAN
+        self._vlan_config_entity_id = self._vlan_tcis_1
+
+        # Entity IDs. IDs with values can probably be most anything for most ONUs,
+        #             IDs set to None are discovered/set
+
+        self._mac_bridge_service_profile_entity_id = \
+            self._handler.mac_bridge_service_profile_entity_id
+        self._ieee_mapper_service_profile_entity_id = \
+            self._pon.ieee_mapper_service_profile_entity_id
+        self._mac_bridge_port_ani_entity_id = \
+            self._pon.mac_bridge_port_ani_entity_id
+        self._gal_enet_profile_entity_id = \
+            self._handler.gal_enet_profile_entity_id
+
+        self._free_ul_prior_q_entity_ids = set()
+        self._free_dl_prior_q_entity_ids = set()
+
+    def cancel_deferred(self):
+        self.log.debug('function-entry')
+        super(BrcmMibDownloadTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start the MIB Download
+        """
+        self.log.debug('function-entry')
+        super(BrcmMibDownloadTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_mib_download)
+
+    def stop(self):
+        """
+        Shutdown MIB Synchronization tasks
+        """
+        self.log.debug('function-entry')
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        super(BrcmMibDownloadTask, self).stop()
+
+    def check_status_and_state(self, results, operation=''):
+        """
+        Check the results of an OMCI response.  An exception is thrown
+        if the task was cancelled or an error was detected.
+
+        :param results: (OmciFrame) OMCI Response frame
+        :param operation: (str) what operation was being performed
+        :return: True if successful, False if the entity existed (already created)
+        """
+        self.log.debug('function-entry')
+
+        omci_msg = results.fields['omci_message'].fields
+        status = omci_msg['success_code']
+        error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
+        failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
+        unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
+
+        self.log.debug("OMCI Result: %s", operation,
+                       omci_msg=omci_msg, status=status,
+                       error_mask=error_mask, failed_mask=failed_mask,
+                       unsupported_mask=unsupported_mask)
+
+        if status == RC.Success:
+            self.strobe_watchdog()
+            return True
+
+        elif status == RC.InstanceExists:
+            return False
+
+        raise MibDownloadFailure('{} failed with a status of {}, error_mask: {}, failed_mask: {}, unsupported_mask: {}'
+                                 .format(operation, status, error_mask, failed_mask, unsupported_mask))
+
+    @inlineCallbacks
+    def perform_mib_download(self):
+        """
+        Send the commands to minimally configure the PON, Bridge, and
+        UNI ports for this device. The application of any service flows
+        and other characteristics are done as needed.
+        """
+        self.log.debug('function-entry')
+        self.log.info('perform-download')
+
+        device = self._handler.adapter_agent.get_device(self.device_id)
+
+        if self._handler.enabled and len(self._handler.uni_ports) > 0:
+            device.reason = 'performing-initial-mib-download'
+            self._handler.adapter_agent.update_device(device)
+
+            try:
+                # Lock the UNI ports to prevent any alarms during initial configuration
+                # of the ONU
+                self.strobe_watchdog()
+
+                # Provision the initial bridge configuration
+                yield self.perform_initial_bridge_setup()
+
+                for uni_port in self._handler.uni_ports:
+                    yield self.enable_uni(uni_port, True)
+
+                    # Provision the initial bridge configuration
+                    yield self.perform_uni_initial_bridge_setup(uni_port)
+
+                    # And re-enable the UNIs if needed
+                    yield self.enable_uni(uni_port, False)
+
+                self.deferred.callback('initial-download-success')
+
+            except TimeoutError as e:
+                self.log.error('initial-download-failure', e=e)
+                self.deferred.errback(failure.Failure(e))
+
+            except Exception as e:
+                self.log.exception('initial-download-failure', e=e)
+                self.deferred.errback(failure.Failure(e))
+
+        else:
+            e = MibResourcesFailure('Required resources are not available',
+                                    len(self._handler.uni_ports))
+            self.deferred.errback(failure.Failure(e))
+
+    @inlineCallbacks
+    def perform_initial_bridge_setup(self):
+        self.log.debug('function-entry')
+
+        omci_cc = self._onu_device.omci_cc
+        # TODO: too many magic numbers
+
+        try:
+            ########################################################################################
+            # Create GalEthernetProfile - Once per ONU/PON interface
+            #
+            #  EntityID will be referenced by:
+            #            - GemInterworkingTp
+            #  References:
+            #            - Nothing
+
+            msg = GalEthernetProfileFrame(
+                self._gal_enet_profile_entity_id,
+                max_gem_payload_size=self._max_gem_payload
+            )
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'create-gal-ethernet-profile')
+
+        except TimeoutError as e:
+            self.log.warn('rx-timeout-0', e=e)
+            raise
+
+        except Exception as e:
+            self.log.exception('omci-setup-0', e=e)
+            raise
+
+        returnValue(None)
+
+    @inlineCallbacks
+    def perform_uni_initial_bridge_setup(self, uni_port):
+        self.log.debug('function-entry')
+        omci_cc = self._onu_device.omci_cc
+        frame = None
+        try:
+            ################################################################################
+            # Common - PON and/or UNI                                                      #
+            ################################################################################
+            # MAC Bridge Service Profile
+            #
+            #  EntityID will be referenced by:
+            #            - MAC Bridge Port Configuration Data (PON & UNI)
+            #  References:
+            #            - Nothing
+
+            # TODO: magic numbers. Even if static, assign them to meaningful variable names
+            attributes = {
+                'spanning_tree_ind': False,
+                'learning_ind': True,
+                'priority': 0x8000,
+                'max_age': 20 * 256,
+                'hello_time': 2 * 256,
+                'forward_delay': 15 * 256,
+                'unknown_mac_address_discard': True
+            }
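+            # Note: per G.988, the bridge timer attributes above are encoded in
+            # units of 1/256 s, so max_age 20*256 = 20 s, hello_time 2*256 = 2 s
+            # and forward_delay 15*256 = 15 s; priority 0x8000 is the 802.1D
+            # default bridge priority.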
+            msg = MacBridgeServiceProfileFrame(
+                self._mac_bridge_service_profile_entity_id + uni_port.mac_bridge_port_num,
+                attributes
+            )
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'create-mac-bridge-service-profile')
+
+            ################################################################################
+            # PON Specific                                                                 #
+            ################################################################################
+            # IEEE 802.1 Mapper Service config - Once per PON
+            #
+            #  EntityID will be referenced by:
+            #            - MAC Bridge Port Configuration Data for the PON port
+            #  References:
+            #            - Nothing at this point. When a GEM port is created, this entity will
+            #              be updated to reference the GEM Interworking TP
+
+            msg = Ieee8021pMapperServiceProfileFrame(self._ieee_mapper_service_profile_entity_id + uni_port.mac_bridge_port_num)
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'create-8021p-mapper-service-profile')
+
+            ################################################################################
+            # Create MAC Bridge Port Configuration Data for the PON port via IEEE 802.1
+            # mapper service. Upon receipt by the ONU, the ONU will create an instance
+            # of the following before returning the response.
+            #
+            #     - MAC bridge port designation data
+            #     - MAC bridge port filter table data
+            #     - MAC bridge port bridge table data
+            #
+            #  EntityID will be referenced by:
+            #            - Implicitly by the VLAN tagging filter data
+            #  References:
+            #            - MAC Bridge Service Profile (the bridge)
+            #            - IEEE 802.1p mapper service profile for PON port
+
+            # TODO: magic. make a static variable for tp_type
+            msg = MacBridgePortConfigurationDataFrame(
+                self._mac_bridge_port_ani_entity_id + uni_port.mac_bridge_port_num,
+                bridge_id_pointer=self._mac_bridge_service_profile_entity_id + uni_port.mac_bridge_port_num,  # Bridge Entity ID
+                port_num=0xff,  # Port ID - unique number within the bridge
+                tp_type=3, # TP Type (IEEE 802.1p mapper service)
+                tp_pointer=self._ieee_mapper_service_profile_entity_id + uni_port.mac_bridge_port_num  # TP ID, 8021p mapper ID
+            )
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'create-mac-bridge-port-configuration-data-part-1')
+
+            ################################################################################
+            # VLAN Tagging Filter config
+            #
+            #  EntityID will be referenced by:
+            #            - Nothing
+            #  References:
+            #            - MacBridgePortConfigurationData for the ANI/PON side
+            #
+            # The VID value set here is a don't-care: this entry is not used once
+            # the Extended VLAN tagging operation is configured
+
+            # TODO: magic. make a static variable for forward_op
+            msg = VlanTaggingFilterDataFrame(
+                self._mac_bridge_port_ani_entity_id + uni_port.mac_bridge_port_num,  # Entity ID
+                vlan_tcis=[self._vlan_tcis_1],        # VLAN IDs
+                forward_operation=0x10
+            )
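+            # Note (assumption based on the comment above): forward_operation 0x10
+            # is used throughout this adapter as a pass-through setting; the TCI
+            # filter is effectively unused once the extended VLAN tagging ME is
+            # configured by the tech-profile task.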
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'create-vlan-tagging-filter-data')
+
+            ################################################################################
+            # UNI Specific                                                                 #
+            ################################################################################
+            # MAC Bridge Port config
+            # This configuration is for Ethernet UNI
+            #
+            #  EntityID will be referenced by:
+            #            - Nothing
+            #  References:
+            #            - MAC Bridge Service Profile (the bridge)
+            #            - PPTP Ethernet or VEIP UNI
+
+            # TODO: do this for all uni/ports...
+            # TODO: magic. make a static variable for tp_type
+
+            # TP type: 1 = PPTP Ethernet UNI, 11 = VEIP; default to PPTP
+            if uni_port.type is UniType.VEIP:
+                tp_type = 11
+            else:
+                tp_type = 1
+
+            msg = MacBridgePortConfigurationDataFrame(
+                uni_port.entity_id,            # Entity ID - This is read-only/set-by-create !!!
+                bridge_id_pointer=self._mac_bridge_service_profile_entity_id + uni_port.mac_bridge_port_num,  # Bridge Entity ID
+                port_num=uni_port.mac_bridge_port_num,   # Port ID
+                tp_type=tp_type,                         # PPTP Ethernet or VEIP UNI
+                tp_pointer=uni_port.entity_id            # Ethernet UNI ID
+            )
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'create-mac-bridge-port-configuration-data-part-2')
+
+        except TimeoutError as e:
+            self.log.warn('rx-timeout-1', e=e)
+            raise
+
+        except Exception as e:
+            self.log.exception('omci-setup-1', e=e)
+            raise
+
+        returnValue(None)
+
+    @inlineCallbacks
+    def enable_uni(self, uni_port, force_lock):
+        """
+        Lock or unlock a single uni port
+
+        :param uni_port: UniPort to admin up/down
+        :param force_lock: (boolean) If True, force lock regardless of enabled state
+        """
+        self.log.debug('function-entry')
+
+        omci_cc = self._onu_device.omci_cc
+        frame = None
+
+        ################################################################################
+        #  Lock/Unlock UNI  -  0 to unlock, 1 to lock
+        #
+        #  EntityID is referenced by:
+        #            - MAC bridge port configuration data for the UNI side
+        #  References:
+        #            - Nothing
+        try:
+            state = 1 if force_lock or not uni_port.enabled else 0
+            msg = None
+            if uni_port.type is UniType.PPTP:
+                msg = PptpEthernetUniFrame(uni_port.entity_id,
+                                           attributes=dict(administrative_state=state))
+            elif uni_port.type is UniType.VEIP:
+                msg = VeipUniFrame(uni_port.entity_id,
+                                   attributes=dict(administrative_state=state))
+            else:
+                self.log.warn('unknown-uni-type', uni_port=uni_port)
+
+            if msg:
+                frame = msg.set()
+                self.log.debug('openomci-msg', omci_msg=msg)
+                results = yield omci_cc.send(frame)
+                self.check_status_and_state(results, 'set-pptp-ethernet-uni-lock-restore')
+
+        except TimeoutError as e:
+            self.log.warn('rx-timeout', e=e)
+            raise
+
+        except Exception as e:
+            self.log.exception('omci-failure', e=e)
+            raise
+
+        returnValue(None)
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_mib_sync.py b/python/adapters/brcm_openomci_onu/omci/brcm_mib_sync.py
new file mode 100644
index 0000000..1898c52
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_mib_sync.py
@@ -0,0 +1,77 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet import reactor
+from voltha.extensions.omci.state_machines.mib_sync import MibSynchronizer
+
+log = structlog.get_logger()
+
+class BrcmMibSynchronizer(MibSynchronizer):
+    """
+    OpenOMCI MIB Synchronizer state machine for Broadcom ONUs
+    """
+
+    def __init__(self, agent, device_id, mib_sync_tasks, db,
+                 advertise_events=False):
+        """
+        Class initialization
+
+        :param agent: (OpenOmciAgent) Agent
+        :param device_id: (str) ONU Device ID
+        :param db: (MibDbVolatileDict) MIB Database
+        :param mib_sync_tasks: (dict) Tasks to run
+        :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+        self.log.debug('function-entry')
+
+        super(BrcmMibSynchronizer, self).__init__(agent, device_id, mib_sync_tasks, db,
+                                                  advertise_events=advertise_events)
+
+    def on_enter_starting(self):
+        """
+        Since MIB resync and audit are currently unreliable (see below), flag the ONU
+        as a new device, which forces a MIB reset and a full MIB upload
+        """
+        self.log.warn('db-sync-not-supported-forcing-reset')
+        self._last_mib_db_sync_value = None
+        super(BrcmMibSynchronizer, self).on_enter_starting()
+
+    def on_enter_auditing(self):
+        """
+        Perform a MIB audit. This is currently broken on BRCM-based ONUs: the MIB is
+        never in sync, so the audit retries continuously. On disable/enable the device
+        never re-enables because it is never in sync. Effectively disable the audit so
+        disable/enable works while we figure out what is going on
+
+        Oddly enough this is only an issue with MibVolatileDict
+        """
+        # TODO: Actually fix resync
+        self.log.warn('audit-resync-not-supported')
+
+        self._deferred = reactor.callLater(0, self.success)
+
+    def on_enter_examining_mds(self):
+        """
+        Examine the MIB data sync (MDS) counter difference between the ONU and VOLTHA.
+        This currently has the same problem as on_enter_auditing: the MDS value is
+        always mismatched, causing disable/enable to fail
+
+        Oddly enough this is only an issue with MibVolatileDict
+        """
+        # TODO: Actually fix resync
+        self.log.warn('examine-mds-resync-not-supported')
+
+        self._deferred = reactor.callLater(0, self.success)
+
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_tp_service_specific_task.py b/python/adapters/brcm_openomci_onu/omci/brcm_tp_service_specific_task.py
new file mode 100644
index 0000000..ff0bd30
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_tp_service_specific_task.py
@@ -0,0 +1,482 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from common.frameio.frameio import hexify
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, TimeoutError, failure
+from voltha.extensions.omci.omci_me import *
+from voltha.extensions.omci.tasks.task import Task
+from voltha.extensions.omci.omci_defs import *
+from voltha.adapters.brcm_openomci_onu.uni_port import *
+from voltha.adapters.brcm_openomci_onu.pon_port \
+    import BRDCM_DEFAULT_VLAN, TASK_PRIORITY, DEFAULT_TPID, DEFAULT_GEM_PAYLOAD
+
+OP = EntityOperations
+RC = ReasonCodes
+
+
+class TechProfileDownloadFailure(Exception):
+    """
+    This error is raised by default when the download fails
+    """
+
+
+class TechProfileResourcesFailure(Exception):
+    """
+    This error is raised by when one or more resources required is not available
+    """
+
+
+class BrcmTpServiceSpecificTask(Task):
+    """
+    OpenOMCI Tech-Profile Download Task
+
+    """
+
+    name = "Broadcom Tech-Profile Download Task"
+
+    def __init__(self, omci_agent, handler, uni_id):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param handler: (BrcmOpenomciOnuHandler) ONU device handler owning this task
+        :param uni_id: (int) UNI index on the ONU to which this tech profile applies
+        """
+        log = structlog.get_logger(device_id=handler.device_id, uni_id=uni_id)
+        log.debug('function-entry')
+
+        super(BrcmTpServiceSpecificTask, self).__init__(BrcmTpServiceSpecificTask.name,
+                                                        omci_agent,
+                                                        handler.device_id,
+                                                        priority=TASK_PRIORITY,
+                                                        exclusive=True)
+
+        self.log = log
+
+        self._onu_device = omci_agent.get_device(handler.device_id)
+        self._local_deferred = None
+
+        # Frame size
+        self._max_gem_payload = DEFAULT_GEM_PAYLOAD
+
+        self._uni_port = handler.uni_ports[uni_id]
+        assert self._uni_port.uni_id == uni_id
+
+        # Default TPIDs
+        self._input_tpid = DEFAULT_TPID
+        self._output_tpid = DEFAULT_TPID
+
+        self._vlan_tcis_1 = BRDCM_DEFAULT_VLAN
+        self._cvid = BRDCM_DEFAULT_VLAN
+        self._vlan_config_entity_id = self._vlan_tcis_1
+
+        # Entity IDs. IDs with values can probably be most anything for most ONUs,
+        #             IDs set to None are discovered/set
+
+        self._mac_bridge_service_profile_entity_id = \
+            handler.mac_bridge_service_profile_entity_id
+        self._ieee_mapper_service_profile_entity_id = \
+            handler.pon_port.ieee_mapper_service_profile_entity_id
+        self._mac_bridge_port_ani_entity_id = \
+            handler.pon_port.mac_bridge_port_ani_entity_id
+        self._gal_enet_profile_entity_id = \
+            handler.gal_enet_profile_entity_id
+
+        # Extract the current set of TCONT and GEM Ports from the Handler's pon_port that are
+        # relevant to this task's UNI. It won't change. But, the underlying pon_port may change
+        # due to additional tasks on different UNIs. So, we cannot use the pon_port after
+        # this initializer
+        self._tconts = []
+        for tcont in handler.pon_port.tconts.itervalues():
+            if tcont.uni_id is not None and tcont.uni_id != self._uni_port.uni_id:
+                continue
+            self._tconts.append(tcont)
+
+        self._gem_ports = []
+        for gem_port in handler.pon_port.gem_ports.itervalues():
+            if gem_port.uni_id is not None and gem_port.uni_id != self._uni_port.uni_id:
+                continue
+            self._gem_ports.append(gem_port)
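+        # e.g. for uni_id 0, tconts/gem ports whose uni_id is None (shared) or 0
+        # are kept; those bound to other UNIs are skipped.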
+
+        self.tcont_me_to_queue_map = dict()
+        self.uni_port_to_queue_map = dict()
+
+    def cancel_deferred(self):
+        self.log.debug('function-entry')
+        super(BrcmTpServiceSpecificTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except Exception:
+            # The deferred may already have fired or been cancelled; nothing left to clean up
+            pass
+
+    def start(self):
+        """
+        Start the Tech-Profile Download
+        """
+        self.log.debug('function-entry')
+        super(BrcmTpServiceSpecificTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_service_specific_steps)
+
+    def stop(self):
+        """
+        Shutdown Tech-Profile download tasks
+        """
+        self.log.debug('function-entry')
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        super(BrcmTpServiceSpecificTask, self).stop()
+
+    def check_status_and_state(self, results, operation=''):
+        """
+        Check the results of an OMCI response.  An exception is thrown
+        if the task was cancelled or an error was detected.
+
+        :param results: (OmciFrame) OMCI Response frame
+        :param operation: (str) what operation was being performed
+        :return: True if successful, False if the entity existed (already created)
+        """
+        self.log.debug('function-entry')
+
+        omci_msg = results.fields['omci_message'].fields
+        status = omci_msg['success_code']
+        error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
+        failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
+        unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
+
+        self.log.debug("OMCI Result: %s", operation, omci_msg=omci_msg, status=status, error_mask=error_mask,
+                       failed_mask=failed_mask, unsupported_mask=unsupported_mask)
+
+        if status == RC.Success:
+            self.strobe_watchdog()
+            return True
+
+        elif status == RC.InstanceExists:
+            return False
+
+        raise TechProfileDownloadFailure(
+            '{} failed with a status of {}, error_mask: {}, failed_mask: {}, unsupported_mask: {}'
+            .format(operation, status, error_mask, failed_mask, unsupported_mask))
+
+    @inlineCallbacks
+    def perform_service_specific_steps(self):
+        self.log.debug('function-entry')
+
+        omci_cc = self._onu_device.omci_cc
+
+        try:
+            ################################################################################
+            # TCONTS
+            #
+            #  EntityID will be referenced by:
+            #            - GemPortNetworkCtp
+            #  References:
+            #            - ONU created TCONT (created on ONU startup)
+
+            tcont_idents = self._onu_device.query_mib(Tcont.class_id)
+            self.log.debug('tcont-idents', tcont_idents=tcont_idents)
+
+            for tcont in self._tconts:
+                self.log.debug('tcont-loop', tcont=tcont)
+
+                if tcont.entity_id is None:
+                    free_entity_id = None
+                    for k, v in tcont_idents.items():
+                        alloc_check = v.get('attributes', {}).get('alloc_id', 0)
+                        # Some ONUs report 0xFF, others 0xFFFF, to indicate an available (unassigned) tcont
+                        if alloc_check in (0xFF, 0xFFFF):
+                            free_entity_id = k
+                            break
+
+                    self.log.debug('tcont-loop-free', free_entity_id=free_entity_id, alloc_id=tcont.alloc_id)
+
+                    if free_entity_id is None:
+                        self.log.error('no-available-tconts')
+                        break
+
+                    # Also assign entity id within tcont object
+                    results = yield tcont.add_to_hardware(omci_cc, free_entity_id)
+                    self.check_status_and_state(results, 'new-tcont-added')
+                else:
+                    # likely already added given entity_id is set, but no harm in doing it again
+                    results = yield tcont.add_to_hardware(omci_cc, tcont.entity_id)
+                    self.check_status_and_state(results, 'existing-tcont-added')
+
+            ################################################################################
+            # GEMS  (GemPortNetworkCtp and GemInterworkingTp)
+            #
+            #  For both of these MEs, the entity_id is the GEM Port ID. The entity id of the
+            #  GemInterworkingTp ME could be different since it has an attribute to specify
+            #  the GemPortNetworkCtp entity id.
+            #
+            #  GemPortNetworkCtp
+            #    EntityID will be referenced by:
+            #              - GemInterworkingTp
+            #    References:
+            #              - TCONT
+            #              - Hardcoded upstream TM Entity ID
+            #              - (Possibly in Future) Upstream Traffic descriptor profile pointer
+            #
+            #  GemInterworkingTp
+            #    EntityID will be referenced by:
+            #              - Ieee8021pMapperServiceProfile
+            #    References:
+            #              - GemPortNetworkCtp
+            #              - Ieee8021pMapperServiceProfile
+            #              - GalEthernetProfile
+            #
+
+            onu_g = self._onu_device.query_mib(OntG.class_id)
+            # If the traffic management option attribute in the ONU-G ME is 0
+            # (priority controlled) or 2 (priority and rate controlled), this
+            # pointer specifies the priority queue ME serving this GEM port
+            # network CTP. If the traffic management option attribute is 1
+            # (rate controlled), this attribute redundantly points to the
+            # T-CONT serving this GEM port network CTP.
+            traffic_mgmt_opt = \
+                onu_g.get('attributes', {}).get('traffic_management_options', 0)
+            self.log.debug("traffic-mgmt-option", traffic_mgmt_opt=traffic_mgmt_opt)
+
+            prior_q = self._onu_device.query_mib(PriorityQueueG.class_id)
+            for k, v in prior_q.items():
+                self.log.debug("prior-q", k=k, v=v)
+
+                try:
+                    _ = iter(v)
+                except TypeError:
+                    continue
+
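+                # Per G.988, a PriorityQueue-G instance id with bit 15 set is an
+                # upstream queue. The related_port attribute packs two fields:
+                # the upper 16 bits are the associated T-CONT ME id (upstream)
+                # or UNI ME id (downstream) and the lower 16 bits are the queue
+                # priority, e.g. related_port 0x80010000 -> T-CONT ME 0x8001,
+                # priority 0.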
+                if 'instance_id' in v:
+                    related_port = v['attributes']['related_port']
+                    if v['instance_id'] & 0b1000000000000000:
+                        tcont_me = (related_port & 0xffff0000) >> 16
+                        if tcont_me not in self.tcont_me_to_queue_map:
+                            self.log.debug("prior-q-related-port-and-tcont-me",
+                                            related_port=related_port,
+                                            tcont_me=tcont_me)
+                            self.tcont_me_to_queue_map[tcont_me] = list()
+
+                        self.tcont_me_to_queue_map[tcont_me].append(k)
+                    else:
+                        uni_port = (related_port & 0xffff0000) >> 16
+                        if uni_port == self._uni_port.entity_id:
+                            if uni_port not in self.uni_port_to_queue_map:
+                                self.log.debug("prior-q-related-port-and-uni-port-me",
+                                                related_port=related_port,
+                                                uni_port_me=uni_port)
+                                self.uni_port_to_queue_map[uni_port] = list()
+
+                            self.uni_port_to_queue_map[uni_port].append(k)
+
+            self.log.debug("ul-prior-q", ul_prior_q=self.tcont_me_to_queue_map)
+            self.log.debug("dl-prior-q", dl_prior_q=self.uni_port_to_queue_map)
+
+            for gem_port in self._gem_ports:
+                # TODO: Traffic descriptor will be available after meter bands are available
+                tcont = gem_port.tcont
+                if tcont is None:
+                    self.log.error('unknown-tcont-reference', gem_id=gem_port.gem_id)
+                    continue
+
+                ul_prior_q_entity_id = None
+                dl_prior_q_entity_id = None
+                if gem_port.direction == "upstream" or \
+                        gem_port.direction == "bi-directional":
+
+                    # Sort the priority queue list in order of priority.
+                    # 0 is highest priority and 0x0fff is lowest.
+                    self.tcont_me_to_queue_map[tcont.entity_id].sort()
+                    self.uni_port_to_queue_map[self._uni_port.entity_id].sort()
+                    # Get the priority queue associated with p-bit that is
+                    # mapped to the gem port.
+                    # p-bit-7 is highest priority and p-bit-0 is lowest
+                    # Gem port associated with p-bit-7 should be mapped to
+                    # highest priority queue and gem port associated with p-bit-0
+                    # should be mapped to lowest priority queue.
+                    # The self.tcont_me_to_queue_map and self.uni_port_to_queue_map
+                    # have priority queue entities ordered in descending order
+                    # of priority
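+                    # Example (illustrative): with pbit_map '00000011' the first
+                    # '1' is at index 6 (p-bit 1, since index 0 is p-bit 7), so
+                    # the 7th entry of each sorted queue list is selected.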
+                    for i, p in enumerate(gem_port.pbit_map):
+                        if p == '1':
+                            ul_prior_q_entity_id = \
+                                self.tcont_me_to_queue_map[tcont.entity_id][i]
+                            dl_prior_q_entity_id = \
+                                self.uni_port_to_queue_map[self._uni_port.entity_id][i]
+                            break
+
+                    assert ul_prior_q_entity_id is not None and \
+                           dl_prior_q_entity_id is not None
+
+                    # TODO: Need to restore on failure.  Need to check status/results
+                    results = yield gem_port.add_to_hardware(omci_cc,
+                                             tcont.entity_id,
+                                             self._ieee_mapper_service_profile_entity_id +
+                                                      self._uni_port.mac_bridge_port_num,
+                                             self._gal_enet_profile_entity_id,
+                                             ul_prior_q_entity_id, dl_prior_q_entity_id)
+                    self.check_status_and_state(results, 'assign-gem-port')
+                elif gem_port.direction == "downstream":
+                    # Downstream is inverse of upstream
+                    # TODO: could also be a case of multicast. Not supported for now
+                    self.log.debug("skipping-downstream-gem", gem_port=gem_port)
+                    pass
+
+            ################################################################################
+            # Update the IEEE 802.1p Mapper Service Profile config
+            #
+            #  EntityID was created prior to this call. This is a set
+            #
+            #  References:
+            #            - Gem Interwork TPs are set here
+            #
+
+            gem_entity_ids = [OmciNullPointer] * 8
+            for gem_port in self._gem_ports:
+                self.log.debug("tp-gem-port", entity_id=gem_port.entity_id, uni_id=gem_port.uni_id)
+
+                if gem_port.direction == "upstream" or \
+                        gem_port.direction == "bi-directional":
+                    for i, p in enumerate(reversed(gem_port.pbit_map)):
+                        if p == '1':
+                            gem_entity_ids[i] = gem_port.entity_id
+                elif gem_port.direction == "downstream":
+                    # Downstream gem port p-bit mapper is inverse of upstream
+                    # TODO: Could also be a case of multicast. Not supported for now
+                    pass
+
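+            # Example (illustrative): a single upstream GEM port with entity id
+            # 0x101 and pbit_map '00000011' yields interwork_tp_pointers
+            # [0x101, 0x101, null, null, null, null, null, null], i.e. p-bits 0
+            # and 1 both map to that GEM port.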
+            msg = Ieee8021pMapperServiceProfileFrame(
+                self._ieee_mapper_service_profile_entity_id + self._uni_port.mac_bridge_port_num,  # 802.1p mapper Service Mapper Profile ID
+                interwork_tp_pointers=gem_entity_ids  # Interworking TP IDs
+            )
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'set-8021p-mapper-service-profile-ul')
+
+            ################################################################################
+            # Create Extended VLAN Tagging Operation config (PON-side)
+            #
+            #  EntityID relates to the VLAN TCIS
+            #  References:
+            #            - VLAN TCIS from previously created VLAN Tagging filter data
+            #            - PPTP Ethernet or VEIP UNI
+            #
+
+            # TODO: do this for all uni/ports...
+            # TODO: magic.  static variable for assoc_type
+
+            # Association type: 2 = PPTP Ethernet UNI, 10 = VEIP; default to PPTP
+            if self._uni_port.type is UniType.VEIP:
+                association_type = 10
+            else:
+                association_type = 2
+
+            attributes = dict(
+                association_type=association_type,                  # Assoc Type, PPTP/VEIP Ethernet UNI
+                associated_me_pointer=self._uni_port.entity_id,      # Assoc ME, PPTP/VEIP Entity Id
+
+                # See VOL-1311 - Need to set table during create to avoid exception
+                # trying to read back table during post-create-read-missing-attributes
+                # But, because this is a R/W attribute. Some ONU may not accept the
+                # value during create. It is repeated again in a set below.
+                input_tpid=self._input_tpid,  # input TPID
+                output_tpid=self._output_tpid,  # output TPID
+            )
+
+            msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
+                self._mac_bridge_service_profile_entity_id + self._uni_port.mac_bridge_port_num,  # Bridge Entity ID
+                attributes=attributes
+            )
+
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'create-extended-vlan-tagging-operation-configuration-data')
+
+            attributes = dict(
+                # Specifies the TPIDs in use and that operations in the downstream direction are
+                # inverse to the operations in the upstream direction
+                input_tpid=self._input_tpid,    # input TPID
+                output_tpid=self._output_tpid,  # output TPID
+                downstream_mode=0,              # inverse of upstream
+            )
+
+            msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
+                self._mac_bridge_service_profile_entity_id + self._uni_port.mac_bridge_port_num,  # Bridge Entity ID
+                attributes=attributes
+            )
+
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'set-extended-vlan-tagging-operation-configuration-data')
+
+            attributes = dict(
+                # parameters: Entity Id (0x900), Filter Inner Vlan Id (0x1000 = 4096,
+                #             i.e. do not filter on inner vid), Treatment Inner Vlan Id: 2
+
+                # Update uni side extended vlan filter
+                # filter for untagged
+                # probably for eapol
+                # TODO: lots of magic
+                # TODO: magic 0x1000 / 4096?
+                received_frame_vlan_tagging_operation_table=
+                VlanTaggingOperation(
+                    filter_outer_priority=15,  # This entry is not a double-tag rule
+                    filter_outer_vid=4096,     # Do not filter on the outer VID value
+                    filter_outer_tpid_de=0,    # Do not filter on the outer TPID field
+
+                    filter_inner_priority=15,
+                    filter_inner_vid=4096,
+                    filter_inner_tpid_de=0,
+                    filter_ether_type=0,
+
+                    treatment_tags_to_remove=0,
+                    treatment_outer_priority=15,
+                    treatment_outer_vid=0,
+                    treatment_outer_tpid_de=0,
+
+                    treatment_inner_priority=0,
+                    treatment_inner_vid=self._cvid,
+                    treatment_inner_tpid_de=4,
+                )
+            )
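+            # Net effect: filter_outer_priority=15 marks this as not a
+            # double-tag rule and filter_inner_priority=15 marks it as a no-tag
+            # rule, so the treatment adds a single tag carrying the default
+            # cvid to untagged upstream frames (e.g. EAPOL).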
+
+            msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
+                self._mac_bridge_service_profile_entity_id + self._uni_port.mac_bridge_port_num,  # Bridge Entity ID
+                attributes=attributes
+            )
+
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'set-extended-vlan-tagging-operation-configuration-data-table')
+
+            self.deferred.callback("tech-profile-download-success")
+
+        except TimeoutError as e:
+            self.log.warn('rx-timeout-2', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+        except Exception as e:
+            self.log.exception('omci-setup-2', e=e)
+            self.deferred.errback(failure.Failure(e))
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_uni_lock_task.py b/python/adapters/brcm_openomci_onu/omci/brcm_uni_lock_task.py
new file mode 100644
index 0000000..c304a27
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_uni_lock_task.py
@@ -0,0 +1,140 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from voltha.extensions.omci.tasks.task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, returnValue
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import OntGFrame
+from voltha.extensions.omci.omci_me import PptpEthernetUniFrame, VeipUniFrame
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class BrcmUniLockException(Exception):
+    pass
+
+
+class BrcmUniLockTask(Task):
+    """
+    Lock or unlock all discovered UNI/PPTP on the ONU
+    """
+    task_priority = 200
+    name = "Broadcom UNI Lock Task"
+
+    def __init__(self, omci_agent, device_id, lock=True, priority=task_priority):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param lock: (bool) If True, administratively lock all UNIs; if False, unlock them
+        :param priority: (int) OpenOMCI Task priority (0..255) 255 is the highest
+        """
+        super(BrcmUniLockTask, self).__init__(BrcmUniLockTask.name,
+                                                omci_agent,
+                                                device_id,
+                                                priority=priority,
+                                                exclusive=True)
+        self._device = omci_agent.get_device(device_id)
+        self._lock = lock
+        self._results = None
+        self._local_deferred = None
+        self._config = self._device.configuration
+
+    def cancel_deferred(self):
+        super(BrcmUniLockTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except Exception:
+            # The deferred may already have fired or been cancelled; nothing left to clean up
+            pass
+
+    def start(self):
+        """
+        Start UNI/PPTP Lock/Unlock Task
+        """
+        super(BrcmUniLockTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_lock)
+
+
+    @inlineCallbacks
+    def perform_lock(self):
+        """
+        Perform the lock/unlock
+        """
+        self.log.info('setting-uni-lock-state', lock=self._lock)
+
+        try:
+            state = 1 if self._lock else 0
+
+            # Lock the whole ONT and all of its PPTPs; some ONUs do not honor the
+            # ONT-G lock alone, which causes odd behavior.
+            msg = OntGFrame(attributes={'administrative_state': state})
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield self._device.omci_cc.send(frame)
+            self.strobe_watchdog()
+
+            status = results.fields['omci_message'].fields['success_code']
+            self.log.info('response-status', status=status)
+
+            # Success?
+            if status in (RC.Success.value, RC.InstanceExists.value):
+                self.log.debug('set-lock-ontg', lock=self._lock)
+            else:
+                self.log.warn('cannot-set-lock-ontg', lock=self._lock)
+
+            pptp_list = sorted(self._config.pptp_entities) if self._config.pptp_entities else []
+            veip_list = sorted(self._config.veip_entities) if self._config.veip_entities else []
+
+            for entity_id in pptp_list:
+                pptp_value = self._config.pptp_entities[entity_id]
+                msg = PptpEthernetUniFrame(entity_id,
+                                           attributes=dict(administrative_state=state))
+                yield self._send_uni_lock_msg(entity_id, pptp_value, msg)
+
+            for entity_id in veip_list:
+                veip_value = self._config.veip_entities[entity_id]
+                msg = VeipUniFrame(entity_id,
+                                   attributes=dict(administrative_state=state))
+                yield self._send_uni_lock_msg(entity_id, veip_value, msg)
+
+            self.deferred.callback(self)
+
+        except Exception as e:
+            self.log.exception('setting-uni-lock-state', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+
+    @inlineCallbacks
+    def _send_uni_lock_msg(self, entity_id, value, me_message):
+        frame = me_message.set()
+        self.log.debug('openomci-msg', omci_msg=me_message)
+        results = yield self._device.omci_cc.send(frame)
+        self.strobe_watchdog()
+
+        status = results.fields['omci_message'].fields['success_code']
+        self.log.info('response-status', status=status)
+
+        # Success?
+        if status in (RC.Success.value, RC.InstanceExists.value):
+            self.log.debug('set-lock-uni', uni=entity_id, value=value, lock=self._lock)
+        else:
+            self.log.warn('cannot-set-lock-uni', uni=entity_id, value=value, lock=self._lock)
+
+        returnValue(None)
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_vlan_filter_task.py b/python/adapters/brcm_openomci_onu/omci/brcm_vlan_filter_task.py
new file mode 100644
index 0000000..6c665c7
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_vlan_filter_task.py
@@ -0,0 +1,216 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from voltha.extensions.omci.tasks.task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, returnValue
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import *
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class BrcmVlanFilterException(Exception):
+    pass
+
+
+class BrcmVlanFilterTask(Task):
+    """
+    Apply Vlan Tagging Filter Data and Extended VLAN Tagging Operation Configuration on an ANI and UNI
+    """
+    task_priority = 200
+    name = "Broadcom VLAN Filter Task"
+
+    def __init__(self, omci_agent, device_id, uni_port, set_vlan_id, priority=task_priority):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param uni_port: (UniPort) UNI port on which to apply the VLAN filter
+        :param set_vlan_id: (int) VLAN to filter for and set
+        :param priority: (int) OpenOMCI Task priority (0..255) 255 is the highest
+        """
+
+        self.log = structlog.get_logger(device_id=device_id, uni_port=uni_port.port_number)
+
+        super(BrcmVlanFilterTask, self).__init__(BrcmVlanFilterTask.name,
+                                                 omci_agent,
+                                                 device_id,
+                                                 priority=priority,
+                                                 exclusive=True)
+        self._device = omci_agent.get_device(device_id)
+        self._uni_port = uni_port
+        self._set_vlan_id = set_vlan_id
+        self._results = None
+        self._local_deferred = None
+        self._config = self._device.configuration
+
+    def cancel_deferred(self):
+        super(BrcmVlanFilterTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except Exception:
+            # The deferred may already have fired or been cancelled; nothing left to clean up
+            pass
+
+    def start(self):
+        """
+        Start Vlan Tagging Task
+        """
+        super(BrcmVlanFilterTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_vlan_tagging)
+
+    @inlineCallbacks
+    def perform_vlan_tagging(self):
+        """
+        Perform the vlan tagging
+        """
+        self.log.info('setting-vlan-tagging')
+
+        try:
+            # TODO: parameterize these from the handler, or objects in the handler
+            # TODO: make this a member of the onu gem port or the uni port
+            _mac_bridge_service_profile_entity_id = 0x201
+            _mac_bridge_port_ani_entity_id = 0x2102  # TODO: can we just use the entity id from the anis list?
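+            # e.g. for mac_bridge_port_num 1 this yields ANI-side bridge port
+            # entity 0x2103 and service profile 0x202; these hard-coded bases
+            # are assumed to match the entity ids chosen by the MIB download
+            # and tech-profile tasks above.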
+            # Delete bridge ani side vlan filter
+            msg = VlanTaggingFilterDataFrame(_mac_bridge_port_ani_entity_id + self._uni_port.mac_bridge_port_num)
+            frame = msg.delete()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame)
+            self.check_status_and_state(results, 'flow-delete-vlan-tagging-filter-data')
+
+            # Re-Create bridge ani side vlan filter
+            msg = VlanTaggingFilterDataFrame(
+                _mac_bridge_port_ani_entity_id + self._uni_port.mac_bridge_port_num,  # Entity ID
+                vlan_tcis=[self._set_vlan_id],  # VLAN IDs
+                forward_operation=0x10
+            )
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame)
+            self.check_status_and_state(results, 'flow-create-vlan-tagging-filter-data')
+
+            # Re-Create bridge ani side vlan filter
+
+            # Update uni side extended vlan filter
+            # filter for untagged
+            # probably for eapol
+            # TODO: Create constants for the operation values.  See omci spec
+            attributes = dict(
+                received_frame_vlan_tagging_operation_table=
+                VlanTaggingOperation(
+                    filter_outer_priority=15,
+                    filter_outer_vid=4096,
+                    filter_outer_tpid_de=0,
+
+                    filter_inner_priority=15,
+                    filter_inner_vid=4096,
+                    filter_inner_tpid_de=0,
+                    filter_ether_type=0,
+
+                    treatment_tags_to_remove=0,
+                    treatment_outer_priority=15,
+                    treatment_outer_vid=0,
+                    treatment_outer_tpid_de=0,
+
+                    treatment_inner_priority=0,
+                    treatment_inner_vid=self._set_vlan_id,
+                    treatment_inner_tpid_de=4
+                )
+            )
+            msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
+                _mac_bridge_service_profile_entity_id + self._uni_port.mac_bridge_port_num,  # Bridge Entity ID
+                attributes=attributes  # See above
+            )
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame)
+            self.check_status_and_state(results,
+                                        'flow-set-ext-vlan-tagging-op-config-data-untagged')
+
+            # Update uni side extended vlan filter
+            # filter for vlan 0
+            # TODO: Create constants for the operation values.  See omci spec
+            attributes = dict(
+                received_frame_vlan_tagging_operation_table=
+                VlanTaggingOperation(
+                    filter_outer_priority=15,  # This entry is not a double-tag rule
+                    filter_outer_vid=4096,  # Do not filter on the outer VID value
+                    filter_outer_tpid_de=0,  # Do not filter on the outer TPID field
+
+                    filter_inner_priority=8,  # Filter on inner vlan
+                    filter_inner_vid=0x0,  # Look for vlan 0
+                    filter_inner_tpid_de=0,  # Do not filter on inner TPID field
+                    filter_ether_type=0,  # Do not filter on EtherType
+
+                    treatment_tags_to_remove=1,
+                    treatment_outer_priority=15,
+                    treatment_outer_vid=0,
+                    treatment_outer_tpid_de=0,
+
+                    treatment_inner_priority=8,  # Add an inner tag and insert this value as the priority
+                    treatment_inner_vid=self._set_vlan_id,  # use this value as the VID in the inner VLAN tag
+                    treatment_inner_tpid_de=4,  # set TPID
+                )
+            )
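+            # Net effect: priority-tagged frames (VID 0) have their tag removed
+            # and replaced with a tag carrying set_vlan_id, so priority-tagged
+            # upstream traffic is mapped onto the provisioned VLAN.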
+            msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
+                _mac_bridge_service_profile_entity_id + self._uni_port.mac_bridge_port_num,  # Bridge Entity ID
+                attributes=attributes  # See above
+            )
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame)
+            self.check_status_and_state(results,
+                                        'flow-set-ext-vlan-tagging-op-config-data-zero-tagged')
+
+            self.deferred.callback(self)
+
+        except Exception as e:
+            self.log.exception('setting-vlan-tagging', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+    def check_status_and_state(self, results, operation=''):
+        """
+        Check the results of an OMCI response.  An exception is thrown
+        if the task was cancelled or an error was detected.
+
+        :param results: (OmciFrame) OMCI Response frame
+        :param operation: (str) what operation was being performed
+        :return: True if successful, False if the entity existed (already created)
+        """
+
+        omci_msg = results.fields['omci_message'].fields
+        status = omci_msg['success_code']
+        error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
+        failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
+        unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
+
+        self.log.debug("OMCI Result: %s", operation, omci_msg=omci_msg,
+                       status=status, error_mask=error_mask,
+                       failed_mask=failed_mask, unsupported_mask=unsupported_mask)
+
+        if status == RC.Success:
+            self.strobe_watchdog()
+            return True
+
+        elif status == RC.InstanceExists:
+            return False
+
+        raise BrcmVlanFilterException(
+            '{} failed with a status of {}, error_mask: {}, failed_mask: {}, unsupported_mask: {}'
+            .format(operation, status, error_mask, failed_mask, unsupported_mask))
diff --git a/python/adapters/brcm_openomci_onu/onu_gem_port.py b/python/adapters/brcm_openomci_onu/onu_gem_port.py
new file mode 100644
index 0000000..b388030
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/onu_gem_port.py
@@ -0,0 +1,385 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.defer import inlineCallbacks, returnValue
+from voltha.extensions.omci.omci_me import *
+from voltha.extensions.omci.omci_defs import *
+
+RC = ReasonCodes
+
+
+class OnuGemPort(object):
+    """
+    Broadcom ONU specific implementation
+    """
+
+    def __init__(self, gem_id, uni_id, alloc_id,
+                 entity_id=None,
+                 direction="BIDIRECTIONAL",
+                 encryption=False,
+                 discard_config=None,
+                 discard_policy=None,
+                 max_q_size="auto",
+                 pbit_map="0b00000011",
+                 priority_q=3,
+                 scheduling_policy="WRR",
+                 weight=8,
+                 omci_transport=False,
+                 multicast=False,
+                 tcont_ref=None,
+                 traffic_class=None,
+                 intf_ref=None,
+                 untagged=False,
+                 name=None,
+                 handler=None):
+
+        self.log = structlog.get_logger(device_id=handler.device_id, uni_id=uni_id, gem_id=gem_id)
+        self.log.debug('function-entry')
+
+        self.name = name
+        self.gem_id = gem_id
+        self.uni_id = uni_id
+        self._alloc_id = alloc_id
+        self.tcont_ref = tcont_ref
+        self.intf_ref = intf_ref
+        self.traffic_class = traffic_class
+        self._direction = None
+        self._encryption = encryption
+        self._discard_config = None
+        self._discard_policy = None
+        self._max_q_size = None
+        self._pbit_map = None
+        self._scheduling_policy = None
+        self._omci_transport = omci_transport
+        self.multicast = multicast
+        self.untagged = untagged
+        self._handler = handler
+
+        self.direction = direction
+        self.encryption = encryption
+        self.discard_config = discard_config
+        self.discard_policy = discard_policy
+        self.max_q_size = max_q_size
+        self.pbit_map = pbit_map
+        self.priority_q = priority_q
+        self.scheduling_policy = scheduling_policy
+        self.weight = weight
+
+        self._pon_id = None
+        self._onu_id = None
+        self._entity_id = entity_id
+
+        # Statistics
+        self.rx_packets = 0
+        self.rx_bytes = 0
+        self.tx_packets = 0
+        self.tx_bytes = 0
+
+    def __str__(self):
+        return "OnuGemPort - entity_id {}, alloc-id: {}, gem-id: {}, ".format(self.entity_id, self.alloc_id, self.gem_id)
+
+    def __repr__(self):
+        return str(self)
+
+    @property
+    def pon_id(self):
+        self.log.debug('function-entry')
+        return self._pon_id
+
+    @pon_id.setter
+    def pon_id(self, pon_id):
+        self.log.debug('function-entry')
+        assert self._pon_id is None or self._pon_id == pon_id, 'PON-ID can only be set once'
+        self._pon_id = pon_id
+
+    @property
+    def onu_id(self):
+        self.log.debug('function-entry')
+        return self._onu_id
+
+    @onu_id.setter
+    def onu_id(self, onu_id):
+        self.log.debug('function-entry', onu_id=onu_id)
+        assert self._onu_id is None or self._onu_id == onu_id, 'ONU-ID can only be set once'
+        self._onu_id = onu_id
+
+    @property
+    def alloc_id(self):
+        self.log.debug('function-entry')
+        return self._alloc_id
+
+    @property
+    def direction(self):
+        self.log.debug('function-entry')
+        return self._direction
+
+    @direction.setter
+    def direction(self, direction):
+        self.log.debug('function-entry')
+        # GEM Port network CTPs are configured separately for UPSTREAM and
+        # DOWNSTREAM; BIDIRECTIONAL is accepted here and mapped through to
+        # the OMCI framework's "bi-directional" value.
+        assert direction == "UPSTREAM" or direction == "DOWNSTREAM" or \
+               direction == "BIDIRECTIONAL", "invalid-direction"
+
+        # OMCI framework expects string in lower-case. Tech-Profile sends in upper-case.
+        if direction == "UPSTREAM":
+            self._direction = "upstream"
+        elif direction == "DOWNSTREAM":
+            self._direction = "downstream"
+        elif direction == "BIDIRECTIONAL":
+            self._direction = "bi-directional"
+
+    @property
+    def tcont(self):
+        self.log.debug('function-entry')
+        tcont_item = self._handler.pon_port.tconts.get(self.alloc_id)
+        return tcont_item
+
+    @property
+    def omci_transport(self):
+        self.log.debug('function-entry')
+        return self._omci_transport
+
+    def to_dict(self):
+        self.log.debug('function-entry')
+        return {
+            'port-id': self.gem_id,
+            'alloc-id': self.alloc_id,
+            'encryption': self._encryption,
+            'omci-transport': self.omci_transport
+        }
+
+    @property
+    def entity_id(self):
+        self.log.debug('function-entry')
+        return self._entity_id
+
+    @entity_id.setter
+    def entity_id(self, value):
+        self.log.debug('function-entry')
+        self._entity_id = value
+
+    @property
+    def encryption(self):
+        self.log.debug('function-entry')
+        return self._encryption
+
+    @encryption.setter
+    def encryption(self, value):
+        self.log.debug('function-entry')
+        # FIXME The encryption should come as a boolean by default.
+        # Tech-Profile currently delivers it as a string ('True'/'False'),
+        # so coerce it explicitly rather than calling eval() on it.
+        if isinstance(value, str):
+            value = (value.lower() == 'true')
+        assert isinstance(value, bool), 'encryption is a boolean'
+
+        if self._encryption != value:
+            self._encryption = value
+
+    @property
+    def discard_config(self):
+        self.log.debug('function-entry')
+        return self._discard_config
+
+    @discard_config.setter
+    def discard_config(self, discard_config):
+        self.log.debug('function-entry')
+        assert isinstance(discard_config, dict), "discard_config not dict"
+        assert 'max_probability' in discard_config, "max_probability missing"
+        assert 'max_threshold' in discard_config, "max_threshold missing"
+        assert 'min_threshold' in discard_config, "min_threshold missing"
+        self._discard_config = discard_config
+
+    @property
+    def discard_policy(self):
+        self.log.debug('function-entry')
+        return self._discard_policy
+
+    @discard_policy.setter
+    def discard_policy(self, discard_policy):
+        self.log.debug('function-entry')
+        dp = ("TailDrop", "WTailDrop", "RED", "WRED")
+        assert (isinstance(discard_policy, str))
+        assert (discard_policy in dp)
+        self._discard_policy = discard_policy
+
+    @property
+    def max_q_size(self):
+        self.log.debug('function-entry')
+        return self._max_q_size
+
+    @max_q_size.setter
+    def max_q_size(self, max_q_size):
+        self.log.debug('function-entry')
+        if isinstance(max_q_size, str):
+            assert (max_q_size == "auto")
+        else:
+            assert (isinstance(max_q_size, int))
+
+        self._max_q_size = max_q_size
+
+    @property
+    def pbit_map(self):
+        self.log.debug('function-entry')
+        return self._pbit_map
+
+    @pbit_map.setter
+    def pbit_map(self, pbit_map):
+        self.log.debug('function-entry')
+        assert (isinstance(pbit_map, str))
+        assert (len(pbit_map[2:]) == 8)  # Example format of pbit_map: "0b00000101"
+        try:
+            # validate the entire bit string, not just its first digit
+            _ = int(pbit_map[2:], 2)
+        except ValueError:
+            raise Exception("pbit_map-not-binary-string-{}".format(pbit_map))
+
+        # remove '0b'
+        self._pbit_map = pbit_map[2:]
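+        # Illustrative example (assumed bit ordering): a value of "0b00000011"
+        # stores "00000011", i.e. P-bit values 0 and 1 map to this GEM port,
+        # assuming the rightmost bit represents P-bit 0.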
+
+    @property
+    def scheduling_policy(self):
+        self.log.debug('function-entry')
+        return self._scheduling_policy
+
+    @scheduling_policy.setter
+    def scheduling_policy(self, scheduling_policy):
+        self.log.debug('function-entry')
+        sp = ("WRR", "StrictPriority")
+        assert (isinstance(scheduling_policy, str))
+        assert (scheduling_policy in sp)
+        self._scheduling_policy = scheduling_policy
+
+    @staticmethod
+    def create(handler, gem_port):
+        log = structlog.get_logger(device_id=handler.device_id)
+        log.debug('function-entry', gem_port=gem_port)
+
+        return OnuGemPort(gem_id=gem_port['gemport_id'],
+                          uni_id=gem_port['uni_id'],
+                          alloc_id=gem_port['alloc_id_ref'],
+                          direction=gem_port['direction'],
+                          encryption=gem_port['encryption'],  # aes_indicator,
+                          discard_config=gem_port['discard_config'],
+                          discard_policy=gem_port['discard_policy'],
+                          max_q_size=gem_port['max_q_size'],
+                          pbit_map=gem_port['pbit_map'],
+                          priority_q=gem_port['priority_q'],
+                          scheduling_policy=gem_port['scheduling_policy'],
+                          weight=gem_port['weight'],
+                          handler=handler,
+                          untagged=False)
+
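+    # For reference, a gem_port dict as consumed by create() above might look
+    # like this sketch (all values illustrative, not taken from a real
+    # tech profile):
+    #
+    #   {'gemport_id': 1024, 'uni_id': 0, 'alloc_id_ref': 1024,
+    #    'direction': 'UPSTREAM', 'encryption': 'False',
+    #    'discard_config': {'min_threshold': 0, 'max_threshold': 0,
+    #                       'max_probability': 0},
+    #    'discard_policy': 'TailDrop', 'max_q_size': 'auto',
+    #    'pbit_map': '0b00000011', 'priority_q': 3,
+    #    'scheduling_policy': 'WRR', 'weight': 8}
+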
+    @inlineCallbacks
+    def add_to_hardware(self, omci,
+                        tcont_entity_id,
+                        ieee_mapper_service_profile_entity_id,
+                        gal_enet_profile_entity_id,
+                        ul_prior_q_entity_id,
+                        dl_prior_q_entity_id):
+
+        self.log.debug('add-to-hardware', entity_id=self.entity_id, gem_id=self.gem_id,
+                       tcont_entity_id=tcont_entity_id,
+                       ieee_mapper_service_profile_entity_id=ieee_mapper_service_profile_entity_id,
+                       gal_enet_profile_entity_id=gal_enet_profile_entity_id,
+                       ul_prior_q_entity_id=ul_prior_q_entity_id,
+                       dl_prior_q_entity_id=dl_prior_q_entity_id)
+
+        try:
+            direction = "downstream" if self.multicast else "bi-directional"
+            assert not self.multicast, 'MCAST is not supported yet'
+
+            attributes = dict()
+            attributes['priority_queue_pointer_downstream'] = dl_prior_q_entity_id
+            msg = GemPortNetworkCtpFrame(
+                self.entity_id,  # same entity id as GEM port
+                port_id=self.gem_id,
+                tcont_id=tcont_entity_id,
+                direction=direction,
+                upstream_tm=ul_prior_q_entity_id,
+                attributes=attributes
+            )
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci.send(frame)
+            self.check_status_and_state(results, 'create-gem-port-network-ctp')
+
+        except Exception as e:
+            self.log.exception('gemport-create', e=e)
+            raise
+
+        try:
+            # TODO: magic numbers here
+            msg = GemInterworkingTpFrame(
+                self.entity_id,  # same entity id as GEM port
+                gem_port_network_ctp_pointer=self.entity_id,
+                interworking_option=5,  # IEEE 802.1
+                service_profile_pointer=ieee_mapper_service_profile_entity_id,
+                interworking_tp_pointer=0x0,
+                pptp_counter=1,
+                gal_profile_pointer=gal_enet_profile_entity_id
+            )
+            frame = msg.create()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci.send(frame)
+            self.check_status_and_state(results, 'create-gem-interworking-tp')
+
+        except Exception as e:
+            self.log.exception('interworking-create', e=e)
+            raise
+
+        returnValue(results)
+
+    @inlineCallbacks
+    def remove_from_hardware(self, omci):
+        self.log.debug('function-entry', omci=omci)
+        self.log.debug('remove-from-hardware', gem_id=self.gem_id)
+
+        try:
+            msg = GemInterworkingTpFrame(self.entity_id)
+            frame = msg.delete()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci.send(frame)
+            self.check_status_and_state(results, 'delete-gem-interworking-tp')
+        except Exception as e:
+            self.log.exception('interworking-delete', e=e)
+            raise
+
+        try:
+            msg = GemPortNetworkCtpFrame(self.entity_id)
+            frame = msg.delete()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci.send(frame)
+            self.check_status_and_state(results, 'delete-gem-port-network-ctp')
+        except Exception as e:
+            self.log.exception('gemport-delete', e=e)
+            raise
+
+        returnValue(results)
+
+    def check_status_and_state(self, results, operation=''):
+        self.log.debug('function-entry')
+        omci_msg = results.fields['omci_message'].fields
+        status = omci_msg['success_code']
+        error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
+        failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
+        unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
+
+        self.log.debug("OMCI Result: %s", operation, omci_msg=omci_msg,
+                       status=status, error_mask=error_mask,
+                       failed_mask=failed_mask, unsupported_mask=unsupported_mask)
+
+        if status == RC.Success:
+            return True
+
+        elif status == RC.InstanceExists:
+            return False
diff --git a/python/adapters/brcm_openomci_onu/onu_tcont.py b/python/adapters/brcm_openomci_onu/onu_tcont.py
new file mode 100644
index 0000000..c5414ee
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/onu_tcont.py
@@ -0,0 +1,140 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from common.frameio.frameio import hexify
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+from voltha.extensions.omci.omci_me import *
+from voltha.extensions.omci.omci_defs import *
+
+RC = ReasonCodes
+
+
+class OnuTCont(object):
+    """
+    Broadcom ONU specific implementation
+    """
+    def __init__(self, handler, uni_id, alloc_id, q_sched_policy, traffic_descriptor):
+
+        self.log = structlog.get_logger(device_id=handler.device_id, uni_id=uni_id, alloc_id=alloc_id)
+        self.log.debug('function-entry')
+
+        self.uni_id = uni_id
+        self.alloc_id = alloc_id
+        self._q_sched_policy = 0
+        self.q_sched_policy = q_sched_policy
+        self.traffic_descriptor = traffic_descriptor
+
+        self._handler = handler
+        self._entity_id = None
+
+    def __str__(self):
+        return "OnuTCont - uni_id: {}, entity_id {}, alloc-id: {}, q_sched_policy: {}, traffic_descriptor: {}".format(
+            self.uni_id, self._entity_id, self.alloc_id, self.q_sched_policy, self.traffic_descriptor)
+
+    def __repr__(self):
+        return str(self)
+
+    @property
+    def entity_id(self):
+        self.log.debug('function-entry')
+        return self._entity_id
+
+    @property
+    def q_sched_policy(self):
+        self.log.debug('function-entry')
+        return self._q_sched_policy
+
+    @q_sched_policy.setter
+    def q_sched_policy(self, q_sched_policy):
+        sp = ('Null', 'WRR', 'StrictPriority')
+        if q_sched_policy in sp:
+            self._q_sched_policy = sp.index(q_sched_policy)
+        else:
+            self._q_sched_policy = 0
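+        # e.g. 'Null' maps to 0, 'WRR' to 1 and 'StrictPriority' to 2;
+        # any other value falls back to 0.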
+
+    @staticmethod
+    def create(handler, tcont, td):
+        log = structlog.get_logger(tcont=tcont, td=td)
+        log.debug('function-entry', tcont=tcont)
+
+        return OnuTCont(handler,
+                        tcont['uni_id'],
+                        tcont['alloc-id'],
+                        tcont['q_sched_policy'],
+                        td
+                        )
+
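+    # For reference, a tcont dict as consumed by create() above might look
+    # like this sketch (values illustrative):
+    #
+    #   {'uni_id': 0, 'alloc-id': 1024, 'q_sched_policy': 'StrictPriority'}
+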
+    @inlineCallbacks
+    def add_to_hardware(self, omci, tcont_entity_id):
+        self.log.debug('add-to-hardware', tcont_entity_id=tcont_entity_id)
+
+        self._entity_id = tcont_entity_id
+
+        try:
+            # FIXME: self.q_sched_policy seems to be READ-ONLY
+            # Ideally the READ-ONLY or NOT attribute is available from ONU-2G ME
+            #msg = TcontFrame(self.entity_id, self.alloc_id, self.q_sched_policy)
+            msg = TcontFrame(self.entity_id, self.alloc_id)
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci.send(frame)
+            self.check_status_and_state(results, 'set-tcont')
+
+        except Exception as e:
+            self.log.exception('tcont-set', e=e)
+            raise
+
+        returnValue(results)
+
+    @inlineCallbacks
+    def remove_from_hardware(self, omci):
+        self.log.debug('function-entry', omci=omci)
+        self.log.debug('remove-from-hardware', tcont_entity_id=self.entity_id)
+
+        # Release tcont by setting alloc_id=0xFFFF
+        # TODO: magic number, create a named variable
+
+        try:
+            msg = TcontFrame(self.entity_id, 0xFFFF)
+            frame = msg.set()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci.send(frame)
+            self.check_status_and_state(results, 'delete-tcont')
+
+        except Exception as e:
+            self.log.exception('tcont-delete', e=e)
+            raise
+
+        returnValue(results)
+
+    def check_status_and_state(self, results, operation=''):
+        self.log.debug('function-entry')
+        omci_msg = results.fields['omci_message'].fields
+        status = omci_msg['success_code']
+        error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
+        failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
+        unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
+
+        self.log.debug("OMCI Result: %s", operation, omci_msg=omci_msg,
+                       status=status, error_mask=error_mask,
+                       failed_mask=failed_mask, unsupported_mask=unsupported_mask)
+
+        if status == RC.Success:
+            return True
+
+        elif status == RC.InstanceExists:
+            return False
diff --git a/python/adapters/brcm_openomci_onu/onu_traffic_descriptor.py b/python/adapters/brcm_openomci_onu/onu_traffic_descriptor.py
new file mode 100644
index 0000000..a70aa7e
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/onu_traffic_descriptor.py
@@ -0,0 +1,112 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+
+NONE = 0
+BEST_EFFORT_SHARING = 1
+NON_ASSURED_SHARING = 2             # Should match xpon.py values
+DEFAULT = NONE
+
+
+class OnuTrafficDescriptor(object):
+    """
+    Broadcom ONU specific implementation
+    """
+    def __init__(self, fixed, assured, maximum,
+                 additional=DEFAULT,
+                 best_effort=None,
+                 name=None):
+
+        self.log = structlog.get_logger(fixed=fixed, assured=assured, maximum=maximum, additional=additional)
+        self.log.debug('function-entry')
+
+        self.name = name
+        self.fixed_bandwidth = fixed       # bps
+        self.assured_bandwidth = assured   # bps
+        self.maximum_bandwidth = maximum   # bps
+        self.additional_bandwidth_eligibility = additional
+
+        self.best_effort = best_effort if additional == BEST_EFFORT_SHARING else None
+
+
+    @staticmethod
+    def to_string(value):
+        log = structlog.get_logger()
+        log.debug('function-entry', value=value)
+        return {
+            NON_ASSURED_SHARING: "non-assured-sharing",
+            BEST_EFFORT_SHARING: "best-effort-sharing",
+            NONE: "none"
+        }.get(value, "unknown")
+
+
+    @staticmethod
+    def from_value(value):
+        log = structlog.get_logger()
+        log.debug('function-entry', value=value)
+        return {
+            0: NONE,
+            1: BEST_EFFORT_SHARING,
+            2: NON_ASSURED_SHARING,
+        }.get(value, DEFAULT)
+
+
+    def __str__(self):
+        self.log.debug('function-entry')
+        return "OnuTrafficDescriptor: {}, {}/{}/{}".format(self.name,
+                                                        self.fixed_bandwidth,
+                                                        self.assured_bandwidth,
+                                                        self.maximum_bandwidth)
+
+    def to_dict(self):
+        self.log.debug('function-entry')
+        val = {
+            'fixed-bandwidth': self.fixed_bandwidth,
+            'assured-bandwidth': self.assured_bandwidth,
+            'maximum-bandwidth': self.maximum_bandwidth,
+            'additional-bandwidth-eligibility': OnuTrafficDescriptor.to_string(self.additional_bandwidth_eligibility)
+        }
+        return val
+
+
+    @staticmethod
+    def create(traffic_disc):
+        log = structlog.get_logger()
+        log.debug('function-entry', traffic_disc=traffic_disc)
+
+        additional = OnuTrafficDescriptor.from_value(
+            traffic_disc['additional-bw-eligibility-indicator'])
+
+        # TODO: this is all stub code; it doesn't do anything yet. Tech profiles will likely make this clearer.
+        best_effort = None
+
+        return OnuTrafficDescriptor(traffic_disc['fixed-bandwidth'],
+                                    traffic_disc['assured-bandwidth'],
+                                    traffic_disc['maximum-bandwidth'],
+                                    name=traffic_disc['name'],
+                                    best_effort=best_effort,
+                                    additional=additional)
+
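+    # For reference, a traffic_disc dict as consumed by create() above might
+    # look like this sketch (values illustrative; bandwidths in bps):
+    #
+    #   {'name': 'tcont-td-1', 'fixed-bandwidth': 0,
+    #    'assured-bandwidth': 500000, 'maximum-bandwidth': 1000000,
+    #    'additional-bw-eligibility-indicator': 0}
+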
+    @inlineCallbacks
+    def add_to_hardware(self, omci):
+        self.log.debug('function-entry', omci=omci)
+        results = succeed('TODO: Implement me')
+        returnValue(results)
+
+
+
diff --git a/python/adapters/brcm_openomci_onu/openonu.yml b/python/adapters/brcm_openomci_onu/openonu.yml
new file mode 100644
index 0000000..542fdf5
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/openonu.yml
@@ -0,0 +1,67 @@
+---
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+logging:
+    version: 1
+
+    formatters:
+      brief:
+        format: '%(message)s'
+      default:
+        format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+        datefmt: '%Y%m%dT%H%M%S'
+
+    handlers:
+        console:
+            class : logging.StreamHandler
+            level: DEBUG
+            formatter: default
+            stream: ext://sys.stdout
+        localRotatingFile:
+            class: logging.handlers.RotatingFileHandler
+            filename: openonu.log
+            formatter: default
+            maxBytes: 2097152
+            backupCount: 10
+            level: DEBUG
+        null:
+            class: logging.NullHandler
+
+    loggers:
+        amqp:
+            handlers: [null]
+            propagate: False
+        conf:
+            propagate: False
+        '': # root logger
+            handlers: [console, localRotatingFile]
+            level: DEBUG # this can be bumped up/down by -q and -v command line
+                        # options
+            propagate: False
+
+
+kafka-cluster-proxy:
+    event_bus_publisher:
+        topic_mappings:
+            'model-change-events':
+                kafka_topic: 'voltha.events'
+                filters:     [null]
+            'alarms':
+                kafka_topic: 'voltha.alarms'
+                filters:     [null]
+            'kpis':
+                kafka_topic: 'voltha.kpis'
+                filters:     [null]
+
diff --git a/python/adapters/brcm_openomci_onu/pon_port.py b/python/adapters/brcm_openomci_onu/pon_port.py
new file mode 100644
index 0000000..db1daa8
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/pon_port.py
@@ -0,0 +1,294 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from twisted.internet.defer import inlineCallbacks, returnValue
+from voltha.protos.common_pb2 import AdminState, OperStatus
+from voltha.protos.device_pb2 import Port
+from voltha.extensions.omci.tasks.task import Task
+
+BRDCM_DEFAULT_VLAN = 4091
+TASK_PRIORITY = Task.DEFAULT_PRIORITY + 10
+DEFAULT_TPID = 0x8100
+DEFAULT_GEM_PAYLOAD = 48
+
+
+class PonPort(object):
+    """Wraps northbound-port/ANI support for ONU"""
+    # TODO: possibly get from olt
+    MIN_GEM_ENTITY_ID = 0x408
+    MAX_GEM_ENTITY_ID = 0x4FF  # TODO: This limit is internal to a specific ONU; it should be more "discoverable".
+
+    def __init__(self, handler, port_no):
+        self.log = structlog.get_logger(device_id=handler.device_id, port_no=port_no)
+        self.log.debug('function-entry')
+
+        self._enabled = False
+        self._valid = True
+        self._handler = handler
+        self._deferred = None
+        self._port = None
+        self._port_number = port_no
+        self._next_entity_id = PonPort.MIN_GEM_ENTITY_ID
+
+        self._admin_state = AdminState.ENABLED
+        self._oper_status = OperStatus.ACTIVE
+
+        self._gem_ports = {}                           # gem-id -> GemPort
+        self._tconts = {}                              # alloc-id -> TCont
+
+        self.ieee_mapper_service_profile_entity_id = 0x8001
+        self.mac_bridge_port_ani_entity_id = 0x2102  # TODO: can we just use the entity id from the anis list?
+
+    def __str__(self):
+        return "PonPort - port_number: {}, next_entity_id: {}, num_gem_ports: {}, num_tconts: {}".format(
+            self._port_number, self._next_entity_id, len(self._gem_ports), len(self._tconts))
+
+    def __repr__(self):
+        return str(self)
+
+    @staticmethod
+    def create(handler, port_no):
+        log = structlog.get_logger(device_id=handler.device_id, port_no=port_no)
+        log.debug('function-entry')
+        port = PonPort(handler, port_no)
+
+        return port
+
+    def _start(self):
+        self.log.debug('function-entry')
+        self._cancel_deferred()
+
+        self._admin_state = AdminState.ENABLED
+        self._oper_status = OperStatus.ACTIVE
+        self._update_adapter_agent()
+
+    def _stop(self):
+        self.log.debug('function-entry')
+        self._cancel_deferred()
+
+        self._admin_state = AdminState.DISABLED
+        self._oper_status = OperStatus.UNKNOWN
+        self._update_adapter_agent()
+
+        # TODO: stop h/w sync
+
+    def _cancel_deferred(self):
+        self.log.debug('function-entry')
+        d1, self._deferred = self._deferred, None
+
+        for d in [d1]:
+            try:
+                if d is not None and not d.called:
+                    d.cancel()
+            except Exception:
+                pass
+
+    def delete(self):
+        self.log.debug('function-entry')
+        self.enabled = False
+        self._valid = False
+        self._handler = None
+
+    @property
+    def enabled(self):
+        self.log.debug('function-entry')
+        return self._enabled
+
+    @enabled.setter
+    def enabled(self, value):
+        self.log.debug('function-entry')
+        if self._enabled != value:
+            self._enabled = value
+
+            if value:
+                self._start()
+            else:
+                self._stop()
+
+    @property
+    def port_number(self):
+        self.log.debug('function-entry')
+        return self._port_number
+
+    @property
+    def next_gem_entity_id(self):
+        self.log.debug('function-entry')
+        entity_id = self._next_entity_id
+
+        self._next_entity_id = self._next_entity_id + 1
+        if self._next_entity_id > PonPort.MAX_GEM_ENTITY_ID:
+            self._next_entity_id = PonPort.MIN_GEM_ENTITY_ID
+
+        return entity_id
+
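+    # Illustrative behaviour of the allocator above: entity ids are handed
+    # out sequentially from 0x408 through 0x4FF, then wrap back to 0x408.
+    # Note the wrap does not check whether a re-issued id is still in use.
+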
+    @property
+    def tconts(self):
+        self.log.debug('function-entry')
+        return self._tconts
+
+    @property
+    def gem_ports(self):
+        self.log.debug('function-entry')
+        return self._gem_ports
+
+    def get_port(self):
+        """
+        Get the VOLTHA PORT object for this port
+        :return: VOLTHA Port object
+        """
+        self.log.debug('function-entry')
+
+        if self._port is None:
+            self._port = Port(port_no=self.port_number,
+                              label='PON port',
+                              type=Port.PON_ONU,
+                              admin_state=self._admin_state,
+                              oper_status=self._oper_status,
+                              peers=[])
+        return self._port
+
+    def _update_adapter_agent(self):
+        """
+        Update the port status and state in the core
+        """
+        self.log.debug('function-entry')
+        self.log.debug('update-adapter-agent', admin_state=self._admin_state,
+                       oper_status=self._oper_status)
+
+        if self._port is not None:
+            self._port.admin_state = self._admin_state
+            self._port.oper_status = self._oper_status
+
+        # adapter_agent add_port also does an update of port status
+        try:
+            self._handler.adapter_agent.add_port(self._handler.device_id, self.get_port())
+        except Exception as e:
+            self.log.exception('update-port', e=e)
+
+    def add_tcont(self, tcont, reflow=False):
+        """
+        Creates/ a T-CONT with the given alloc-id
+
+        :param tcont: (TCont) Object that maintains the TCONT properties
+        :param reflow: (boolean) If true, force add (used during h/w resync)
+        :return: (deferred)
+        """
+        self.log.debug('function-entry', tcont=tcont.alloc_id)
+
+        if not self._valid:
+            return      # Deleting
+
+        if not reflow and tcont.alloc_id in self._tconts:
+            return      # already created
+
+        self.log.info('add-tcont', tcont=tcont.alloc_id, reflow=reflow)
+        self._tconts[tcont.alloc_id] = tcont
+
+    def update_tcont_td(self, alloc_id, new_td):
+        self.log.debug('function-entry')
+
+        tcont = self._tconts.get(alloc_id)
+
+        if tcont is None:
+            return  # not-found
+
+        tcont.traffic_descriptor = new_td
+
+        # TODO: Not yet implemented
+        #TODO: How does this affect ONU tcont settings?
+        #try:
+        #    results = yield tcont.add_to_hardware(self._handler.omci)
+        #except Exception as e:
+        #    self.log.exception('tcont', tcont=tcont, e=e)
+        #    # May occur with xPON provisioning, use hw-resync to recover
+        #    results = 'resync needed'
+        # returnValue(results)
+
+    @inlineCallbacks
+    def remove_tcont(self, alloc_id):
+        self.log.debug('function-entry')
+
+        tcont = self._tconts.get(alloc_id)
+
+        if tcont is None:
+            returnValue('nop')
+
+        try:
+            del self._tconts[alloc_id]
+            results = yield tcont.remove_from_hardware(self._handler.openomci.omci_cc)
+            returnValue(results)
+
+        except Exception as e:
+            self.log.exception('delete', e=e)
+            raise
+
+    def gem_port(self, gem_id, direction):
+        self.log.debug('function-entry')
+        return self._gem_ports.get((gem_id, direction))
+
+    @property
+    def gem_ids(self):
+        """Get all GEM Port IDs used by this ONU"""
+        self.log.debug('function-entry')
+        return sorted(gem_id for gem_id, _ in self._gem_ports)
+
+    def add_gem_port(self, gem_port, reflow=False):
+        """
+        Add a GEM Port to this ONU
+
+        :param gem_port: (GemPort) GEM Port to add
+        :param reflow: (boolean) If true, force add (used during h/w resync)
+        :return: (deferred)
+        """
+        self.log.debug('function-entry', gem_port=gem_port.gem_id)
+
+        if not self._valid:
+            return  # Deleting
+
+        if not reflow and (gem_port.gem_id, gem_port.direction) in self._gem_ports:
+            return  # nop
+
+        # if this is actually a new gem port then issue the next entity_id
+        gem_port.entity_id = self.next_gem_entity_id
+        self.log.info('add-gem-port', gem_port=gem_port, reflow=reflow)
+        self._gem_ports[(gem_port.gem_id, gem_port.direction)] = gem_port
+
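+    # Note: gem ports are keyed by (gem_id, direction), so the same gem_id
+    # may legitimately appear twice, e.g. once as (1024, 'upstream') and
+    # once as (1024, 'downstream').
+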
+    @inlineCallbacks
+    def remove_gem_id(self, gem_id, direction):
+        """
+        Remove a GEM Port from this ONU
+
+        :param gem_id: (int) GEM Port ID to remove
+        :param direction: Direction of the gem port
+        :return: deferred
+        """
+        self.log.debug('function-entry', gem_id=gem_id)
+
+        gem_port = self._gem_ports.get((gem_id, direction))
+
+        if gem_port is None:
+            returnValue('nop')
+
+        try:
+            del self._gem_ports[(gem_id, direction)]
+            results = yield gem_port.remove_from_hardware(self._handler.openomci.omci_cc)
+            returnValue(results)
+
+        except Exception as ex:
+            self.log.exception('gem-port-delete', e=ex)
+            raise
+
+
diff --git a/python/adapters/brcm_openomci_onu/uni_port.py b/python/adapters/brcm_openomci_onu/uni_port.py
new file mode 100644
index 0000000..2ee307b
--- /dev/null
+++ b/python/adapters/brcm_openomci_onu/uni_port.py
@@ -0,0 +1,248 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+from enum import Enum
+from voltha.protos.common_pb2 import OperStatus, AdminState
+from voltha.protos.device_pb2 import Port
+from voltha.protos.openflow_13_pb2 import OFPPF_10GB_FD
+from voltha.core.logical_device_agent import mac_str_to_tuple
+from voltha.protos.logical_device_pb2 import LogicalPort
+from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, OFPPS_LINK_DOWN
+from voltha.protos.openflow_13_pb2 import ofp_port
+
+class UniType(Enum):
+    """
+    UNI Types Defined in G.988
+    """
+    PPTP = 'PhysicalPathTerminationPointEthernet'
+    VEIP = 'VirtualEthernetInterfacePoint'
+    # TODO: Add others as they become supported
+
+
+class UniPort(object):
+    """Wraps southbound-port(s) support for ONU"""
+
+    def __init__(self, handler, name, uni_id, port_no, ofp_port_no,
+                 type=UniType.PPTP):
+        self.log = structlog.get_logger(device_id=handler.device_id,
+                                        port_no=port_no)
+        self._enabled = False
+        self._handler = handler
+        self._name = name
+        self._port = None
+        self._port_number = port_no
+        self._ofp_port_no = ofp_port_no
+        self._logical_port_number = None
+        self._entity_id = None
+        self._mac_bridge_port_num = 0
+        self._type = type
+        self._uni_id = uni_id
+
+        self._admin_state = AdminState.ENABLED
+        self._oper_status = OperStatus.ACTIVE
+
+    def __str__(self):
+        return "UniPort - name: {}, port_number: {}, entity_id: {}, mac_bridge_port_num: {}, type: {}, ofp_port: {}"\
+            .format(self.name, self.port_number, self.entity_id, self._mac_bridge_port_num, self.type, self._ofp_port_no)
+
+    def __repr__(self):
+        return str(self)
+
+    @staticmethod
+    def create(handler, name, uni_id, port_no, ofp_port_no, type):
+        port = UniPort(handler, name, uni_id, port_no, ofp_port_no, type)
+        return port
+
+    def _start(self):
+        self._cancel_deferred()
+        self._admin_state = AdminState.ENABLED
+        self._oper_status = OperStatus.ACTIVE
+        self._update_adapter_agent()
+
+    def _stop(self):
+        self._cancel_deferred()
+        self._admin_state = AdminState.DISABLED
+        self._oper_status = OperStatus.UNKNOWN
+        self._update_adapter_agent()
+
+    def delete(self):
+        self.enabled = False
+        self._handler = None
+
+    def _cancel_deferred(self):
+        pass
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def enabled(self):
+        return self._enabled
+
+    @enabled.setter
+    def enabled(self, value):
+        if self._enabled != value:
+            self._enabled = value
+
+            if value:
+                self._start()
+            else:
+                self._stop()
+
+    @property
+    def uni_id(self):
+        """
+        Physical port index on the ONU (0..N)
+        :return: (int) uni id
+        """
+        return self._uni_id
+
+    @property
+    def mac_bridge_port_num(self):
+        """
+        Port number used when creating the MacBridgePortConfigurationDataFrame
+        :return: (int) port number
+        """
+        return self._mac_bridge_port_num
+
+    @mac_bridge_port_num.setter
+    def mac_bridge_port_num(self, value):
+        self._mac_bridge_port_num = value
+
+    @property
+    def port_number(self):
+        """
+        Physical device port number
+        :return: (int) port number
+        """
+        return self._port_number
+
+    @property
+    def entity_id(self):
+        """
+        OMCI UNI_G entity ID for port
+        """
+        return self._entity_id
+
+    @entity_id.setter
+    def entity_id(self, value):
+        assert self._entity_id is None, 'Cannot reset the Entity ID'
+        self._entity_id = value
+
+    @property
+    def logical_port_number(self):
+        """
+        Logical device port number (used as OpenFlow port for UNI)
+        :return: (int) port number
+        """
+        return self._logical_port_number
+
+    @property
+    def type(self):
+        """
+        UNI Type used in OMCI messaging
+        :return: (UniType) One of the enumerated types
+        """
+        return self._type
+
+    def _update_adapter_agent(self):
+        """
+        Update the port status and state in the core
+        """
+        self.log.debug('update-adapter-agent', admin_state=self._admin_state,
+                       oper_status=self._oper_status)
+
+        if self._port is not None:
+            self._port.admin_state = self._admin_state
+            self._port.oper_status = self._oper_status
+
+        try:
+            # adapter_agent add_port also does an update of existing port
+            self._handler.adapter_agent.add_port(self._handler.device_id,
+                                                 self.get_port())
+
+        except Exception as e:
+            self.log.exception('update-port', e=e)
+
+    def get_port(self):
+        """
+        Get the VOLTHA PORT object for this port
+        :return: VOLTHA Port object
+        """
+        self._port = Port(port_no=self.port_number,
+                          label=self.port_id_name(),
+                          type=Port.ETHERNET_UNI,
+                          admin_state=self._admin_state,
+                          oper_status=self._oper_status)
+        return self._port
+
+    def port_id_name(self):
+        return 'uni-{}'.format(self._port_number)
+
+    def add_logical_port(self, openflow_port_no, multi_uni_naming,
+                         capabilities=OFPPF_10GB_FD | OFPPF_FIBER,
+                         speed=OFPPF_10GB_FD):
+
+        self.log.debug('function-entry')
+
+        if self._logical_port_number is not None:
+            # delete old logical port if it exists
+            try:
+                port = self._handler.adapter_agent.get_logical_port(self._handler.logical_device_id,
+                                                           self.port_id_name())
+                self._handler.adapter_agent.delete_logical_port(self._handler.logical_device_id, port)
+
+            except Exception as e:
+                # assume this exception was because logical port does not already exist
+                pass
+
+            self._logical_port_number = None
+
+        port_no = openflow_port_no or self._ofp_port_no
+
+        if self._logical_port_number is None and port_no is not None:
+            self._logical_port_number = port_no
+
+            device = self._handler.adapter_agent.get_device(self._handler.device_id)
+
+            # Leave the port down until the OMCI MIB download has finished;
+            # otherwise flows get pushed before the ONU is ready.
+            openflow_port = ofp_port(
+                port_no=port_no,
+                hw_addr=mac_str_to_tuple('08:%02x:%02x:%02x:%02x:%02x' %
+                                         ((device.parent_port_no >> 8 & 0xff),
+                                          device.parent_port_no & 0xff,
+                                          (port_no >> 16) & 0xff,
+                                          (port_no >> 8) & 0xff,
+                                          port_no & 0xff)),
+                name=device.serial_number + ['', '-' + str(self._mac_bridge_port_num)][multi_uni_naming],
+                config=0,
+                state=OFPPS_LINK_DOWN,
+                curr=capabilities,
+                advertised=capabilities,
+                peer=capabilities,
+                curr_speed=speed,
+                max_speed=speed
+            )
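+            # Illustrative example: with parent_port_no 0x0110 and port_no 16,
+            # the hw_addr above works out to 08:01:10:00:00:10.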
+            self._handler.adapter_agent.add_logical_port(self._handler.logical_device_id,
+                                                         LogicalPort(
+                                                             id=self.port_id_name(),
+                                                             ofp_port=openflow_port,
+                                                             device_id=device.id,
+                                                             device_port_no=self._port_number))
+
+            self.log.debug('logical-port', id=self.port_id_name(), device_port_no=self._port_number, openflow_port=openflow_port)
diff --git a/python/adapters/iadapter.py b/python/adapters/iadapter.py
new file mode 100644
index 0000000..31c5d7a
--- /dev/null
+++ b/python/adapters/iadapter.py
@@ -0,0 +1,358 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Adapter abstract base class
+"""
+
+import structlog
+from twisted.internet import reactor
+from zope.interface import implementer
+
+from interface import IAdapterInterface
+from voltha.protos.adapter_pb2 import Adapter
+from voltha.protos.adapter_pb2 import AdapterConfig
+from voltha.protos.common_pb2 import AdminState
+from voltha.protos.common_pb2 import LogLevel
+from voltha.protos.device_pb2 import DeviceType, DeviceTypes
+from voltha.protos.health_pb2 import HealthStatus
+
+
+log = structlog.get_logger()
+
+
+@implementer(IAdapterInterface)
+class IAdapter(object):
+    def __init__(self,
+                 core_proxy,
+                 adapter_proxy,
+                 config,
+                 device_handler_class,
+                 name,
+                 vendor,
+                 version,
+                 device_type, vendor_id,
+                 accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False):
+        log.debug(
+            'Initializing adapter: {} {} {}'.format(vendor, name, version))
+        self.core_proxy = core_proxy
+        self.adapter_proxy = adapter_proxy
+        self.config = config
+        self.name = name
+        self.supported_device_types = [
+            DeviceType(
+                id=device_type,
+                vendor_id=vendor_id,
+                adapter=name,
+                accepts_bulk_flow_update=accepts_bulk_flow_update,
+                accepts_add_remove_flow_updates=accepts_add_remove_flow_updates
+            )
+        ]
+        self.descriptor = Adapter(
+            id=self.name,
+            vendor=vendor,
+            version=version,
+            config=AdapterConfig(log_level=LogLevel.INFO)
+        )
+        self.devices_handlers = dict()  # device_id -> Olt/OnuHandler()
+        self.device_handler_class = device_handler_class
+
+    def start(self):
+        log.info('Starting adapter: {}'.format(self.name))
+
+    def stop(self):
+        log.info('Stopping adapter: {}'.format(self.name))
+
+    def adapter_descriptor(self):
+        return self.descriptor
+
+    def device_types(self):
+        return DeviceTypes(items=self.supported_device_types)
+
+    def health(self):
+        # return HealthStatus(state=HealthStatus.HealthState.HEALTHY)
+        return HealthStatus(state=HealthStatus.HEALTHY)
+
+    def change_master_state(self, master):
+        raise NotImplementedError()
+
+    def get_ofp_device_info(self, device):
+        log.debug('get_ofp_device_info_start', device_id=device.id)
+        ofp_device_info = self.devices_handlers[device.id].get_ofp_device_info(
+            device)
+        log.debug('get_ofp_device_info_ends', device_id=device.id)
+        return ofp_device_info
+
+    def get_ofp_port_info(self, device, port_no):
+        log.debug('get_ofp_port_info_start', device_id=device.id,
+                  port_no=port_no)
+        ofp_port_info = self.devices_handlers[device.id].get_ofp_port_info(
+            device, port_no)
+        log.debug('get_ofp_port_info_ends', device_id=device.id,
+                  port_no=port_no)
+        return ofp_port_info
+
+    def adopt_device(self, device):
+        log.debug('adopt_device', device_id=device.id)
+        self.devices_handlers[device.id] = self.device_handler_class(self,
+                                                                     device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].activate, device)
+        log.debug('adopt_device_done', device_id=device.id)
+        return device
+
+    def reconcile_device(self, device):
+        raise NotImplementedError()
+
+    def abandon_device(self, device):
+        raise NotImplementedError()
+
+    def disable_device(self, device):
+        log.info('disable-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].disable)
+        log.debug('disable-device-done', device_id=device.id)
+        return device
+
+    def reenable_device(self, device):
+        log.info('reenable-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].reenable)
+        log.info('reenable-device-done', device_id=device.id)
+        return device
+
+    def reboot_device(self, device):
+        log.info('reboot-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].reboot)
+        log.info('reboot-device-done', device_id=device.id)
+        return device
+
+    def download_image(self, device, request):
+        raise NotImplementedError()
+
+    def get_image_download_status(self, device, request):
+        raise NotImplementedError()
+
+    def cancel_image_download(self, device, request):
+        raise NotImplementedError()
+
+    def activate_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def revert_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def self_test_device(self, device):
+        log.info('self-test', device_id=device.id)
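+        # Note: reactor.callLater returns a DelayedCall handle, not the
+        # result of the self test itself.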
+        result = reactor.callLater(0, self.devices_handlers[
+            device.id].self_test_device)
+        log.info('self-test-done', device_id=device.id)
+        return result
+
+    def delete_device(self, device):
+        log.info('delete-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].delete)
+        log.info('delete-device-done', device_id=device.id)
+        return device
+
+    def get_device_details(self, device):
+        raise NotImplementedError()
+
+    def update_flows_bulk(self, device, flows, groups):
+        log.info('bulk-flow-update', device_id=device.id,
+                 flows=flows, groups=groups)
+        assert len(groups.items) == 0
+        reactor.callLater(0, self.devices_handlers[device.id].update_flow_table,
+                          flows.items)
+        return device
+
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        log.info('incremental-flow-update', device_id=device.id,
+                 flows=flow_changes, groups=group_changes)
+        # For now, there is no support for group changes
+        assert len(group_changes.to_add.items) == 0
+        assert len(group_changes.to_remove.items) == 0
+
+        handler = self.devices_handlers[device.id]
+        # Remove flows
+        if len(flow_changes.to_remove.items) != 0:
+            reactor.callLater(0, handler.remove_from_flow_table,
+                              flow_changes.to_remove.items)
+
+        # Add flows
+        if len(flow_changes.to_add.items) != 0:
+            reactor.callLater(0, handler.add_to_flow_table,
+                              flow_changes.to_add.items)
+        return device
+
+    def update_pm_config(self, device, pm_config):
+        log.info("adapter-update-pm-config", device=device,
+                 pm_config=pm_config)
+        handler = self.devices_handlers[device.id]
+        if handler:
+            reactor.callLater(0, handler.update_pm_config, device, pm_config)
+
+    def process_inter_adapter_message(self, msg):
+        raise NotImplementedError()
+
+    def receive_packet_out(self, device_id, egress_port_no, msg):
+        raise NotImplementedError()
+
+    def suppress_alarm(self, filter):
+        raise NotImplementedError()
+
+    def unsuppress_alarm(self, filter):
+        raise NotImplementedError()
+
+    def _get_handler(self, device):
+        # simple lookup; returns None when no handler is registered
+        return self.devices_handlers.get(device.id)
+
+
+"""
+OLT Adapter base class
+"""
+
+
+class OltAdapter(IAdapter):
+    def __init__(self,
+                 core_proxy,
+                 adapter_proxy,
+                 config,
+                 device_handler_class,
+                 name,
+                 vendor,
+                 version, device_type,
+                 accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False):
+        super(OltAdapter, self).__init__(core_proxy=core_proxy,
+                                         adapter_proxy=adapter_proxy,
+                                         config=config,
+                                         device_handler_class=device_handler_class,
+                                         name=name,
+                                         vendor=vendor,
+                                         version=version,
+                                         device_type=device_type,
+                                         vendor_id=None,
+                                         accepts_bulk_flow_update=accepts_bulk_flow_update,
+                                         accepts_add_remove_flow_updates=accepts_add_remove_flow_updates)
+        self.logical_device_id_to_root_device_id = dict()
+
+    def reconcile_device(self, device):
+        try:
+            self.devices_handlers[device.id] = self.device_handler_class(self,
+                                                                         device.id)
+            # Work only required for devices that are in ENABLED state
+            if device.admin_state == AdminState.ENABLED:
+                reactor.callLater(0,
+                                  self.devices_handlers[device.id].reconcile,
+                                  device)
+            else:
+                # Invoke the children reconciliation which would setup the
+                # basic children data structures
+                self.core_proxy.reconcile_child_devices(device.id)
+            return device
+        except Exception as e:
+            log.exception('Exception', e=e)
+
+    def send_proxied_message(self, proxy_address, msg):
+        log.info('send-proxied-message', proxy_address=proxy_address, msg=msg)
+        handler = self.devices_handlers[proxy_address.device_id]
+        handler.send_proxied_message(proxy_address, msg)
+
+    def process_inter_adapter_message(self, msg):
+        log.debug('process-inter-adapter-message', msg=msg)
+        # Unpack the header to know which device needs to handle this message
+        handler = None
+        if msg.header.proxy_device_id:
+            # typical request
+            handler = self.devices_handlers[msg.header.proxy_device_id]
+        elif msg.header.to_device_id and \
+                msg.header.to_device_id in self.devices_handlers:
+            # typical response
+            handler = self.devices_handlers[msg.header.to_device_id]
+        if handler:
+            reactor.callLater(0, handler.process_inter_adapter_message, msg)
+
+    def receive_packet_out(self, device_id, egress_port_no, msg):
+        try:
+            log.info('receive_packet_out', device_id=device_id,
+                     egress_port=egress_port_no, msg=msg)
+            handler = self.devices_handlers[device_id]
+            if handler:
+                reactor.callLater(0, handler.packet_out, egress_port_no, msg.data)
+        except Exception as e:
+            log.exception('packet-out-failure', e=e)
+
+
+"""
+ONU Adapter base class
+"""
+
+
+class OnuAdapter(IAdapter):
+    def __init__(self,
+                 core_proxy,
+                 adapter_proxy,
+                 config,
+                 device_handler_class,
+                 name,
+                 vendor,
+                 version,
+                 device_type,
+                 vendor_id,
+                 accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False):
+        super(OnuAdapter, self).__init__(core_proxy=core_proxy,
+                                         adapter_proxy=adapter_proxy,
+                                         config=config,
+                                         device_handler_class=device_handler_class,
+                                         name=name,
+                                         vendor=vendor,
+                                         version=version,
+                                         device_type=device_type,
+                                         vendor_id=vendor_id,
+                                         accepts_bulk_flow_update=accepts_bulk_flow_update,
+                                         accepts_add_remove_flow_updates=accepts_add_remove_flow_updates)
+
+    def reconcile_device(self, device):
+        self.devices_handlers[device.id] = self.device_handler_class(self,
+                                                                     device.id)
+        # Reconcile only if state was ENABLED
+        if device.admin_state == AdminState.ENABLED:
+            reactor.callLater(0,
+                              self.devices_handlers[device.id].reconcile,
+                              device)
+        return device
+
+    def receive_proxied_message(self, proxy_address, msg):
+        log.info('receive-proxied-message', proxy_address=proxy_address,
+                 device_id=proxy_address.device_id, msg=msg)
+        # Device_id from the proxy_address is the olt device id. We need to
+        # get the onu device id using the port number in the proxy_address
+        device = self.core_proxy. \
+            get_child_device_with_proxy_address(proxy_address)
+        if device:
+            handler = self.devices_handlers[device.id]
+            handler.receive_message(msg)
+
+    def process_inter_adapter_message(self, msg):
+        log.info('process-inter-adapter-message', msg=msg)
+        # Unpack the header to know which device needs to handle this message
+        if msg.header:
+            handler = self.devices_handlers[msg.header.to_device_id]
+            handler.process_inter_adapter_message(msg)
diff --git a/python/adapters/interface.py b/python/adapters/interface.py
new file mode 100644
index 0000000..b0390d8
--- /dev/null
+++ b/python/adapters/interface.py
@@ -0,0 +1,459 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Interface definition for Voltha Adapters
+"""
+from zope.interface import Interface
+
+
+class IAdapterInterface(Interface):
+    """
+    A Voltha adapter.  This interface is used by the Voltha Core to initiate
+    requests towards a voltha adapter.
+    """
+
+    def adapter_descriptor():
+        """
+        Return the adapter descriptor object for this adapter.
+        :return: voltha.Adapter grpc object (see voltha/protos/adapter.proto),
+        with adapter-specific information and config extensions.
+        """
+
+    def device_types():
+        """
+        Return list of device types supported by the adapter.
+        :return: voltha.DeviceTypes protobuf object, with optional type
+        specific extensions.
+        """
+
+    def health():
+        """
+        Return a 3-state health status using the voltha.HealthStatus message.
+        :return: Deferred or direct return with voltha.HealthStatus message
+        """
+
+    def adopt_device(device):
+        """
+        Make sure the adapter looks after given device. Called when a device
+        is provisioned top-down and needs to be activated by the adapter.
+        :param device: A voltha.Device object, with possible device-type
+        specific extensions. Such extensions shall be described as part of
+        the device type specification returned by device_types().
+        :return: (Deferred) Shall be fired to acknowledge device ownership.
+        """
+
+    def reconcile_device(device):
+        """
+        Make sure the adapter looks after given device. Called when this
+        device has changed ownership from another Voltha instance to
+        this one (typically, this occurs when the previous voltha
+        instance went down).
+        :param device: A voltha.Device object, with possible device-type
+        specific extensions. Such extensions shall be described as part of
+        the device type specification returned by device_types().
+        :return: (Deferred) Shall be fired to acknowledge device ownership.
+        """
+
+    def abandon_device(device):
+        """
+        Make sure the adapter no longer looks after the device. This is
+        called if device ownership is taken over by another Voltha instance.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge abandonment.
+        """
+
+    def disable_device(device):
+        """
+        This is called when a previously enabled device needs to be disabled
+        based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge disabling the device.
+        """
+
+    def reenable_device(device):
+        """
+        This is called when a previously disabled device needs to be enabled
+        based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge re-enabling the
+        device.
+        """
+
+    def reboot_device(device):
+        """
+        This is called to reboot a device based on an NBI call. The admin
+        state of the device will not change after the reboot.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the reboot.
+        """
+
+    def download_image(device, request):
+        """
+        This is called to request downloading a specified image into
+        the standby partition of a device based on an NBI call.
+        This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the download.
+        """
+
+    def get_image_download_status(device, request):
+        """
+        This is called to inquire about the status of a previously requested
+        image download, based on an NBI call. The adapter is expected to
+        update the ImageDownload DB object with the query result.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the query.
+        """
+
+    def cancel_image_download(device, request):
+        """
+        This is called to cancel a previously requested image download,
+        based on an NBI call. The admin state of the device will not
+        change after the cancellation.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the cancellation.
+        """
+
+    def activate_image_update(device, request):
+        """
+        This is called to activate a downloaded image, moving it from the
+        standby partition to the active partition.
+        Depending on the device implementation, this call may or may not
+        cause a device reboot. If it does not, a reboot is required to run
+        the activated image on the device.
+        This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) OperationResponse object.
+        """
+
+    def revert_image_update(device, request):
+        """
+        This is called to deactivate the specified image in the active
+        partition and revert to the previous image in the standby partition.
+        Depending on the device implementation, this call may or may not
+        cause a device reboot. If it does not, a reboot is required to run
+        the previous image on the device.
+        This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) OperationResponse object.
+        """
+
+    def self_test_device(device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: Will return the result of the self test.
+        """
+
+    def delete_device(device):
+        """
+        This is called to delete a device from the PON based on an NBI call.
+        If the device is an OLT then the whole PON will be deleted.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the deletion.
+        """
+
+    def get_device_details(device):
+        """
+        This is called to get additional device details based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the retrieval of
+        additional details.
+        """
+
+    def update_flows_bulk(device, flows, groups):
+        """
+        Called after any flow table change, but only if the device supports
+        bulk mode, which is expressed by the 'accepts_bulk_flow_update'
+        capability attribute of the device type.
+        :param device: A Voltha.Device object.
+        :param flows: An openflow_v13.Flows object
+        :param groups: An openflow_v13.FlowGroups object
+        :return: (Deferred or None)
+        """
+
+    def update_flows_incrementally(device, flow_changes, group_changes):
+        """
+        Called after a flow table update, but only if the device supports
+        non-bulk mode, which is expressed by the 'accepts_add_remove_flow_updates'
+        capability attribute of the device type.
+        :param device: A Voltha.Device object.
+        :param flow_changes: An openflow_v13.FlowChanges object
+        :param group_changes: An openflow_v13.FlowGroupChanges object
+        :return: (Deferred or None)
+        """
+
+    def update_pm_config(device, pm_configs):
+        """
+        Called every time a request is made to change the PM collection
+        behavior.
+        :param device: A Voltha.Device object
+        :param pm_configs: A Voltha.PmConfigs object
+        """
+
+    def receive_packet_out(device_id, egress_port_no, msg):
+        """
+        Pass a packet_out message content to adapter so that it can forward
+        it out to the device. This is only called on root devices.
+        :param device_id: device ID
+        :param egress_port_no: egress logical port number
+        :param msg: actual message
+        :return: None
+        """
+
+    def suppress_alarm(filter):
+        """
+        Inform an adapter that all incoming alarms should be suppressed
+        :param filter: A Voltha.AlarmFilter object.
+        :return: (Deferred) Shall be fired to acknowledge the suppression.
+        """
+
+    def unsuppress_alarm(filter):
+        """
+        Inform an adapter that all incoming alarms should resume
+        :param filter: A Voltha.AlarmFilter object.
+        :return: (Deferred) Shall be fired to acknowledge the unsuppression.
+        """
+
+    def get_ofp_device_info(device):
+        """
+        Retrieve the OLT device info. This includes the ofp_desc and
+        ofp_switch_features. The existing ofp structures can be used,
+        or all the attributes get added to the Device definition or a new proto
+        definition gets created. This API will allow the Core to create a
+        LogicalDevice associated with this device (OLT only).
+        :param device: device
+        :return: Proto Message (TBD)
+        """
+
+    def get_ofp_port_info(device, port_no):
+        """
+        Retrieve the port info. This includes the ofp_port. The existing ofp
+        structure can be used, or all the attributes get added to the Port
+        definitions or a new proto definition gets created.  This API will allow
+        the Core to create a LogicalPort associated with this device.
+        :param device: device
+        :param port_no: port number
+        :return: Proto Message (TBD)
+        """
+
+    def process_inter_adapter_message(msg):
+        """
+        Called when the adapter receives a message that was sent to it directly
+        from another adapter. An adapter is automatically registered for these
+        messages when creating the inter-container kafka proxy. Note that it is
+        the responsibility of the sending and receiving adapters to properly encode
+        and decode the message.
+        :param msg: Proto Message (any)
+        :return: Proto Message Response
+        """
+
+
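+# A minimal sketch (illustrative only, names hypothetical) of how an adapter
+# class might declare that it provides this interface via zope.interface:
+#
+#     from zope.interface import implementer
+#
+#     @implementer(IAdapterInterface)
+#     class MyAdapter(object):
+#         def adopt_device(self, device):
+#             pass  # activate the device, then fire a Deferred
+
+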
+class ICoreSouthBoundInterface(Interface):
+    """
+    Represents a Voltha Core. This is used by an adapter to initiate async
+    calls towards Voltha Core.
+    """
+
+    def get_device(device_id):
+        """
+        Retrieve a device using its ID.
+        :param device_id: a device ID
+        :return: Device Object or None
+        """
+
+    def get_child_device(parent_device_id, **kwargs):
+        """
+        Retrieve a child device object belonging to the specified parent
+        device based on some match criteria. The first child device that
+        matches the provided criteria is returned.
+        :param parent_device_id: parent's device protobuf ID
+        :param **kwargs: arbitrary set of match criteria; the value in each
+        key-value pair must be a protobuf type
+        :return: Child Device Object or None
+        """
+
+    def get_ports(device_id, port_type):
+        """
+        Retrieve all the ports of a given type of a Device.
+        :param device_id: a device ID
+        :param port_type: type of port
+        :return: Ports object
+        """
+
+    def get_child_devices(parent_device_id):
+        """
+        Get all child devices given a parent device id
+        :param parent_device_id: The parent device ID
+        :return: Devices object
+        """
+
+    def get_child_device_with_proxy_address(proxy_address):
+        """
+        Get a child device based on its proxy address. Proxy address is
+        defined as {parent id, channel_id}
+        :param proxy_address: A Device.ProxyAddress object
+        :return: Device object or None
+        """
+
+    def device_state_update(device_id,
+                            oper_status=None,
+                            connect_status=None):
+        """
+        Update a device state.
+        :param device_id: The device ID
+        :param oper_status: Operational status of the device
+        :param connect_status: Connection status of the device
+        :return: None
+        """
+
+    def child_device_detected(parent_device_id,
+                              parent_port_no,
+                              child_device_type,
+                              channel_id,
+                              **kw):
+        """
+        A child device has been detected.  Core will create the device along
+        with its unique ID.
+        :param parent_device_id: The parent device ID
+        :param parent_port_no: The parent port number
+        :param child_device_type: The child device type
+        :param channel_id: A unique identifier for that child device within
+        the parent device (e.g. vlan_id)
+        :param kw: A set of key-value pairs where each value is a protobuf
+        message
+        :return: None
+        """
+
+    def device_update(device):
+        """
+        Event corresponding to a device update.
+        :param device: Device Object
+        :return: None
+        """
+
+    def child_device_removed(parent_device_id, child_device_id):
+        """
+        Event indicating a child device has been removed from a parent.
+        :param parent_device_id: Device ID of the parent
+        :param child_device_id: Device ID of the child
+        :return: None
+        """
+
+    def child_devices_state_update(parent_device_id,
+                                   oper_status=None,
+                                   connect_status=None,
+                                   admin_status=None):
+        """
+        Event indicating the status of all child devices has changed.
+        :param parent_device_id: Device ID of the parent
+        :param oper_status: Operational status
+        :param connect_status: Connection status
+        :param admin_status: Admin status
+        :return: None
+        """
+
+    def child_devices_removed(parent_device_id):
+        """
+        Event indicating all child devices have been removed from a parent.
+        :param parent_device_id: Device ID of the parent device
+        :return: None
+        """
+
+    def device_pm_config_update(device_pm_config, init=False):
+        """
+        Event corresponding to a PM config update of a device.
+        :param device_pm_config: a PmConfigs object
+        :param init: True indicates initializing stage
+        :return: None
+        """
+
+    def port_created(device_id, port):
+        """
+        A port has been created and needs to be added to a device.
+        :param device_id: a device ID
+        :param port: Port object
+        :return: None
+        """
+
+    def port_removed(device_id, port):
+        """
+        A port has been removed and needs to be removed from a device.
+        :param device_id: a device ID
+        :param port: a Port object
+        :return: None
+        """
+
+    def ports_enabled(device_id):
+        """
+        All ports on that device have been re-enabled. The Core will change
+        the admin state to ENABLED and operational state to ACTIVE for all
+        ports on that device.
+        :param device_id: a device ID
+        :return: None
+        """
+
+    def ports_disabled(device_id):
+        """
+        All ports on that device have been disabled. The Core will change the
+        admin status to DISABLED and operational state to UNKNOWN for all
+        ports on that device.
+        :param device_id: a device ID
+        :return: None
+        """
+
+    def ports_oper_status_update(device_id, oper_status):
+        """
+        The operational status of all ports of a Device has been changed.
+        The Core will update the operational status for all ports on the
+        device.
+        :param device_id: a device ID
+        :param oper_status: operational Status
+        :return: None
+        """
+
+    def image_download_update(img_dnld):
+        """
+        Event corresponding to an image download update.
+        :param img_dnld: an ImageDownload object
+        :return: None
+        """
+
+    def image_download_deleted(img_dnld):
+        """
+        Event corresponding to the deletion of a downloaded image. The
+        references to this image need to be removed from the Core.
+        :param img_dnld: an ImageDownload object
+        :return: None
+        """
+
+    def packet_in(device_id, egress_port_no, packet):
+        """
+        Sends a packet to the SDN controller via the Voltha Core.
+        :param device_id: The OLT device ID
+        :param egress_port_no: The port number representing the ONU (cvid)
+        :param packet: The actual packet
+        :return: None
+        """
diff --git a/python/common/__init__.py b/python/common/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/event_bus.py b/python/common/event_bus.py
new file mode 100644
index 0000000..e717c16
--- /dev/null
+++ b/python/common/event_bus.py
@@ -0,0 +1,194 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A simple internal pub/sub event bus with topics and filter-based registration.
+"""
+import re
+
+import structlog
+
+
+log = structlog.get_logger()
+
+
+class _Subscription(object):
+
+    __slots__ = ('bus', 'predicate', 'callback', 'topic')
+
+    def __init__(self, bus, predicate, callback, topic=None):
+        self.bus = bus
+        self.predicate = predicate
+        self.callback = callback
+        self.topic = topic
+
+
+class EventBus(object):
+
+    def __init__(self):
+        self.subscriptions = {}  # topic -> list of _Subscription objects
+                                 # topic None holds regexp based topic subs.
+        self.subs_topic_map = {}  # to aid fast lookup when unsubscribing
+
+    def list_subscribers(self, topic=None):
+        if topic is None:
+            return sum(self.subscriptions.itervalues(), [])
+        else:
+            if topic in self.subscriptions:
+                return self.subscriptions[topic]
+            else:
+                return []
+
+    @staticmethod
+    def _get_topic_key(topic):
+        if isinstance(topic, str):
+            return topic
+        elif hasattr(topic, 'match'):
+            return None
+        else:
+            raise AttributeError('topic is neither a string nor a compiled regex')
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function signature def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        subscription = _Subscription(self, predicate, callback, topic)
+        topic_key = self._get_topic_key(topic)
+        self.subscriptions.setdefault(topic_key, []).append(subscription)
+        self.subs_topic_map[subscription] = topic_key
+        return subscription
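+
+    # Usage sketch: topics may also be compiled regexes; such subscriptions
+    # are stored under the None key and matched against the topic on publish:
+    #
+    #     bus.subscribe(re.compile(r'^adapters\..*'), my_callback)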
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        topic_key = self.subs_topic_map[subscription]
+        self.subscriptions[topic_key].remove(subscription)
+
+    def publish(self, topic, msg):
+        """
+        Publish given message to all subscribers registered with topic taking
+        the predicate functions into account.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+        from copy import copy
+
+        def passes(msg, predicate):
+            try:
+                return predicate(msg)
+            except Exception:
+                return False  # failed predicate function treated as no match
+
+        # lookup subscribers with explicit topic subscriptions
+        subscribers = self.subscriptions.get(topic, [])
+
+        # add matching regexp topic subscribers
+        subscribers.extend(s for s in self.subscriptions.get(None, [])
+                           if s.topic.match(topic))
+
+        # iterate over a shallow-copy of subscribers
+        for candidate in copy(subscribers):
+            predicate = candidate.predicate
+            if predicate is None or passes(msg, predicate):
+                try:
+                    candidate.callback(topic, msg)
+                except Exception as e:
+                    log.exception('callback-failed', e=repr(e), topic=topic)
+
+
+default_bus = EventBus()
+
+
+class EventBusClient(object):
+    """
+    Primary interface to the EventBus. Usage:
+
+    Publish:
+    >>> events = EventBusClient()
+    >>> msg = dict(a=1, b='foo')
+    >>> events.publish('a.topic', msg)
+
+    Subscribe to get all messages on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event)
+
+    Subscribe to get messages matching predicate on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event, lambda msg: len(msg) < 100)
+
+    Use a DeferredQueue to buffer incoming messages
+    >>> queue = DeferredQueue()
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', lambda _, msg: queue.put(msg))
+
+    """
+    def __init__(self, bus=None):
+        """
+        Obtain a client interface for the pub/sub event bus.
+        :param bus: An optional specific event bus. Intended mainly for test
+        use. If not provided, the process-wide default bus is used, which is
+        the preferred usage (a process should not need more than one bus).
+        """
+        self.bus = bus or default_bus
+
+    def publish(self, topic, msg):
+        """
+        Publish given msg to given topic.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+        self.bus.publish(topic, msg)
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function with signature
+        def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        return self.bus.subscribe(topic, callback, predicate)
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        return self.bus.unsubscribe(subscription)
+
+    def list_subscribers(self, topic=None):
+        """
+        Return the list of subscribers. If a topic is provided, the list is
+        filtered to those subscribed to that topic.
+        :param topic: Optional topic
+        :return: List of subscriptions
+        """
+        return self.bus.list_subscribers(topic)
diff --git a/python/common/frameio/__init__.py b/python/common/frameio/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/frameio/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/frameio/frameio.py b/python/common/frameio/frameio.py
new file mode 100644
index 0000000..3f5bcf6
--- /dev/null
+++ b/python/common/frameio/frameio.py
@@ -0,0 +1,437 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A module that can send and receive raw Ethernet frames on a set of
+interfaces, and can manage a set of vlan interfaces on top of existing
+interfaces. Due to its reliance on raw sockets, this module requires
+root access. Also, since raw sockets are hard to deal with in Twisted
+(they are not directly supported), we run the receiver select loop on a
+dedicated thread.
+"""
+
+import fcntl
+import os
+import select
+import socket
+import struct
+import sys
+import uuid
+from threading import Thread, Condition
+
+import structlog
+from pcapy import BPFProgram
+
+from scapy.data import ETH_P_ALL
+from twisted.internet import reactor
+from zope.interface import implementer
+
+from voltha.registry import IComponent
+
+if sys.platform.startswith('linux'):
+    from common.frameio.third_party.oftest import afpacket, netutils
+elif sys.platform == 'darwin':
+    from scapy.arch import pcapdnet, BIOCIMMEDIATE, dnet
+
+log = structlog.get_logger()
+
+
+def hexify(buffer):
+    """
+    Return a hexadecimal string encoding of input buffer
+    """
+    return ''.join('%02x' % ord(c) for c in buffer)
+
+
+class _SelectWakerDescriptor(object):
+    """
+    A descriptor that can be mixed into a select loop to wake it up.
+    """
+    def __init__(self):
+        self.pipe_read, self.pipe_write = os.pipe()
+        fcntl.fcntl(self.pipe_write, fcntl.F_SETFL, os.O_NONBLOCK)
+
+    def __del__(self):
+        os.close(self.pipe_read)
+        os.close(self.pipe_write)
+
+    def fileno(self):
+        return self.pipe_read
+
+    def wait(self):
+        os.read(self.pipe_read, 1)
+
+    def notify(self):
+        """Trigger a select loop"""
+        os.write(self.pipe_write, '\x00')
+
+
+class BpfProgramFilter(object):
+    """
+    Convenience packet filter based on the well-tried Berkeley Packet Filter,
+    used by many well known open source tools such as pcap and tcpdump.
+    """
+    def __init__(self, program_string):
+        """
+        Create a filter using the BPF command syntax. To learn more,
+        consult 'man pcap-filter'.
+        :param program_string: The textual definition of the filter. Examples:
+        'vlan 1000'
+        'vlan 1000 and ip src host 10.10.10.10'
+        """
+        self.bpf = BPFProgram(program_string)
+
+    def __call__(self, frame):
+        """
+        Return 1 if frame passes filter.
+        :param frame: Raw frame provided as Python string
+        :return: 1 if frame satisfies filter, 0 otherwise.
+        """
+        return self.bpf.filter(frame)
+
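+# Usage sketch: keep only frames tagged with VLAN 1000 (handle() and the
+# frame source are assumptions for illustration):
+#
+#     is_ours = BpfProgramFilter('vlan 1000')
+#     if is_ours(frame):
+#         handle(frame)
+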
+
+class FrameIOPort(object):
+    """
+    Represents a network interface on which we can send/receive raw
+    Ethernet frames.
+    """
+
+    RCV_SIZE_DEFAULT = 4096
+    ETH_P_ALL = 0x03
+    RCV_TIMEOUT = 10000
+    MIN_PKT_SIZE = 60
+
+    def __init__(self, iface_name):
+        self.iface_name = iface_name
+        self.proxies = []
+        self.socket = self.open_socket(self.iface_name)
+        log.debug('socket-opened', fn=self.fileno(), iface=iface_name)
+        self.received = 0
+        self.discarded = 0
+
+    def add_proxy(self, proxy):
+        self.proxies.append(proxy)
+
+    def del_proxy(self, proxy):
+        self.proxies = [p for p in self.proxies if p.name != proxy.name]
+
+    def open_socket(self, iface_name):
+        raise NotImplementedError('to be implemented by derived class')
+
+    def rcv_frame(self):
+        raise NotImplementedError('to be implemented by derived class')
+
+    def __del__(self):
+        if self.socket:
+            self.socket.close()
+            self.socket = None
+        log.debug('socket-closed', iface=self.iface_name)
+
+    def fileno(self):
+        return self.socket.fileno()
+
+    def _dispatch(self, proxy, frame):
+        log.debug('calling-publisher', proxy=proxy.name, frame=hexify(frame))
+        try:
+            proxy.callback(proxy, frame)
+        except Exception as e:
+            log.exception('callback-error',
+                          explanation='Callback failed while processing frame',
+                          e=e)
+
+    def recv(self):
+        """Called on the select thread when a packet arrives"""
+        try:
+            frame = self.rcv_frame()
+        except RuntimeError as e:
+            # We have observed this happening sometimes right after the
+            # socket was attached to a newly created veth interface, so we
+            # log it and allow processing to continue.
+            log.warn('afpacket-recv-error', e=e)
+            return
+
+        log.debug('frame-received', iface=self.iface_name, len=len(frame),
+                  hex=hexify(frame))
+        self.received += 1
+        dispatched = False
+        for proxy in self.proxies:
+            if proxy.filter is None or proxy.filter(frame):
+                log.debug('frame-dispatched')
+                dispatched = True
+                reactor.callFromThread(self._dispatch, proxy, frame)
+
+        if not dispatched:
+            self.discarded += 1
+            log.debug('frame-discarded')
+
+    def send(self, frame):
+        log.debug('sending', len=len(frame), iface=self.iface_name)
+        sent_bytes = self.send_frame(frame)
+        if sent_bytes != len(frame):
+            log.error('send-error', iface=self.iface_name,
+                      wanted_to_send=len(frame), actually_sent=sent_bytes)
+        return sent_bytes
+
+    def send_frame(self, frame):
+        try:
+            return self.socket.send(frame)
+        except socket.error as err:
+            if err.errno == os.errno.EINVAL and len(frame) < self.MIN_PKT_SIZE:
+                # pad short frames up to the minimum packet size and retry
+                padding = '\x00' * (self.MIN_PKT_SIZE - len(frame))
+                return self.socket.send(frame + padding)
+            raise
+
+    def up(self):
+        if sys.platform.startswith('darwin'):
+            pass
+        else:
+            os.system('ip link set {} up'.format(self.iface_name))
+        return self
+
+    def down(self):
+        if sys.platform.startswith('darwin'):
+            pass
+        else:
+            os.system('ip link set {} down'.format(self.iface_name))
+        return self
+
+    def statistics(self):
+        return self.received, self.discarded
+
+
+class LinuxFrameIOPort(FrameIOPort):
+
+    def open_socket(self, iface_name):
+        s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0)
+        afpacket.enable_auxdata(s)
+        s.bind((self.iface_name, self.ETH_P_ALL))
+        netutils.set_promisc(s, iface_name)
+        s.settimeout(self.RCV_TIMEOUT)
+        return s
+
+    def rcv_frame(self):
+        return afpacket.recv(self.socket, self.RCV_SIZE_DEFAULT)
+
+
+class DarwinFrameIOPort(FrameIOPort):
+
+    def open_socket(self, iface_name):
+        sin = pcapdnet.open_pcap(iface_name, 1600, 1, 100)
+        try:
+            fcntl.ioctl(sin.fileno(), BIOCIMMEDIATE, struct.pack("I", 1))
+        except IOError:
+            # enabling immediate mode is best-effort; ignore failures
+            pass
+
+        # need a different kind of socket for sending out
+        self.sout = dnet.eth(iface_name)
+
+        return sin
+
+    def send_frame(self, frame):
+        return self.sout.send(frame)
+
+    def rcv_frame(self):
+        pkt = self.socket.next()
+        if pkt is not None:
+            ts, pkt = pkt
+        return pkt
+
+
+if sys.platform == 'darwin':
+    _FrameIOPort = DarwinFrameIOPort
+elif sys.platform.startswith('linux'):
+    _FrameIOPort = LinuxFrameIOPort
+else:
+    raise Exception('Unsupported platform {}'.format(sys.platform))
+
+
+class FrameIOPortProxy(object):
+    """Makes FrameIOPort sharable between multiple users"""
+
+    def __init__(self, frame_io_port, callback, filter=None, name=None):
+        self.frame_io_port = frame_io_port
+        self.callback = callback
+        self.filter = filter
+        self.name = uuid.uuid4().hex[:12] if name is None else name
+
+    @property
+    def iface_name(self):
+        return self.frame_io_port.iface_name
+
+    def get_iface_name(self):
+        return self.frame_io_port.iface_name
+
+    def send(self, frame):
+        return self.frame_io_port.send(frame)
+
+    def up(self):
+        self.frame_io_port.up()
+        return self
+
+    def down(self):
+        self.frame_io_port.down()
+        return self
+
+
+@implementer(IComponent)
+class FrameIOManager(Thread):
+    """
+    Packet/Frame IO manager that can be used to send/receive raw frames
+    on a set of network interfaces.
+    """
+    def __init__(self):
+        super(FrameIOManager, self).__init__()
+
+        self.ports = {}  # iface_name -> FrameIOPort
+        self.queue = {}  # iface_name -> TODO
+
+        self.cvar = Condition()
+        self.waker = _SelectWakerDescriptor()
+        self.stopped = False
+        self.ports_changed = False
+
+    # ~~~~~~~~~~~ exposed methods callable from main thread ~~~~~~~~~~~~~~~~~~~
+
+    def start(self):
+        """
+        Start the IO manager and its select loop thread
+        """
+        log.debug('starting')
+        super(FrameIOManager, self).start()
+        log.info('started')
+        return self
+
+    def stop(self):
+        """
+        Stop the IO manager and its thread with the select loop
+        """
+        log.debug('stopping')
+        self.stopped = True
+        self.waker.notify()
+        self.join()
+        del self.ports
+        log.info('stopped')
+
+    def list_interfaces(self):
+        """
+        Return the interfaces listened on
+        :return: dict of iface_name -> FrameIOPort objects
+        """
+        return self.ports
+
+    def open_port(self, iface_name, callback, filter=None, name=None):
+        """
+        Add a new interface and start receiving on it.
+        :param iface_name: Name of the interface. Must be an existing Unix
+        interface (eth0, en0, etc.)
+        :param callback: Called on each received frame;
+        signature: def callback(port, frame) where port is the FrameIOPort
+        instance at which the frame was received and frame is the actual
+        frame received (as a binary string)
+        :param filter: An optional filter (predicate), with signature:
+        def filter(frame). If provided, only frames for which filter evaluates
+        to True will be forwarded to callback.
+        :return: FrameIOPortProxy instance.
+        """
+
+        port = self.ports.get(iface_name)
+        if port is None:
+            port = _FrameIOPort(iface_name)
+            self.ports[iface_name] = port
+            self.ports_changed = True
+            self.waker.notify()
+
+        proxy = FrameIOPortProxy(port, callback, filter, name)
+        port.add_proxy(proxy)
+
+        return proxy
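+
+    # Usage sketch (interface name and callback are assumptions; requires
+    # root due to the underlying raw sockets):
+    #
+    #     mgr = FrameIOManager().start()
+    #     proxy = mgr.open_port('eth0', my_callback,
+    #                           filter=BpfProgramFilter('vlan 1000'))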
+
+    def close_port(self, proxy):
+        """
+        Remove the proxy. If this is the last proxy on an interface, stop and
+        remove the named interface as well
+        :param proxy: FrameIOPortProxy reference
+        :return: None
+        """
+        assert isinstance(proxy, FrameIOPortProxy)
+        iface_name = proxy.get_iface_name()
+        assert iface_name in self.ports, "iface_name {} unknown".format(iface_name)
+        port = self.ports[iface_name]
+        port.del_proxy(proxy)
+
+        if not port.proxies:
+            del self.ports[iface_name]
+            # need to exit select loop to reconstruct select fd lists
+            self.ports_changed = True
+            self.waker.notify()
+
+    def send(self, iface_name, frame):
+        """
+        Send frame on given interface
+        :param iface_name: Name of previously registered interface
+        :param frame: frame as string
+        :return: number of bytes sent
+        """
+        return self.ports[iface_name].send(frame)
+
+    # ~~~~~~~~~~~~ Thread methods (running on non-main thread) ~~~~~~~~~~~~~~~~
+
+    def run(self):
+        """
+        Called on the alien thread, this is the core multi-port receive loop
+        """
+
+        log.debug('select-loop-started')
+
+        # outer loop constructs sockets list for select
+        while not self.stopped:
+            sockets = [self.waker] + self.ports.values()
+            self.ports_changed = False
+            empty = []
+            # inner select loop
+
+            while not self.stopped:
+                try:
+                    _in, _out, _err = select.select(sockets, empty, empty, 1)
+                except Exception as e:
+                    log.exception('frame-io-select-error', e=e)
+                    break
+                with self.cvar:
+                    for port in _in:
+                        if port is self.waker:
+                            self.waker.wait()
+                            continue
+                        else:
+                            port.recv()
+                    self.cvar.notify_all()
+                if self.ports_changed:
+                    break  # break inner loop so we reconstruct sockets list
+
+        log.debug('select-loop-exited')
+
+    def del_interface(self, iface_name):
+        """
+        Delete the named interface and stop receiving on it.
+        :param iface_name: Name of a previously registered interface
+        :return: None
+        """
+        log.info('deleting-interface', iface=iface_name)
+        del self.ports[iface_name]
+        # exit the select loop so the fd lists are rebuilt without this port
+        self.ports_changed = True
+        self.waker.notify()
+        log.info('interface-deleted', iface=iface_name)
diff --git a/python/common/frameio/third_party/__init__.py b/python/common/frameio/third_party/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/frameio/third_party/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/frameio/third_party/oftest/LICENSE b/python/common/frameio/third_party/oftest/LICENSE
new file mode 100644
index 0000000..3216042
--- /dev/null
+++ b/python/common/frameio/third_party/oftest/LICENSE
@@ -0,0 +1,36 @@
+OpenFlow Test Framework
+
+Copyright (c) 2010 The Board of Trustees of The Leland Stanford
+Junior University
+
+Except where otherwise noted, this software is distributed under
+the OpenFlow Software License.  See
+http://www.openflowswitch.org/wp/legal/ for current details.
+
+We are making the OpenFlow specification and associated documentation
+(Software) available for public use and benefit with the expectation
+that others will use, modify and enhance the Software and contribute
+those enhancements back to the community. However, since we would like
+to make the Software available for broadest use, with as few
+restrictions as possible permission is hereby granted, free of charge,
+to any person obtaining a copy of this Software to deal in the
+Software under the copyrights without restriction, including without
+limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+The name and trademarks of copyright holder(s) may NOT be used in
+advertising or publicity pertaining to the Software or any derivatives
+without specific, written prior permission.
diff --git a/python/common/frameio/third_party/oftest/README.md b/python/common/frameio/third_party/oftest/README.md
new file mode 100644
index 0000000..f0cb649
--- /dev/null
+++ b/python/common/frameio/third_party/oftest/README.md
@@ -0,0 +1,6 @@
+Files in this directory are derived from the respective files
+in oftest (http://github.com/floodlight/oftest).
+
+For the licensing terms of these files, see LICENSE in this dir.
diff --git a/python/common/frameio/third_party/oftest/__init__.py b/python/common/frameio/third_party/oftest/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/frameio/third_party/oftest/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/frameio/third_party/oftest/afpacket.py b/python/common/frameio/third_party/oftest/afpacket.py
new file mode 100644
index 0000000..9ae8075
--- /dev/null
+++ b/python/common/frameio/third_party/oftest/afpacket.py
@@ -0,0 +1,124 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+AF_PACKET receive support
+
+When VLAN offload is enabled on the NIC Linux will not deliver the VLAN tag
+in the data returned by recv. Instead, it delivers the VLAN TCI in a control
+message. Python 2.x doesn't have built-in support for recvmsg, so we have to
+use ctypes to call it. The recv function exported by this module reconstructs
+the VLAN tag if it was offloaded.
+"""
+
+import struct
+from ctypes import *
+
+ETH_P_8021Q = 0x8100
+SOL_PACKET = 263
+PACKET_AUXDATA = 8
+TP_STATUS_VLAN_VALID = 1 << 4
+
+class struct_iovec(Structure):
+    _fields_ = [
+        ("iov_base", c_void_p),
+        ("iov_len", c_size_t),
+    ]
+
+class struct_msghdr(Structure):
+    _fields_ = [
+        ("msg_name", c_void_p),
+        ("msg_namelen", c_uint32),
+        ("msg_iov", POINTER(struct_iovec)),
+        ("msg_iovlen", c_size_t),
+        ("msg_control", c_void_p),
+        ("msg_controllen", c_size_t),
+        ("msg_flags", c_int),
+    ]
+
+class struct_cmsghdr(Structure):
+    _fields_ = [
+        ("cmsg_len", c_size_t),
+        ("cmsg_level", c_int),
+        ("cmsg_type", c_int),
+    ]
+
+class struct_tpacket_auxdata(Structure):
+    _fields_ = [
+        ("tp_status", c_uint),
+        ("tp_len", c_uint),
+        ("tp_snaplen", c_uint),
+        ("tp_mac", c_ushort),
+        ("tp_net", c_ushort),
+        ("tp_vlan_tci", c_ushort),
+        ("tp_padding", c_ushort),
+    ]
+
+libc = CDLL("libc.so.6")
+recvmsg = libc.recvmsg
+recvmsg.argtypes = [c_int, POINTER(struct_msghdr), c_int]
+recvmsg.restype = c_int
+
+def enable_auxdata(sk):
+    """
+    Ask the kernel to return the VLAN tag in a control message
+
+    Must be called on the socket before afpacket.recv.
+    """
+    sk.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
+
+def recv(sk, bufsize):
+    """
+    Receive a packet from an AF_PACKET socket
+    @sk Socket
+    @bufsize Maximum packet size
+    """
+    buf = create_string_buffer(bufsize)
+
+    ctrl_bufsize = sizeof(struct_cmsghdr) + sizeof(struct_tpacket_auxdata) + sizeof(c_size_t)
+    ctrl_buf = create_string_buffer(ctrl_bufsize)
+
+    iov = struct_iovec()
+    iov.iov_base = cast(buf, c_void_p)
+    iov.iov_len = bufsize
+
+    msghdr = struct_msghdr()
+    msghdr.msg_name = None
+    msghdr.msg_namelen = 0
+    msghdr.msg_iov = pointer(iov)
+    msghdr.msg_iovlen = 1
+    msghdr.msg_control = cast(ctrl_buf, c_void_p)
+    msghdr.msg_controllen = ctrl_bufsize
+    msghdr.msg_flags = 0
+
+    rv = recvmsg(sk.fileno(), byref(msghdr), 0)
+    if rv < 0:
+        raise RuntimeError("recvmsg failed: rv=%d" % rv)
+
+    # The kernel only delivers control messages we ask for. We
+    # only enabled PACKET_AUXDATA, so we can assume it's the
+    # only control message.
+    assert msghdr.msg_controllen >= sizeof(struct_cmsghdr)
+
+    cmsghdr = struct_cmsghdr.from_buffer(ctrl_buf) # pylint: disable=E1101
+    assert cmsghdr.cmsg_level == SOL_PACKET
+    assert cmsghdr.cmsg_type == PACKET_AUXDATA
+
+    auxdata = struct_tpacket_auxdata.from_buffer(ctrl_buf, sizeof(struct_cmsghdr)) # pylint: disable=E1101
+
+    if auxdata.tp_vlan_tci != 0 or auxdata.tp_status & TP_STATUS_VLAN_VALID:
+        # Insert VLAN tag
+        tag = struct.pack("!HH", ETH_P_8021Q, auxdata.tp_vlan_tci)
+        return buf.raw[:12] + tag + buf.raw[12:rv]
+    else:
+        return buf.raw[:rv]
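+
+# Usage sketch, assuming a bound AF_PACKET socket such as the one created by
+# LinuxFrameIOPort.open_socket in common/frameio/frameio.py:
+#
+#     enable_auxdata(sock)
+#     frame = recv(sock, 4096)  # VLAN tag re-inserted if it was offloaded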
diff --git a/python/common/frameio/third_party/oftest/netutils.py b/python/common/frameio/third_party/oftest/netutils.py
new file mode 100644
index 0000000..092d490
--- /dev/null
+++ b/python/common/frameio/third_party/oftest/netutils.py
@@ -0,0 +1,73 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Network utilities for the OpenFlow test framework
+"""
+
+###########################################################################
+##                                                                         ##
+## Promiscuous mode enable/disable                                         ##
+##                                                                         ##
+## Based on code from Scapy by Philippe Biondi                             ##
+##                                                                         ##
+##                                                                         ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License as published by the   ##
+## Free Software Foundation; either version 2, or (at your option) any     ##
+## later version.                                                          ##
+##                                                                         ##
+## This program is distributed in the hope that it will be useful, but     ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of              ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU       ##
+## General Public License for more details.                                ##
+##                                                                         ##
+#############################################################################
+
+import socket
+from fcntl import ioctl
+import struct
+
+# From net/if_arp.h
+ARPHDR_ETHER = 1
+ARPHDR_LOOPBACK = 772
+
+# From bits/ioctls.h
+SIOCGIFHWADDR  = 0x8927          # Get hardware address
+SIOCGIFINDEX   = 0x8933          # name -> if_index mapping
+
+# From netpacket/packet.h
+PACKET_ADD_MEMBERSHIP  = 1
+PACKET_DROP_MEMBERSHIP = 2
+PACKET_MR_PROMISC      = 1
+
+# From bits/socket.h
+SOL_PACKET = 263
+
+def get_if(iff,cmd):
+  s=socket.socket()
+  ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
+  s.close()
+  return ifreq
+
+def get_if_index(iff):
+  return int(struct.unpack("I",get_if(iff, SIOCGIFINDEX)[16:20])[0])
+
+def set_promisc(s,iff,val=1):
+  mreq = struct.pack("IHH8s", get_if_index(iff), PACKET_MR_PROMISC, 0, "")
+  if val:
+      cmd = PACKET_ADD_MEMBERSHIP
+  else:
+      cmd = PACKET_DROP_MEMBERSHIP
+  s.setsockopt(SOL_PACKET, cmd, mreq)
+
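+# Usage sketch: open a raw socket and put eth0 into promiscuous mode
+# (interface name is an assumption for illustration):
+#
+#     s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0)
+#     set_promisc(s, 'eth0')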
diff --git a/python/common/kvstore/__init__.py b/python/common/kvstore/__init__.py
new file mode 100644
index 0000000..4a82628
--- /dev/null
+++ b/python/common/kvstore/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/kvstore/consul_client.py b/python/common/kvstore/consul_client.py
new file mode 100644
index 0000000..bc14759
--- /dev/null
+++ b/python/common/kvstore/consul_client.py
@@ -0,0 +1,304 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.kvstore.kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair, RETRY_BACKOFF
+from common.utils.asleep import asleep
+from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
+from consul import ConsulException
+from consul.twisted import Consul
+from structlog import get_logger
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
+
+log = get_logger()
+
+class ConsulClient(KVClient):
+
+    def __init__(self, kv_host, kv_port):
+        KVClient.__init__(self, kv_host, kv_port)
+        self.session_id = None
+        self.client = Consul(kv_host, kv_port)
+
+    def watch(self, key, key_change_callback, timeout=DEFAULT_TIMEOUT):
+        self._retriggering_watch(key, key_change_callback, timeout)
+
+    @inlineCallbacks
+    def _retriggering_watch(self, key, key_change_callback, timeout):
+        self.key_watches[key] = ConsulWatch(self.client, key, key_change_callback, timeout)
+        yield self.key_watches[key].start()
+
+    def close_watch(self, key, timeout=DEFAULT_TIMEOUT):
+        if key in self.key_watches:
+            self.key_watches[key].stop()
+
+    @inlineCallbacks
+    def _op_with_retry(self, operation, key, value, timeout, *args, **kw):
+        log.debug('kv-op', operation=operation, key=key, timeout=timeout, args=args, kw=kw)
+        err = None
+        result = None
+        while True:
+            try:
+                if operation == 'GET':
+                    result = yield self._get(key, **kw)
+                elif operation == 'LIST':
+                    result, err = yield self._list(key)
+                elif operation == 'PUT':
+                    # Put returns a boolean response
+                    result = yield self.client.kv.put(key, value)
+                    if not result:
+                        err = 'put-failed'
+                elif operation == 'DELETE':
+                    # Delete returns a boolean response
+                    result = yield self.client.kv.delete(key)
+                    if not result:
+                        err = 'delete-failed'
+                elif operation == 'RESERVE':
+                    result, err = yield self._reserve(key, value, **kw)
+                elif operation == 'RENEW':
+                    result, err = yield self._renew_reservation(key)
+                elif operation == 'RELEASE':
+                    result, err = yield self._release_reservation(key)
+                elif operation == 'RELEASE-ALL':
+                    err = yield self._release_all_reservations()
+                self._clear_backoff()
+                break
+            except ConsulException as ex:
+                if 'ConnectionRefusedError' in ex.message:
+                    log.exception('comms-exception', ex=ex)
+                    yield self._backoff('consul-not-up')
+                else:
+                    log.error('consul-specific-exception', ex=ex)
+                    err = ex
+            except Exception as ex:
+                log.error('consul-exception', ex=ex)
+                err = ex
+
+            if timeout > 0 and self.retry_time > timeout:
+                err = 'operation-timed-out'
+            if err is not None:
+                self._clear_backoff()
+                break
+
+        returnValue((result, err))
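+
+    # Callers (sketch) unpack the (result, err) pair that every operation
+    # returns; the key name here is an assumption:
+    #
+    #     kvp, err = yield self._op_with_retry('GET', 'service/voltha/data',
+    #                                          None, timeout)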
+
+    @inlineCallbacks
+    def _get(self, key, **kw):
+        kvp = None
+        index, rec = yield self.client.kv.get(key, **kw)
+        if rec is not None:
+            kvp = KVPair(rec['Key'], rec['Value'], index)
+        returnValue(kvp)
+
+    @inlineCallbacks
+    def _list(self, key):
+        err = None
+        kv_pairs = []
+        index, recs = yield self.client.kv.get(key, recurse=True)
+        if recs is not None:
+            for rec in recs:
+                kv_pairs.append(KVPair(rec['Key'], rec['Value'], rec['ModifyIndex']))
+        returnValue((kv_pairs, err))
+
+    @inlineCallbacks
+    def _reserve(self, key, value, **kw):
+        # Avoid a NameError when the caller omits the ttl keyword argument
+        ttl = kw.get('ttl')
+        reserved = False
+        err = 'reservation-failed'
+        owner = None
+
+        # Create a session
+        self.session_id = yield self.client.session.create(behavior='delete',
+                                                           ttl=ttl) # lock_delay=1)
+        log.debug('create-session', id=self.session_id)
+        # Try to acquire the key
+        result = yield self.client.kv.put(key, value, acquire=self.session_id)
+        log.debug('key-acquire', key=key, value=value, sess=self.session_id, result=result)
+
+        # Check if reservation succeeded
+        index, record = yield self.client.kv.get(key)
+        if record is not None and 'Value' in record:
+            owner = record['Value']
+            log.debug('get-key', session=record['Session'], owner=owner)
+            if record['Session'] == self.session_id and owner == value:
+                reserved = True
+                log.debug('key-reserved', key=key, value=value, ttl=ttl)
+                # Add key to reservation list
+                self.key_reservations[key] = self.session_id
+            else:
+                log.debug('reservation-held-by-another', owner=owner)
+
+        if reserved:
+            err = None
+        returnValue((owner, err))
+
+    @inlineCallbacks
+    def _renew_reservation(self, key):
+        result = None
+        err = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            session_id = self.key_reservations[key]
+            # A successfully renewed session returns an object with fields:
+            # Node, CreateIndex, Name, ModifyIndex, ID, Behavior, TTL,
+            # LockDelay, and Checks
+            result = yield self.client.session.renew(session_id=session_id)
+            log.debug('session-renew', result=result)
+        if result is None:
+            err = 'session-renewal-failed'
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _release_reservation(self, key):
+        err = None
+        success = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            session_id = self.key_reservations[key]
+            # A successfully destroyed session returns a boolean result
+            success = yield self.client.session.destroy(session_id)
+            log.debug('session-destroy', result=success)
+            if not success:
+                err = 'session-destroy-failed'
+            self.session_id = None
+            self.key_reservations.pop(key)
+        returnValue((success, err))
+
+    @inlineCallbacks
+    def _release_all_reservations(self):
+        err = None
+        keys_to_delete = []
+        for key in self.key_reservations:
+            session_id = self.key_reservations[key]
+            # A successfully destroyed session returns a boolean result
+            success = yield self.client.session.destroy(session_id)
+            if not success:
+                err = 'session-destroy-failed'
+                log.debug('session-destroy', id=session_id, result=success)
+            self.session_id = None
+            keys_to_delete.append(key)
+        for key in keys_to_delete:
+            self.key_reservations.pop(key)
+        returnValue(err)
+
+
+class ConsulWatch():
+
+    def __init__(self, consul, key, callback, timeout):
+        self.client = consul
+        self.key = key
+        self.index = None
+        self.callback = callback
+        self.timeout = timeout
+        self.period = 60
+        self.running = True
+        self.retries = 0
+        self.retry_time = 0
+
+    @inlineCallbacks
+    def start(self):
+        self.running = True
+        index, rec = yield self._get_with_retry(self.key, None,
+                                                timeout=self.timeout)
+        self.index = str(index)
+
+        @inlineCallbacks
+        def _get(key, deferred):
+            try:
+                index, rec = yield self._get_with_retry(key, None,
+                                                        timeout=self.timeout,
+                                                        index=self.index)
+                self.index = str(index)
+                if not deferred.called:
+                    log.debug('got-result-cancelling-deferred')
+                    deferred.callback((self.index, rec))
+            except Exception as e:
+                log.exception('got-exception', e=e)
+
+        while self.running:
+            try:
+                rcvd = DeferredWithTimeout(timeout=self.period)
+                _get(self.key, rcvd)
+                try:
+                    # Update index for next watch iteration
+                    index, rec = yield rcvd
+                    log.debug('event-received', index=index, rec=rec)
+                    # Notify client of key change event
+                    if rec is None:
+                        # Key has been deleted
+                        self._send_event(Event(Event.DELETE, self.key, None))
+                    else:
+                        self._send_event(Event(Event.PUT, rec['Key'], rec['Value']))
+                except TimeOutError as e:
+                    log.debug('no-events-over-watch-period', key=self.key)
+                except Exception as e:
+                    log.exception('exception', e=e)
+            except Exception as e:
+                log.exception('exception', e=e)
+
+        log.debug('close-watch', key=self.key)
+
+    def stop(self):
+        self.running = False
+        self.callback = None
+
+    @inlineCallbacks
+    def _get_with_retry(self, key, value, timeout, *args, **kw):
+        log.debug('watch-period', key=key, period=self.period, timeout=timeout, args=args, kw=kw)
+        err = None
+        result = None
+        while True:
+            try:
+                result = yield self.client.kv.get(key, **kw)
+                self._clear_backoff()
+                break
+            except ConsulException as ex:
+                err = ex
+                if 'ConnectionRefusedError' in ex.message:
+                    self._send_event(Event(Event.CONNECTION_DOWN, self.key, None))
+                    log.exception('comms-exception', ex=ex)
+                    yield self._backoff('consul-not-up')
+                else:
+                    log.error('consul-specific-exception', ex=ex)
+            except Exception as ex:
+                err = ex
+                log.error('consul-exception', ex=ex)
+
+            if timeout > 0 and self.retry_time > timeout:
+                err = 'operation-timed-out'
+            if err is not None:
+                self._clear_backoff()
+                break
+
+        returnValue(result)
+
+    def _send_event(self, event):
+        if self.callback is not None:
+            self.callback(event)
+
+    def _backoff(self, msg):
+        wait_time = RETRY_BACKOFF[min(self.retries, len(RETRY_BACKOFF) - 1)]
+        self.retry_time += wait_time
+        self.retries += 1
+        log.error(msg, next_retry_in_secs=wait_time,
+                  total_delay_in_secs=self.retry_time,
+                  retries=self.retries)
+        return asleep(wait_time)
+
+    def _clear_backoff(self):
+        if self.retries:
+            log.debug('reconnected-to-kv', after_retries=self.retries)
+            self.retries = 0
+            self.retry_time = 0
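+
+
+# A minimal usage sketch (illustrative only; not invoked by the adapter code).
+# It assumes a Consul agent on localhost:8500. Watch callbacks receive
+# kv_client.Event objects whose event_type is PUT, DELETE or CONNECTION_DOWN.
+#
+#     def on_change(event):
+#         log.info('key-changed', key=event.key, value=event.value)
+#
+#     client = ConsulClient('localhost', 8500)
+#     client.watch('service/voltha/some/key', on_change)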
diff --git a/python/common/kvstore/etcd_client.py b/python/common/kvstore/etcd_client.py
new file mode 100644
index 0000000..a958b71
--- /dev/null
+++ b/python/common/kvstore/etcd_client.py
@@ -0,0 +1,240 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+#
+# Most of the txaioetcd methods provide a timeout parameter. This parameter
+# is likely intended to limit the amount of time spent by any one method
+# waiting for a response from the etcd server. However, if the server is
+# down, the method immediately throws a ConnectionRefusedError exception;
+# it does not perform any retries. The timeout parameter provided by the
+# methods in EtcdClient covers this contingency.
+#
+################################################################################
+
+from common.kvstore.kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair
+from structlog import get_logger
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
+from twisted.internet.error import ConnectionRefusedError
+from txaioetcd import Client, CompVersion, Failed, KeySet, OpGet, OpSet, Transaction
+
+log = get_logger()
+
+class EtcdClient(KVClient):
+
+    def __init__(self, kv_host, kv_port):
+        KVClient.__init__(self, kv_host, kv_port)
+        self.url = u'http://' + kv_host + u':' + str(kv_port)
+        self.client = Client(reactor, self.url)
+
+    @inlineCallbacks
+    def watch(self, key, key_change_callback, timeout=DEFAULT_TIMEOUT):
+        self.key_watches[key] = key_change_callback
+        result = yield self._op_with_retry('WATCH', key, None, timeout, callback=self.key_changed)
+        returnValue(result)
+
+    def key_changed(self, kv):
+        key = kv.key
+        value = kv.value
+        log.debug('key-changed', key=key, value=value)
+        # Notify client of key change event
+        if value is not None:
+            evt = Event(Event.PUT, key, value)
+        else:
+            evt = Event(Event.DELETE, key, None)
+        if key in self.key_watches:
+            self.key_watches[key](evt)
+
+    def close_watch(self, key, timeout=DEFAULT_TIMEOUT):
+        log.debug('close-watch', key=key)
+        if key in self.key_watches:
+            self.key_watches.pop(key)
+
+    @inlineCallbacks
+    def _op_with_retry(self, operation, key, value, timeout, *args, **kw):
+        log.debug('kv-op', operation=operation, key=key, timeout=timeout, args=args, kw=kw)
+        err = None
+        result = None
+        if type(key) == str:
+            key = bytes(key)
+        if value is not None:
+            value = bytes(value)
+        while True:
+            try:
+                if operation == 'GET':
+                    result = yield self._get(key)
+                elif operation == 'LIST':
+                    result, err = yield self._list(key)
+                elif operation == 'PUT':
+                    # Put returns an object of type Revision
+                    result = yield self.client.set(key, value, **kw)
+                elif operation == 'DELETE':
+                    # Delete returns an object of type Deleted
+                    result = yield self.client.delete(key)
+                elif operation == 'RESERVE':
+                    result, err = yield self._reserve(key, value, **kw)
+                elif operation == 'RENEW':
+                    result, err = yield self._renew_reservation(key)
+                elif operation == 'RELEASE':
+                    result, err = yield self._release_reservation(key)
+                elif operation == 'RELEASE-ALL':
+                    err = yield self._release_all_reservations()
+                elif operation == 'WATCH':
+                    # Avoid a NameError when the caller omits the callback kwarg
+                    callback = kw.get('callback')
+                    result = self.client.watch([KeySet(key, prefix=True)], callback)
+                self._clear_backoff()
+                break
+            except ConnectionRefusedError as ex:
+                log.error('comms-exception', ex=ex)
+                yield self._backoff('etcd-not-up')
+            except Exception as ex:
+                log.error('etcd-exception', ex=ex)
+                err = ex
+
+            if timeout > 0 and self.retry_time > timeout:
+                err = 'operation-timed-out'
+            if err is not None:
+                self._clear_backoff()
+                break
+
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _get(self, key):
+        kvp = None
+        resp = yield self.client.get(key)
+        if resp.kvs is not None and len(resp.kvs) == 1:
+            kv = resp.kvs[0]
+            kvp = KVPair(kv.key, kv.value, kv.mod_revision)
+        returnValue(kvp)
+
+    @inlineCallbacks
+    def _list(self, key):
+        err = None
+        kv_pairs = []
+        resp = yield self.client.get(KeySet(key, prefix=True))
+        if resp.kvs is not None and len(resp.kvs) > 0:
+            for kv in resp.kvs:
+                kv_pairs.append(KVPair(kv.key, kv.value, kv.mod_revision))
+        returnValue((kv_pairs, err))
+
+    @inlineCallbacks
+    def _reserve(self, key, value, **kw):
+        # Avoid a NameError when the caller omits the ttl keyword argument
+        ttl = kw.get('ttl')
+        reserved = False
+        err = 'reservation-failed'
+        owner = None
+
+        # Create a lease
+        lease = yield self.client.lease(ttl)
+
+        # Create a transaction
+        txn = Transaction(
+            compare=[ CompVersion(key, '==', 0) ],
+            success=[ OpSet(key, bytes(value), lease=lease) ],
+            failure=[ OpGet(key) ]
+        )
+        newly_acquired = False
+        try:
+            result = yield self.client.submit(txn)
+        except Failed as failed:
+            log.debug('key-already-present', key=key)
+            if len(failed.responses) > 0:
+                response = failed.responses[0]
+                if response.kvs is not None and len(response.kvs) > 0:
+                    kv = response.kvs[0]
+                    log.debug('key-already-present', value=kv.value)
+                    if kv.value == value:
+                        reserved = True
+                        log.debug('key-already-reserved', key=kv.key, value=kv.value)
+        else:
+            newly_acquired = True
+            log.debug('key-was-absent', key=key, result=result)
+
+        # Check if reservation succeeded
+        resp = yield self.client.get(key)
+        if resp.kvs is not None and len(resp.kvs) == 1:
+            owner = resp.kvs[0].value
+            if owner == value:
+                if newly_acquired:
+                    log.debug('key-reserved', key=key, value=value, ttl=ttl,
+                             lease_id=lease.lease_id)
+                    reserved = True
+                    # Add key to reservation list
+                    self.key_reservations[key] = lease
+                else:
+                    log.debug("reservation-still-held")
+            else:
+                log.debug('reservation-held-by-another', value=owner)
+
+        if reserved:
+            err = None
+        returnValue((owner, err))
+
+    @inlineCallbacks
+    def _renew_reservation(self, key):
+        result = None
+        err = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            lease = self.key_reservations[key]
+            # A successfully refreshed lease returns an object of type Header
+            result = yield lease.refresh()
+        if result is None:
+            err = 'lease-refresh-failed'
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _release_reservation(self, key):
+        err = None
+        result = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            lease = self.key_reservations[key]
+            time_left = yield lease.remaining()
+            # A successfully revoked lease returns an object of type Header
+            log.debug('release-reservation', key=key, lease_id=lease.lease_id,
+                      time_left_in_secs=time_left)
+            result = yield lease.revoke()
+            if result is None:
+                err = 'lease-revoke-failed'
+            self.key_reservations.pop(key)
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _release_all_reservations(self):
+        err = None
+        keys_to_delete = []
+        for key in self.key_reservations:
+            lease = self.key_reservations[key]
+            time_left = yield lease.remaining()
+            # A successfully revoked lease returns an object of type Header
+            log.debug('release-reservation', key=key, lease_id=lease.lease_id,
+                      time_left_in_secs=time_left)
+            result = yield lease.revoke()
+            if result is None:
+                err = 'lease-revoke-failed'
+                log.debug('lease-revoke', result=result)
+            keys_to_delete.append(key)
+        for key in keys_to_delete:
+            self.key_reservations.pop(key)
+        returnValue(err)
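+
+
+# A minimal reservation sketch (illustrative only; assumes an etcd server on
+# localhost:2379). reserve() returns (owner_value, err); err is None only
+# when this client holds the reservation.
+#
+#     client = EtcdClient('localhost', 2379)
+#     owner, err = yield client.reserve('service/voltha/leader', 'core-1', ttl=10)
+#     if err is None:
+#         yield client.renew_reservation('service/voltha/leader')
+#         yield client.release_reservation('service/voltha/leader')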
diff --git a/python/common/kvstore/kv_client.py b/python/common/kvstore/kv_client.py
new file mode 100644
index 0000000..69a6480
--- /dev/null
+++ b/python/common/kvstore/kv_client.py
@@ -0,0 +1,206 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.utils.asleep import asleep
+from structlog import get_logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+log = get_logger()
+
+class KVPair():
+    def __init__(self, key, value, index):
+        self.key = key
+        self.value = value
+        self.index = index
+
+class Event():
+    PUT = 0
+    DELETE = 1
+    CONNECTION_DOWN = 2
+
+    def __init__(self, event_type, key, value):
+        self.event_type = event_type
+        self.key = key
+        self.value = value
+
+RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+# The default timeout is the sum of the backoff schedule, i.e. long enough
+# for one full pass through all retry intervals.
+DEFAULT_TIMEOUT = sum(RETRY_BACKOFF)
+
+class KVClient():
+
+    def __init__(self, kv_host, kv_port):
+        self.host = kv_host
+        self.port = kv_port
+        self.key_reservations = {}
+        self.key_watches = {}
+        self.retries = 0
+        self.retry_time = 0
+
+    @inlineCallbacks
+    def get(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method returns the value of the given key in the KV store.
+
+        :param key: The key whose value is requested
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: (KVPair, error) where KVPair is None if an error occurred
+        '''
+        result = yield self._op_with_retry('GET', key, None, timeout)
+        returnValue(result)
+
+    @inlineCallbacks
+    def list(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        The list method returns an array of key-value pairs all of which
+        share the same key prefix.
+
+        :param key: The key prefix
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: ([]KVPair, error) where []KVPair is a list of KVPair objects
+        '''
+        result = yield self._op_with_retry('LIST', key, None, timeout)
+        returnValue(result)
+
+    @inlineCallbacks
+    def put(self, key, value, timeout=DEFAULT_TIMEOUT):
+        '''
+        The put method writes a value to the given key in the KV store.
+        Do NOT modify a reserved key in an etcd store; doing so seems
+        to nullify the TTL of the key. In other words, the key lasts
+        forever.
+
+        :param key: The key to be written to
+        :param value: The value of the key
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful write
+        '''
+        _, err = yield self._op_with_retry('PUT', key, value, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def delete(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        The delete method removes a key from the KV store.
+
+        :param key: The key to be deleted
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful deletion
+        '''
+        _, err = yield self._op_with_retry('DELETE', key, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def reserve(self, key, value, ttl, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method acts essentially like a semaphore. The underlying mechanism
+        differs depending on the KV store: etcd uses a test-and-set transaction;
+        consul uses an acquire lock. If using etcd, do NOT write to the key
+        subsequent to the initial reservation; the TTL functionality may become
+        impaired (i.e. the reservation never expires).
+
+        :param key: The key under reservation
+        :param value: The reservation owner
+        :param ttl: The time-to-live (TTL) for the reservation. The key is unreserved
+        by the KV store when the TTL expires.
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: (key_value, error) If the key is acquired, then the value returned will
+        be the value passed in.  If the key is already acquired, then the value assigned
+        to that key will be returned.
+        '''
+        result = yield self._op_with_retry('RESERVE', key, value, timeout, ttl=ttl)
+        returnValue(result)
+
+    @inlineCallbacks
+    def renew_reservation(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method renews the reservation for a given key. A reservation expires
+        after the TTL (Time To Live) period specified when reserving the key.
+
+        :param key: The reserved key
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful renewal
+        '''
+        result, err = yield self._op_with_retry('RENEW', key, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def release_reservation(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        The release_reservation method cancels the reservation for a given key.
+
+        :param key: The reserved key
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful cancellation
+        '''
+        result, err = yield self._op_with_retry('RELEASE', key, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def release_all_reservations(self, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method cancels all key reservations made previously
+        using the reserve API.
+
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful cancellation
+        '''
+        result, err = yield self._op_with_retry('RELEASE-ALL', None, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def watch(self, key, key_change_callback, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method provides a watch capability for the given key. If the value of the key
+        changes or the key is deleted, then an event indicating the change is passed to
+        the given callback function.
+
+        :param key: The key to be watched
+        :param key_change_callback: The function invoked whenever the key changes
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: There is no return; key change events are passed to the callback function
+        '''
+        raise NotImplementedError('Method not implemented')
+
+    @inlineCallbacks
+    def close_watch(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method closes the watch on the given key. Once the watch is closed, key
+        change events are no longer passed to the key change callback function.
+
+        :param key: The key under watch
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: There is no return
+        '''
+        raise NotImplementedError('Method not implemented')
+
+    @inlineCallbacks
+    def _op_with_retry(self, operation, key, value, timeout, *args, **kw):
+        raise NotImplementedError('Method not implemented')
+
+    def _backoff(self, msg):
+        wait_time = RETRY_BACKOFF[min(self.retries, len(RETRY_BACKOFF) - 1)]
+        self.retry_time += wait_time
+        self.retries += 1
+        log.error(msg, next_retry_in_secs=wait_time,
+                  total_delay_in_secs=self.retry_time,
+                  retries=self.retries)
+        return asleep(wait_time)
+
+    def _clear_backoff(self):
+        if self.retries:
+            log.debug('reset-backoff', after_retries=self.retries)
+            self.retries = 0
+            self.retry_time = 0
\ No newline at end of file
diff --git a/python/common/kvstore/kvstore.py b/python/common/kvstore/kvstore.py
new file mode 100644
index 0000000..662b34d
--- /dev/null
+++ b/python/common/kvstore/kvstore.py
@@ -0,0 +1,31 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.kvstore.consul_client import ConsulClient
+from common.kvstore.etcd_client import EtcdClient
+
+def create_kv_client(kv_store, host, port):
+    '''
+    Factory for creating a client interface to a KV store
+
+    :param kv_store: Specify either 'etcd' or 'consul'
+    :param host: Name or IP address of host serving the KV store
+    :param port: Port number (integer) of the KV service
+    :return: Reference to newly created client interface
+    '''
+    if kv_store == 'etcd':
+        return EtcdClient(host, port)
+    elif kv_store == 'consul':
+        return ConsulClient(host, port)
+    return None
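+
+
+# A minimal usage sketch (illustrative only; not invoked anywhere in this
+# module). The returned client exposes the common KVClient API regardless
+# of the chosen backend.
+#
+#     from twisted.internet.defer import inlineCallbacks
+#
+#     @inlineCallbacks
+#     def example():
+#         client = create_kv_client('etcd', 'localhost', 2379)
+#         err = yield client.put('service/voltha/demo', 'hello')
+#         kvp, err = yield client.get('service/voltha/demo')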
diff --git a/python/common/manhole.py b/python/common/manhole.py
new file mode 100644
index 0000000..c00c900
--- /dev/null
+++ b/python/common/manhole.py
@@ -0,0 +1,129 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import rlcompleter
+from pprint import pprint
+
+import structlog
+from twisted.conch import manhole_ssh
+from twisted.conch.manhole import ColoredManhole
+from twisted.conch.ssh import keys
+from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
+from twisted.cred.portal import Portal
+from twisted.internet import reactor
+
+log = structlog.get_logger()
+
+
+MANHOLE_SERVER_RSA_PRIVATE = './manhole_rsa_key'
+MANHOLE_SERVER_RSA_PUBLIC = './manhole_rsa_key.pub'
+
+
+def get_rsa_keys():
+    if not (os.path.exists(MANHOLE_SERVER_RSA_PUBLIC) and \
+                    os.path.exists(MANHOLE_SERVER_RSA_PRIVATE)):
+        # generate a RSA keypair
+        log.info('generate-rsa-keypair')
+        from Crypto.PublicKey import RSA
+        rsa_key = RSA.generate(1024)
+        public_key_str = rsa_key.publickey().exportKey(format='OpenSSH')
+        private_key_str = rsa_key.exportKey()
+
+        # save keys for next time
+        file(MANHOLE_SERVER_RSA_PUBLIC, 'w+b').write(public_key_str)
+        file(MANHOLE_SERVER_RSA_PRIVATE, 'w+b').write(private_key_str)
+        log.debug('saved-rsa-keypair', public=MANHOLE_SERVER_RSA_PUBLIC,
+                  private=MANHOLE_SERVER_RSA_PRIVATE)
+    else:
+        public_key_str = file(MANHOLE_SERVER_RSA_PUBLIC).read()
+        private_key_str = file(MANHOLE_SERVER_RSA_PRIVATE).read()
+    return public_key_str, private_key_str
+
+
+class ManholeWithCompleter(ColoredManhole):
+
+    def __init__(self, namespace):
+        namespace['manhole'] = self
+        super(ManholeWithCompleter, self).__init__(namespace)
+        self.last_tab = None
+        self.completer = rlcompleter.Completer(self.namespace)
+
+    def handle_TAB(self):
+        if self.last_tab != self.lineBuffer:
+            self.last_tab = self.lineBuffer
+            return
+
+        buffer = ''.join(self.lineBuffer)
+        completions = []
+        maxlen = 3
+        for c in xrange(1000):
+            candidate = self.completer.complete(buffer, c)
+            if not candidate:
+                break
+
+            if len(candidate) > maxlen:
+                maxlen = len(candidate)
+
+            completions.append(candidate)
+
+        if len(completions) == 1:
+            rest = completions[0][len(buffer):]
+            self.terminal.write(rest)
+            self.lineBufferIndex += len(rest)
+            self.lineBuffer.extend(rest)
+
+        elif len(completions):
+            maxlen += 3
+            numcols = self.width / maxlen
+            self.terminal.nextLine()
+            for idx, candidate in enumerate(completions):
+                self.terminal.write('%%-%ss' % maxlen % candidate)
+                if not ((idx + 1) % numcols):
+                    self.terminal.nextLine()
+            self.terminal.nextLine()
+            self.drawInputLine()
+
+
+class Manhole(object):
+
+    def __init__(self, port, pws, **kw):
+        kw.update(globals())
+        kw['pp'] = pprint
+
+        realm = manhole_ssh.TerminalRealm()
+        manhole = ManholeWithCompleter(kw)
+
+        def windowChanged(_, win_size):
+            manhole.terminalSize(*reversed(win_size[:2]))
+
+        realm.sessionFactory.windowChanged = windowChanged
+        realm.chainedProtocolFactory.protocolFactory = lambda _: manhole
+        portal = Portal(realm)
+        portal.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(**pws))
+        factory = manhole_ssh.ConchFactory(portal)
+        public_key_str, private_key_str = get_rsa_keys()
+        factory.publicKeys = {
+            'ssh-rsa': keys.Key.fromString(public_key_str)
+        }
+        factory.privateKeys = {
+            'ssh-rsa': keys.Key.fromString(private_key_str)
+        }
+        reactor.listenTCP(port, factory, interface='localhost')
+
+
+if __name__ == '__main__':
+    Manhole(12222, dict(admin='admin'))
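+    # With the credentials above you can connect via, e.g.:
+    #   ssh -p 12222 admin@localhost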
+    reactor.run()
diff --git a/python/common/pon_resource_manager/README.md b/python/common/pon_resource_manager/README.md
new file mode 100644
index 0000000..95d62dd
--- /dev/null
+++ b/python/common/pon_resource_manager/README.md
@@ -0,0 +1,66 @@
+# Resource Manager Profile Configuration
+
+The Resource Manager module is responsible for managing PON resource pools.
+It exposes APIs to allocate/free the following resources from the resource pools:
+1) alloc_ids
+2) onu_ids
+3) gemport_ids
+4) flow_ids
+5) uni_port_ids
+
+The Resource Manager uses a KV store backend to ensure resiliency of the resource pool data.
+
+## Configuring Resource Ranges
+
+The Resource Manager assumes the following defaults when no explicit configuration is available:
+```
+    {
+        "onu_id_start": 1,
+        "onu_id_end": 127,
+        "alloc_id_start": 1024,
+        "alloc_id_end": 2816,
+        "gemport_id_start": 1024,
+        "gemport_id_end": 8960,
+        "flow_id_start": 1,
+        "flow_id_end": 16383,
+        "uni_id_start": 0,
+        "uni_id_end": 0,
+        "pon_ports": 16
+    }
+```
+To configure specific resource ranges for a given OLT model, place the resource range JSON at the following path on the KV store and specify the OLT model during the OLT pre-provisioning step.
+
+```
+service/voltha/resource_manager/<technology>/resource_ranges/<olt_model>
+```
+
+An example KV path is `service/voltha/resource_manager/xgpon/resource_ranges/asfvolt16`.
+
+Create a ResourceRanges.json file with example content as shown below.
+
+```
+    {
+        "onu_id_start": 1,
+        "onu_id_end": 127,
+        "alloc_id_start": 1024,
+        "alloc_id_end": 2816,
+        "gemport_id_start": 1024,
+        "gemport_id_end": 8960,
+        "flow_id_start": 1,
+        "flow_id_end": 16383,
+        "uni_id_start": 0,
+        "uni_id_end": 0,
+        "pon_ports": 16
+    }
+```
+
+Assuming etcd is the KV store in use, push the resource range profile using the command below.
+```
+curl -sSL -XPUT http://<etcd-ip>:2379/v2/keys/service/voltha/resource_manager/xgpon/resource_ranges/asfvolt16 -d value="$(jq -c . ResourceRanges.json)"
+```
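+
+To verify that the profile was stored (illustrative, using the same etcd v2 HTTP API as the push above):
+
+```
+curl -sSL http://<etcd-ip>:2379/v2/keys/service/voltha/resource_manager/xgpon/resource_ranges/asfvolt16
+```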
+
+When the OLT is pre-provisioned, specify the OLT model. The Resource Manager uses this model to look up any available resource range profile on the KV store and initializes the resource pools accordingly. The `-m` option below specifies the OLT model.
+
+```
+preprovision_olt -t openolt -H 192.168.50.100:9191 -m asfvolt16
+```
+
+`Note:` For an OpenOLT device, resource ranges queried from the device (if available) will override the resource ranges read from the KV store.
diff --git a/python/common/pon_resource_manager/__init__.py b/python/common/pon_resource_manager/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/pon_resource_manager/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/pon_resource_manager/resource_kv_store.py b/python/common/pon_resource_manager/resource_kv_store.py
new file mode 100644
index 0000000..1ca6530
--- /dev/null
+++ b/python/common/pon_resource_manager/resource_kv_store.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Resource KV store - interface between Resource Manager and backend store."""
+import structlog
+
+from voltha.core.config.config_backend import ConsulStore
+from voltha.core.config.config_backend import EtcdStore
+
+# KV store uses this prefix to store resource info
+PATH_PREFIX = 'service/voltha/resource_manager/{}'
+
+
+class ResourceKvStore(object):
+    """Implements apis to store/get/remove resource in backend store."""
+
+    def __init__(self, technology, device_id, backend, host, port):
+        """
+        Create ResourceKvStore object.
+
+        Based on the backend ('consul' or 'etcd'), use the host and port
+        to create the respective store object.
+
+        :param technology: PON technology
+        :param device_id: OLT device id
+        :param backend: Type of backend storage (etcd or consul)
+        :param host: host ip info for backend storage
+        :param port: port for the backend storage
+        :raises Exception: when an invalid backend store is passed as an argument
+        """
+        # logger
+        self._log = structlog.get_logger()
+
+        path = PATH_PREFIX.format(technology)
+        try:
+            if backend == 'consul':
+                self._kv_store = ConsulStore(host, port, path)
+            elif backend == 'etcd':
+                self._kv_store = EtcdStore(host, port, path)
+            else:
+                self._log.error('Invalid-backend')
+                raise Exception("Invalid-backend-for-kv-store")
+        except Exception as e:
+            self._log.exception("exception-in-init")
+            raise Exception(e)
+
+    def update_to_kv_store(self, path, resource):
+        """
+        Update resource.
+
+        :param path: path to update the resource
+        :param resource: updated resource
+        """
+        try:
+            self._kv_store[path] = str(resource)
+            self._log.debug("Resource-updated-in-kv-store", path=path)
+            return True
+        except BaseException:
+            self._log.exception("Resource-update-in-kv-store-failed",
+                                path=path, resource=resource)
+        return False
+
+    def get_from_kv_store(self, path):
+        """
+        Get resource.
+
+        :param path: path to get the resource
+        """
+        resource = None
+        try:
+            resource = self._kv_store[path]
+            self._log.debug("Got-resource-from-kv-store", path=path)
+        except KeyError:
+            self._log.info("Resource-not-found-updating-resource",
+                           path=path)
+        except BaseException:
+            self._log.exception("Getting-resource-from-kv-store-failed",
+                                path=path)
+        return resource
+
+    def remove_from_kv_store(self, path):
+        """
+        Remove resource.
+
+        :param path: path to remove the resource
+        """
+        try:
+            del self._kv_store[path]
+            self._log.debug("Resource-deleted-in-kv-store", path=path)
+            return True
+        except BaseException:
+            self._log.exception("Resource-delete-in-kv-store-failed",
+                                path=path)
+        return False
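+
+
+# A minimal usage sketch (illustrative only; the path shown is hypothetical):
+#
+#     store = ResourceKvStore('xgpon', 'olt-1', 'etcd', 'localhost', 2379)
+#     store.update_to_kv_store('olt-1/alloc_id_pool/0', [1024, 1025])
+#     pool = store.get_from_kv_store('olt-1/alloc_id_pool/0')
+#     store.remove_from_kv_store('olt-1/alloc_id_pool/0')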
diff --git a/python/common/pon_resource_manager/resource_manager.py b/python/common/pon_resource_manager/resource_manager.py
new file mode 100644
index 0000000..bdb45ee
--- /dev/null
+++ b/python/common/pon_resource_manager/resource_manager.py
@@ -0,0 +1,1050 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Resource Manager will be unique for each OLT device.
+
+It exposes APIs to create/free alloc_ids/onu_ids/gemport_ids. Resource Manager
+uses a KV store in backend to ensure resiliency of the data.
+"""
+import json
+import ast
+import structlog
+from bitstring import BitArray
+import shlex
+from argparse import ArgumentParser, ArgumentError
+
+from common.pon_resource_manager.resource_kv_store import ResourceKvStore
+from common.tech_profile.tech_profile import TechProfile
+
+
+# Used to parse extra arguments to OpenOlt adapter from the NBI
+class OltVendorArgumentParser(ArgumentParser):
+    # Must override the exit command to prevent it from
+    # calling sys.exit().  Return exception instead.
+    def exit(self, status=0, message=None):
+        raise Exception(message)
+
+
+class PONResourceManager(object):
+    """Implements APIs to initialize/allocate/release alloc/gemport/onu IDs."""
+
+    # Constants to identify resource pool
+    UNI_ID = 'UNI_ID'
+    ONU_ID = 'ONU_ID'
+    ALLOC_ID = 'ALLOC_ID'
+    GEMPORT_ID = 'GEMPORT_ID'
+    FLOW_ID = 'FLOW_ID'
+
+    # Constants for passing command line arguments
+    OLT_MODEL_ARG = '--olt_model'
+
+    # The resource ranges for a given device model should be placed
+    # at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+    # path on the KV store.
+    # If Resource Range parameters are to be read from the external KV store,
+    # they are expected to be stored in the following format.
+    # Note: All parameters are MANDATORY for now.
+    '''
+    {
+        "onu_id_start": 1,
+        "onu_id_end": 127,
+        "alloc_id_start": 1024,
+        "alloc_id_end": 2816,
+        "gemport_id_start": 1024,
+        "gemport_id_end": 8960,
+        "flow_id_start": 1,
+        "flow_id_end": 16383,
+        "uni_id_start": 0,
+        "uni_id_end": 0,
+        "pon_ports": 16
+    }
+
+    '''
+    # Constants used as keys to reference the resource range parameters from
+    # an external KV store.
+    UNI_ID_START_IDX = "uni_id_start"
+    UNI_ID_END_IDX = "uni_id_end"
+    ONU_ID_START_IDX = "onu_id_start"
+    ONU_ID_END_IDX = "onu_id_end"
+    ONU_ID_SHARED_IDX = "onu_id_shared"
+    ALLOC_ID_START_IDX = "alloc_id_start"
+    ALLOC_ID_END_IDX = "alloc_id_end"
+    ALLOC_ID_SHARED_IDX = "alloc_id_shared"
+    GEMPORT_ID_START_IDX = "gemport_id_start"
+    GEMPORT_ID_END_IDX = "gemport_id_end"
+    GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+    FLOW_ID_START_IDX = "flow_id_start"
+    FLOW_ID_END_IDX = "flow_id_end"
+    FLOW_ID_SHARED_IDX = "flow_id_shared"
+    NUM_OF_PON_PORT = "pon_ports"
+
+    # PON Resource range configuration on the KV store.
+    # Format: 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+    # The KV store backend is initialized with a path prefix and we need to
+    # provide only the suffix.
+    PON_RESOURCE_RANGE_CONFIG_PATH = 'resource_ranges/{}'
+
+    # resource path suffix
+    ALLOC_ID_POOL_PATH = '{}/alloc_id_pool/{}'
+    GEMPORT_ID_POOL_PATH = '{}/gemport_id_pool/{}'
+    ONU_ID_POOL_PATH = '{}/onu_id_pool/{}'
+    FLOW_ID_POOL_PATH = '{}/flow_id_pool/{}'
+
+    # Path on the KV store for storing list of alloc IDs for a given ONU
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+    ALLOC_ID_RESOURCE_MAP_PATH = '{}/{}/alloc_ids'
+
+    # Path on the KV store for storing list of gemport IDs for a given ONU
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+    GEMPORT_ID_RESOURCE_MAP_PATH = '{}/{}/gemport_ids'
+
+    # Path on the KV store for storing list of Flow IDs for a given ONU
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+    FLOW_ID_RESOURCE_MAP_PATH = '{}/{}/flow_ids'
+
+    # Flow Id info: Use to store more metadata associated with the flow_id
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
+    FLOW_ID_INFO_PATH = '{}/{}/flow_id_info/{}'
+
+    # Constants for internal usage.
+    PON_INTF_ID = 'pon_intf_id'
+    START_IDX = 'start_idx'
+    END_IDX = 'end_idx'
+    POOL = 'pool'
+
+    def __init__(self, technology, extra_args, device_id,
+                 backend, host, port):
+        """
+        Create PONResourceManager object.
+
+        :param technology: PON technology
+        :param extra_args: This string contains extra arguments passed during
+        pre-provisioning of the OLT and specifies the OLT vendor type
+        :param device_id: OLT device id
+        :param backend: backend store
+        :param host: ip of backend store
+        :param port: port on which backend store listens
+        :raises Exception: when an invalid backend store is passed as an argument
+        """
+        # logger
+        self._log = structlog.get_logger()
+
+        try:
+            self.technology = technology
+            self.extra_args = extra_args
+            self.device_id = device_id
+            self.backend = backend
+            self.host = host
+            self.port = port
+            self.olt_model = None
+
+            self._kv_store = ResourceKvStore(technology, device_id, backend,
+                                             host, port)
+            self.tech_profile = TechProfile(self)
+
+            # The pon_resource_ranges attribute below should be initialized
+            # by reading from the KV store.
+            self.pon_resource_ranges = dict()
+            self.pon_resource_ranges[PONResourceManager.ONU_ID_SHARED_IDX] = None
+            self.pon_resource_ranges[PONResourceManager.ALLOC_ID_SHARED_IDX] = None
+            self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_SHARED_IDX] = None
+            self.pon_resource_ranges[PONResourceManager.FLOW_ID_SHARED_IDX] = None
+
+            self.shared_resource_mgrs = dict()
+            self.shared_resource_mgrs[PONResourceManager.ONU_ID_SHARED_IDX] = None
+            self.shared_resource_mgrs[PONResourceManager.ALLOC_ID_SHARED_IDX] = None
+            self.shared_resource_mgrs[PONResourceManager.GEMPORT_ID_SHARED_IDX] = None
+            self.shared_resource_mgrs[PONResourceManager.FLOW_ID_SHARED_IDX] = None
+
+            self.shared_idx_by_type = dict()
+            self.shared_idx_by_type[PONResourceManager.ONU_ID] = PONResourceManager.ONU_ID_SHARED_IDX
+            self.shared_idx_by_type[PONResourceManager.ALLOC_ID] = PONResourceManager.ALLOC_ID_SHARED_IDX
+            self.shared_idx_by_type[PONResourceManager.GEMPORT_ID] = PONResourceManager.GEMPORT_ID_SHARED_IDX
+            self.shared_idx_by_type[PONResourceManager.FLOW_ID] = PONResourceManager.FLOW_ID_SHARED_IDX
+
+            self.intf_ids = None
+
+        except Exception as e:
+            self._log.exception("exception-in-init")
+            raise Exception(e)
+
+    def init_resource_ranges_from_kv_store(self):
+        """
+        Initialize PON resource ranges with config fetched from kv store.
+
+        :return boolean: True if the PON resource ranges were initialized, else False
+        """
+        self.olt_model = self._get_olt_model()
+        # Try to initialize the PON Resource Ranges from KV store based on the
+        # OLT model key, if available
+        if self.olt_model is None:
+            self._log.info("device-model-unavailable--not-reading-from-kv-store")
+            return False
+
+        path = self.PON_RESOURCE_RANGE_CONFIG_PATH.format(self.olt_model)
+        try:
+            # get resource from kv store
+            result = self._kv_store.get_from_kv_store(path)
+
+            if result is None:
+                self._log.debug("resource-range-config-unavailable-on-kvstore")
+                return False
+
+            resource_range_config = result
+
+            if resource_range_config is not None:
+                # update internal ranges from kv ranges. If there are missing
+                # values in the KV profile, continue to use the defaults
+                for key, value in json.loads(resource_range_config).items():
+                    self.pon_resource_ranges[key] = value
+
+                # initialize optional elements that may not be in the profile
+                if self.pon_resource_ranges.get(PONResourceManager.UNI_ID_START_IDX) is None:
+                    self.pon_resource_ranges[PONResourceManager.UNI_ID_START_IDX] = 0
+                if self.pon_resource_ranges.get(PONResourceManager.UNI_ID_END_IDX) is None:
+                    self.pon_resource_ranges[PONResourceManager.UNI_ID_END_IDX] = 0
+
+                self._log.debug("Init-resource-ranges-from-kvstore-success",
+                                pon_resource_ranges=self.pon_resource_ranges,
+                                path=path)
+                return True
+
+        except Exception as e:
+            self._log.exception("error-initializing-resource-range-from-kv-store",
+                                e=e)
+        return False
+
+    def update_range_(self, start_idx, start, end_idx, end, shared_idx=None, shared_pool_id=None,
+                      shared_resource_mgr=None):
+        # Narrow the configured range: raise the start and lower the end only
+        # when the supplied bounds are tighter than the current ones.
+        if (start is not None) and \
+                (start_idx not in self.pon_resource_ranges or self.pon_resource_ranges[start_idx] < start):
+            self.pon_resource_ranges[start_idx] = start
+        if (end is not None) and \
+                (end_idx not in self.pon_resource_ranges or self.pon_resource_ranges[end_idx] > end):
+            self.pon_resource_ranges[end_idx] = end
+        if (shared_pool_id is not None) and \
+                (shared_idx not in self.pon_resource_ranges or self.pon_resource_ranges[shared_idx] is None):
+            self.pon_resource_ranges[shared_idx] = shared_pool_id
+        if (shared_resource_mgr is not None) and \
+                (shared_idx not in self.shared_resource_mgrs or self.shared_resource_mgrs[shared_idx] is None):
+            self.shared_resource_mgrs[shared_idx] = shared_resource_mgr
+
+    def update_ranges(self,
+                      onu_id_start_idx=None,
+                      onu_id_end_idx=None,
+                      onu_id_shared_pool_id=None,
+                      onu_id_shared_resource_mgr=None,
+                      alloc_id_start_idx=None,
+                      alloc_id_end_idx=None,
+                      alloc_id_shared_pool_id=None,
+                      alloc_id_shared_resource_mgr=None,
+                      gemport_id_start_idx=None,
+                      gemport_id_end_idx=None,
+                      gemport_id_shared_pool_id=None,
+                      gemport_id_shared_resource_mgr=None,
+                      flow_id_start_idx=None,
+                      flow_id_end_idx=None,
+                      flow_id_shared_pool_id=None,
+                      flow_id_shared_resource_mgr=None,
+                      uni_id_start_idx=None,
+                      uni_id_end_idx=None):
+
+        self.update_range_(PONResourceManager.ONU_ID_START_IDX, onu_id_start_idx,
+                           PONResourceManager.ONU_ID_END_IDX, onu_id_end_idx,
+                           PONResourceManager.ONU_ID_SHARED_IDX, onu_id_shared_pool_id,
+                           onu_id_shared_resource_mgr)
+
+        self.update_range_(PONResourceManager.ALLOC_ID_START_IDX, alloc_id_start_idx,
+                           PONResourceManager.ALLOC_ID_END_IDX, alloc_id_end_idx,
+                           PONResourceManager.ALLOC_ID_SHARED_IDX, alloc_id_shared_pool_id,
+                           alloc_id_shared_resource_mgr)
+
+        self.update_range_(PONResourceManager.GEMPORT_ID_START_IDX, gemport_id_start_idx,
+                           PONResourceManager.GEMPORT_ID_END_IDX, gemport_id_end_idx,
+                           PONResourceManager.GEMPORT_ID_SHARED_IDX, gemport_id_shared_pool_id,
+                           gemport_id_shared_resource_mgr)
+
+        self.update_range_(PONResourceManager.FLOW_ID_START_IDX, flow_id_start_idx,
+                           PONResourceManager.FLOW_ID_END_IDX, flow_id_end_idx,
+                           PONResourceManager.FLOW_ID_SHARED_IDX, flow_id_shared_pool_id,
+                           flow_id_shared_resource_mgr)
+
+        self.update_range_(PONResourceManager.UNI_ID_START_IDX, uni_id_start_idx,
+                           PONResourceManager.UNI_ID_END_IDX, uni_id_end_idx)
+
+    def init_default_pon_resource_ranges(self,
+                                         onu_id_start_idx=1,
+                                         onu_id_end_idx=127,
+                                         onu_id_shared_pool_id=None,
+                                         alloc_id_start_idx=1024,
+                                         alloc_id_end_idx=2816,
+                                         alloc_id_shared_pool_id=None,
+                                         gemport_id_start_idx=1024,
+                                         gemport_id_end_idx=8960,
+                                         gemport_id_shared_pool_id=None,
+                                         flow_id_start_idx=1,
+                                         flow_id_end_idx=16383,
+                                         flow_id_shared_pool_id=None,
+                                         uni_id_start_idx=0,
+                                         uni_id_end_idx=0,
+                                         num_of_pon_ports=16,
+                                         intf_ids=None):
+        """
+        Initialize default PON resource ranges
+
+        :param onu_id_start_idx: onu id start index
+        :param onu_id_end_idx: onu id end index
+        :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
+        :param alloc_id_start_idx: alloc id start index
+        :param alloc_id_end_idx: alloc id end index
+        :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
+        :param gemport_id_start_idx: gemport id start index
+        :param gemport_id_end_idx: gemport id end index
+        :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+        :param flow_id_start_idx: flow id start index
+        :param flow_id_end_idx: flow id end index
+        :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
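+        :param uni_id_start_idx: uni id start index
+        :param uni_id_end_idx: uni id end index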
+        :param num_of_pon_ports: number of PON ports
+        :param intf_ids: interfaces serviced by this manager
+        """
+        self._log.info("initialize-default-resource-range-values")
+
+        self.update_ranges(onu_id_start_idx, onu_id_end_idx, onu_id_shared_pool_id, None,
+                           alloc_id_start_idx, alloc_id_end_idx, alloc_id_shared_pool_id, None,
+                           gemport_id_start_idx, gemport_id_end_idx, gemport_id_shared_pool_id, None,
+                           flow_id_start_idx, flow_id_end_idx, flow_id_shared_pool_id, None,
+                           uni_id_start_idx, uni_id_end_idx)
+
+        if intf_ids is None:
+            intf_ids = range(0, num_of_pon_ports)
+
+        self.intf_ids = intf_ids
+
+    def init_device_resource_pool(self):
+        """
+        Initialize resource pool for all PON ports.
+        """
+
+        self._log.info("init-device-resource-pool", technology=self.technology,
+                       pon_resource_ranges=self.pon_resource_ranges)
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.ONU_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.init_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.ONU_ID,
+                start_idx=self.pon_resource_ranges[
+                    PONResourceManager.ONU_ID_START_IDX],
+                end_idx=self.pon_resource_ranges[
+                    PONResourceManager.ONU_ID_END_IDX])
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.ALLOC_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.init_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.ALLOC_ID,
+                start_idx=self.pon_resource_ranges[
+                    PONResourceManager.ALLOC_ID_START_IDX],
+                end_idx=self.pon_resource_ranges[
+                    PONResourceManager.ALLOC_ID_END_IDX])
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.init_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.GEMPORT_ID,
+                start_idx=self.pon_resource_ranges[
+                    PONResourceManager.GEMPORT_ID_START_IDX],
+                end_idx=self.pon_resource_ranges[
+                    PONResourceManager.GEMPORT_ID_END_IDX])
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.FLOW_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.init_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.FLOW_ID,
+                start_idx=self.pon_resource_ranges[
+                    PONResourceManager.FLOW_ID_START_IDX],
+                end_idx=self.pon_resource_ranges[
+                    PONResourceManager.FLOW_ID_END_IDX])
+            if shared_pool_id is not None:
+                break
+
+    def clear_device_resource_pool(self):
+        """
+        Clear resource pool of all PON ports.
+        """
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.ONU_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.clear_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.ONU_ID,
+            )
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.ALLOC_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.clear_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.ALLOC_ID,
+            )
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.clear_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.GEMPORT_ID,
+            )
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.FLOW_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.clear_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.FLOW_ID,
+            )
+            if shared_pool_id is not None:
+                break
+
+    def init_resource_id_pool(self, pon_intf_id, resource_type, start_idx,
+                              end_idx):
+        """
+        Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param start_idx: start index for the resource id pool
+        :param end_idx: end index for the resource id pool
+        :return boolean: True if the resource id pool was initialized else False
+        """
+        status = False
+
+        # delegate to the master instance if sharing enabled across instances
+        shared_resource_mgr = self.shared_resource_mgrs[self.shared_idx_by_type[resource_type]]
+        if shared_resource_mgr is not None and shared_resource_mgr is not self:
+            return shared_resource_mgr.init_resource_id_pool(pon_intf_id, resource_type,
+                                                             start_idx, end_idx)
+
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return status
+
+        try:
+            # In case of adapter reboot and reconciliation, the resource is
+            # checked for presence in the kv store; if absent, the kv store is updated
+            resource = self._get_resource(path)
+
+            if resource is not None:
+                self._log.info("Resource-already-present-in-store", path=path)
+                status = True
+            else:
+                resource = self._format_resource(pon_intf_id, start_idx,
+                                                 end_idx)
+                self._log.info("Resource-initialized", path=path)
+
+                # Add resource as json in kv store.
+                result = self._kv_store.update_to_kv_store(path, resource)
+                if result is True:
+                    status = True
+
+        except Exception as e:
+            self._log.exception("error-initializing-resource-pool", e=e)
+
+        return status
+
+    def assert_resource_limits(self, id, resource_type):
+        """
+        Assert that the specified id value is within the limit bounds of the requested resource type.
+
+        :param id: The value to assert is in limits
+        :param resource_type: String to identify type of resource
+        """
+        start_idx = PONResourceManager.ONU_ID_START_IDX if resource_type == PONResourceManager.ONU_ID \
+            else PONResourceManager.ALLOC_ID_START_IDX if resource_type == PONResourceManager.ALLOC_ID \
+            else PONResourceManager.GEMPORT_ID_START_IDX if resource_type == PONResourceManager.GEMPORT_ID \
+            else PONResourceManager.FLOW_ID_START_IDX if resource_type == PONResourceManager.FLOW_ID \
+            else PONResourceManager.UNI_ID_START_IDX if resource_type == PONResourceManager.UNI_ID \
+            else None
+        end_idx = PONResourceManager.ONU_ID_END_IDX if resource_type == PONResourceManager.ONU_ID \
+            else PONResourceManager.ALLOC_ID_END_IDX if resource_type == PONResourceManager.ALLOC_ID \
+            else PONResourceManager.GEMPORT_ID_END_IDX if resource_type == PONResourceManager.GEMPORT_ID \
+            else PONResourceManager.FLOW_ID_END_IDX if resource_type == PONResourceManager.FLOW_ID \
+            else PONResourceManager.UNI_ID_END_IDX if resource_type == PONResourceManager.UNI_ID \
+            else None
+        assert self.pon_resource_ranges[start_idx] <= id <= self.pon_resource_ranges[end_idx]
+
+    def get_resource_id(self, pon_intf_id, resource_type, num_of_id=1):
+        """
+        Create alloc/gemport/onu/flow id for given OLT PON interface.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param num_of_id: required number of ids
+        :return list/int/None: a list of ids when multiple alloc/gemport ids
+                               are requested, a single int id otherwise, or
+                               None for an invalid resource type or failure
+        """
+        result = None
+
+        if num_of_id < 1:
+            self._log.error("invalid-num-of-resources-requested")
+            return result
+
+        # delegate to the master instance if sharing enabled across instances
+        shared_resource_mgr = self.shared_resource_mgrs[self.shared_idx_by_type[resource_type]]
+        if shared_resource_mgr is not None and shared_resource_mgr is not self:
+            return shared_resource_mgr.get_resource_id(pon_intf_id, resource_type, num_of_id)
+
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return result
+
+        try:
+            resource = self._get_resource(path)
+            if resource is not None and \
+                    (resource_type == PONResourceManager.ONU_ID or
+                     resource_type == PONResourceManager.FLOW_ID):
+                result = self._generate_next_id(resource)
+            elif resource is not None and (
+                    resource_type == PONResourceManager.GEMPORT_ID or
+                    resource_type == PONResourceManager.ALLOC_ID):
+                if num_of_id == 1:
+                    result = self._generate_next_id(resource)
+                else:
+                    result = list()
+                    while num_of_id > 0:
+                        result.append(self._generate_next_id(resource))
+                        num_of_id -= 1
+            else:
+                raise Exception("get-resource-failed")
+
+            self._log.debug("Get-" + resource_type + "-success", result=result,
+                            path=path)
+            # Update resource in kv store
+            self._update_resource(path, resource)
+
+        except Exception as e:
+            self._log.exception("Get-" + resource_type + "-id-failed",
+                                path=path, e=e)
+        return result
+
+    def free_resource_id(self, pon_intf_id, resource_type, release_content):
+        """
+        Release alloc/gemport/onu/flow id for given OLT PON interface.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param release_content: id or list of ids to be released
+        :return boolean: True if all IDs in given release_content released
+                         else False
+        """
+        status = False
+        known_resource_types = [PONResourceManager.ONU_ID,
+                                PONResourceManager.ALLOC_ID,
+                                PONResourceManager.GEMPORT_ID,
+                                PONResourceManager.FLOW_ID]
+        if resource_type not in known_resource_types:
+            self._log.error("unknown-resource-type",
+                            resource_type=resource_type)
+            return status
+        if release_content is None:
+            self._log.debug("nothing-to-release")
+            return status
+        # delegate to the master instance if sharing enabled across instances
+        shared_resource_mgr = self.shared_resource_mgrs[self.shared_idx_by_type[resource_type]]
+        if shared_resource_mgr is not None and shared_resource_mgr is not self:
+            return shared_resource_mgr.free_resource_id(pon_intf_id, resource_type,
+                                                        release_content)
+
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return status
+
+        try:
+            resource = self._get_resource(path)
+            if resource is None:
+                raise Exception("get-resource-failed")
+            if isinstance(release_content, list):
+                for content in release_content:
+                    self._release_id(resource, content)
+            else:
+                self._release_id(resource, release_content)
+
+            self._log.debug("Free-" + resource_type + "-success", path=path)
+
+            # Update resource in kv store
+            status = self._update_resource(path, resource)
+
+        except Exception as e:
+            self._log.exception("Free-" + resource_type + "-failed",
+                                path=path, e=e)
+        return status
+
+    def clear_resource_id_pool(self, pon_intf_id, resource_type):
+        """
+        Clear Resource Pool for a given Resource Type on a given PON Port.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :return boolean: True if removed else False
+        """
+
+        # delegate to the master instance if sharing enabled across instances
+        shared_resource_mgr = self.shared_resource_mgrs[self.shared_idx_by_type[resource_type]]
+        if shared_resource_mgr is not None and shared_resource_mgr is not self:
+            return shared_resource_mgr.clear_resource_id_pool(pon_intf_id, resource_type)
+
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return False
+
+        try:
+            result = self._kv_store.remove_from_kv_store(path)
+            if result is True:
+                self._log.debug("Resource-pool-cleared",
+                                device_id=self.device_id,
+                                path=path)
+                return True
+        except Exception as e:
+            self._log.exception("error-clearing-resource-pool", e=e)
+
+        self._log.error("Clear-resource-pool-failed", device_id=self.device_id,
+                        path=path)
+        return False
+
+    def init_resource_map(self, pon_intf_onu_id):
+        """
+        Initialize resource map
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+        # initialize pon_intf_onu_id tuple to alloc_ids map
+        alloc_id_path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        alloc_ids = list()
+        self._kv_store.update_to_kv_store(
+            alloc_id_path, json.dumps(alloc_ids)
+        )
+
+        # initialize pon_intf_onu_id tuple to gemport_ids map
+        gemport_id_path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        gemport_ids = list()
+        self._kv_store.update_to_kv_store(
+            gemport_id_path, json.dumps(gemport_ids)
+        )
+
+    def remove_resource_map(self, pon_intf_onu_id):
+        """
+        Remove resource map
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+        # remove pon_intf_onu_id tuple to alloc_ids map
+        try:
+            alloc_id_path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+                self.device_id, str(pon_intf_onu_id)
+            )
+            self._kv_store.remove_from_kv_store(alloc_id_path)
+        except Exception as e:
+            self._log.error("error-removing-alloc-id", e=e)
+
+        try:
+            # remove pon_intf_onu_id tuple to gemport_ids map
+            gemport_id_path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+                self.device_id, str(pon_intf_onu_id)
+            )
+            self._kv_store.remove_from_kv_store(gemport_id_path)
+        except Exception as e:
+            self._log.error("error-removing-gem-ports", e=e)
+
+        flow_id_path = PONResourceManager.FLOW_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id))
+        flow_ids = self._kv_store.get_from_kv_store(flow_id_path)
+
+        if flow_ids and isinstance(flow_ids, list):
+            for flow_id in flow_ids:
+                try:
+                    flow_id_info_path = PONResourceManager.FLOW_ID_INFO_PATH.format(
+                                        self.device_id, str(pon_intf_onu_id), flow_id)
+                    self._kv_store.remove_from_kv_store(flow_id_info_path)
+                except Exception as e:
+                    self._log.error("error-removing-flow-info", flow_id=flow_id, e=e)
+                    continue
+        try:
+            self._kv_store.remove_from_kv_store(flow_id_path)
+        except Exception as e:
+            self._log.error("error-removing-flow-ids", e=e)
+
+    def get_current_alloc_ids_for_onu(self, pon_intf_onu_id):
+        """
+        Get currently configured alloc ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+
+        :return list: List of alloc_ids if available, else None
+        """
+        path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id))
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
+            alloc_id_list = json.loads(value)
+            if len(alloc_id_list) > 0:
+                return alloc_id_list
+
+        return None
+
+    def get_current_gemport_ids_for_onu(self, pon_intf_onu_id):
+        """
+        Get currently configured gemport ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+
+        :return list: List of gemport IDs if available, else None
+        """
+
+        path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id))
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
+            gemport_id_list = json.loads(value)
+            if len(gemport_id_list) > 0:
+                return gemport_id_list
+
+        return None
+
+    def get_current_flow_ids_for_onu(self, pon_intf_onu_id):
+        """
+        Get currently configured flow ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+
+        :return list: List of Flow IDs if available, else None
+        """
+
+        path = PONResourceManager.FLOW_ID_RESOURCE_MAP_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id))
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
+            flow_id_list = json.loads(value)
+            assert isinstance(flow_id_list, list)
+            if len(flow_id_list) > 0:
+                return flow_id_list
+
+        return None
+
+    def get_flow_id_info(self, pon_intf_onu_id, flow_id):
+        """
+        Get flow_id details configured for the ONU.
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: Flow Id reference
+
+        :return blob: Flow data blob if available, else None
+        """
+
+        path = PONResourceManager.FLOW_ID_INFO_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id),
+            flow_id)
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
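+            # flow data is stored as-is by update_flow_id_info_for_onu, so it
+            # may be a Python literal rather than JSON; parse it accordingly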
+            return ast.literal_eval(value)
+
+        return None
+
+    def remove_flow_id_info(self, pon_intf_onu_id, flow_id):
+        """
+        Remove flow_id details configured for the ONU.
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: Flow Id reference
+
+        """
+
+        path = PONResourceManager.FLOW_ID_INFO_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id),
+            flow_id)
+        self._kv_store.remove_from_kv_store(path)
+
+    def update_alloc_ids_for_onu(self, pon_intf_onu_id, alloc_ids):
+        """
+        Update currently configured alloc ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param alloc_ids: list of alloc ids
+        """
+        path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        self._kv_store.update_to_kv_store(
+            path, json.dumps(alloc_ids)
+        )
+
+    def update_gemport_ids_for_onu(self, pon_intf_onu_id, gemport_ids):
+        """
+        Update currently configured gemport ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param gemport_ids: list of gem port ids
+        """
+        path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        self._kv_store.update_to_kv_store(
+            path, json.dumps(gemport_ids)
+        )
+
+    def update_flow_id_for_onu(self, pon_intf_onu_id, flow_id, add=True):
+        """
+        Update the flow_id list of the ONU (add or remove flow_id from the list)
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: flow ID
+        :param add: Boolean flag to indicate whether the flow_id should be
+                    added or removed from the list. Defaults to adding the flow.
+        """
+        path = PONResourceManager.FLOW_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        current_flow_ids = self.get_current_flow_ids_for_onu(pon_intf_onu_id)
+        if not isinstance(current_flow_ids, list):
+            # When the first flow_id is being added, the current_flow_ids is None
+            current_flow_ids = list()
+
+        if add:
+            if flow_id not in current_flow_ids:
+                current_flow_ids.append(flow_id)
+        else:
+            if flow_id in current_flow_ids:
+                current_flow_ids.remove(flow_id)
+
+        self._kv_store.update_to_kv_store(path, json.dumps(current_flow_ids))
+
+    def update_flow_id_info_for_onu(self, pon_intf_onu_id, flow_id, flow_data):
+        """
+        Update any metadata associated with the flow_id. The flow_data could be
+        JSON or any other data structure; the resource manager doesn't care.
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: Flow ID
+        :param flow_data: Flow data blob
+        """
+        path = PONResourceManager.FLOW_ID_INFO_PATH.format(
+            self.device_id, str(pon_intf_onu_id), flow_id
+        )
+
+        if not self._kv_store.update_to_kv_store(path, flow_data):
+            self._log.error("flow-info-update-failed", path=path, flow_id=flow_id)
+
+    def _get_olt_model(self):
+        """
+        Get olt model variant
+
+        :return: type of olt model
+        """
+        olt_model = None
+        if self.extra_args and len(self.extra_args) > 0:
+            parser = OltVendorArgumentParser(add_help=False)
+            parser.add_argument(PONResourceManager.OLT_MODEL_ARG, '-m', action='store', default='default')
+            try:
+                args = parser.parse_args(shlex.split(self.extra_args))
+                self._log.debug('parsing-extra-arguments', args=args)
+                olt_model = args.olt_model
+            except ArgumentError as e:
+                self._log.exception('invalid-arguments', e=e)
+            except Exception as e:
+                self._log.exception('option-parsing-error', e=e)
+
+        self._log.debug('olt-model', olt_model=olt_model)
+        return olt_model
+
+    def _generate_next_id(self, resource):
+        """
+        Generate unique id having OFFSET as start index.
+
+        :param resource: resource used to generate ID
+        :return int: generated id
+        """
+        pos = resource[PONResourceManager.POOL].find('0b0')
+        resource[PONResourceManager.POOL].set(1, pos)
+        return pos[0] + resource[PONResourceManager.START_IDX]
+
+    def _release_id(self, resource, unique_id):
+        """
+        Release unique id having OFFSET as start index.
+
+        :param resource: resource used to release ID
+        :param unique_id: id need to be released
+        """
+        pos = ((int(unique_id)) - resource[PONResourceManager.START_IDX])
+        resource[PONResourceManager.POOL].set(0, pos)
+
+    def _get_path(self, pon_intf_id, resource_type):
+        """
+        Get path for given resource type.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :return: path for given resource type
+        """
+
+        shared_pool_id = self.pon_resource_ranges[self.shared_idx_by_type[resource_type]]
+        if shared_pool_id is not None:
+            pon_intf_id = shared_pool_id
+
+        path = None
+        if resource_type == PONResourceManager.ONU_ID:
+            path = self._get_onu_id_resource_path(pon_intf_id)
+        elif resource_type == PONResourceManager.ALLOC_ID:
+            path = self._get_alloc_id_resource_path(pon_intf_id)
+        elif resource_type == PONResourceManager.GEMPORT_ID:
+            path = self._get_gemport_id_resource_path(pon_intf_id)
+        elif resource_type == PONResourceManager.FLOW_ID:
+            path = self._get_flow_id_resource_path(pon_intf_id)
+        else:
+            self._log.error("invalid-resource-pool-identifier")
+        return path
+
+    def _get_flow_id_resource_path(self, pon_intf_id):
+        """
+        Get flow id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: flow id resource path
+        """
+        return PONResourceManager.FLOW_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
+    def _get_alloc_id_resource_path(self, pon_intf_id):
+        """
+        Get alloc id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: alloc id resource path
+        """
+        return PONResourceManager.ALLOC_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
+    def _get_gemport_id_resource_path(self, pon_intf_id):
+        """
+        Get gemport id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: gemport id resource path
+        """
+        return PONResourceManager.GEMPORT_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
+    def _get_onu_id_resource_path(self, pon_intf_id):
+        """
+        Get onu id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: onu id resource path
+        """
+        return PONResourceManager.ONU_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
+    def _update_resource(self, path, resource):
+        """
+        Update resource in resource kv store.
+
+        :param path: path to update resource
+        :param resource: resource need to be updated
+        :return boolean: True if resource updated in kv store else False
+        """
+        resource[PONResourceManager.POOL] = \
+            resource[PONResourceManager.POOL].bin
+        result = self._kv_store.update_to_kv_store(path, json.dumps(resource))
+        return result is True
+
+    def _get_resource(self, path):
+        """
+        Get resource from kv store.
+
+        :param path: path to get resource
+        :return: resource if resource present in kv store else None
+        """
+        # get resource from kv store
+        result = self._kv_store.get_from_kv_store(path)
+        if result is None:
+            return None
+        self._log.info("dumping-resource", result=result)
+
+        # decode resource fetched from backend store to dictionary
+        resource = json.loads(result)
+
+        # the resource pool is stored in the backend as a binary string;
+        # convert it to a BitArray so IDs can be generated and released
+        resource[PONResourceManager.POOL] = \
+            BitArray('0b' + resource[PONResourceManager.POOL])
+
+        return resource
+
+    def _format_resource(self, pon_intf_id, start_idx, end_idx):
+        """
+        Format resource as json.
+
+        :param pon_intf_id: OLT PON interface id
+        :param start_idx: start index for id pool
+        :param end_idx: end index for id pool
+        :return str: resource formatted as a JSON string
+        """
+        # Format resource as json to be stored in backend store
+        resource = dict()
+        resource[PONResourceManager.PON_INTF_ID] = pon_intf_id
+        resource[PONResourceManager.START_IDX] = start_idx
+        resource[PONResourceManager.END_IDX] = end_idx
+
+        # resource pool stored in backend store as binary string
+        resource[PONResourceManager.POOL] = BitArray(end_idx).bin
+
+        return json.dumps(resource)
diff --git a/python/common/structlog_setup.py b/python/common/structlog_setup.py
new file mode 100644
index 0000000..a6950b7
--- /dev/null
+++ b/python/common/structlog_setup.py
@@ -0,0 +1,100 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Setting up proper logging for Voltha"""
+
+import logging
+import logging.config
+
+import structlog
+
+
+def setup_logging(log_config, instance_id,
+                  verbosity_adjust=0, cache_on_use=True):
+    """
+    Set up logging such that:
+    - The primary logging entry method is structlog
+      (see http://structlog.readthedocs.io/en/stable/index.html)
+    - Optionally cache the logger on first use
+    :param log_config: dict config for the standard logging module
+    :param instance_id: unique instance id added to every log record
+    :param verbosity_adjust: number of steps by which to lower the root log level
+    :param cache_on_use: cache the structlog logger on first use
+    :return: structured logger
+    """
+
+    def add_instance_id(_, __, event_dict):
+        event_dict['instance_id'] = instance_id
+        return event_dict
+
+    # Configure standard logging
+    logging.config.dictConfig(log_config)
+    logging.root.level -= 10 * verbosity_adjust
+
+    structlog.configure(
+        processors=[
+            structlog.stdlib.filter_by_level,
+            structlog.stdlib.PositionalArgumentsFormatter(),
+            structlog.processors.StackInfoRenderer(),
+            structlog.processors.format_exc_info,
+            add_instance_id,
+            structlog.processors.UnicodeEncoder(),
+            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+        ],
+        context_class=dict,
+        logger_factory=structlog.stdlib.LoggerFactory(),
+        wrapper_class=structlog.stdlib.BoundLogger,
+        cache_logger_on_first_use=cache_on_use,
+    )
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("first-log-line, logging level %d" % logging.root.level)
+    return log
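+
+# A minimal invocation sketch (hypothetical config dict):
+#   log = setup_logging({'version': 1}, instance_id='1')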
+
+
+def update_logging(instance_id, vcore_id, cache_on_use=True):
+    """
+    Add the vcore id to the structured logger
+    :param instance_id: unique instance id added to every log record
+    :param vcore_id:  The assigned vcore id
+    :param cache_on_use: cache the structlog logger on first use
+    :return: structured logger
+    """
+
+    def add_instance_id(_, __, event_dict):
+        event_dict['instance_id'] = instance_id
+        return event_dict
+
+    def add_vcore_id(_, __, event_dict):
+        event_dict['vcore_id'] = vcore_id
+        return event_dict
+
+    structlog.configure(
+        processors=[
+            structlog.stdlib.filter_by_level,
+            structlog.stdlib.PositionalArgumentsFormatter(),
+            structlog.processors.StackInfoRenderer(),
+            structlog.processors.format_exc_info,
+            add_instance_id,
+            add_vcore_id,
+            structlog.processors.UnicodeEncoder(),
+            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+        ],
+        context_class=dict,
+        logger_factory=structlog.stdlib.LoggerFactory(),
+        wrapper_class=structlog.stdlib.BoundLogger,
+        cache_logger_on_first_use=cache_on_use,
+    )
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("updated-logger")
+    return log
diff --git a/python/common/tech_profile/README.md b/python/common/tech_profile/README.md
new file mode 100644
index 0000000..12610a7
--- /dev/null
+++ b/python/common/tech_profile/README.md
@@ -0,0 +1,347 @@
+# Technology Profile Management
+## Overview
+Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles/<TECHNOLOGY>/<TID>, where TID is the numeric ID of the technology profile and TECHNOLOGY specifies the technology being utilized by the adapter, e.g. xgspon. While the TECHNOLOGY key is a directory, the TID key should be set to the JSON data that represents the technology profile values.
+
+
+
+`NOTE`: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
+
+### Example:
+```sh
+/xgspon/64  {
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+```
+
+## Creating Technology Profiles
+A technology profile is a simple JSON object. This JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. jq can be a useful tool for validating a JSON object. Once a file is created with the JSON object it can be stored in the VOLTHA key/value store using the standard etcd command line tool etcdctl or using an HTTP PUT operation using curl.
+
+Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster you can access the etcd key/value store using kubectl via the PODs named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it really shouldn't matter which is used.
+
+
+
+Assuming the Technology template is stored in a local file 4QueueHybridProfileMap1.json, the following commands could be used to `store` or `update` the technology template in the proper location in the etcd key/value store:
+```sh
+# Store a Technology template using etcdctl
+jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set /xgspon/64
+
+# Store a Technology template using curl
+curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/xgspon/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
+```
+
+In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compress the JSON. Using this tool is not necessary, and if you choose not to use it, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
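+
+For example, the same store operation can be performed without jq, assuming the JSON file is acceptable as-is:
+```sh
+# Store a Technology template without jq
+cat 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set /xgspon/64
+```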
+
+
+
+## Listing Technology Profiles for a given Technology
+While both curl and etcdctl (via kubectl) can be used to list or view the available Technology profiles, etcdctl is easier, and thus will be used in the examples. For listing Technology profiles etcdctl ls is used. It can be used in conjunction with the -r option to recursively list profiles.
+```sh
+# List all the Technology profiles for a Technology
+kubectl exec -i etcd-cluster-0000 -- etcdctl ls /xgspon
+
+# Example output
+/xgspon/64
+/xgspon/65
+```
+
+A specified Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes, and is not required)
+```sh
+# Display a specified Technology profile, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get /xgspon/64 | jq .
+
+# Example output
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+```
+
+## Deleting Technology Profiles
+A technology profile or a technology profile tree can be removed using etcdctl rm.
+
+```sh
+# Remove a specific technology profile
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm /xgspon/64
+
+# Remove all technology profiles associated with Technology xgspon and ID 64 (including the profile ID key)
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r /xgspon/64
+```
+
+## Reference
+https://wiki.opencord.org/display/CORD/Technology+Profile+Management
+
diff --git a/python/common/tech_profile/__init__.py b/python/common/tech_profile/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/tech_profile/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/tech_profile/tech_profile.py b/python/common/tech_profile/tech_profile.py
new file mode 100644
index 0000000..abea364
--- /dev/null
+++ b/python/common/tech_profile/tech_profile.py
@@ -0,0 +1,583 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json
+import ast
+from collections import namedtuple
+import structlog
+from enum import Enum
+
+from voltha.core.config.config_backend import ConsulStore
+from voltha.core.config.config_backend import EtcdStore
+from voltha.registry import registry
+from voltha.adapters.openolt.protos import openolt_pb2
+
+# logger
+log = structlog.get_logger()
+
+DEFAULT_TECH_PROFILE_TABLE_ID = 64
+
+# Enums used while creating TechProfileInstance
+Direction = Enum('Direction', ['UPSTREAM', 'DOWNSTREAM', 'BIDIRECTIONAL'],
+                 start=0)
+SchedulingPolicy = Enum('SchedulingPolicy',
+                        ['WRR', 'StrictPriority', 'Hybrid'], start=0)
+AdditionalBW = Enum('AdditionalBW', ['None', 'NA', 'BestEffort', 'Auto'],
+                    start=0)
+DiscardPolicy = Enum('DiscardPolicy',
+                     ['TailDrop', 'WTailDrop', 'RED', 'WRED'], start=0)
+InferredAdditionBWIndication = Enum('InferredAdditionBWIndication',
+                                    ['None', 'NoneAssured', 'BestEffort'],
+                                    start=0)
+
+
+class InstanceControl(object):
+    # Default value constants
+    ONU_DEFAULT_INSTANCE = 'multi-instance'
+    UNI_DEFAULT_INSTANCE = 'single-instance'
+    DEFAULT_NUM_GEM_PORTS = 1
+    DEFAULT_GEM_PAYLOAD_SIZE = 'auto'
+
+    def __init__(self, onu=ONU_DEFAULT_INSTANCE,
+                 uni=UNI_DEFAULT_INSTANCE,
+                 num_gem_ports=DEFAULT_NUM_GEM_PORTS,
+                 max_gem_payload_size=DEFAULT_GEM_PAYLOAD_SIZE):
+        self.onu = onu
+        self.uni = uni
+        self.num_gem_ports = num_gem_ports
+        self.max_gem_payload_size = max_gem_payload_size
+
+
+class Scheduler(object):
+    # Default value constants
+    DEFAULT_ADDITIONAL_BW = 'auto'
+    DEFAULT_PRIORITY = 0
+    DEFAULT_WEIGHT = 0
+    DEFAULT_Q_SCHED_POLICY = 'hybrid'
+
+    def __init__(self, direction, additional_bw=DEFAULT_ADDITIONAL_BW,
+                 priority=DEFAULT_PRIORITY,
+                 weight=DEFAULT_WEIGHT,
+                 q_sched_policy=DEFAULT_Q_SCHED_POLICY):
+        self.direction = direction
+        self.additional_bw = additional_bw
+        self.priority = priority
+        self.weight = weight
+        self.q_sched_policy = q_sched_policy
+
+
+class GemPortAttribute(object):
+    # Default value constants
+    DEFAULT_AES_ENCRYPTION = 'True'
+    DEFAULT_PRIORITY_Q = 0
+    DEFAULT_WEIGHT = 0
+    DEFAULT_MAX_Q_SIZE = 'auto'
+    DEFAULT_DISCARD_POLICY = DiscardPolicy.TailDrop.name
+
+    def __init__(self, pbit_map, discard_config,
+                 aes_encryption=DEFAULT_AES_ENCRYPTION,
+                 scheduling_policy=SchedulingPolicy.WRR.name,
+                 priority_q=DEFAULT_PRIORITY_Q,
+                 weight=DEFAULT_WEIGHT,
+                 max_q_size=DEFAULT_MAX_Q_SIZE,
+                 discard_policy=DiscardPolicy.TailDrop.name):
+        self.max_q_size = max_q_size
+        self.pbit_map = pbit_map
+        self.aes_encryption = aes_encryption
+        self.scheduling_policy = scheduling_policy
+        self.priority_q = priority_q
+        self.weight = weight
+        self.discard_policy = discard_policy
+        self.discard_config = discard_config
+
+
+class DiscardConfig(object):
+    # Default value constants
+    DEFAULT_MIN_THRESHOLD = 0
+    DEFAULT_MAX_THRESHOLD = 0
+    DEFAULT_MAX_PROBABILITY = 0
+
+    def __init__(self, min_threshold=DEFAULT_MIN_THRESHOLD,
+                 max_threshold=DEFAULT_MAX_THRESHOLD,
+                 max_probability=DEFAULT_MAX_PROBABILITY):
+        self.min_threshold = min_threshold
+        self.max_threshold = max_threshold
+        self.max_probability = max_probability
+
+
+class TechProfile(object):
+    # Constants used in default tech profile
+    DEFAULT_TECH_PROFILE_NAME = 'Default_1tcont_1gem_Profile'
+    DEFAULT_VERSION = 1.0
+    DEFAULT_GEMPORTS_COUNT = 1
+    pbits = ['0b11111111']
+
+    # Tech profile path prefix in kv store
+    KV_STORE_TECH_PROFILE_PATH_PREFIX = 'service/voltha/technology_profiles'
+
+    # Tech profile path in kv store
+    TECH_PROFILE_PATH = '{}/{}'  # <technology>/<table_id>
+
+    # Tech profile instance path in kv store
+    # Format: <technology>/<table_id>/<uni_port_name>
+    TECH_PROFILE_INSTANCE_PATH = '{}/{}/{}'
+
+    # Tech-Profile JSON String Keys
+    NAME = 'name'
+    PROFILE_TYPE = 'profile_type'
+    VERSION = 'version'
+    NUM_GEM_PORTS = 'num_gem_ports'
+    INSTANCE_CONTROL = 'instance_control'
+    US_SCHEDULER = 'us_scheduler'
+    DS_SCHEDULER = 'ds_scheduler'
+    UPSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'upstream_gem_port_attribute_list'
+    DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'downstream_gem_port_attribute_list'
+    ONU = 'onu'
+    UNI = 'uni'
+    MAX_GEM_PAYLOAD_SIZE = 'max_gem_payload_size'
+    DIRECTION = 'direction'
+    ADDITIONAL_BW = 'additional_bw'
+    PRIORITY = 'priority'
+    Q_SCHED_POLICY = 'q_sched_policy'
+    WEIGHT = 'weight'
+    PBIT_MAP = 'pbit_map'
+    DISCARD_CONFIG = 'discard_config'
+    MAX_THRESHOLD = 'max_threshold'
+    MIN_THRESHOLD = 'min_threshold'
+    MAX_PROBABILITY = 'max_probability'
+    DISCARD_POLICY = 'discard_policy'
+    PRIORITY_Q = 'priority_q'
+    SCHEDULING_POLICY = 'scheduling_policy'
+    MAX_Q_SIZE = 'max_q_size'
+    AES_ENCRYPTION = 'aes_encryption'
+
+    def __init__(self, resource_mgr):
+        try:
+            self.args = registry('main').get_args()
+            self.resource_mgr = resource_mgr
+
+            if self.args.backend == 'etcd':
+                # KV store's IP Address and PORT
+                host, port = self.args.etcd.split(':', 1)
+                self._kv_store = EtcdStore(
+                    host, port,
+                    TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX)
+            elif self.args.backend == 'consul':
+                # KV store's IP Address and PORT
+                host, port = self.args.consul.split(':', 1)
+                self._kv_store = ConsulStore(
+                    host, port,
+                    TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX)
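+            # NOTE: if args.backend is neither 'etcd' nor 'consul',
+            # self._kv_store is never assigned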
+
+            # self.tech_profile_instance_store = dict()
+        except Exception as e:
+            log.exception("exception-in-init")
+            raise Exception(e)
+
+    class DefaultTechProfile(object):
+        def __init__(self, name, **kwargs):
+            self.name = name
+            self.profile_type = kwargs[TechProfile.PROFILE_TYPE]
+            self.version = kwargs[TechProfile.VERSION]
+            self.num_gem_ports = kwargs[TechProfile.NUM_GEM_PORTS]
+            self.instance_control = kwargs[TechProfile.INSTANCE_CONTROL]
+            self.us_scheduler = kwargs[TechProfile.US_SCHEDULER]
+            self.ds_scheduler = kwargs[TechProfile.DS_SCHEDULER]
+            self.upstream_gem_port_attribute_list = kwargs[
+                TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+            self.downstream_gem_port_attribute_list = kwargs[
+                TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+
+        def to_json(self):
+            return json.dumps(self, default=lambda o: o.__dict__,
+                              indent=4)
+
+    def get_tp_path(self, table_id, uni_port_name):
+        return TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
+            self.resource_mgr.technology, table_id, uni_port_name)
+
+    def create_tech_profile_instance(self, table_id, uni_port_name, intf_id):
+        tech_profile_instance = None
+        try:
+            # Get tech profile from kv store
+            tech_profile = self._get_tech_profile_from_kv_store(table_id)
+            path = self.get_tp_path(table_id, uni_port_name)
+
+            if tech_profile is not None:
+                tech_profile = self._get_tech_profile(tech_profile)
+                log.debug(
+                    "Created-tech-profile-instance-with-values-from-kvstore")
+            else:
+                tech_profile = self._default_tech_profile()
+                log.debug(
+                    "Created-tech-profile-instance-with-default-values")
+
+            tech_profile_instance = TechProfileInstance(
+                uni_port_name, tech_profile, self.resource_mgr, intf_id)
+            self._add_tech_profile_instance(path,
+                                            tech_profile_instance.to_json())
+        except Exception as e:
+            log.exception("Create-tech-profile-instance-failed", exception=e)
+
+        return tech_profile_instance
+
+    def get_tech_profile_instance(self, table_id, uni_port_name):
+        # path to fetch tech profile instance json from kv store
+        path = TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
+            self.resource_mgr.technology, table_id, uni_port_name)
+
+        try:
+            tech_profile_instance = self._kv_store[path]
+            log.debug("Tech-profile-instance-present-in-kvstore", path=path,
+                      tech_profile_instance=tech_profile_instance)
+
+            # Parse JSON into an object with attributes corresponding to dict keys.
+            tech_profile_instance = json.loads(tech_profile_instance,
+                                               object_hook=lambda d:
+                                               namedtuple('tech_profile_instance',
+                                                          d.keys())(*d.values()))
+            log.debug("Tech-profile-instance-after-json-to-object-conversion", path=path,
+                      tech_profile_instance=tech_profile_instance)
+            return tech_profile_instance
+        except BaseException as e:
+            log.debug("Tech-profile-instance-not-present-in-kvstore",
+                      path=path, tech_profile_instance=None, exception=e)
+            return None
+
+    def delete_tech_profile_instance(self, tp_path):
+
+        try:
+            del self._kv_store[tp_path]
+            log.debug("Delete-tech-profile-instance-success", path=tp_path)
+            return True
+        except Exception as e:
+            log.debug("Delete-tech-profile-instance-failed", path=tp_path,
+                      exception=e)
+            return False
+
+    def _get_tech_profile_from_kv_store(self, table_id):
+        """
+        Get tech profile from kv store.
+
+        :param table_id: reference to get tech profile
+        :return: tech profile if present in kv store else None
+        """
+        # get tech profile from kv store
+        path = TechProfile.TECH_PROFILE_PATH.format(self.resource_mgr.technology,
+                                                    table_id)
+        try:
+            tech_profile = self._kv_store[path]
+            if tech_profile != '':
+                log.debug("Get-tech-profile-success", tech_profile=tech_profile)
+                return json.loads(tech_profile)
+                # return ast.literal_eval(tech_profile)
+        except KeyError as e:
+            log.info("Get-tech-profile-failed", exception=e)
+            return None
+
+    def _default_tech_profile(self):
+        # Default tech profile
+        upstream_gem_port_attribute_list = list()
+        downstream_gem_port_attribute_list = list()
+        for pbit in TechProfile.pbits:
+            upstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=pbit,
+                                 discard_config=DiscardConfig()))
+            downstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=pbit,
+                                 discard_config=DiscardConfig()))
+
+        return TechProfile.DefaultTechProfile(
+            TechProfile.DEFAULT_TECH_PROFILE_NAME,
+            profile_type=self.resource_mgr.technology,
+            version=TechProfile.DEFAULT_VERSION,
+            num_gem_ports=TechProfile.DEFAULT_GEMPORTS_COUNT,
+            instance_control=InstanceControl(),
+            us_scheduler=Scheduler(direction=Direction.UPSTREAM.name),
+            ds_scheduler=Scheduler(direction=Direction.DOWNSTREAM.name),
+            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
+            downstream_gem_port_attribute_list=downstream_gem_port_attribute_list)
+
+    @staticmethod
+    def _get_tech_profile(tech_profile):
+        # Tech profile fetched from kv store
+        instance_control = tech_profile[TechProfile.INSTANCE_CONTROL]
+        instance_control = InstanceControl(
+            onu=instance_control[TechProfile.ONU],
+            uni=instance_control[TechProfile.UNI],
+            max_gem_payload_size=instance_control[
+                TechProfile.MAX_GEM_PAYLOAD_SIZE])
+
+        us_scheduler = tech_profile[TechProfile.US_SCHEDULER]
+        us_scheduler = Scheduler(direction=us_scheduler[TechProfile.DIRECTION],
+                                 additional_bw=us_scheduler[
+                                     TechProfile.ADDITIONAL_BW],
+                                 priority=us_scheduler[TechProfile.PRIORITY],
+                                 weight=us_scheduler[TechProfile.WEIGHT],
+                                 q_sched_policy=us_scheduler[
+                                     TechProfile.Q_SCHED_POLICY])
+        ds_scheduler = tech_profile[TechProfile.DS_SCHEDULER]
+        ds_scheduler = Scheduler(direction=ds_scheduler[TechProfile.DIRECTION],
+                                 additional_bw=ds_scheduler[
+                                     TechProfile.ADDITIONAL_BW],
+                                 priority=ds_scheduler[TechProfile.PRIORITY],
+                                 weight=ds_scheduler[TechProfile.WEIGHT],
+                                 q_sched_policy=ds_scheduler[
+                                     TechProfile.Q_SCHED_POLICY])
+
+        upstream_gem_port_attribute_list = list()
+        downstream_gem_port_attribute_list = list()
+        us_gemport_attr_list = tech_profile[
+            TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+        for gem_attr in us_gemport_attr_list:
+            discard_config = gem_attr[TechProfile.DISCARD_CONFIG]
+            upstream_gem_port_attribute_list.append(
+                GemPortAttribute(
+                    pbit_map=gem_attr[TechProfile.PBIT_MAP],
+                    discard_config=DiscardConfig(
+                        max_threshold=discard_config[
+                            TechProfile.MAX_THRESHOLD],
+                        min_threshold=discard_config[
+                            TechProfile.MIN_THRESHOLD],
+                        max_probability=discard_config[
+                            TechProfile.MAX_PROBABILITY]),
+                    discard_policy=gem_attr[TechProfile.DISCARD_POLICY],
+                    priority_q=gem_attr[TechProfile.PRIORITY_Q],
+                    weight=gem_attr[TechProfile.WEIGHT],
+                    scheduling_policy=gem_attr[TechProfile.SCHEDULING_POLICY],
+                    max_q_size=gem_attr[TechProfile.MAX_Q_SIZE],
+                    aes_encryption=gem_attr[TechProfile.AES_ENCRYPTION]))
+
+        ds_gemport_attr_list = tech_profile[
+            TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+        for gem_attr in ds_gemport_attr_list:
+            discard_config = gem_attr[TechProfile.DISCARD_CONFIG]
+            downstream_gem_port_attribute_list.append(
+                GemPortAttribute(
+                    pbit_map=gem_attr[TechProfile.PBIT_MAP],
+                    discard_config=DiscardConfig(
+                        max_threshold=discard_config[
+                            TechProfile.MAX_THRESHOLD],
+                        min_threshold=discard_config[
+                            TechProfile.MIN_THRESHOLD],
+                        max_probability=discard_config[
+                            TechProfile.MAX_PROBABILITY]),
+                    discard_policy=gem_attr[TechProfile.DISCARD_POLICY],
+                    priority_q=gem_attr[TechProfile.PRIORITY_Q],
+                    weight=gem_attr[TechProfile.WEIGHT],
+                    scheduling_policy=gem_attr[TechProfile.SCHEDULING_POLICY],
+                    max_q_size=gem_attr[TechProfile.MAX_Q_SIZE],
+                    aes_encryption=gem_attr[TechProfile.AES_ENCRYPTION]))
+
+        return TechProfile.DefaultTechProfile(
+            tech_profile[TechProfile.NAME],
+            profile_type=tech_profile[TechProfile.PROFILE_TYPE],
+            version=tech_profile[TechProfile.VERSION],
+            num_gem_ports=tech_profile[TechProfile.NUM_GEM_PORTS],
+            instance_control=instance_control,
+            us_scheduler=us_scheduler,
+            ds_scheduler=ds_scheduler,
+            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
+            downstream_gem_port_attribute_list=
+            downstream_gem_port_attribute_list)
+
+    def _add_tech_profile_instance(self, path, tech_profile_instance):
+        """
+        Add tech profile to kv store.
+
+        :param path: path to add tech profile
+        :param tech_profile_instance: tech profile instance to be added
+        """
+        try:
+            self._kv_store[path] = str(tech_profile_instance)
+            log.debug("Add-tech-profile-instance-success", path=path,
+                      tech_profile_instance=tech_profile_instance)
+            return True
+        except BaseException as e:
+            log.exception("Add-tech-profile-instance-failed", path=path,
+                          tech_profile_instance=tech_profile_instance,
+                          exception=e)
+        return False
+
+    @staticmethod
+    def get_us_scheduler(tech_profile_instance):
+        # upstream scheduler
+        us_scheduler = openolt_pb2.Scheduler(
+            direction=TechProfile.get_parameter(
+                'direction', tech_profile_instance.us_scheduler.direction),
+            additional_bw=TechProfile.get_parameter(
+                'additional_bw',
+                tech_profile_instance.us_scheduler.additional_bw),
+            priority=tech_profile_instance.us_scheduler.priority,
+            weight=tech_profile_instance.us_scheduler.weight,
+            sched_policy=TechProfile.get_parameter(
+                'sched_policy',
+                tech_profile_instance.us_scheduler.q_sched_policy))
+
+        return us_scheduler
+
+    @staticmethod
+    def get_ds_scheduler(tech_profile_instance):
+        ds_scheduler = openolt_pb2.Scheduler(
+            direction=TechProfile.get_parameter(
+                'direction', tech_profile_instance.ds_scheduler.direction),
+            additional_bw=TechProfile.get_parameter(
+                'additional_bw',
+                tech_profile_instance.ds_scheduler.additional_bw),
+            priority=tech_profile_instance.ds_scheduler.priority,
+            weight=tech_profile_instance.ds_scheduler.weight,
+            sched_policy=TechProfile.get_parameter(
+                'sched_policy',
+                tech_profile_instance.ds_scheduler.q_sched_policy))
+
+        return ds_scheduler
+
+    @staticmethod
+    def get_tconts(tech_profile_instance, us_scheduler=None, ds_scheduler=None):
+        if us_scheduler is None:
+            us_scheduler = TechProfile.get_us_scheduler(tech_profile_instance)
+        if ds_scheduler is None:
+            ds_scheduler = TechProfile.get_ds_scheduler(tech_profile_instance)
+
+        tconts = [
+            openolt_pb2.Tcont(
+                direction=TechProfile.get_parameter(
+                    'direction', tech_profile_instance.us_scheduler.direction),
+                alloc_id=tech_profile_instance.us_scheduler.alloc_id,
+                scheduler=us_scheduler),
+            openolt_pb2.Tcont(
+                direction=TechProfile.get_parameter(
+                    'direction', tech_profile_instance.ds_scheduler.direction),
+                alloc_id=tech_profile_instance.ds_scheduler.alloc_id,
+                scheduler=ds_scheduler)]
+
+        return tconts
+
+    @staticmethod
+    def get_parameter(param_type, param_value):
+        parameter = None
+        try:
+            if param_type == 'direction':
+                if param_value in openolt_pb2.Direction.keys():
+                    parameter = param_value
+            elif param_type == 'discard_policy':
+                if param_value in openolt_pb2.DiscardPolicy.keys():
+                    parameter = param_value
+            elif param_type == 'sched_policy':
+                if param_value in openolt_pb2.SchedulingPolicy.keys():
+                    parameter = param_value
+            elif param_type == 'additional_bw':
+                if param_value in openolt_pb2.AdditionalBW.keys():
+                    parameter = param_value
+        except BaseException as e:
+            log.exception("get-parameter-failed", param_type=param_type,
+                          exception=e)
+        return parameter
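+
+    # Illustrative example (assuming 'UPSTREAM' is a key of
+    # openolt_pb2.Direction; any value with no matching enum key maps to None):
+    #     TechProfile.get_parameter('direction', 'UPSTREAM')  # -> 'UPSTREAM'
+    #     TechProfile.get_parameter('direction', 'SIDEWAYS')  # -> None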
+
+
+class TechProfileInstance(object):
+    def __init__(self, subscriber_identifier, tech_profile, resource_mgr,
+                 intf_id, num_of_tconts=1):
+        if tech_profile is not None:
+            self.subscriber_identifier = subscriber_identifier
+            self.num_of_tconts = num_of_tconts
+            self.num_of_gem_ports = tech_profile.num_gem_ports
+            self.name = tech_profile.name
+            self.profile_type = tech_profile.profile_type
+            self.version = tech_profile.version
+            self.instance_control = tech_profile.instance_control
+
+            # TODO: num_of_tconts is fixed to 1 per TP instance for now;
+            # this may change in the future
+            assert (num_of_tconts == 1)
+            # Get alloc id and gemport id using resource manager
+            alloc_id = resource_mgr.get_resource_id(intf_id,
+                                                    'ALLOC_ID',
+                                                    num_of_tconts)
+            gem_ports = resource_mgr.get_resource_id(intf_id,
+                                                     'GEMPORT_ID',
+                                                     self.num_of_gem_ports)
+
+            # Normalize to a list; the resource manager returns a single id
+            # when one gem port was requested and a list otherwise
+            if isinstance(gem_ports, int):
+                gemport_list = [gem_ports]
+            elif isinstance(gem_ports, list):
+                gemport_list = list(gem_ports)
+            else:
+                raise Exception("invalid-type")
+
+            self.us_scheduler = TechProfileInstance.IScheduler(
+                alloc_id, tech_profile.us_scheduler)
+            self.ds_scheduler = TechProfileInstance.IScheduler(
+                alloc_id, tech_profile.ds_scheduler)
+
+            self.upstream_gem_port_attribute_list = list()
+            self.downstream_gem_port_attribute_list = list()
+            for i in range(self.num_of_gem_ports):
+                self.upstream_gem_port_attribute_list.append(
+                    TechProfileInstance.IGemPortAttribute(
+                        gemport_list[i],
+                        tech_profile.upstream_gem_port_attribute_list[i]))
+                self.downstream_gem_port_attribute_list.append(
+                    TechProfileInstance.IGemPortAttribute(
+                        gemport_list[i],
+                        tech_profile.downstream_gem_port_attribute_list[i]))
+
+    class IScheduler(Scheduler):
+        def __init__(self, alloc_id, scheduler):
+            super(TechProfileInstance.IScheduler, self).__init__(
+                scheduler.direction, scheduler.additional_bw,
+                scheduler.priority,
+                scheduler.weight, scheduler.q_sched_policy)
+            self.alloc_id = alloc_id
+
+    class IGemPortAttribute(GemPortAttribute):
+        def __init__(self, gemport_id, gem_port_attribute):
+            super(TechProfileInstance.IGemPortAttribute, self).__init__(
+                gem_port_attribute.pbit_map, gem_port_attribute.discard_config,
+                gem_port_attribute.aes_encryption,
+                gem_port_attribute.scheduling_policy,
+                gem_port_attribute.priority_q, gem_port_attribute.weight,
+                gem_port_attribute.max_q_size,
+                gem_port_attribute.discard_policy)
+            self.gemport_id = gemport_id
+
+    def to_json(self):
+        return json.dumps(self, default=lambda o: o.__dict__,
+                          indent=4)
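+
+# Illustrative usage sketch (tech_profile, resource_mgr and intf_id are
+# assumed to come from the surrounding adapter code):
+#
+#     tp_instance = TechProfileInstance('subscriber-1', tech_profile,
+#                                       resource_mgr, intf_id)
+#     json_blob = tp_instance.to_json()  # JSON string stored under the TP path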
diff --git a/python/common/utils/__init__.py b/python/common/utils/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/utils/asleep.py b/python/common/utils/asleep.py
new file mode 100644
index 0000000..10d1ce3
--- /dev/null
+++ b/python/common/utils/asleep.py
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+""" Async sleep (asleep) method and other twisted goodies """
+
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+
+
+def asleep(dt):
+    """
+    Async (event driven) wait for given time period (in seconds)
+    :param dt: Delay in seconds
+    :return: Deferred to be fired with value None when time expires.
+    """
+    d = Deferred()
+    reactor.callLater(dt, lambda: d.callback(None))
+    return d
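+
+# Illustrative usage sketch: a non-blocking delay inside an inlineCallbacks
+# generator; the reactor keeps servicing other events while waiting.
+# (poll_status is a hypothetical work function.)
+#
+#     from twisted.internet.defer import inlineCallbacks
+#
+#     @inlineCallbacks
+#     def poll_loop():
+#         while True:
+#             poll_status()
+#             yield asleep(5)  # resumes ~5 seconds later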
diff --git a/python/common/utils/consulhelpers.py b/python/common/utils/consulhelpers.py
new file mode 100644
index 0000000..df4dd58
--- /dev/null
+++ b/python/common/utils/consulhelpers.py
@@ -0,0 +1,178 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some consul related convenience functions
+"""
+
+from structlog import get_logger
+from consul import Consul
+from random import randint
+from common.utils.nethelpers import get_my_primary_local_ipv4
+
+log = get_logger()
+
+
+def connect_to_consult(consul_endpoint):
+    log.debug('getting-service-endpoint', consul=consul_endpoint)
+
+    host = consul_endpoint.split(':')[0].strip()
+    port = int(consul_endpoint.split(':')[1].strip())
+
+    return Consul(host=host, port=port)
+
+
+def verify_all_services_healthy(consul_endpoint, service_name=None,
+                                number_of_expected_services=None):
+    """
+    Verify in consul if any service is healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service to check, optional
+    :param number_of_expected_services number of services to check for, optional
+    :return: true if healthy, false otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+            len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
+def get_all_services(consul_endpoint):
+    log.debug('getting-service-verify-health')
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.services()
+
+    return services
+
+
+def get_all_instances_of_service(consul_endpoint, service_name):
+    log.debug('getting-all-instances-of-service', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    for service in services:
+        log.debug('service',
+                  name=service['ServiceName'],
+                  serviceid=service['ServiceID'],
+                  serviceport=service['ServicePort'],
+                  createindex=service['CreateIndex'])
+
+    return services
+
+
+def get_endpoint_from_consul(consul_endpoint, service_name):
+    """
+    Get endpoint of service_name from consul.
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service for which endpoint
+                         needs to be found.
+    :return: service endpoint if available, else exit.
+    """
+    log.debug('getting-service-info', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    if len(services) == 0:
+        raise Exception(
+            'Cannot find service {} in consul'.format(service_name))
+
+    """ Get host IPV4 address
+    """
+    local_ipv4 = get_my_primary_local_ipv4()
+    """ If host IP address from where the request came in matches
+        the IP address of the requested service's host IP address,
+        pick the endpoint
+    """
+    for i in range(len(services)):
+        service = services[i]
+        if service['ServiceAddress'] == local_ipv4:
+            log.debug("picking address locally")
+            endpoint = '{}:{}'.format(service['ServiceAddress'],
+                                      service['ServicePort'])
+            return endpoint
+
+    """ If service is not available locally, picak a random
+        endpoint for the service from the list
+    """
+    service = services[randint(0, len(services) - 1)]
+    endpoint = '{}:{}'.format(service['ServiceAddress'],
+                              service['ServicePort'])
+
+    return endpoint
+
+
+def get_healthy_instances(consul_endpoint, service_name=None,
+                          number_of_expected_services=None):
+    """
+    Verify in consul if any service is healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service to check, optional
+    :param number_of_expected_services number of services to check for, optional
+    :return: true if healthy, false otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+            len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
+if __name__ == '__main__':
+    # print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
+    # print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
+    # print get_healthy_instances('10.100.198.220:8500')
+    get_all_instances_of_service('10.100.198.220:8500', 'voltha-grpc')
diff --git a/python/common/utils/deferred_utils.py b/python/common/utils/deferred_utils.py
new file mode 100644
index 0000000..3c55c1a
--- /dev/null
+++ b/python/common/utils/deferred_utils.py
@@ -0,0 +1,56 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.internet.error import AlreadyCalled
+
+
+class TimeOutError(Exception): pass
+
+
+class DeferredWithTimeout(Deferred):
+    """
+    Deferred with a timeout. If neither the callback nor the errback method
+    is called within the given time, the deferred's errback will be called
+    with a TimeOutError() exception.
+
+    All other uses are the same as for Deferred().
+    """
+    def __init__(self, timeout=1.0):
+        Deferred.__init__(self)
+        self._timeout = timeout
+        self.timer = reactor.callLater(timeout, self.timed_out)
+
+    def timed_out(self):
+        self.errback(
+            TimeOutError('timed out after {} seconds'.format(self._timeout)))
+
+    def callback(self, result):
+        self._cancel_timer()
+        return Deferred.callback(self, result)
+
+    def errback(self, fail):
+        self._cancel_timer()
+        return Deferred.errback(self, fail)
+
+    def cancel(self):
+        self._cancel_timer()
+        return Deferred.cancel(self)
+
+    def _cancel_timer(self):
+        try:
+            self.timer.cancel()
+        except AlreadyCalled:
+            pass
+
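+# Illustrative usage sketch: fail a pending operation unless it completes
+# within 3 seconds (on_result and on_timeout are hypothetical handlers).
+#
+#     d = DeferredWithTimeout(timeout=3.0)
+#     d.addCallbacks(on_result, on_timeout)
+#     # ... some code eventually calls d.callback(result); if that does not
+#     # happen within 3s, on_timeout is fired with a TimeOutError failure.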
diff --git a/python/common/utils/dockerhelpers.py b/python/common/utils/dockerhelpers.py
new file mode 100644
index 0000000..4620aef
--- /dev/null
+++ b/python/common/utils/dockerhelpers.py
@@ -0,0 +1,75 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some docker related convenience functions
+"""
+from datetime import datetime
+from concurrent.futures import ThreadPoolExecutor
+
+import os
+import socket
+from structlog import get_logger
+
+from docker import Client, errors
+
+
+docker_socket = os.environ.get('DOCKER_SOCK', 'unix://tmp/docker.sock')
+log = get_logger()
+
+def get_my_containers_name():
+    """
+    Return the name of the docker container in which this process is running.
+    To look up the container name, we use the container ID extracted from the
+    $HOSTNAME environment variable (which is set by docker conventions).
+    :return: String with the docker container name (or None if any issue is
+             encountered)
+    """
+    my_container_id = os.environ.get('HOSTNAME', None)
+
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(my_container_id)
+
+    except Exception as e:
+        log.exception('failed', my_container_id=my_container_id, e=e)
+        raise
+
+    name = info['Name'].lstrip('/')
+
+    return name
+
+def get_all_running_containers():
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        containers = docker_cli.containers()
+
+    except Exception as e:
+        log.exception('failed', e=e)
+        raise
+
+    return containers
+
+def inspect_container(id):
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(id)
+    except Exception as e:
+        log.exception('failed-inspect-container', id=id, e=e)
+        raise
+
+    return info
+
diff --git a/python/common/utils/grpc_utils.py b/python/common/utils/grpc_utils.py
new file mode 100644
index 0000000..8df630e
--- /dev/null
+++ b/python/common/utils/grpc_utils.py
@@ -0,0 +1,109 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Utilities to handle gRPC server and client side code in a Twisted environment
+"""
+import structlog
+from concurrent.futures import Future
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.python.threadable import isInIOThread
+
+
+log = structlog.get_logger()
+
+
+def twisted_async(func):
+    """
+    This decorator can be used to implement a gRPC method on the twisted
+    thread, allowing asynchronous programming in Twisted while serving
+    a gRPC call.
+
+    gRPC methods normally are called on the futures.ThreadPool threads,
+    so these methods cannot directly use Twisted protocol constructs.
+    If the implementation of the methods needs to touch Twisted, it is
+    safer (or mandatory) to wrap the method with this decorator, which will
+    call the inner method on the Twisted reactor thread and ensure that the
+    result is passed back to the calling thread.
+
+    Example usage:
+
+    When implementing a gRPC server, typical pattern is:
+
+    class SpamService(SpamServicer):
+
+        def GetBadSpam(self, request, context):
+            '''this is called from a ThreadPoolExecutor thread'''
+            # generally unsafe to make Twisted calls
+
+        @twisted_async
+        def GetSpamSafely(self, request, context):
+            '''this method is now executed on the Twisted main thread'''
+            # safe to call any Twisted protocol functions
+
+        @twisted_async
+        @inlineCallbacks
+        def GetAsyncSpam(self, request, context):
+            '''this generator can use inlineCallbacks Twisted style'''
+            result = yield some_async_twisted_call(request)
+            returnValue(result)
+
+    """
+    def in_thread_wrapper(*args, **kw):
+
+        if isInIOThread():
+
+            return func(*args, **kw)
+
+        f = Future()
+
+        def twisted_wrapper():
+            try:
+                d = func(*args, **kw)
+                if isinstance(d, Deferred):
+
+                    # set_result/set_exception already mark the future
+                    # as done, so no explicit completion call is needed
+                    def _done(result):
+                        f.set_result(result)
+
+                    def _error(e):
+                        f.set_exception(e)
+
+                    d.addCallback(_done)
+                    d.addErrback(_error)
+
+                else:
+                    f.set_result(d)
+
+            except Exception as e:
+                f.set_exception(e)
+
+        reactor.callFromThread(twisted_wrapper)
+        try:
+            result = f.result()
+        except Exception as e:
+            log.exception(e=e, func=func, args=args, kw=kw)
+            raise
+
+        return result
+
+    return in_thread_wrapper
+
+
diff --git a/python/common/utils/id_generation.py b/python/common/utils/id_generation.py
new file mode 100644
index 0000000..e0fea1c
--- /dev/null
+++ b/python/common/utils/id_generation.py
@@ -0,0 +1,116 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# """ ID generation utils """
+
+from uuid import uuid4
+
+
+BROADCAST_CORE_ID=hex(0xFFFF)[2:]
+
+def get_next_core_id(current_id_in_hex_str):
+    """
+    :param current_id_in_hex_str: a hex string of the maximum core id
+    assigned so far, without the leading 0x characters
+    :return: current_id_in_hex_str + 1 as a hex string
+    """
+    if not current_id_in_hex_str:
+        return '0001'
+    else:
+        return format(int(current_id_in_hex_str, 16) + 1, '04x')
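+
+# Illustrative examples: core ids advance through plain hex increments.
+#     get_next_core_id('')     -> '0001'
+#     get_next_core_id('0009') -> '000a'
+#     get_next_core_id('00ff') -> '0100'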
+
+
+def create_cluster_logical_device_ids(core_id, switch_id):
+    """
+    Creates a logical device id and an OpenFlow datapath id that are unique
+    across the Voltha cluster.
+    The returned logical device id represents a 64 bit integer where the
+    lower 48 bits are the switch id and the upper 16 bits are the core id.
+    For the datapath id the core id is set to '0000' as it is not used for
+    voltha core routing.
+    :param core_id: string
+    :param switch_id: int
+    :return: cluster logical device id and OpenFlow datapath id
+    """
+    switch_id = format(switch_id, '012x')
+    core_in_hex = format(int(core_id, 16), '04x')
+    ld_id = '{}{}'.format(core_in_hex[-4:], switch_id[-12:])
+    dpid_id = '{}{}'.format('0000', switch_id[-12:])
+    return ld_id, int(dpid_id, 16)
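+
+# Illustrative example: with core_id='0001' and switch_id=1 this returns
+# ('0001000000000001', 1) - the logical device id carries the core id in
+# its upper 16 bits while the datapath id zeroes them out.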
+
+def is_broadcast_core_id(id):
+    assert id and len(id) == 16
+    return id[:4] == BROADCAST_CORE_ID
+
+def create_empty_broadcast_id():
+    """
+    Returns an empty broadcast id (ffff000000000000). The id is used to
+    dispatch xPON objects across all the Voltha instances.
+    :return: An empty broadcast id
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, '0'*12)
+
+def create_cluster_id():
+    """
+    Returns an id that is common across all voltha instances.  The id
+    is a str of 64 bits.  The lower 48 bits refer to an id specific to
+    that object while the upper 16 bits refer to a broadcast core_id
+    :return: A common id across all Voltha instances
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, uuid4().hex[:12])
+
+def create_cluster_device_id(core_id):
+    """
+    Creates a device id that is unique across the Voltha cluster.
+    The device id is a str of 64 bits.  The lower 48 bits refer to the
+    device id while the upper 16 bits refer to the core id.
+    :param core_id: string
+    :return: cluster device id
+    """
+    return '{}{}'.format(format(int(core_id), '04x'), uuid4().hex[:12])
+
+
+def get_core_id_from_device_id(device_id):
+    # Device id is a string and the first 4 characters represent the core_id
+    assert device_id and len(device_id) == 16
+    # Return the leading 4 hex characters
+    return device_id[:4]
+
+
+def get_core_id_from_logical_device_id(logical_device_id):
+    """ 
+    Logical Device id is a string and the first 4 characters represent the 
+    core_id
+    :param logical_device_id: 
+    :return: core_id string
+    """
+    assert logical_device_id and len(logical_device_id) == 16
+    # Get the leading 4 hexs and remove leading 0's
+    return logical_device_id[:4]
+
+
+def get_core_id_from_datapath_id(datapath_id):
+    """
+    datapath id is a uint64 where:
+        - low 48 bits -> switch_id
+        - high 16 bits -> core id
+    :param datapath_id: 
+    :return: core_id string
+    """
+    assert datapath_id
+    # Get the hex string and remove the '0x' prefix
+    id_in_hex_str = hex(datapath_id)[2:]
+    assert len(id_in_hex_str) > 12
+    return id_in_hex_str[:-12]
diff --git a/python/common/utils/indexpool.py b/python/common/utils/indexpool.py
new file mode 100644
index 0000000..858cb3a
--- /dev/null
+++ b/python/common/utils/indexpool.py
@@ -0,0 +1,64 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from bitstring import BitArray
+import structlog
+
+log = structlog.get_logger()
+
+class IndexPool(object):
+    def __init__(self, max_entries, offset):
+        self.max_entries = max_entries
+        self.offset = offset
+        self.indices = BitArray(self.max_entries)
+
+    def get_next(self):
+        try:
+            _pos = self.indices.find('0b0')
+            self.indices.set(1, _pos)
+            return self.offset + _pos[0]
+        except IndexError:
+            log.info("exception-fail-to-allocate-id-all-bits-in-use")
+            return None
+
+    def allocate(self, index):
+        try:
+            _pos = index - self.offset
+            if not (0 <= _pos < self.max_entries):
+                log.info("{}-out-of-range".format(index))
+                return None
+            if self.indices[_pos]:
+                log.info("{}-is-already-allocated".format(index))
+                return None
+            self.indices.set(1, _pos)
+            return index
+
+        except IndexError:
+            return None
+
+    def release(self, index):
+        index -= self.offset
+        _pos = (index,)
+        try:
+            self.indices.set(0, _pos)
+        except IndexError:
+            log.info("bit-position-{}-out-of-range".format(index))
+
+    # index or multiple indices to set all of them to 1 - needs to be a tuple
+    def pre_allocate(self, index):
+        if isinstance(index, tuple):
+            _lst = [i - self.offset for i in index]
+            self.indices.set(1, tuple(_lst))
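+
+# Illustrative usage sketch: a pool of 8 ids starting at offset 100.
+#
+#     pool = IndexPool(8, 100)
+#     pool.get_next()            # -> 100 (first free index)
+#     pool.allocate(101)         # -> 101 (claim a specific index)
+#     pool.release(100)          # index 100 becomes available again
+#     pool.pre_allocate((102,))  # bulk-mark indices as used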
diff --git a/python/common/utils/json_format.py b/python/common/utils/json_format.py
new file mode 100644
index 0000000..c18d013
--- /dev/null
+++ b/python/common/utils/json_format.py
@@ -0,0 +1,105 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Monkey patched json_format to allow best effort decoding of Any fields.
+Use the additional flag (strict_any_handling=False) to trigger the
+best-effort behavior. Omit the flag, or just use the original json_format
+module for the strict behavior.
+"""
+
+from google.protobuf import json_format
+
+class _PatchedPrinter(json_format._Printer):
+
+    def __init__(self, including_default_value_fields=False,
+                 preserving_proto_field_name=False,
+                 strict_any_handling=False):
+        super(_PatchedPrinter, self).__init__(including_default_value_fields,
+                                              preserving_proto_field_name)
+        self.strict_any_handling = strict_any_handling
+
+    def _BestEffortAnyMessageToJsonObject(self, msg):
+        try:
+            res = self._AnyMessageToJsonObject(msg)
+        except TypeError:
+            res = self._RegularMessageToJsonObject(msg, {})
+        return res
+
+
+def MessageToDict(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+    """Converts protobuf message to a JSON dictionary.
+
+    Args:
+      message: The protocol buffers message instance to serialize.
+      including_default_value_fields: If True, singular primitive fields,
+          repeated fields, and map fields will always be serialized.  If
+          False, only serialize non-empty fields.  Singular message fields
+          and oneof fields are not affected by this option.
+      preserving_proto_field_name: If True, use the original proto field
+          names as defined in the .proto file. If False, convert the field
+          names to lowerCamelCase.
+      strict_any_handling: If True, conversion will error out (like in the
+          original method) if an Any field is encountered whose packed type
+          is not loaded. If False, the conversion will leave the field
+          un-packed, but otherwise will continue.
+
+    Returns:
+      A dict representation of the JSON formatted protocol buffer message.
+    """
+    printer = _PatchedPrinter(including_default_value_fields,
+                              preserving_proto_field_name,
+                              strict_any_handling=strict_any_handling)
+    # pylint: disable=protected-access
+    return printer._MessageToJsonObject(message)
+
+
+def MessageToJson(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+  """Converts protobuf message to JSON format.
+
+  Args:
+    message: The protocol buffers message instance to serialize.
+    including_default_value_fields: If True, singular primitive fields,
+        repeated fields, and map fields will always be serialized.  If
+        False, only serialize non-empty fields.  Singular message fields
+        and oneof fields are not affected by this option.
+    preserving_proto_field_name: If True, use the original proto field
+        names as defined in the .proto file. If False, convert the field
+        names to lowerCamelCase.
+    strict_any_handling: If True, converion will error out (like in the
+        original method) if an Any field with value for which the Any type
+        is not loaded is encountered. If False, the conversion will leave
+        the field un-packed, but otherwise will continue.
+
+  Returns:
+    A string containing the JSON formatted protocol buffer message.
+  """
+  printer = _PatchedPrinter(including_default_value_fields,
+                            preserving_proto_field_name,
+                            strict_any_handling=strict_any_handling)
+  return printer.ToJsonString(message)
+
+
+json_format._WKTJSONMETHODS['google.protobuf.Any'] = [
+    '_BestEffortAnyMessageToJsonObject',
+    '_ConvertAnyMessage'
+]
+
+json_format._Printer._BestEffortAnyMessageToJsonObject = \
+    json_format._Printer._AnyMessageToJsonObject
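+
+# Illustrative usage sketch (msg is assumed to be a protobuf message that
+# carries an Any field whose packed type is not loaded locally):
+#
+#     from common.utils.json_format import MessageToDict
+#
+#     d = MessageToDict(msg, strict_any_handling=False)
+#     # the unresolvable Any field is left un-packed instead of raising
+#     # TypeError as the stock json_format would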
diff --git a/python/common/utils/message_queue.py b/python/common/utils/message_queue.py
new file mode 100644
index 0000000..2b4257a
--- /dev/null
+++ b/python/common/utils/message_queue.py
@@ -0,0 +1,89 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from twisted.internet.defer import Deferred
+from twisted.internet.defer import succeed
+
+
+class MessageQueue(object):
+    """
+    An event driven queue, similar to twisted.internet.defer.DeferredQueue
+    but which allows selective dequeuing based on a predicate function.
+    Unlike DeferredQueue, there is no limit on the backlog of messages or
+    on the number of pending waiters.
+    """
+
+    def __init__(self):
+        self.waiting = []  # tuples of (d, predicate)
+        self.queue = []  # messages piling up here if no one is waiting
+
+    def reset(self):
+        """
+        Purge all content as well as waiters (by errback-ing their entries).
+        :return: None
+        """
+        for d, _ in self.waiting:
+            d.errback(Exception('message queue reset() was called'))
+        self.waiting = []
+        self.queue = []
+
+    def _cancelGet(self, d):
+        """
+        Remove a deferred from our waiting list.
+        :param d: The deferred that has been cancelled.
+        :return: None
+        """
+        for i in range(len(self.waiting)):
+            if self.waiting[i][0] is d:
+                self.waiting.pop(i)
+
+    def put(self, obj):
+        """
+        Add an object to this queue
+        :param obj: arbitrary object that will be added to the queue
+        :return: None
+        """
+
+        # if someone is waiting for this, return right away
+        for i in range(len(self.waiting)):
+            d, predicate = self.waiting[i]
+            if predicate is None or predicate(obj):
+                self.waiting.pop(i)
+                d.callback(obj)
+                return
+
+        # otherwise...
+        self.queue.append(obj)
+
+    def get(self, predicate=None):
+        """
+        Attempt to retrieve and remove an object from the queue that
+        matches the optional predicate.
+        :return: Deferred which fires with the next object available.
+        If predicate was provided, only objects for which
+        predicate(obj) is True will be considered.
+        """
+        for i in range(len(self.queue)):
+            msg = self.queue[i]
+            if predicate is None or predicate(msg):
+                self.queue.pop(i)
+                return succeed(msg)
+
+        # there were no matching entries if we got here, so we wait
+        d = Deferred(canceller=self._cancelGet)
+        self.waiting.append((d, predicate))
+        return d
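+
+    # Illustrative usage sketch (handle_omci is a hypothetical handler):
+    #
+    #     q = MessageQueue()
+    #     d = q.get(predicate=lambda m: m.get('type') == 'omci')
+    #     d.addCallback(handle_omci)
+    #     q.put({'type': 'other'})  # queued; predicate does not match
+    #     q.put({'type': 'omci'})   # matches; fires d immediately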
+
+
diff --git a/python/common/utils/nethelpers.py b/python/common/utils/nethelpers.py
new file mode 100644
index 0000000..b17aced
--- /dev/null
+++ b/python/common/utils/nethelpers.py
@@ -0,0 +1,86 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some network related convenience functions
+"""
+
+from netifaces import AF_INET
+
+import netifaces as ni
+import netaddr
+
+
+def _get_all_interfaces():
+    m_interfaces = []
+    for iface in ni.interfaces():
+        m_interfaces.append((iface, ni.ifaddresses(iface)))
+    return m_interfaces
+
+
+def _get_my_primary_interface():
+    gateways = ni.gateways()
+    assert 'default' in gateways, \
+        ("No default gateway on host/container, "
+         "cannot determine primary interface")
+    default_gw_index = gateways['default'].keys()[0]
+    # gateways[default_gw_index] has the format (example):
+    # [('10.15.32.1', 'en0', True)]
+    interface_name = gateways[default_gw_index][0][1]
+    return interface_name
+
+
+def get_my_primary_local_ipv4(inter_core_subnet=None, ifname=None):
+    if not inter_core_subnet:
+        return _get_my_primary_local_ipv4(ifname)
+    # My IP should belong to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            _ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(inter_core_subnet)
+            if _ip >= m_network.first and _ip <= m_network.last:
+                return m_ip
+    return None
+
+
+def get_my_primary_interface(pon_subnet=None):
+    if not pon_subnet:
+        return _get_my_primary_interface()
+    # My interface should have an IP that belongs to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            m_ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(pon_subnet)
+            if m_ip >= m_network.first and m_ip <= m_network.last:
+                return iface
+    return None
+
+
+def _get_my_primary_local_ipv4(ifname=None):
+    try:
+        ifname = get_my_primary_interface() if ifname is None else ifname
+        addresses = ni.ifaddresses(ifname)
+        ipv4 = addresses[AF_INET][0]['addr']
+        return ipv4
+    except Exception:
+        return None
+
+if __name__ == '__main__':
+    print get_my_primary_local_ipv4()
diff --git a/python/common/utils/ordered_weakvalue_dict.py b/python/common/utils/ordered_weakvalue_dict.py
new file mode 100644
index 0000000..9ea739a
--- /dev/null
+++ b/python/common/utils/ordered_weakvalue_dict.py
@@ -0,0 +1,48 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from _weakref import ref
+from weakref import KeyedRef
+from collections import OrderedDict
+
+
+class OrderedWeakValueDict(OrderedDict):
+    """
+    Modified OrderedDict to use weak references as values. Entries disappear
+    automatically if the referred value has no more strong references pointing
+    to it.
+
+    Warning, this is not a complete implementation, only what is needed for
+    now. See test_ordered_weakvalue_dict.py for the tested behavior.
+    """
+    def __init__(self, *args, **kw):
+        def remove(wr, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                super(OrderedWeakValueDict, self).__delitem__(wr.key)
+        self._remove = remove
+        super(OrderedWeakValueDict, self).__init__(*args, **kw)
+
+    def __setitem__(self, key, value):
+        super(OrderedWeakValueDict, self).__setitem__(
+            key, KeyedRef(value, self._remove, key))
+
+    def __getitem__(self, key):
+        o = super(OrderedWeakValueDict, self).__getitem__(key)()
+        if o is None:
+            raise KeyError(key)
+        else:
+            return o
+
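+# Illustrative usage sketch: an entry disappears once its value loses its
+# last strong reference (immediate under CPython refcounting).
+#
+#     class Thing(object):
+#         pass
+#
+#     d = OrderedWeakValueDict()
+#     t = Thing()
+#     d['a'] = t
+#     assert d['a'] is t
+#     del t          # last strong reference gone; entry is removed
+#     # d['a'] now raises KeyError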
diff --git a/python/compose/adapters-openonu.yml b/python/compose/adapters-openonu.yml
new file mode 100644
index 0000000..d151b0c
--- /dev/null
+++ b/python/compose/adapters-openonu.yml
@@ -0,0 +1,42 @@
+---
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: '2'
+
+networks:
+  default:
+    driver: bridge
+
+services:
+
+  adapter_openonu:
+    image: "${REGISTRY}${REPOSITORY}voltha-adapter-openonu${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    command: [
+      "/voltha/python/adapters/brcm_openomci_onu/main.py",
+      "-v",
+      "--name=openonu",
+      "--kafka_adapter=${DOCKER_HOST_IP}:9092",
+      "--kafka_cluster=${DOCKER_HOST_IP}:9092",
+      "--core_topic=rwcore"
+    ]
+    networks:
+    - default
+    restart: unless-stopped
+
diff --git a/python/compose/system-test.yml b/python/compose/system-test.yml
new file mode 100644
index 0000000..872d4e2
--- /dev/null
+++ b/python/compose/system-test.yml
@@ -0,0 +1,175 @@
+---
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: '2'
+
+networks:
+  default:
+    driver: bridge
+
+services:
+
+  zookeeper:
+    image: "wurstmeister/zookeeper:latest"
+    environment:
+      SERVICE_2181_NAME: "zookeeper"
+    ports:
+    - 2181:2181
+    networks:
+    - default
+
+
+  kafka:
+    image: "wurstmeister/kafka:2.11-2.0.1"
+    environment:
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${DOCKER_HOST_IP}:9092
+      KAFKA_LISTENERS: PLAINTEXT://:9092
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      SERVICE_9092_NAME: "kafka"
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    ports:
+     - 9092:9092
+    networks:
+    - default
+
+
+  etcd:
+    image: "quay.io/coreos/etcd:v3.2.9"
+    command: [
+      "etcd",
+      "--name=etcd0",
+      "--advertise-client-urls=http://${DOCKER_HOST_IP}:2379,http://${DOCKER_HOST_IP}:4001",
+      "--listen-client-urls=http://0.0.0.0:2379,http://0.0.0.0:4001",
+      "--initial-advertise-peer-urls=http://${DOCKER_HOST_IP}:2380",
+      "--listen-peer-urls=http://0.0.0.0:2380",
+      "--initial-cluster-token=etcd-cluster-1",
+      "--initial-cluster=etcd0=http://${DOCKER_HOST_IP}:2380",
+      "--initial-cluster-state=new"
+    ]
+    ports:
+    - "2379:2379"
+    - 2380
+    - 4001
+    networks:
+    - default
+
+
+  rw_core:
+    image: voltha-rw-core
+    entrypoint:
+        - /app/rw_core
+        - -kv_store_type=etcd
+        - -kv_store_host=${DOCKER_HOST_IP}
+        - -kv_store_port=2379
+        - -grpc_port=50057
+        - -banner=true
+        - -kafka_adapter_host=${DOCKER_HOST_IP}
+        - -kafka_adapter_port=9092
+        - -kafka_cluster_host=${DOCKER_HOST_IP}
+        - -kafka_cluster_port=9092
+        - -rw_core_topic=rwcore
+        - -kv_store_data_prefix=service/voltha
+        - -in_competing_mode=false
+        - -log_level=0
+    volumes:
+    - "/var/run/docker.sock:/tmp/docker.sock"
+    ports:
+      - 50057:50057
+    networks:
+    - default
+    restart: unless-stopped
+
+
+  cli:
+    image: "${REGISTRY}${REPOSITORY}voltha-cli:latest"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    environment:
+      DOCKER_HOST_IP: "${DOCKER_HOST_IP}"
+    entrypoint:
+    - /voltha/python/cli/setup.sh
+    - -g ${DOCKER_HOST_IP}:50057
+    networks:
+    - default
+    ports:
+    - "5022:22"
+
+
+  ofagent:
+    image: "${REGISTRY}${REPOSITORY}voltha-ofagent:latest"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    command: [
+      "/ofagent/ofagent/main.py",
+      "--consul=${DOCKER_HOST_IP}:8500",
+      "--controller=${DOCKER_HOST_IP}:6653",
+      "--grpc-endpoint=${DOCKER_HOST_IP}:50057",
+      "--instance-id-is-container-name",
+      "-v"
+    ]
+    volumes:
+    - "/var/run/docker.sock:/tmp/docker.sock"
+    networks:
+    - default
+    restart: unless-stopped
+
+
+  adapter_openolt:
+    image: "${REGISTRY}${REPOSITORY}voltha-adapter-openolt${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    command: [
+      "/voltha/python/adapters/openolt/main.py",
+      "-v",
+      "--name=openolt",
+      "--kafka_adapter=${DOCKER_HOST_IP}:9092",
+      "--kafka_cluster=${DOCKER_HOST_IP}:9092",
+      "--core_topic=rwcore"
+    ]
+    networks:
+    - default
+    restart: unless-stopped
+
+
+  adapter_openonu:
+    image: "${REGISTRY}${REPOSITORY}voltha-adapter-openonu${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    command: [
+      "/voltha/voltha/adapters/brcm_openomci_onu/main.py",
+      "-v",
+      "--name=openonu",
+      "--kafka_adapter=${DOCKER_HOST_IP}:9092",
+      "--kafka_cluster=${DOCKER_HOST_IP}:9092",
+      "--core_topic=rwcore"
+    ]
+    networks:
+    - default
+    restart: unless-stopped
+
diff --git a/python/core/__init__.py b/python/core/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/core/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/core/config/__init__.py b/python/core/config/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/core/config/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/core/config/config_backend.py b/python/core/config/config_backend.py
new file mode 100644
index 0000000..d906348
--- /dev/null
+++ b/python/core/config/config_backend.py
@@ -0,0 +1,289 @@
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from consul import Consul, ConsulException
+from common.utils.asleep import asleep
+from requests import ConnectionError
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+import etcd3
+import structlog
+
+
+class ConsulStore(object):
+    """ Config kv store for consul with a cache for quicker subsequent reads
+
+        TODO: This will block the reactor. Should either change
+        whole call stack to yield or put the put/delete transactions into a
+        queue to write later with twisted. Will need a transaction
+        log to ensure we don't lose anything.
+        Making the whole callstack yield is troublesome because other tasks can
+        come in on the side and start modifying things which could be bad.
+    """
+
+    CONNECT_RETRY_INTERVAL_SEC = 1
+    RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+
+    def __init__(self, host, port, path_prefix):
+
+        self.log = structlog.get_logger()
+        self._consul = Consul(host=host, port=port)
+        self.host = host
+        self.port = port
+        self._path_prefix = path_prefix
+        self._cache = {}
+        self.retries = 0
+
+    def make_path(self, key):
+        return '{}/{}'.format(self._path_prefix, key)
+
+    def __getitem__(self, key):
+        if key in self._cache:
+            return self._cache[key]
+        value = self._kv_get(self.make_path(key))
+        if value is not None:
+            # consul turns empty strings to None, so we do the reverse here
+            self._cache[key] = value['Value'] or ''
+            return value['Value'] or ''
+        else:
+            raise KeyError(key)
+
+    def __contains__(self, key):
+        if key in self._cache:
+            return True
+        value = self._kv_get(self.make_path(key))
+        if value is not None:
+            self._cache[key] = value['Value']
+            return True
+        else:
+            return False
+
+    def __setitem__(self, key, value):
+        try:
+            assert isinstance(value, basestring)
+            self._cache[key] = value
+            self._kv_put(self.make_path(key), value)
+        except Exception, e:
+            self.log.exception('cannot-set-item', e=e)
+
+    def __delitem__(self, key):
+        self._cache.pop(key, None)
+        self._kv_delete(self.make_path(key))
+
+    @inlineCallbacks
+    def _backoff(self, msg):
+        wait_time = self.RETRY_BACKOFF[min(self.retries,
+                                           len(self.RETRY_BACKOFF) - 1)]
+        self.retries += 1
+        self.log.error(msg, retry_in=wait_time)
+        yield asleep(wait_time)
+
+    def _redo_consul_connection(self):
+        self._consul = Consul(host=self.host, port=self.port)
+        self._cache.clear()
+
+    def _clear_backoff(self):
+        if self.retries:
+            self.log.info('reconnected-to-consul', after_retries=self.retries)
+            self.retries = 0
+
+    def _get_consul(self):
+        return self._consul
+
+    # Proxy methods for consul with retry support
+    def _kv_get(self, *args, **kw):
+        return self._retry('GET', *args, **kw)
+
+    def _kv_put(self, *args, **kw):
+        return self._retry('PUT', *args, **kw)
+
+    def _kv_delete(self, *args, **kw):
+        return self._retry('DELETE', *args, **kw)
+
+    def _retry(self, operation, *args, **kw):
+        while 1:
+            try:
+                consul = self._get_consul()
+                self.log.debug('consul', consul=consul, operation=operation,
+                               args=args)
+                if operation == 'GET':
+                    index, result = consul.kv.get(*args, **kw)
+                elif operation == 'PUT':
+                    result = consul.kv.put(*args, **kw)
+                elif operation == 'DELETE':
+                    result = consul.kv.delete(*args, **kw)
+                else:
+                    # Default case - consider operation as a function call
+                    result = operation(*args, **kw)
+                self._clear_backoff()
+                break
+            except ConsulException, e:
+                self.log.exception('consul-not-up', e=e)
+                self._backoff('consul-not-up')
+            except ConnectionError, e:
+                self.log.exception('cannot-connect-to-consul', e=e)
+                self._backoff('cannot-connect-to-consul')
+            except Exception, e:
+                self.log.exception(e)
+                self._backoff('unknown-error')
+            self._redo_consul_connection()
+
+        return result
+
+
+class EtcdStore(object):
+    """ Config kv store for etcd with a cache for quicker subsequent reads
+
+        TODO: This will block the reactor. Should either change
+        whole call stack to yield or put the put/delete transactions into a
+        queue to write later with twisted. Will need a transaction
+        log to ensure we don't lose anything.
+        Making the whole callstack yield is troublesome because other tasks can
+        come in on the side and start modifying things which could be bad.
+    """
+
+    CONNECT_RETRY_INTERVAL_SEC = 1
+    RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+
+    def __init__(self, host, port, path_prefix):
+
+        self.log = structlog.get_logger()
+        self._etcd = etcd3.client(host=host, port=port)
+        self.host = host
+        self.port = port
+        self._path_prefix = path_prefix
+        self._cache = {}
+        self.retries = 0
+
+    def make_path(self, key):
+        return '{}/{}'.format(self._path_prefix, key)
+
+    def __getitem__(self, key):
+        if key in self._cache:
+            return self._cache[key]
+        (value, meta) = self._kv_get(self.make_path(key))
+        if value is not None:
+            self._cache[key] = value
+            return value
+        else:
+            raise KeyError(key)
+
+    def __contains__(self, key):
+        if key in self._cache:
+            return True
+        (value, meta) = self._kv_get(self.make_path(key))
+        if value is not None:
+            self._cache[key] = value
+            return True
+        else:
+            return False
+
+    def __setitem__(self, key, value):
+        try:
+            assert isinstance(value, basestring)
+            self._cache[key] = value
+            self._kv_put(self.make_path(key), value)
+        except Exception, e:
+            self.log.exception('cannot-set-item', e=e)
+
+    def __delitem__(self, key):
+        self._cache.pop(key, None)
+        self._kv_delete(self.make_path(key))
+
+    @inlineCallbacks
+    def _backoff(self, msg):
+        wait_time = self.RETRY_BACKOFF[min(self.retries,
+                                           len(self.RETRY_BACKOFF) - 1)]
+        self.retries += 1
+        self.log.error(msg, retry_in=wait_time)
+        yield asleep(wait_time)
+
+    def _redo_etcd_connection(self):
+        self._etcd = etcd3.client(host=self.host, port=self.port)
+        self._cache.clear()
+
+    def _clear_backoff(self):
+        if self.retries:
+            self.log.info('reconnected-to-etcd', after_retries=self.retries)
+            self.retries = 0
+
+    def _get_etcd(self):
+        return self._etcd
+
+    # Proxy methods for etcd with retry support
+    def _kv_get(self, *args, **kw):
+        return self._retry('GET', *args, **kw)
+
+    def _kv_put(self, *args, **kw):
+        return self._retry('PUT', *args, **kw)
+
+    def _kv_delete(self, *args, **kw):
+        return self._retry('DELETE', *args, **kw)
+
+    def _retry(self, operation, *args, **kw):
+
+        # etcd data sometimes contains non-utf8 sequences; replace them
+        # so the values can be logged safely
+        self.log.debug('backend-op',
+                       operation=operation,
+                       args=map(lambda x: unicode(x, 'utf8', 'replace'), args),
+                       kw=kw)
+
+        while 1:
+            try:
+                etcd = self._get_etcd()
+                self.log.debug('etcd', etcd=etcd, operation=operation,
+                               args=map(lambda x: unicode(x, 'utf8', 'replace'), args))
+                if operation == 'GET':
+                    (value, meta) = etcd.get(*args, **kw)
+                    result = (value, meta)
+                elif operation == 'PUT':
+                    result = etcd.put(*args, **kw)
+                elif operation == 'DELETE':
+                    result = etcd.delete(*args, **kw)
+                else:
+                    # Default case - consider operation as a function call
+                    result = operation(*args, **kw)
+                self._clear_backoff()
+                break
+            except Exception, e:
+                self.log.exception(e)
+                self._backoff('unknown-error-with-etcd')
+            self._redo_etcd_connection()
+
+        return result
+
+
+def load_backend(store_id, store_prefix, args):
+    """ Return the kv store backend based on the command line arguments
+    """
+
+    def load_consul_store():
+        instance_core_store_prefix = '{}/{}'.format(store_prefix, store_id)
+
+        host, port = args.consul.split(':', 1)
+        return ConsulStore(host, int(port), instance_core_store_prefix)
+
+    def load_etcd_store():
+        instance_core_store_prefix = '{}/{}'.format(store_prefix, store_id)
+
+        host, port = args.etcd.split(':', 1)
+        return EtcdStore(host, int(port), instance_core_store_prefix)
+
+    loaders = {
+        'none': lambda: None,
+        'consul': load_consul_store,
+        'etcd': load_etcd_store
+    }
+
+    return loaders[args.backend]()
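As a usage sketch for the loader above: both stores behave like a dict with a
write-through cache, and load_backend() picks one based on args.backend. The
Namespace below is a hypothetical stand-in for the real command-line arguments.

    from argparse import Namespace

    from voltha.core.config.config_backend import load_backend

    # Hypothetical arguments; normally populated by the component's arg parser.
    args = Namespace(backend='etcd', etcd='localhost:2379',
                     consul='localhost:8500')
    store = load_backend('core_1', 'service/voltha', args)

    store['config/log_level'] = '0'     # write-through: cache + kv store
    assert 'config/log_level' in store
    print(store['config/log_level'])
    del store['config/log_level']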
diff --git a/python/core/config/config_branch.py b/python/core/config/config_branch.py
new file mode 100644
index 0000000..207818b
--- /dev/null
+++ b/python/core/config/config_branch.py
@@ -0,0 +1,53 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Class to hold revisions, the latest revision, etc., for a config node; used
+for the active committed revisions or for revisions that are part of a
+transaction.
+"""
+
+from collections import OrderedDict
+from weakref import WeakValueDictionary
+
+
+class ConfigBranch(object):
+
+    __slots__ = (
+        '_node',  # ref to node
+        '_txid',  # txid for this branch (None for the committed branch)
+        '_origin',  # _latest at time of branching on default branch
+        '_revs',  # dict of rev-hash to ref of ConfigRevision
+        '_latest',  # ref to latest committed ConfigRevision
+        '__weakref__'
+    )
+
+    def __init__(self, node, txid=None, origin=None, auto_prune=True):
+        self._node = node
+        self._txid = txid
+        self._origin = origin
+        self._revs = WeakValueDictionary() if auto_prune else OrderedDict()
+        self._latest = origin
+
+    def __getitem__(self, hash):
+        return self._revs[hash]
+
+    @property
+    def latest(self):
+        return self._latest
+
+    @property
+    def origin(self):
+        return self._origin
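A small illustration of the auto_prune switch above: with pruning enabled,
_revs is a WeakValueDictionary, so revisions disappear once nothing else
references them, while a transaction branch built with auto_prune=False keeps
every revision in an OrderedDict. The stub node is hypothetical.

    from voltha.core.config.config_branch import ConfigBranch

    class _StubNode(object):
        pass  # hypothetical stand-in for a real ConfigNode

    committed = ConfigBranch(_StubNode())                  # WeakValueDictionary
    txb = ConfigBranch(_StubNode(), txid='tx1', auto_prune=False)  # OrderedDict
    print(type(committed._revs).__name__, type(txb._revs).__name__)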
diff --git a/python/core/config/config_event_bus.py b/python/core/config/config_event_bus.py
new file mode 100644
index 0000000..e56d77a
--- /dev/null
+++ b/python/core/config/config_event_bus.py
@@ -0,0 +1,66 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import structlog
+from enum import Enum
+from google.protobuf.json_format import MessageToDict
+from google.protobuf.message import Message
+from simplejson import dumps
+
+from common.event_bus import EventBusClient
+from voltha.core.config.config_proxy import CallbackType
+from voltha.protos import third_party
+from voltha.protos.events_pb2 import ConfigEvent, ConfigEventType
+
+IGNORED_CALLBACKS = [CallbackType.PRE_ADD, CallbackType.GET,
+                     CallbackType.POST_LISTCHANGE, CallbackType.PRE_REMOVE,
+                     CallbackType.PRE_UPDATE]
+
+log = structlog.get_logger()
+
+class ConfigEventBus(object):
+
+    __slots__ = (
+        '_event_bus_client',  # The event bus client used to publish events.
+        '_topic'  # the topic to publish to
+    )
+
+    def __init__(self):
+        self._event_bus_client = EventBusClient()
+        self._topic = 'model-change-events'
+
+    def advertise(self, type, data, hash=None):
+        if type in IGNORED_CALLBACKS:
+            log.info('Ignoring event {} with data {}'.format(type, data))
+            return
+
+        if type is CallbackType.POST_ADD:
+            kind = ConfigEventType.add
+        elif type is CallbackType.POST_REMOVE:
+            kind = ConfigEventType.remove
+        else:
+            kind = ConfigEventType.update
+
+        if isinstance(data, Message):
+            msg = dumps(MessageToDict(data, True, True))
+        else:
+            msg = data
+
+        event = ConfigEvent(
+            type=kind,
+            hash=hash,
+            data=msg
+        )
+
+        self._event_bus_client.publish(self._topic, event)
+
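As a quick sketch of pushing a change notice through this bus: POST_ADD is not
in IGNORED_CALLBACKS and maps to ConfigEventType.add inside advertise(), and
non-Message data passes through untouched, so a pre-serialized JSON string
works. The payload and hash below are made up for illustration.

    from voltha.core.config.config_event_bus import ConfigEventBus
    from voltha.core.config.config_proxy import CallbackType

    bus = ConfigEventBus()
    # Publishes a ConfigEvent(type=add, ...) on the 'model-change-events' topic.
    bus.advertise(CallbackType.POST_ADD, '{"id": "42"}', hash='0123456789ab')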
diff --git a/python/core/config/config_node.py b/python/core/config/config_node.py
new file mode 100644
index 0000000..ab73484
--- /dev/null
+++ b/python/core/config/config_node.py
@@ -0,0 +1,617 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from copy import copy
+
+from jsonpatch import JsonPatch
+from jsonpatch import make_patch
+
+from common.utils.json_format import MessageToDict
+from voltha.core.config.config_branch import ConfigBranch
+from voltha.core.config.config_event_bus import ConfigEventBus
+from voltha.core.config.config_proxy import CallbackType, ConfigProxy
+from voltha.core.config.config_rev import is_proto_message, children_fields, \
+    ConfigRevision, access_rights
+from voltha.core.config.config_rev_persisted import PersistedConfigRevision
+from voltha.core.config.merge_3way import merge_3way
+from voltha.protos import third_party
+from voltha.protos import meta_pb2
+
+import structlog
+
+log = structlog.get_logger()
+
+def message_to_dict(m):
+    return MessageToDict(m, True, True, False)
+
+
+def check_access_violation(new_msg, old_msg):
+    """Raise ValueError if attempt is made to change a read-only field"""
+    access_map = access_rights(new_msg.__class__)
+    violated_fields = []
+    for field_name, access in access_map.iteritems():
+        if access == meta_pb2.READ_ONLY:
+            if getattr(new_msg, field_name) != getattr(old_msg, field_name):
+                violated_fields.append(field_name)
+    if violated_fields:
+        raise ValueError('Cannot change read-only field(s) %s' %
+                         ', '.join('"%s"' % f for f in violated_fields))
+
+
+def find_rev_by_key(revs, keyname, value):
+    for i, rev in enumerate(revs):
+        if getattr(rev._config._data, keyname) == value:
+            return i, rev
+    raise KeyError('key {}={} not found'.format(keyname, value))
+
+
+class ConfigNode(object):
+    """
+    Represents a configuration node which can hold a number of revisions
+    of the configuration for this node.
+    When the configuration changes, the new version is appended to the
+    node.
+    Initial data must be a protobuf message and it will determine the type of
+    this node.
+    """
+    __slots__ = (
+        '_root',  # ref to root node
+        '_type',  # node type, as __class__ of protobuf message
+        '_branches',  # dict of transaction branches and a default (committed)
+                      # branch
+        '_tags',  # dict of tag-name to ref of ConfigRevision
+        '_proxy',  # ref to proxy observer or None if no proxy assigned
+        '_event_bus',  # ref to event_bus or None if no event bus is assigned
+        '_auto_prune'
+    )
+
+    def __init__(self, root, initial_data, auto_prune=True, txid=None):
+        self._root = root
+        self._branches = {}
+        self._tags = {}
+        self._proxy = None
+        self._event_bus = None
+        self._auto_prune = auto_prune
+
+        if isinstance(initial_data, type):
+            self._type = initial_data
+        elif is_proto_message(initial_data):
+            self._type = initial_data.__class__
+            copied_data = initial_data.__class__()
+            copied_data.CopyFrom(initial_data)
+            self._initialize(copied_data, txid)
+        else:
+            raise NotImplementedError()
+
+    def _mknode(self, *args, **kw):
+        return ConfigNode(self._root, *args, **kw)
+
+    def _mkrev(self, *args, **kw):
+        return self._root.mkrev(*args, **kw)
+
+    def _initialize(self, data, txid):
+        # separate external children data away from locally stored data
+        # based on child_node annotations in protobuf
+        children = {}
+        for field_name, field in children_fields(self._type).iteritems():
+            field_value = getattr(data, field_name)
+            if field.is_container:
+                if field.key:
+                    keys_seen = set()
+                    children[field_name] = lst = []
+                    for v in field_value:
+                        rev = self._mknode(v, txid=txid).latest
+                        key = getattr(v, field.key)
+                        if key in keys_seen:
+                            raise ValueError('Duplicate key "{}"'.format(key))
+                        lst.append(rev)
+                        keys_seen.add(key)
+                else:
+                    children[field_name] = [
+                        self._mknode(v, txid=txid).latest for v in field_value]
+            else:
+                children[field_name] = [
+                    self._mknode(field_value, txid=txid).latest]
+            data.ClearField(field_name)
+
+        branch = ConfigBranch(self, auto_prune=self._auto_prune)
+        rev = self._mkrev(branch, data, children)
+        self._make_latest(branch, rev)
+        self._branches[txid] = branch
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ accessors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    # these convenience short-cuts only work for the committed branch
+
+    @property
+    def revisions(self):
+        return [r._hash for r in self._branches[None]._revs.itervalues()]
+
+    @property
+    def latest(self):
+        return self._branches[None]._latest
+
+    def __getitem__(self, hash):
+        return self._branches[None]._revs[hash]
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ get operation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def get(self, path=None, hash=None, depth=0, deep=False, txid=None):
+
+        # depth preparation
+        if deep:
+            depth = -1
+
+        # path preparation
+        path = '' if path is None else path
+        while path.startswith('/'):
+            path = path[1:]
+
+        # determine branch; if lookup fails, it is ok to use default branch
+        branch = self._branches.get(txid, None) or self._branches[None]
+
+        # determine rev
+        if hash is not None:
+            rev = branch._revs[hash]
+        else:
+            rev = branch.latest
+
+        return self._get(rev, path, depth)
+
+    def _get(self, rev, path, depth):
+
+        if not path:
+            return self._do_get(rev, depth)
+
+        # ... otherwise
+        name, _, path = path.partition('/')
+        field = children_fields(self._type)[name]
+        if field.is_container:
+            if field.key:
+                children = rev._children[name]
+                if path:
+                    # need to escalate further
+                    key, _, path = path.partition('/')
+                    key = field.key_from_str(key)
+                    _, child_rev = find_rev_by_key(children, field.key, key)
+                    child_node = child_rev.node
+                    return child_node._get(child_rev, path, depth)
+                else:
+                    # we are the node of interest
+                    response = []
+                    for child_rev in children:
+                        child_node = child_rev.node
+                        value = child_node._do_get(child_rev, depth)
+                        response.append(value)
+                    return response
+            else:
+                if path:
+                    raise LookupError(
+                        'Cannot index into container with no key defined')
+                response = []
+                for child_rev in rev._children[name]:
+                    child_node = child_rev.node
+                    value = child_node._do_get(child_rev, depth)
+                    response.append(value)
+                return response
+        else:
+            child_rev = rev._children[name][0]
+            child_node = child_rev.node
+            return child_node._get(child_rev, path, depth)
+
+    def _do_get(self, rev, depth):
+        msg = rev.get(depth)
+        if self._proxy is not None:
+            msg = self._proxy.invoke_callbacks(CallbackType.GET, msg)
+        return msg
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ update operation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def update(self, path, data, strict=False, txid=None, mk_branch=None):
+
+        while path.startswith('/'):
+            path = path[1:]
+
+        try:
+            branch = self._branches[txid]
+        except KeyError:
+            branch = mk_branch(self)
+
+        if not path:
+            return self._do_update(branch, data, strict)
+
+        rev = branch._latest  # change is always made to the latest
+        name, _, path = path.partition('/')
+        field = children_fields(self._type)[name]
+        if field.is_container:
+            if not path:
+                raise ValueError('Cannot update a list')
+            if field.key:
+                key, _, path = path.partition('/')
+                key = field.key_from_str(key)
+                children = copy(rev._children[name])
+                idx, child_rev = find_rev_by_key(children, field.key, key)
+                child_node = child_rev.node
+                # check if a deep copy would work better
+                new_child_rev = child_node.update(
+                    path, data, strict, txid, mk_branch)
+                if new_child_rev.hash == child_rev.hash:
+                    # When new_child_rev goes out of scope, its destructor
+                    # is invoked, since it is not referenced by any other
+                    # data structure.  To prevent that from erasing the hash
+                    # it holds from the db, the hash is set to None.  If
+                    # new_child_rev points at the same object as child_rev,
+                    # do not clear the hash.
+                    if new_child_rev != child_rev:
+                        log.debug('clear-hash',
+                                  hash=new_child_rev.hash,
+                                  object_ref=new_child_rev)
+                        new_child_rev.clear_hash()
+                    return branch._latest
+                if getattr(new_child_rev.data, field.key) != key:
+                    raise ValueError('Cannot change key field')
+                children[idx] = new_child_rev
+                rev = rev.update_children(name, children, branch)
+                self._make_latest(branch, rev)
+                return rev
+            else:
+                raise ValueError('Cannot index into container with no keys')
+
+        else:
+            child_rev = rev._children[name][0]
+            child_node = child_rev.node
+            new_child_rev = child_node.update(
+                path, data, strict, txid, mk_branch)
+            rev = rev.update_children(name, [new_child_rev], branch)
+            self._make_latest(branch, rev)
+            return rev
+
+    def _do_update(self, branch, data, strict):
+        if not isinstance(data, self._type):
+            raise ValueError(
+                '"{}" is not a valid data type for this node'.format(
+                    data.__class__.__name__))
+        self._test_no_children(data)
+        if self._proxy is not None:
+            self._proxy.invoke_callbacks(CallbackType.PRE_UPDATE, data)
+
+        if branch._latest.data != data:
+            if strict:
+                # check if attempt is made to change read-only field
+                check_access_violation(data, branch._latest.data)
+            rev = branch._latest.update_data(data, branch)
+            self._make_latest(branch, rev,
+                              ((CallbackType.POST_UPDATE, rev.data),))
+            return rev
+        else:
+            return branch._latest
+
+    def _make_latest(self, branch, rev, change_announcements=()):
+        # Update the branch's latest rev only when the hash of the new rev
+        # differs from the previous one; otherwise we would trigger erasure
+        # of data already saved in the db under that hash
+        if rev.hash not in branch._revs:
+            branch._revs[rev.hash] = rev
+
+        if not branch._latest or rev.hash != branch._latest.hash:
+            branch._latest = rev
+
+        # announce only if this is main branch
+        if change_announcements and branch._txid is None:
+
+            if self._proxy is not None:
+                for change_type, data in change_announcements:
+                    # since the callback may operate on the config tree,
+                    # we have to defer the execution of the callbacks till
+                    # the change is propagated to the root, then root will
+                    # call the callbacks
+                    self._root.enqueue_callback(
+                        self._proxy.invoke_callbacks,
+                        change_type,
+                        data,
+                        proceed_on_errors=1,
+                    )
+
+            for change_type, data in change_announcements:
+                self._root.enqueue_notification_callback(
+                    self._mk_event_bus().advertise,
+                    change_type,
+                    data,
+                    hash=rev.hash
+                )
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ add operation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def add(self, path, data, txid=None, mk_branch=None):
+        while path.startswith('/'):
+            path = path[1:]
+        if not path:
+            raise ValueError('Cannot add to non-container node')
+
+        try:
+            branch = self._branches[txid]
+        except KeyError:
+            branch = mk_branch(self)
+
+        rev = branch._latest  # change is always made to latest
+        name, _, path = path.partition('/')
+        field = children_fields(self._type)[name]
+        if field.is_container:
+            if not path:
+                # we do need to add a new child to the field
+                if field.key:
+                    if self._proxy is not None:
+                        self._proxy.invoke_callbacks(
+                            CallbackType.PRE_ADD, data)
+                    children = copy(rev._children[name])
+                    key = getattr(data, field.key)
+                    try:
+                        find_rev_by_key(children, field.key, key)
+                    except KeyError:
+                        pass
+                    else:
+                        raise ValueError('Duplicate key "{}"'.format(key))
+                    child_rev = self._mknode(data).latest
+                    children.append(child_rev)
+                    rev = rev.update_children(name, children, branch)
+                    self._make_latest(branch, rev,
+                                      ((CallbackType.POST_ADD, data),))
+                    return rev
+                else:
+                    # adding to non-keyed containers not implemented yet
+                    raise ValueError('Cannot add to non-keyed container')
+            else:
+                if field.key:
+                    # need to escalate
+                    key, _, path = path.partition('/')
+                    key = field.key_from_str(key)
+                    children = copy(rev._children[name])
+                    idx, child_rev = find_rev_by_key(children, field.key, key)
+                    child_node = child_rev.node
+                    new_child_rev = child_node.add(path, data, txid, mk_branch)
+                    children[idx] = new_child_rev
+                    rev = rev.update_children(name, children, branch)
+                    self._make_latest(branch, rev)
+                    return rev
+                else:
+                    raise ValueError(
+                        'Cannot index into container with no keys')
+        else:
+            raise ValueError('Cannot add to non-container field')
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ remove operation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def remove(self, path, txid=None, mk_branch=None):
+        while path.startswith('/'):
+            path = path[1:]
+        if not path:
+            raise ValueError('Cannot remove from non-container node')
+
+        try:
+            branch = self._branches[txid]
+        except KeyError:
+            branch = mk_branch(self)
+
+        rev = branch._latest  # change is always made to latest
+        name, _, path = path.partition('/')
+        field = children_fields(self._type)[name]
+        if field.is_container:
+            if not path:
+                raise ValueError("Cannot remove without a key")
+            if field.key:
+                key, _, path = path.partition('/')
+                key = field.key_from_str(key)
+                if path:
+                    # need to escalate
+                    children = copy(rev._children[name])
+                    idx, child_rev = find_rev_by_key(children, field.key, key)
+                    child_node = child_rev.node
+                    new_child_rev = child_node.remove(path, txid, mk_branch)
+                    children[idx] = new_child_rev
+                    rev = rev.update_children(name, children, branch)
+                    self._make_latest(branch, rev)
+                    return rev
+                else:
+                    # need to remove from this very node
+                    children = copy(rev._children[name])
+                    idx, child_rev = find_rev_by_key(children, field.key, key)
+                    if self._proxy is not None:
+                        data = child_rev.data
+                        self._proxy.invoke_callbacks(
+                            CallbackType.PRE_REMOVE, data)
+                        post_anno = ((CallbackType.POST_REMOVE, data),)
+                    else:
+                        post_anno = ((CallbackType.POST_REMOVE, child_rev.data),)
+                    del children[idx]
+                    rev = rev.update_children(name, children, branch)
+                    self._make_latest(branch, rev, post_anno)
+                    return rev
+            else:
+                raise ValueError('Cannot remove from non-keyed container')
+        else:
+            raise ValueError('Cannot remove non-container field')
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Branching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def _mk_txbranch(self, txid):
+        branch_point = self._branches[None].latest
+        branch = ConfigBranch(self, txid, branch_point)
+        self._branches[txid] = branch
+        return branch
+
+    def _del_txbranch(self, txid):
+        del self._branches[txid]
+
+    def _merge_txbranch(self, txid, dry_run=False):
+        """
+        Make latest in branch to be latest in the common branch, but only
+        if no conflict is detected. Conflict is where the txbranch branch
+        point no longer matches the latest in the default branch. This has
+        to be verified recursively.
+        """
+
+        def merge_child(child_rev):
+            child_branch = child_rev._branch
+            if child_branch._txid == txid:
+                child_rev = child_branch._node._merge_txbranch(txid, dry_run)
+            return child_rev
+
+        src_branch = self._branches[txid]
+        dst_branch = self._branches[None]
+
+        fork_rev = src_branch.origin  # rev from which src branch was made
+        src_rev = src_branch.latest  # head rev of source branch
+        dst_rev = dst_branch.latest  # head rev of target branch
+
+        rev, changes = merge_3way(
+            fork_rev, src_rev, dst_rev, merge_child, dry_run)
+
+        if not dry_run:
+            self._make_latest(dst_branch, rev, change_announcements=changes)
+            del self._branches[txid]
+
+        return rev
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Diff utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def diff(self, hash1, hash2=None, txid=None):
+        branch = self._branches[txid]
+        rev1 = branch[hash1]
+        rev2 = branch[hash2] if hash2 else branch._latest
+        if rev1.hash == rev2.hash:
+            return JsonPatch([])
+        else:
+            dict1 = message_to_dict(rev1.data)
+            dict2 = message_to_dict(rev2.data)
+            return make_patch(dict1, dict2)
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tagging utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def tag(self, tag, hash=None):
+        branch = self._branches[None]  # tag only what has been committed
+        rev = branch._latest if hash is None else branch._revs[hash]
+        self._tags[tag] = rev
+        self.persist_tags()
+        return self
+
+    @property
+    def tags(self):
+        return sorted(self._tags.iterkeys())
+
+    def by_tag(self, tag):
+        """
+        Return revision based on tag
+        :param tag: previously registered tag value
+        :return: revision object
+        """
+        return self._tags[tag]
+
+    def diff_by_tag(self, tag1, tag2):
+        return self.diff(self._tags[tag1].hash, self._tags[tag2].hash)
+
+    def delete_tag(self, tag):
+        del self._tags[tag]
+        self.persist_tags()
+
+    def delete_tags(self, *tags):
+        for tag in tags:
+            del self._tags[tag]
+        self.persist_tags()
+
+    def prune_untagged(self):
+        branch = self._branches[None]
+        keep = set(rev.hash for rev in self._tags.itervalues())
+        keep.add(branch._latest.hash)
+        for hash in branch._revs.keys():
+            if hash not in keep:
+                del branch._revs[hash]
+        return self
+
+    def persist_tags(self):
+        """
+        Persist tag information to the backend
+        """
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Internals ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def _test_no_children(self, data):
+        for field_name, field in children_fields(self._type).items():
+            field_value = getattr(data, field_name)
+            if field.is_container:
+                if len(field_value):
+                    raise NotImplementedError(
+                        'Cannot update external children')
+            else:
+                if data.HasField(field_name):
+                    raise NotImplementedError(
+                        'Cannot update external children')
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Node proxy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def get_proxy(self, path, exclusive=False):
+        return self._get_proxy(path, self, path, exclusive)
+
+    def _get_proxy(self, path, root, full_path, exclusive):
+        while path.startswith('/'):
+            path = path[1:]
+        if not path:
+            return self._mk_proxy(root, full_path, exclusive)
+
+        # need to escalate
+        rev = self._branches[None]._latest
+        name, _, path = path.partition('/')
+        field = children_fields(self._type)[name]
+        if field.is_container:
+            if not path:
+                raise ValueError('Cannot proxy a container field')
+            if field.key:
+                key, _, path = path.partition('/')
+                key = field.key_from_str(key)
+                children = rev._children[name]
+                _, child_rev = find_rev_by_key(children, field.key, key)
+                child_node = child_rev.node
+                return child_node._get_proxy(path, root, full_path, exclusive)
+
+            raise ValueError('Cannot index into container with no keys')
+
+        else:
+            child_rev = rev._children[name][0]
+            child_node = child_rev.node
+            return child_node._get_proxy(path, root, full_path, exclusive)
+
+    def _mk_proxy(self, root, full_path, exclusive):
+        if self._proxy is None:
+            self._proxy = ConfigProxy(root, self, full_path, exclusive)
+        else:
+            if self._proxy.exclusive:
+                raise ValueError('Node is already owned exclusively')
+        return self._proxy
+
+    def _mk_event_bus(self):
+        if self._event_bus is None:
+            self._event_bus = ConfigEventBus()
+        return self._event_bus
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~ Persistence loading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def load_latest(self, latest_hash):
+
+        root = self._root
+        kv_store = root._kv_store
+
+        branch = ConfigBranch(node=self, auto_prune=self._auto_prune)
+        rev = PersistedConfigRevision.load(
+            branch, kv_store, self._type, latest_hash)
+        self._make_latest(branch, rev)
+        self._branches[None] = branch
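To make the path mechanics above concrete, a sketch of reading and updating
through a keyed container; the root node, the 'devices' child_node field, and
the device message's fields are assumptions for illustration only.

    # Assume root_node is a ConfigNode whose type has a keyed 'devices' field.
    device = root_node.get('/devices/1234', deep=True)  # descend by key, full depth

    # update() always applies to the branch's latest revision and returns the
    # new revision; strict=True rejects changes to READ_ONLY fields.
    device.reason = 'rebooting'                         # hypothetical field
    new_rev = root_node.update('/devices/1234', device, strict=True)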
diff --git a/python/core/config/config_proxy.py b/python/core/config/config_proxy.py
new file mode 100644
index 0000000..57d8150
--- /dev/null
+++ b/python/core/config/config_proxy.py
@@ -0,0 +1,155 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from enum import Enum
+
+from voltha.core.config.config_txn import ConfigTransaction
+
+log = structlog.get_logger()
+
+
+class OperationContext(object):
+    def __init__(self, path=None, data=None, field_name=None, child_key=None):
+        self.path = path
+        self._data = data
+        self.field_name = field_name
+        self.child_key = child_key
+
+    @property
+    def data(self):
+        return self._data
+
+    def update(self, data):
+        self._data = data
+        return self
+
+    def __repr__(self):
+        return 'OperationContext({})'.format(self.__dict__)
+
+
+class CallbackType(Enum):
+
+    # GET hooks are called after the data is retrieved and can be used to
+    # augment the data (they should only augment fields marked as REAL_TIME).
+    GET = 1
+
+    # PRE_UPDATE hooks are called before the change is made and are supposed
+    # to be used to reject the data by raising an exception. If they don't,
+    # the change will be applied.
+    PRE_UPDATE = 2
+
+    # POST_UPDATE hooks are called after the update has occurred and can
+    # be used to deal with the change. For instance, an adapter can use the
+    # callback to trigger the south-bound configuration
+    POST_UPDATE = 3
+
+    # These behave similarly to the update callbacks as described above.
+    PRE_ADD = 4
+    POST_ADD = 5
+    PRE_REMOVE = 6
+    POST_REMOVE = 7
+
+    # Bulk list change due to transaction commit that changed items in
+    # non-keyed container fields
+    POST_LISTCHANGE = 8
+
+
+class ConfigProxy(object):
+    """
+    Allows an entity to look at a sub-tree and see it as if it were the whole tree
+    """
+    __slots__ = (
+        '_root',
+        '_node',
+        '_path',
+        '_exclusive',
+        '_callbacks'
+    )
+
+    def __init__(self, root, node, path, exclusive):
+        self._root = root
+        self._node = node
+        self._exclusive = exclusive
+        self._path = path  # full path to proxied node
+        self._callbacks = {}  # call back type -> list of callbacks
+
+    @property
+    def exclusive(self):
+        return self._exclusive
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~ CRUD handlers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def get(self, path='/', depth=None, deep=None, txid=None):
+        return self._node.get(path, depth=depth, deep=deep, txid=txid)
+
+    def update(self, path, data, strict=False, txid=None):
+        assert path.startswith('/')
+        full_path = self._path if path == '/' else self._path + path
+        return self._root.update(full_path, data, strict, txid=txid)
+
+    def add(self, path, data, txid=None):
+        assert path.startswith('/')
+        full_path = self._path if path == '/' else self._path + path
+        return self._root.add(full_path, data, txid=txid)
+
+    def remove(self, path, txid=None):
+        assert path.startswith('/')
+        full_path = self._path if path == '/' else self._path + path
+        return self._root.remove(full_path, txid=txid)
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~ Transaction support ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def open_transaction(self):
+        """Open a new transaction"""
+        txid = self._root.mk_txbranch()
+        return ConfigTransaction(self, txid)
+
+    def commit_transaction(self, txid):
+        """
+        If there is an open transaction, commit it now. Raises an exception
+        if a conflict is detected. Either way, the transaction is deleted.
+        """
+        self._root.fold_txbranch(txid)
+
+    def cancel_transaction(self, txid):
+        """
+        Cancel the current transaction, if there is one. Always succeeds.
+        """
+        self._root.del_txbranch(txid)
+
+    # ~~~~~~~~~~~~~~~~~~~~~~ Callbacks registrations ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def register_callback(self, callback_type, callback, *args, **kw):
+        lst = self._callbacks.setdefault(callback_type, [])
+        lst.append((callback, args, kw))
+
+    def unregister_callback(self, callback_type, callback, *args, **kw):
+        lst = self._callbacks.setdefault(callback_type, [])
+        if (callback, args, kw) in lst:
+            lst.remove((callback, args, kw))
+
+    # ~~~~~~~~~~~~~~~~~~~~~ Callback dispatch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def invoke_callbacks(self, callback_type, context, proceed_on_errors=False):
+        lst = self._callbacks.get(callback_type, [])
+        for callback, args, kw in lst:
+            try:
+                context = callback(context, *args, **kw)
+            except Exception, e:
+                if proceed_on_errors:
+                    log.exception(
+                        'call-back-error', callback_type=callback_type,
+                        context=context, e=e)
+                else:
+                    raise
+        return context
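A usage sketch for the hooks and transactions above; the proxy is assumed to
come from get_proxy() on a config node, and the callback body is illustrative.

    from voltha.core.config.config_proxy import CallbackType

    def on_device_update(device):
        # POST_UPDATE hooks receive the changed data; returning it (possibly
        # augmented) keeps the invoke_callbacks() chain intact.
        return device

    proxy = root_node.get_proxy('/devices/1234')  # hypothetical node and path
    proxy.register_callback(CallbackType.POST_UPDATE, on_device_update)

    txn = proxy.open_transaction()   # wraps a fresh branch in a ConfigTransaction
    # ... stage changes against the transaction's branch, then commit with
    # proxy.commit_transaction(<txid>): raises on conflict; either way the
    # transaction branch is deleted.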
diff --git a/python/core/config/config_rev.py b/python/core/config/config_rev.py
new file mode 100644
index 0000000..8bfac18
--- /dev/null
+++ b/python/core/config/config_rev.py
@@ -0,0 +1,342 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Immutable classes to store config revision information arranged in a tree.
+
+Immutability cannot be enforced in Python, so anyone working with these
+classes directly must obey the rules.
+"""
+
+import weakref
+from copy import copy
+from hashlib import md5
+
+from google.protobuf.descriptor import Descriptor
+from simplejson import dumps
+
+from common.utils.json_format import MessageToJson
+from voltha.protos import third_party
+from voltha.protos import meta_pb2
+
+import structlog
+
+log = structlog.get_logger()
+
+def is_proto_message(o):
+    """
+    Return True if object o appears to be a protobuf message; False otherwise.
+    """
+    # use a somewhat empirical approach to decide if something looks like
+    # a protobuf message
+    return isinstance(getattr(o, 'DESCRIPTOR', None), Descriptor)
+
+
+def message_to_json_concise(m):
+    """
+    Return the most concise string representation of a protobuf. Good for
+    things where size matters (e.g., generating a hash).
+    """
+    return MessageToJson(m, False, True, False)
+
+
+_rev_cache = weakref.WeakValueDictionary()  # cache of config revs
+
+
+_children_fields_cache = {}  # to memoize externally stored field name info
+
+
+class _ChildType(object):
+    """Used to store key metadata about child_node fields in protobuf messages.
+    """
+    __slots__ = (
+        '_module',
+        '_type',
+        '_is_container',
+        '_key',
+        '_key_from_str'
+    )
+
+    def __init__(self, module, type, is_container,
+                 key=None, key_from_str=None):
+        self._module = module
+        self._type = type
+        self._is_container = is_container
+        self._key = key
+        self._key_from_str = key_from_str
+
+    @property
+    def is_container(self):
+        return self._is_container
+
+    @property
+    def key(self):
+        return self._key
+
+    @property
+    def key_from_str(self):
+        return self._key_from_str
+
+    @property
+    def module(self):
+        return self._module
+
+    @property
+    def type(self):
+        return self._type
+
+
+def children_fields(cls):
+    """
+    Return a map of externally stored fields for this protobuf message type.
+    What is stored as branch node is determined by the "child_node"
+    annotation in the protobuf definitions.
+    With each external field, we store whether the field is a container,
+    whether the container is keyed (indexed), and the function that converts
+    a path substring back to the key.
+    """
+    names = _children_fields_cache.get(cls)
+
+    if names is None:
+        names = {}
+
+        for field in cls.DESCRIPTOR.fields:
+
+            if field.has_options:
+                options = field.GetOptions()
+
+                if options.HasExtension(meta_pb2.child_node):
+                    is_container = field.label == 3
+                    meta = options.Extensions[meta_pb2.child_node]
+                    key_from_str = None
+
+                    if meta.key:
+                        key_field = field.message_type.fields_by_name[meta.key]
+                        key_type = key_field.type
+
+                        if key_type == key_field.TYPE_STRING:
+                            key_from_str = lambda s: s
+
+                        elif key_type in (
+                                key_field.TYPE_FIXED32,
+                                key_field.TYPE_FIXED64,
+                                key_field.TYPE_INT32,
+                                key_field.TYPE_INT64,
+                                key_field.TYPE_SFIXED32,
+                                key_field.TYPE_SFIXED64,
+                                key_field.TYPE_SINT32,
+                                key_field.TYPE_SINT64,
+                                key_field.TYPE_UINT32,
+                                key_field.TYPE_UINT64):
+                            key_from_str = lambda s: int(s)
+
+                        else:
+                            raise NotImplementedError()
+
+                    field_class = field.message_type._concrete_class
+                    names[field.name] = _ChildType(
+                        module=field_class.__module__,
+                        type=field_class.__name__,
+                        is_container=is_container,
+                        key=meta.key,
+                        key_from_str=key_from_str
+                    )
+
+        _children_fields_cache[cls] = names
+
+    return names
+
+
+_access_right_cache = {}  # to memoize field access right restrictions
+
+
+def access_rights(cls):
+    """
+    Determine the access rights for each field and cache these maps for
+    fast retrieval.
+    """
+    access_map = _access_right_cache.get(cls)
+    if access_map is None:
+        access_map = {}
+        for field in cls.DESCRIPTOR.fields:
+            if field.has_options:
+                options = field.GetOptions()
+                if options.HasExtension(meta_pb2.access):
+                    access = options.Extensions[meta_pb2.access]
+                    access_map[field.name] = access
+        _access_right_cache[cls] = access_map
+    return access_map
+
+
+class ConfigDataRevision(object):
+    """
+    Holds a specific snapshot of the local configuration for config node.
+    It shall be treated as an immutable object, although in Python this is
+    very difficult to enforce!
+    As such, we can compute a unique hash based on the config data which
+    can be used to establish equivalence. It also has a time-stamp to track
+    changes.
+
+    This object must be treated as immutable, including its nested config data.
+    This is very important: the entire config module depends on the hashes
+    we compute over the data, so altering the data can lead to unpredictable
+    failures.
+    """
+
+    __slots__ = (
+        '_data',
+        '_hash',
+        '__weakref__'
+    )
+
+    def __init__(self, data):
+        self._data = data
+        self._hash = self._hash_data(data)
+
+    @property
+    def data(self):
+        return self._data
+
+    @property
+    def hash(self):
+        return self._hash
+
+    @staticmethod
+    def _hash_data(data):
+        """Hash function to be used to track version changes of config nodes"""
+        if isinstance(data, (dict, list)):
+            to_hash = dumps(data, sort_keys=True)
+        elif is_proto_message(data):
+            to_hash = ':'.join((
+                data.__class__.__module__,
+                data.__class__.__name__,
+                data.SerializeToString()))
+        else:
+            to_hash = str(hash(data))
+        return md5(to_hash).hexdigest()[:12]
+
+
+class ConfigRevision(object):
+    """
+    Holds not only the local config data, but also the external children
+    reference lists, per field name.
+    Recall that externally stored fields are those marked "child_node" in
+    the protobuf definition.
+    This object must be treated as immutable, including its config data.
+    """
+
+    __slots__ = (
+        '_config',
+        '_children',
+        '_hash',
+        '_branch',
+        '__weakref__'
+    )
+
+    def __init__(self, branch, data, children=None):
+        self._branch = branch
+        self._config = ConfigDataRevision(data)
+        self._children = children
+        self._finalize()
+
+    def _finalize(self):
+        self._hash = self._hash_content()
+        if self._hash not in _rev_cache:
+            _rev_cache[self._hash] = self
+        if self._config._hash not in _rev_cache:
+            _rev_cache[self._config._hash] = self._config
+        else:
+            self._config = _rev_cache[self._config._hash]  # re-use!
+
+    def _hash_content(self):
+        # hash is derived from config hash and hashes of all children
+        m = md5('' if self._config is None else self._config._hash)
+        if self._children is not None:
+            for child_field in sorted(self._children.keys()):
+                children = self._children[child_field]
+                assert isinstance(children, list)
+                m.update(''.join(c._hash for c in children))
+        return m.hexdigest()[:12]
+
+    @property
+    def hash(self):
+        return self._hash
+
+    @property
+    def data(self):
+        return None if self._config is None else self._config.data
+
+    @property
+    def node(self):
+        return self._branch._node
+
+    @property
+    def type(self):
+        return self._config.data.__class__
+
+    def clear_hash(self):
+        self._hash = None
+
+    def get(self, depth):
+        """
+        Get config data of node. If depth > 0, recursively assemble the
+        branch nodes. If depth is < 0, this results in a fully exhaustive
+        "complete config".
+        """
+        orig_data = self._config.data
+        data = orig_data.__class__()
+        data.CopyFrom(orig_data)
+        if depth:
+            # collect children
+            cfields = children_fields(self.type).iteritems()
+            for field_name, field in cfields:
+                if field.is_container:
+                    for rev in self._children[field_name]:
+                        child_data = rev.get(depth=depth - 1)
+                        child_data_holder = getattr(data, field_name).add()
+                        child_data_holder.MergeFrom(child_data)
+                else:
+                    rev = self._children[field_name][0]
+                    child_data = rev.get(depth=depth - 1)
+                    child_data_holder = getattr(data, field_name)
+                    child_data_holder.MergeFrom(child_data)
+        return data
+
+    def update_data(self, data, branch):
+        """Return a NEW revision which is updated for the modified data"""
+        new_rev = copy(self)
+        new_rev._branch = branch
+        new_rev._config = self._config.__class__(data)
+        new_rev._finalize()
+        return new_rev
+
+    def update_children(self, name, children, branch):
+        """Return a NEW revision which is updated for the modified children"""
+        new_children = self._children.copy()
+        new_children[name] = children
+        new_rev = copy(self)
+        new_rev._branch = branch
+        new_rev._children = new_children
+        new_rev._finalize()
+        return new_rev
+
+    def update_all_children(self, children, branch):
+        """Return a NEW revision which is updated for all children entries"""
+        new_rev = copy(self)
+        new_rev._branch = branch
+        new_rev._children = children
+        new_rev._finalize()
+        return new_rev
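+
+
+# Note: the update_* methods above never mutate a revision in place; each
+# returns a new instance that _finalize() de-duplicates via _rev_cache. A
+# small illustrative sketch (names are hypothetical):
+#
+#   rev2 = rev1.update_data(new_data, branch)
+#   assert rev2 is not rev1   # rev1 remains unchanged (immutable)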
diff --git a/python/core/config/config_rev_persisted.py b/python/core/config/config_rev_persisted.py
new file mode 100644
index 0000000..8b25b82
--- /dev/null
+++ b/python/core/config/config_rev_persisted.py
@@ -0,0 +1,143 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A config rev object that persists itself
+"""
+from bz2 import compress, decompress
+
+import structlog
+from simplejson import dumps, loads
+
+from voltha.core.config.config_rev import ConfigRevision, children_fields
+
+log = structlog.get_logger()
+
+
+class PersistedConfigRevision(ConfigRevision):
+
+    compress = False
+
+    __slots__ = ('_kv_store',)
+
+    def __init__(self, branch, data, children=None):
+        self._kv_store = branch._node._root._kv_store
+        super(PersistedConfigRevision, self).__init__(branch, data, children)
+
+    def _finalize(self):
+        super(PersistedConfigRevision, self)._finalize()
+        self.store()
+
+    def __del__(self):
+        try:
+            if self._hash:
+                if self._config.__weakref__ is None:
+                    if self._config._hash in self._kv_store:
+                        del self._kv_store[self._config._hash]
+                assert self.__weakref__ is None
+                if self._hash in self._kv_store:
+                    del self._kv_store[self._hash]
+        except Exception, e:
+            # this should never happen
+            log.exception('del-error', hash=self.hash, e=e)
+
+    def store(self):
+
+        try:
+            # crude serialization of children hash and config data hash
+            if self._hash in self._kv_store:
+                return
+
+            self.store_config()
+
+            children_lists = {}
+            for field_name, children in self._children.iteritems():
+                hashes = [rev.hash for rev in children]
+                children_lists[field_name] = hashes
+
+            data = dict(
+                children=children_lists,
+                config=self._config._hash
+            )
+            blob = dumps(data)
+            if self.compress:
+                blob = compress(blob)
+
+            self._kv_store[self._hash] = blob
+
+        except Exception, e:
+            log.exception('store-error', e=e)
+
+    @classmethod
+    def load(cls, branch, kv_store, msg_cls, hash):
+        #  Update the branch's config store
+        blob = kv_store[hash]
+        if cls.compress:
+            blob = decompress(blob)
+        data = loads(blob)
+
+        config_hash = data['config']
+        config_data = cls.load_config(kv_store, msg_cls, config_hash)
+
+        children_list = data['children']
+        assembled_children = {}
+        node = branch._node
+        for field_name, meta in children_fields(msg_cls).iteritems():
+            child_msg_cls = tmp_cls_loader(meta.module, meta.type)
+            children = []
+            for child_hash in children_list[field_name]:
+                child_node = node._mknode(child_msg_cls)
+                child_node.load_latest(child_hash)
+                child_rev = child_node.latest
+                children.append(child_rev)
+            assembled_children[field_name] = children
+        rev = cls(branch, config_data, assembled_children)
+        return rev
+
+    def store_config(self):
+        if self._config._hash in self._kv_store:
+            return
+
+        # crude serialization of config data
+        blob = self._config._data.SerializeToString()
+        if self.compress:
+            blob = compress(blob)
+
+        self._kv_store[self._config._hash] = blob
+
+    @classmethod
+    def load_config(cls, kv_store, msg_cls, config_hash):
+        blob = kv_store[config_hash]
+        if cls.compress:
+            blob = decompress(blob)
+
+        # TODO use a loader later on
+        data = msg_cls()
+        data.ParseFromString(blob)
+        return data
+
+
+def tmp_cls_loader(module_name, cls_name):
+    # TODO this shall be generalized
+    from voltha.protos import voltha_pb2, health_pb2, adapter_pb2, \
+        logical_device_pb2, device_pb2, openflow_13_pb2, bbf_fiber_base_pb2, \
+        bbf_fiber_traffic_descriptor_profile_body_pb2, \
+        bbf_fiber_tcont_body_pb2, bbf_fiber_gemport_body_pb2, \
+        bbf_fiber_multicast_gemport_body_pb2, \
+        bbf_fiber_multicast_distribution_set_body_pb2, \
+        omci_mib_db_pb2, \
+        omci_alarm_db_pb2
+    return getattr(locals()[module_name], cls_name)
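+
+
+# A rough sketch of the persistence round trip (hypothetical 'branch',
+# dict-like 'kv_store' and protobuf message class 'MsgCls'; sketch only):
+#
+#   rev = PersistedConfigRevision(branch, MsgCls())  # store() runs in _finalize
+#   assert rev.hash in kv_store   # blob holds children hashes + config hash
+#   rev2 = PersistedConfigRevision.load(branch, kv_store, MsgCls, rev.hash)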
diff --git a/python/core/config/config_root.py b/python/core/config/config_root.py
new file mode 100644
index 0000000..4b1006d
--- /dev/null
+++ b/python/core/config/config_root.py
@@ -0,0 +1,229 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from uuid import uuid4
+
+import structlog
+from simplejson import dumps, loads
+
+from voltha.core.config.config_node import ConfigNode
+from voltha.core.config.config_rev import ConfigRevision
+from voltha.core.config.config_rev_persisted import PersistedConfigRevision
+from voltha.core.config.merge_3way import MergeConflictException
+
+log = structlog.get_logger()
+
+
+class ConfigRoot(ConfigNode):
+
+    __slots__ = (
+        '_dirty_nodes',  # holds set of modified nodes per transaction branch
+        '_kv_store',
+        '_loading',
+        '_rev_cls',
+        '_deferred_callback_queue',
+        '_notification_deferred_callback_queue'
+    )
+
+    def __init__(self, initial_data, kv_store=None, rev_cls=ConfigRevision):
+        self._kv_store = kv_store
+        self._dirty_nodes = {}
+        self._loading = False
+        if kv_store is not None and \
+                not issubclass(rev_cls, PersistedConfigRevision):
+            rev_cls = PersistedConfigRevision
+        self._rev_cls = rev_cls
+        self._deferred_callback_queue = []
+        self._notification_deferred_callback_queue = []
+        super(ConfigRoot, self).__init__(self, initial_data, False)
+
+    @property
+    def kv_store(self):
+        if self._loading:
+            # provide a fake store so that writes during load are discarded
+            # TODO this shall be a fake_dict providing noop for all relevant
+            # operations
+            return dict()
+        else:
+            return self._kv_store
+
+    def mkrev(self, *args, **kw):
+        return self._rev_cls(*args, **kw)
+
+    def mk_txbranch(self):
+        txid = uuid4().hex[:12]
+        self._dirty_nodes[txid] = {self}
+        self._mk_txbranch(txid)
+        return txid
+
+    def del_txbranch(self, txid):
+        for dirty_node in self._dirty_nodes[txid]:
+            dirty_node._del_txbranch(txid)
+        del self._dirty_nodes[txid]
+
+    def fold_txbranch(self, txid):
+        try:
+            self._merge_txbranch(txid, dry_run=1)
+        except MergeConflictException:
+            self.del_txbranch(txid)
+            raise
+
+        try:
+            self._merge_txbranch(txid)
+        finally:
+            self.execute_deferred_callbacks()
+
+    # ~~~~~~ Overridden, root-level CRUD methods to handle transactions ~~~~~~~
+
+    def update(self, path, data, strict=None, txid=None, mk_branch=None):
+        assert mk_branch is None
+        self.check_callback_queue()
+        try:
+            if txid is not None:
+                dirtied = self._dirty_nodes[txid]
+
+                def track_dirty(node):
+                    dirtied.add(node)
+                    return node._mk_txbranch(txid)
+
+                res = super(ConfigRoot, self).update(path, data, strict,
+                                                     txid, track_dirty)
+            else:
+                res = super(ConfigRoot, self).update(path, data, strict)
+        finally:
+            self.execute_deferred_callbacks()
+        return res
+
+    def add(self, path, data, txid=None, mk_branch=None):
+        assert mk_branch is None
+        self.check_callback_queue()
+        try:
+            if txid is not None:
+                dirtied = self._dirty_nodes[txid]
+
+                def track_dirty(node):
+                    dirtied.add(node)
+                    return node._mk_txbranch(txid)
+
+                res = super(ConfigRoot, self).add(path, data, txid, track_dirty)
+            else:
+                res = super(ConfigRoot, self).add(path, data)
+        finally:
+            self.execute_deferred_callbacks()
+        return res
+
+    def remove(self, path, txid=None, mk_branch=None):
+        assert mk_branch is None
+        self.check_callback_queue()
+        try:
+            if txid is not None:
+                dirtied = self._dirty_nodes[txid]
+
+                def track_dirty(node):
+                    dirtied.add(node)
+                    return node._mk_txbranch(txid)
+
+                res = super(ConfigRoot, self).remove(path, txid, track_dirty)
+            else:
+                res = super(ConfigRoot, self).remove(path)
+        finally:
+            self.execute_deferred_callbacks()
+        return res
+
+    def check_callback_queue(self):
+        assert len(self._deferred_callback_queue) == 0
+
+    def enqueue_callback(self, func, *args, **kw):
+        self._deferred_callback_queue.append((func, args, kw))
+
+    def enqueue_notification_callback(self, func, *args, **kw):
+        """
+        A separate queue is required for notifications.  Previously, when
+        notifications were added to self._deferred_callback_queue, a
+        deadlock could occur: two callbacks were added (one for the model
+        change and one for the notification related to that model change),
+        but a model change requires self._deferred_callback_queue to be
+        empty, so the second callback deadlocked.  The simple way to avoid
+        this problem is to keep separate queues for model and notification
+        callbacks.
+        TODO: Investigate whether self._deferred_callback_queue needs to
+        handle multiple model events at the same time
+        :param func: callback function
+        :param args: args
+        :param kw: key-value args
+        :return: None
+        """
+        self._notification_deferred_callback_queue.append((func, args, kw))
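+
+        # Illustrative ordering under the two-queue scheme ('root' is a
+        # ConfigRoot; cb_model and cb_notify are hypothetical callables):
+        #
+        #   root.enqueue_callback(cb_model)                # model queue
+        #   root.enqueue_notification_callback(cb_notify)  # notification queue
+        #   root.execute_deferred_callbacks()  # cb_model runs, then cb_notify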
+
+    def execute_deferred_callbacks(self):
+        # First process the model-triggered related callbacks
+        while self._deferred_callback_queue:
+            func, args, kw = self._deferred_callback_queue.pop(0)
+            func(*args, **kw)
+
+        # Execute the notification callbacks
+        while self._notification_deferred_callback_queue:
+            func, args, kw = self._notification_deferred_callback_queue.pop(0)
+            func(*args, **kw)
+
+
+    # ~~~~~~~~~~~~~~~~ Persistence related ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    @classmethod
+    def load(cls, root_msg_cls, kv_store):
+        # use a fake kv store during the initial load so that we do not
+        # overwrite our real kv store
+        fake_kv_store = dict()  # shall use a more efficient mock dict
+        root = cls(root_msg_cls(), kv_store=fake_kv_store,
+                   rev_cls=PersistedConfigRevision)
+        # we can install the real store now
+        root._kv_store = kv_store
+        root.load_from_persistence(root_msg_cls)
+        return root
+
+    def _make_latest(self, branch, *args, **kw):
+        super(ConfigRoot, self)._make_latest(branch, *args, **kw)
+        # only persist the committed branch
+        if self._kv_store is not None and branch._txid is None:
+            root_data = dict(
+                latest=branch._latest._hash,
+                tags=dict((k, v._hash) for k, v in self._tags.iteritems())
+            )
+            blob = dumps(root_data)
+            self._kv_store['root'] = blob
+
+    def persist_tags(self):
+        if self._kv_store is not None:
+            root_data = loads(self.kv_store['root'])
+            root_data = dict(
+                latest=root_data['latest'],
+                tags=dict((k, v._hash) for k, v in self._tags.iteritems())
+            )
+            blob = dumps(root_data)
+            self._kv_store['root'] = blob
+
+    def load_from_persistence(self, root_msg_cls):
+        self._loading = True
+        blob = self._kv_store['root']
+        root_data = loads(blob)
+
+        for tag, hash in root_data['tags'].iteritems():
+            self.load_latest(hash)
+            self._tags[tag] = self.latest
+
+        self.load_latest(root_data['latest'])
+
+        self._loading = False
+
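+
+# End-to-end transaction sketch at the root (a ConfigRoot instance 'root';
+# path and data are illustrative, error handling omitted):
+#
+#   txid = root.mk_txbranch()             # open a transaction branch
+#   root.update('/devices/1', data, txid=txid)
+#   root.fold_txbranch(txid)              # 3-way merge; raises on conflict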
diff --git a/python/core/config/config_txn.py b/python/core/config/config_txn.py
new file mode 100644
index 0000000..87dfc59
--- /dev/null
+++ b/python/core/config/config_txn.py
@@ -0,0 +1,73 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+class ClosedTransactionError(Exception):
+    pass
+
+
+class ConfigTransaction(object):
+
+    __slots__ = (
+        '_proxy',
+        '_txid'
+    )
+
+    def __init__(self, proxy, txid):
+        self._proxy = proxy
+        self._txid = txid
+
+    def __del__(self):
+        if self._txid:
+            self.cancel()
+
+    # ~~~~~~~~~~~~~~~~~~~~ CRUD ops within the transaction ~~~~~~~~~~~~~~~~~~~~
+
+    def get(self, path='/', depth=None, deep=None):
+        if self._txid is None:
+            raise ClosedTransactionError()
+        return self._proxy.get(path, depth=depth, deep=deep, txid=self._txid)
+
+    def update(self, path, data, strict=False):
+        if self._txid is None:
+            raise ClosedTransactionError()
+        return self._proxy.update(path, data, strict, self._txid)
+
+    def add(self, path, data):
+        if self._txid is None:
+            raise ClosedTransactionError()
+        return self._proxy.add(path, data, self._txid)
+
+    def remove(self, path):
+        if self._txid is None:
+            raise ClosedTransactionError()
+        return self._proxy.remove(path, self._txid)
+
+    # ~~~~~~~~~~~~~~~~~~~~ transaction finalization ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def cancel(self):
+        """Explicitly cancel the transaction"""
+        self._proxy.cancel_transaction(self._txid)
+        self._txid = None
+
+    def commit(self):
+        """Commit all transaction changes"""
+        try:
+            self._proxy.commit_transaction(self._txid)
+        finally:
+            self._txid = None
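+
+
+# A minimal usage sketch (assumes a proxy that hands out ConfigTransaction
+# instances, e.g. via an open_transaction() helper; illustrative only):
+#
+#   tx = proxy.open_transaction()
+#   tx.update('/some/path', data)
+#   tx.commit()   # or tx.cancel() to discard the branch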
diff --git a/python/core/config/merge_3way.py b/python/core/config/merge_3way.py
new file mode 100644
index 0000000..5444a6c
--- /dev/null
+++ b/python/core/config/merge_3way.py
@@ -0,0 +1,267 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+3-way merge function for config rev objects.
+"""
+from collections import OrderedDict
+from copy import copy
+
+from voltha.core.config.config_proxy import CallbackType, OperationContext
+from voltha.core.config.config_rev import children_fields
+
+
+class MergeConflictException(Exception):
+    pass
+
+
+def merge_3way(fork_rev, src_rev, dst_rev, merge_child_func, dry_run=False):
+    """
+    Attempt to merge src_rev into dst_rev, taking into account what has
+    changed in both revs since the last known common point, the fork_rev.
+    In case of conflict, raise a MergeConflictException. If dry_run is True,
+    don't actually perform the merge, but still detect potential conflicts.
+
+    This function recurses into all child nodes stored under the rev and
+    performs the merge if the child is also part of a transaction branch.
+
+    :param fork_rev: Point of forking (last known common state between
+    branches)
+    :param src_rev: Latest rev from which we merge to dst_rev
+    :param dst_rev: Target (destination) rev
+    :param merge_child_func: Run a potential merge in all children that
+    may need merging (determined from the local changes)
+    :param dry_run: If True, do not perform the merge, but detect merge
+    conflicts.
+    :return: The new dst_rev (a new rev instance) and the list of changes that
+    occurred in this node or any of its children as part of this merge.
+    """
+
+    # to collect change tuples of (<callback-type>, <op-context>)
+    changes = []
+
+    class AnalyzeChanges(object):
+        def __init__(self, lst1, lst2, keyname):
+            self.keymap1 = OrderedDict((getattr(rev._config._data, keyname), i)
+                                       for i, rev in enumerate(lst1))
+            self.keymap2 = OrderedDict((getattr(rev._config._data, keyname), i)
+                                       for i, rev in enumerate(lst2))
+            self.added_keys = [
+                k for k in self.keymap2.iterkeys() if k not in self.keymap1]
+            self.removed_keys = [
+                k for k in self.keymap1.iterkeys() if k not in self.keymap2]
+            self.changed_keys = [
+                k for k in self.keymap1.iterkeys()
+                if k in self.keymap2 and
+                    lst1[self.keymap1[k]]._hash != lst2[self.keymap2[k]]._hash
+            ]
+
+    # Note: there are a couple of special cases that can be optimized
+    # later on. But since premature optimization is a bad idea, we
+    # defer them.
+
+    # deal with config data first
+    if dst_rev._config is fork_rev._config:
+        # no change in master, accept src if different
+        config_changed = dst_rev._config != src_rev._config
+    else:
+        if dst_rev._config.hash != src_rev._config.hash:
+            raise MergeConflictException('Config collision')
+        config_changed = True
+
+    # now to the external children fields
+    new_children = dst_rev._children.copy()
+    _children_fields = children_fields(fork_rev.data.__class__)
+
+    for field_name, field in _children_fields.iteritems():
+
+        fork_list = fork_rev._children[field_name]
+        src_list = src_rev._children[field_name]
+        dst_list = dst_rev._children[field_name]
+
+        if dst_list == src_list:
+            # we do not need to change the dst, however we still need
+            # to complete the branch purging in child nodes so as not
+            # to leave dangling branches around
+            [merge_child_func(rev) for rev in src_list]
+            continue
+
+        if not field.key:
+            # If the list is not keyed, we really should not merge. We merely
+            # check for collision, i.e., if both changed (and not same)
+            if dst_list == fork_list:
+                # dst branch did not change since fork
+
+                assert src_list != fork_list, 'We should not be here otherwise'
+
+                # the incoming (src) rev changed, and we have to apply it
+                new_children[field_name] = [
+                    merge_child_func(rev) for rev in src_list]
+
+                if field.is_container:
+                    changes.append((CallbackType.POST_LISTCHANGE,
+                                    OperationContext(field_name=field_name)))
+
+            else:
+                if src_list != fork_list:
+                    raise MergeConflictException(
+                        'Cannot merge because single child node or un-keyed '
+                        'children list has changed')
+
+        else:
+
+            if dst_list == fork_list:
+                # Destination did not change
+
+                # We need to analyze only the changes on the incoming rev
+                # since fork
+                src = AnalyzeChanges(fork_list, src_list, field.key)
+
+                new_list = copy(src_list)  # we start from the source list
+
+                for key in src.added_keys:
+                    idx = src.keymap2[key]
+                    new_rev = merge_child_func(new_list[idx])
+                    new_list[idx] = new_rev
+                    changes.append(
+                        (CallbackType.POST_ADD,
+                         new_rev.data))
+                         # OperationContext(
+                         #     field_name=field_name,
+                         #     child_key=key,
+                         #     data=new_rev.data)))
+
+                for key in src.removed_keys:
+                    old_rev = fork_list[src.keymap1[key]]
+                    changes.append((
+                        CallbackType.POST_REMOVE,
+                        old_rev.data))
+                        # OperationContext(
+                        #     field_name=field_name,
+                        #     child_key=key,
+                        #     data=old_rev.data)))
+
+                for key in src.changed_keys:
+                    idx = src.keymap2[key]
+                    new_rev = merge_child_func(new_list[idx])
+                    new_list[idx] = new_rev
+                    # updated child gets its own change event
+
+                new_children[field_name] = new_list
+
+            else:
+
+                # For keyed fields we can really investigate what has been
+                # added, removed, or changed in both branches and do a
+                # fine-grained collision detection and merge
+
+                src = AnalyzeChanges(fork_list, src_list, field.key)
+                dst = AnalyzeChanges(fork_list, dst_list, field.key)
+
+                new_list = copy(dst_list)  # this time we start with the dst
+
+                for key in src.added_keys:
+                    # we cannot add if it has been added and is different
+                    if key in dst.added_keys:
+                        # it has been added to both, we need to check if
+                        # they are the same
+                        child_dst_rev = dst_list[dst.keymap2[key]]
+                        child_src_rev = src_list[src.keymap2[key]]
+                        if child_dst_rev.hash == child_src_rev.hash:
+                            # they match, so we do not need to change the
+                            # dst list, but we still need to purge the src
+                            # branch
+                            merge_child_func(child_dst_rev)
+                        else:
+                            raise MergeConflictException(
+                                'Cannot add because it has been added and '
+                                'different'
+                            )
+                    else:
+                        # this is a brand new key, need to add it
+                        new_rev = merge_child_func(src_list[src.keymap2[key]])
+                        new_list.append(new_rev)
+                        changes.append((
+                            CallbackType.POST_ADD,
+                            new_rev.data))
+                            # OperationContext(
+                            #     field_name=field_name,
+                            #     child_key=key,
+                            #     data=new_rev.data)))
+
+                for key in src.changed_keys:
+                    # we cannot change if it was removed in dst
+                    if key in dst.removed_keys:
+                        raise MergeConflictException(
+                            'Cannot change because it has been removed')
+
+                    # if it changed in dst as well, we need to check if they
+                    # match (same change)
+                    elif key in dst.changed_keys:
+                        child_dst_rev = dst_list[dst.keymap2[key]]
+                        child_src_rev = src_list[src.keymap2[key]]
+                        if child_dst_rev.hash == child_src_rev.hash:
+                            # they match, so we do not need to change the
+                            # dst list, but we still need to purge the src
+                            # branch
+                            merge_child_func(child_src_rev)
+                        elif child_dst_rev._config.hash != child_src_rev._config.hash:
+                            raise MergeConflictException(
+                                'Cannot update because it has been changed and '
+                                'different'
+                            )
+                        else:
+                            new_rev = merge_child_func(
+                                src_list[src.keymap2[key]])
+                            new_list[dst.keymap2[key]] = new_rev
+                            # no announcement for child update
+
+                    else:
+                        # it only changed in src branch
+                        new_rev = merge_child_func(src_list[src.keymap2[key]])
+                        new_list[dst.keymap2[key]] = new_rev
+                        # no announcement for child update
+
+                for key in reversed(src.removed_keys):  # we go from highest
+                                                        # index to lowest
+
+                    # we cannot remove if it has changed in dst
+                    if key in dst.changed_keys:
+                        raise MergeConflictException(
+                            'Cannot remove because it has changed')
+
+                    # if it has not been removed yet from dst, then remove it
+                    if key not in dst.removed_keys:
+                        dst_idx = dst.keymap2[key]
+                        old_rev = new_list.pop(dst_idx)
+                        changes.append((
+                            CallbackType.POST_REMOVE,
+                            old_rev.data))
+                            # OperationContext(
+                            #     field_name=field_name,
+                            #     child_key=key,
+                            #     data=old_rev.data)))
+
+                new_children[field_name] = new_list
+
+    if not dry_run:
+        rev = src_rev if config_changed else dst_rev
+        rev = rev.update_all_children(new_children, dst_rev._branch)
+        if config_changed:
+            changes.append((CallbackType.POST_UPDATE, rev.data))
+        return rev, changes
+
+    else:
+        return None, None
diff --git a/python/core/device_graph.py b/python/core/device_graph.py
new file mode 100644
index 0000000..a4e6d85
--- /dev/null
+++ b/python/core/device_graph.py
@@ -0,0 +1,136 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import networkx as nx
+
+from voltha.core.flow_decomposer import RouteHop
+
+
+class DeviceGraph(object):
+
+    """
+    Mixin class to compute routes in the device graph within
+    a logical device.
+    """
+
+    def compute_routes(self, root_proxy, logical_ports):
+        boundary_ports, graph = self._build_graph(root_proxy, logical_ports)
+        routes = self._build_routes(boundary_ports, graph, logical_ports)
+        return graph, routes
+
+    def _build_graph(self, root_proxy, logical_ports):
+
+        graph = nx.Graph()
+
+        # walk logical device's device and port links to discover full graph
+        devices_added = set()  # set of device.id's
+        ports_added = set()  # set of (device.id, port_no) tuples
+        peer_links = set()
+
+        boundary_ports = dict(
+            ((lp.device_id, lp.device_port_no), lp.ofp_port.port_no)
+            for lp in logical_ports
+        )
+
+        def add_device(device):
+            if device.id in devices_added:
+                return
+
+            graph.add_node(device.id, device=device)
+            devices_added.add(device.id)
+
+            ports = root_proxy.get('/devices/{}/ports'.format(device.id))
+            for port in ports:
+                port_id = (device.id, port.port_no)
+                if port_id not in ports_added:
+                    boundary = port_id in boundary_ports
+                    graph.add_node(port_id, port=port, boundary=boundary)
+                    graph.add_edge(device.id, port_id)
+                for peer in port.peers:
+                    if peer.device_id not in devices_added:
+                        peer_device = root_proxy.get(
+                            '/devices/{}'.format(peer.device_id))
+                        add_device(peer_device)
+                    else:
+                        peer_port_id = (peer.device_id, peer.port_no)
+                        if port_id < peer_port_id:
+                            peer_link = (port_id, peer_port_id)
+                        else:
+                            peer_link = (peer_port_id, port_id)
+                        if peer_link not in peer_links:
+                            graph.add_edge(*peer_link)
+                            peer_links.add(peer_link)
+
+        for logical_port in logical_ports:
+            device_id = logical_port.device_id
+            device = root_proxy.get('/devices/{}'.format(device_id))
+            add_device(device)
+
+        return boundary_ports, graph
+
+    def _build_routes(self, boundary_ports, graph, logical_ports):
+
+        root_ports = dict((lp.ofp_port.port_no, lp.root_port)
+                          for lp in logical_ports if lp.root_port)
+
+        routes = {}
+
+        for source, source_port_no in boundary_ports.iteritems():
+            for target, target_port_no in boundary_ports.iteritems():
+
+                if source is target:
+                    continue
+
+                # Ignore NNI - NNI routes
+                if source_port_no in root_ports \
+                        and target_port_no in root_ports:
+                    continue
+
+                # Ignore UNI - UNI routes
+                if source_port_no not in root_ports \
+                        and target_port_no not in root_ports:
+                    continue
+
+                path = nx.shortest_path(graph, source, target)
+
+                # number of nodes in valid paths is always multiple of 3
+                if len(path) % 3:
+                    continue
+
+                # in fact, we currently deal with single fan-out networks,
+                # so a valid path always has exactly 6 nodes (two hops)
+                assert len(path) == 6
+
+                ingress_input_port, ingress_device, ingress_output_port, \
+                egress_input_port, egress_device, egress_output_port = path
+
+                ingress_hop = RouteHop(
+                    device=graph.node[ingress_device]['device'],
+                    ingress_port=graph.node[ingress_input_port]['port'],
+                    egress_port=graph.node[ingress_output_port]['port']
+                )
+                egress_hop = RouteHop(
+                    device=graph.node[egress_device]['device'],
+                    ingress_port=graph.node[egress_input_port]['port'],
+                    egress_port=graph.node[egress_output_port]['port']
+                )
+
+                routes[(source_port_no, target_port_no)] = [
+                    ingress_hop, egress_hop
+                ]
+
+        return routes
+
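+# Shape of the computed result, for a hypothetical single OLT/ONU pair
+# ('dev_graph' is an object mixing in DeviceGraph; port numbers are
+# illustrative):
+#
+#   graph, routes = dev_graph.compute_routes(root_proxy, logical_ports)
+#   # routes: (in_ofp_port, out_ofp_port) -> [ingress RouteHop, egress RouteHop]
+#   ingress_hop, egress_hop = routes[(100, 1)]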
diff --git a/python/core/flow_decomposer.py b/python/core/flow_decomposer.py
new file mode 100644
index 0000000..faf3141
--- /dev/null
+++ b/python/core/flow_decomposer.py
@@ -0,0 +1,1010 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A mix-in class implementing flow decomposition
+"""
+from collections import OrderedDict
+from copy import copy, deepcopy
+from hashlib import md5
+
+import structlog
+
+from voltha.protos import third_party
+from voltha.protos import openflow_13_pb2 as ofp
+from common.tech_profile import tech_profile
+_ = third_party
+log = structlog.get_logger()
+
+
+# aliases
+ofb_field = ofp.ofp_oxm_ofb_field
+action = ofp.ofp_action
+
+# OFPAT_* shortcuts
+OUTPUT = ofp.OFPAT_OUTPUT
+COPY_TTL_OUT = ofp.OFPAT_COPY_TTL_OUT
+COPY_TTL_IN = ofp.OFPAT_COPY_TTL_IN
+SET_MPLS_TTL = ofp.OFPAT_SET_MPLS_TTL
+DEC_MPLS_TTL = ofp.OFPAT_DEC_MPLS_TTL
+PUSH_VLAN = ofp.OFPAT_PUSH_VLAN
+POP_VLAN = ofp.OFPAT_POP_VLAN
+PUSH_MPLS = ofp.OFPAT_PUSH_MPLS
+POP_MPLS = ofp.OFPAT_POP_MPLS
+SET_QUEUE = ofp.OFPAT_SET_QUEUE
+GROUP = ofp.OFPAT_GROUP
+SET_NW_TTL = ofp.OFPAT_SET_NW_TTL
+NW_TTL = ofp.OFPAT_DEC_NW_TTL
+SET_FIELD = ofp.OFPAT_SET_FIELD
+PUSH_PBB = ofp.OFPAT_PUSH_PBB
+POP_PBB = ofp.OFPAT_POP_PBB
+EXPERIMENTER = ofp.OFPAT_EXPERIMENTER
+
+# OFPXMT_OFB_* shortcuts (incomplete)
+IN_PORT = ofp.OFPXMT_OFB_IN_PORT
+IN_PHY_PORT = ofp.OFPXMT_OFB_IN_PHY_PORT
+METADATA = ofp.OFPXMT_OFB_METADATA
+ETH_DST = ofp.OFPXMT_OFB_ETH_DST
+ETH_SRC = ofp.OFPXMT_OFB_ETH_SRC
+ETH_TYPE = ofp.OFPXMT_OFB_ETH_TYPE
+VLAN_VID = ofp.OFPXMT_OFB_VLAN_VID
+VLAN_PCP = ofp.OFPXMT_OFB_VLAN_PCP
+IP_DSCP = ofp.OFPXMT_OFB_IP_DSCP
+IP_ECN = ofp.OFPXMT_OFB_IP_ECN
+IP_PROTO = ofp.OFPXMT_OFB_IP_PROTO
+IPV4_SRC = ofp.OFPXMT_OFB_IPV4_SRC
+IPV4_DST = ofp.OFPXMT_OFB_IPV4_DST
+TCP_SRC = ofp.OFPXMT_OFB_TCP_SRC
+TCP_DST = ofp.OFPXMT_OFB_TCP_DST
+UDP_SRC = ofp.OFPXMT_OFB_UDP_SRC
+UDP_DST = ofp.OFPXMT_OFB_UDP_DST
+SCTP_SRC = ofp.OFPXMT_OFB_SCTP_SRC
+SCTP_DST = ofp.OFPXMT_OFB_SCTP_DST
+ICMPV4_TYPE = ofp.OFPXMT_OFB_ICMPV4_TYPE
+ICMPV4_CODE = ofp.OFPXMT_OFB_ICMPV4_CODE
+ARP_OP = ofp.OFPXMT_OFB_ARP_OP
+ARP_SPA = ofp.OFPXMT_OFB_ARP_SPA
+ARP_TPA = ofp.OFPXMT_OFB_ARP_TPA
+ARP_SHA = ofp.OFPXMT_OFB_ARP_SHA
+ARP_THA = ofp.OFPXMT_OFB_ARP_THA
+IPV6_SRC = ofp.OFPXMT_OFB_IPV6_SRC
+IPV6_DST = ofp.OFPXMT_OFB_IPV6_DST
+IPV6_FLABEL = ofp.OFPXMT_OFB_IPV6_FLABEL
+ICMPV6_TYPE = ofp.OFPXMT_OFB_ICMPV6_TYPE
+ICMPV6_CODE = ofp.OFPXMT_OFB_ICMPV6_CODE
+IPV6_ND_TARGET = ofp.OFPXMT_OFB_IPV6_ND_TARGET
+OFB_IPV6_ND_SLL = ofp.OFPXMT_OFB_IPV6_ND_SLL
+IPV6_ND_TLL = ofp.OFPXMT_OFB_IPV6_ND_TLL
+MPLS_LABEL = ofp.OFPXMT_OFB_MPLS_LABEL
+MPLS_TC = ofp.OFPXMT_OFB_MPLS_TC
+MPLS_BOS = ofp.OFPXMT_OFB_MPLS_BOS
+PBB_ISID = ofp.OFPXMT_OFB_PBB_ISID
+TUNNEL_ID = ofp.OFPXMT_OFB_TUNNEL_ID
+IPV6_EXTHDR = ofp.OFPXMT_OFB_IPV6_EXTHDR
+
+# ofp_action_* shortcuts
+
+def output(port, max_len=ofp.OFPCML_MAX):
+    return action(
+        type=OUTPUT,
+        output=ofp.ofp_action_output(port=port, max_len=max_len)
+    )
+
+def mpls_ttl(ttl):
+    return action(
+        type=SET_MPLS_TTL,
+        mpls_ttl=ofp.ofp_action_mpls_ttl(mpls_ttl=ttl)
+    )
+
+def push_vlan(eth_type):
+    return action(
+        type=PUSH_VLAN,
+        push=ofp.ofp_action_push(ethertype=eth_type)
+    )
+
+def pop_vlan():
+    return action(
+        type=POP_VLAN
+    )
+
+def pop_mpls(eth_type):
+    return action(
+        type=POP_MPLS,
+        pop_mpls=ofp.ofp_action_pop_mpls(ethertype=eth_type)
+    )
+
+def group(group_id):
+    return action(
+        type=GROUP,
+        group=ofp.ofp_action_group(group_id=group_id)
+    )
+
+def nw_ttl(nw_ttl):
+    return action(
+        type=SET_NW_TTL,
+        nw_ttl=ofp.ofp_action_nw_ttl(nw_ttl=nw_ttl)
+    )
+
+def set_field(field):
+    return action(
+        type=SET_FIELD,
+        set_field=ofp.ofp_action_set_field(
+            field=ofp.ofp_oxm_field(
+                oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+                ofb_field=field))
+    )
+
+def experimenter(experimenter, data):
+    return action(
+        type=EXPERIMENTER,
+        experimenter=ofp.ofp_action_experimenter(
+            experimenter=experimenter, data=data)
+    )
+
+
+# ofb_field generators (incomplete set)
+
+def in_port(_in_port):
+    return ofb_field(type=IN_PORT, port=_in_port)
+
+def in_phy_port(_in_phy_port):
+    return ofb_field(type=IN_PHY_PORT, port=_in_phy_port)
+
+def metadata(_table_metadata):
+    return ofb_field(type=METADATA, table_metadata=_table_metadata)
+
+def eth_dst(_eth_dst):
+    return ofb_field(type=ETH_DST, eth_dst=_eth_dst)
+
+def eth_src(_eth_src):
+    return ofb_field(type=ETH_SRC, eth_src=_eth_src)
+
+def eth_type(_eth_type):
+    return ofb_field(type=ETH_TYPE, eth_type=_eth_type)
+
+def vlan_vid(_vlan_vid):
+    return ofb_field(type=VLAN_VID, vlan_vid=_vlan_vid)
+
+def vlan_pcp(_vlan_pcp):
+    return ofb_field(type=VLAN_PCP, vlan_pcp=_vlan_pcp)
+
+def ip_dscp(_ip_dscp):
+    return ofb_field(type=IP_DSCP, ip_dscp=_ip_dscp)
+
+def ip_ecn(_ip_ecn):
+    return ofb_field(type=IP_ECN, ip_ecn=_ip_ecn)
+
+def ip_proto(_ip_proto):
+    return ofb_field(type=IP_PROTO, ip_proto=_ip_proto)
+
+def ipv4_src(_ipv4_src):
+    return ofb_field(type=IPV4_SRC, ipv4_src=_ipv4_src)
+
+def ipv4_dst(_ipv4_dst):
+    return ofb_field(type=IPV4_DST, ipv4_dst=_ipv4_dst)
+
+def tcp_src(_tcp_src):
+    return ofb_field(type=TCP_SRC, tcp_src=_tcp_src)
+
+def tcp_dst(_tcp_dst):
+    return ofb_field(type=TCP_DST, tcp_dst=_tcp_dst)
+
+def udp_src(_udp_src):
+    return ofb_field(type=UDP_SRC, udp_src=_udp_src)
+
+def udp_dst(_udp_dst):
+    return ofb_field(type=UDP_DST, udp_dst=_udp_dst)
+
+def sctp_src(_sctp_src):
+    return ofb_field(type=SCTP_SRC, sctp_src=_sctp_src)
+
+def sctp_dst(_sctp_dst):
+    return ofb_field(type=SCTP_DST, sctp_dst=_sctp_dst)
+
+def icmpv4_type(_icmpv4_type):
+    return ofb_field(type=ICMPV4_TYPE, icmpv4_type=_icmpv4_type)
+
+def icmpv4_code(_icmpv4_code):
+    return ofb_field(type=ICMPV4_CODE, icmpv4_code=_icmpv4_code)
+
+def arp_op(_arp_op):
+    return ofb_field(type=ARP_OP, arp_op=_arp_op)
+
+def arp_spa(_arp_spa):
+    return ofb_field(type=ARP_SPA, arp_spa=_arp_spa)
+
+def arp_tpa(_arp_tpa):
+    return ofb_field(type=ARP_TPA, arp_tpa=_arp_tpa)
+
+def arp_sha(_arp_sha):
+    return ofb_field(type=ARP_SHA, arp_sha=_arp_sha)
+
+def arp_tha(_arp_tha):
+    return ofb_field(type=ARP_THA, arp_tha=_arp_tha)
+
+def ipv6_src(_ipv6_src):
+    return ofb_field(type=IPV6_SRC, ipv6_src=_ipv6_src)
+
+def ipv6_dst(_ipv6_dst):
+    return ofb_field(type=IPV6_DST, ipv6_dst=_ipv6_dst)
+
+def ipv6_flabel(_ipv6_flabel):
+    return ofb_field(type=IPV6_FLABEL, ipv6_flabel=_ipv6_flabel)
+
+def icmpv6_type(_icmpv6_type):
+    return ofb_field(type=ICMPV6_TYPE, icmpv6_type=_icmpv6_type)
+
+def icmpv6_code(_icmpv6_code):
+    return ofb_field(type=ICMPV6_CODE, icmpv6_code=_icmpv6_code)
+
+def ipv6_nd_target(_ipv6_nd_target):
+    return ofb_field(type=IPV6_ND_TARGET, ipv6_nd_target=_ipv6_nd_target)
+
+def ofb_ipv6_nd_sll(_ofb_ipv6_nd_sll):
+    return ofb_field(type=OFB_IPV6_ND_SLL, ipv6_nd_ssl=_ofb_ipv6_nd_sll)
+
+def ipv6_nd_tll(_ipv6_nd_tll):
+    return ofb_field(type=IPV6_ND_TLL, ipv6_nd_tll=_ipv6_nd_tll)
+
+def mpls_label(_mpls_label):
+    return ofb_field(type=MPLS_LABEL, mpls_label=_mpls_label)
+
+def mpls_tc(_mpls_tc):
+    return ofb_field(type=MPLS_TC, mpls_tc=_mpls_tc)
+
+def mpls_bos(_mpls_bos):
+    return ofb_field(type=MPLS_BOS, mpls_bos=_mpls_bos)
+
+def pbb_isid(_pbb_isid):
+    return ofb_field(type=PBB_ISID, pbb_isid=_pbb_isid)
+
+def tunnel_id(_tunnel_id):
+    return ofb_field(type=TUNNEL_ID, tunnel_id=_tunnel_id)
+
+def ipv6_exthdr(_ipv6_exthdr):
+    return ofb_field(type=IPV6_EXTHDR, ipv6_exthdr=_ipv6_exthdr)
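+
+# Illustrative match construction with the generators above (values are
+# examples only):
+#
+#   match_fields = [in_port(1), eth_type(0x800), ipv4_dst(0x0a000001)]
+#   # each entry is an ofp_oxm_ofb_field, ready for mk_oxm_fields() below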
+
+
+# frequently used extractors:
+
+def get_actions(flow):
+    """Extract list of ofp_action objects from flow spec object"""
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    # we have the following hard assumptions for now
+    actions = []
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_APPLY_ACTIONS or instruction.type == ofp.OFPIT_WRITE_ACTIONS:
+            actions.extend(instruction.actions.actions)
+    return actions
+
+
+def get_ofb_fields(flow):
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    assert flow.match.type == ofp.OFPMT_OXM
+    ofb_fields = []
+    for field in flow.match.oxm_fields:
+        assert field.oxm_class == ofp.OFPXMC_OPENFLOW_BASIC
+        ofb_fields.append(field.ofb_field)
+    return ofb_fields
+
+def get_out_port(flow):
+    for action in get_actions(flow):
+        if action.type == OUTPUT:
+            return action.output.port
+    return None
+
+def get_in_port(flow):
+    for field in get_ofb_fields(flow):
+        if field.type == IN_PORT:
+            return field.port
+    return None
+
+def get_goto_table_id(flow):
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_GOTO_TABLE:
+            return instruction.goto_table.table_id
+    return None
+
+def get_metadata(flow):
+    """Legacy get method (only want the lower 32 bits)"""
+    for field in get_ofb_fields(flow):
+        if field.type == METADATA:
+            return field.table_metadata & 0xffffffff
+    return None
+
+def get_metadata_64_bit(flow):
+    for field in get_ofb_fields(flow):
+        if field.type == METADATA:
+            return field.table_metadata
+    return None
+
+
+def get_port_number_from_metadata(flow):
+    """
+    The port number (UNI on ONU) is in the lower 32-bits of metadata and
+    the inner_tag is in the upper 32-bits
+
+    This is set in the ONOS OltPipeline as a metadata field
+    """
+    md = get_metadata_64_bit(flow)
+
+    if md is None:
+        return None
+
+    if md <= 0xffffffff:
+        log.warn('onos-upgrade-suggested',
+                 metadata=md,
+                 message='Legacy MetaData detected from OltPipeline')
+        return md
+
+    return md & 0xffffffff
+
+
+def get_inner_tag_from_metadata(flow):
+    """
+    The port number (UNI on ONU) is in the lower 32-bits of metadata and
+    the inner_tag is in the upper 32-bits
+
+    This is set in the ONOS OltPipeline as a metadata field
+    """
+    md = get_metadata_64_bit(flow)
+
+    if md is None:
+        return None
+
+    if md <= 0xffffffff:
+        log.warn('onos-upgrade-suggested',
+                 metadata=md,
+                 message='Legacy MetaData detected from OltPipeline')
+        return md
+
+    return (md >> 32) & 0xffffffff
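+
+# Worked example of the 64-bit metadata layout used by the two helpers above
+# (illustrative values): with inner_tag=257 and uni_port=16,
+#
+#   md = (257 << 32) | 16               # 0x0000010100000010
+#   md & 0xffffffff == 16               # get_port_number_from_metadata
+#   (md >> 32) & 0xffffffff == 257      # get_inner_tag_from_metadata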
+
+
+# test and extract next table and group information
+def has_next_table(flow):
+    return get_goto_table_id(flow) is not None
+
+def get_group(flow):
+    for action in get_actions(flow):
+        if action.type == GROUP:
+            return action.group.group_id
+    return None
+
+def get_meter_ids_from_flow(flow):
+    meter_ids = list()
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_METER:
+            meter_ids.append(instruction.meter.meter_id)
+    return meter_ids
+
+def has_group(flow):
+    return get_group(flow) is not None
+
+def mk_oxm_fields(match_fields):
+    oxm_fields = [
+        ofp.ofp_oxm_field(
+            oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+            ofb_field=field
+        ) for field in match_fields
+        ]
+
+    return oxm_fields
+
+def mk_instructions_from_actions(actions):
+    instructions_action = ofp.ofp_instruction_actions()
+    instructions_action.actions.extend(actions)
+    instruction = ofp.ofp_instruction(type=ofp.OFPIT_APPLY_ACTIONS,
+                                      actions=instructions_action)
+    return [instruction]
+
+def mk_simple_flow_mod(match_fields, actions, command=ofp.OFPFC_ADD,
+                       next_table_id=None, meters=None, **kw):
+    """
+    Convenience function to generate an ofp_flow_mod message with an OXM BASIC
+    match composed from the match_fields, and a single APPLY_ACTIONS
+    instruction with a list of ofp_action objects.
+    :param match_fields: list(ofp_oxm_ofb_field)
+    :param actions: list(ofp_action)
+    :param command: one of OFPFC_*
+    :param next_table_id: optional table id for an added GOTO_TABLE instruction
+    :param meters: optional list of meter ids added as METER instructions
+    :param kw: additional keyword-based params to ofp_flow_mod
+    :return: initialized ofp_flow_mod object
+    """
+    instructions = [
+        ofp.ofp_instruction(
+            type=ofp.OFPIT_APPLY_ACTIONS,
+            actions=ofp.ofp_instruction_actions(actions=actions)
+        )
+    ]
+
+    if meters is not None:
+        for meter_id in meters:
+            instructions.append(ofp.ofp_instruction(
+                type=ofp.OFPIT_METER,
+                meter=ofp.ofp_instruction_meter(meter_id=meter_id)
+            ))
+
+    if next_table_id is not None:
+        instructions.append(ofp.ofp_instruction(
+            type=ofp.OFPIT_GOTO_TABLE,
+            goto_table=ofp.ofp_instruction_goto_table(table_id=next_table_id)
+        ))
+
+    return ofp.ofp_flow_mod(
+        command=command,
+        match=ofp.ofp_match(
+            type=ofp.OFPMT_OXM,
+            oxm_fields=[
+                ofp.ofp_oxm_field(
+                    oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+                    ofb_field=field
+                ) for field in match_fields
+            ]
+        ),
+        instructions=instructions,
+        **kw
+    )
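+
+# A minimal call sketch (port numbers, priority and eth_type are
+# illustrative):
+#
+#   mod = mk_simple_flow_mod(
+#       match_fields=[in_port(1), eth_type(0x888e)],
+#       actions=[output(ofp.OFPP_CONTROLLER)],
+#       priority=1000)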
+
+
+def mk_multicast_group_mod(group_id, buckets, command=ofp.OFPGC_ADD):
+    group = ofp.ofp_group_mod(
+        command=command,
+        type=ofp.OFPGT_ALL,
+        group_id=group_id,
+        buckets=buckets
+    )
+    return group
+
+
+def hash_flow_stats(flow):
+    """
+    Return unique 64-bit integer hash for flow covering the following
+    attributes: 'table_id', 'priority', 'flags', 'cookie', 'match', '_instruction_string'
+    """
+    _instruction_string = ""
+    for _instruction in flow.instructions:
+        _instruction_string += _instruction.SerializeToString()
+
+    hex = md5('{},{},{},{},{},{}'.format(
+        flow.table_id,
+        flow.priority,
+        flow.flags,
+        flow.cookie,
+        flow.match.SerializeToString(),
+        _instruction_string
+    )).hexdigest()
+    return int(hex[:16], 16)
+
+
+def flow_stats_entry_from_flow_mod_message(mod):
+    flow = ofp.ofp_flow_stats(
+        table_id=mod.table_id,
+        priority=mod.priority,
+        idle_timeout=mod.idle_timeout,
+        hard_timeout=mod.hard_timeout,
+        flags=mod.flags,
+        cookie=mod.cookie,
+        match=mod.match,
+        instructions=mod.instructions
+    )
+    flow.id = hash_flow_stats(flow)
+    return flow
+
+
+def group_entry_from_group_mod(mod):
+    group = ofp.ofp_group_entry(
+        desc=ofp.ofp_group_desc(
+            type=mod.type,
+            group_id=mod.group_id,
+            buckets=mod.buckets
+        ),
+        stats=ofp.ofp_group_stats(
+            group_id=mod.group_id
+            # TODO do we need to instantiate bucket bins?
+        )
+    )
+    return group
+
+
+def mk_flow_stat(**kw):
+    return flow_stats_entry_from_flow_mod_message(mk_simple_flow_mod(**kw))
+
+
+def mk_group_stat(**kw):
+    return group_entry_from_group_mod(mk_multicast_group_mod(**kw))
+
+class RouteHop(object):
+    __slots__ = ('_device', '_ingress_port', '_egress_port')
+    def __init__(self, device, ingress_port, egress_port):
+        self._device = device
+        self._ingress_port = ingress_port
+        self._egress_port = egress_port
+    @property
+    def device(self): return self._device
+    @property
+    def ingress_port(self): return self._ingress_port
+    @property
+    def egress_port(self): return self._egress_port
+    def __eq__(self, other):
+        return (
+            self._device == other._device and
+            self._ingress_port == other._ingress_port and
+            self._egress_port == other._egress_port)
+    def __ne__(self, other):
+        return not self.__eq__(other)
+    def __str__(self):
+        return 'RouteHop device_id {}, ingress_port {}, egress_port {}'.format(
+            self._device.id, self._ingress_port, self._egress_port)
+
+class FlowDecomposer(object):
+
+    def __init__(self, *args, **kw):
+        self.logical_device_id = 'this shall be overwritten in derived class'
+        super(FlowDecomposer, self).__init__(*args, **kw)
+
+    # ~~~~~~~~~~~~~~~~~~~~ methods exposed *to* derived class ~~~~~~~~~~~~~~~~~
+
+    def decompose_rules(self, flows, groups):
+        """
+        Generate per-device flows and flow-groups from the flows and groups
+        defined on a logical device
+        :param flows: logical device flows
+        :param groups: logical device flow groups
+        :return: dict(device_id ->
+            (OrderedDict-of-device-flows, OrderedDict-of-device-flow-groups))
+        """
+
+        device_rules = deepcopy(self.get_all_default_rules())
+        group_map = dict((g.desc.group_id, g) for g in groups)
+
+        for flow in flows:
+            for device_id, (_flows, _groups) \
+                    in self.decompose_flow(flow, group_map).iteritems():
+                fl_lst, gr_lst = device_rules.setdefault(
+                    device_id, (OrderedDict(), OrderedDict()))
+                for _flow in _flows:
+                    if _flow.id not in fl_lst:
+                        fl_lst[_flow.id] = _flow
+                for _group in _groups:
+                    if _group.group_id not in gr_lst:
+                        gr_lst[_group.group_id] = _group
+        return device_rules
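+
+    # The returned mapping has this shape (device ids are illustrative):
+    #
+    #   {'olt-1': (OrderedDict(flow.id -> flow),
+    #              OrderedDict(group_id -> group)),
+    #    'onu-1': (OrderedDict(...), OrderedDict(...))}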
+
+    def decompose_flow(self, flow, group_map):
+        assert isinstance(flow, ofp.ofp_flow_stats)
+
+        ####################################################################
+        #
+        # limited, heuristics-based implementation
+        # needs to be replaced, see https://jira.opencord.org/browse/CORD-841
+        #
+        ####################################################################
+
+        in_port_no = get_in_port(flow)
+        out_port_no = get_out_port(flow)  # may be None
+
+        device_rules = {}  # accumulator
+
+        route = self.get_route(in_port_no, out_port_no)
+        if route is None:
+            log.error('no-route', in_port_no=in_port_no,
+                      out_port_no=out_port_no, comment='deleting flow')
+            self.flow_delete(flow)
+            return device_rules
+
+        assert len(route) == 2
+        ingress_hop, egress_hop = route
+
+        def is_downstream():
+            return ingress_hop.device.root
+
+        def is_upstream():
+            return not is_downstream()
+
+        def update_devices_rules(flow, curr_device_rules, meter_ids=None, table_id=None):
+            actions = [action.type for action in get_actions(flow)]
+            if len(actions) == 1 and OUTPUT in actions:
+                # Transparent ONU and OLT case (No-L2-Modification flow)
+                child_device_flow_lst, _ = curr_device_rules.setdefault(
+                    ingress_hop.device.id, ([], []))
+                parent_device_flow_lst, _ = curr_device_rules.setdefault(
+                    egress_hop.device.id, ([], []))
+
+                child_device_flow_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                                     in_port(ingress_hop.ingress_port.port_no)
+                                 ] + [
+                                     field for field in get_ofb_fields(flow)
+                                     if field.type not in (IN_PORT,)
+                                 ],
+                    actions=[
+                        output(ingress_hop.egress_port.port_no)
+                    ]
+                ))
+
+                parent_device_flow_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                                     in_port(egress_hop.ingress_port.port_no),
+                                 ] + [
+                                     field for field in get_ofb_fields(flow)
+                                     if field.type not in (IN_PORT,)
+                                 ],
+                    actions=[
+                        output(egress_hop.egress_port.port_no)
+                    ],
+                    table_id=table_id,
+                    meters=meter_ids
+                ))
+
+            else:
+                fl_lst, _ = curr_device_rules.setdefault(
+                    egress_hop.device.id, ([], []))
+                fl_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                                     in_port(egress_hop.ingress_port.port_no)
+                                 ] + [
+                                     field for field in get_ofb_fields(flow)
+                                     if field.type not in (IN_PORT,)
+                                 ],
+                    actions=[
+                                action for action in get_actions(flow)
+                                if action.type != OUTPUT
+                            ] + [
+                                output(egress_hop.egress_port.port_no)
+                            ],
+                    table_id=table_id,
+                    meters=meter_ids
+                ))
+
+        if out_port_no is not None and \
+                (out_port_no & 0x7fffffff) == ofp.OFPP_CONTROLLER:
+
+            # UPSTREAM CONTROLLER-BOUND FLOW
+
+            # we assume that the ingress device is already pushing a
+            # customer-specific vlan (c-vid), based on its default flow
+            # rules so there is nothing else to do on the ONU
+
+            # on the olt, we need to push a new tag and set it to 4000
+            # which for now represents in-bound channel to the controller
+            # (via Voltha)
+            # TODO make the 4000 configurable
+            fl_lst, _ = device_rules.setdefault(
+                egress_hop.device.id, ([], []))
+
+            log.info('trap-flow', in_port_no=in_port_no,
+                     nni=self._nni_logical_port_no)
+
+            if in_port_no == self._nni_logical_port_no:
+                log.debug('trap-nni')
+                # Trap flow for NNI port
+                fl_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                        in_port(egress_hop.egress_port.port_no)
+                    ] + [
+                        field for field in get_ofb_fields(flow)
+                        if field.type not in (IN_PORT,)
+                    ],
+                    actions=[
+                        action for action in get_actions(flow)
+                    ]
+                ))
+
+            else:
+                log.debug('trap-uni')
+                # Trap flow for UNI port
+
+                # in_port_no is None in the wildcard-input case; do not
+                # include the upstream port in the match of the 4000 flow
+                if in_port_no is None:
+                    in_ports = self.get_wildcard_input_ports(exclude_port=
+                                                             egress_hop.egress_port.port_no)
+                else:
+                    in_ports = [in_port_no]
+
+                for input_port in in_ports:
+                    fl_lst.append(mk_flow_stat(        # Upstream flow
+                        priority=flow.priority,
+                        cookie=flow.cookie,
+                        match_fields=[
+                            in_port(egress_hop.ingress_port.port_no),
+                            vlan_vid(ofp.OFPVID_PRESENT | input_port)
+                        ] + [
+                            field for field in get_ofb_fields(flow)
+                            if field.type not in (IN_PORT, VLAN_VID)
+                        ],
+                        actions=[
+                            push_vlan(0x8100),
+                            set_field(vlan_vid(ofp.OFPVID_PRESENT | 4000)),
+                            output(egress_hop.egress_port.port_no)]
+                    ))
+                    fl_lst.append(mk_flow_stat(            # Downstream flow
+                        priority=flow.priority,
+                        match_fields=[
+                            in_port(egress_hop.egress_port.port_no),
+                            vlan_vid(ofp.OFPVID_PRESENT | 4000),
+                            vlan_pcp(0),
+                            metadata(input_port)
+                        ],
+                        actions=[
+                            pop_vlan(),
+                            output(egress_hop.ingress_port.port_no)]
+                    ))
+        else:
+            # NOT A CONTROLLER-BOUND FLOW
+            if is_upstream():
+
+                # We assume that anything that is upstream needs to get Q-in-Q
+                # treatment and that this is expressed via two flow rules,
+                # the first using the goto-table instruction. We also assume
+                # that the inner tag is applied at the ONU, while the outer
+                # tag is applied at the OLT
+                next_table_id = get_goto_table_id(flow)
+                if next_table_id is not None and next_table_id < tech_profile.DEFAULT_TECH_PROFILE_TABLE_ID:
+                    assert out_port_no is None
+                    fl_lst, _ = device_rules.setdefault(
+                        ingress_hop.device.id, ([], []))
+                    fl_lst.append(mk_flow_stat(
+                        priority=flow.priority,
+                        cookie=flow.cookie,
+                        match_fields=[
+                            in_port(ingress_hop.ingress_port.port_no)
+                        ] + [
+                            field for field in get_ofb_fields(flow)
+                            if field.type not in (IN_PORT,)
+                        ],
+                        actions=[
+                            action for action in get_actions(flow)
+                        ] + [
+                            output(ingress_hop.egress_port.port_no)
+                        ]
+                    ))
+
+                elif next_table_id is not None and next_table_id >= tech_profile.DEFAULT_TECH_PROFILE_TABLE_ID:
+                    assert out_port_no is not None
+                    meter_ids = get_meter_ids_from_flow(flow)
+                    update_devices_rules(flow, device_rules, meter_ids, next_table_id)
+                else:
+                    update_devices_rules(flow, device_rules)
+
+            else:  # downstream
+                next_table_id = get_goto_table_id(flow)
+                if next_table_id is not None and next_table_id < tech_profile.DEFAULT_TECH_PROFILE_TABLE_ID:
+                    assert out_port_no is None
+
+                    if get_metadata(flow) is not None:
+                        log.debug('creating-metadata-flow', flow=flow)
+                        # For downstream flows with dual-tags, recalculate route.
+                        port_number = get_port_number_from_metadata(flow)
+
+                        if port_number is not None:
+                            route = self.get_route(in_port_no, port_number)
+                            if route is None:
+                                log.error('no-route-double-tag', in_port_no=in_port_no,
+                                          out_port_no=port_number, comment='deleting flow',
+                                          metadata=get_metadata_64_bit(flow))
+                                self.flow_delete(flow)
+                                return device_rules
+                            assert len(route) == 2
+                            ingress_hop, egress_hop = route
+
+                        inner_tag = get_inner_tag_from_metadata(flow)
+
+                        if inner_tag is None:
+                            log.error('no-inner-tag-double-tag', in_port_no=in_port_no,
+                                      out_port_no=port_number, comment='deleting flow',
+                                      metadata=get_metadata_64_bit(flow))
+                            self.flow_delete(flow)
+                            return device_rules
+
+                        fl_lst, _ = device_rules.setdefault(
+                            ingress_hop.device.id, ([], []))
+                        fl_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(ingress_hop.ingress_port.port_no),
+                                metadata(inner_tag)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT, METADATA)
+                            ],
+                            actions=[
+                                action for action in get_actions(flow)
+                            ] + [
+                                output(ingress_hop.egress_port.port_no)
+                            ]
+                        ))
+                    else:
+                        log.debug('creating-standard-flow', flow=flow)
+                        fl_lst, _ = device_rules.setdefault(
+                            ingress_hop.device.id, ([], []))
+                        fl_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(ingress_hop.ingress_port.port_no)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT,)
+                            ],
+                            actions=[
+                                action for action in get_actions(flow)
+                            ] + [
+                                output(ingress_hop.egress_port.port_no)
+                            ]
+                        ))
+
+                elif out_port_no is not None:  # unicast case
+
+                    actions = [action.type for action in get_actions(flow)]
+                    # Transparent ONU and OLT case (No-L2-Modification flow)
+                    if len(actions) == 1 and OUTPUT in actions:
+                        parent_device_flow_lst, _ = device_rules.setdefault(
+                                                ingress_hop.device.id, ([], []))
+                        child_device_flow_lst, _ = device_rules.setdefault(
+                                                egress_hop.device.id, ([], []))
+
+                        parent_device_flow_lst.append(mk_flow_stat(
+                                            priority=flow.priority,
+                                            cookie=flow.cookie,
+                                            match_fields=[
+                                                in_port(ingress_hop.ingress_port.port_no)
+                                            ] + [
+                                                field for field in get_ofb_fields(flow)
+                                                if field.type not in (IN_PORT,)
+                                            ],
+                                            actions=[
+                                                 output(ingress_hop.egress_port.port_no)
+                                            ]
+                                            ))
+
+                        child_device_flow_lst.append(mk_flow_stat(
+                                            priority=flow.priority,
+                                            cookie=flow.cookie,
+                                            match_fields=[
+                                                in_port(egress_hop.ingress_port.port_no),
+                                            ] + [
+                                                field for field in get_ofb_fields(flow)
+                                                if field.type not in (IN_PORT,)
+                                            ],
+                                            actions=[
+                                                output(egress_hop.egress_port.port_no)
+                                            ]
+                                            ))
+                    else:
+                        fl_lst, _ = device_rules.setdefault(
+                            egress_hop.device.id, ([], []))
+                        fl_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(egress_hop.ingress_port.port_no)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT,)
+                            ],
+                            actions=[
+                                action for action in get_actions(flow)
+                                if action.type not in (OUTPUT,)
+                            ] + [
+                                output(egress_hop.egress_port.port_no)
+                            ],
+                            #table_id=flow.table_id,
+                            #meters=None if len(get_meter_ids_from_flow(flow)) == 0 else get_meter_ids_from_flow(flow)
+                        ))
+                else:
+                    grp_id = get_group(flow)
+
+                    if grp_id is not None:  # multicast case
+                        fl_lst_olt, _ = device_rules.setdefault(
+                            ingress_hop.device.id, ([], []))
+                        # having no group yet is the same as having a group with
+                        # no buckets
+                        group = group_map.get(grp_id, ofp.ofp_group_entry())
+
+                        for bucket in group.desc.buckets:
+                            found_pop_vlan = False
+                            other_actions = []
+                            for action in bucket.actions:
+                                if action.type == POP_VLAN:
+                                    found_pop_vlan = True
+                                elif action.type == OUTPUT:
+                                    out_port_no = action.output.port
+                                else:
+                                    other_actions.append(action)
+                            # re-run route request to determine egress device and
+                            # ports
+                            route2 = self.get_route(in_port_no, out_port_no)
+                            if not route2 or len(route2) != 2:
+                                log.error('mc-no-route', in_port_no=in_port_no,
+                                    out_port_no=out_port_no, route2=route2,
+                                    comment='deleting flow')
+                                self.flow_delete(flow)
+                                continue
+
+                            ingress_hop2, egress_hop = route2
+
+                            if ingress_hop.ingress_port != ingress_hop2.ingress_port:
+                                log.error('mc-ingress-hop-hop2-mismatch',
+                                    ingress_hop=ingress_hop,
+                                    ingress_hop2=ingress_hop2,
+                                    in_port_no=in_port_no,
+                                    out_port_no=out_port_no,
+                                    comment='ignoring flow')
+                                continue
+
+                            fl_lst_olt.append(mk_flow_stat(
+                                priority=flow.priority,
+                                cookie=flow.cookie,
+                                match_fields=[
+                                    in_port(ingress_hop.ingress_port.port_no)
+                                ] + [
+                                    field for field in get_ofb_fields(flow)
+                                    if field.type not in (IN_PORT,)
+                                ],
+                                actions=[
+                                    action for action in get_actions(flow)
+                                    if action.type not in (GROUP,)
+                                ] + [
+                                    pop_vlan(),
+                                    output(egress_hop.ingress_port.port_no)
+                                ]
+                            ))
+
+                            fl_lst_onu, _ = device_rules.setdefault(
+                                egress_hop.device.id, ([], []))
+                            fl_lst_onu.append(mk_flow_stat(
+                                priority=flow.priority,
+                                cookie=flow.cookie,
+                                match_fields=[
+                                    in_port(egress_hop.ingress_port.port_no)
+                                ] + [
+                                    field for field in get_ofb_fields(flow)
+                                    if field.type not in (IN_PORT, VLAN_VID, VLAN_PCP)
+                                ],
+                                actions=other_actions + [
+                                    output(egress_hop.egress_port.port_no)
+                                ]
+                            ))
+                    else:
+                        raise NotImplementedError('undefined downstream case for flows')
+        return device_rules
+
+    # ~~~~~~~~~~~~ methods expected to be provided by derived class ~~~~~~~~~~~
+
+    def get_all_default_rules(self):
+        raise NotImplementedError('derived class must provide')
+
+    def get_default_rules(self, device_id):
+        raise NotImplementedError('derived class must provide')
+
+    def get_route(self, ingress_port_no, egress_port_no):
+        raise NotImplementedError('derived class must provide')
+
+    def get_wildcard_input_ports(self, exclude_port=None):
+        raise NotImplementedError('derived class must provide')
+
+    def flow_delete(self, mod):
+        raise NotImplementedError('derived class must provide')
diff --git a/python/core/logical_device_agent.py b/python/core/logical_device_agent.py
new file mode 100644
index 0000000..10ec66c
--- /dev/null
+++ b/python/core/logical_device_agent.py
@@ -0,0 +1,973 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Model that captures the current state of a logical device
+"""
+from collections import OrderedDict
+
+import structlog
+
+from common.event_bus import EventBusClient
+from common.frameio.frameio import hexify
+from voltha.registry import registry
+from voltha.core.config.config_proxy import CallbackType
+from voltha.core.device_graph import DeviceGraph
+from voltha.core.flow_decomposer import FlowDecomposer, \
+    flow_stats_entry_from_flow_mod_message, group_entry_from_group_mod, \
+    mk_flow_stat, in_port, vlan_vid, vlan_pcp, pop_vlan, output, set_field, \
+    push_vlan, mk_simple_flow_mod
+from voltha.protos import third_party
+from voltha.protos import openflow_13_pb2 as ofp
+from voltha.protos.device_pb2 import Port
+from voltha.protos.logical_device_pb2 import LogicalPort
+from voltha.protos.openflow_13_pb2 import Flows, Meters, FlowGroups, ofp_meter_config
+
+_ = third_party
+
+def mac_str_to_tuple(mac):
+    return tuple(int(d, 16) for d in mac.split(':'))
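+# Example: mac_str_to_tuple('00:11:22:33:44:55') == (0x00, 0x11, 0x22, 0x33, 0x44, 0x55)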
+
+
+class LogicalDeviceAgent(FlowDecomposer, DeviceGraph):
+
+    def __init__(self, core, logical_device):
+        try:
+            self.core = core
+            self.local_handler = core.get_local_handler()
+            self.logical_device_id = logical_device.id
+
+            self.root_proxy = core.get_proxy('/')
+            self.flows_proxy = core.get_proxy(
+                '/logical_devices/{}/flows'.format(logical_device.id))
+            self.meters_proxy = core.get_proxy(
+                '/logical_devices/{}/meters'.format(logical_device.id))
+            self.groups_proxy = core.get_proxy(
+                '/logical_devices/{}/flow_groups'.format(logical_device.id))
+            self.self_proxy = core.get_proxy(
+                '/logical_devices/{}'.format(logical_device.id))
+
+            self.flows_proxy.register_callback(
+                CallbackType.PRE_UPDATE, self._pre_process_flows)
+            self.flows_proxy.register_callback(
+                CallbackType.POST_UPDATE, self._flow_table_updated)
+            self.groups_proxy.register_callback(
+                CallbackType.POST_UPDATE, self._group_table_updated)
+            self.self_proxy.register_callback(
+                CallbackType.POST_ADD, self._port_added)
+            self.self_proxy.register_callback(
+                CallbackType.POST_REMOVE, self._port_removed)
+
+            self.port_proxy = {}
+            self.port_status_has_changed = {}
+
+            self.event_bus = EventBusClient()
+            self.packet_in_subscription = self.event_bus.subscribe(
+                topic='packet-in:{}'.format(logical_device.id),
+                callback=self.handle_packet_in_event)
+
+            self.log = structlog.get_logger(logical_device_id=logical_device.id)
+
+            self._routes = None
+            self._no_flow_changes_required = False
+            self._flows_ids_to_add = []
+            self._flows_ids_to_remove = []
+            self._flows_to_remove = []
+
+            self.accepts_direct_logical_flows = False
+            self.device_id = self.self_proxy.get('/').root_device_id
+            device_adapter_type = self.root_proxy.get('/devices/{}'.format(
+                self.device_id)).adapter
+            device_type = self.root_proxy.get('/device_types/{}'.format(
+                device_adapter_type))
+
+            if device_type is not None:
+                self.accepts_direct_logical_flows = \
+                    device_type.accepts_direct_logical_flows_update
+
+            if self.accepts_direct_logical_flows:
+
+                self.device_adapter_agent = registry(
+                    'adapter_loader').get_agent(device_adapter_type).adapter
+
+                self.log.debug('this device accepts direct logical flows',
+                               device_adapter_type=device_adapter_type)
+
+        except Exception as e:
+            self.log.exception('init-error', e=e)
+
+    def start(self, reconcile=False):
+        self.log.debug('starting')
+        if reconcile:
+            # Register the callbacks for the ports
+            ports = self.self_proxy.get('/ports')
+            for port in ports:
+                self._reconcile_port(port)
+            self.log.debug('ports-reconciled', ports=ports)
+        self.log.debug('started')
+        return self
+
+    def stop(self):
+        self.log.debug('stopping')
+        try:
+            self.flows_proxy.unregister_callback(
+                CallbackType.POST_UPDATE, self._flow_table_updated)
+            self.groups_proxy.unregister_callback(
+                CallbackType.POST_UPDATE, self._group_table_updated)
+            self.self_proxy.unregister_callback(
+                CallbackType.POST_ADD, self._port_added)
+            self.self_proxy.unregister_callback(
+                CallbackType.POST_REMOVE, self._port_removed)
+
+            # Remove subscription to the event bus
+            self.event_bus.unsubscribe(self.packet_in_subscription)
+        except Exception as e:
+            self.log.info('stop-exception', e=e)
+
+        self.log.debug('stopped')
+
+    def announce_flows_deleted(self, flows):
+        for f in flows:
+            self.announce_flow_deleted(f)
+
+    def announce_flow_deleted(self, flow):
+        if flow.flags & ofp.OFPFF_SEND_FLOW_REM:
+            raise NotImplementedError("announce_flow_deleted")
+
+    def signal_flow_mod_error(self, code, flow_mod):
+        pass  # TODO
+
+    def signal_flow_removal(self, code, flow):
+        pass  # TODO
+
+    def signal_group_mod_error(self, code, group_mod):
+        pass  # TODO
+
+    def update_flow_table(self, flow_mod):
+
+        command = flow_mod.command
+
+        if command == ofp.OFPFC_ADD:
+            self.flow_add(flow_mod)
+
+        elif command == ofp.OFPFC_DELETE:
+            self.flow_delete(flow_mod)
+
+        elif command == ofp.OFPFC_DELETE_STRICT:
+            self.flow_delete_strict(flow_mod)
+
+        elif command == ofp.OFPFC_MODIFY:
+            self.flow_modify(flow_mod)
+
+        elif command == ofp.OFPFC_MODIFY_STRICT:
+            self.flow_modify_strict(flow_mod)
+
+        else:
+            self.log.warn('unhandled-flow-mod', command=command, flow_mod=flow_mod)
+
+    def update_meter_table(self, meter_mod):
+        command = meter_mod.command
+
+        if command == ofp.OFPMC_ADD:
+            self.meter_add(meter_mod)
+
+        elif command == ofp.OFPMC_MODIFY:
+            self.meter_modify(meter_mod)
+
+        elif command == ofp.OFPMC_DELETE:
+            self.meter_delete(meter_mod)
+        else:
+            self.log.warn('unhandled-meter-mod', command=command, flow_mod=meter_mod)
+
+    def update_group_table(self, group_mod):
+
+        command = group_mod.command
+
+        if command == ofp.OFPGC_DELETE:
+            self.group_delete(group_mod)
+
+        elif command == ofp.OFPGC_ADD:
+            self.group_add(group_mod)
+
+        elif command == ofp.OFPGC_MODIFY:
+            self.group_modify(group_mod)
+
+        else:
+            self.log.warn('unhandled-group-mod',
+                          command=command, group_mod=group_mod)
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~ LOW LEVEL METER HANDLERS ~~~~~~~~~~~~~~~~~~~~~~~
+
+    def meter_add(self, meter_mod):
+        assert isinstance(meter_mod, ofp.ofp_meter_mod)
+        # read from model
+        meters = list(self.meters_proxy.get('/').items)
+        if not self.check_meter_id_overlapping(meters, meter_mod):
+            meters.append(ofp_meter_config(flags=meter_mod.flags,
+                                           meter_id=meter_mod.meter_id,
+                                           bands=meter_mod.bands))
+
+            self.meters_proxy.update('/', Meters(items=meters))
+        else:
+            self.signal_meter_mod_error(ofp.OFPMMFC_METER_EXISTS, meter_mod)
+
+    def meter_modify(self, meter_mod):
+        assert isinstance(meter_mod, ofp.ofp_meter_mod)
+        meters = list(self.meters_proxy.get('/').items)
+        existing_meter = self.check_meter_id_overlapping(meters, meter_mod)
+        if existing_meter:
+            existing_meter.flags = meter_mod.flags
+            existing_meter.bands = meter_mod.bands
+            self.meters_proxy.update('/', Meters(items=meters))
+        else:
+            self.signal_meter_mod_error(ofp.OFPMMFC_UNKNOWN_METER, meter_mod)
+
+    def meter_delete(self, meter_mod):
+        assert isinstance(meter_mod, ofp.ofp_meter_mod)
+        meters = list(self.meters_proxy.get('/').items)
+        to_keep = list()
+        to_delete = 0
+
+        for meter in meters:
+            if meter.meter_id != meter_mod.meter_id:
+                to_keep.append(meter)
+            else:
+                to_delete += 1
+
+        if to_delete == 1:
+            self.meters_proxy.update('/', Meters(items=to_keep))
+        elif to_delete == 0:
+            self.signal_meter_mod_error(ofp.OFPMMFC_UNKNOWN_METER, meter_mod)
+        else:
+            raise Exception('More than one meter_config sharing the same meter_id cannot exist')
+            raise Exception('More than one meter_config sharing the same meter_id cannot exist')
+
+    @staticmethod
+    def check_meter_id_overlapping(meters, meter_mod):
+        for meter in meters:
+            if meter.meter_id == meter_mod.meter_id:
+                return meter
+        return False
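+    # Note: despite the boolean-sounding name, this returns the matching
+    # ofp_meter_config (truthy) when one exists, or False; meter_add() only
+    # tests the result, while meter_modify() mutates the returned entry.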
+
+    def signal_meter_mod_error(self, error_code, meter_mod):
+        pass  # TODO
+
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~ LOW LEVEL FLOW HANDLERS ~~~~~~~~~~~~~~~~~~~~~~~
+
+    def flow_add(self, mod):
+        assert isinstance(mod, ofp.ofp_flow_mod)
+        assert mod.cookie_mask == 0
+
+        # read from model
+        flows = list(self.flows_proxy.get('/').items)
+
+        changed = False
+        check_overlap = mod.flags & ofp.OFPFF_CHECK_OVERLAP
+        if check_overlap:
+            if self.find_overlapping_flows(flows, mod, True):
+                self.signal_flow_mod_error(
+                    ofp.OFPFMFC_OVERLAP, mod)
+            else:
+                # free to add as new flow
+                flow = flow_stats_entry_from_flow_mod_message(mod)
+                flows.append(flow)
+                changed = True
+                self.log.debug('flow-added', flow=mod)
+
+        else:
+            flow = flow_stats_entry_from_flow_mod_message(mod)
+            idx = self.find_flow(flows, flow)
+            if idx >= 0:
+                old_flow = flows[idx]
+                if not (mod.flags & ofp.OFPFF_RESET_COUNTS):
+                    flow.byte_count = old_flow.byte_count
+                    flow.packet_count = old_flow.packet_count
+                flows[idx] = flow
+                changed = True
+                self.log.debug('flow-updated', flow=flow)
+
+            else:
+                flows.append(flow)
+                changed = True
+                self.log.debug('flow-added', flow=mod)
+
+        # write back to model
+        if changed:
+            self.flows_proxy.update('/', Flows(items=flows))
+
+    def flow_delete(self, mod):
+        assert isinstance(mod, (ofp.ofp_flow_mod, ofp.ofp_flow_stats))
+
+        # read from model
+        flows = list(self.flows_proxy.get('/').items)
+
+        # build a list of what to keep vs what to delete
+        to_keep = []
+        to_delete = []
+        for f in flows:
+            if self.flow_matches_spec(f, mod):
+                to_delete.append(f)
+            else:
+                to_keep.append(f)
+
+        # replace flow table with keepers
+        flows = to_keep
+
+        # write back
+        if to_delete:
+            self.flows_proxy.update('/', Flows(items=flows))
+
+        # from mod send announcement
+        if isinstance(mod, ofp.ofp_flow_mod):
+            # send notifications for discarded flow as required by OpenFlow
+            self.announce_flows_deleted(to_delete)
+
+    def flow_delete_strict(self, mod):
+        assert isinstance(mod, ofp.ofp_flow_mod)
+
+        # read from model
+        flows = list(self.flows_proxy.get('/').items)
+        changed = False
+
+        flow = flow_stats_entry_from_flow_mod_message(mod)
+        idx = self.find_flow(flows, flow)
+        if idx >= 0:
+            del flows[idx]
+            changed = True
+        else:
+            # TODO need to check what to do with this case
+            self.log.warn('flow-cannot-delete', flow=flow)
+
+        if changed:
+            self.flows_proxy.update('/', Flows(items=flows))
+
+    def flow_modify(self, mod):
+        raise NotImplementedError()
+
+    def flow_modify_strict(self, mod):
+        raise NotImplementedError()
+
+    def find_overlapping_flows(self, flows, mod, return_on_first=False):
+        """
+        Return list of overlapping flow(s)
+        Two flows overlap if a packet may match both and if they have the
+        same priority.
+        :param mod: Flow request
+        :param return_on_first: if True, return with the first entry
+        :return:
+        """
+        return []  # TODO finish implementation
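+    # NOTE: while the stub above returns [], flow_add() will never report
+    # OFPFMFC_OVERLAP for OFPFF_CHECK_OVERLAP requests; the new flow is
+    # always added as if no overlap existed.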
+
+    @classmethod
+    def find_flow(cls, flows, flow):
+        for i, f in enumerate(flows):
+            if cls.flow_match(f, flow):
+                return i
+        return -1
+
+    @staticmethod
+    def flow_match(f1, f2):
+        keys_matter = ('table_id', 'priority', 'flags', 'cookie', 'match')
+        for key in keys_matter:
+            if getattr(f1, key) != getattr(f2, key):
+                return False
+        return True
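+    # Flows are considered identical for table updates when the key fields
+    # above match; e.g. two entries differing only in their instructions
+    # compare equal, so flow_add() replaces rather than duplicates them.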
+
+    @classmethod
+    def flow_matches_spec(cls, flow, flow_mod):
+        """
+        Return True if given flow (ofp_flow_stats) is "covered" by the
+        wildcard flow_mod (ofp_flow_mod), taking into consideration of
+        both exact mactches as well as masks-based match fields if any.
+        Otherwise return False
+        :param flow: ofp_flow_stats
+        :param mod: ofp_flow_mod
+        :return: Bool
+        """
+
+        assert isinstance(flow, ofp.ofp_flow_stats)
+        assert isinstance(flow_mod, (ofp.ofp_flow_mod, ofp.ofp_flow_stats))
+
+        if isinstance(flow_mod, ofp.ofp_flow_stats):
+            return cls.flow_match(flow, flow_mod)
+
+        # Check if flow.cookie is covered by mod.cookie and mod.cookie_mask
+        if (flow.cookie & flow_mod.cookie_mask) != \
+                (flow_mod.cookie & flow_mod.cookie_mask):
+            return False
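+        # e.g. flow.cookie == 0x1234 is covered by a flow_mod carrying
+        # cookie == 0x1200 with cookie_mask == 0xFF00, since
+        # 0x1234 & 0xFF00 == 0x1200 == 0x1200 & 0xFF00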
+
+        # Check if flow.table_id is covered by flow_mod.table_id
+        if flow_mod.table_id != ofp.OFPTT_ALL and \
+                        flow.table_id != flow_mod.table_id:
+            return False
+
+        # Check out_port
+        if (flow_mod.out_port & 0x7fffffff) != ofp.OFPP_ANY and \
+                not cls.flow_has_out_port(flow, flow_mod.out_port):
+            return False
+
+        # Check out_group
+        if (flow_mod.out_group & 0x7fffffff) != ofp.OFPG_ANY and \
+                not cls.flow_has_out_group(flow, flow_mod.out_group):
+            return False
+        # Priority is ignored
+
+        # Check match condition
+        # If the flow_mod match field is empty, that is a special case and
+        # indicates the flow entry matches
+        match = flow_mod.match
+        assert isinstance(match, ofp.ofp_match)
+        if not match.oxm_fields:
+            # If we got this far and the match is empty in the flow spec,
+            # then the flow matches
+            return True
+        else:
+            raise NotImplementedError(
+                "flow_matches_spec(): No flow match analysis yet")
+
+    @staticmethod
+    def flow_has_out_port(flow, out_port):
+        """
+        Return True if flow has a output command with the given out_port
+        """
+        assert isinstance(flow, ofp.ofp_flow_stats)
+        for instruction in flow.instructions:
+            assert isinstance(instruction, ofp.ofp_instruction)
+            if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
+                for action in instruction.actions.actions:
+                    assert isinstance(action, ofp.ofp_action)
+                    if action.type == ofp.OFPAT_OUTPUT and \
+                        action.output.port == out_port:
+                        return True
+
+        # otherwise...
+        return False
+
+    @staticmethod
+    def flow_has_out_group(flow, group_id):
+        """
+        Return True if flow has a output command with the given out_group
+        """
+        assert isinstance(flow, ofp.ofp_flow_stats)
+        for instruction in flow.instructions:
+            assert isinstance(instruction, ofp.ofp_instruction)
+            if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
+                for action in instruction.actions.actions:
+                    assert isinstance(action, ofp.ofp_action)
+                    if action.type == ofp.OFPAT_GROUP and \
+                            action.group.group_id == group_id:
+                        return True
+
+        # otherwise...
+        return False
+
+    def flows_delete_by_group_id(self, flows, group_id):
+        """
+        Delete any flow(s) referring to given group_id
+        :param group_id:
+        :return: None
+        """
+        to_keep = []
+        to_delete = []
+        for f in flows:
+            if self.flow_has_out_group(f, group_id):
+                to_delete.append(f)
+            else:
+                to_keep.append(f)
+
+        # replace flow table with keepers
+        flows = to_keep
+
+        # send notifications for the deleted flows
+        self.announce_flows_deleted(to_delete)
+
+        return bool(to_delete), flows
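+    # The (changed, remaining_flows) return value is consumed by
+    # group_delete() below, which persists the filtered flow table only
+    # when flows were actually removed.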
+
+    # ~~~~~~~~~~~~~~~~~~~~~ LOW LEVEL GROUP HANDLERS ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def group_add(self, group_mod):
+        assert isinstance(group_mod, ofp.ofp_group_mod)
+
+        groups = OrderedDict((g.desc.group_id, g)
+                             for g in self.groups_proxy.get('/').items)
+        changed = False
+
+        if group_mod.group_id in groups:
+            self.signal_group_mod_error(ofp.OFPGMFC_GROUP_EXISTS, group_mod)
+        else:
+            group_entry = group_entry_from_group_mod(group_mod)
+            groups[group_mod.group_id] = group_entry
+            changed = True
+
+        if changed:
+            self.groups_proxy.update('/', FlowGroups(items=groups.values()))
+
+    def group_delete(self, group_mod):
+        assert isinstance(group_mod, ofp.ofp_group_mod)
+
+        groups = OrderedDict((g.desc.group_id, g)
+                             for g in self.groups_proxy.get('/').items)
+        groups_changed = False
+        flows_changed = False
+
+        group_id = group_mod.group_id
+        if group_id == ofp.OFPG_ALL:
+            # TODO we must delete all flows that point to this group and
+            # signal controller as requested by flow's flag
+            groups = OrderedDict()
+            groups_changed = True
+            self.log.debug('all-groups-deleted')
+
+        else:
+            if group_id not in groups:
+                # per openflow spec, this is not an error
+                pass
+
+            else:
+                flows = list(self.flows_proxy.get('/').items)
+                flows_changed, flows = self.flows_delete_by_group_id(
+                    flows, group_id)
+                del groups[group_id]
+                groups_changed = True
+                self.log.debug('group-deleted', group_id=group_id)
+
+        if groups_changed:
+            self.groups_proxy.update('/', FlowGroups(items=groups.values()))
+        if flows_changed:
+            self.flows_proxy.update('/', Flows(items=flows))
+
+    def group_modify(self, group_mod):
+        assert isinstance(group_mod, ofp.ofp_group_mod)
+
+        groups = OrderedDict((g.desc.group_id, g)
+                             for g in self.groups_proxy.get('/').items)
+        changed = False
+
+        if group_mod.group_id not in groups:
+            self.signal_group_mod_error(
+                ofp.OFPGMFC_INVALID_GROUP, group_mod)
+        else:
+            # replace existing group entry with new group definition
+            group_entry = group_entry_from_group_mod(group_mod)
+            groups[group_mod.group_id] = group_entry
+            changed = True
+
+        if changed:
+            self.groups_proxy.update('/', FlowGroups(items=groups.values()))
+
+    def port_enable(self, port_id):
+        self.log.info("port-enable", port_id=port_id)
+
+        proxy = self.port_proxy[port_id]
+        port = proxy.get('/')
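+        # clear the OFPPC_PORT_DOWN bit; other config bits are preserved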
+        port.ofp_port.config = port.ofp_port.config & ~ofp.OFPPC_PORT_DOWN
+        proxy.update('/', port)
+
+    def port_disable(self, port_id):
+        self.log.info("port-disable", port_id=port_id)
+
+        proxy = self.port_proxy[port_id]
+        port = proxy.get('/')
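+        # set the OFPPC_PORT_DOWN bit to mark the port administratively down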
+        port.ofp_port.config = port.ofp_port.config | ofp.OFPPC_PORT_DOWN
+        proxy.update('/', port)
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PACKET_OUT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def packet_out(self, ofp_packet_out):
+        self.log.debug('packet-out', packet=ofp_packet_out)
+        topic = 'packet-out:{}'.format(self.logical_device_id)
+        self.event_bus.publish(topic, ofp_packet_out)
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PACKET_IN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def handle_packet_in_event(self, _, msg):
+        self.log.debug('handle-packet-in', msg=msg)
+        logical_port_no, packet = msg
+        packet_in = ofp.ofp_packet_in(
+            # buffer_id=0,
+            reason=ofp.OFPR_ACTION,
+            # table_id=0,
+            # cookie=0,
+            match=ofp.ofp_match(
+                type=ofp.OFPMT_OXM,
+                oxm_fields=[
+                    ofp.ofp_oxm_field(
+                        oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+                        ofb_field=in_port(logical_port_no)
+                    )
+                ]
+            ),
+            data=packet
+        )
+        self.packet_in(packet_in)
+
+    def packet_in(self, ofp_packet_in):
+        self.log.info('packet-in', logical_device_id=self.logical_device_id,
+                      pkt=ofp_packet_in, data=hexify(ofp_packet_in.data))
+        self.local_handler.send_packet_in(
+            self.logical_device_id, ofp_packet_in)
+
+    # ~~~~~~~~~~~~~~~~~~~~~ FLOW TABLE UPDATE HANDLING ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def _pre_process_flows(self, flows):
+        """
+        This method is invoked before a device flow table data model is
+        updated. The resulting data is stored locally and the flow table is
+        updated during the post-processing phase, i.e. via the POST_UPDATE
+        callback
+        :param flows: Desired flows
+        :return: None
+        """
+        current_flows = self.flows_proxy.get('/')
+        self.log.debug('pre-processing-flows',
+                       logical_device_id=self.logical_device_id,
+                       desired_flows=flows,
+                       existing_flows=current_flows)
+
+        current_flow_ids = set(f.id for f in current_flows.items)
+        desired_flow_ids = set(f.id for f in flows.items)
+
+        self._flows_ids_to_add = desired_flow_ids.difference(current_flow_ids)
+        self._flows_ids_to_remove = current_flow_ids.difference(desired_flow_ids)
+        self._flows_to_remove = []
+        for f in current_flows.items:
+            if f.id in self._flows_ids_to_remove:
+                self._flows_to_remove.append(f)
+
+        if len(self._flows_ids_to_add) + len(self._flows_ids_to_remove) == 0:
+            # No flow changes; only the stats are changing
+            self._no_flow_changes_required = True
+        else:
+            self._no_flow_changes_required = False
+
+        self.log.debug('flows-preprocess-output',
+                       current_flows=len(current_flow_ids),
+                       new_flows=len(desired_flow_ids),
+                       adding_flows=len(self._flows_ids_to_add),
+                       removing_flows=len(self._flows_ids_to_remove))
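+        # Example of the bookkeeping above: with current flow ids {1, 2, 3}
+        # and desired ids {2, 3, 4}, _flows_ids_to_add == {4} and
+        # _flows_ids_to_remove == {1}; when both sets are empty, only flow
+        # statistics changed and _flow_table_updated() skips decomposition.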
+
+
+    def _flow_table_updated(self, flows):
+        self.log.debug('flow-table-updated',
+                       logical_device_id=self.logical_device_id, flows=flows)
+
+        if self._no_flow_changes_required:
+            # Stats changes, no need to process further
+            self.log.debug('flow-stats-update')
+        else:
+
+            groups = self.groups_proxy.get('/').items
+            device_rules_map = self.decompose_rules(flows.items, groups)
+
+            # TODO we have to evolve this into a policy-based, event-based
+            # pattern. This is a raw implementation of the specific use case
+            # with certain built-in assumptions, and not yet device vendor
+            # specific. The policy-based refinement will be introduced later.
+
+            # Temporary bypass for openolt
+
+            if self.accepts_direct_logical_flows:
+                # give the logical flows directly to the adapter
+                self.log.debug('direct-logical-flow-bypass')
+                if self.device_adapter_agent is None:
+                    self.log.error('no-device-adapter-agent',
+                                   device_id=self.device_id,
+                                   logical_device_id=self.logical_device_id)
+                    return
+
+                flows_to_add = [f for f in flows.items
+                                if f.id in self._flows_ids_to_add]
+
+                self.log.debug('flows-to-remove',
+                               flows_to_remove=self._flows_to_remove,
+                               flows_ids=self._flows_ids_to_remove)
+
+                try:
+                    self.device_adapter_agent.update_logical_flows(
+                        self.device_id, flows_to_add, self._flows_to_remove,
+                        groups, device_rules_map)
+                except Exception as e:
+                    self.log.error('logical-flows-bypass-error', error=e,
+                                   flows=flows)
+            else:
+
+                for device_id, (flows, groups) in device_rules_map.iteritems():
+
+                    self.root_proxy.update('/devices/{}/flows'.format(device_id),
+                                           Flows(items=flows.values()))
+                    self.root_proxy.update('/devices/{}/flow_groups'.format(device_id),
+                                           FlowGroups(items=groups.values()))
+
+    # ~~~~~~~~~~~~~~~~~~~~ GROUP TABLE UPDATE HANDLING ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def _group_table_updated(self, flow_groups):
+        self.log.debug('group-table-updated',
+                       logical_device_id=self.logical_device_id,
+                       flow_groups=flow_groups)
+
+        flows = self.flows_proxy.get('/').items
+        device_flows_map = self.decompose_rules(flows, flow_groups.items)
+        for device_id, (flows, groups) in device_flows_map.iteritems():
+            self.root_proxy.update('/devices/{}/flows'.format(device_id),
+                                   Flows(items=flows.values()))
+            self.root_proxy.update('/devices/{}/flow_groups'.format(device_id),
+                                   FlowGroups(items=groups.values()))
+
+    # ~~~~~~~~~~~~~~~~~~~ APIs NEEDED BY FLOW DECOMPOSER ~~~~~~~~~~~~~~~~~~~~~~
+
+    def _port_added(self, port):
+        self.log.debug('port-added', port=port)
+        assert isinstance(port, LogicalPort)
+        self._port_list_updated(port)
+
+        # Set a proxy and callback for that specific port
+        self.port_proxy[port.id] = self.core.get_proxy(
+            '/logical_devices/{}/ports/{}'.format(self.logical_device_id,
+                                                  port.id))
+        self.port_status_has_changed[port.id] = True
+        self.port_proxy[port.id].register_callback(
+            CallbackType.PRE_UPDATE, self._pre_port_changed)
+        self.port_proxy[port.id].register_callback(
+            CallbackType.POST_UPDATE, self._port_changed)
+
+        self.local_handler.send_port_change_event(
+            device_id=self.logical_device_id,
+            port_status=ofp.ofp_port_status(
+                reason=ofp.OFPPR_ADD,
+                desc=port.ofp_port
+            )
+        )
+
+    def _reconcile_port(self, port):
+        self.log.debug('reconcile-port', port=port)
+        assert isinstance(port, LogicalPort)
+        self._port_list_updated(port)
+
+        # Set a proxy and callback for that specific port
+        self.port_proxy[port.id] = self.core.get_proxy(
+            '/logical_devices/{}/ports/{}'.format(self.logical_device_id,
+                                                  port.id))
+        self.port_status_has_changed[port.id] = True
+        self.port_proxy[port.id].register_callback(
+            CallbackType.PRE_UPDATE, self._pre_port_changed)
+        self.port_proxy[port.id].register_callback(
+            CallbackType.POST_UPDATE, self._port_changed)
+
+    def _port_removed(self, port):
+        self.log.debug('port-removed', port=port)
+        assert isinstance(port, LogicalPort)
+        self._port_list_updated(port)
+
+        # Remove the proxy references
+        self.port_proxy[port.id].unregister_callback(
+            CallbackType.PRE_UPDATE, self._pre_port_changed)
+        self.port_proxy[port.id].unregister_callback(
+            CallbackType.POST_UPDATE, self._port_changed)
+        del self.port_proxy[port.id]
+        del self.port_status_has_changed[port.id]
+
+
+        self.local_handler.send_port_change_event(
+            device_id=self.logical_device_id,
+            port_status=ofp.ofp_port_status(
+                reason=ofp.OFPPR_DELETE,
+                desc=port.ofp_port
+            )
+        )
+
+    def _pre_port_changed(self, port):
+        old_port = self.port_proxy[port.id].get('/')
+        if old_port.ofp_port != port.ofp_port:
+            self.port_status_has_changed[port.id] = True
+        else:
+            self.port_status_has_changed[port.id] = False
+
+    def _port_changed(self, port):
+        self.log.debug('port-changed', port=port)
+        if self.port_status_has_changed[port.id]:
+            assert isinstance(port, LogicalPort)
+            self.local_handler.send_port_change_event(
+                device_id=self.logical_device_id,
+                port_status=ofp.ofp_port_status(
+                    reason=ofp.OFPPR_MODIFY,
+                    desc=port.ofp_port
+                )
+            )
+
+    def _port_list_updated(self, _):
+        # invalidate the graph and the route table
+        self._invalidate_cached_tables()
+
+    def _invalidate_cached_tables(self):
+        self._routes = None
+        self._default_rules = None
+        self._nni_logical_port_no = None
+
+    def _assure_cached_tables_up_to_date(self):
+        if self._routes is None:
+            logical_ports = self.self_proxy.get('/ports')
+            graph, self._routes = self.compute_routes(
+                self.root_proxy, logical_ports)
+            self._default_rules = self._generate_default_rules(graph)
+            root_ports = [p for p in logical_ports if p.root_port]
+            assert len(root_ports) == 1, 'Only one root port supported at this time'
+            self._nni_logical_port_no = root_ports[0].ofp_port.port_no
+
+
+    def _generate_default_rules(self, graph):
+
+        def root_device_default_rules(device):
+            flows = OrderedDict()
+            groups = OrderedDict()
+            return flows, groups
+
+        def leaf_device_default_rules(device):
+            ports = self.root_proxy.get('/devices/{}/ports'.format(device.id))
+            upstream_ports = [
+                port for port in ports
+                if port.type in (Port.PON_ONU, Port.VENET_ONU)
+            ]
+            assert len(upstream_ports) == 1
+            downstream_ports = [
+                port for port in ports if port.type == Port.ETHERNET_UNI
+            ]
+
+            # it is possible that the downstream ports are not
+            # created, but the flow_decomposition has already
+            # kicked in. In such scenarios, cut short the processing
+            # and return.
+            if len(downstream_ports) == 0:
+                return None, None
+            # assert len(downstream_ports) == 1
+            upstream_port = upstream_ports[0]
+            flows = OrderedDict()
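+            # the three entries per UNI below translate untagged and VID-0
+            # priority-tagged upstream traffic to the per-ONU VLAN, and
+            # translate that VLAN back to VID 0 on the way downstream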
+            for downstream_port in downstream_ports:
+                flows.update(OrderedDict((f.id, f) for f in [
+                    mk_flow_stat(
+                        priority=500,
+                        match_fields=[
+                            in_port(downstream_port.port_no),
+                            vlan_vid(ofp.OFPVID_PRESENT | 0)
+                        ],
+                        actions=[
+                            set_field(vlan_vid(ofp.OFPVID_PRESENT | device.vlan)),
+                            output(upstream_port.port_no)
+                        ]
+                    ),
+                    mk_flow_stat(
+                        priority=500,
+                        match_fields=[
+                            in_port(downstream_port.port_no),
+                            vlan_vid(0)
+                        ],
+                        actions=[
+                            push_vlan(0x8100),
+                            set_field(vlan_vid(ofp.OFPVID_PRESENT | device.vlan)),
+                            output(upstream_port.port_no)
+                        ]
+                    ),
+                    mk_flow_stat(
+                        priority=500,
+                        match_fields=[
+                            in_port(upstream_port.port_no),
+                            vlan_vid(ofp.OFPVID_PRESENT | device.vlan)
+                        ],
+                        actions=[
+                            set_field(vlan_vid(ofp.OFPVID_PRESENT | 0)),
+                            output(downstream_port.port_no)
+                        ]
+                    ),
+                ]))
+            groups = OrderedDict()
+            return flows, groups
+
+        root_device_id = self.self_proxy.get('/').root_device_id
+        rules = {}
+        for node_key in graph.nodes():
+            node = graph.node[node_key]
+            device = node.get('device', None)
+            if device is None:
+                continue
+            if device.id == root_device_id:
+                rules[device.id] = root_device_default_rules(device)
+            else:
+                rules[device.id] = leaf_device_default_rules(device)
+        return rules
+
+    def get_route(self, ingress_port_no, egress_port_no):
+        self._assure_cached_tables_up_to_date()
+        self.log.info('getting-route', eg_port=egress_port_no,
+                      in_port=ingress_port_no,
+                      nni_port=self._nni_logical_port_no)
+        if egress_port_no is not None and \
+                        (egress_port_no & 0x7fffffff) == ofp.OFPP_CONTROLLER:
+            self.log.info('controller-flow', eg_port=egress_port_no,
+                          in_port=ingress_port_no,
+                          nni_port=self._nni_logical_port_no)
+            if ingress_port_no == self._nni_logical_port_no:
+                self.log.info('returning-half-route')
+                # This is a trap on the NNI Port
+                # Return a 'half' route to make the flow decomp logic happy
+                for (ingress, egress), route in self._routes.iteritems():
+                    if egress == self._nni_logical_port_no:
+                        return [None, route[1]]
+                raise Exception('not a single upstream route')
+            # treat it as if the output port is the NNI of the OLT
+            egress_port_no = self._nni_logical_port_no
+
+        # If ingress_port is not specified (None), it may be a wildcarded
+        # route if egress_port is OFPP_CONTROLLER or _nni_logical_port,
+        # in which case we need to create a half-route where only the egress
+        # hop is filled and the first hop is None
+        if ingress_port_no is None and \
+                        egress_port_no == self._nni_logical_port_no:
+            # We can use the 2nd hop of any upstream route, so just find the
+            # first upstream:
+            for (ingress, egress), route in self._routes.iteritems():
+                if egress == self._nni_logical_port_no:
+                    return [None, route[1]]
+            raise Exception('not a single upstream route')
+
+        # If egress_port is not specified (None), we can also return a
+        # "half" route
+        if egress_port_no is None:
+            for (ingress, egress), route in self._routes.iteritems():
+                if ingress == ingress_port_no:
+                    return [route[0], None]
+
+            # This can occur if a leaf device is disabled
+            self.log.error('no-downstream-route',
+                           ingress_port_no=ingress_port_no,
+                           egress_port_no=egress_port_no)
+            return None
+
+        return self._routes.get((ingress_port_no, egress_port_no))
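+    # Routes are keyed by (ingress_logical_port, egress_logical_port); a
+    # "half" route [None, hop] or [hop, None] preserves the decomposer's
+    # two-hop assumption when only one end of the path is known.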
+
+    def get_all_default_rules(self):
+        self._assure_cached_tables_up_to_date()
+        return self._default_rules
+
+    def get_wildcard_input_ports(self, exclude_port=None):
+        logical_ports = self.self_proxy.get('/ports')
+        return [port.ofp_port.port_no for port in logical_ports
+                if port.ofp_port.port_no != exclude_port]
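+    # e.g. with logical port numbers [100, 101, 65536] (hypothetical values)
+    # and exclude_port=65536, this yields [100, 101]; the flow decomposer
+    # uses it to expand wildcard-input upstream trap flows.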
diff --git a/python/core/registry.py b/python/core/registry.py
new file mode 100644
index 0000000..270bd71
--- /dev/null
+++ b/python/core/registry.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Simple component registry to provide centralized access to any registered
+components.
+"""
+from collections import OrderedDict
+from zope.interface import Interface
+
+
+class IComponent(Interface):
+    """
+    A Voltha Component
+    """
+
+    def start():
+        """
+        Called once the component is instantiated. Can be used for async
+        initialization.
+        :return: (None or Deferred)
+        """
+
+    def stop():
+        """
+        Called once before the component is unloaded. Can be used for async
+        cleanup operations.
+        :return: (None or Deferred)
+        """
+
+
+class Registry(object):
+
+    def __init__(self):
+        self.components = OrderedDict()
+
+    def register(self, name, component):
+        assert IComponent.providedBy(component)
+        assert name not in self.components
+        self.components[name] = component
+        return component
+
+    def unregister(self, name):
+        if name in self.components:
+            del self.components[name]
+
+    def __call__(self, name):
+        return self.components[name]
+
+    def iterate(self):
+        return self.components.values()
+
+
+# public shared registry
+registry = Registry()
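+# Example usage (hypothetical component name):
+#   registry.register('adapter_loader', loader)   # store a component
+#   loader = registry('adapter_loader')           # retrieve it via __call__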
diff --git a/python/docker/Dockerfile.adapter_openonu b/python/docker/Dockerfile.adapter_openonu
new file mode 100644
index 0000000..52eb916
--- /dev/null
+++ b/python/docker/Dockerfile.adapter_openonu
@@ -0,0 +1,48 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-openolt-protos:${TAG} as openolt_protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /voltha && touch /voltha/__init__.py
+RUN mkdir /voltha/voltha && touch /voltha/voltha/__init__.py
+RUN mkdir /voltha/voltha/adapters && touch /voltha/voltha/adapters/__init__.py
+ENV PYTHONPATH=/voltha
+COPY common /voltha/common/
+COPY core /voltha/voltha/core/
+COPY core/registry.py /voltha/voltha/
+COPY extensions /voltha/voltha/extensions/
+COPY kafka /voltha/kafka
+COPY adapters/*.py /voltha/voltha/adapters/
+COPY adapters/brcm_openomci_onu /voltha/voltha/adapters/brcm_openomci_onu
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /voltha/voltha/protos
+COPY --from=openolt_protos /protos/voltha /voltha/voltha/adapters/openolt/protos
+COPY --from=protos /protos/google/api /voltha/voltha/protos/third_party/google/api
+COPY protos/third_party/__init__.py /voltha/voltha/protos/third_party
+RUN touch /voltha/voltha/protos/__init__.py
+RUN touch /voltha/voltha/adapters/openolt/__init__.py
+RUN touch /voltha/voltha/adapters/openolt/protos/__init__.py
+RUN touch /voltha/voltha/protos/third_party/google/__init__.py
+
+# Exposing process and default entry point
+CMD ["python", "/voltha/python/adapters/openonu/main.py"]
diff --git a/python/docker/Dockerfile.base b/python/docker/Dockerfile.base
new file mode 100644
index 0000000..1b912e0
--- /dev/null
+++ b/python/docker/Dockerfile.base
@@ -0,0 +1,34 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:xenial
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Update the package index and install base runtime dependencies
+RUN apt-get update && \
+    apt-get install -y python python-pip openssl iproute2 libpcap-dev wget
+
+COPY requirements.txt /tmp/requirements.txt
+
+# Install app dependencies
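+# The compiler toolchain is installed only so pip can build C-extension
+# wheels from requirements.txt; it is purged again below to keep the
+# image small.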
+RUN wget https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
+    dpkg -i *.deb && \
+    rm -f *.deb && \
+    apt-get update && \
+    apt-get install -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
+    pip install -r /tmp/requirements.txt && \
+    apt-get purge -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
+    apt-get autoremove -y
diff --git a/python/docker/Dockerfile.protoc b/python/docker/Dockerfile.protoc
new file mode 100644
index 0000000..eef6f54
--- /dev/null
+++ b/python/docker/Dockerfile.protoc
@@ -0,0 +1,39 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG REGISTRY=
+ARG PROTOC_PREFIX=/usr/local
+ARG PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
+ARG PROTOC=${PROTOC_PREFIX}/bin/protoc
+ARG PROTOC_VERSION=3.3.0
+
+FROM ${REGISTRY}debian:stretch-slim
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+ENV PROTOC_PREFIX=/usr/local
+ENV PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
+ENV PROTOC=${PROTOC_PREFIX}/bin/protoc
+ENV PROTOC_VERSION=3.3.0
+ENV PROTOC_DOWNLOAD_PREFIX=https://github.com/google/protobuf/releases/download
+ENV PROTOC_DIR=protobuf-${PROTOC_VERSION}
+ENV PROTOC_TARBALL=protobuf-python-${PROTOC_VERSION}.tar.gz
+ENV PROTOC_DOWNLOAD_URI=${PROTOC_DOWNLOAD_PREFIX}/v${PROTOC_VERSION}/${PROTOC_TARBALL}
+
+RUN apt-get update -y && apt-get install -y wget build-essential python-dev python-pip
+RUN pip install grpcio-tools==1.3.5
+WORKDIR /build
+RUN wget -q --no-check-certificate ${PROTOC_DOWNLOAD_URI}
+RUN tar --strip-components=1 -zxf ${PROTOC_TARBALL}
+RUN ./configure --prefix=${PROTOC_PREFIX}
+RUN make install
diff --git a/python/docker/Dockerfile.protos b/python/docker/Dockerfile.protos
new file mode 100644
index 0000000..db70d13
--- /dev/null
+++ b/python/docker/Dockerfile.protos
@@ -0,0 +1,36 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG REGISTRY=
+ARG REPOSITORY=
+ARG TAG=latest
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} as builder
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+COPY protos/third_party/google/api/*.proto /protos/google/api/
+COPY docker/config/Makefile.protos /protos/google/api/Makefile.protos
+COPY protos/*.proto /protos/voltha/
+COPY docker/config/Makefile.protos /protos/voltha/Makefile.protos
+
+WORKDIR /protos
+RUN make -f google/api/Makefile.protos google_api
+RUN touch /protos/google/__init__.py /protos/google/api/__init__.py
+
+WORKDIR /protos/voltha
+RUN make -f Makefile.protos build
+
+# Copy the files to a scratch-based container to minimize its size
+FROM ${REGISTRY}scratch
+COPY --from=builder /protos/ /protos/
diff --git a/python/docker/config/Makefile.protos b/python/docker/config/Makefile.protos
new file mode 100644
index 0000000..12ff9e3
--- /dev/null
+++ b/python/docker/config/Makefile.protos
@@ -0,0 +1,59 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Makefile to build all protobuf and gRPC related artifacts
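+#
+# Typical invocation (as wired up in Dockerfile.protos):
+#   make -f Makefile.protos build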
+
+default: build
+
+PROTO_FILES := $(wildcard *.proto)
+PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
+PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
+PROTO_ALL_PB2_C_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2.pyc,$(f)))
+PROTO_ALL_PB2_GRPC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2_grpc.py,$(f)))
+PROTO_ALL_DESC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,.desc,$(f)))
+
+# Google API needs to be built from within the third party directory
+#
+google_api:
+	python -m grpc.tools.protoc \
+	    -I. \
+            --python_out=. \
+            --grpc_python_out=. \
+            --descriptor_set_out=google/api/annotations.desc \
+            --include_imports \
+            --include_source_info \
+        google/api/annotations.proto google/api/http.proto
+
+build: $(PROTO_PB2_FILES)
+
+%_pb2.py: %.proto
+	python -m grpc.tools.protoc \
+                -I. \
+                -I/protos \
+                --python_out=. \
+                --grpc_python_out=. \
+                --descriptor_set_out=$(basename $<).desc \
+                --include_imports \
+                --include_source_info \
+                $<
+
+clean:
+	rm -f $(PROTO_PB2_FILES) \
+		$(PROTO_ALL_DESC_FILES) \
+		$(PROTO_ALL_PB2_GRPC_FILES) \
+		$(PROTO_ALL_PB2_C_FILES) \
+		$(PROTO_PB2_GOOGLE_API)
diff --git a/python/env.sh b/python/env.sh
new file mode 100644
index 0000000..ec3b52f
--- /dev/null
+++ b/python/env.sh
@@ -0,0 +1,29 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Sourcing this file is needed to make local development and integration
+# testing work.
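+#
+# Typical use (run from the python/ directory so VOLTHA_BASE is set
+# correctly):
+#   . env.sh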
+export VOLTHA_BASE=$PWD
+
+# load local python virtualenv if exists, otherwise create it
+VENVDIR="venv-$(uname -s | tr '[:upper:]' '[:lower:]')"
+if [ ! -e "$VENVDIR/.built" ]; then
+    echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+    echo "Initializing OS-appropriate virtual env."
+    echo "This will take a few minutes."
+    echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+    make venv
+fi
+. $VENVDIR/bin/activate
+
+# add top-level voltha dir to pythonpath
+export PYTHONPATH=$VOLTHA_BASE/$VENVDIR/lib/python2.7/site-packages:$PYTHONPATH:$VOLTHA_BASE:$VOLTHA_BASE/cli:$VOLTHA_BASE/protos/third_party
diff --git a/python/extensions/IGMP.py b/python/extensions/IGMP.py
new file mode 100644
index 0000000..00fab23
--- /dev/null
+++ b/python/extensions/IGMP.py
@@ -0,0 +1,282 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from scapy.fields import ByteEnumField, FieldLenField, IPField, \
+    FieldListField, XShortField, ConditionalField, BitField, ShortField, \
+    PacketListField
+from scapy.fields import ByteField
+from scapy.layers.inet import IP, bind_layers
+from scapy.layers.inet import IPOption_Router_Alert
+from scapy.layers.l2 import Ether
+from scapy.packet import Packet
+from scapy.utils import atol, hexdump
+from scapy.utils import checksum
+
+IGMP_TYPE_MEMBERSHIP_QUERY     = 0x11
+IGMP_TYPE_V3_MEMBERSHIP_REPORT = 0x22
+IGMP_TYPE_V1_MEMBERSHIP_REPORT = 0x12
+IGMP_TYPE_V2_MEMBERSHIP_REPORT = 0x16
+IGMP_TYPE_V2_LEAVE_GROUP       = 0x17
+
+IGMP_V3_GR_TYPE_INCLUDE           = 0x01
+IGMP_V3_GR_TYPE_EXCLUDE           = 0x02
+IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE = 0x03
+IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE = 0x04
+IGMP_V3_GR_TYPE_ALLOW_NEW         = 0x05
+IGMP_V3_GR_TYPE_BLOCK_OLD         = 0x06
+
+
+"""
+IGMPV3_ALL_ROUTERS = '224.0.0.22'
+IGMPv3 = 3
+IP_SRC = '1.2.3.4'
+ETHERTYPE_IP = 0x0800
+IGMP_DST_MAC = "01:00:5e:00:01:01"
+IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
+"""
+
+
+class IGMPv3gr(Packet):
+    """IGMPv3 Group Record, used in membership report"""
+
+    name = "IGMPv3gr"
+
+    igmp_v3_gr_types = {
+        IGMP_V3_GR_TYPE_INCLUDE: "Include Mode",
+        IGMP_V3_GR_TYPE_EXCLUDE: "Exclude Mode",
+        IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE: "Change to Include Mode",
+        IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE: "Change to Exclude Mode",
+        IGMP_V3_GR_TYPE_ALLOW_NEW: "Allow New Sources",
+        IGMP_V3_GR_TYPE_BLOCK_OLD: "Block Old Sources"
+    }
+
+    fields_desc = [
+        ByteEnumField("rtype", IGMP_V3_GR_TYPE_INCLUDE, igmp_v3_gr_types),
+        ByteField("aux_data_len", 0),
+        FieldLenField("numsrc", None, count_of="sources"),
+        IPField("mcaddr", "0.0.0.0"),
+        FieldListField("sources", None, IPField("src", "0.0.0.0"), "numsrc")
+    ]
+
+    def post_build(self, pkt, payload):
+        pkt += payload
+        if self.aux_data_len != 0:
+            print "WARNING: Auxiliary Data Length must be zero (0)"
+        return pkt
+
+
+class IGMPv3(Packet):
+
+    name = "IGMPv3"
+
+    igmp_v3_types = {
+        IGMP_TYPE_MEMBERSHIP_QUERY: "Membership Query",
+        IGMP_TYPE_V3_MEMBERSHIP_REPORT: "Version 3 Membership Report",
+        IGMP_TYPE_V2_MEMBERSHIP_REPORT: "Version 2 Membership Report",
+        IGMP_TYPE_V1_MEMBERSHIP_REPORT: "Version 1 Membership Report",
+        IGMP_TYPE_V2_LEAVE_GROUP: "Version 2 Leave Group"
+    }
+
+    fields_desc = [
+        ByteEnumField("type", IGMP_TYPE_MEMBERSHIP_QUERY, igmp_v3_types),
+        ByteField("max_resp_code", 0),
+        XShortField("checksum", None),
+        #IPField("group_address", "0.0.0.0"),
+
+        # membership query fields
+        ConditionalField(IPField("gaddr", "0.0.0.0"),
+                         lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
+        ConditionalField(BitField("resv", 0, 4),
+                         lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
+        ConditionalField(BitField("s", 0, 1),
+                         lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
+        ConditionalField(BitField("qrv", 0, 3),
+                         lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
+        ConditionalField(ByteField("qqic", 0),
+                         lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
+        ConditionalField(FieldLenField("numsrc", None, count_of="srcs"),
+                         lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
+        ConditionalField(FieldListField("srcs", None,
+                                        IPField("src", "0.0.0.0"), "numsrc"),
+                         lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
+
+        # membership report fields
+        ConditionalField(
+            ShortField("resv2", 0),
+            lambda pkt: pkt.type == IGMP_TYPE_V3_MEMBERSHIP_REPORT),
+        ConditionalField(
+            FieldLenField("numgrp", None, count_of="grps"),
+            lambda pkt: pkt.type == IGMP_TYPE_V3_MEMBERSHIP_REPORT),
+        ConditionalField(
+            PacketListField("grps", [], IGMPv3gr),
+            lambda pkt: pkt.type == IGMP_TYPE_V3_MEMBERSHIP_REPORT)
+
+        # TODO: v1 and v2 membership reports?
+
+    ]
+
+    def post_build(self, pkt, payload):
+
+        pkt += payload
+
+        # max_resp_code field is reserved (0)
+        if self.type in [IGMP_TYPE_V3_MEMBERSHIP_REPORT,]:
+            mrc = 0
+        else:
+            mrc = self.encode_float(self.max_resp_code)
+        pkt = pkt[:1] + chr(mrc) + pkt[2:]
+
+        if self.checksum is None:
+            chksum = checksum(pkt)
+            pkt = pkt[:2] + chr(chksum >> 8) + chr(chksum & 0xff) + pkt[4:]
+
+        return pkt
+
+    def encode_float(self, value):
+        """Encode max response time value per RFC 3376."""
+        if value < 128:
+            return value
+        if value > 31743:
+            return 255
+        exp = 0
+        value >>= 3
+        while value > 31:
+            exp += 1
+            value >>= 1
+        return 0x80 | (exp << 4) | (value & 0xf)
+
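+    # Worked example (illustrative, not from the original source):
+    # encode_float(1000): 1000 >> 3 = 125, halved twice more gives mant=31
+    # with exp=2, so the code is 0x80 | (2 << 4) | 0xf = 0xaf.
+    # decode_float(0xaf) = (0xf | 0x10) << (2 + 3) = 992, which is within
+    # the four-bit precision exercised by the self-test in __main__ below.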
+    def decode_float(self, code):
+        """Decode a max response code encoded per RFC 3376."""
+        if code < 128:
+            return code
+        mant = code & 0xf
+        exp = (code >> 4) & 0x7
+        return (mant | 0x10) << (exp + 3)
+
+    @staticmethod
+    def is_valid_mcaddr(ip):
+        byte1 = atol(ip) >> 24 & 0xff
+        return (byte1 & 0xf0) == 0xe0
+
+    @staticmethod
+    def fixup(pkt):
+        """Fixes up the underlying IP() and Ether() headers."""
+        assert pkt.haslayer(IGMPv3), \
+            "This packet is not an IGMPv3 packet; cannot fix it up"
+
+        igmp = pkt.getlayer(IGMPv3)
+
+        if pkt.haslayer(IP):
+            ip = pkt.getlayer(IP)
+            ip.ttl = 1
+            ip.proto = 2
+            ip.tos = 0xc0
+            ip.options = [IPOption_Router_Alert()]
+
+            if igmp.type == IGMP_TYPE_MEMBERSHIP_QUERY:
+                if igmp.gaddr == "0.0.0.0":
+                    ip.dst = "224.0.0.1"
+                else:
+                    assert IGMPv3.is_valid_mcaddr(igmp.gaddr), \
+                        "IGMP membership query with invalid mcast address"
+                    ip.dst = igmp.gaddr
+
+            elif igmp.type == IGMP_TYPE_V2_LEAVE_GROUP and \
+                    IGMPv3.is_valid_mcaddr(igmp.gaddr):
+                ip.dst = "224.0.0.2"
+
+            elif igmp.type in (IGMP_TYPE_V1_MEMBERSHIP_REPORT,
+                              IGMP_TYPE_V2_MEMBERSHIP_REPORT) and \
+                  IGMPv3.is_valid_mcaddr(igmp.gaddr):
+                ip.dst = igmp.gaddr
+
+            # We do not need to fix up the Ether layer; scapy handles it:
+            #
+            # if pkt.haslayer(Ether):
+            #     eth = pkt.getlayer(Ether)
+            #     ip_long = atol(ip.dst)
+            #     eth.dst = '01:00:5e:%02x:%02x:%02x' % (
+            #         (ip_long >> 16) & 0x7f, (ip_long >> 8) & 0xff,
+            #         ip_long & 0xff)
+
+        return pkt
+
+
+bind_layers(IP,       IGMPv3,   frag=0, proto=2, ttl=1, tos=0xc0)
+bind_layers(IGMPv3,   IGMPv3gr, frag=0, proto=2)
+bind_layers(IGMPv3gr, IGMPv3gr, frag=0, proto=2)
+
+
+if __name__ == "__main__":
+
+    print "test float encoding"
+    from math import log
+    max_expected_error = 1.0 / (2<<3) # four bit precision
+    p = IGMPv3()
+    for v in range(0, 31745):
+        c = p.encode_float(v)
+        d = p.decode_float(c)
+        rel_err = float(v-d)/v if v!=0 else 0.0
+        assert rel_err <= max_expected_error
+
+    print "construct membership query - general query"
+    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY, max_resp_code=120)
+    hexdump(str(mq))
+
+    print "construct membership query - group-specific query"
+    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY, max_resp_code=120,
+                gaddr="224.0.0.1")
+    hexdump(str(mq))
+
+    print "construct membership query - group-and-source-specific query"
+    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY, max_resp_code=120,
+                gaddr="224.0.0.1")
+    mq.srcs = ['1.2.3.4', '5.6.7.8']
+    hexdump(str(mq))
+
+    print "fixup"
+    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY)
+    mq.srcs = ['1.2.3.4', '5.6.7.8']
+    pkt = Ether() / IP() / mq
+    print "before fixup:"
+    hexdump(str(pkt))
+
+    print "after fixup:"
+    IGMPv3.fixup(pkt)
+    hexdump(str(pkt))
+
+    print "construct v3 membership report - join a single group"
+    mr = IGMPv3(type=IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+                gaddr="224.0.0.1")
+    mr.grps = [IGMPv3gr( rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr="229.10.20.30")]
+    hexdump(mr)
+
+    print "construct v3 membership report - join two groups"
+    mr = IGMPv3(type=IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+                gaddr="224.0.0.1")
+    mr.grps = [
+        IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr="229.10.20.30"),
+        IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr="229.10.20.31")
+    ]
+    hexdump(mr)
+
+    print "construct v3 membership report - leave a group"
+    mr = IGMPv3(type=IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+                gaddr="224.0.0.1")
+    mr.grps = [IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr="229.10.20.30")]
+    hexdump(mr)
+
+    print "all ok"
diff --git a/python/extensions/__init__.py b/python/extensions/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/alarms/README.md b/python/extensions/alarms/README.md
new file mode 100644
index 0000000..2ac971a
--- /dev/null
+++ b/python/extensions/alarms/README.md
@@ -0,0 +1,62 @@
+# VOLTHA Alarm Library
+
+This directory provides a common library for the creation of alarms by adapters within
+VOLTHA. It should be used to ensure that alarms published by different adapters use the
+same format.
+
+## Alarm Manager Creation
+
+Each device handler should create an instance of the **AdapterAlarms** alarm manager shortly after
+initial activation. This alarm manager is responsible for the formatting and sending of alarms
+by the adapters.
+
+## Raising and Clearing Alarms
+
+To create a specific alarm, instantiate the alarm class you wish to publish
+(such as **OnuDiscoveryAlarm** for newly discovered ONUs) and pass alarm-specific
+information to its initializer.
+
+Once constructed, you can call the alarm's **_raise_alarm()_** method to format and send
+an active alarm, or its **_clear_alarm()_** method to clear it, as sketched below.
+
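+For example, a device handler might do the following (a sketch; the handler
+attributes shown here are illustrative, not part of this library):
+
+```python
+from voltha.extensions.alarms.adapter_alarms import AdapterAlarms
+from voltha.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
+
+# Shortly after initial activation of the device handler
+self.alarms = AdapterAlarms(self.adapter_agent, device.id, logical_device.id)
+
+# When an ONU is discovered on a PON port
+OnuDiscoveryAlarm(self.alarms, pon_id=0,
+                  serial_number='ADTN17230031').raise_alarm()
+```
+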
+## Basic Alarm Format
+
+Here is a JSON example of a current alarm published on the Kafka bus under the
+_voltha.alarms_ topic:
+
+```json
+{
+  "id": "voltha.adtran_olt.000198f9c4d2ae80.Discovery",
+  "description": "adtran_olt.000198f9c4d2ae80 - ONU DISCOVERY Alarm - DISCOVERY - Raised",
+  "logical_device_id": "0001112233445566",
+  "state": "RAISED",
+  "category": "PON",
+  "severity": "CRITICAL",
+  "resource_id": "0",
+  "type": "EQUIPMENT",
+  "reported_ts": 1532031872.0,
+  "raised_ts": 1532031872.0,
+  "changed_ts": 0.0,
+  "context": {
+    "serial-number": "ADTN17230031",
+    "pon-id": "0"
+  }
+}
+```
+
+## Remaining Work Items
+This initial code is only a preliminary sample. The following tasks need to be
+added to the VOLTHA JIRA or performed in the SEBA group.
+
+- Get a list from SEBA/VOLTHA on required alarms 
+
+- Provide example JSON output and verify that it meets SEBA's requirements
+
+- Get feedback from other OLT/ONU developers on any needed changes
+
+- The logical_device_id is reported in the format the device adapter uses, which
+  includes the vcore number (often 0001) in the first four nibbles. Should this be
+  normalized to all zeros?
+
+- Support alarm_suppression capability (via IAdapter call). Needs investigation
+
+- TODO: Probably a few more.  Look through code for more 'TODO' Notes
diff --git a/python/extensions/alarms/__init__.py b/python/extensions/alarms/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/alarms/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/alarms/adapter_alarms.py b/python/extensions/alarms/adapter_alarms.py
new file mode 100644
index 0000000..b24113f
--- /dev/null
+++ b/python/extensions/alarms/adapter_alarms.py
@@ -0,0 +1,205 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import arrow
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity,\
+    AlarmEventState, AlarmEventCategory
+log = structlog.get_logger()
+
+
+# TODO: In the device adapter, the following alarms are still TBD
+#       (Taken from openolt_alarms)
+# onu_alarm_ind
+# onu_startup_failure_indication
+# onu_signal_degrade_indication
+# onu_drift_of_window_ind
+# onu_loss_omci_ind
+# onu_signals_fail_ind
+# onu_tiwi_ind
+# onu_activation_fail_ind
+# onu_processing_error_ind
+
+
+class AdapterAlarms:
+    """
+    Class for managing Alarms within a given Device Handler instance
+    """
+    def __init__(self, adapter_agent, device_id, logical_device_id):
+        """
+        Adapter alarm manager initializer
+
+        :param adapter_agent: (AdapterAgent) Adapter agent reference
+        :param device_id: (str) Device handler's unique device id
+        :param logical_device_id: (str) Logical Device that the device is a member of
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+        self.adapter_agent = adapter_agent
+        self.device_id = device_id
+        self.logical_device_id = logical_device_id
+        self.adapter_name = adapter_agent.adapter_name
+        self.lc = None
+
+    def format_id(self, alarm):
+        """
+        Format the unique alarm ID for this alarm. This is provided in the
+        alarm's 'id' field.
+
+        :param alarm: (str) The name of the alarm such as 'Discovery' or 'LOS'
+
+        :return: (str) Alarm ID, e.g. 'voltha.adtran_olt.000198f9c4d2ae80.Discovery'
+        """
+        return 'voltha.{}.{}.{}'.format(self.adapter_name,
+                                        self.device_id,
+                                        alarm)
+
+    def format_description(self, _object, alarm, status):
+        """
+        Format the textual description field of this alarm
+
+        :param _object: (str) Type of device generating the alarm such as 'olt' or 'onu'
+        :param alarm: (str) The name of the alarm such as 'Discovery' or 'LOS'
+        :param status: (bool) If True, the alarm is active (it is being raised)
+
+        :return: (str) Alarm description
+        """
+        return '{} Alarm - {} - {}'.format(_object.upper(),
+                                           alarm.upper(),
+                                           'Raised' if status else 'Cleared')
+
+    def send_alarm(self, context_data, alarm_data):
+        """
+        Send the alarm to the event bus
+
+        :param context_data: (dict) Alarm specific context data
+        :param alarm_data: (dict) Common Alarm information dictionary
+        """
+        try:
+            current_context = {}
+            if isinstance(context_data, dict):
+                for key, value in context_data.iteritems():
+                    current_context[key] = str(value)
+            device = self.adapter_agent.get_device(device_id=self.device_id)
+            ser_num = device.serial_number
+
+            # Only include the ONU serial number; the OLT does not currently
+            # have a serial number (its value is the ip:port address).
+            if isinstance(context_data, dict) and '_onu' in device.type.lower():
+                current_context["onu_serial_number"] = ser_num
+            alarm_event = self.adapter_agent.create_alarm(
+                id=alarm_data.get('id', 'voltha.{}.{}.olt'.format(self.adapter_name,
+                                                                  self.device_id)),
+                resource_id=str(alarm_data.get('resource_id', self.device_id)),
+                description="{}.{} - {}".format(self.adapter_name, self.device_id,
+                                                alarm_data.get('description')),
+                type=alarm_data.get('type'),
+                category=alarm_data.get('category'),
+                severity=alarm_data.get('severity'),
+                state=alarm_data.get('state'),
+                raised_ts=alarm_data.get('ts', 0),
+                context=current_context,
+                logical_device_id=self.logical_device_id,
+                alarm_type_name=alarm_data.get('alarm_type_name')
+            )
+            self.adapter_agent.submit_alarm(self.device_id, alarm_event)
+
+        except Exception as e:
+            self.log.exception('failed-to-send-alarm', e=e)
+            raise
+
+
+class AlarmBase(object):
+    """Base class for alarms"""
+    def __init__(self, alarm_mgr, object_type, alarm,
+                 alarm_category,
+                 resource_id=None,
+                 alarm_type=AlarmEventType.EQUIPMENT,
+                 alarm_severity=AlarmEventSeverity.CRITICAL):
+        """
+        Initializer for the Alarm base class
+
+        :param alarm_mgr: (AdapterAlarms) Reference to the device handler's Adapter
+                                          Alarm manager
+        :param object_type: (str) Type of device generating the alarm such as 'olt' or 'onu'
+        :param alarm: (str) A textual name for the alarm such as 'HeartBeat' or 'Discovery'
+        :param alarm_category: (AlarmEventCategory) Refers to functional category of
+                                                    the alarm
+        :param resource_id: (str) Identifier of the originating resource of the alarm
+        :param alarm_type: (AlarmEventType) Refers to the area of the system impacted
+                                            by the alarm
+        :param alarm_severity: (AlarmEventSeverity) Overall impact of the alarm on the
+                                                    system
+        """
+        self._alarm_mgr = alarm_mgr
+        self._object_type = object_type
+        self._alarm = alarm
+        self._alarm_category = alarm_category
+        self._alarm_type = alarm_type
+        self._alarm_severity = alarm_severity
+        self._resource_id = resource_id
+
+    def get_alarm_data(self, status):
+        """
+        Get the alarm specific data and format it into a dictionary.  When the alarm
+        is being sent to the event bus, this dictionary provides a majority of the
+        fields for the alarms.
+
+        :param status: (bool) True if the alarm is active/raised
+        :return: (dict) Alarm data
+        """
+        data = {
+            'ts': arrow.utcnow().timestamp,
+            'description': self._alarm_mgr.format_description(self._object_type,
+                                                              self._alarm,
+                                                              status),
+            'id': self._alarm_mgr.format_id(self._alarm),
+            'type': self._alarm_type,
+            'category': self._alarm_category,
+            'severity': self._alarm_severity,
+            'state': AlarmEventState.RAISED if status else AlarmEventState.CLEARED,
+            'alarm_type_name': self._alarm
+        }
+        if self._resource_id is not None:
+            data['resource_id'] = self._resource_id
+        return data
+
+    def get_context_data(self):
+        """
+        Get alarm specific context data. If an alarm has specific data to specify, it is
+        included in the context field in the published event
+
+        :return: (dict) Dictionary with alarm specific context data
+        """
+        return {}   # NOTE: You should override this if needed
+
+    def raise_alarm(self):
+        """
+        Called to set the state of an alarm to active and to send it to the event bus
+        """
+        alarm_data = self.get_alarm_data(True)
+        context_data = self.get_context_data()
+        self._alarm_mgr.send_alarm(context_data, alarm_data)
+
+    def clear_alarm(self):
+        """
+        Called to set the state of an alarm to inactive and to send it to the event bus
+        """
+        alarm_data = self.get_alarm_data(False)
+        context_data = self.get_context_data()
+        self._alarm_mgr.send_alarm(context_data, alarm_data)
diff --git a/python/extensions/alarms/heartbeat_alarm.py b/python/extensions/alarms/heartbeat_alarm.py
new file mode 100644
index 0000000..4f5f4f4
--- /dev/null
+++ b/python/extensions/alarms/heartbeat_alarm.py
@@ -0,0 +1,28 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class HeartbeatAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, object_type='olt', heartbeat_misses=0):
+        super(HeartbeatAlarm, self).__init__(alarm_mgr, object_type,
+                                             alarm='Heartbeat',
+                                             alarm_category=AlarmEventCategory.PON,
+                                             alarm_type=AlarmEventType.EQUIPMENT,
+                                             alarm_severity=AlarmEventSeverity.CRITICAL)
+        self._misses = heartbeat_misses
+
+    def get_context_data(self):
+        return {'heartbeats-missed': self._misses}
diff --git a/python/extensions/alarms/olt/__init__.py b/python/extensions/alarms/olt/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/alarms/olt/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/alarms/olt/olt_los_alarm.py b/python/extensions/alarms/olt/olt_los_alarm.py
new file mode 100644
index 0000000..c8666a3
--- /dev/null
+++ b/python/extensions/alarms/olt/olt_los_alarm.py
@@ -0,0 +1,32 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OltLosAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, intf_id, port_type_name):
+        super(OltLosAlarm, self).__init__(alarm_mgr, object_type='olt LOS',
+                                          alarm='OLT_LOS',
+                                          alarm_category=AlarmEventCategory.OLT,
+                                          alarm_type=AlarmEventType.COMMUNICATION,
+                                          alarm_severity=AlarmEventSeverity.MAJOR)
+        # Added port type to indicate if alarm was on NNI or PON
+        self._intf_id = intf_id
+        self._port_type_name = port_type_name
+
+    def get_context_data(self):
+        return {'olt-intf-id:': self._intf_id,
+                'olt-port-type-name': self._port_type_name}
diff --git a/python/extensions/alarms/onu/__init__.py b/python/extensions/alarms/onu/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/alarms/onu/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/alarms/onu/onu_activation_fail_alarm.py b/python/extensions/alarms/onu/onu_activation_fail_alarm.py
new file mode 100644
index 0000000..2bf054e
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_activation_fail_alarm.py
@@ -0,0 +1,30 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuActivationFailAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuActivationFailAlarm, self).__init__(alarm_mgr, object_type='onu ACTIVATION FAIL',
+                                          alarm='ONU_ACTIVATION_FAIL',
+                                          alarm_category=AlarmEventCategory.ONU,
+                                          alarm_type=AlarmEventType.COMMUNICATION,
+                                          alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_active_alarm.py b/python/extensions/alarms/onu/onu_active_alarm.py
new file mode 100644
index 0000000..a139875
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_active_alarm.py
@@ -0,0 +1,50 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+
+
+class OnuActiveAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, device_id, pon_id, onu_serial_number,
+                 reg_id, olt_serial_number, ipv4_address=None):
+        super(OnuActiveAlarm, self).__init__(alarm_mgr, object_type='ONU',
+                                             alarm='ONU_ACTIVATED',
+                                             alarm_category=AlarmEventCategory.PON,
+                                             resource_id=pon_id,
+                                             alarm_type=AlarmEventType.EQUIPMENT,
+                                             alarm_severity=AlarmEventSeverity.CRITICAL)
+        self._pon_id = pon_id
+        self._onu_serial_number = onu_serial_number
+        self._device_id = device_id
+        self._olt_serial_number = olt_serial_number
+        self._host = ipv4_address
+        self._reg_id = reg_id
+
+    def get_context_data(self):
+        data = {
+            'pon-id': self._pon_id,
+            'serial-number': self._onu_serial_number,
+            'olt_serial_number': self._olt_serial_number,
+            'device_id': self._device_id,
+            'registration_id': self._reg_id
+        }
+        if self._host is not None:
+            data['host'] = self._host
+
+        return data
+
+    def clear_alarm(self):
+        raise NotImplementedError('ONU Active Alarms are auto-clear')
+
diff --git a/python/extensions/alarms/onu/onu_discovery_alarm.py b/python/extensions/alarms/onu/onu_discovery_alarm.py
new file mode 100644
index 0000000..c7da2bc
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_discovery_alarm.py
@@ -0,0 +1,36 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuDiscoveryAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, pon_id, serial_number):
+        super(OnuDiscoveryAlarm, self).__init__(alarm_mgr, object_type='ONU Discovery',
+                                                alarm='ONU_DISCOVERY',
+                                                alarm_category=AlarmEventCategory.PON,
+                                                resource_id=pon_id,
+                                                alarm_type=AlarmEventType.EQUIPMENT,
+                                                alarm_severity=AlarmEventSeverity.MAJOR)
+        self._pon_id = pon_id
+        self._serial_number = serial_number
+
+    def get_context_data(self):
+        return {
+            'pon-id': self._pon_id,
+            'serial-number': self._serial_number
+        }
+
+    def clear_alarm(self):
+        raise NotImplementedError('ONU Discovery Alarms are auto-clear')
diff --git a/python/extensions/alarms/onu/onu_dying_gasp_alarm.py b/python/extensions/alarms/onu/onu_dying_gasp_alarm.py
new file mode 100644
index 0000000..52b6850
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_dying_gasp_alarm.py
@@ -0,0 +1,33 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuDyingGaspAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuDyingGaspAlarm, self).__init__(alarm_mgr, object_type='onu DYING_GASP',
+                                                alarm='ONU_DYING_GASP',
+                                                alarm_category=AlarmEventCategory.ONU,
+                                                alarm_type=AlarmEventType.COMMUNICATION,
+                                                alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {
+            'onu-id': self._onu_id,
+            'onu-intf-id': self._intf_id
+        }
diff --git a/python/extensions/alarms/onu/onu_equipment_alarm.py b/python/extensions/alarms/onu/onu_equipment_alarm.py
new file mode 100644
index 0000000..e7e3a7a
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_equipment_alarm.py
@@ -0,0 +1,45 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuEquipmentAlarm(AlarmBase):
+    """
+    The ONU Equipment Alarm is reported by both the CircuitPack (ME #6) and
+    the ONT-G (ME # 256) to indicate failure on an internal interface or
+    failed self-test.
+
+    For CircuitPack equipment alarms, the intf_id reported is that of the
+    UNI's logical port number
+
+    For ONT-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+
+    Note: Some ONUs may use this alarm to report a self-test failure or may
+          may report it with a different alarm number specifically for a
+          self-test failure.
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuEquipmentAlarm, self).__init__(alarm_mgr, object_type='onu equipment',
+                                                alarm='ONU_EQUIPMENT',
+                                                alarm_category=AlarmEventCategory.ONU,
+                                                alarm_type=AlarmEventType.EQUIPMENT,
+                                                alarm_severity=AlarmEventSeverity.CRITICAL)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_high_rx_optical_power_alarm.py b/python/extensions/alarms/onu/onu_high_rx_optical_power_alarm.py
new file mode 100644
index 0000000..7b59d55
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_high_rx_optical_power_alarm.py
@@ -0,0 +1,37 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuHighRxOpticalAlarm(AlarmBase):
+    """
+    The ONU High Tx Optical Power Alarm is reported by the ANI-G (ME # 263) to
+    indicate that the received downstream optical power above threshold..
+
+    For ANI-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuHighRxOpticalAlarm, self).__init__(alarm_mgr, object_type='onu high rx optical power',
+                                                    alarm='ONU_HIGH_RX_OPTICAL',
+                                                    alarm_category=AlarmEventCategory.ONU,
+                                                    alarm_type=AlarmEventType.COMMUNICATION,
+                                                    alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_high_tx_optical_power_alarm.py b/python/extensions/alarms/onu/onu_high_tx_optical_power_alarm.py
new file mode 100644
index 0000000..64caefe
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_high_tx_optical_power_alarm.py
@@ -0,0 +1,37 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuHighTxOpticalAlarm(AlarmBase):
+    """
+    The ONU High Tx Optical Power Alarm is reported by the ANI-G (ME # 263) to
+    indicate that the received downstream optical power above upper threshold.
+
+    For ANI-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuHighTxOpticalAlarm, self).__init__(alarm_mgr, object_type='onu high tx optical power',
+                                                    alarm='ONU_HIGH_TX_OPTICAL',
+                                                    alarm_category=AlarmEventCategory.ONU,
+                                                    alarm_type=AlarmEventType.COMMUNICATION,
+                                                    alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
\ No newline at end of file
diff --git a/python/extensions/alarms/onu/onu_laser_bias_current_alarm.py b/python/extensions/alarms/onu/onu_laser_bias_current_alarm.py
new file mode 100644
index 0000000..8daf5a6
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_laser_bias_current_alarm.py
@@ -0,0 +1,38 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLaserBiasAlarm(AlarmBase):
+    """
+    The ONU Laser Bias Current Alarm is reported by the ANI-G (ME # 263) to
+    indicate that the laser bias current above threshold determined by
+    vendor and that laser end of life is pending
+
+    For ANI-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLaserBiasAlarm, self).__init__(alarm_mgr, object_type='onu laser bias current',
+                                                alarm='ONU_LASER_BIAS_CURRENT',
+                                                alarm_category=AlarmEventCategory.ONU,
+                                                alarm_type=AlarmEventType.EQUIPMENT,
+                                                alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_laser_eol_alarm.py b/python/extensions/alarms/onu/onu_laser_eol_alarm.py
new file mode 100644
index 0000000..fa5039c
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_laser_eol_alarm.py
@@ -0,0 +1,36 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLaserEolAlarm(AlarmBase):
+    """
+    The ONU Laser End-of-Lifer Alarm is reported by both the CircuitPack (ME #6)
+    to indicate that failure of transmit laser imminent
+
+    The intf_id reported is that of the UNI's logical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLaserEolAlarm, self).__init__(alarm_mgr, object_type='onu laser EOL',
+                                               alarm='ONU_LASER_EOL',
+                                               alarm_category=AlarmEventCategory.ONU,
+                                               alarm_type=AlarmEventType.EQUIPMENT,
+                                               alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_lob_alarm.py b/python/extensions/alarms/onu/onu_lob_alarm.py
new file mode 100644
index 0000000..e595211
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_lob_alarm.py
@@ -0,0 +1,30 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLobAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLobAlarm, self).__init__(alarm_mgr, object_type='onu LOB',
+                                          alarm='ONU_LOB',
+                                          alarm_category=AlarmEventCategory.ONU,
+                                          alarm_type=AlarmEventType.COMMUNICATION,
+                                          alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_lopc_mic_error_alarm.py b/python/extensions/alarms/onu/onu_lopc_mic_error_alarm.py
new file mode 100644
index 0000000..cc05cb0
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_lopc_mic_error_alarm.py
@@ -0,0 +1,33 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLopcMicErrorAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLopcMicErrorAlarm, self).__init__(alarm_mgr,  object_type='onu LOPC_MIC_ERROR',
+                                                   alarm='ONU_LOPC_MIC_ERROR',
+                                                   alarm_category=AlarmEventCategory.ONU,
+                                                   alarm_type=AlarmEventType.COMMUNICATION,
+                                                   alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {
+            'onu-id': self._onu_id,
+            'onu-intf-id': self._intf_id
+        }
diff --git a/python/extensions/alarms/onu/onu_lopc_miss_alarm.py b/python/extensions/alarms/onu/onu_lopc_miss_alarm.py
new file mode 100644
index 0000000..af695ca
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_lopc_miss_alarm.py
@@ -0,0 +1,33 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLopcMissAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLopcMissAlarm, self).__init__(alarm_mgr, object_type='onu LOPC_MISS',
+                                               alarm='ONU_LOPC_MISS',
+                                               alarm_category=AlarmEventCategory.ONU,
+                                               alarm_type=AlarmEventType.COMMUNICATION,
+                                               alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {
+            'onu-id': self._onu_id,
+            'onu-intf-id': self._intf_id
+        }
diff --git a/python/extensions/alarms/onu/onu_los_alarm.py b/python/extensions/alarms/onu/onu_los_alarm.py
new file mode 100644
index 0000000..d2ebb7f
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_los_alarm.py
@@ -0,0 +1,30 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLosAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLosAlarm, self).__init__(alarm_mgr, object_type='onu LOS',
+                                          alarm='ONU_LOS',
+                                          alarm_category=AlarmEventCategory.ONU,
+                                          alarm_type=AlarmEventType.COMMUNICATION,
+                                          alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_low_rx_optical_power_alarm.py b/python/extensions/alarms/onu/onu_low_rx_optical_power_alarm.py
new file mode 100644
index 0000000..ee6f4d2
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_low_rx_optical_power_alarm.py
@@ -0,0 +1,37 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLowRxOpticalAlarm(AlarmBase):
+    """
+    The ONU Low Rx Optical Power Alarm is reported by the ANI-G (ME # 263) to
+    indicate that the received downstream optical power is below threshold.
+
+    For ANI-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLowRxOpticalAlarm, self).__init__(alarm_mgr, object_type='onu low rx optical power',
+                                                   alarm='ONU_LOW_RX_OPTICAL',
+                                                   alarm_category=AlarmEventCategory.ONU,
+                                                   alarm_type=AlarmEventType.COMMUNICATION,
+                                                   alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_low_tx_optical_power_alarm.py b/python/extensions/alarms/onu/onu_low_tx_optical_power_alarm.py
new file mode 100644
index 0000000..e28a556
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_low_tx_optical_power_alarm.py
@@ -0,0 +1,37 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuLowTxOpticalAlarm(AlarmBase):
+    """
+    The ONU Low Tx Optical Power Alarm is reported by the ANI-G (ME # 263) to
+    indicate that the transmit optical power is below the lower threshold
+
+    For ANI-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuLowTxOpticalAlarm, self).__init__(alarm_mgr, object_type='onu low tx optical power',
+                                                   alarm='ONU_LOW_TX_OPTICAL',
+                                                   alarm_category=AlarmEventCategory.ONU,
+                                                   alarm_type=AlarmEventType.COMMUNICATION,
+                                                   alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_selftest_failure_alarm.py b/python/extensions/alarms/onu/onu_selftest_failure_alarm.py
new file mode 100644
index 0000000..c742762
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_selftest_failure_alarm.py
@@ -0,0 +1,44 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuSelfTestFailureAlarm(AlarmBase):
+    """
+    The ONU Self Test Failure Alarm is reported by both the CircuitPack (ME #6)
+    and the ONT-G (ME # 256) to indicate a failed autonomous self-test.
+
+    For CircuitPack equipment alarms, the intf_id reported is that of the
+    UNI's logical port number
+
+    For ONT-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+
+    Note: Some ONUs may use this alarm to report a self-test failure, or
+          may report it with the ONU Equipment Alarm, which can also cover
+          a self-test failure.
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuSelfTestFailureAlarm, self).__init__(alarm_mgr, object_type='onu self-test failure',
+                                                      alarm='ONU_SELF_TEST_FAIL',
+                                                      alarm_category=AlarmEventCategory.ONU,
+                                                      alarm_type=AlarmEventType.EQUIPTMENT,
+                                                      alarm_severity=AlarmEventSeverity.CRITICAL)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_signal_degrade_alarm.py b/python/extensions/alarms/onu/onu_signal_degrade_alarm.py
new file mode 100644
index 0000000..4861f75
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_signal_degrade_alarm.py
@@ -0,0 +1,33 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuSignalDegradeAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id,
+                 inverse_bit_error_rate):
+        super(OnuSignalDegradeAlarm, self).__init__(alarm_mgr, object_type='onu SIGNAL DEGRADE',
+                                                    alarm='ONU_SIGNAL_DEGRADE',
+                                                    alarm_category=AlarmEventCategory.ONU,
+                                                    alarm_type=AlarmEventType.COMMUNICATION,
+                                                    alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+        self._inverse_bit_error_rate = inverse_bit_error_rate
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id,
+                'inverse-bit-error-rate': self._inverse_bit_error_rate}
diff --git a/python/extensions/alarms/onu/onu_signal_fail_alarm.py b/python/extensions/alarms/onu/onu_signal_fail_alarm.py
new file mode 100644
index 0000000..bcc629a
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_signal_fail_alarm.py
@@ -0,0 +1,39 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+"""
+    OnuSignalsFailureIndication {
+                fixed32 intf_id = 1;
+                fixed32 onu_id = 2;
+                string status = 3;
+                fixed32 inverse_bit_error_rate = 4;
+"""
+
+class OnuSignalFailAlarm(AlarmBase):
+    def __init__(self, alarm_mgr, onu_id, intf_id, inverse_bit_error_rate):
+        super(OnuSignalFailAlarm, self).__init__(alarm_mgr, object_type='onu SIGNAL FAIL',
+                                                 alarm='ONU_SIGNAL_FAIL',
+                                                 alarm_category=AlarmEventCategory.ONU,
+                                                 alarm_type=AlarmEventType.COMMUNICATION,
+                                                 alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+        self._inverse_bit_error_rate = inverse_bit_error_rate
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id,
+                'inverse-bit-error-rate': self._inverse_bit_error_rate}
diff --git a/python/extensions/alarms/onu/onu_startup_alarm.py b/python/extensions/alarms/onu/onu_startup_alarm.py
new file mode 100644
index 0000000..9960f03
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_startup_alarm.py
@@ -0,0 +1,39 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+"""
+    message OnuStartupFailureIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+}
+
+"""
+
+class OnuStartupAlarm(AlarmBase):
+
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuStartupAlarm, self).__init__(alarm_mgr, object_type='onu STARTUP FAIL',
+                                              alarm='ONU_STARTUP_FAIL',
+                                              alarm_category=AlarmEventCategory.ONU,
+                                              alarm_type=AlarmEventType.COMMUNICATION,
+                                              alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_temp_red_alarm.py b/python/extensions/alarms/onu/onu_temp_red_alarm.py
new file mode 100644
index 0000000..bfa1623
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_temp_red_alarm.py
@@ -0,0 +1,42 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuTempRedAlarm(AlarmBase):
+    """
+    The ONU Temperature Red Alarm is reported by both the CircuitPack
+    (ME #6) and the ONT-G (ME # 256) to indicate that some services have
+    been shut down to avoid equipment damage. The operational state of the
+    affected PPTPs indicates the affected services.
+
+    For CircuitPack equipment alarms, the intf_id reported is that of the
+    UNI's logical port number
+
+    For ONT-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuTempRedAlarm, self).__init__(alarm_mgr, object_type='onu temperature red',
+                                              alarm='ONU_TEMP_RED',
+                                              alarm_category=AlarmEventCategory.ONU,
+                                              alarm_type=AlarmEventType.ENVIRONMENT,
+                                              alarm_severity=AlarmEventSeverity.CRITICAL)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_temp_yellow_alarm.py b/python/extensions/alarms/onu/onu_temp_yellow_alarm.py
new file mode 100644
index 0000000..7a28f81
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_temp_yellow_alarm.py
@@ -0,0 +1,41 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuTempYellowAlarm(AlarmBase):
+    """
+    The ONU Temperature Yellow Alarm is reported by both the CircuitPack
+    (ME #6) and the ONT-G (ME # 256) to indicate that there is no service
+    shutdown at present, but the circuit pack is operating beyond its
+    recommended range.
+
+    For CircuitPack equipment alarms, the intf_id reported is that of the
+    UNI's logical port number
+
+    For ONT-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuTempYellowAlarm, self).__init__(alarm_mgr, object_type='onu temperature yellow',
+                                                 alarm='ONU_TEMP_YELLOW',
+                                                 alarm_category=AlarmEventCategory.ONU,
+                                                 alarm_type=AlarmEventType.ENVIRONMENT,
+                                                 alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_voltage_red_alarm.py b/python/extensions/alarms/onu/onu_voltage_red_alarm.py
new file mode 100644
index 0000000..506351c
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_voltage_red_alarm.py
@@ -0,0 +1,39 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuVoltageRedAlarm(AlarmBase):
+    """
+    The ONU Voltage Red Alarm is reported by the ONT-G (ME # 256) to
+    indicate some services have been shut down to avoid power collapse.
+    The operational state of the affected PPTPs indicates the affected
+    services.
+
+    For ONT-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuVoltageRedAlarm, self).__init__(alarm_mgr, object_type='onu voltage red',
+                                                 alarm='ONU_VOLTAGE_RED',
+                                                 alarm_category=AlarmEventCategory.ONU,
+                                                 alarm_type=AlarmEventType.COMMUNICATION,
+                                                 alarm_severity=AlarmEventSeverity.CRITICAL)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_voltage_yellow_alarm.py b/python/extensions/alarms/onu/onu_voltage_yellow_alarm.py
new file mode 100644
index 0000000..1bb3ef6
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_voltage_yellow_alarm.py
@@ -0,0 +1,39 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuVoltageYellowAlarm(AlarmBase):
+    """
+    The ONU Voltage Yellow Alarm is reported by the ONT-G (ME # 256) to
+    indicate some services have been shut down to avoid power collapse.
+    The operational state of the affected PPTPs indicates the affected
+    services.
+
+    For ONT-G equipment alarms, the intf_id reported is that of the PON/ANI
+    physical port number
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id):
+        super(OnuVoltageYellowAlarm, self).__init__(alarm_mgr, object_type='onu voltage yellow',
+                                                    alarm='ONU_VOLTAGE_YELLOW',
+                                                    alarm_category=AlarmEventCategory.ONU,
+                                                    alarm_type=AlarmEventType.COMMUNICATION,
+                                                    alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id}
diff --git a/python/extensions/alarms/onu/onu_window_drift_alarm.py b/python/extensions/alarms/onu/onu_window_drift_alarm.py
new file mode 100644
index 0000000..32d677d
--- /dev/null
+++ b/python/extensions/alarms/onu/onu_window_drift_alarm.py
@@ -0,0 +1,43 @@
+# Copyright 2017-present Adtran, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+from voltha.extensions.alarms.adapter_alarms import AlarmBase
+
+
+class OnuWindowDriftAlarm(AlarmBase):
+    """
+    OnuDriftOfWindowIndication {
+            fixed32 intf_id = 1;
+            fixed32 onu_id = 2;
+            string status = 3;
+            fixed32 drift = 4;
+            fixed32 new_eqd = 5;
+        }
+    """
+    def __init__(self, alarm_mgr, onu_id, intf_id, drift, new_eqd):
+        super(OnuWindowDriftAlarm, self).__init__(alarm_mgr, object_type='onu WINDOW DRIFT',
+                                                  alarm='ONU_WINDOW_DRIFT',
+                                                  alarm_category=AlarmEventCategory.ONU,
+                                                  alarm_type=AlarmEventType.COMMUNICATION,
+                                                  alarm_severity=AlarmEventSeverity.MAJOR)
+        self._onu_id = onu_id
+        self._intf_id = intf_id
+        self._drift = drift
+        self._new_eqd = new_eqd
+
+    def get_context_data(self):
+        return {'onu-id': self._onu_id,
+                'onu-intf-id': self._intf_id,
+                'drift': self._drift,
+                'new-eqd': self._new_eqd}
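
This is the one ONU alarm in the set that carries measurement context beyond the ids. A quick sketch of the resulting context payload (illustrative values; `alarm_mgr` as assumed earlier):

```python
# drift/new_eqd are illustrative numbers, not taken from a real indication.
alarm = OnuWindowDriftAlarm(alarm_mgr, onu_id=1, intf_id=0, drift=8, new_eqd=512)
print(alarm.get_context_data())
# -> {'onu-id': 1, 'onu-intf-id': 0, 'drift': 8, 'new-eqd': 512}
```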
diff --git a/python/extensions/alarms/simulator/README.md b/python/extensions/alarms/simulator/README.md
new file mode 100644
index 0000000..1333ed7
--- /dev/null
+++ b/python/extensions/alarms/simulator/README.md
@@ -0,0 +1 @@
+TODO
diff --git a/python/extensions/alarms/simulator/__init__.py b/python/extensions/alarms/simulator/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/alarms/simulator/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/alarms/simulator/simulate_alarms.py b/python/extensions/alarms/simulator/simulate_alarms.py
new file mode 100644
index 0000000..4dfee37
--- /dev/null
+++ b/python/extensions/alarms/simulator/simulate_alarms.py
@@ -0,0 +1,77 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.extensions.alarms.olt.olt_los_alarm import OltLosAlarm
+from voltha.extensions.alarms.onu.onu_dying_gasp_alarm import OnuDyingGaspAlarm
+from voltha.extensions.alarms.onu.onu_los_alarm import OnuLosAlarm
+from voltha.extensions.alarms.onu.onu_lopc_miss_alarm import OnuLopcMissAlarm
+from voltha.extensions.alarms.onu.onu_lopc_mic_error_alarm import OnuLopcMicErrorAlarm
+from voltha.extensions.alarms.onu.onu_lob_alarm import OnuLobAlarm
+
+from voltha.extensions.alarms.onu.onu_startup_alarm import OnuStartupAlarm
+from voltha.extensions.alarms.onu.onu_signal_degrade_alarm import OnuSignalDegradeAlarm
+from voltha.extensions.alarms.onu.onu_signal_fail_alarm import OnuSignalFailAlarm
+from voltha.extensions.alarms.onu.onu_window_drift_alarm import OnuWindowDriftAlarm
+from voltha.extensions.alarms.onu.onu_activation_fail_alarm import OnuActivationFailAlarm
+
+from voltha.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
+
+class AdapterAlarmSimulator(object):
+    def __init__(self, adapter_alarms):
+        self.adapter_alarms = adapter_alarms
+
+    def simulate_alarm(self, alarm):
+        if alarm.indicator == "los":
+            alarm_obj = OltLosAlarm(self.adapter_alarms, intf_id=alarm.intf_id, port_type_name=alarm.port_type_name)
+        elif alarm.indicator == "dying_gasp":
+            alarm_obj = OnuDyingGaspAlarm(self.adapter_alarms, onu_id=alarm.onu_device_id, intf_id=alarm.intf_id)
+        elif alarm.indicator == "onu_los":
+            alarm_obj = OnuLosAlarm(self.adapter_alarms, onu_id=alarm.onu_device_id, intf_id=alarm.intf_id)
+        elif alarm.indicator == "onu_lopc_miss":
+            alarm_obj = OnuLopcMissAlarm(self.adapter_alarms, onu_id=alarm.onu_device_id, intf_id=alarm.intf_id)
+        elif alarm.indicator == "onu_lopc_mic":
+            alarm_obj = OnuLopcMicErrorAlarm(self.adapter_alarms, onu_id=alarm.onu_device_id, intf_id=alarm.intf_id)
+        elif alarm.indicator == "onu_lob":
+            alarm_obj = OnuLobAlarm(self.adapter_alarms, onu_id=alarm.onu_device_id, intf_id=alarm.intf_id)
+        elif alarm.indicator == "onu_startup":
+            alarm_obj = OnuStartupAlarm(self.adapter_alarms, intf_id=alarm.intf_id, onu_id=alarm.onu_device_id)
+        elif alarm.indicator == "onu_signal_degrade":
+            alarm_obj = OnuSignalDegradeAlarm(self.adapter_alarms, intf_id=alarm.intf_id, onu_id=alarm.onu_device_id,
+                                  inverse_bit_error_rate=alarm.inverse_bit_error_rate)
+        elif alarm.indicator == "onu_drift_of_window":
+            alarm_obj = OnuWindowDriftAlarm(self.adapter_alarms, intf_id=alarm.intf_id,
+                                onu_id=alarm.onu_device_id,
+                                drift=alarm.drift,
+                                new_eqd=alarm.new_eqd)
+        elif alarm.indicator == "onu_signal_fail":
+            alarm_obj = OnuSignalFailAlarm(self.adapter_alarms, intf_id=alarm.intf_id,
+                               onu_id=alarm.onu_device_id,
+                               inverse_bit_error_rate=alarm.inverse_bit_error_rate)
+        elif alarm.indicator == "onu_activation":
+            alarm_obj = OnuActivationFailAlarm(self.adapter_alarms, intf_id=alarm.intf_id,
+                                   onu_id=alarm.onu_device_id)
+        elif alarm.indicator == "onu_discovery":
+            alarm_obj = OnuDiscoveryAlarm(self.adapter_alarms, pon_id=alarm.intf_id,
+                                   serial_number=alarm.onu_serial_number)
+        else:
+            raise Exception("Unknown alarm indicator %s" % alarm.indicator)
+
+        if alarm.operation == alarm.RAISE:
+            alarm_obj.raise_alarm()
+        elif alarm.operation == alarm.CLEAR:
+            alarm_obj.clear_alarm()
+        else:
+            # This shouldn't happen
+            raise Exception("Unknown alarm operation")
diff --git a/python/extensions/eoam/EOAM.py b/python/extensions/eoam/EOAM.py
new file mode 100644
index 0000000..935f0e3
--- /dev/null
+++ b/python/extensions/eoam/EOAM.py
@@ -0,0 +1,507 @@
+#!/usr/bin/env python
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------#
+# Copyright (C) 2015 - 2016 by Tibit Communications, Inc.                  #
+# All rights reserved.                                                     #
+#                                                                          #
+#    _______ ____  _ ______                                                #
+#   /_  __(_) __ )(_)_  __/                                                #
+#    / / / / __  / / / /                                                   #
+#   / / / / /_/ / / / /                                                    #
+#  /_/ /_/_____/_/ /_/                                                     #
+#                                                                          #
+#--------------------------------------------------------------------------#
+""" EOAM protocol implementation in scapy """
+
+TIBIT_VERSION_NUMBER = '1.1.4'
+
+import argparse
+import logging
+import time
+from hexdump import hexdump
+from datetime import datetime
+
+
+logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.sendrecv import sendp
+from scapy.fields import PacketField
+from scapy.packet import bind_layers
+from scapy.fields import StrField, X3BytesField
+from scapy.packet import Packet
+from scapy.fields import ByteEnumField, XShortField, XByteField, MACField, \
+    ByteField, BitEnumField, BitField, ShortField
+from scapy.fields import XLongField, StrFixedLenField, XIntField, \
+    FieldLenField, StrLenField, IntField
+
+import fcntl, socket, struct # for get hw address
+
+from EOAM_Layers import EOAM_MULTICAST_ADDRESS, IGMP_MULTICAST_ADDRESS, OAM_ETHERTYPE
+from EOAM_Layers import VENDOR_SPECIFIC_OPCODE, CABLELABS_OUI, TIBIT_OUI
+from EOAM_Layers import RxedOamMsgTypeEnum, RxedOamMsgTypes
+from EOAM_Layers import EOAMPayload, EOAM_EventMsg, EOAM_VendSpecificMsg, EOAM_TibitMsg, EOAM_DpoeMsg, EOAM_OmciMsg
+
+# TODO should remove import *
+from EOAM_TLV import *
+
+ADTRAN_SHORTENED_VSSN = u'4144'  # 'AD'
+TIBIT_SHORTENED_VSSN  = u'5442'  # 'TB'
+
+def get_oam_msg_type(log, frame):
+
+    respType = RxedOamMsgTypeEnum["Unknown"]
+    recv_frame = frame
+
+    if recv_frame.haslayer(EOAMPayload):
+        if recv_frame.haslayer(EOAM_EventMsg):
+            respType = RxedOamMsgTypeEnum["Event Notification"]
+        elif recv_frame.haslayer(EOAM_OmciMsg):
+            respType = RxedOamMsgTypeEnum["OMCI Message"]
+        else:
+            dpoeOpcode = 0x00
+            if recv_frame.haslayer(EOAM_TibitMsg):
+                dpoeOpcode = recv_frame.getlayer(EOAM_TibitMsg).dpoe_opcode
+            elif recv_frame.haslayer(EOAM_DpoeMsg):
+                dpoeOpcode = recv_frame.getlayer(EOAM_DpoeMsg).dpoe_opcode
+
+            # Get Response
+            if (dpoeOpcode == DPoEOpcodes["Get Response"]):
+                respType = RxedOamMsgTypeEnum["DPoE Get Response"]
+
+            # Set Response
+            elif (dpoeOpcode == DPoEOpcodes["Set Response"]):
+                respType = RxedOamMsgTypeEnum["DPoE Set Response"]
+
+            # File Transfer ACK
+            elif (dpoeOpcode == DPoEOpcodes["File Transfer"]):
+                respType = RxedOamMsgTypeEnum["DPoE File Transfer"]
+            else:
+                log.info("Unsupported DPoE Opcode {:0>2X}".format(dpoeOpcode))
+    else:
+        log.info("Invalid OAM Header")
+
+    log.info('Received OAM Message - %s' % RxedOamMsgTypes[respType])
+
+    return respType
+
+
+def handle_get_value(log, loadstr, startOfTlvs, queryBranch, queryLeaf):
+    retVal = False
+    value = 0
+    branch = 0
+    leaf = 0
+    bytesRead = startOfTlvs
+    loadstrlen = len(loadstr)
+
+    while (bytesRead <= loadstrlen):
+        (branch, leaf) = struct.unpack_from('>BH', loadstr, bytesRead)
+
+        if (branch != 0):
+            bytesRead += 3
+            length = struct.unpack_from('>B', loadstr, bytesRead)[0]
+            bytesRead += 1
+
+            if (length == 1):
+                value = struct.unpack_from(">B", loadstr, bytesRead)[0]
+            elif (length == 2):
+                value = struct.unpack_from(">H", loadstr, bytesRead)[0]
+            elif (length == 4):
+                value = struct.unpack_from(">I", loadstr, bytesRead)[0]
+            elif (length == 8):
+                value = struct.unpack_from(">Q", loadstr, bytesRead)[0]
+            else:
+                if (length >= 0x80):
+                    log.info('Branch 0x{:0>2X} Leaf 0x{:0>4X} {}'.format(branch, leaf, DPoEVariableResponseEnum[length]))
+                    # Set length to zero so bytesRead doesn't get mistakenly incremented below
+                    length = 0
+                else:
+                    # Attributes with a length of zero are actually 128 bytes long
+                    if (length == 0):
+                        length = 128
+                    valStr = ">{}s".format(length)
+                    value = struct.unpack_from(valStr, loadstr, bytesRead)[0]
+
+            if (length > 0):
+                bytesRead += length
+
+            if (branch != OamBranches["DPoE Object"]):
+                if ( ((queryBranch == 0) and (queryLeaf == 0)) or
+                     ((queryBranch == branch) and (queryLeaf == leaf)) ):
+                    # Prevent zero-lengthed values from returning success
+                    if (length > 0):
+                        retVal = True
+                    break
+        else:
+            break
+
+    if (retVal == False):
+        value = 0
+
+    return retVal, bytesRead, value, branch, leaf
+
+
+def get_value_from_msg(log, frame, branch, leaf):
+    retVal = False
+    value = 0
+    recv_frame = frame
+
+    if recv_frame.haslayer(EOAMPayload):
+        payload = recv_frame.payload
+        if hasattr(payload, 'body'):
+            loadstr = payload.body.load
+            # Get a specific TLV value
+            (retVal, bytesRead, value, retbranch, retleaf) = handle_get_value(log, loadstr, 0, branch, leaf)
+        else:
+            log.info('received frame has no payload')
+    else:
+        log.info('Invalid OAM Header')
+    return retVal, value
+
+def check_set_resp_attrs(log, loadstr, startOfTlvs):
+    retVal = True
+    branch = 0
+    leaf = 0
+    length = 0
+    bytesRead = startOfTlvs
+    loadstrlen = len(loadstr)
+
+    while (bytesRead <= loadstrlen):
+        (branch, leaf) = struct.unpack_from('>BH', loadstr, bytesRead)
+#            print "Branch/Leaf        0x{:0>2X}/0x{:0>4X}".format(branch, leaf)
+
+        if (branch != 0):
+            bytesRead += 3
+            length = struct.unpack_from('>B', loadstr, bytesRead)[0]
+#                print "Length:            0x{:0>2X} ({})".format(length,length)
+            bytesRead += 1
+
+            if (length >= 0x80):
+                log.info('Branch 0x{:0>2X} Leaf 0x{:0>4X} {}'.format(branch, leaf, DPoEVariableResponseEnum[length]))
+                if (length > 0x80):
+                    retVal = False
+                    break
+            else:
+                bytesRead += length
+
+        else:
+            break
+
+    return retVal, branch, leaf, length
+
+def check_set_resp(log, frame):
+    rc = False
+    branch = 0
+    leaf = 0
+    status = 0
+    recv_frame = frame
+
+    if recv_frame.haslayer(EOAMPayload):
+        payload = recv_frame.payload
+        if hasattr(payload, 'body'):
+            loadstr = payload.body.load
+            # Get a specific TLV value
+            (rc, branch, leaf, status) = check_set_resp_attrs(log, loadstr, 0)
+        else:
+            log.info('received frame has no payload')
+    else:
+        log.info('Invalid OAM Header')
+    return rc, branch, leaf, status
+
+
+def handle_get_event_context(log, loadstr, startOfTlvs, queryType):
+    retVal = False
+    value = 0
+    objType = 0
+    bytesRead = startOfTlvs
+    loadstrlen = len(loadstr)
+
+    while (bytesRead <= loadstrlen):
+        objType = struct.unpack_from('>H', loadstr, bytesRead)[0]
+#            print "Branch/Leaf        0x{:0>2X}/0x{:0>4X}".format(branch, leaf)
+
+        if (objType != 0):
+            bytesRead += 2
+            length = struct.unpack_from('>B', loadstr, bytesRead)[0]
+#                print "Length:            0x{:0>2X} ({})".format(length,length)
+            bytesRead += 1
+
+            if (length == 1):
+                value = struct.unpack_from(">B", loadstr, bytesRead)[0]
+            elif (length == 2):
+                value = struct.unpack_from(">H", loadstr, bytesRead)[0]
+            elif (length == 4):
+                value = struct.unpack_from(">I", loadstr, bytesRead)[0]
+            elif (length == 8):
+                value = struct.unpack_from(">Q", loadstr, bytesRead)[0]
+            else:
+                valStr = ">{}s".format(length)
+                value = struct.unpack_from(valStr, loadstr, bytesRead)[0]
+
+#                print "Value:             {}".format(value)
+
+            if (length > 0):
+                bytesRead += length
+
+            if ( (queryType == 0) or (queryType == objType) ):
+                # Prevent zero-lengthed values from returning success
+                if (length > 0):
+                    retVal = True
+                break
+        else:
+            break
+
+    if (retVal == False):
+        value = 0
+
+    return retVal, bytesRead, value, objType
+
+
+def handle_tibit_oam_event(log, loadstr):
+    bytesRead = 0
+    loadstrlen = len(loadstr)
+    if loadstrlen > 0:
+        rc = True
+        num_iters = 0
+        bytesRead = 0
+        link_mac = ""
+        msg = ""
+        # There are two contexts in a Tibit-specific event - Source & Reference Contexts
+        while (rc == True and num_iters < 2):
+            objType = 0
+            (rc, bytesRead, value, objType) = handle_get_event_context(log, loadstr, bytesRead, objType)
+            if (rc == True):
+                if objType == 0x0001:
+#                        print "PON Object 0x{:0>4X}  Value = {}".format(objType, value)
+                    pass
+                elif objType == 0x000A:
+                    # This is a Unicast Logical Link context. Determine if this is a GPON or EPON link
+                    if value[1:5] == "TBIT":
+                        #
+                        link_mac = ''.join(s.encode('hex') for s in value[1:3])
+                        link_mac += ''.join(s.encode('hex') for s in value[5:9])
+                    else:
+                        link_mac = ''.join(s.encode('hex') for s in value[1:7])
+
+#                        print "Unicast Logical Link Object 0x{:0>4X}  Value = {}".format(objType, link_mac)
+                else:
+                    log.info("Object Type 0x{:0>4X}  value = {}".format(objType, value))
+            elif (objType != 0):
+                log.error("Object Type 0x{:0>4X}  no value".format(objType))
+            num_iters += 1
+
+        # Pull the Event Code and Event Length out of the event
+        (evtCode, evtLen) = struct.unpack_from('>HB', loadstr, bytesRead)
+        bytesRead += 3
+
+#            print "Event Code  : 0x{:0>4X}".format(evtCode)
+#            print "Event Len   : 0x{:0>4X}".format(evtLen)
+
+        # Tibit Registration Event
+        if (evtCode == 0x0001):
+            # Handle Registration Status attribute
+            regStatus = struct.unpack_from('>B', loadstr, bytesRead)[0]
+            if regStatus == 1:
+                msg = "Link {} Registered".format(link_mac)
+            else:
+                msg = "Link {} Deregistered".format(link_mac)
+
+    return objType, evtCode, msg
+
+
+def handle_dpoe_oam_event(log, loadstr):
+    bytesRead = 0
+    loadstrlen = len(loadstr)
+    if loadstrlen > 0:
+
+        (evtCode, raised, objType) = struct.unpack_from('>BBH', loadstr, bytesRead)
+        bytesRead += 4
+
+#            print "Event Code  : 0x{:0>4X}".format(evtCode)
+#            print "Event Len   : 0x{:0>4X}".format(evtLen)
+
+        if ((loadstrlen - bytesRead) == 2):
+            objInst = struct.unpack_from(">H", loadstr, bytesRead)[0]
+        elif ((loadstrlen - bytesRead) == 4):
+            objInst = struct.unpack_from(">I", loadstr, bytesRead)[0]
+
+        objTypeStr = ObjectContextEnum[objType]
+        evtCodeStr = DPoEEventCodeEnum[evtCode]
+
+        raisedStr = "Raised"
+        if (raised):
+            raisedStr = "Cleared"
+
+        #print "{} : {} - {} {}".format(objTypeStr, objInst, evtCodeStr, raisedStr)
+        return objType, evtCode, objTypeStr + ":" + evtCodeStr
+
+
+def handle_oam_event(log, frame):
+    recv_frame = frame
+    if recv_frame.haslayer(EOAM_EventMsg):
+        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
+        event = recv_frame.getlayer(EOAM_EventMsg)
+        if hasattr(event, 'body'):
+            loadstr = event.body.load
+
+            if (event.tlv_type != VENDOR_SPECIFIC_OPCODE):
+                log.error("unexpected tlv_type 0x%x (expected 0xFE)" % event.tlv_type)
+            elif (event.oui == CABLELABS_OUI):
+                log.info("DPoE Event")
+                objType, eventCode, msg = handle_dpoe_oam_event(log, loadstr)
+            elif (event.oui == TIBIT_OUI):
+                log.info("Tibit-specific Event")
+                objType, eventCode, msg = handle_tibit_oam_event(log, loadstr)
+
+            log.info("Description:    %s" % msg)
+            log.info("sequence:       0x%04x" % event.sequence)
+            log.info("tlv_type:       0x%x" % event.tlv_type)
+            log.info("length:         0x%x" % event.length)
+            log.info("oui:            0x%06x" % event.oui)
+            log.info("time_stamp:     %s" % now)
+            log.info("obj_type:       "+hex(objType))
+            log.info("event_code:     "+hex(eventCode))
+
+    # TODO - Store the event for future use or generate alarm
+    #event_data = [msg, event.sequence, objType, eventCode, now]
+
+def handle_omci(log, frame):
+    recv_frame = frame
+    if recv_frame.haslayer(EOAM_OmciMsg):
+        omci = recv_frame.getlayer(EOAM_OmciMsg)
+        if hasattr(omci, 'body'):
+            loadstr = omci.body.load
+
+            #log.info("trans_id:  0x%04x" % omci.trans_id)
+            #log.info("msg_type:  0x%x" % omci.msg_type)
+            #log.info("dev_id:    0x%x" % omci.dev_id)
+            #log.info("me_class:  0x%04x" % omci.me_class)
+            #log.info("me_inst:   0x%04x" % omci.me_inst)
+
+            bytesRead = 0
+
+    # TODO - Handle OMCI message
+
+def handle_fx_ack(log, loadstr):
+    response_code = Dpoe_FileAckRspOpcodes["OK"]
+
+    (fx_opcode, acked_block, response_code) = struct.unpack('>BHB', loadstr[0:4])
+
+    if (fx_opcode == Dpoe_FileXferOpcodes["File Transfer Ack"]):
+        pass
+        #log.info("   Acked_block: {} Code: {}".format(acked_block, DPoEFileAckRespCodeEnum[response_code]))
+    else:
+        log.error("Unexpected File Transfer Opcode {} when expecting ACK".format(DPoEFileXferOpcodeEnum[fx_opcode]))
+
+    return response_code, acked_block
+
+
+def check_resp(log, frame):
+    respType = RxedOamMsgTypeEnum["Unknown"]
+    recv_frame = frame
+    if recv_frame.haslayer(EOAMPayload):
+
+        if recv_frame.haslayer(EOAM_EventMsg):
+            handle_oam_event(log, recv_frame)
+        elif recv_frame.haslayer(EOAM_OmciMsg):
+            handle_omci(log, recv_frame)
+        else:
+            dpoeOpcode = 0x00
+            if recv_frame.haslayer(EOAM_TibitMsg):
+                dpoeOpcode = recv_frame.getlayer(EOAM_TibitMsg).dpoe_opcode
+            elif recv_frame.haslayer(EOAM_DpoeMsg):
+                dpoeOpcode = recv_frame.getlayer(EOAM_DpoeMsg).dpoe_opcode
+
+            if hasattr(recv_frame, 'body'):
+                payload = recv_frame.payload
+                loadstr = payload.body.load
+
+            # Get Response
+            if (dpoeOpcode == DPoEOpcodes["Get Response"]):
+                bytesRead = 0
+                rc = True
+                while (rc == True):
+                    branch = 0
+                    leaf = 0
+                    (rc, bytesRead, value, branch, leaf) = handle_get_value(log, loadstr, bytesRead, branch, leaf)
+                    if (rc == True):
+                        log.info('Branch 0x{:0>2X} Leaf 0x{:0>4X}  value = {}'.format(branch, leaf, value))
+                    elif (branch != 0):
+                        log.info('Branch 0x{:0>2X} Leaf 0x{:0>4X}  no value'.format(branch, leaf))
+
+            # Set Response
+            elif (dpoeOpcode == DPoEOpcodes["Set Response"]):
+                (rc, branch, leaf, status) = check_set_resp_attrs(log, loadstr, 0)
+                if (rc == True):
+                    log.info('Set Response had no errors')
+                else:
+                    log.info('Branch 0x{:X} Leaf 0x{:0>4X} {}'.format(branch, leaf, DPoEVariableResponseEnum[status]))
+
+            # File Transfer ACK
+            elif (dpoeOpcode == DPoEOpcodes["File Transfer"]):
+                (rc, block) = handle_fx_ack(log, loadstr)
+            else:
+                log.info('Unsupported DPoE Opcode {:0>2X}'.format(dpoeOpcode))
+    else:
+        log.info('Invalid OAM Header')
+
+    return respType
+
+
+def mcastIp2McastMac(ip):
+    """ Convert a dot-notated IPv4 multicast address string into a multicast MAC address"""
+    digits = [int(d) for d in ip.split('.')]
+    return '01:00:5e:%02x:%02x:%02x' % (digits[1] & 0x7f, digits[2] & 0xff, digits[3] & 0xff)
+
+def get_olt_queue(mac, mode=None):
+    resultOltQueue = ""
+    if mode:
+        # If the MAC is the Multicast LLID, then use EPON encoding regardless of the actual
+        # mode we are in.
+        if (mac == "FFFFFFFFFFFF"):
+            mode = "EPON"
+
+        if mode.upper()[0] == "G":  #GPON
+            if mac[:4].upper() == ADTRAN_SHORTENED_VSSN:
+                vssn = "ADTN"
+            else:
+                vssn = "TBIT"
+            link = int(mac[4:12], 16)
+            resultOltQueue = "PortIngressRuleResultOLTQueue(unicastvssn=\"" + vssn + "\", unicastlink=" + str(link) + ")"
+        else:                       #EPON
+            vssn = int(mac[0:8].rjust(8,"0"), 16)
+            link = int((mac[8:12]).ljust(8,"0"), 16)
+            resultOltQueue = "PortIngressRuleResultOLTEPONQueue(unicastvssn=" + str(vssn) + ", unicastlink=" + str(link) + ")"
+    return resultOltQueue
+
+
+def get_unicast_logical_link(mac, mode=None):
+    unicastLogicalLink = ""
+    if mode:
+        if mode.upper()[0] == "G":  #GPON
+            if mac[:4].upper() == ADTRAN_SHORTENED_VSSN:
+                vssn = "ADTN"
+            else:
+                vssn = "TBIT"
+            link = int(mac[4:12], 16)
+            unicastLogicalLink = "OLTUnicastLogicalLink(unicastvssn=\"" + vssn + "\", unicastlink=" + str(link) + ")"
+        else:                       #EPON
+            vssn = int(mac[0:8].rjust(8,"0"), 16)
+            link = int((mac[8:12]).ljust(8,"0"), 16)
+            unicastLogicalLink = "OLTEPONUnicastLogicalLink(unicastvssn=" + str(vssn) + ", unicastlink=" + str(link) + ")"
+    return unicastLogicalLink
+
+
+if __name__ == "__main__":
+    pass
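
handle_get_value() above walks DPoE TLVs laid out as a one-byte branch, a two-byte leaf, a one-byte length, and then the value, stopping at a zero branch. A self-contained sketch of that layout (hypothetical branch/leaf numbers; it only mirrors the struct formats used above):

```python
import struct

# One TLV: branch 0x01, leaf 0x0002, length 1, value 42, then a zero
# terminator (handle_get_value stops when it reads branch == 0).
tlv = struct.pack('>BHB', 0x01, 0x0002, 1) + struct.pack('>B', 42) + b'\x00\x00\x00'

(branch, leaf) = struct.unpack_from('>BH', tlv, 0)   # -> (1, 2)
length = struct.unpack_from('>B', tlv, 3)[0]         # -> 1
value = struct.unpack_from('>B', tlv, 4)[0]          # -> 42
```

For the IGMP helper, mcastIp2McastMac('224.1.2.3') would yield '01:00:5e:01:02:03', since only the low 23 bits of the group address are mapped into the MAC.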
diff --git a/python/extensions/eoam/EOAM_Layers.py b/python/extensions/eoam/EOAM_Layers.py
new file mode 100755
index 0000000..4554bdb
--- /dev/null
+++ b/python/extensions/eoam/EOAM_Layers.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------#
+# Copyright (C) 2015 - 2016 by Tibit Communications, Inc.                  #
+# All rights reserved.                                                     #
+#                                                                          #
+#    _______ ____  _ ______                                                #
+#   /_  __(_) __ )(_)_  __/                                                #
+#    / / / / __  / / / /                                                   #
+#   / / / / /_/ / / / /                                                    #
+#  /_/ /_/_____/_/ /_/                                                     #
+#                                                                          #
+#--------------------------------------------------------------------------#
+
+import sys
+import inspect
+
+# SCAPY-specific imports
+from scapy.packet import Packet, bind_layers
+from scapy.fields import StrField, PacketField, X3BytesField
+from scapy.layers.l2 import Ether
+
+from EOAM_TLV import *
+
+# Layer 2 definitions
+EOAM_MULTICAST_ADDRESS = '01:80:c2:00:00:02'   # Well-known OAM Multicast address
+UNUSED_SOURCE_ADDRESS  = '12:34:56:78:9a:bc'   # for OAM frames sent over the CLI
+IGMP_MULTICAST_ADDRESS = '01:00:5e:00:00:01'   # IGMP Multicast address
+OAM_ETHERTYPE          = 0xA8C8                # Ethertype value used to identify a 1904.2 message
+
+VENDOR_SPECIFIC_OPCODE = 0xFE
+CABLELABS_OUI          = 0x001000              # CableLabs OUI (used for DPoE OAM messages)
+TIBIT_OUI              = 0x2AEA15              # Tibit OUI
+ITU_OUI                = 0x0019A7              # ITU OUI - used to encapsulate OMCI messages
+
+# Message Types which can be received from the Tibit OLT, Tibit ONU, DPoE ONU, or GPON ONT
+# over the 1904.2 transport
+RxedOamMsgTypeEnum = {
+    "Unknown"            : 0x00,
+    "Info"               : 0x01,    # Info PDU
+    "Event Notification" : 0x02,    # Event Notification - Tibit or DPoE Event
+    "DPoE Get Response"  : 0x03,    # DPoE Get Response
+    "DPoE Set Response"  : 0x04,    # DPoE Set Rewponse
+    "DPoE File Transfer" : 0x05,    # Specifically - a File Transfer ACK
+    "OMCI Message"       : 0x06,    # Contains an embedded OMCI message
+    }
+
+RxedOamMsgTypes = {v: k for k, v in RxedOamMsgTypeEnum.iteritems()}
+
+
+###############################################################
+# SCAPY Layer definitions used to parse 1904.2 messages
+###############################################################
+
+# OAM fields after the L2 addressing/VLAN tags
+# when the Ethertype is set to 0xA8C8
+class EOAMPayload(Packet):
+    name = 'EOAM Payload'
+    fields_desc = [
+        ByteEnumField("subtype", 0x03, SlowProtocolsSubtypeEnum),
+        XShortField("flags", 0x0050),
+        XByteField("opcode", VENDOR_SPECIFIC_OPCODE),
+    ]
+
+bind_layers(Ether, EOAMPayload, type=OAM_ETHERTYPE)
+
+
+# 1904.1 OAM Event
+class EOAM_EventMsg(Packet):
+    name = 'EOAM Event'
+    fields_desc = [
+        XShortField("sequence", 0x0001),
+        XByteField("tlv_type", VENDOR_SPECIFIC_OPCODE),
+        XByteField("length", 0x01),
+        X3BytesField("oui", CABLELABS_OUI),
+        PacketField("body", None, Packet),
+    ]
+
+bind_layers(EOAMPayload, EOAM_EventMsg, opcode=0x01)
+
+# Vendor-specific OAM message
+# indicated by an Opcode field set to 0xFE
+class EOAM_VendSpecificMsg(Packet):
+    name = "Vendor-Specific OAM"
+    fields_desc  = [
+        X3BytesField("oui", CABLELABS_OUI),
+    ]
+
+bind_layers(EOAMPayload, EOAM_VendSpecificMsg, opcode=VENDOR_SPECIFIC_OPCODE)
+
+# Tibit-specific OAM message
+# indicated by an OUI set to 0x2AEA15
+class EOAM_TibitMsg(Packet):
+    name = "Tibit OAM Message"
+    fields_desc  = [
+        ByteEnumField("dpoe_opcode", 0x01, DPoEOpcodeEnum),
+        PacketField("body", None, Packet),
+    ]
+
+bind_layers(EOAM_VendSpecificMsg, EOAM_TibitMsg, oui=TIBIT_OUI)
+
+# DPoE-specific OAM message
+# indicated by an OUI set to 0x001000
+class EOAM_DpoeMsg(Packet):
+    name = "DPoE OAM Message"
+    fields_desc  = [
+        ByteEnumField("dpoe_opcode", 0x01, DPoEOpcodeEnum),
+        PacketField("body", None, Packet),
+    ]
+
+bind_layers(EOAM_VendSpecificMsg, EOAM_DpoeMsg, oui=CABLELABS_OUI)
+
+# Embedded OMCI message
+# indicated by an OUI set to ITU OUI (0x0019A7)
+
+#class EOAM_OmciMsg(Packet):
+#    name = "OAM-encapsulated OMCI Message"
+#    fields_desc  = [
+#        XShortField("trans_id", 1),
+#        XByteField("msg_type", 0x49),
+#        XByteField("dev_id", 0x0A),
+#        XShortField("me_class", 0x0000),
+#        XShortField("me_inst", 0x0000),
+#        PacketField("body", None, Packet),
+#    ]
+
+class EOAM_OmciMsg(Packet):
+    name = "OAM-encapsulated OMCI Message"
+    fields_desc  = [
+        PacketField("body", None, Packet),
+    ]
+
+bind_layers(EOAM_VendSpecificMsg, EOAM_OmciMsg, oui=ITU_OUI)
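+
+# A minimal construction/dissection sketch (hypothetical frame): with the
+# bindings above, Scapy stacks each layer from the Ethertype down to the
+# DPoE opcode automatically.
+#   >>> frame = Ether(src=UNUSED_SOURCE_ADDRESS, dst=EOAM_MULTICAST_ADDRESS,
+#   ...               type=OAM_ETHERTYPE) / EOAMPayload() / \
+#   ...         EOAM_VendSpecificMsg(oui=CABLELABS_OUI) / \
+#   ...         EOAM_DpoeMsg(dpoe_opcode=DPoEOpcodes["Get Request"])
+#   >>> Ether(str(frame))[EOAM_DpoeMsg].dpoe_opcode
+#   1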
+
+###############################################################
+# End of SCAPY Layers
+###############################################################
+
+
+
diff --git a/python/extensions/eoam/EOAM_TLV.py b/python/extensions/eoam/EOAM_TLV.py
new file mode 100644
index 0000000..638f965
--- /dev/null
+++ b/python/extensions/eoam/EOAM_TLV.py
@@ -0,0 +1,2093 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------#
+# Copyright (C) 2015 - 2016 by Tibit Communications, Inc.                  #
+# All rights reserved.                                                     #
+#                                                                          #
+#    _______ ____  _ ______                                                #
+#   /_  __(_) __ )(_)_  __/                                                #
+#    / / / / __  / / / /                                                   #
+#   / / / / /_/ / / / /                                                    #
+#  /_/ /_/_____/_/ /_/                                                     #
+#                                                                          #
+#--------------------------------------------------------------------------#
+
+from scapy.packet import Packet
+from scapy.fields import ByteEnumField, XShortField, XByteField, MACField, \
+    ByteField, BitEnumField, BitField, ShortField
+from scapy.fields import XLongField, StrField, StrFixedLenField, XIntField, \
+    FieldLenField, StrLenField, IntField, ShortEnumField
+
+# This library strives to be an implementation of the following standard:
+
+# DPoE-SP-OAMv1.0-IO8-140807 - DPoE OAM Extensions Specifications
+
+# This library may be used with PON devices for
+# configuration and provisioning.
+
+## Note on Deviations:
+
+## Tibit endeavors to use DPoE OAM not only to communicate with DPoE ONUs,
+## but also to communicate with the Tibit OLT Microplug.  In places where this
+## document deviates from the DPoE standard for ONUs, Tibit has added a comment
+## __TIBIT_OLT_OAM__
+
+TIBIT_VERSION_NUMBER = '1.1.4'
+
+TLV_dictionary = {
+    0x00: "End EOAMPDU",
+    }
+
+SlowProtocolsSubtypeEnum = {0x03: "OAM"}
+
+### OAM Branch Enumerations
+OamBranchEnum = {
+    0x00: "End",
+    0x06: "Clause 30 Object",
+    0x07: "Clause 30 Attr",
+    0x09: "Clause 30 Action",
+    0xB7: "Tibit Attr",
+    0xB9: "Tibit Action",
+    0xC7: "DPoG Attr",
+    0xC9: "DPoG Action",
+    0xD6: "DPoE Object",
+    0xD7: "DPoE Attr",
+    0xD9: "DPoE Action",
+    }
+
+OamBranches = {v: k for k, v in OamBranchEnum.iteritems()}
+
+
+### Multicast Action Flags
+MulticastActionFlagsEnum = {
+    0x02: "Deregister",
+    0x03: "Register"
+    }
+
+### Table 17 - DPoE Opcodes
+DPoEOpcodeEnum = {
+    0x01: "Get Request",
+    0x02: "Get Response",
+    0x03: "Set Request",
+    0x04: "Set Response",
+    0x05: "Dynamic IP Multicast Control",
+    0x06: "Multicast Register",
+    0x07: "Multicast Register Response",
+    0x09: "File Transfer",
+    }
+
+DPoEOpcodes = {v: k for k, v in DPoEOpcodeEnum.iteritems()}
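+
+# The forward map decodes opcodes received off the wire; the reverse map
+# lets callers write names instead of raw values, e.g.:
+#   >>> DPoEOpcodes["Set Request"]
+#   3
+#   >>> DPoEOpcodeEnum[0x03]
+#   'Set Request'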
+
+
+### Table 20 - DPoE Variable Response Codes
+DPoEVariableResponseEnum = {
+    0x80: "No Error",
+    0x81: "Too Long",
+    0x86: "Bad Parameters",
+    0x87: "No Resources",
+    0x88: "System Busy",
+    0xa0: "Undetermined Error",
+    0xa1: "Unsupported",
+    0xa2: "May Be Corrupted",
+    0xa3: "Hardware Failure",
+    0xa4: "Overflow",
+    }
+
+DPoEVariableResponseCodes = {v: k for k, v in DPoEVariableResponseEnum.iteritems()}
+
+
+### Table 14 - DPoE Event Codes
+DPoEEventCodeEnum = {
+    0x11: "Loss of Signal",
+    0x12: "Key Exchange Failure",
+    0x21: "Port Disabled",
+    0x41: "Power Failure",
+    0x81: "Statistics Alarm",
+    0x82: "D-ONU Busy",
+    0x83: "MAC Table Overflow",
+    0x84: "PON Interface Switch",
+    }
+
+DPoEEventCodes = {v: k for k, v in DPoEEventCodeEnum.iteritems()}
+
+
+class SlowProtocolsSubtype(Packet):
+    """ Slow Protocols subtype"""
+    name = "Slow Protocols subtype"
+    fields_desc  = [ByteEnumField("subtype", 0x03, SlowProtocolsSubtypeEnum)]
+
+class FlagsBytes(Packet):
+    """ Two Bytes Reserved for 802.3 Flags"""
+    name = "FlagsBytes"
+    fields_desc  = [XShortField("flags", 0x0050)]
+
+class OAMPDU(Packet):
+    """ OAMPDU code: Organization Specific"""
+    name = "OAMPDU code: Organization Specific"
+    fields_desc  = [XByteField("opcode", 0xfe)]
+
+class CablelabsOUI(Packet):
+    """ Organizationally Unique Identifier (Cablelabs)"""
+    name = "Organizationally Unique Identifier (Cablelabs)"
+    fields_desc  = [XByteField("oui0", 0x00),
+                    XByteField("oui1", 0x10),
+                    XByteField("oui2", 0x00)]
+
+class BroadcomOUI(Packet):
+    """ Organizationally Unique Identifier (Broadcom)"""
+    name = "Organizationally Unique Identifier (Broadcom)"
+    fields_desc  = [XByteField("oui0", 0x00),
+                    XByteField("oui1", 0x0D),
+                    XByteField("oui2", 0xB6)]
+
+class TibitOUI(Packet):
+    """ Organizationally Unique Identifier (Tibit)"""
+    name = "Organizationally Unique Identifier (Tibit)"
+    fields_desc  = [XByteField("oui0", 0x2A),
+                    XByteField("oui1", 0xEA),
+                    XByteField("oui2", 0x15)]
+
+class ItuOUI(Packet):
+    """ Organizationally Unique Identifier (Tibit)"""
+    name = "Organizationally Unique Identifier (ITU)"
+    fields_desc  = [XByteField("oui0", 0x00),
+                    XByteField("oui1", 0x19),
+                    XByteField("oui2", 0xA7)]
+
+class DPoEOpcode_GetRequest(Packet):
+    """ DPoE Opcode"""
+    name = "DPoE Opcode"
+    fields_desc  = [ByteEnumField("opcode", 0x01, DPoEOpcodeEnum)]
+
+class DPoEOpcode_SetRequest(Packet):
+    """ DPoE Opcode"""
+    name = "DPoE Opcode"
+    fields_desc  = [ByteEnumField("opcode", 0x03, DPoEOpcodeEnum)]
+
+class DPoEOpcode_MulticastRegister(Packet):
+    """ DPoE Opcode"""
+    name = "DPoE Opcode"
+    fields_desc  = [ByteEnumField("opcode", 0x06, DPoEOpcodeEnum)]
+
+class DPoEOpcode_MulticastRegisterResponse(Packet):
+    """ DPoE Opcode"""
+    name = "DPoE Opcode"
+    fields_desc  = [ByteEnumField("opcode", 0x07, DPoEOpcodeEnum)]
+
+class DPoEOpcode_FileTransfer(Packet):
+    """ DPoE Opcode"""
+    name = "DPoE Opcode"
+    fields_desc  = [ByteEnumField("opcode", 0x09, DPoEOpcodeEnum)]
+
+class MulticastRegisterSetSumitomo01(Packet):
+    """ Multicast Register: Multicast Register Set Sumitomo 01 """
+    name = "Multicast Register: Multicast Register Set Sumitomo 01"
+    fields_desc = [ByteEnumField("ActionFlags", 0x02, MulticastActionFlagsEnum),
+                   XShortField("MulticastLink", 0xfffe),
+                   XShortField("UnicastLink", 0x43dc),
+                   ]
+
+class MulticastRegisterSetSumitomo02(Packet):
+    """ Multicast Register: Multicast Register Set Sumitomo 02 """
+    name = "Multicast Register: Multicast Register Set Sumitomo 02"
+    fields_desc = [ByteEnumField("ActionFlags", 0x03, MulticastActionFlagsEnum),
+                   XShortField("MulticastLink", 0x43dd),
+                   XShortField("UnicastLink", 0x43dc),
+                   ]
+
+class MulticastRegisterSet(Packet):
+    """ Multicast Register: Multicast Register Set """
+    name = "Multicast Register: Multicast Register Set"
+    fields_desc = [ByteEnumField("ActionFlags", 0x03, MulticastActionFlagsEnum),
+                   XShortField("MulticastLink", 0x0000),
+                   XShortField("UnicastLink", 0x0000),
+                   ]
+
+####
+#### OAM Context OBJECTS
+####
+
+### Object Context Enumerations
+ObjectContextEnum = {
+    0x0000: "Device",
+    0x0001: "PON Port",
+    0x0002: "Unicast Logical Link",
+    0x0003: "Enet Port",
+    0x0004: "Queue",
+    0x0005: "SOAM MEP",
+    0x0006: "Multicast Link",
+    0x0007: "T-CONT",
+# __TIBIT_OLT_OAM__: Defined by Tibit
+    0x0009: "ONU",
+    0x000A: "OLT Unicast Link",
+    0x000B: "GPIO",
+    }
+
+ObjectContexts = {v: k for k, v in ObjectContextEnum.iteritems()}
+
+
+class DONUObject(Packet):
+    """ Object Context: D-ONU Object """
+    name = "Object Context: D-ONU Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0000, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("number", 0)
+                   ]
+
+# __TIBIT_OLT_OAM__: Defined by Tibit
+class DOLTObject(Packet):
+    """ Object Context: D-OLT Object """
+    name = "Object Context: D-OLT Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0000, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("number", 0)
+                   ]
+
+class NetworkPortObject(Packet):
+    """ Object Context: Network Port Object """
+    name = "Object Context: Network Port Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0001, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("number", 0)
+                   ]
+
+# __TIBIT_OLT_OAM__: Defined by Tibit
+class PonPortObject(Packet):
+    """ Object Context: PON Port Object """
+    name = "Object Context: PON Port Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0001, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("number", 0)
+                   ]
+
+class UnicastLogicalLink(Packet):
+    """ Object Context: Unicast Logical Link """
+    name = "Object Context: Unicast Logical Link"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0002, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("number", 0)
+                   ]
+
+# __TIBIT_OLT_OAM__: Defined by Tibit
+class OLTUnicastLogicalLink(Packet):
+    """ Object Context: OLT Unicast Logical Link """
+    name = "Object Context: OLT Unicast Logical Link"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x000A, ObjectContextEnum),
+                   XByteField("length", 10),
+                   XByteField("pon", 0),
+                   StrField("unicastvssn", "TBIT"),
+                   XIntField("unicastlink", 0x00000000),
+                   XByteField("pad", 0),
+                   ]
+
+class OLTEPONUnicastLogicalLink(Packet):
+    """ Object Context: OLT Unicast Logical Link """
+    name = "Object Context: OLT Unicast Logical Link"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x000A, ObjectContextEnum),
+                   XByteField("length", 10),
+                   XByteField("pon", 0),
+                   XIntField("unicastvssn", 0x00000000),
+                   XIntField("unicastlink", 0x00000000),
+                   XByteField("pad", 0),
+                   ]
+
+
+# __TIBIT_OLT_OAM__: Defined by Tibit
+class NetworkToNetworkPortObject(Packet):
+    """ Object Context: Network-to-Network (NNI) Port Object """
+    name = "Object Context: Network-to-Network (NNI) Port Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0003, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("number", 0)
+                   ]
+
+class UserPortObject(Packet):
+    """ Object Context: User Port Object """
+    name = "Object Context: User Port Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0003, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("number", 0)
+                   ]
+
+class QueueObject(Packet):
+    """ Object Context: Queue Object """
+    name = "Object Context: Queue Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0004, ObjectContextEnum),
+                   XByteField("length", 2),
+                   XByteField("instance", 0),
+                   XByteField("number", 0)
+                   ]
+
+class ONUObject(Packet):
+    """ Object Context: ONU Object """
+    name = "Object Context: ONU Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x0009, ObjectContextEnum),
+                   XByteField("length", 6),
+                   MACField("mac", "54:42:e2:22:11:00")
+                   ]
+
+class GpioObject(Packet):
+    """ Object Context: GPIO Object """
+    name = "Object Context: GPIO Object"
+    fields_desc = [ByteEnumField("branch", 0xD6, OamBranchEnum),
+                   ShortEnumField("leaf", 0x000B, ObjectContextEnum),
+                   XByteField("length", 1),
+                   XByteField("condition", 1)
+                   ]
+
+####
+#### 0x09 - BRANCH ATTRIBUTES
+####
+class PhyAdminControl(Packet):
+    """ Variable Descriptor: Phy Admin Control """
+    name = "Phy Admin Control"
+    fields_desc = [ByteEnumField("branch", 0x09, OamBranchEnum),
+                   XShortField("leaf", 0x0005),
+                   ]
+
+class PhyAdminControlEnableSet(Packet):
+    """ Variable Descriptor: Phy Admin Control Enable """
+    name = "Phy Admin Control Enable"
+    fields_desc = [ByteEnumField("branch", 0x09, OamBranchEnum),
+                   XShortField("leaf", 0x0005),
+                   XByteField("length", 1),
+                   XByteField("value", 2)
+                   ]
+
+class PhyAdminControlDisableSet(Packet):
+    """ Variable Descriptor: Phy Admin Control Disable """
+    name = "Phy Admin Control Disable"
+    fields_desc = [ByteEnumField("branch", 0x09, OamBranchEnum),
+                   XShortField("leaf", 0x0005),
+                   XByteField("length", 1),
+                   XByteField("value", 1)
+                   ]
+
+####
+#### 0xd7 - BRANCH ATTRIBUTES
+####
+class DeviceId(Packet):
+    """ Variable Descriptor: Device ID """
+    name = "Device ID"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0002)]
+
+class FirmwareInfo(Packet):
+    """ Variable Descriptor: Firmware Info """
+    name = "Firmware Info"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0003)]
+
+class ChipsetInfo(Packet):
+    """ Variable Descriptor: Chipset Info """
+    name = "Chipset Info"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0004)]
+
+class DateOfManufacture(Packet):
+    """ Variable Descriptor: Date of Manufacture """
+    name = "Date of Manufacture"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0005)]
+
+class ManufacturerInfo(Packet):
+    """ Variable Descriptor: ManufacturerInfo """
+    name = "ManufacturerInfo"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0006)]
+
+class MaxLogicalLinks(Packet):
+    """ Variable Descriptor: Max Logical Links """
+    name = "Max Logical Links"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0007)]
+
+class NumberOfNetworkPorts(Packet):
+    """ Variable Descriptor: Number of Network Ports """
+    name = "Number of Network Ports"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0008)]
+
+class NumberOfS1Interfaces(Packet):
+    """ Variable Descriptor: Number of S1 Interfaces """
+    name = "Number of S1 Interfaces"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0009)]
+
+class DONUPacketBuffer(Packet):
+    """ Variable Descriptor: D-ONU Packet Buffer """
+    name = "D-ONU Packet Buffer"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000a)]
+
+class ReportThresholds(Packet):
+    """ Variable Descriptor: Report Thresholds """
+    name = "Report Thresholds"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000b),
+                   ]
+
+DFLT_NUM_QUEUE_SETS  = 4
+DFLT_NUM_REPORT_VALS = 1
+
+class ReportThresholdsSet(Packet):
+    """ Variable Descriptor: Report Thresholds Set """
+    name = "Report Thresholds Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000b),
+                   XByteField("length", 0x0a),
+                   XByteField("num_queue_sets", 4),
+                   XByteField("values", 1),
+                   XShortField("threshold0", 0x800),
+                   XShortField("threshold1", 0x1000),
+                   XShortField("threshold2", 0x1800),
+                   XShortField("threshold3", 0x2000),
+                   ]
+
+class UnicastLogicalLinkReportThresholdsSet(Packet):
+    """ Variable Descriptor: Report Thresholds Unicast Logical Link Set"""
+    name = "Report Thresholds Unicast Logical Link Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000b),
+                   XByteField("length", 0x0a),
+                   XByteField("num_queue_sets", 4),
+                   XByteField("values", 1),
+                   XShortField("threshold0", 0x2800),
+                   XShortField("threshold1", 0x5000),
+                   XShortField("threshold2", 0x7800),
+                   XShortField("threshold3", 0xa000),
+                   ]
+
+class LogicalLinkForwarding(Packet):
+    """ Variable Descriptor: Logical Link Forwarding """
+    name = "Logical Link Forwarding"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000c),
+                   ]
+
+class OamFrameRate(Packet):
+    """ Variable Descriptor: OAM Frame Rate """
+    name = "OAM Frame Rate"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000d),
+                   ]
+
+class OamFrameRateSet(Packet):
+    """ Variable Descriptor: OAM Frame Rate """
+    name = "OAM Frame Rate"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000d),
+                   XByteField("length", 2),
+                   XByteField("max", 12),
+                   XByteField("min", 10),
+                   ]
+
+class OnuManufacturerOrganizationName(Packet):
+    """ Variable Descriptor: ONU Manufacturer Organization Name """
+    name = "ONU Manufacturer Organization Name"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000e),
+                   ]
+
+class FirmwareMfgTimeVaryingControls(Packet):
+    """ Variable Descriptor: Firmware Mfg Time Varying Controls """
+    name = "Firmware Mfg Time Varying Controls"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x000f),
+                   ]
+
+class VendorName(Packet):
+    """ Variable Descriptor: Vendor Name """
+    name = "Vendor Name"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0011),
+                   ]
+
+class ModelNumber(Packet):
+    """ Variable Descriptor: Model Number """
+    name = "Model Number"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0012),
+                   ]
+
+class HardwareVersion(Packet):
+    """ Variable Descriptor: Hardware Version """
+    name = "Hardware Version"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0013),
+                   ]
+
+class EponMode(Packet):
+    """ Variable Descriptor: EPON Mode """
+    name = "EPON Mode"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0014),
+                   ]
+
+class DynamicAddressAgeLimit(Packet):
+    """ Variable Descriptor: Dynamic Address Age Limit """
+    name = "Dynamic Address Age Limit"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0102),
+                   ]
+
+class DynamicAddressAgeLimitSet(Packet):
+    """ Variable Descriptor: Dynamic Address Age Limit Set """
+    name = "Dynamic Address Age Limit Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0102),
+                   XByteField("length", 2),
+                   XShortField("value", 0x0000),
+                   ]
+
+class DynamicMacTable(Packet):
+    """ Variable Descriptor: Dynamic MAC Table """
+    name = "Dynamic MAC Table"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0103),
+                   ]
+
+class StaticMacTable(Packet):
+    """ Variable Descriptor: Static MAC Table """
+    name = "Static MAC Table"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0104),
+                   ]
+
+class SourceAddressAdmissionControl(Packet):
+    """ Variable Descriptor: Source Address Admission Control """
+    name = "Source Address Admission Control"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0106),
+                   ]
+
+class SourceAddressAdmissionControlSet(Packet):
+    """ Variable Descriptor: Source Address Admission Control Set """
+    name = "Source Address Admission Control Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0106),
+                   XByteField("length", 1),
+                   XByteField("value", 1),
+                   ]
+
+class MacLearningMinGuarantee(Packet):
+    """ Variable Descriptor: MAC Learning MIN Guarantee """
+    name = "MAC Learning MIN Guarantee"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0107),
+                   ]
+
+class MacLearningMinGuaranteeSet(Packet):
+    """ Variable Descriptor: MAC Learning MIN Guarantee Set """
+    name = "MAC Learning MIN Guarantee Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0107),
+                   XByteField("length", 2),
+                   XShortField("value", 0),
+                   ]
+
+class MacLearningMaxAllowed(Packet):
+    """ Variable Descriptor: MAC Learning MAX Allowed """
+    name = "MAC Learning MAX Allowed"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0108),
+                   ]
+
+class MacLearningMaxAllowedSet(Packet):
+    """ Variable Descriptor: MAC Learning MAX Allowed Set """
+    name = "MAC Learning MAX Allowed Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0108),
+                   XByteField("length", 2),
+                   XShortField("value", 0x0010),
+                   ]
+
+class MacLearningAggregateLimit(Packet):
+    """ Variable Descriptor: MAC Learning Aggregate Limit """
+    name = "MAC Learning Aggregate Limit"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0109),
+                   ]
+
+class MacLearningAggregateLimitSet(Packet):
+    """ Variable Descriptor: MAC Learning Aggregate Limit Set """
+    name = "MAC Learning Aggregate Limit Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0109),
+                   XByteField("length", 2),
+                   XShortField("value", 0x0040),
+                   ]
+
+class FloodUnknown(Packet):
+    """ Variable Descriptor: Flood Unknown """
+    name = "Flood Unknown"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010b),
+                   ]
+
+class FloodUnknownSet(Packet):
+    """ Variable Descriptor: Flood Unknown Set """
+    name = "Flood Unknown Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010b),
+                   XByteField("length", 1),
+                   XByteField("value", 1),
+                   ]
+
+class LocalSwitching(Packet):
+    """ Variable Descriptor: Local Switching """
+    name = "Local Switching"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010c),
+                   ]
+
+class LocalSwitchingSet(Packet):
+    """ Variable Descriptor: Local Switching Set """
+    name = "Local Switching Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010c),
+                   XByteField("length", 1),
+                   XByteField("value", 0),
+                   ]
+
+class LLIDQueueConfiguration(Packet):
+    """ Variable Descriptor: LLID Queue Configuration """
+    name = "LLID Queue Configuration"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010d),
+                   ]
+
+DFLT_NUM_ONU_LLIDS   = 1
+DFLT_NUM_LLID_QUEUES = 1
+DFLT_NUM_UNI_PORTS   = 1
+DFLT_NUM_PORT_QUEUES = 1
+DFLT_LLID_QUEUE_SIZE = 0xA0
+
+class LLIDQueueConfigurationSet(Packet):
+    """ Variable Descriptor: LLID Queue Configuration """
+    name = "LLID Queue Configuration"
+
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010d),
+                   XByteField("length", 6),
+                   XByteField("numLLID",    DFLT_NUM_ONU_LLIDS),
+                   XByteField("LLID0-numq", DFLT_NUM_LLID_QUEUES),
+                   XByteField("l0Q0-size",  DFLT_LLID_QUEUE_SIZE),
+                   XByteField("numPort",    DFLT_NUM_UNI_PORTS),
+                   XByteField("Port0-numq", DFLT_NUM_PORT_QUEUES),
+                   XByteField("p0Q0-size",  DFLT_LLID_QUEUE_SIZE),
+                   ]
+
+
+
+class LLIDQueueConfiguration16Set(Packet):
+    """ Variable Descriptor: LLID Queue Configuration """
+    name = "LLID Queue Configuration"
+
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010d),
+                   XByteField("length", 36),
+                   XByteField("numLLID",    16),
+                   XByteField("LLID0-numq", 1),
+                   XByteField("l0Q0-size",  32),
+                   XByteField("LLID1-numq", 1),
+                   XByteField("l1Q0-size",  32),
+                   XByteField("LLID2-numq", 1),
+                   XByteField("l2Q0-size",  32),
+                   XByteField("LLID3-numq", 1),
+                   XByteField("l3Q0-size",  32),
+                   XByteField("LLID4-numq", 1),
+                   XByteField("l4Q0-size",  32),
+                   XByteField("LLID5-numq", 1),
+                   XByteField("l5Q0-size",  32),
+                   XByteField("LLID6-numq", 1),
+                   XByteField("l6Q0-size",  32),
+                   XByteField("LLID7-numq", 1),
+                   XByteField("l7Q0-size",  32),
+                   XByteField("LLID8-numq", 1),
+                   XByteField("l8Q0-size",  32),
+                   XByteField("LLID9-numq", 1),
+                   XByteField("l9Q0-size",  32),
+                   XByteField("LLID10-numq", 1),
+                   XByteField("l10Q0-size",  32),
+                   XByteField("LLID11-numq", 1),
+                   XByteField("l11Q0-size",  32),
+                   XByteField("LLID12-numq", 1),
+                   XByteField("l12Q0-size",  32),
+                   XByteField("LLID13-numq", 1),
+                   XByteField("l13Q0-size",  32),
+                   XByteField("LLID14-numq", 1),
+                   XByteField("l14Q0-size",  32),
+                   XByteField("LLID15-numq", 1),
+                   XByteField("l15Q0-size",  16),
+                   XByteField("numPort",    DFLT_NUM_UNI_PORTS),
+                   XByteField("Port0-numq", DFLT_NUM_PORT_QUEUES),
+                   XByteField("p0Q0-size",  DFLT_LLID_QUEUE_SIZE),
+                   ]
+
+
+
+class LLIDQueueConfigurationSetData(Packet):
+    """ Variable Descriptor: LLID Queue Configuration """
+    name = "LLID Queue Configuration"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010d),
+                   FieldLenField("length", None, length_of="data", fmt="B"),
+                   StrLenField("data", "", length_from=lambda x:x.length),
+                  ]
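+
+# A minimal sketch (hypothetical payload): FieldLenField fills in "length"
+# from the data at build time, so callers only supply the raw bytes.
+#   >>> pkt = LLIDQueueConfigurationSetData(data='\x01\x01\xa0\x01\x01\xa0')
+#   >>> ord(str(pkt)[3])
+#   6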
+
+
+class FirmwareFilename(Packet):
+    """ Variable Descriptor: Firmware Filename """
+    name = "Firmware Filename"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x010e),
+                   ]
+
+
+####
+#### 0xD9 - MAC Table Operations - Dynamic and Static
+####
+
+class ClearDynamicMacTable(Packet):
+    """ Variable Descriptor: Clear Dynamic MAC Table """
+    name = "Clear Dynamic MAC Table"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0101),
+                   ]
+
+class AddDynamicMacAddress(Packet):
+    """ Variable Descriptor: Add Dynamic MAC Address """
+    name = "Add Dynamic MAC Address"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0102),
+                   ]
+
+class DeleteDynamicMacAddress(Packet):
+    """ Variable Descriptor: Delete Dynamic MAC Address """
+    name = "Delete Dynamic MAC Address"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0103),
+                   ]
+
+class ClearStaticMacTable(Packet):
+    """ Variable Descriptor: Clear Static MAC Table """
+    name = "Clear Static MAC Table"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0104),
+                   ]
+
+class AddStaticMacAddress(Packet):
+    """ Variable Descriptor: Add Static MAC Address """
+    name = "Add Static MAC Address"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0105),
+                   ByteField("length", 6),
+                   MACField("mac", "01:00:5e:00:00:00"),
+                   ]
+
+class DeleteStaticMacAddress(Packet):
+    """ Variable Descriptor: Delete Static MAC Address """
+    name = "Delete Static MAC Address"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0106),
+                   ByteField("length", 6),
+                   MACField("mac", "01:00:5e:00:00:00"),
+                   ]
+
+####
+#### 0xd7 - STATISTICS
+####
+
+class RxFramesGreen(Packet):
+    """ Variable Descriptor: RxFramesGreen """
+    name = "RxFramesGreen"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0201),
+                   ]
+
+class TxFramesGreen(Packet):
+    """ Variable Descriptor: TxFramesGreen """
+    name = "TxFramesGreen"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0202),
+                   ]
+
+class RxFrame_64(Packet):
+    """ Variable Descriptor: RxFrame_64 """
+    name = "RxFrame_64"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0204),
+                   ]
+
+class RxFrame_65_127(Packet):
+    """ Variable Descriptor: RxFrame_65_127 """
+    name = "RxFrame_65_127"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0205),
+                   ]
+
+class RxFrame_128_255(Packet):
+    """ Variable Descriptor: RxFrame_128_255 """
+    name = "RxFrame_128_255"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0206),
+                   ]
+
+class RxFrame_256_511(Packet):
+    """ Variable Descriptor: RxFrame_256_511 """
+    name = "RxFrame_256_511"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0207),
+                   ]
+
+class RxFrame_512_1023(Packet):
+    """ Variable Descriptor: RxFrame_512_1023 """
+    name = "RxFrame_512_1023"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0208),
+                   ]
+
+class RxFrame_1024_1518(Packet):
+    """ Variable Descriptor: RxFrame_1024_1518 """
+    name = "RxFrame_1024_1518"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0209),
+                   ]
+
+class RxFrame_1519Plus(Packet):
+    """ Variable Descriptor: RxFrame_1024_1518 """
+    name = "RxFrame_1519_Plus"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x020A),
+                   ]
+
+class TxFrame_64(Packet):
+    """ Variable Descriptor: TxFrame_64 """
+    name = "TxFrame_64"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x020B),
+                   ]
+
+class TxFrame_65_127(Packet):
+    """ Variable Descriptor: TxFrame_65_127 """
+    name = "TxFrame_65_127"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x020C),
+                   ]
+
+class TxFrame_128_255(Packet):
+    """ Variable Descriptor: TxFrame_128_255 """
+    name = "TxFrame_128_255"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x020D),
+                   ]
+
+class TxFrame_256_511(Packet):
+    """ Variable Descriptor: TxFrame_256_511 """
+    name = "TxFrame_256_511"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x020E),
+                   ]
+
+class TxFrame_512_1023(Packet):
+    """ Variable Descriptor: TxFrame_512_1023 """
+    name = "TxFrame_512_1023"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x020F),
+                   ]
+
+class TxFrame_1024_1518(Packet):
+    """ Variable Descriptor: TxFrame_1024_1518 """
+    name = "TxFrame_1024_1518"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0210),
+                   ]
+
+class TxFrame_1519Plus(Packet):
+    """ Variable Descriptor: TxFrame_1024_1518 """
+    name = "TxFrame_1519_Plus"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0211),
+                   ]
+
+class FramesDropped(Packet):
+    """ Variable Descriptor: Frames Dropped """
+    name = "Frames Dropped"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0214),
+                   ]
+
+class BytesDropped(Packet):
+    """ Variable Descriptor: Bytes Dropped """
+    name = "Bytes Dropped"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0215),
+                   ]
+
+class TxBytesUnused(Packet):
+    """ Variable Descriptor: Tx Bytes Unused """
+    name = "Tx Bytes Unused"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0217),
+                   ]
+
+class TxL2Errors(Packet):
+    """ Variable Descriptor: TxL2Errors """
+    name = "TxL2Errors"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0235),
+                   ]
+
+class RxL2Errors(Packet):
+    """ Variable Descriptor: RxL2Errors """
+    name = "RxL2Errors"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0236),
+                   ]
+
+####
+#### 0xD7 - Alarm Reporting
+####
+
+class AlarmReporting(Packet):
+    """ Variable Descriptor: Alarm Reporting """
+    name = "Alarm Reporting"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0303),
+                   ]
+
+class AlarmReportingSet(Packet):
+    """ Variable Descriptor: Alarm Reporting Set """
+    name = "Alarm Reporting Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0303),
+                   XByteField("length", 6),
+                   XShortField("LOS", 0x1101),
+                   XShortField("KeyExchange", 0x1201),
+                   XShortField("PortDisbled", 0x2101),
+                   ]
+
+####
+#### 0xD7 - Encryption, FEC, and Queue CIR/EIR
+####
+class EncryptionMode(Packet):
+    """ Variable Descriptor: Encryption Mode """
+    name = "Encryption Mode"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0402),
+                   ]
+
+class EncryptionModeSet(Packet):
+    """ Variable Descriptor: Encryption Mode Set """
+    name = "Encryption Mode Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0402),
+                   XByteField("length", 1),
+                   XByteField("value", 0),
+                   ]
+
+class IpmcForwardingRuleConfiguration(Packet):
+    """ Variable Descriptor: IPMC Forwarding Rule Configuration """
+    name = "IPMC Forwarding Rule Configuration"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0505),
+                   XByteField("length", 2),
+                   XShortField("value", 0x0000),
+                   ]
+
+class QueueCommittedInformationRate(Packet):
+    """ Variable Descriptor: Queue Committed Information Rate """
+    name = "Queue Committed Information Rate"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0604),
+                   ]
+
+class QueueCommittedInformationRateSet(Packet):
+    """ Variable Descriptor: Queue Committed Information Rate Set """
+    name = "Queue Committed Information Rate Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0604),
+                   XByteField("length", 6),
+                   XShortField("burst", 0x0fff),
+                   XShortField("CIR_UPPER", 0x0000),
+                   XShortField("CIR_LOWER", 0xffff),
+                   ]
+
+class FECMode(Packet):
+    """ Variable Descriptor: FEC Mode """
+    name = "FEC Mode"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0605),
+                   ]
+
+class FECModeSet(Packet):
+    """ Variable Descriptor: FEC Mode """
+    name = "FEC Mode"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0605),
+                   XByteField("length", 2),
+                   XByteField("downstream", 0x01),
+                   XByteField("upstream", 0x01),
+                   ]
+
+class MediaType(Packet):
+    """ Variable Descriptor: Media Type """
+    name = "Media Type"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0822),
+                   ]
+
+
+####
+#### 0xD7 - Port Ingress Rules
+####
+RuleSubtypeEnum = {  0x00: "Terminator",
+                     0x01: "Header",
+                     0x02: "Clause",
+                     0x03: "Result",
+                     }
+
+ClauseSubtypeEnum = {0x00: "LLID Index",
+                     0x01: "L2 Destination MAC address",
+                     0x02: "L2 Source MAC address",
+                     0x03: "L2 Type/Len",
+                     0x04: "B-DA",
+                     0x05: "B-SA",
+                     0x06: "I-Tag",
+                     0x07: "S-VLAN Tag",
+                     0x08: "C-VLAN Tag",
+                     0x09: "MPLS Label Stack Entry",
+                     0x0a: "IPv4 TOS/IPv6 Traffic Class",
+                     0x0b: "IPv4 TTL/IPv6 Hop Limit",
+                     0x0c: "IPv4/IPv6 Protocol Type",
+                     0x0d: "IPv4 Source Address",
+                     0x0e: "IPv6 Source Address",
+                     0x0f: "IPv4 Destination Address",
+                     0x10: "IPv6 Destination Address",
+                     0x11: "IPv6 Next Header",
+                     0x12: "IPv6 Flow Header",
+                     0x13: "TCP/UDP source port",
+                     0x14: "TCP/UDP destination port",
+                     0x15: "B-Tag",
+                     0x16: "Reserved",
+                     0x17: "Reserved",
+                     0x18: "Custom field 0",
+                     0x19: "Custom field 1",
+                     0x1a: "Custom field 2",
+                     0x1b: "Custom field 3",
+                     0x1c: "Custom field 4",
+                     0x1d: "Custom field 5",
+                     0x1e: "Custom field 6",
+                     0x1f: "Custom field 7",
+                     }
+
+RuleOperatorEnum = { 0x00: "F",           #False
+                     0x01: "==",
+                     0x02: "!=",
+                     0x03: "<=",
+                     0x04: ">=",
+                     0x05: "exists",
+                     0x06: "!exist",
+                     0x07: "T",           #True
+                     }
+
+RuleResultsEnum =  { 0x00: "NOP",
+                     0x01: "Discard",
+                     0x02: "Forward",
+                     0x03: "Queue",
+                     0x04: "Set",
+                     0x05: "Copy",
+                     0x06: "Delete",
+                     0x07: "Insert",
+                     0x08: "Replace",
+                     0x09: "Clear Delete",
+                     0x0a: "Clear Insert",
+                     0x0b: "Increment Counter",
+                     # Tibit-specific values
+                     0x13: "OLT Queue",
+                     0x14: "Learning Group"
+                     }
+
+RuleClauses = {v: k for k, v in ClauseSubtypeEnum.iteritems()}
+RuleOperators = {v: k for k, v in RuleOperatorEnum.iteritems()}
+RuleResults = {v: k for k, v in RuleResultsEnum.iteritems()}
+
+class PortIngressRule(Packet):
+    """ Variable Descriptor: Port Ingress Rule """
+    name = "Port Ingress Rule"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ]
+
+class PortIngressRuleHeader(Packet):
+    """ Variable Descriptor: Port Ingress Rule Header """
+    name = "Port Ingress Rule Header"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 2),
+                   XByteField("subtype", 0x01), # Header
+                   ByteField("precedence", 12),
+                   ]
+
+class PortIngressRuleClause(Packet):
+    """ Variable Descriptor: Port Ingress Rule Clause """
+    name = "Port Ingress Rule Clause"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   FieldLenField("length", None, length_of="match", fmt="B", adjust=lambda pkt,x: x+7),
+                   XByteField("subtype", 0x02), #Clause
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   XByteField("operator", 0x7), # T
+                   XByteField("matchlength", 0),
+                   StrLenField("match", "", length_from=lambda x:x.matchlength),
+                   ]
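+
+# A minimal sketch (hypothetical VLAN match): the adjust hook adds the
+# seven fixed bytes (subtype through matchlength) to the match length,
+# so a 2-byte match builds with length 9.
+#   >>> clause = PortIngressRuleClause(fieldcode=RuleClauses["C-VLAN Tag"],
+#   ...                                operator=RuleOperators["=="],
+#   ...                                matchlength=2, match='\x00\x64')
+#   >>> ord(str(clause)[3])
+#   9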
+
+class PortIngressRuleResultNoData(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result NOP """
+    name = "Rule Result NOP"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 2),
+                   XByteField("subtype", 0x03), # Result
+                   ByteField("resulttype", 0x00),
+                   ]
+
+class PortIngressRuleClauseMatchLength00(Packet):
+    """ Variable Descriptor: Port Ingress Rule Clause """
+    name = "Port Ingress Rule Clause"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 7),
+                   XByteField("clause", 2),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   XByteField("operator", 0),
+                   XByteField("matchlength", 0),
+                   ]
+
+class PortIngressRuleClauseAlwaysMatch(Packet):
+    """ Variable Descriptor: Port Ingress Rule Clause """
+    name = "Port Ingress Rule Clause"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 7),
+                   XByteField("clause", 2),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   XByteField("operator", 7),
+                   XByteField("matchlength", 0),
+                   ]
+
+class PortIngressRuleClauseMatchLength01(Packet):
+    """ Variable Descriptor: Port Ingress Rule Clause """
+    name = "Port Ingress Rule Clause"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 8),
+                   XByteField("clause", 2),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   XByteField("operator", 0),
+                   XByteField("matchlength", 1),
+                   XByteField("match", 0),
+                   ]
+
+class PortIngressRuleClauseMatchLength02(Packet):
+    """ Variable Descriptor: Port Ingress Rule Clause """
+    name = "Port Ingress Rule Clause"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 9),
+                   XByteField("clause", 2),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   XByteField("operator", 0),
+                   XByteField("matchlength", 2),
+                   XShortField("match", 0)
+                   ]
+
+
+class PortIngressRuleClauseMatchLength06(Packet):
+    """ Variable Descriptor: Port Ingress Rule Clause """
+    name = "Port Ingress Rule Clause"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 13),
+                   XByteField("clause", 2),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   XByteField("operator", 0),
+                   XByteField("matchlength", 6),
+                   XByteField("match0", 0x01),
+                   XByteField("match1", 0x80),
+                   XByteField("match2", 0xc2),
+                   XByteField("match3", 0x00),
+                   XByteField("match4", 0x00),
+                   XByteField("match5", 0x00),
+                   ]
+
+class PortIngressRuleResultForward(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Forward """
+    name = "Port Ingress Rule Result Forward"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 2),
+                   XByteField("result", 3),
+                   XByteField("forward", 2),
+                   ]
+
+class PortIngressRuleResultDiscard(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Discard """
+    name = "Port Ingress Rule Result Discard"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 2),
+                   XByteField("result", 3),
+                   XByteField("discard", 1),
+                   ]
+
+class PortIngressRuleResultQueue(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Queue """
+    name = "Port Ingress Rule Result Queue"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 6),
+                   XByteField("result", 3),
+                   XByteField("queuerule", 3),
+                   XShortField("objecttype", 0x0000),
+                   XByteField("instance", 0),
+                   XByteField("queuenum", 0),
+                   ]
+
+# __TIBIT_OLT_OAM__: Defined by Tibit
+class PortIngressRuleResultOLTQueue(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result OLT Queue """
+    name = "Port Ingress Rule Result OLT Queue"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 15),
+                   XByteField("result", 3),
+                   XByteField("oltqueuerule", 0x13),
+                   XShortField("objecttype", 0x0001),
+                   XByteField("instance", 0),
+                   XByteField("pon", 0),
+                   StrField("unicastvssn", "TBIT"),
+                   XIntField("unicastlink", 0x00000000),
+                   XByteField("pad", 0),
+                   ]
+
+class PortIngressRuleResultOLTEPONQueue(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result OLT Queue """
+    name = "Port Ingress Rule Result OLT Queue"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 15),
+                   XByteField("result", 3),
+                   XByteField("oltqueuerule", 0x13),
+                   XShortField("objecttype", 0x0001),
+                   XByteField("instance", 0),
+                   XByteField("pon", 0),
+                   XIntField("unicastvssn", 0x00000000),
+                   XIntField("unicastlink", 0x00000000),
+                   XByteField("pad", 0),
+                   ]
+
+# __TIBIT_OLT_OAM__: Defined by Tibit
+class PortIngressRuleResultOLTBroadcastQueue(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result OLT Broadcast Queue """
+    name = "Port Ingress Rule Result OLT Broadcast Queue"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 15),
+                   XByteField("result", 3),
+                   XByteField("oltqueuerule", 0x13),
+                   XShortField("objecttype", 0x0001),
+                   XByteField("instance", 0),
+                   XByteField("pon", 0),
+                   XLongField("broadcast", 0xffffffffffff0000),
+                   XByteField("pad", 0),
+                   ]
+
+class PortIngressRuleResultLearningGroup(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Learning Group """
+    name = "Port Ingress Rule Result Learning Group "
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 6),
+                   XByteField("result", 3),
+                   XByteField("grouprule", 0x14),
+                   XShortField("objecttype", 0x0000),
+                   XByteField("instance", 0),
+                   XByteField("num", 0),
+                   ]
+
+class PortIngressRuleResultSet(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Set """
+    name = "Port Ingress Rule Result Set"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   FieldLenField("length", None, length_of="value", fmt="B", adjust=lambda pkt,x: x+6),
+                   XByteField("result", 3),
+                   XByteField("set", 4),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   StrLenField("value", "", length_from=lambda x:x.length-6),
+                   ]
+
+class PortIngressRuleResultCopy(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Copy """
+    name = "Port Ingress Rule Result Copy"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 6),
+                   XByteField("result", 3),
+                   XByteField("copy", 5),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   XByteField("msbmask", 0),
+                   XByteField("lsbmask", 0),
+                   ]
+
+class PortIngressRuleResultDelete(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Delete """
+    name = "Port Ingress Rule Result Delete"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 4),
+                   XByteField("result", 3),
+                   XByteField("delete", 6),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   ]
+
+class PortIngressRuleResultInsert(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Insert """
+    name = "Port Ingress Rule Result Insert"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 4),
+                   XByteField("result", 3),
+                   XByteField("insert", 7),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   ]
+
+class PortIngressRuleResultReplace(Packet):
+    """ Variable Descriptor: Port Ingress Rule Result Replace """
+    name = "Port Ingress Rule Result Replace"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 4),
+                   XByteField("result", 3),
+                   XByteField("replace", 8),
+                   XByteField("fieldcode", 0),
+                   XByteField("fieldinstance", 0),
+                   ]
+
+class PortIngressRuleTerminator(Packet):
+    """ Variable Descriptor: Port Ingress Rule Terminator """
+    name = "Port Ingress Rule Terminator"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ByteField("length", 1),
+                   XByteField("terminator", 0),
+                   ]
+
+class CustomField(Packet):
+    """ Variable Descriptor: Custom Field """
+    name = "Custom Field"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0502),
+                   XByteField("length", 1),
+                   XByteField("value", 0),
+                   ]
+
+class CustomFieldEtherType(Packet):
+    """ Variable Descriptor: Custom Field EtherType """
+    name = "Custom Field EtherType"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0502),
+                   XByteField("length", 6),
+                   XByteField("fieldcode", 0x19),
+                   XByteField("layerselect", 2),
+                   XByteField("dwordoffset", 0),
+                   XByteField("lsb", 0),
+                   XByteField("width", 0x10),
+                   XByteField("numclauses", 0),
+                   ]
+
+class CustomFieldGenericL3(Packet):
+    """ Variable Descriptor: Custom Field Generic L3 """
+    name = "Custom Field Generic L3"
+    fields_desc = [ByteEnumField("branch", 0xD7, OamBranchEnum),
+                   XShortField("leaf", 0x0502),
+                   XByteField("length", 6),
+                   XByteField("fieldcode", 0x1a),
+                   XByteField("layerselect", 8),
+                   XByteField("dwordoffset", 0),
+                   XByteField("lsb", 0x18),
+                   XByteField("width", 0x8),
+                   XByteField("numclauses", 0),
+                   ]
+
+####
+#### 0xD9 - Port Ingress Rules
+####
+
+class ClearPortIngressRules(Packet):
+    """ Variable Descriptor: Clear Port Ingress Rule """
+    name = "Clear Port Ingress Rule"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0501),
+                   ]
+
+class AddPortIngressRule(Packet):
+    """ Variable Descriptor: Add Port Ingress Rule """
+    name = "Add Port Ingress Rule"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0502),
+                   ]
+
+
+class DeletePortIngressRule(Packet):
+    """ Variable Descriptor: Delete Port Ingress Rule """
+    name = "Delete Port Ingress Rule"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0503),
+                   ]
+
+####
+#### 0xb7 - TIBIT ATTRIBUTES
+####
+class OltMode(Packet):
+    """ Variable Descriptor: OLT Mode """
+    name = "OLT Mode"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0101),
+                   ]
+
+class OltModeSet(Packet):
+    """ Variable Descriptor: OLT Mode """
+    name = "OLT Mode"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0101),
+                   XByteField("length", 1),
+                   XByteField("value", 0),
+                   ]
+
+class OltPonAdminState(Packet):
+    """ Variable Descriptor: OLT PON Admin State """
+    name = "PON Admin State"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0102),
+                   ]
+
+class OltPonAdminStateSet(Packet):
+    """ Variable Container: OLT PON Admin State """
+    name = "PON Admin State"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0102),
+                   XByteField("length", 1),
+                   XByteField("value", 0),
+                   ]
+
+class TibitLinkMacTable(Packet):
+    """ Variable Descriptor: Link MAC Table """
+    name = "Link MAC Table"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0103),
+                   ]
+
+class TibitKeyExchange(Packet):
+    """ Variable Descriptor: Key Exchange """
+    name = "Key Exchange Period"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0104),
+                   ]
+
+class TibitKeyExchangeSet(Packet):
+    """ Variable Descriptor: Key Exchange Set"""
+    name = "Key Exchange Period"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0104),
+                   XByteField("length", 2),
+                   XShortField("value", 0x1234),
+                  ]
+
+class OnuMode(Packet):
+    """ Variable Descriptor: ONU Mode """
+    name = "ONU Mode"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0105),
+                   ]
+
+class OnuModeSet(Packet):
+    """ Variable Descriptor: ONU Mode """
+    name = "ONU Mode"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0105),
+                   XByteField("length", 1),
+                   XByteField("value", 0),
+                   ]
+
+class TibitGrantSpacing(Packet):
+    """ Variable Descriptor: Grant Spacing """
+    name = "Grant Spacing"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0106),
+                   ]
+
+class TibitGrantSpacingSet(Packet):
+    """ Variable Descriptor: Grant Spacing """
+    name = "Grant Spacing"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0106),
+                   XByteField("length", 1),
+                   XByteField("value", 0),
+                   ]
+
+class TibitBurstOverheadProfiles(Packet):
+    """ Variable Descriptor: Burst Overhead Profiles """
+    name = "Burst Overhead Profiles"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0107),
+                   ]
+
+class TibitBurstOverheadProfilesSet(Packet):
+    """ Variable Descriptor: Burst Overhead Profiles """
+    name = "Burst Overhead Profiles"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0107),
+                   # Length is one + 5 for each entry
+                   XByteField("length", 6),
+                   XByteField("num_profiles", 1),
+                   ]
+
+class TibitBurstOverheadProfilesEntry(Packet):
+    """ Variable Descriptor: Burst Overhead Profile Entry """
+    name = "Burst Profile Entry:"
+    fields_desc = [XByteField("laser_on_time", 0x28),
+                   XByteField("laser_off_time", 0x28),
+                   XShortField("sync_time", 0x0040),
+                   XByteField("us_fec", 1),
+                   ]
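+
+# Illustrative sketch (an assumption, not adapter logic): per the length
+# comment above, one 5-byte TibitBurstOverheadProfilesEntry is appended per
+# profile, so 'length' = 1 + 5 * num_profiles. A single-profile message could
+# be composed with scapy's '/' layering:
+#
+#   msg = (TibitBurstOverheadProfilesSet(length=6, num_profiles=1) /
+#          TibitBurstOverheadProfilesEntry())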
+
+class TibitGpioConditionSet(Packet):
+    """ Variable Descriptor: GPIO condition Set """
+    name = "GPIO Condition"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0108),
+                   XByteField("length", 1),
+                   XByteField("state", 0),
+                   ]
+
+class TibitDiscoveryPeriod(Packet):
+    """ Variable Descriptor: Discovery Period """
+    name = "Discovery Period"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0109),
+                   ]
+
+class TibitDiscoveryPeriodSet(Packet):
+    """ Variable Descriptor: Discovery Period Set """
+    name = "Discovery Period"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0109),
+                   XByteField("length", 2),
+                   XShortField("period", 3000),
+                   ]
+
+class TibitLldpPeriod(Packet):
+    """ Variable Descriptor: LLDP Period """
+    name = "LLDP Period"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010A),
+                   ]
+
+class TibitLldpPeriodSet(Packet):
+    """ Variable Descriptor: LLDP Period Set """
+    name = "LLDP Period"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010A),
+                   XByteField("length", 2),
+                   XShortField("period", 60),
+                   ]
+
+class TibitLldpDestAddress(Packet):
+    """ Variable Descriptor: LLDP Destination MAC Address """
+    name = "LLDP Dest Address"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010B),
+                   ]
+
+class TibitLldpDestAddressSet(Packet):
+    """ Variable Descriptor: LLDP Destination MAC Address Set """
+    name = "LLDP Dest Address"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010B),
+                   XByteField("length", 6),
+                   MACField("addr", "01:80:c2:00:00:0e"),
+                   ]
+
+class TibitLldpTpid(Packet):
+    """ Variable Descriptor: LLDP TPID """
+    name = "LLDP TPID"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010C),
+                   ]
+
+class TibitLldpTpidSet(Packet):
+    """ Variable Descriptor: LLDP TPID Set """
+    name = "LLDP TPID"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010C),
+                   XByteField("length", 2),
+                   XShortField("tpid", 0),
+                   ]
+
+class TibitLldpVid(Packet):
+    """ Variable Descriptor: LLDP TPID """
+    name = "LLDP VID"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010D),
+                   ]
+
+class TibitLldpVidSet(Packet):
+    """ Variable Descriptor: LLDP TPID Set """
+    name = "LLDP VID"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010D),
+                   XByteField("length", 2),
+                   XShortField("vid", 0),
+                   ]
+
+class TibitFailsafeTimer(Packet):
+    """ Variable Descriptor: Failsafe Timer """
+    name = "Failsafe Timer"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010E),
+                   ]
+
+class TibitFailsafeTimerSet(Packet):
+    """ Variable Descriptor: Failsafe Timer Set """
+    name = "Failsafe Timer"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010E),
+                   XByteField("length", 1),
+                   XByteField("timer", 0),
+                   ]
+
+class TibitMtu(Packet):
+    """ Variable Descriptor: MTU """
+    name = "MTU"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010F),
+                   ]
+
+class TibitMtuSet(Packet):
+    """ Variable Descriptor: MTU Set """
+    name = "MTU"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x010F),
+                   XByteField("length", 4),
+                   XIntField("mtu", 0),
+                   ]
+
+class TibitCtagCtagMode(Packet):
+    """ Variable Descriptor: CTAG CTAG Mode """
+    name = "CTAG-CTAG Mode"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0110),
+                   ]
+
+class TibitCtagCtagModeSet(Packet):
+    """ Variable Descriptor: CTAG CTAG Mode Set """
+    name = "CTAG-CTAG Mode"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0110),
+                   XByteField("length", 1),
+                   XByteField("enable", 0),
+                   ]
+
+class TibitStatsOptions(Packet):
+    """ Variable Descriptor: Tibit Stats Options """
+    name = "Tibit Stats Options"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0111),
+                   ]
+
+class TibitStatsOptionsSet(Packet):
+    """ Variable Descriptor: Tibit Stats Options Set"""
+    name = "Tibit Stats Options Set"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0111),
+                   ByteField("length", 5),
+                   ByteField("enable", 0),
+                   IntField("period", 0),
+                   ]
+
+
+UpstreamSlaSubtypeEnum = { 0x00: "Terminator",
+                           0x01: "Header",
+                           0x02: "Max Grant Period",
+                           0x03: "Min Grant Period",
+                           0x04: "Service Limit",
+                           0x05: "Fixed Rate",
+                           0x06: "Guaranteed Rate",
+                           0x07: "Best Effort Rate",
+                           0x08: "Max Burst Size",
+                           0x09: "Priority",
+                         }
+
+class UpstreamSla(Packet):
+    """ Variable Descriptor: Upstream SLA """
+    name = "Upstream SLA"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0621),
+                   ]
+
+class UpstreamSlaHeader(Packet):
+    """ Variable Descriptor: Upstream SLA Header """
+    name = "Upstream SLA Header"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0621),
+                   ByteField("length", 1),
+                   XByteField("subtype", 1),
+                   ]
+
+class UpstreamSlaTerminator(Packet):
+    """ Variable Descriptor: Upstream SLA Terminator """
+    name = "Upstream SLA Terminator"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0621),
+                   ByteField("length", 1),
+                   XByteField("subtype", 0),
+                   ]
+
+class UpstreamSlaSettingLength01(Packet):
+    """ Variable Descriptor: Upstream SLA Setting """
+    name = "Upstream SLA Setting"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0621),
+                   ByteField("length", 3),
+                   XByteField("subtype", 0),
+                   XByteField("setting_len", 1),
+                   ByteField("setting_val", 0),
+                   ]
+
+class UpstreamSlaSettingLength02(Packet):
+    """ Variable Descriptor: Upstream SLA Setting """
+    name = "Upstream SLA Setting"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0621),
+                   ByteField("length", 4),
+                   XByteField("subtype", 0),
+                   XByteField("setting_len", 2),
+                   ShortField("setting_val", 0),
+                   ]
+
+class UpstreamSlaSettingLength04(Packet):
+    """ Variable Descriptor: Upstream SLA Setting """
+    name = "Upstream SLA Setting"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0621),
+                   ByteField("length", 6),
+                   XByteField("subtype", 0),
+                   XByteField("setting_len", 4),
+                   IntField("setting_val", 0),
+                   ]
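+
+# Illustrative sketch (an assumption, not adapter logic): an upstream SLA is
+# expressed as a Header, one or more subtype settings (see
+# UpstreamSlaSubtypeEnum above), and a Terminator. For example, a Guaranteed
+# Rate (subtype 0x06) setting carrying a 4-byte value:
+#
+#   sla = (UpstreamSlaHeader() /
+#          UpstreamSlaSettingLength04(subtype=0x06, setting_val=1000000) /
+#          UpstreamSlaTerminator())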
+
+class SlaPriorityType(Packet):
+    """ Variable Descriptor: SLA Priority Type """
+    name = "SLA Priority Type"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0622),
+                   ]
+
+class SlaPriorityTypeSet(Packet):
+    """ Variable Container: SLA Priority Type """
+    name = "SLA Priority Type"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0622),
+                   XByteField("length", 1),
+                   XByteField("value", 1),
+                   ]
+
+class DsGuarRate(Packet):
+    """ Variable Descriptor: Downstream Guaranteed Rate """
+    name = "Downstream Guaranteed Rate"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0623),
+                   ]
+
+class DsGuarRateSet(Packet):
+    """ Variable Descriptor: Downstream Guaranteed Rate Set"""
+    name = "Downstream Guaranteed Rate Set"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0623),
+                   ByteField("length", 6),
+                   ShortField("mbs", 0),
+                   IntField("rate", 0),                  ]
+
+
+class DsBestEffortRate(Packet):
+    """ Variable Descriptor: Downstream Best Effort Rate """
+    name = "Downstream Best Effort Rate"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0624),
+                   ]
+
+class DsBestEffortRateSet(Packet):
+    """ Variable Descriptor: Downstream Best Effort Rate Set"""
+    name = "Downstream Best Effort Rate Set"
+    fields_desc = [ByteEnumField("branch", 0xB7, OamBranchEnum),
+                   XShortField("leaf", 0x0624),
+                   ByteField("length", 6),
+                   ShortField("mbs", 0),
+                   IntField("rate", 0),                  ]
+
+
+####
+#### 0xd9 - BRANCH ATTRIBUTES
+####
+
+class EnableUserTraffic(Packet):
+    """ Variable Descriptor: Enable User Traffic """
+    name = "Enable User Traffic"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0601),
+                   ]
+
+class DisableUserTraffic(Packet):
+    """ Variable Descriptor: Disable User Traffic """
+    name = "Disable User Traffic"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0602),
+                   ]
+
+class LoopbackEnable(Packet):
+    """ Variable Descriptor: Loopback Enable """
+    name = "Loopback Enable"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0603),
+                   XByteField("length", 1),
+                   XByteField("location", 0),
+                   ]
+
+class LoopbackDisable(Packet):
+    """ Variable Descriptor: Loopback Disable """
+    name = "Loopback Disable"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0604),
+                   XByteField("length", 1),
+                   XByteField("location", 0),
+                   ]
+
+class CurrentAlarmSummary(Packet):
+    """ Variable Descriptor: Current Alarm Summary """
+    name = "Current Alarm Summary"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0301)]
+
+class DeviceReset(Packet):
+    """ Variable Descriptor: Device Reset """
+    name = "Device Reset"
+    fields_desc = [ByteEnumField("branch", 0xD9, OamBranchEnum),
+                   XShortField("leaf", 0x0001),
+                   ]
+
+class TibitDeviceReset(Packet):
+    """ Variable Descriptor: Tibit Device Reset """
+    name = "Tibit Device Reset"
+    fields_desc = [XByteField("branch", 0xB9),
+                   XShortField("leaf", 0x0001),
+                   ]
+
+class TibitPreprovisionLink(Packet):
+    """ Variable Descriptor: Tibit Preprovision Link """
+    name = "Tibit Preprovision Link"
+    fields_desc = [XByteField("branch", 0xB9),
+                   XShortField("leaf", 0x0002),
+                   ]
+
+class TibitApplySla(Packet):
+    """ Variable Descriptor: Apply SLA """
+    name = "Apply Sla"
+    fields_desc = [XByteField("branch", 0xB9),
+                   XShortField("leaf", 0x0601),
+                   ]
+
+
+##
+## DPoE File Transfer
+##
+
+### Table 156 - DPoE File Transfer Opcodes
+DPoEFileXferOpcodeEnum = {
+    0x00: "Reserved",
+    0x01: "Write Request",
+    0x02: "File Transfer Data",
+    0x03: "File Transfer Ack",
+    }
+
+Dpoe_FileXferOpcodes = {v: k for k, v in DPoEFileXferOpcodeEnum.iteritems()}
+
+
+### Table 160 - DPoE File Acknowledgement Response Codes
+DPoEFileAckRespCodeEnum = {
+    0x00: "OK",
+    0x01: "Undefined",
+    0x02: "Not Found",
+    0x03: "No Access",
+    0x04: "Full",
+    0x05: "Illegal Operation",
+    0x06: "Unknown ID",
+    0x07: "Bad Block",
+    0x08: "Timeout",
+    0x09: "Busy",
+    0x0A: "Incompatible File",
+    0x0B: "Corrupted File",
+    }
+
+Dpoe_FileAckRspOpcodes = {v: k for k, v in DPoEFileAckRespCodeEnum.iteritems()}
+
+class DpoeFileTransferWrite(Packet):
+    name = "DPoE File Transfer Write "
+    fields_desc = [ByteEnumField("opcode", 0x01, DPoEFileXferOpcodeEnum),
+                   StrField("filename", ""),
+                  ]
+
+class DpoeFileTransferData(Packet):
+    name = "DPoE File Transfer Data "
+    fields_desc = [ByteEnumField("opcode", 0x02, DPoEFileXferOpcodeEnum),
+                   ShortField("block_num", 0),
+                   FieldLenField("block_width", None, length_of="block", fmt="H"),
+                   StrLenField("block", "", length_from=lambda x:x.length),
+                  ]
+
+class DpoeFileTransferAck(Packet):
+    name = "DPoE File Transfer Data "
+    fields_desc = [ByteEnumField("opcode", 0x03, DPoEFileXferOpcodeEnum),
+                   ShortField("block_num", 0),
+                   ByteEnumField("response_code", 0x00, DPoEFileAckRespCodeEnum),
+                  ]
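+
+# Illustrative sketch (an assumption, not adapter logic): a DPoE file write
+# exchange composed from the classes above; the filename and payload names are
+# hypothetical. 'block_width' is a FieldLenField and is computed automatically
+# from the block payload when left at its default of None:
+#
+#   wr  = DpoeFileTransferWrite(filename='sw_image.bin')
+#   dat = DpoeFileTransferData(block_num=1, block=chunk_bytes)
+#   ack = DpoeFileTransferAck(block_num=1,
+#                             response_code=Dpoe_FileAckRspOpcodes["OK"])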
+
+
+##
+## Broadcom TLVs
+##
+class Broadcom07_7F_F1_Set01(Packet):
+    """ Variable Descriptor: Broadcom 0x07/0x7ff1 """
+    name = "Broadcom 0x07/0x7ff1"
+    fields_desc = [ByteEnumField("branch", 0x07, OamBranchEnum),
+                   XShortField("leaf", 0x7ff1),
+                   XByteField("length", 2),
+                   XShortField("value0", 0x0101),
+                   ]
+
+class Broadcom07_7F_F1_Set02(Packet):
+    """ Variable Descriptor: Broadcom 0x07/0x7ff1 """
+    name = "Broadcom 0x07/0x7ff1"
+    fields_desc = [ByteEnumField("branch", 0x07, OamBranchEnum),
+                   XShortField("leaf", 0x7ff1),
+                   XByteField("length", 7),
+                   XShortField("value0", 0x0201),
+                   XShortField("value1", 0x0000),
+                   XShortField("value2", 0x0107),
+                   XByteField("value3", 0xd0),
+                   ]
+
+class Broadcom07_7F_F1_Set03(Packet):
+    """ Variable Descriptor: Broadcom 0x07/0x7ff1 """
+    name = "Broadcom 0x07/0x7ff1"
+    fields_desc = [ByteEnumField("branch", 0x07, OamBranchEnum),
+                   XShortField("leaf", 0x7ff1),
+                   XByteField("length", 7),
+                   XShortField("value0", 0x0301),
+                   XShortField("value1", 0x0000),
+                   XShortField("value2", 0x0100),
+                   XByteField("value3", 0xb8),
+                   ]
+
+class Broadcom07_7F_F1_Set04(Packet):
+    """ Variable Descriptor: Broadcom 0x07/0x7ff1 """
+    name = "Broadcom 0x07/0x7ff1"
+    fields_desc = [ByteEnumField("branch", 0x07, OamBranchEnum),
+                   XShortField("leaf", 0x7ff1),
+                   XByteField("length", 1),
+                   XByteField("value0", 0x00),
+                   ]
+
+class Broadcom07_7F_F6_Set(Packet):
+    """ Variable Descriptor: Broadcom 0x07/0x7ff6 """
+    name = "Broadcom 0x07/0x7ff6"
+    fields_desc = [ByteEnumField("branch", 0x07, OamBranchEnum),
+                   XShortField("leaf", 0x7ff6),
+                   XByteField("length", 2),
+                   XShortField("value0", 0x07d0),
+                   ]
+
+###
+### Clause 30 Attributes (0x07)
+###
+class Clause30AttributesMacEnable(Packet):
+    """ Variable Descriptor: Clause 30 Attributes MAC Enable """
+    name = "Clause 30 Attributes MAC Enable"
+    fields_desc = [ByteEnumField("branch", 0x07, OamBranchEnum),
+                   XShortField("leaf", 0x001a),
+                   XByteField("length", 1),
+                   XByteField("value", 1),
+                   ]
+
+class GenericTLV(Packet):
+    """ Variable Descriptor: Generic TLV """
+    name = "Generic TLV"
+    fields_desc = [ByteEnumField("branch", 0x00, OamBranchEnum),
+                   XShortField("leaf", 0x0000),
+                   FieldLenField("length", None, length_of="value", fmt="B"),
+                   StrLenField("value", "", length_from=lambda x:x.length),
+                   ]
+
+class EndOfPDU(Packet):
+    name = "End of EOAM PDU"
+    fields_desc = [BitEnumField("type", 0x00, 7, TLV_dictionary),
+                   BitField("length", 0x00, 9)]
diff --git a/python/extensions/eoam/__init__.py b/python/extensions/eoam/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/eoam/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/kpi/README.md b/python/extensions/kpi/README.md
new file mode 100644
index 0000000..a55c7d2
--- /dev/null
+++ b/python/extensions/kpi/README.md
@@ -0,0 +1,286 @@
+# VOLTHA Performance Monitoring/KPI Library
+
+This directory provides a common library for the creation of Performance Monitoring groups
+within VOLTHA and should be used to ensure that KPI information from different adapters uses
+the same format.
+
+The original KpiEvent protobuf message is still supported for adapters that wish to use the
+previous format, but device adapter developers are encouraged to support the new format and
+make use of this shared library.
+
+**Also**, please read the **Remaining Work Item** sections of each README.md file. Some additional
+work items as well as existing/related JIRA items are highlighted in those sections.
+
+## KPI Manager Creation
+
+Currently, each device adapter is required to follow the steps below to create and
+register a PM Metrics manager. This is typically performed in the device handler's
+'activate' method (called in response to the device handler first being enabled).
+
+1. Create an instance of a derived **AdapterPmMetrics** manager object. This is currently an
+   **OltPmMetrics** object for an _OLT_ adapter, or an **OnuPmMetrics** object for an
+   _ONU_ adapter. If you have additional device-specific metrics to report, you can
+   derive your own manager object from one of these two classes in order to
+   inherit (or modify) the metrics defined in those classes as well as support any new
+   metrics specific to your device.
+   
+   This call takes a number of device adapter specific arguments and these are detailed
+   in the pydoc headers for the appropriate **AdapterPmMetrics** _\_\_init___() method.
+   
+2. Create the ProtoBuf message for your metrics by calling the newly created _manager's_
+   **_make_proto_**() method. 
+   
+3. Register the ProtoBuf message configuration with the adapter agent via the 
+   _update_device_pm_config_() method with the optional init parameter set to **True**.
+   
+4. Request the manager to schedule the first PM collection interval by calling the
+   manager's _start_collector_() method. You may wish to do this after a short pause
+   depending on how your adapter is designed.
+   
+**NOTE:** Currently there is only a single collection frequency for all metrics of
+a given device adapter. In the future, individual collection intervals on a
+per-metric/metric-group basis will be supported by the shared library.
+   
+The next two subsections provide examples of these steps for both an OLT and an ONU
+device adapter.
+
+### OLT Device Adapter PM Manager Setup
+
+```python
+    # Create the OLT PM Manager object
+    kwargs = {
+        'nni-ports': self.northbound_ports.values(),
+        'pon-ports': self.southbound_ports.values()
+    }
+    self.pm_metrics = OltPmMetrics(self.adapter_agent, self.device_id, self.logical_device_id,
+                                   grouped=True, freq_override=False,
+                                   **kwargs)
+
+    # Create the protobuf message configuration
+    pm_config = self.pm_metrics.make_proto()
+    self.log.debug("initial-pm-config", pm_config=pm_config)
+    
+    # Create the PM information in the adapter agent
+    self.adapter_agent.update_device_pm_config(pm_config, init=True)
+        
+    # Start collecting stats from the device after a brief pause
+    reactor.callLater(10, self.pm_metrics.start_collector)
+```
+
+### ONU Device Adapter PM Manager Setup
+
+For ONU devices, if you wish to include OpenOMCI 15-minute historical
+intervals, you will need to register the PM Metrics OpenOMCI Interval PM class
+with OpenOMCI.  This ties in the OpenOMCI PM Interval State Machine with the KPI
+shared library.
+
+```python
+
+    # Create the ONU PM Manager object
+    kwargs = {
+        'heartbeat': self.heartbeat,
+        'omci-cc': self.openomci.omci_cc
+    }
+    self.pm_metrics = OnuPmMetrics(self.adapter_agent, self.device_id, self.logical_device_id,
+                                   grouped=True, freq_override=False,
+                                   **kwargs)
+                                   
+    # Create the protobuf message configuration
+    pm_config = self.pm_metrics.make_proto()
+    
+    # Register the OMCI history intervals with OpenOMCI
+    self.openomci.set_pm_config(self.pm_metrics.omci_pm.openomci_interval_pm)
+    
+    # Create the PM information in the adapter agent
+    self.adapter_agent.update_device_pm_config(pm_config, init=True)
+    
+    # Start collecting stats from the device after a brief pause
+    reactor.callLater(30, self.pm_metrics.start_collector)
+```
+
+### How metrics are currently collected
+
+Currently, the default behaviour is to collect KPI information on a single periodic 
+interval that can be adjusted via the NBI/CLI of VOLTHA.  It collects data by extracting
+it from an object provided during the collection request and this object should either
+provide attributes or a property method that matches the metric to be collected.
+For instance, assume that you have an NNI metric called 'tx_packets'.  You would pass
+an object during collection that should have one of the following two (see the sketch below):
+
+- a _tx_packets_ attribute/member name defined for the object that has the requested
+  value already set (via background poll)
+  
+- a _tx_packets_ **property** method that accesses an internal variable with the value
+  already set (via background poll) or that calculates/extracts the value without blocking
+  the call.
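+
+A minimal sketch (illustrative only; `NniPort` and its internals are
+hypothetical names) of an object that satisfies either approach:
+
+```python
+class NniPort(object):
+    def __init__(self):
+        self._stats = {}        # kept up to date by a background poll
+        # Attribute approach: the poller sets this member directly
+        self.rx_packets = 0
+
+    @property
+    def tx_packets(self):
+        # Property approach: return the already-polled value without blocking
+        return self._stats.get('tx_packets', 0)
+```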
+
+### Known Issues in collection
+
+Note that a future story will be created to allow for collection to be requested for
+a metric/metric-group on demand so that background polling of KPI information is not
+required for all reported metrics.
+
+Note that a future story will be created to allow KPI information to be collected on
+per-group/metric intervals.
+
+# Basic KPI Format (**KpiEvent2**)
+
+The KPI information is published on the kafka bus under the _voltha.kpi_ topic. For 
+VOLTHA PM information, the kafka key is empty and the value is a JSON message composed
+of the following key-value pairs.
+
+| key        | value  | Notes |
+| :--------: | :----- | :---- |
+| type       | string | "slice" or "ts". A "slice" is a set of path/metric data for the same time-stamp. A "ts" is a time-series: array of data for same metric |
+| ts         | float  | UTC time-stamp of when the KpiEvent2 was created (seconds since the epoch of January 1, 1970) |
+| slice_data | list   | One or more sets of metrics composed of a _metadata_ section and a _metrics_ section. |
+
+**NOTE**: Time-series metrics and corresponding protobuf messages have not been defined.
+
+## Slice Data Format
+
+For slice KPI messages, the _slice_data_ portion of the **KpiEvent2** is composed of a _metadata_
+section and a _metrics_ section.
+
+### _metadata_ Section Format
+
+The metadata section is used to:
+ - Define which metric/metric-group is being reported (The _title_ field)
+ - Provide some common fields required by all metrics (_title_, _timestamp_, _device ID_, ...)
+ - Provide metric/metric-group specific context (the _context_ fields)
+
+| key        | value  | Notes |
+| :--------: | :----- | :---- |
+| title       | string | The name of the metric or metric-group being reported (e.g. "Ethernet", "PON") |
+| ts         | float | UTC time-stamp of data at the time of collection (seconds since the epoch of January 1, 1970) |
+| logical_device_id | string | The logical ID that the device belongs to. This is equivalent to the DPID reported in ONOS for the VOLTHA logical device with the 'of:' prefix removed. |
+| device_id | string | The physical device ID that is reporting the metric. |
+| serial_no | string | The reported serial number for the physical device reporting the metric. |
+| context | map | A key-value map of metric/metric-group specific information.|
+
+The context map is composed of key-value pairs where the key (string) is the label for the context
+specific value and the value (string) is the corresponding context value. While most values may be
+better represented as a float/integer, there may be some that are better represented as text. For
+this reason, values are always represented as strings to allow the ProtoBuf message format to be
+as simple as possible.
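+
+For example (an illustrative sketch; `pon_port` is a hypothetical object the
+adapter tracks), numeric context values are stringified before publishing:
+
+```python
+context = {
+    'port_no': str(pon_port.port_no),   # numeric, but serialized as a string
+    'pon_id': str(pon_port.pon_id),
+}
+```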
+
+Here is a JSON _example_ of a current KPI published on the kafka bus under the
+_voltha.kpi_ topic. 
+
+```json
+{
+  "type": "slice",
+  "ts": 1534440704.0,
+  "slice_data": [
+    {
+      "metadata": {
+        "title": "Ethernet",
+        "ts": 1534440704.0,
+        "logical_device_id": "000000139521a269",
+        "device_id": "000115929ed71696",
+        "serial_no": "dummy_sn2209199",
+        "context": {
+          "port_no": "1"
+        }
+      },
+      "metrics": {
+        "tx_dropped": 0.0,    # A COUNTER
+        "rx_packets": 0.0,
+        "rx_bytes": 0.0,
+        "rx_mcast_packets": 0.0,
+        "tx_mcast_packets": 16.0,
+        "rx_bcast_packets": 0.0,
+        "oper_status": 4.0,   # A STATE
+        "admin_state": 3.0,
+        "rx_errors": 0.0,
+        "tx_bytes": 1436.0,
+        "rx_dropped": 0.0,
+        "tx_packets": 16.0,
+        "tx_bcast": 0.0
+      }
+    },
+    {
+      "metadata": {
+        "title": "PON",
+        "logical_device_id": "000000139521a269",
+        "device_id": "000115929ed71696",
+        "serial_no": "dummy_sn2209199",
+        "ts": 1534440704.0,
+        "context": {
+          "port_no": "5",
+          "pon_id": "0"
+        }
+      },
+      "metrics": {
+        "rx_packets": 0.0,
+        "in_service_onus": 0.0,     # A GAUGE
+        "rx_bytes": 0.0,
+        "closest_onu_distance": -1.0,
+        "tx_bip_errors": 0.0,
+        "oper_status": 4.0,
+        "admin_state": 3.0,
+        "tx_bytes": 0.0,
+        "tx_packets": 0.0
+      }
+    },
+    ...
+  ]
+}
+
+```
+
+For OpenOMCI historical intervals, the name is derived from the Managed Entity class:
+
+```json
+{
+  "type": "slice",
+  "ts": 1532372864.0,
+  "prefixes": {
+    "voltha.adtran_onu.0001b8c505090b5b.EthernetFrameExtendedPerformanceMonitoring": {
+      "metrics": {
+        "entity_id": 2.0,
+        "class_id": 334.0,
+        "packets": 0.0,
+        "octets": 0.0,
+        "interval_end_time": 0.0,
+        "crc_errored_packets": 0.0,
+        "broadcast_packets": 0.0,
+        "64_octets": 0.0,
+        "65_to_127_octets": 0.0,
+        "128_to_255_octets": 0.0,
+        "256_to_511_octets": 0.0,
+        "undersize_packets": 0.0,
+        "drop_events": 0.0,
+        "multicast_packets": 0.0,
+        "oversize_packets": 0.0
+      }
+    }
+  }
+}
+```
+More information on the OpenOMCI ONU Historical Intervals is detailed in the _IntervalMetrics.md_
+file in the _onu/_ subdirectory.
+
+# Remaining Work Items
+
+This initial code is only a preliminary sample. The following tasks need to be
+added to the VOLTHA JIRA or performed in the SEBA group:
+    
+- Get feedback from other OLT/ONU developers on any needed changes
+
+- Allow PM groups to have different collection times
+
+- Support calling a 'get-data' method before collecting the metrics.  Currently metrics are collected
+  in a device adapter independent way and the PM code just reports whatever values the attributes happen to hold.
+  This would provide an asynchronous request and upon successful completion, the KPI metric/group
+  would be published on the Kafka bus.
+
+- [VOL-931](https://jira.opencord.org/browse/VOL-931) Support for retrieval of PM measurements
+  on-demand. This would be best implemented after the previous async (get-data) work item.
+
+- For statistics groups that have more than one instance, do we need to be able to
+  enable/disable specific instances? If so, this would be a major refactor of the code (database work, ...)
+
+- [VOL-930](https://jira.opencord.org/browse/VOL-930) PM Collection Format. This format may
+  fit better with the time-series KPI collection as it requests the ability to specify start/stop times.
+  It could possibly be done at a higher layer but the intent may be to have a greater number
+  of samples on a specific metric instance for a defined period of time. Need clarification
+  from the JIRA author.
diff --git a/python/extensions/kpi/__init__.py b/python/extensions/kpi/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/kpi/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/kpi/adapter_pm_metrics.py b/python/extensions/kpi/adapter_pm_metrics.py
new file mode 100644
index 0000000..fe14fee
--- /dev/null
+++ b/python/extensions/kpi/adapter_pm_metrics.py
@@ -0,0 +1,217 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import structlog
+import arrow
+from twisted.internet.task import LoopingCall
+from voltha.protos.events_pb2 import KpiEvent2, KpiEventType, MetricInformation, MetricMetaData
+from voltha.protos.device_pb2 import PmConfig
+
+
+class AdapterPmMetrics(object):
+    """
+    Base class for Device Adapter PM Metrics Manager
+
+    Device specific managers (OLT, ONU, OpenOMCI, ...) will derive groups of PM information
+    and this base class is primarily used to provide a consistent interface to configure,
+    start, and stop statistics collection.
+    """
+    DEFAULT_FREQUENCY_KEY = 'default-collection-frequency'
+    DEFAULT_COLLECTION_FREQUENCY = 15 * 10      # 15 seconds, in 1/10ths of a second
+
+    # If the collection object has a property of the following name, it will be used
+    # to retrieve the UTC Collection Timestamp (UTC seconds since epoch). If the collection
+    # object does not support this attribute, the current time will be used. If the attribute
+    # is supported, but returns None, this signals that no metrics are currently available
+    # for collection.
+    TIMESTAMP_ATTRIBUTE = 'timestamp'
+
+    def __init__(self, adapter_agent, device_id, logical_device_id,
+                 grouped=False, freq_override=False, **kwargs):
+        """
+        Initializer for shared Device Adapter PM metrics manager
+
+        :param adapter_agent: (AdapterAgent) Adapter agent for the device
+        :param device_id: (str) Device ID
+        :param logical_device_id: (str) VOLTHA Logical Device ID
+        :param grouped: (bool) Flag indicating if statistics are managed as a group
+        :param freq_override: (bool) Flag indicating if the collection frequency can be
+                                     specified on a per-group basis
+        :param kwargs: (dict) Device Adapter specific values
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+        self.device_id = device_id
+        self.adapter_agent = adapter_agent
+        self.name = adapter_agent.adapter_name
+        # Sanitize the vcore ID in the logical device ID
+        self.logical_device_id = '0000' + logical_device_id[4:]
+        device = self.adapter_agent.get_device(self.device_id)
+        self.serial_number = device.serial_number
+
+        self.default_freq = kwargs.get(AdapterPmMetrics.DEFAULT_FREQUENCY_KEY,
+                                       AdapterPmMetrics.DEFAULT_COLLECTION_FREQUENCY)
+        self.grouped = grouped
+        self.freq_override = grouped and freq_override
+        self.lc = None
+        self.pm_group_metrics = dict()      # name -> PmGroupConfig
+
+    def update(self, pm_config):
+        # TODO: Move any common steps into base class
+        raise NotImplementedError('Your derived class should override this method')
+
+    def make_proto(self, pm_config=None):
+        raise NotImplementedError('Your derived class should override this method')
+
+    def start_collector(self, callback=None):
+        """
+        Start the collection loop for an adapter if the frequency > 0
+
+        :param callback: (callable) Function to call to collect PM data
+        """
+        self.log.info("starting-pm-collection", device_name=self.name)
+        if callback is None:
+            callback = self.collect_and_publish_metrics
+
+        if self.lc is None:
+            self.lc = LoopingCall(callback)
+
+        if self.default_freq > 0:
+            self.lc.start(interval=self.default_freq / 10)
+
+    def stop_collector(self):
+        """ Stop the collection loop"""
+        if self.lc is not None and self.default_freq > 0:
+            self.lc.stop()
+
+    def collect_group_metrics(self, group_name, group, names, config):
+        """
+        Collect the metrics for a specific PM group.
+
+        This common collection method expects that the 'group' object provided as the
+        second parameter supports an attribute or property with the name of the value to
+        retrieve.
+
+        :param group_name: (str) The unique collection name. The name should not contain spaces.
+        :param group: (object) The object to query for the value of various attributes (PM names)
+        :param names: (set) A collection of PM names that, if implemented as a property in the object,
+                            will return a value to store in the returned PM dictionary
+        :param config: (PMConfig) PM Configuration settings. The enabled flag is examined to determine
+                                  if the data associated with a PM Name will be collected.
+
+        :return: (MetricInformation) collected metrics
+        """
+        assert ' ' not in group_name, 'Spaces are not allowed in metric titles, use an underscore'
+
+        if group is None:
+            return None
+
+        metrics = dict()
+        context = dict()
+
+        now = getattr(group, AdapterPmMetrics.TIMESTAMP_ATTRIBUTE) \
+            if hasattr(group, AdapterPmMetrics.TIMESTAMP_ATTRIBUTE) \
+            else arrow.utcnow().float_timestamp
+
+        if now is None:
+            return None     # No metrics available at this time for collection
+
+        for (metric, t) in names:
+            if config[metric].type == PmConfig.CONTEXT and hasattr(group, metric):
+                context[metric] = str(getattr(group, metric))
+
+            elif config[metric].type in (PmConfig.COUNTER, PmConfig.GAUGE, PmConfig.STATE):
+                if config[metric].enabled and hasattr(group, metric):
+                    metrics[metric] = getattr(group, metric)
+
+        # Check the length of the metric data. It will be zero if/when individual
+        # group metrics can be disabled and all of them are (or none are supported
+        # by the underlying adapter)
+        if len(metrics) == 0:
+            return None
+
+        return MetricInformation(metadata=MetricMetaData(title=group_name,
+                                                         ts=now,
+                                                         logical_device_id=self.logical_device_id,
+                                                         serial_no=self.serial_number,
+                                                         device_id=self.device_id,
+                                                         context=context),
+                                 metrics=metrics)
+
+    def collect_metrics(self, data=None):
+        """
+        Collect metrics for this adapter.
+
+        The adapter type (OLT, ONU, ..) should provide a derived class where this
+        method iterates through all metrics and collects them up in a dictionary with
+        the group/metric name as the key, and the metric values as the contents.
+
+        The data collected (or passed in) is a list of pairs/tuples.  Each
+        pair is composed of a MetricMetaData metadata-portion and list of MetricValuePairs
+        that contains a single individual metric or list of metrics if this is a
+        group metric.
+
+        This method is called for each adapter at a fixed frequency.
+        TODO: Currently all group metrics are collected on a single timer tick.
+              This needs to be fixed as independent group or instance collection is
+              desirable.
+
+        :param data: (list) Existing list of collected metrics (MetricInformation).
+                            This is provided to allow derived classes to call into
+                            further encapsulated classes.
+
+        :return: (list) metadata and metrics pairs - see description above
+        """
+        raise NotImplementedError('Your derived class should override this method')
+
+    def collect_and_publish_metrics(self):
+        """ Request collection of all enabled metrics and publish them """
+        try:
+            data = self.collect_metrics()
+            self.publish_metrics(data)
+
+        except Exception as e:
+            self.log.exception('failed-to-collect-kpis', e=e)
+
+    def publish_metrics(self, data):
+        """
+        Publish the metrics during a collection.
+
+        The data collected (or passed in) is a list of pairs/tuples.  Each
+        pair is composed of a metadata-portion and a metrics-portion that contains
+        information for a specific instance of an individual metric or metric group.
+
+        :param data: (list) Existing list of collected metrics (MetricInformation)
+                            to convert to a KPIEvent and publish
+        """
+        self.log.debug('publish-metrics')
+
+        if len(data):
+            try:
+                # TODO: Existing adapters use the KpiEvent, if/when all existing
+                #       adapters use the shared KPI library, we may want to
+                #       deprecate the KPIEvent
+                kpi_event = KpiEvent2(
+                    type=KpiEventType.slice,
+                    ts=arrow.utcnow().float_timestamp,
+                    slice_data=data
+                )
+                self.adapter_agent.submit_kpis(kpi_event)
+
+            except Exception as e:
+                self.log.exception('failed-to-submit-kpis', e=e)
+
+    # TODO: Need to support on-demand counter update if provided by the PM 'group'.
+    #       Currently we expect PM data to be periodically polled by a separate
+    #       mechanism. The on-demand counter update should be optional in case the
+    #       particular device adapter group of data is polled anyway for other reasons.
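+
+# Illustrative sketch (an assumption, not part of this library): a minimal
+# derived manager. '_ports', '_metric_names' and '_pm_config' are hypothetical
+# members the device adapter would maintain:
+#
+#   class MyPmMetrics(AdapterPmMetrics):
+#       def collect_metrics(self, data=None):
+#           data = data if data is not None else []
+#           for port in self._ports:
+#               group = self.collect_group_metrics('Ethernet', port,
+#                                                  self._metric_names,
+#                                                  self._pm_config)
+#               if group is not None:
+#                   data.append(group)
+#           return data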
diff --git a/python/extensions/kpi/olt/README.md b/python/extensions/kpi/olt/README.md
new file mode 100644
index 0000000..e0012b8
--- /dev/null
+++ b/python/extensions/kpi/olt/README.md
@@ -0,0 +1,179 @@
+# OLT PM Metrics
+
+
+**THESE ARE PRELIMINARY METRIC GROUPS**, Work is needed by the VOLTHA community to reach a consensus on the
+actual metrics that will be provided. **Also**, please read the **Remaining Work Item** sections of each
+README file.
+
+
+
+This document outlines the metrics reported by VOLTHA OLTs.  These are currently collected
+from OLT Device Adapter which is responsible for polling the hardware for information. A future
+version of the Performance Monitoring Library will allow for collection on-demand.
+
+## Format on the Kafka bus
+
+The format of the OLT KPI Events is detailed in the [Basic KPI Format (**KpiEvent2**)](../README.md)
+section of this document's parent directory for the wire format on the bus. This document primarily provides
+the group metric information for OLT KPIs and associated metadata context information.
+
+**All** metric values reported by the library are reported as *float*s. The context and metric tables
+listed in the sections below report the type as initially collected by the OLT Device Adapters.
+
+# OLT PM Metric Groups
+
+The following sections outline the KPI metrics gathered by most OLT Device adapters. If an OLT does not
+support a specific metric in a group, it will not report that metric. This is preferable to reporting a
+metric that always has a value of 0.0 (which could be misleading).
+
+## Admin and Oper State/status
+
+Various interfaces will provide a numeric (integer) value for the current Admin State and Operational
+Status of the interface.  These map to the following states:
+
+**Admin State**
+
+| State             | Value | Notes |
+| ----------------: | :---: | :---- |
+| UNKNOWN           |   0   | The administrative state of the device is unknown |
+| PREPROVISIONED    |   1   | The device is pre-provisioned into Voltha, but not contacted by it |
+| DISABLED          |   2   | The device is disabled and shall not perform its intended forwarding functions other than being available for re-activation. |
+| ENABLED           |   3   | The device is enabled for activation and operation |
+| DOWNLOADING_IMAGE |   4   | The device is in the state of image download |
+
+**Operational Status**
+
+| State      | Value | Notes |
+| ---------: | :---: | :---- |
+| UNKNOWN    |   0   | The status of the device is unknown at this point |
+| DISCOVERED |   1   | The device has been discovered, but not yet activated |
+| ACTIVATING |   2   | The device is being activated (booted, rebooted, upgraded, etc.) |
+| TESTING    |   3   | Service impacting tests are being conducted |
+| ACTIVE     |   4   | The device is up and active |
+| FAILED     |   5   | The device has failed and cannot fulfill its intended role |
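+
+Since all values are reported as floats, a KPI consumer can map the reported
+states back to their names. A minimal sketch mirroring the two tables above
+(`metrics` is the parsed _metrics_ section of a KPI event):
+
+```python
+ADMIN_STATE = {0: 'UNKNOWN', 1: 'PREPROVISIONED', 2: 'DISABLED',
+               3: 'ENABLED', 4: 'DOWNLOADING_IMAGE'}
+OPER_STATUS = {0: 'UNKNOWN', 1: 'DISCOVERED', 2: 'ACTIVATING',
+               3: 'TESTING', 4: 'ACTIVE', 5: 'FAILED'}
+
+# e.g. a reported oper_status of 4.0 maps to 'ACTIVE'
+oper = OPER_STATUS[int(metrics['oper_status'])]
+```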
+
+## NNI KPI Metrics
+
+This group provides metrics for a specific NNI port of an OLT.
+
+**Metadata Context items**
+
+| key         | value   | Notes |
+| :---------: | :------ | :---- |
+| intf_id     | integer | Physical device interface port number for this NNI port |
+
+**Metrics**
+
+| key              | type / size  | Notes |
+| :--------------: | :----------- | :---- |
+| admin_state      | state        | See _Admin State_ section above |
+| oper_status      | state        | See _Operational Status_ section above |
+| rx_bytes         | int, 64-bits | TODO: add definition here... |
+| rx_packets       | int, 64-bits | TODO: add definition here... |
+| rx_ucast_packets | int, 64-bits | TODO: add definition here... |
+| rx_mcast_packets | int, 64-bits | TODO: add definition here... |
+| rx_bcast_packets | int, 64-bits | TODO: add definition here... |
+| rx_error_packets | int, 64-bits | TODO: add definition here... | 
+| tx_bytes         | int, 64-bits | TODO: add definition here... |
+| tx_packets       | int, 64-bits | TODO: add definition here... |
+| tx_ucast_packets | int, 64-bits | TODO: add definition here... |
+| tx_mcast_packets | int, 64-bits | TODO: add definition here... |
+| tx_bcast_packets | int, 64-bits | TODO: add definition here... |
+| tx_error_packets | int, 64-bits | TODO: add definition here... |
+| rx_crc_errors    | int, 64-bits | TODO: add definition here... |
+| bip_errors       | int, 64-bits | TODO: add definition here... |
+
+## PON KPI Metrics
+
+This group provides the metrics for each OLT PON port.
+
+**Metadata Context items**
+
+| key         | value   | Notes |
+| :---------: | :------ | :---- |
+| intf_id     | integer | Physical device interface port number for this PON port |
+| pon_id      | integer | PON ID (0..n) |
+
+**Metrics**
+
+| key                  | type / size  | Notes |
+| :------------------: | :----------- | :---- |
+| admin_state          | state        | See _Admin State_ section above |
+| oper_status          | state        | See _Operational Status_ section above |
+| rx_packets           | int, 64-bits | Sum of the RX packets on all GEM ports that are not base TCONTs |
+| rx_bytes             | int, 64-bits | Sum of the RX octets on all GEM ports that are not base TCONTs |
+| tx_packets           | int, 64-bits | Sum of the TX packets on all GEM ports that are not base TCONTs |
+| tx_bytes             | int, 64-bits | Sum of the TX octets on all GEM ports that are not base TCONTs |
+| tx_bip_errors        | int, 32-bits | Sum of the TX BIP errors across all ONUs on this PON |
+| in_service_onus      | int          | The number of activated ONUs on this PON |
+| closest_onu_distance | float        | Distance to the closest ONU, units=km with granularity in the thousandths |
+
+## ONU KPI Metrics
+
+This group provides the OLT's metrics for each activated ONU.
+
+**Metadata Context items**
+
+| key         | value   | Notes |
+| :---------: | :------ | :---- |
+| intf_id     | integer | Physical device interface port number for this PON port |
+| pon_id      | integer | PON ID (0..n) |
+| onu_id      | integer | ONU ID |
+
+**Metrics**
+
+| key                | type / size  | Notes |
+| :----------------: | :----------- | :---- |
+| fiber_length       | float        | Distance to the ONU, units=km with granularity in the thousandths |
+| equalization_delay | int, 32-bits | Equalization delay |
+| rssi               | int, 32-bits | The received signal strength indication of the ONU. |
+
+**TODO**: How about the following as well?
+ - rx_packets - int, 32-bits - Rx packets received on all GEM ports
+ - rx_bytes   - int, 64-bits - Rx octets received on all GEM ports
+ - tx_packets - int, 32-bits - Tx packets transmitted on all GEM ports
+ - tx_bytes   - int, 64-bits - Tx octets transmitted on all GEM ports
+ - tx_bip_errors - int, 32-bits - Sum of the TX BIP errors on all GEM ports
+
+## GEM Port KPI Metrics
+
+This group provides the GEM port metrics for each activated ONU.
+
+**Metadata Context items**
+
+| key         | value   | Notes |
+| :---------: | :------ | :---- |
+| intf_id     | integer | Physical device interface port number for this PON port |
+| pon_id      | integer | PON ID (0..n) |
+| onu_id      | integer | ONU ID |
+| gem_id      | integer | GEM Port ID |
+
+**Metrics**
+
+| key         | type / size  | Notes |
+| :---------: | :----------- | :---- |
+| alloc_id    | int, 16-bits | TODO: add definition here... |
+| rx_packets  | int, 32-bits | Rx packets received |
+| rx_bytes    | int, 64-bits | Rx octets received |
+| tx_packets  | int, 32-bits | Tx packets transmitted |
+| tx_bytes    | int, 64-bits | Tx octets transmitted |
+
+# Remaining Work Items
+
+This initial code is only preliminary work. See the [Remaining Work Items](../README.md)
+section of this document's parent directory for a list of remaining tasks.
+  
+- [VOL-932](https://jira.opencord.org/browse/VOL-932) PM Interval collection on the OLT. Need
+  to consult OLT device adapter vendors and operators for which KPIs would best fit in the
+  interval groups. Intervals differ from other metric groups as they are defined to collect on
+  a specific interval (15-minutes most common) and at the start of the interval, the counters
+  should be set to zero so that the accumulation during the interval is what is reported. See
+  also [VOL-933](https://jira.opencord.org/browse/VOL-933),
+       [VOL-934](https://jira.opencord.org/browse/VOL-934),
+       [VOL-935](https://jira.opencord.org/browse/VOL-935),
+       [VOL-938](https://jira.opencord.org/browse/VOL-938),
+       [VOL-939](https://jira.opencord.org/browse/VOL-939),
+       [VOL-940](https://jira.opencord.org/browse/VOL-940).
+       **NOTE**: A couple of the tickets above are for the ONU.
+
+TODO: For each group, list if the default is enabled/disabled
\ No newline at end of file
diff --git a/python/extensions/kpi/olt/__init__.py b/python/extensions/kpi/olt/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/kpi/olt/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/kpi/olt/olt_pm_metrics.py b/python/extensions/kpi/olt/olt_pm_metrics.py
new file mode 100644
index 0000000..ea2e0c8
--- /dev/null
+++ b/python/extensions/kpi/olt/olt_pm_metrics.py
@@ -0,0 +1,300 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig
+from voltha.extensions.kpi.adapter_pm_metrics import AdapterPmMetrics
+
+
+class OltPmMetrics(AdapterPmMetrics):
+    """
+    Shared OLT Device Adapter PM Metrics Manager
+
+    This class specifically addresses OLT general PM (health, ...).
+    Area-specific PM (OMCI, PON, UNI) is supported in encapsulated classes
+    accessible from this object.
+    """
+    def __init__(self, adapter_agent, device_id, logical_device_id,
+                 grouped=False, freq_override=False, **kwargs):
+        """
+        Initializer for shared OLT Device Adapter PM metrics
+
+        :param adapter_agent: (AdapterAgent) Adapter agent for the device
+        :param device_id: (str) Device ID
+        :param logical_device_id: (str) VOLTHA Logical Device ID
+        :param grouped: (bool) Flag indicating if statistics are managed as a group
+        :param freq_override: (bool) Flag indicating if frequency collection can be specified
+                                     on a per group basis
+        :param kwargs: (dict) Device Adapter specific values. For an OLT device adapter, the
+                              expected key-value pairs are listed below. If not provided, the
+                              associated PM statistics are not gathered:
+
+                              'nni-ports': List of objects that provide NNI (northbound) port statistics
+                              'pon-ports': List of objects that provide PON port statistics
+        """
+        super(OltPmMetrics, self).__init__(adapter_agent, device_id, logical_device_id,
+                                           grouped=grouped, freq_override=freq_override,
+                                           **kwargs)
+
+        # PM Config Types are COUNTER, GAUGE, and STATE
+        self.nni_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),      # Physical device interface ID/Port number
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+
+            ('rx_bytes', PmConfig.COUNTER),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_ucast_packets', PmConfig.COUNTER),
+            ('rx_mcast_packets', PmConfig.COUNTER),
+            ('rx_bcast_packets', PmConfig.COUNTER),
+            ('rx_error_packets', PmConfig.COUNTER),
+
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_ucast_packets', PmConfig.COUNTER),
+            ('tx_mcast_packets', PmConfig.COUNTER),
+            ('tx_bcast_packets', PmConfig.COUNTER),
+            ('tx_error_packets', PmConfig.COUNTER),
+            ('rx_crc_errors', PmConfig.COUNTER),
+            ('bip_errors', PmConfig.COUNTER),
+        }
+        self.pon_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),         # PON ID (0..n)
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_bip_errors', PmConfig.COUNTER),
+            ('in_service_onus', PmConfig.GAUGE),
+            ('closest_onu_distance', PmConfig.GAUGE)
+        }
+        self.onu_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),
+            ('onu_id', PmConfig.CONTEXT),
+
+            ('fiber_length', PmConfig.GAUGE),
+            ('equalization_delay', PmConfig.GAUGE),
+            ('rssi', PmConfig.GAUGE),
+        }
+        self.gem_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),
+            ('onu_id', PmConfig.CONTEXT),
+            ('gem_id', PmConfig.CONTEXT),
+
+            ('alloc_id', PmConfig.GAUGE),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+        }
+        self.nni_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                   for (m, t) in self.nni_pm_names}
+        self.pon_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                   for (m, t) in self.pon_pm_names}
+        self.onu_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                   for (m, t) in self.onu_pm_names}
+        self.gem_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                   for (m, t) in self.gem_pm_names}
+
+        self._nni_ports = kwargs.pop('nni-ports', None)
+        self._pon_ports = kwargs.pop('pon-ports', None)
+
+    def update(self, pm_config):
+        try:
+            # TODO: Test frequency override capability for a particular group
+            if self.default_freq != pm_config.default_freq:
+                # Update the callback to the new frequency.
+                self.default_freq = pm_config.default_freq
+                self.lc.stop()
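+                # Note: frequency values are in 1/10ths of a second, hence the
+                # division by 10 when restarting the collection timer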
+                self.lc.start(interval=self.default_freq / 10)
+
+            if pm_config.grouped:
+                for group in pm_config.groups:
+                    group_config = self.pm_group_metrics.get(group.group_name)
+                    if group_config is not None:
+                        group_config.enabled = group.enabled
+            else:
+                msg = 'There are no independent OLT metrics, only group metrics at this time'
+                raise NotImplementedError(msg)
+
+        except Exception as e:
+            self.log.exception('update-failure', e=e)
+            raise
+
+    def make_proto(self, pm_config=None):
+        if pm_config is None:
+            pm_config = PmConfigs(id=self.device_id, default_freq=self.default_freq,
+                                  grouped=self.grouped,
+                                  freq_override=self.freq_override)
+        metrics = set()
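+        # In the non-grouped case, 'metrics' tracks names already emitted so a
+        # metric that appears in more than one config table is only added once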
+        have_nni = self._nni_ports is not None and len(self._nni_ports) > 0
+        have_pon = self._pon_ports is not None and len(self._pon_ports) > 0
+
+        if self.grouped:
+            if have_nni:
+                pm_ether_stats = PmGroupConfig(group_name='Ethernet',
+                                               group_freq=self.default_freq,
+                                               enabled=True)
+                self.pm_group_metrics[pm_ether_stats.group_name] = pm_ether_stats
+
+            else:
+                pm_ether_stats = None
+
+            if have_pon:
+                pm_pon_stats = PmGroupConfig(group_name='PON',
+                                             group_freq=self.default_freq,
+                                             enabled=True)
+
+                pm_onu_stats = PmGroupConfig(group_name='ONU',
+                                             group_freq=self.default_freq,
+                                             enabled=True)
+
+                pm_gem_stats = PmGroupConfig(group_name='GEM',
+                                             group_freq=self.default_freq,
+                                             enabled=True)
+
+                self.pm_group_metrics[pm_pon_stats.group_name] = pm_pon_stats
+                self.pm_group_metrics[pm_onu_stats.group_name] = pm_onu_stats
+                self.pm_group_metrics[pm_gem_stats.group_name] = pm_gem_stats
+            else:
+                pm_pon_stats = None
+                pm_onu_stats = None
+                pm_gem_stats = None
+
+        else:
+            pm_ether_stats = pm_config if have_nni else None
+            pm_pon_stats = pm_config if have_pon else None
+            pm_onu_stats = pm_config if have_pon else None
+            pm_gem_stats = pm_config if have_pon else None
+
+        if have_nni:
+            for m in sorted(self.nni_metrics_config):
+                pm = self.nni_metrics_config[m]
+                if not self.grouped:
+                    if pm.name in metrics:
+                        continue
+                    metrics.add(pm.name)
+                pm_ether_stats.metrics.extend([PmConfig(name=pm.name,
+                                                        type=pm.type,
+                                                        enabled=pm.enabled)])
+        if have_pon:
+            for m in sorted(self.pon_metrics_config):
+                pm = self.pon_metrics_config[m]
+                if not self.grouped:
+                    if pm.name in metrics:
+                        continue
+                    metrics.add(pm.name)
+                pm_pon_stats.metrics.extend([PmConfig(name=pm.name,
+                                                      type=pm.type,
+                                                      enabled=pm.enabled)])
+
+            for m in sorted(self.onu_metrics_config):
+                pm = self.onu_metrics_config[m]
+                if not self.grouped:
+                    if pm.name in metrics:
+                        continue
+                    metrics.add(pm.name)
+                pm_onu_stats.metrics.extend([PmConfig(name=pm.name,
+                                                      type=pm.type,
+                                                      enabled=pm.enabled)])
+
+            for m in sorted(self.gem_metrics_config):
+                pm = self.gem_metrics_config[m]
+                if not self.grouped:
+                    if pm.name in metrics:
+                        continue
+                    metrics.add(pm.name)
+                pm_gem_stats.metrics.extend([PmConfig(name=pm.name,
+                                                      type=pm.type,
+                                                      enabled=pm.enabled)])
+        if self.grouped:
+            pm_config.groups.extend([stats for stats in
+                                     self.pm_group_metrics.itervalues()])
+
+        return pm_config
+
+    def collect_metrics(self, data=None):
+        """
+        Collect metrics for this adapter.
+
+        The data collected (or passed in) is a list of pairs/tuples.  Each
+        pair is composed of a MetricMetaData metadata-portion and list of MetricValuePairs
+        that contains a single individual metric or list of metrics if this is a
+        group metric.
+
+        This method is called for each adapter at a fixed frequency.
+        TODO: Currently all group metrics are collected on a single timer tick.
+              This needs to be fixed as independent group or instance collection is
+              desirable.
+
+        :param data: (list) Existing list of collected metrics (MetricInformation).
+                            This is provided to allow derived classes to call into
+                            further encapsulated classes.
+
+        :return: (list) metadata and metrics pairs - see description above
+        """
+        if data is None:
+            data = list()
+
+        # The 'Ethernet' group only exists if NNI ports were supplied
+        group_name = 'Ethernet'
+        group_config = self.pm_group_metrics.get(group_name)
+        if group_config is not None and group_config.enabled:
+            for port in self._nni_ports:
+                group_data = self.collect_group_metrics(group_name,
+                                                        port,
+                                                        self.nni_pm_names,
+                                                        self.nni_metrics_config)
+                if group_data is not None:
+                    data.append(group_data)
+
+        for port in self._pon_ports or []:
+            group_name = 'PON'
+            if self.pm_group_metrics[group_name].enabled:
+                group_data = self.collect_group_metrics(group_name,
+                                                        port,
+                                                        self.pon_pm_names,
+                                                        self.pon_metrics_config)
+                if group_data is not None:
+                    data.append(group_data)
+
+            for onu_id in port.onu_ids:
+                onu = port.onu(onu_id)
+                if onu is not None:
+                    group_name = 'ONU'
+                    if self.pm_group_metrics[group_name].enabled:
+                        group_data = self.collect_group_metrics(group_name,
+                                                                onu,
+                                                                self.onu_pm_names,
+                                                                self.onu_metrics_config)
+                        if group_data is not None:
+                            data.append(group_data)
+
+                    group_name = 'GEM'
+                    if self.pm_group_metrics[group_name].enabled:
+                        for gem in onu.gem_ports:
+                            if not gem.multicast:
+                                group_data = self.collect_group_metrics(group_name,
+                                                                        onu,
+                                                                        self.gem_pm_names,
+                                                                        self.gem_metrics_config)
+                                if group_data is not None:
+                                    data.append(group_data)
+
+                            # TODO: Do any multicast GEM PORT metrics here...
+        return data
diff --git a/python/extensions/kpi/onu/IntervalMetrics.md b/python/extensions/kpi/onu/IntervalMetrics.md
new file mode 100644
index 0000000..31169ae
--- /dev/null
+++ b/python/extensions/kpi/onu/IntervalMetrics.md
@@ -0,0 +1,302 @@
+# ONU OMCI Historical Interval PM Groups
+
+This document outlines the 15-minute historical interval groups currently supported by
+**OnuPmIntervalMetrics** in _onu_pm_interval_metrics.py_.
+
+## Performance Interval State Machine
+
+At OpenOMCI startup within an ONU Device Adapter, as soon as the OpenOMCI ME database has
+been declared to be in sync with the ONU's ME database, the Performance Interval State Machine is
+started for the ONU. The first task it performs is to synchronize the ONU's (hardware) time with
+that of the ONU Device Handler's (Linux container) so that a 15-minute interval boundary is established.
+
+The OpenOMCI PM interval state machine then examines managed entities created by the
+ONU autonomously, or created by OpenOMCI in response to an OMCI request from an ONU
+adapter, to determine if an appropriate 15-minute historical PM ME needs to be attached. The
+state machine also registers for notification of any create/delete requests at that
+point so that it can add/remove 15-minute historical PM MEs as services are applied or
+removed.
+
+Before adding a 15-minute historical PM ME, the ME capabilities of the ONU are
+examined to ensure that it can support that particular ME. This is important as the
+Ethernet Frame historical intervals are actually supported by up to 4 different MEs
+reporting basically the same data. This is detailed below in the _Ethernet Frame
+Performance Monitoring MEs_ section.
+
+## Timezone
+
+Upon startup, the ONU's clock is synchronized in UTC to that of the Linux container
+running the ONU Device Handler. Not all ONUs have the capability to set their calendar
+date (month, day, year), but they will set the time-of-day clock. For reporting of
+15-minute intervals, only an accurate 15-minute boundary is of any great importance.
+
+## Interval Reporting
+
+After the ONU time synchronization request is made, the first reported interval is
+scheduled to occur at the next 15-minute boundary.  For example, if the OpenOMCI
+state machine synchronizes the ONU's time at 12:10, the first full interval report
+covers the interval ending at the 12:15 boundary.
+
+## Common Elements for All Reported MEs
+
+In addition to the counter elements (attributes) reported in each ME, every reported
+historical interval includes the following elements as context values in the KPI
+Event metadata field.  Each value is reported as a _string_ per the Protobuf structure,
+but they are actually integers/floats.
+
+| Label               | Type         | Description |
+| ------------------: | :----------: | :---------- |
+| class_id            | int, 16-bits | The ME Class ID of the PM Interval ME |
+| entity_id           | int, 16-bits | The OMCI Entity Instance of the particular PM Interval ME |
+| interval_end_time   | int, 8-bits  | Identifies the most recently finished 15-minute interval. This attribute is set to zero when a synchronize time request is performed by OpenOMCI.  This counter rolls over from 255 to 0 upon saturation. |
+| interval_start_time | int, 64-bits | The UTC timestamp (seconds since epoch) rounded down to the start time of the specific interval |
+
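+A minimal sketch of how these two time values relate, assuming standard 15-minute
+boundaries and a POSIX clock (the variable names here are illustrative, not from the
+library itself):
+
+```python
+import time
+
+INTERVAL = 15 * 60  # 15-minute interval, in seconds
+
+now = int(time.time())                               # current UTC seconds
+interval_start_time = (now // INTERVAL) * INTERVAL   # rounded down to boundary
+seconds_until_next_boundary = INTERVAL - (now % INTERVAL)
+```
+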
+# Supported 15-Minute Historical Performance Monitoring MEs
+
+The currently supported 15-minute historical performance monitoring MEs are detailed
+in the sections below.
+
+## Ethernet Frame Performance Monitoring MEs
+
+The OMCI Ethernet PM supported by OpenOMCI includes 4 possible MEs.  These MEs are attached to
+the MAC Bridge Port Configuration MEs for the ONU. For downstream data, the ME is placed on the
+MAC Bridge Port Configuration ME closest to the ANI Port. For upstream data, the ME is placed
+on the MAC Bridge Port Configuration ME closest to the associated UNI.
+
+OpenOMCI will first attempt to use the Extended Performance Monitoring MEs if they are
+supported by the ONU.  First the 64-bit counter version will be attempted and then the 32-bit
+counter version as a fallback. If neither of the Extended Performance Monitoring MEs is supported,
+the appropriate Upstream or Downstream Monitoring ME will be used, as sketched below.
+
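+A minimal sketch of that fallback order, using the class IDs from the table below and
+assuming `supported` is the set of ME class IDs advertised in the ONU's capabilities
+(the helper name is illustrative, not part of OpenOMCI):
+
+```python
+PREFERRED_EXTENDED = [426,   # Extended PM, 64-bit counters (tried first)
+                      334]   # Extended PM, 32-bit counters (fallback)
+DIRECTIONAL = {'upstream': 322, 'downstream': 321}
+
+def pick_ethernet_pm_me(supported, direction):
+    for class_id in PREFERRED_EXTENDED:
+        if class_id in supported:
+            return class_id
+    return DIRECTIONAL[direction]
+```
+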
+### ME Information
+
+The table below describes the four Ethernet Frame Performance Monitoring MEs and provides their
+counter width (in bytes) and ME Class ID.
+
+| ME Name                                                     | Class ID | Counter Width |
+| ----------------------------------------------------------: | :------: | :---:   |
+| Ethernet Frame Extended Performance Monitoring 64-Bit       |   426    |  64-bit |
+| Ethernet Frame Extended Performance Monitoring              |   334    |  32-bit |
+| Ethernet Frame Upstream Performance Monitoring History Data |   322    |  32-bit |
+| Ethernet Frame Downstream Performance Monitoring History Data |  321   |  32-bit |
+
+**Metric Group Name**: Ethernet_Bridge_Port_History  
+**Default Collection**: True  
+**Default Interval**:  15 minutes & aligned to wall-clock. Read-Only
+
+### Counter Information
+
+Each of the Ethernet Frame PM MEs contains the following counters:
+
+| Attribute Name      | Description |
+| ------------------: | :-----------|
+| drop_events         | The total number of events in which packets were dropped due to a lack of resources. This is not necessarily the number of packets dropped; it is the number of times this event was detected. |
+| octets              | The total number of upstream octets received, including those in bad packets, excluding framing bits, but including FCS. |
+| packets             | The total number of upstream packets received, including bad packets, broadcast packets and multicast packets. |
+| broadcast_packets   | The total number of upstream good packets received that were directed to the broadcast address. This does not include multicast packets. |
+| multicast_packets   | The total number of upstream good packets received that were directed to a multicast address. This does not include broadcast packets. |
+| crc_errored_packets | The total number of upstream packets received that had a length (excluding framing bits, but including FCS octets) of between 64 octets and 1518 octets, inclusive, but had either a bad FCS with an integral number of octets (FCS error) or a bad FCS with a non-integral number of octets (alignment error). |
+| undersize_packets   | The total number of upstream packets received that were less than 64 octets long, but were otherwise well formed (excluding framing bits, but including FCS). |
+| oversize_packets    | The total number of upstream packets received that were longer than 1518 octets (excluding framing bits, but including FCS) and were otherwise well formed. NOTE 2 – If 2 000 byte Ethernet frames are supported, counts in this performance parameter are not necessarily errors. |
+| 64_octets           | The total number of upstream received packets (including bad packets) that were 64 octets long, excluding framing bits but including FCS. |
+| 65_to_127_octets    | The total number of upstream received packets (including bad packets) that were 65..127 octets long, excluding framing bits but including FCS. |
+| 128_to_255_octets   | The total number of upstream packets (including bad packets) received that were 128..255 octets long, excluding framing bits but including FCS. |
+| 256_to_511_octets   | The total number of upstream packets (including bad packets) received that were 256..511 octets long, excluding framing bits but including FCS. |
+| 512_to_1023_octets  | The total number of upstream packets (including bad packets) received that were 512..1023 octets long, excluding framing bits but including FCS. |
+| 1024_to_1518_octets | The total number of upstream packets (including bad packets) received that were 1024..1518 octets long, excluding framing bits, but including FCS. |
+
+## Ethernet PM Monitoring History Data (Class ID 24)
+
+This managed entity collects some of the performance monitoring data for a physical
+Ethernet interface. Instances of this managed entity are created and deleted by the OLT.
+
+**Metric Group Name**: Ethernet_UNI_History  
+**Default Collection**: True  
+**Default Interval**:  15 minutes & aligned to wall-clock. Read-Only
+
+### Application
+
+For performance monitoring of Ethernet UNI.
+
+### Relationships
+
+An instance of this managed entity is associated with an instance of the physical path
+termination point Ethernet UNI.                 
+
+### Attributes
+All counters are 32-bits wide.
+
+| Attribute Name      | Description |
+| ------------------: | :-----------|
+| fcs_errors                        | This attribute counts frames received on a particular interface that were an integral number of octets in length but failed the frame check sequence (FCS) check. The count is incremented when the MAC service returns the frameCheckError status to the link layer control (LLC) or other MAC user. Received frames for which multiple error conditions are obtained are counted according to the error status presented to the LLC. |
+| excessive_collision_counter       | This attribute counts frames whose transmission failed due to excessive collisions. |
+| late_collision_counter            | This attribute counts the number of times that a collision was detected later than 512 bit times into the transmission of a packet. |
+| frames_too_long                   | This attribute counts received frames that exceeded the maximum permitted frame size. The count is incremented when the MAC service returns the frameTooLong status to the LLC. |
+| buffer_overflows_on_rx            | This attribute counts the number of times that the receive buffer overflowed. |
+| buffer_overflows_on_tx            | This attribute counts the number of times that the transmit buffer overflowed. |
+| single_collision_frame_counter    | This attribute counts successfully transmitted frames whose transmission was delayed by exactly one collision. |
+| multiple_collisions_frame_counter | This attribute counts successfully transmitted frames whose transmission was delayed by more than one collision. |
+| sqe_counter                       | This attribute counts the number of times that the SQE test error message was generated by the PLS sublayer. |
+| deferred_tx_counter               | This attribute counts frames whose first transmission attempt was delayed because the medium was busy. The count does not include frames involved in collisions. |
+| internal_mac_tx_error_counter     | This attribute counts frames whose transmission failed due to an internal MAC sublayer transmit error. |
+| carrier_sense_error_counter       | This attribute counts the number of times that carrier sense was lost or never asserted when attempting to transmit a frame. |
+| alignment_error_counter           | This attribute counts received frames that were not an integral number of octets in length and did not pass the FCS check. |
+| internal_mac_rx_error_counter     | This attribute counts frames whose reception failed due to an internal MAC sublayer receive error. |
+
+## FEC Performance Monitoring History Data (Class ID 312)
+
+This managed entity collects performance monitoring data associated with PON downstream FEC
+counters. Instances of this managed entity are created and deleted by the OLT.
+
+**Metric Group Name**: FEC_History  
+**Default Collection**: True  
+**Default Interval**:  15 minutes & aligned to wall-clock. Read-Only
+
+### Application
+This managed entity collects performance monitoring data associated with PON downstream FEC
+counters.
+
+### Relationships
+An instance of this managed entity is associated with an instance of the ANI-G managed entity.
+
+### Attributes
+
+| Attribute Name           | Counter Width | Description |
+| -----------------------: | :-----: | :-----------|
+| corrected_bytes          | 32-bits | This attribute counts the number of bytes that were corrected by the FEC function. |
+| corrected_code_words     | 32-bits | This attribute counts the code words that were corrected by the FEC function. |
+| uncorrectable_code_words | 32-bits | This attribute counts errored code words that could not be corrected by the FEC function. |
+| total_code_words         | 32-bits | This attribute counts the total received code words. |
+| fec_seconds              | 16-bits | This attribute counts seconds during which there was a forward error correction anomaly. |
+
+
+## GEM Port Network CTP Monitoring History Data (Class ID 341)
+
+This managed entity collects GEM frame performance monitoring data associated with a GEM port
+network CTP. Instances of this managed entity are created and deleted by the OLT.
+
+Note 1: One might expect to find some form of impaired or discarded frame count associated with
+a GEM port. However, the only impairment that might be detected at the GEM frame level would be
+a corrupted GEM frame header. In this case, no part of the header could be considered reliable
+including the port ID. For this reason, there is no impaired or discarded frame count in this ME.
+
+Note 2: This managed entity replaces the GEM port performance history data managed entity and
+is preferred for new implementations.
+
+**Metric Group Name**: GEM_Port_History  
+**Default Collection**: False  
+**Default Interval**:  15 minutes & aligned to wall-clock. Read-Only
+
+### Relationships
+
+An instance of this managed entity is associated with an instance of the GEM port network CTP
+managed entity.                
+
+### Attributes
+
+| Attribute Name            | Counter Width | Description |
+| ------------------------: | :-----: | :-----------|
+| transmitted_gem_frames    | 32-bits | This attribute counts GEM frames transmitted on the monitored GEM port. |
+| received_gem_frames       | 32-bits | This attribute counts GEM frames received correctly on the monitored GEM port. A correctly received GEM frame is one that does not contain uncorrectable errors and has a valid HEC. |
+| received_payload_bytes    | 64-bits | This attribute counts user payload bytes received on the monitored GEM port. |
+| transmitted_payload_bytes | 64-bits | This attribute counts user payload bytes transmitted on the monitored GEM port. |
+| encryption_key_errors     | 32-bits | This attribute is defined in ITU-T G.987 systems only. It counts GEM frames with erroneous encryption key indexes. If the GEM port is not encrypted, this attribute counts any frame with a key index not equal to 0. If the GEM port is encrypted, this attribute counts any frame whose key index specifies a key that is not known to the ONU. |
+
+Note 3: GEM PM ignores idle GEM frames.
+
+Note 4: GEM PM counts each non-idle GEM frame, whether it contains an entire user frame or only
+a fragment of a user frame.
+
+## XgPon TC Performance Monitoring History Data (Class ID 344)
+
+This managed entity collects performance monitoring data associated with the XG-PON
+transmission convergence layer, as defined in ITU-T G.987.3.
+
+**Metric Group Name**: xgPON_TC_History  
+**Default Collection**: False  
+**Default Interval**:  15 minutes & aligned to wall-clock. Read-Only
+
+### Relationships
+An instance of this managed entity is associated with an ANI-G.
+
+### Attributes
+
+All counters are 32-bits wide.
+
+| Attribute Name            | Description |
+| ------------------------: | :-----------|
+| psbd_hec_error_count      | This attribute counts HEC errors in any of the fields of the downstream physical sync block. |
+| xgtc_hec_error_count      | This attribute counts HEC errors detected in the XGTC header. |
+| unknown_profile_count     | This attribute counts the number of grants received whose specified profile was not known to the ONU. |
+| transmitted_xgem_frames   | This attribute counts the number of non-idle XGEM frames transmitted. If an SDU is fragmented, each fragment is an XGEM frame and is counted as such. |
+| fragment_xgem_frames      | This attribute counts the number of XGEM frames that represent fragmented SDUs, as indicated by the LF bit = 0. |
+| xgem_hec_lost_words_count | This attribute counts the number of four-byte words lost because of an XGEM frame HEC error. In general, all XGTC payload following the error is lost, until the next PSBd event. |
+| xgem_key_errors           | This attribute counts the number of downstream XGEM frames received with an invalid key specification. The key may be invalid for several reasons. |
+| xgem_hec_error_count      | This attribute counts the number of instances of an XGEM frame HEC error. |
+
+## XgPon Downstream Performance Monitoring History Data (Class ID 345)
+
+This managed entity collects performance monitoring data associated with the XG-PON
+transmission convergence layer, as defined in ITU-T G.987.3. It collects counters associated with
+downstream PLOAM and OMCI messages.
+
+**Metric Group Name**: xgPON_Downstream_History  
+**Default Collection**: False  
+**Default Interval**:  15 minutes & aligned to wall-clock. Read-Only
+
+### Relationships
+
+An instance of this managed entity is associated with an ANI-G.           
+
+### Attributes
+     
+All counters are 32-bits wide.
+
+| Attribute Name                          | Description |
+| --------------------------------------: | :-----------|
+| ploam_mic_error_count                   | This attribute counts MIC errors detected in downstream PLOAM messages, either directed to this ONU or broadcast to all ONUs. |
+| downstream_ploam_messages_count         | This attribute counts PLOAM messages received, either directed to this ONU or broadcast to all ONUs. |
+| profile_messages_received               | This attribute counts the number of profile messages received, either directed to this ONU or broadcast to all ONUs. |
+| ranging_time_messages_received          | This attribute counts the number of ranging_time messages received, either directed to this ONU or broadcast to all ONUs. |
+| deactivate_onu_id_messages_received     | This attribute counts the number of deactivate_ONU-ID messages received, either directed to this ONU or broadcast to all ONUs. Deactivate_ONU-ID messages do not reset this counter. |
+| disable_serial_number_messages_received | This attribute counts the number of disable_serial_number messages received, whose serial number specified this ONU. |
+| request_registration_messages_received  | This attribute counts the number of request_registration messages received. |
+| assign_alloc_id_messages_received       | This attribute counts the number of assign_alloc-ID messages received. |
+| key_control_messages_received           | This attribute counts the number of key_control messages received, either directed to this ONU or broadcast to all ONUs. |
+| sleep_allow_messages_received           | This attribute counts the number of sleep_allow messages received, either directed to this ONU or broadcast to all ONUs. |
+| baseline_omci_messages_received_count   | This attribute counts the number of OMCI messages received in the baseline message format. |
+| extended_omci_messages_received_count   | This attribute counts the number of OMCI messages received in the extended message format. |
+| assign_onu_id_messages_received         | This attribute counts the number of assign_ONU-ID messages received since the last re-boot. |
+| omci_mic_error_count                    | This attribute counts MIC errors detected in OMCI messages directed to this ONU. |
+
+## XgPon Upstream Performance Monitoring History Data (Class ID 346)
+
+This managed entity collects performance monitoring data associated with the XG-PON
+transmission convergence layer, as defined in ITU-T G.987.3. It counts upstream PLOAM
+messages transmitted by the ONU.
+
+**Metric Group Name**: xgPON_Upstream_History  
+**Default Collection**: False  
+**Default Interval**:  15 minutes & aligned to wall-clock. Read-Only
+
+### Relationships
+
+An instance of this managed entity is associated with an ANI-G.          
+
+### Attributes
+
+All counters are 32-bits wide.
+
+| Attribute Name                  | Description |
+| ------------------------------: | :-----------|
+| upstream_ploam_message_count    | This attribute counts PLOAM messages transmitted upstream, excluding acknowledge messages. |
+| serial_number_onu_message_count | This attribute counts Serial_number_ONU PLOAM messages transmitted. |
+| registration_message_count      | This attribute counts registration PLOAM messages transmitted. |
+| key_report_message_count        | This attribute counts key_report PLOAM messages transmitted. |
+| acknowledge_message_count       | This attribute counts acknowledge PLOAM messages transmitted. It includes all forms of acknowledgement, including those transmitted in response to a PLOAM grant when the ONU has nothing to send. |
+| sleep_request_message_count     | This attribute counts sleep_request PLOAM messages transmitted. |
+
+# Remaining Work Items
+
+- The enable/disable of a PM group (CLI/NBI) should control whether or not a PM interval ME is created and collected.
\ No newline at end of file
diff --git a/python/extensions/kpi/onu/README.md b/python/extensions/kpi/onu/README.md
new file mode 100644
index 0000000..4b9798e
--- /dev/null
+++ b/python/extensions/kpi/onu/README.md
@@ -0,0 +1,123 @@
+# ONU PM Metrics
+
+
+**THESE ARE PRELIMINARY METRIC GROUPS**. Work is needed by the VOLTHA community to reach a consensus on the
+actual metrics that will be provided.  **Also**, please read the **Remaining Work Items** sections of each
+README file.
+
+
+This document outlines the non-interval metrics collected for the ONU by the OpenOMCI code.  These
+are primarily collected from one of the many OMCI Managed Entities.
+
+## Format on the Kafka bus
+
+The format of the ONU KPI Events is detailed in the [Basic KPI Format (**KpiEvent2**)](../README.md)
+section of this document's parent directory, which describes the wire format on the bus. This document
+primarily provides the group metric information for ONU KPIs and the associated metadata context information.
+
+**All** metric values reported by the library are reported as *float*s. The context and metric tables
+listed in the sections below report the type as initially collected by the ONU Device Adapters.
+
+# ONU PM Metric Groups
+
+The following sections outline the KPI metrics gathered by OpenOMCI on behalf of the ONU. If an ONU
+does not support a specific metric in a group, it will not report that metric. This is preferable to
+reporting the metric with a constant value of 0.0 (which could be misleading).
+
+**Note**: Currently all metric groups are collected and reported at one time (only one collection
+timer). This interval is controlled by the VOLTHA shared KPI library's PM_Config default_freq value,
+which will be set to _60 seconds_. This single-collection deficiency will be corrected in the near future.
+
+## ANI Optical KPI Metrics
+
+This group reports the ONU's Optical Power metrics for each PON physical port as reported by the
+OMCI Managed Entity ANI-G (_Class ID #263_).
+
+**Metric Group Name**: PON_Optical  
+**Default Collection**: True  
+**Default Interval**:  15 minutes
+
+**Metadata Context items**
+
+| key     | value   | Notes |
+| :-----: | :------ | :---- |
+| intf_id | integer | Physical device interface port ID for this PON/ANI port |
+
+The port ID is extracted from the lower 8-bits of the ANI-G Managed Entity ID and indicates
+the physical position of the PON interface.
+
+**Metrics**
+
+| key                     | type / size  | Notes |
+| :---------------------: | :----------- | :---- |
+| transmit_power          | int, 16-bits | This attribute reports the current measurement of mean optical launch power. Its value is a 2s complement integer referred to 1 mW (i.e., dBm), with 0.002 dB granularity |
+| receive_power           | int, 16-bits | This attribute reports the current measurement of the total downstream optical signal level. Its value is a 2s complement integer referred to 1 mW (i.e., dBm), with 0.002 dB granularity. |
+
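+Since the raw values are 2s-complement words with 0.002 dB granularity, a reading can
+be converted to dBm as sketched below (an illustrative helper, not part of the library):
+
+```python
+def ani_g_power_to_dbm(raw, width=16):
+    """Convert a 2s-complement ANI-G power reading to dBm (0.002 dB steps)."""
+    if raw >= 1 << (width - 1):   # negative value in 2s-complement form
+        raw -= 1 << width
+    return raw * 0.002
+```
+
+For example, a raw reading of 0xE890 (-6000) corresponds to -12.0 dBm.
+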
+**NOTE**: The following metrics were also requested for the PON interface in
+[VOL-935](https://jira.opencord.org/browse/VOL-935) but they are not available through
+the OpenOMCI set of Managed Entities. However, there are alarms available through the
+ANI-G ME that relate to these items:
+
+ - ONT Optical module/transceiver temperature
+ - ONT Optical module/transceiver voltage
+ - ONT Laser bias current
+ 
+TR-287 does reference mechanisms for OLT and ONU optical link monitoring that cover these
+three items, but those interfaces are not yet available in VOLTHA, and retrieval of these values
+from an ONU may be difficult since the only defined interface for retrieving the data is OMCI.
+
+## UNI KPI Metrics
+
+This group reports metrics associated with the customer-facing UNI port of the ONU
+and is collected from the OMCI Physical Path Termination Point Ethernet UNI (_Class ID #11_)
+and UNI-G (_Class ID #264_) Managed Entities.
+
+**Metric Group Name**: UNI_Status  
+**Default Collection**: True  
+**Default Interval**:  15 minutes
+
+**Metadata Context items**
+
+| key     | value   | Notes |
+| :-----: | :------ | :---- |
+| intf_id | integer | Physical device interface port ID for this UNI port |
+
+The port ID is extracted from the UNI-G Managed Entity ID and indicates the 
+physical position of the UNI interface.  This ID is implicitly linked to the
+associated PPTP Ethernet UNI ME.
+
+**Metrics**
+
+| key              | type / size | From  | Notes |
+| :--------------: | :---------- | :---- | :---- |
+| ethernet_type    | int, gauge  | PPTP  | This attribute represents the sensed interface type as defined in the table below |
+| oper_status      | boolean     | PPTP  | Link status/Operational Status: Link up (1), Link down (0) |
+| pptp_admin_state | boolean     | PPTP  | Administrative state: Locked/disabled (1), Unlocked/enabled (0) |
+| uni_admin_state  | boolean     | UNI-G | Administrative state: Locked/disabled (1), Unlocked/enabled (0) |
+
+**Sensed Ethernet Type Table**
+
+| value | Rate             | Duplex |
+| ----: | :--------------: | :--- |
+|  0x00 | Unknown          | n/a  |
+|  0x01 | 10BASE-T         | full |
+|  0x02 | 100BASE-T        | full |
+|  0x03 | Gigabit Ethernet | full |
+|  0x04 | 10Gb/s Ethernet  | full |
+|  0x05 | 2.5Gb/s Ethernet | full |
+|  0x06 | 5Gb/s Ethernet   | full |
+|  0x07 | 25Gb/s Ethernet  | full |
+|  0x08 | 40Gb/s  Ethernet | full |
+|  0x11 | 10BASE-T         | half |
+|  0x12 | 100BASE-T        | half |
+|  0x13 | Gigabit Ethernet | half |
+
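+A hypothetical decode of the sensed type values above as a Python mapping:
+
+```python
+# value -> (rate, duplex); illustrative only, derived from the table above
+SENSED_ETHERNET_TYPE = {
+    0x00: ('Unknown', None),           0x01: ('10BASE-T', 'full'),
+    0x02: ('100BASE-T', 'full'),       0x03: ('Gigabit Ethernet', 'full'),
+    0x04: ('10Gb/s Ethernet', 'full'), 0x05: ('2.5Gb/s Ethernet', 'full'),
+    0x06: ('5Gb/s Ethernet', 'full'),  0x07: ('25Gb/s Ethernet', 'full'),
+    0x08: ('40Gb/s Ethernet', 'full'), 0x11: ('10BASE-T', 'half'),
+    0x12: ('100BASE-T', 'half'),       0x13: ('Gigabit Ethernet', 'half'),
+}
+```
+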
+# Remaining Work Items
+
+This initial code is only preliminary work. See the [Remaining Work Items](../README.md)
+section of this document's parent directory for a list of remaining tasks. In addition to these
+work items, the interval statistics [README](./IntervalMetrics.md) may have additional work
+items remaining.
+
+
+TODO: For each group, list if the default is enabled/disabled
\ No newline at end of file
diff --git a/python/extensions/kpi/onu/__init__.py b/python/extensions/kpi/onu/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/kpi/onu/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/kpi/onu/onu_omci_pm.py b/python/extensions/kpi/onu/onu_omci_pm.py
new file mode 100644
index 0000000..c186bbb
--- /dev/null
+++ b/python/extensions/kpi/onu/onu_omci_pm.py
@@ -0,0 +1,317 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import arrow
+from voltha.protos.device_pb2 import PmConfig, PmGroupConfig
+from voltha.protos.events_pb2 import MetricInformation, MetricMetaData
+from voltha.extensions.kpi.adapter_pm_metrics import AdapterPmMetrics
+from voltha.extensions.kpi.onu.onu_pm_interval_metrics import OnuPmIntervalMetrics
+from voltha.extensions.omci.omci_entities import UniG
+from voltha.extensions.omci.omci_entities import PptpEthernetUni
+
+
+class OnuOmciPmMetrics(AdapterPmMetrics):
+    """ ONU OMCI related metrics """
+
+    # Metric default settings
+    #
+    #  Frequency values are in 1/10ths of a second
+    #
+    OMCI_DEV_KEY = 'omci-onu-dev'
+    OMCI_CC_GROUP_NAME = 'OMCI_CC'
+    DEFAULT_OMCI_CC_ENABLED = False
+    DEFAULT_OMCI_CC_FREQUENCY = (2 * 60) * 10
+
+    OPTICAL_GROUP_NAME = 'PON_Optical'
+    DEFAULT_OPTICAL_ENABLED = True
+    DEFAULT_OPTICAL_FREQUENCY = (15 * 60 * 10)
+
+    UNI_STATUS_GROUP_NAME = 'UNI_Status'
+    DEFAULT_UNI_STATUS_ENABLED = True
+    DEFAULT_UNI_STATUS_FREQUENCY = (15 * 60 * 10)
+
+    def __init__(self, adapter_agent, device_id, logical_device_id,
+                 grouped=False, freq_override=False, **kwargs):
+        """
+        Initializer for shared ONU Device Adapter OMCI CC PM metrics
+
+        :param adapter_agent: (AdapterAgent) Adapter agent for the device
+        :param device_id: (str) Device ID
+        :param logical_device_id: (str) VOLTHA Logical Device ID
+        :param grouped: (bool) Flag indicating if statistics are managed as a group
+        :param freq_override: (bool) Flag indicating if frequency collection can be specified
+                                     on a per group basis
+        :param kwargs: (dict) Device Adapter specific values. For an ONU Device adapter, the
+                              expected key-value pairs are listed below. If not provided, the
+                              associated PM statistics are not gathered:
+
+                              'omci-onu-dev': Reference to the OMCI OnuDeviceEntry object for
+                                         retrieval of OpenOMCI Communications channel statistics
+                                         and retrieval of polled statistics.
+        """
+        super(OnuOmciPmMetrics, self).__init__(adapter_agent, device_id, logical_device_id,
+                                               grouped=grouped, freq_override=freq_override,
+                                               **kwargs)
+
+        self._omci_onu_device = kwargs.pop(OnuOmciPmMetrics.OMCI_DEV_KEY, None)
+        self._omci_cc = self._omci_onu_device.omci_cc if self._omci_onu_device is not None else None
+
+        self.omci_cc_pm_names = {
+            ('tx_frames', PmConfig.COUNTER),
+            ('tx_errors', PmConfig.COUNTER),
+            ('rx_frames', PmConfig.COUNTER),
+            ('rx_unknown_tid', PmConfig.COUNTER),
+            ('rx_onu_frames', PmConfig.COUNTER),        # Rx ONU autonomous messages
+            ('rx_unknown_me', PmConfig.COUNTER),        # Managed Entities without a decode definition
+            ('rx_timeouts', PmConfig.COUNTER),
+            ('rx_late', PmConfig.COUNTER),
+            ('consecutive_errors', PmConfig.COUNTER),
+            ('reply_min', PmConfig.GAUGE),      # Milliseconds
+            ('reply_max', PmConfig.GAUGE),      # Milliseconds
+            ('reply_average', PmConfig.GAUGE),  # Milliseconds
+            ('hp_tx_queue_len', PmConfig.GAUGE),
+            ('lp_tx_queue_len', PmConfig.GAUGE),
+            ('max_hp_tx_queue', PmConfig.GAUGE),
+            ('max_lp_tx_queue', PmConfig.GAUGE),
+        }
+        self.omci_cc_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                       for (m, t) in self.omci_cc_pm_names}
+
+        self.omci_optical_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),
+
+            ('transmit_power', PmConfig.GAUGE),
+            ('receive_power', PmConfig.GAUGE),
+        }
+        self.omci_optical_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                            for (m, t) in self.omci_optical_pm_names}
+
+        self.omci_uni_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),
+
+            ('ethernet_type', PmConfig.GAUGE),     # PPTP Ethernet ME
+            ('oper_status', PmConfig.GAUGE),       # PPTP Ethernet ME
+            ('pptp_admin_state', PmConfig.GAUGE),  # PPTP Ethernet ME
+            ('uni_admin_state', PmConfig.GAUGE),   # UNI-G ME
+        }
+        self.omci_uni_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                        for (m, t) in self.omci_uni_pm_names}
+
+        self.openomci_interval_pm = OnuPmIntervalMetrics(adapter_agent, device_id, logical_device_id)
+
+    def update(self, pm_config):
+        # TODO: Test frequency override capability for a particular group
+        if self.default_freq != pm_config.default_freq:
+            # Update the callback to the new frequency.
+            self.default_freq = pm_config.default_freq
+            self.lc.stop()
+            self.lc.start(interval=self.default_freq / 10)
+
+        if pm_config.grouped:
+            for group in pm_config.groups:
+                group_config = self.pm_group_metrics.get(group.group_name)
+                if group_config is not None:
+                    group_config.enabled = group.enabled
+        else:
+            msg = 'There are no independent OMCI metrics, only group metrics at this time'
+            raise NotImplementedError(msg)
+
+        self.openomci_interval_pm.update(pm_config)
+
+    def make_proto(self, pm_config=None):
+        assert pm_config is not None
+
+        # OMCI only supports grouped metrics
+        if self._omci_onu_device is None or not self.grouped:
+            return pm_config
+
+        pm_omci_cc_stats = PmGroupConfig(group_name=OnuOmciPmMetrics.OMCI_CC_GROUP_NAME,
+                                         group_freq=OnuOmciPmMetrics.DEFAULT_OMCI_CC_FREQUENCY,
+                                         enabled=OnuOmciPmMetrics.DEFAULT_OMCI_CC_ENABLED)
+        self.pm_group_metrics[pm_omci_cc_stats.group_name] = pm_omci_cc_stats
+
+        pm_omci_optical_stats = PmGroupConfig(group_name=OnuOmciPmMetrics.OPTICAL_GROUP_NAME,
+                                              group_freq=OnuOmciPmMetrics.DEFAULT_OPTICAL_FREQUENCY,
+                                              enabled=OnuOmciPmMetrics.DEFAULT_OPTICAL_ENABLED)
+        self.pm_group_metrics[pm_omci_optical_stats.group_name] = pm_omci_optical_stats
+
+        pm_omci_uni_stats = PmGroupConfig(group_name=OnuOmciPmMetrics.UNI_STATUS_GROUP_NAME,
+                                          group_freq=OnuOmciPmMetrics.DEFAULT_UNI_STATUS_FREQUENCY,
+                                          enabled=OnuOmciPmMetrics.DEFAULT_UNI_STATUS_ENABLED)
+        self.pm_group_metrics[pm_omci_uni_stats.group_name] = pm_omci_uni_stats
+
+        stats_and_config = [(pm_omci_cc_stats, self.omci_cc_metrics_config),
+                            (pm_omci_optical_stats, self.omci_optical_metrics_config),
+                            (pm_omci_uni_stats, self.omci_uni_metrics_config)]
+
+        for stats, config in stats_and_config:
+            for m in sorted(config):
+                pm = config[m]
+                stats.metrics.extend([PmConfig(name=pm.name,
+                                               type=pm.type,
+                                               enabled=pm.enabled)])
+            pm_config.groups.extend([stats])
+
+        # Also create OMCI Interval PM configs
+        return self.openomci_interval_pm.make_proto(pm_config)
+
+    def collect_metrics(self, data=None):
+        """
+        Collect metrics for this adapter.
+
+        The data collected (or passed in) is a list of pairs/tuples.  Each
+        pair is composed of a MetricMetaData metadata portion and a list of
+        MetricValuePairs that contains either a single metric or a list of
+        metrics if this is a group metric.
+
+        This method is called for each adapter at a fixed frequency.
+        TODO: Currently all group metrics are collected on a single timer tick.
+              This needs to be fixed as independent group or instance collection is
+              desirable.
+
+        :param data: (list) Existing list of collected metrics (MetricInformation).
+                            This is provided to allow derived classes to call into
+                            further encapsulated classes.
+
+        :return: (list) metadata and metrics pairs - see description above
+        """
+        if data is None:
+            data = list()
+
+        # Note: Interval PM collection is done autonomously, not through this method
+
+        if self._omci_cc is not None:
+            group_name = OnuOmciPmMetrics.OMCI_CC_GROUP_NAME
+            if self.pm_group_metrics[group_name].enabled:
+                group_data = self.collect_group_metrics(group_name,
+                                                        self._omci_cc,
+                                                        self.omci_cc_pm_names,
+                                                        self.omci_cc_metrics_config)
+                if group_data is not None:
+                    data.append(group_data)
+
+            # Optical and UNI data is collected on a per-port basis
+            data.extend(self.collect_optical_metrics())
+            data.extend(self.collect_uni_status_metrics())
+
+        return data
+
+    def collect_optical_metrics(self):
+        """
+        Collect the metrics for optical information from all ANI/PONs
+
+        :return: (list) collected metrics (MetricInformation)
+        """
+        now = self._omci_onu_device.timestamp
+
+        group_name = OnuOmciPmMetrics.OPTICAL_GROUP_NAME
+        if now is None or not self.pm_group_metrics[group_name].enabled:
+            return []
+
+        # Scan all ANI-G ports
+        ani_g_entities = self._omci_onu_device.configuration.ani_g_entities
+        ani_g_entities_ids = ani_g_entities.keys() if ani_g_entities is not None else None
+        metrics_info = []
+
+        if ani_g_entities_ids is not None and len(ani_g_entities_ids):
+            from voltha.extensions.omci.omci_entities import AniG
+            ani_g_items = ['optical_signal_level', 'transmit_optical_level']
+
+            for entity_id in ani_g_entities_ids:
+                metrics = dict()
+                data = self._omci_onu_device.query_mib(class_id=AniG.class_id,
+                                                       instance_id=entity_id,
+                                                       attributes=ani_g_items)
+                if len(data):
+                    if 'optical_signal_level' in data:
+                        metrics['receive_power'] = data['optical_signal_level']
+
+                    if 'transmit_optical_level' in data:
+                        metrics['transmit_power'] = data['transmit_optical_level']
+
+                if len(metrics):
+                    metric_data = MetricInformation(metadata=MetricMetaData(title=group_name,
+                                                                            ts=now,
+                                                                            logical_device_id=self.logical_device_id,
+                                                                            serial_no=self.serial_number,
+                                                                            device_id=self.device_id,
+                                                                            context={
+                                                                                'intf_id': str(entity_id)
+                                                                            }),
+                                                    metrics=metrics)
+                    metrics_info.append(metric_data)
+
+        return metrics_info
+
+    def collect_uni_status_metrics(self):
+        """
+        Collect the UNI status metrics from all UNI-G and PPTP Ethernet UNI ports
+
+        :return: (list) collected metrics (MetricInformation)
+        """
+        now = self._omci_onu_device.timestamp
+
+        group_name = OnuOmciPmMetrics.UNI_STATUS_GROUP_NAME
+        if now is None or not self.pm_group_metrics[group_name].enabled:
+            return []
+
+        # Scan all UNI-G and PPTP ports
+        uni_g_entities = self._omci_onu_device.configuration.uni_g_entities
+        uni_g_entities_ids = uni_g_entities.keys() if uni_g_entities is not None else None
+        pptp_entities = self._omci_onu_device.configuration.pptp_entities
+        pptp_entities_ids = pptp_entities.keys() if pptp_entities is not None else None
+
+        metrics_info = []
+
+        if uni_g_entities_ids and pptp_entities_ids and len(uni_g_entities_ids) and \
+                len(uni_g_entities_ids) <= len(pptp_entities_ids):
+
+            from voltha.extensions.omci.omci_entities import UniG, PptpEthernetUni
+            uni_g_items = ['administrative_state']
+            pptp_items = ['administrative_state', 'operational_state', 'sensed_type']
+
+            for entity_id in pptp_entities_ids:
+                metrics = dict()
+                data = self._omci_onu_device.query_mib(class_id=UniG.class_id,
+                                                       instance_id=entity_id,
+                                                       attributes=uni_g_items)
+                if len(data):
+                    if 'administrative_state' in data:
+                        metrics['uni_admin_state'] = data['administrative_state']
+
+                data = self._omci_onu_device.query_mib(class_id=PptpEthernetUni.class_id,
+                                                       instance_id=entity_id,
+                                                       attributes=pptp_items)
+                if len(data):
+                    if 'administrative_state' in data:
+                        metrics['pptp_admin_state'] = data['administrative_state']
+
+                    if 'operational_state' in data:
+                        metrics['oper_status'] = data['operational_state']
+
+                    if 'sensed_type' in data:
+                        metrics['ethernet_type'] = data['sensed_type']
+
+                if len(metrics):
+                    metric_data = MetricInformation(metadata=MetricMetaData(title=group_name,
+                                                                            ts=now,
+                                                                            logical_device_id=self.logical_device_id,
+                                                                            serial_no=self.serial_number,
+                                                                            device_id=self.device_id,
+                                                                            context={
+                                                                                'intf_id': str(entity_id & 0xFF)
+                                                                            }),
+                                                    metrics=metrics)
+                    metrics_info.append(metric_data)
+
+        return metrics_info
diff --git a/python/extensions/kpi/onu/onu_pm_interval_metrics.py b/python/extensions/kpi/onu/onu_pm_interval_metrics.py
new file mode 100644
index 0000000..5629e7c
--- /dev/null
+++ b/python/extensions/kpi/onu/onu_pm_interval_metrics.py
@@ -0,0 +1,383 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import arrow
+from voltha.protos.device_pb2 import PmConfig, PmGroupConfig
+from voltha.protos.events_pb2 import KpiEvent2, MetricInformation, MetricMetaData, KpiEventType
+from voltha.extensions.kpi.adapter_pm_metrics import AdapterPmMetrics
+from voltha.extensions.omci.omci_entities import \
+    EthernetFrameUpstreamPerformanceMonitoringHistoryData, \
+    EthernetFrameDownstreamPerformanceMonitoringHistoryData, \
+    EthernetFrameExtendedPerformanceMonitoring, \
+    EthernetFrameExtendedPerformanceMonitoring64Bit, \
+    EthernetPMMonitoringHistoryData, FecPerformanceMonitoringHistoryData, \
+    GemPortNetworkCtpMonitoringHistoryData, XgPonTcPerformanceMonitoringHistoryData, \
+    XgPonDownstreamPerformanceMonitoringHistoryData, \
+    XgPonUpstreamPerformanceMonitoringHistoryData
+
+
+class OnuPmIntervalMetrics(AdapterPmMetrics):
+    """
+    ONU OMCI PM Interval metrics
+
+    These differ from other PM Metrics as they are collected and generated as a
+    result of receipt of OMCI get responses on various PM History MEs.  They are
+    also always managed as a group with a fixed frequency of 15 minutes.
+    """
+    ME_ID_INFO = {
+        EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id: 'Ethernet_Bridge_Port_History',
+        EthernetFrameDownstreamPerformanceMonitoringHistoryData.class_id: 'Ethernet_Bridge_Port_History',
+        EthernetFrameExtendedPerformanceMonitoring.class_id: 'Ethernet_Bridge_Port_History',
+        EthernetFrameExtendedPerformanceMonitoring64Bit.class_id: 'Ethernet_Bridge_Port_History',
+        EthernetPMMonitoringHistoryData.class_id: 'Ethernet_UNI_History',
+        FecPerformanceMonitoringHistoryData.class_id: 'FEC_History',
+        GemPortNetworkCtpMonitoringHistoryData.class_id: 'GEM_Port_History',
+        XgPonTcPerformanceMonitoringHistoryData.class_id: 'xgPON_TC_History',
+        XgPonDownstreamPerformanceMonitoringHistoryData.class_id: 'xgPON_Downstream_History',
+        XgPonUpstreamPerformanceMonitoringHistoryData.class_id: 'xgPON_Upstream_History'
+    }
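+    # Note that all four Ethernet frame history MEs above intentionally map to
+    # the same group name so their counters publish under a single PM group.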
+    ETHERNET_BRIDGE_HISTORY_ENABLED = True
+    ETHERNET_UNI_HISTORY_ENABLED = True
+    FEC_HISTORY_ENABLED = True
+    GEM_PORT_HISTORY_ENABLED = False
+    TRANS_CONV_HISTORY_ENABLED = False
+    XGPON_DOWNSTREAM_HISTORY = False
+    XGPON_UPSTREAM_HISTORY = False
+
+    def __init__(self, adapter_agent, device_id, logical_device_id, **kwargs):
+        super(OnuPmIntervalMetrics, self).__init__(adapter_agent, device_id, logical_device_id,
+                                                   grouped=True, freq_override=False,
+                                                   **kwargs)
+        ethernet_bridge_history = {
+            ('class_id', PmConfig.CONTEXT),
+            ('entity_id', PmConfig.CONTEXT),
+            ("interval_end_time", PmConfig.CONTEXT),
+            ('parent_class_id', PmConfig.CONTEXT),
+            ('parent_entity_id', PmConfig.CONTEXT),
+            ('upstream', PmConfig.CONTEXT),
+
+            ("drop_events", PmConfig.COUNTER),
+            ("octets", PmConfig.COUNTER),
+            ("packets", PmConfig.COUNTER),
+            ("broadcast_packets", PmConfig.COUNTER),
+            ("multicast_packets", PmConfig.COUNTER),
+            ("crc_errored_packets", PmConfig.COUNTER),
+            ("undersize_packets", PmConfig.COUNTER),
+            ("oversize_packets", PmConfig.COUNTER),
+            ("64_octets", PmConfig.COUNTER),
+            ("65_to_127_octets", PmConfig.COUNTER),
+            ("128_to_255_octets", PmConfig.COUNTER),
+            ("256_to_511_octets", PmConfig.COUNTER),
+            ("512_to_1023_octets", PmConfig.COUNTER),
+            ("1024_to_1518_octets", PmConfig.COUNTER)
+        }
+        self._ethernet_bridge_history_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                                for (m, t) in ethernet_bridge_history}
+
+        ethernet_uni_history = {   # Ethernet History Data (Class ID 24)
+            ('class_id', PmConfig.CONTEXT),
+            ('entity_id', PmConfig.CONTEXT),
+            ("interval_end_time", PmConfig.CONTEXT),
+
+            ("fcs_errors", PmConfig.COUNTER),
+            ("excessive_collision_counter", PmConfig.COUNTER),
+            ("late_collision_counter", PmConfig.COUNTER),
+            ("frames_too_long", PmConfig.COUNTER),
+            ("buffer_overflows_on_rx", PmConfig.COUNTER),
+            ("buffer_overflows_on_tx", PmConfig.COUNTER),
+            ("single_collision_frame_counter", PmConfig.COUNTER),
+            ("multiple_collisions_frame_counter", PmConfig.COUNTER),
+            ("sqe_counter", PmConfig.COUNTER),
+            ("deferred_tx_counter", PmConfig.COUNTER),
+            ("internal_mac_tx_error_counter", PmConfig.COUNTER),
+            ("carrier_sense_error_counter", PmConfig.COUNTER),
+            ("alignment_error_counter", PmConfig.COUNTER),
+            ("internal_mac_rx_error_counter", PmConfig.COUNTER),
+        }
+        self._ethernet_uni_history_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                             for (m, t) in ethernet_uni_history}
+
+        fec_history = {   # FEC History Data (Class ID 312)
+            ('class_id', PmConfig.CONTEXT),
+            ('entity_id', PmConfig.CONTEXT),
+            ("interval_end_time", PmConfig.CONTEXT),
+
+            ("corrected_bytes", PmConfig.COUNTER),
+            ("corrected_code_words", PmConfig.COUNTER),
+            ("uncorrectable_code_words", PmConfig.COUNTER),
+            ("total_code_words", PmConfig.COUNTER),
+            ("fec_seconds", PmConfig.COUNTER),
+        }
+        self._fec_history_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                    for (m, t) in fec_history}
+
+        gem_port_history = {  # GEM Port Network CTP History Data (Class ID 341)
+            ('class_id', PmConfig.CONTEXT),
+            ('entity_id', PmConfig.CONTEXT),
+            ("interval_end_time", PmConfig.CONTEXT),
+
+            ("transmitted_gem_frames", PmConfig.COUNTER),
+            ("received_gem_frames", PmConfig.COUNTER),
+            ("received_payload_bytes", PmConfig.COUNTER),
+            ("transmitted_payload_bytes", PmConfig.COUNTER),
+            ("encryption_key_errors", PmConfig.COUNTER),
+        }
+        self._gem_port_history_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                         for (m, t) in gem_port_history}
+
+        xgpon_tc_history = {  # XgPon TC History Data (Class ID 344)
+            ('class_id', PmConfig.CONTEXT),
+            ('entity_id', PmConfig.CONTEXT),
+            ("interval_end_time", PmConfig.CONTEXT),
+
+            ("psbd_hec_error_count", PmConfig.COUNTER),
+            ("xgtc_hec_error_count", PmConfig.COUNTER),
+            ("unknown_profile_count", PmConfig.COUNTER),
+            ("transmitted_xgem_frames", PmConfig.COUNTER),
+            ("fragment_xgem_frames", PmConfig.COUNTER),
+            ("xgem_hec_lost_words_count", PmConfig.COUNTER),
+            ("xgem_key_errors", PmConfig.COUNTER),
+            ("xgem_hec_error_count", PmConfig.COUNTER),
+        }
+        self._xgpon_tc_history_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                         for (m, t) in xgpon_tc_history}
+
+        xgpon_downstream_history = {  # XgPon Downstream History Data (Class ID 345)
+            ('class_id', PmConfig.CONTEXT),
+            ('entity_id', PmConfig.CONTEXT),
+            ("interval_end_time", PmConfig.CONTEXT),
+
+            ("ploam_mic_error_count", PmConfig.COUNTER),
+            ("downstream_ploam_messages_count", PmConfig.COUNTER),
+            ("profile_messages_received", PmConfig.COUNTER),
+            ("ranging_time_messages_received", PmConfig.COUNTER),
+            ("deactivate_onu_id_messages_received", PmConfig.COUNTER),
+            ("disable_serial_number_messages_received", PmConfig.COUNTER),
+            ("request_registration_messages_received", PmConfig.COUNTER),
+            ("assign_alloc_id_messages_received", PmConfig.COUNTER),
+            ("key_control_messages_received", PmConfig.COUNTER),
+            ("sleep_allow_messages_received", PmConfig.COUNTER),
+            ("baseline_omci_messages_received_count", PmConfig.COUNTER),
+            ("extended_omci_messages_received_count", PmConfig.COUNTER),
+            ("assign_onu_id_messages_received", PmConfig.COUNTER),
+            ("omci_mic_error_count", PmConfig.COUNTER),
+        }
+        self._xgpon_downstream_history_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                                 for (m, t) in xgpon_downstream_history}
+
+        xgpon_upstream_history = {  # XgPon Upstream History Data (Class ID 346)
+            ('class_id', PmConfig.CONTEXT),
+            ('entity_id', PmConfig.CONTEXT),
+            ("interval_end_time", PmConfig.CONTEXT),
+
+            ("upstream_ploam_message_count", PmConfig.COUNTER),
+            ("serial_number_onu_message_count", PmConfig.COUNTER),
+            ("registration_message_count", PmConfig.COUNTER),
+            ("key_report_message_count", PmConfig.COUNTER),
+            ("acknowledge_message_count", PmConfig.COUNTER),
+            ("sleep_request_message_count", PmConfig.COUNTER),
+        }
+        self._xgpon_upstream_history_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                               for (m, t) in xgpon_upstream_history}
+        self._configs = {
+            EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id: self._ethernet_bridge_history_config,
+            EthernetFrameDownstreamPerformanceMonitoringHistoryData.class_id: self._ethernet_bridge_history_config,
+            EthernetFrameExtendedPerformanceMonitoring.class_id: self._ethernet_bridge_history_config,
+            EthernetFrameExtendedPerformanceMonitoring64Bit.class_id: self._ethernet_bridge_history_config,
+            EthernetPMMonitoringHistoryData.class_id: self._ethernet_uni_history_config,
+            FecPerformanceMonitoringHistoryData.class_id: self._fec_history_config,
+            GemPortNetworkCtpMonitoringHistoryData.class_id: self._gem_port_history_config,
+            XgPonTcPerformanceMonitoringHistoryData.class_id: self._xgpon_tc_history_config,
+            XgPonDownstreamPerformanceMonitoringHistoryData.class_id: self._xgpon_downstream_history_config,
+            XgPonUpstreamPerformanceMonitoringHistoryData.class_id: self._xgpon_upstream_history_config
+        }
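+        # _configs routes the class_id of a received PM History get-response to
+        # the PmConfig dictionary used to filter and publish it (see publish_metrics).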
+
+    def update(self, pm_config):
+        """
+        Update the PM Configuration.
+
+        For historical PM Intervals, the frequency is always zero since the actual
+        collection and publishing is provided by the OpenOMCI library
+
+        :param pm_config: (PmConfigs) PM configuration to apply
+        """
+        self.log.debug('update')
+
+        try:
+            if pm_config.grouped:
+                for group in pm_config.groups:
+                    group_config = self.pm_group_metrics.get(group.group_name)
+                    if group_config is not None and group_config.enabled != group.enabled:
+                        group_config.enabled = group.enabled
+                        # TODO: For OMCI PM Metrics, tie this into add/remove of the PM Interval ME itself
+            else:
+                msg = 'There are no independent OMCI Interval metrics, only group metrics at this time'
+                raise NotImplementedError(msg)
+
+        except Exception as e:
+            self.log.exception('update-failure', e=e)
+            raise
+
+    def make_proto(self, pm_config=None):
+        """
+        From the PM Configurations defined in this class's initializer, create
+        the PMConfigs protobuf message that defines our PM configuration and
+        data.
+
+        All ONU PM Interval metrics are grouped metrics that are generated autonomously
+        by the OpenOMCI Performance Intervals state machine.
+
+        :param pm_config: (PmConfigs) PM Configuration message to add OpenOMCI config items to
+        :return: (PmConfigs) PM Configuration Protobuf message
+        """
+        assert pm_config is not None
+
+        pm_ethernet_bridge_history = PmGroupConfig(group_name=OnuPmIntervalMetrics.ME_ID_INFO[EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id],
+                                                   group_freq=0,
+                                                   enabled=OnuPmIntervalMetrics.ETHERNET_BRIDGE_HISTORY_ENABLED)
+        self.pm_group_metrics[pm_ethernet_bridge_history.group_name] = pm_ethernet_bridge_history
+
+        for m in sorted(self._ethernet_bridge_history_config):
+            pm = self._ethernet_bridge_history_config[m]
+            pm_ethernet_bridge_history.metrics.extend([PmConfig(name=pm.name,
+                                                                type=pm.type,
+                                                                enabled=pm.enabled)])
+
+        pm_ethernet_uni_history = PmGroupConfig(group_name=OnuPmIntervalMetrics.ME_ID_INFO[EthernetPMMonitoringHistoryData.class_id],
+                                                group_freq=0,
+                                                enabled=OnuPmIntervalMetrics.ETHERNET_UNI_HISTORY_ENABLED)
+        self.pm_group_metrics[pm_ethernet_uni_history.group_name] = pm_ethernet_uni_history
+
+        for m in sorted(self._ethernet_uni_history_config):
+            pm = self._ethernet_uni_history_config[m]
+            pm_ethernet_uni_history.metrics.extend([PmConfig(name=pm.name,
+                                                             type=pm.type,
+                                                             enabled=pm.enabled)])
+
+        pm_fec_history = PmGroupConfig(group_name=OnuPmIntervalMetrics.ME_ID_INFO[FecPerformanceMonitoringHistoryData.class_id],
+                                       group_freq=0,
+                                       enabled=OnuPmIntervalMetrics.FEC_HISTORY_ENABLED)
+        self.pm_group_metrics[pm_fec_history.group_name] = pm_fec_history
+
+        for m in sorted(self._fec_history_config):
+            pm = self._fec_history_config[m]
+            pm_fec_history.metrics.extend([PmConfig(name=pm.name,
+                                                    type=pm.type,
+                                                    enabled=pm.enabled)])
+
+        pm_gem_port_history = PmGroupConfig(group_name=OnuPmIntervalMetrics.ME_ID_INFO[GemPortNetworkCtpMonitoringHistoryData.class_id],
+                                            group_freq=0,
+                                            enabled=OnuPmIntervalMetrics.GEM_PORT_HISTORY_ENABLED)
+        self.pm_group_metrics[pm_gem_port_history.group_name] = pm_gem_port_history
+
+        for m in sorted(self._gem_port_history_config):
+            pm = self._gem_port_history_config[m]
+            pm_gem_port_history.metrics.extend([PmConfig(name=pm.name,
+                                                         type=pm.type,
+                                                         enabled=pm.enabled)])
+
+        pm_xgpon_tc_history = PmGroupConfig(group_name=OnuPmIntervalMetrics.ME_ID_INFO[XgPonTcPerformanceMonitoringHistoryData.class_id],
+                                            group_freq=0,
+                                            enabled=OnuPmIntervalMetrics.TRANS_CONV_HISTORY_ENABLED)
+        self.pm_group_metrics[pm_xgpon_tc_history.group_name] = pm_xgpon_tc_history
+
+        for m in sorted(self._xgpon_tc_history_config):
+            pm = self._xgpon_tc_history_config[m]
+            pm_xgpon_tc_history.metrics.extend([PmConfig(name=pm.name,
+                                                         type=pm.type,
+                                                         enabled=pm.enabled)])
+
+        pm_xgpon_downstream_history = PmGroupConfig(group_name=OnuPmIntervalMetrics.ME_ID_INFO[XgPonDownstreamPerformanceMonitoringHistoryData.class_id],
+                                                    group_freq=0,
+                                                    enabled=OnuPmIntervalMetrics.XGPON_DOWNSTREAM_HISTORY)
+        self.pm_group_metrics[pm_xgpon_downstream_history.group_name] = pm_xgpon_downstream_history
+
+        for m in sorted(self._xgpon_downstream_history_config):
+            pm = self._xgpon_downstream_history_config[m]
+            pm_xgpon_downstream_history.metrics.extend([PmConfig(name=pm.name,
+                                                                 type=pm.type,
+                                                                 enabled=pm.enabled)])
+
+        pm_xgpon_upstream_history = PmGroupConfig(group_name=OnuPmIntervalMetrics.ME_ID_INFO[XgPonUpstreamPerformanceMonitoringHistoryData.class_id],
+                                                  group_freq=0,
+                                                  enabled=OnuPmIntervalMetrics.XGPON_UPSTREAM_HISTORY)
+        self.pm_group_metrics[pm_xgpon_upstream_history.group_name] = pm_xgpon_upstream_history
+
+        for m in sorted(self._xgpon_upstream_history_config):
+            pm = self._xgpon_upstream_history_config[m]
+            pm_xgpon_upstream_history.metrics.extend([PmConfig(name=pm.name,
+                                                               type=pm.type,
+                                                               enabled=pm.enabled)])
+
+        pm_config.groups.extend([stats for stats in self.pm_group_metrics.itervalues()])
+
+        return pm_config
+
+    def publish_metrics(self, interval_data):
+        """
+        Collect the metrics for this ONU PM Interval
+
+        :param interval_data: (dict) PM interval dictionary with structure of
+                    {
+                        'class_id': self._class_id,
+                        'entity_id': self._entity_id,
+                        'me_name': self._entity.__name__,   # Mostly for debugging...
+                        'interval_utc_time': None,
+                        # Counters added here as they are retrieved
+                    }
+
+        :return: (dict) Key/Value of metric data
+        """
+        self.log.debug('publish-metrics')
+
+        try:
+            # Locate config
+            now = arrow.utcnow()
+            class_id = interval_data['class_id']
+            config = self._configs.get(class_id)
+            group = self.pm_group_metrics.get(OnuPmIntervalMetrics.ME_ID_INFO.get(class_id, ''))
+
+            if config is not None and group is not None and group.enabled:
+                # Extract only the metrics we need to publish
+                metrics = dict()
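+                # The context timestamp below floors 'now' to the start of the
+                # current 15-minute interval (e.g. 10:07 -> 10:00, 10:22 -> 10:15).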
+                context = {
+                    'interval_start_time': str(now.replace(minute=int(now.minute / 15) * 15,
+                                                           second=0,
+                                                           microsecond=0).timestamp)
+                }
+                for metric, config_item in config.items():
+                    if config_item.type == PmConfig.CONTEXT and metric in interval_data:
+                        context[metric] = str(interval_data[metric])
+
+                    elif (config_item.type in (PmConfig.COUNTER, PmConfig.GAUGE, PmConfig.STATE) and
+                          metric in interval_data and
+                          config_item.enabled):
+                        metrics[metric] = interval_data[metric]
+
+                if len(metrics):
+                    metadata = MetricMetaData(title=group.group_name,
+                                              ts=now.float_timestamp,
+                                              logical_device_id=self.logical_device_id,
+                                              serial_no=self.serial_number,
+                                              device_id=self.device_id,
+                                              context=context)
+                    slice_data = [MetricInformation(metadata=metadata, metrics=metrics)]
+
+                    kpi_event = KpiEvent2(type=KpiEventType.slice,
+                                          ts=now.float_timestamp,
+                                          slice_data=slice_data)
+                    self.adapter_agent.submit_kpis(kpi_event)
+
+        except Exception as e:
+            self.log.exception('failed-to-submit-kpis', e=e)
diff --git a/python/extensions/kpi/onu/onu_pm_metrics.py b/python/extensions/kpi/onu/onu_pm_metrics.py
new file mode 100644
index 0000000..c94136a
--- /dev/null
+++ b/python/extensions/kpi/onu/onu_pm_metrics.py
@@ -0,0 +1,171 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig
+from voltha.extensions.kpi.adapter_pm_metrics import AdapterPmMetrics
+from voltha.extensions.kpi.onu.onu_omci_pm import OnuOmciPmMetrics
+
+
+class OnuPmMetrics(AdapterPmMetrics):
+    """
+    Shared ONU Device Adapter PM Metrics Manager
+
+    This class specifically addresses ONU general PM (health, ...). Area-specific
+    PM (OMCI, PON, UNI) is supported in encapsulated classes accessible from
+    this object.
+    """
+
+    # Metric default settings
+    DEFAULT_HEARTBEAT_ENABLED = False
+    DEFAULT_HEARTBEAT_FREQUENCY = 1200  # in 1/10ths of a second (i.e. every 2 minutes)
+    #
+    # Currently only a single KPI metrics collection occurs (individual group
+    # frequency not supported). The next value defines this single frequency until
+    # the KPI shared library supports individual collection.
+    DEFAULT_ONU_COLLECTION_FREQUENCY = 60 * 10      # 1 minute (in 1/10ths of a second)
+
+    def __init__(self, adapter_agent, device_id, logical_device_id,
+                 grouped=False, freq_override=False, **kwargs):
+        """
+        Initializer for shared ONU Device Adapter PM metrics
+
+        :param adapter_agent: (AdapterAgent) Adapter agent for the device
+        :param device_id: (str) Device ID
+        :param logical_device_id: (str) VOLTHA Logical Device ID
+        :param grouped: (bool) Flag indicating if statistics are managed as a group
+        :param freq_override: (bool) Flag indicating if frequency collection can be specified
+                                     on a per group basis
+        :param kwargs: (dict) Device Adapter specific values. For an ONU Device adapter, the
+                              expected key-value pairs are listed below. If not provided, the
+                              associated PM statistics are not gathered:
+
+                              'heartbeat': Reference to a class that provides ONU heartbeat
+                                           statistics.   TODO: This should be standardized across adapters
+        """
+        super(OnuPmMetrics, self).__init__(adapter_agent, device_id, logical_device_id,
+                                           grouped=grouped, freq_override=freq_override,
+                                           **kwargs)
+
+        # The following HeartBeat PM is only an example. We may want to have a common heartbeat
+        # object for OLT and ONU DAs that work the same.  If so, it could also provide PM information
+        #
+        # TODO: In the actual 'collection' of PM data, I have the heartbeat stats disabled since
+        #       there is not yet a common 'heartbeat' object
+        #
+        self.health_pm_names = {
+            ('alarm_active', PmConfig.STATE),
+            ('heartbeat_count', PmConfig.COUNTER),
+            ('heartbeat_miss', PmConfig.COUNTER),
+            ('alarms_raised_count', PmConfig.COUNTER),
+            ('heartbeat_failed_limit', PmConfig.COUNTER),
+            ('heartbeat_interval', PmConfig.COUNTER),
+        }
+        # TODO Add PON Port pollable PM as a separate class and include like OMCI
+        # TODO Add UNI Port pollable PM as a separate class and include like OMCI
+        self._heartbeat = kwargs.pop('heartbeat', None)
+        self.health_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                      for (m, t) in self.health_pm_names}
+
+        self.omci_pm = OnuOmciPmMetrics(adapter_agent, device_id, logical_device_id,
+                                        grouped=grouped, freq_override=freq_override,
+                                        **kwargs)
+
+    def update(self, pm_config):
+        try:
+            # TODO: Test frequency override capability for a particular group
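+            # default_freq is in 1/10ths of a second; divide by 10 for the
+            # collection timer interval in seconds.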
+            if self.default_freq != pm_config.default_freq:
+                # Update the callback to the new frequency.
+                self.default_freq = pm_config.default_freq
+                self.lc.stop()
+                self.lc.start(interval=self.default_freq / 10)
+
+            if pm_config.grouped:
+                for group in pm_config.groups:
+                    group_config = self.pm_group_metrics.get(group.group_name)
+                    if group_config is not None:
+                        group_config.enabled = group.enabled
+            else:
+                msg = 'There are no independent ONU metrics, only group metrics at this time'
+                raise NotImplementedError(msg)
+
+        except Exception as e:
+            self.log.exception('update-failure', e=e)
+            raise
+
+        self.omci_pm.update(pm_config)
+
+    def make_proto(self, pm_config=None):
+        if pm_config is None:
+            pm_config = PmConfigs(id=self.device_id,
+                                  default_freq=self.default_freq,
+                                  grouped=self.grouped,
+                                  freq_override=self.freq_override)
+        metrics = set()
+
+        if self._heartbeat is not None:
+            if self.grouped:
+                pm_health_stats = PmGroupConfig(group_name='Heartbeat',
+                                                group_freq=OnuPmMetrics.DEFAULT_HEARTBEAT_FREQUENCY,
+                                                enabled=OnuPmMetrics.DEFAULT_HEARTBEAT_ENABLED)
+                self.pm_group_metrics[pm_health_stats.group_name] = pm_health_stats
+            else:
+                pm_health_stats = pm_config
+
+            # Add metrics to the PM Group (or as individual metrics)
+            for m in sorted(self.health_metrics_config):
+                pm = self.health_metrics_config[m]
+                if not self.grouped:
+                    if pm.name in metrics:
+                        continue
+                    metrics.add(pm.name)
+
+                pm_health_stats.metrics.extend([PmConfig(name=pm.name,
+                                                         type=pm.type,
+                                                         enabled=pm.enabled)])
+            if self.grouped:
+                pm_config.groups.extend([pm_health_stats])
+
+        # TODO Add PON Port PM
+        # TODO Add UNI Port PM
+        pm_config = self.omci_pm.make_proto(pm_config)
+        return pm_config
+
+    def collect_metrics(self, data=None):
+        """
+        Collect metrics for this adapter.
+
+        The data collected (or passed in) is a list of pairs/tuples.  Each
+        pair is composed of a MetricMetaData metadata portion and a list of
+        MetricValuePairs that contains either a single metric or a list of
+        metrics if this is a group metric.
+
+        This method is called for each adapter at a fixed frequency.
+        TODO: Currently all group metrics are collected on a single timer tick.
+              This needs to be fixed as independent group or instance collection is
+              desirable.
+
+        :param data: (list) Existing list of collected metrics (MetricInformation).
+                            This is provided to allow derived classes to call into
+                            further encapsulated classes.
+
+        :return: (list) metadata and metrics pairs - see description above
+        """
+        if data is None:
+            data = list()
+
+        # TODO: Heartbeat stats disabled since it is not a common item on all ONUs (or OLTs)
+        # if self._heartbeat is not None:
+        #     data.extend(self.collect_metrics(self._heartbeat, self.health_pm_names,
+        #                                      self.health_metrics_config))
+        return self.omci_pm.collect_metrics(data=data)
diff --git a/python/extensions/omci/__init__.py b/python/extensions/omci/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/omci/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/omci/database/__init__.py b/python/extensions/omci/database/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/omci/database/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/omci/database/alarm_db_ext.py b/python/extensions/omci/database/alarm_db_ext.py
new file mode 100644
index 0000000..2af6923
--- /dev/null
+++ b/python/extensions/omci/database/alarm_db_ext.py
@@ -0,0 +1,698 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from mib_db_api import *
+from voltha.protos.omci_alarm_db_pb2 import AlarmInstanceData, AlarmClassData, \
+    AlarmDeviceData, AlarmAttributeData
+
+
+class AlarmDbExternal(MibDbApi):
+    """
+    A persistent external OpenOMCI Alarm Database
+    """
+    CURRENT_VERSION = 1                       # VOLTHA v1.3.0 release
+    ALARM_BITMAP_KEY = 'alarm_bit_map'
+
+    _TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
+
+    # Paths from root proxy
+    ALARM_PATH = '/omci_alarms'
+    DEVICE_PATH = ALARM_PATH + '/{}'            # .format(device_id)
+
+    # Classes, Instances, and Attributes as lists from root proxy
+    CLASSES_PATH = DEVICE_PATH + '/classes'                                # .format(device_id)
+    INSTANCES_PATH = DEVICE_PATH + '/classes/{}/instances'                 # .format(device_id, class_id)
+    ATTRIBUTES_PATH = DEVICE_PATH + '/classes/{}/instances/{}/attributes'  # .format(device_id, class_id, instance_id)
+
+    # Single Class, Instance, and Attribute as objects from device proxy
+    CLASS_PATH = '/classes/{}'                                 # .format(class_id)
+    INSTANCE_PATH = '/classes/{}/instances/{}'                 # .format(class_id, instance_id)
+    ATTRIBUTE_PATH = '/classes/{}/instances/{}/attributes/{}'  # .format(class_id, instance_id
+                                                               #         attribute_name)
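+    # For example (illustrative device id), DEVICE_PATH.format('onu-123') yields
+    # '/omci_alarms/onu-123', and INSTANCE_PATH.format(1, 2) yields the
+    # device-relative path '/classes/1/instances/2'.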
+
+    def __init__(self, omci_agent):
+        """
+        Class initializer
+        :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+        """
+        super(AlarmDbExternal, self).__init__(omci_agent)
+        self._core = omci_agent.core
+
+    def start(self):
+        """
+        Start up/restore the database
+        """
+        self.log.debug('start')
+
+        if not self._started:
+            super(AlarmDbExternal, self).start()
+            root_proxy = self._core.get_proxy('/')
+
+            try:
+                base = root_proxy.get(AlarmDbExternal.ALARM_PATH)
+                self.log.info('db-exists', num_devices=len(base))
+
+            except Exception as e:
+                self.log.exception('start-failure', e=e)
+                raise
+
+    def stop(self):
+        """
+        Stop the database
+        """
+        self.log.debug('stop')
+
+        if self._started:
+            super(AlarmDbExternal, self).stop()
+            # TODO: Delete this method if nothing else is done except calling the base class
+
+    def _time_to_string(self, time):
+        return time.strftime(AlarmDbExternal._TIME_FORMAT) if time is not None else ''
+
+    def _string_to_time(self, time):
+        return datetime.strptime(time, AlarmDbExternal._TIME_FORMAT) if len(time) else None
+
+    def _attribute_to_string(self, value):
+        """
+        Convert an ME's attribute value to string representation
+
+        :param value: (long) Alarm bitmaps are always a Long
+        :return: (str) String representation of the value
+        """
+        return str(value)
+
+    def _string_to_attribute(self, str_value):
+        """
+        Convert an ME's attribute value-string to its Scapy decode equivalent
+
+        :param str_value: (str) Attribute Value in string form
+
+        :return: (long) Attribute value decoded from the string
+        """
+        # Alarms are always a bitmap which is a long
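+        # e.g. (illustrative values) _string_to_attribute('1024') -> 1024L and
+        # _string_to_attribute('') -> 0L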
+        return long(str_value) if len(str_value) else 0L
+
+    def add(self, device_id, overwrite=False):
+        """
+        Add a new ONU to database
+
+        :param device_id: (str) Device ID of ONU to add
+        :param overwrite: (bool) Overwrite existing entry if found.
+
+        :raises KeyError: If device already exists and 'overwrite' is False
+        """
+        self.log.debug('add-device', device_id=device_id, overwrite=overwrite)
+
+        now = datetime.utcnow()
+        found = False
+        root_proxy = self._core.get_proxy('/')
+
+        data = AlarmDeviceData(device_id=device_id,
+                               created=self._time_to_string(now),
+                               version=AlarmDbExternal.CURRENT_VERSION,
+                               last_alarm_sequence=0)
+        try:
+            dev_proxy = self._device_proxy(device_id)
+            found = True
+
+            if not overwrite:
+                # Device already exists
+                raise KeyError('Device with ID {} already exists in Alarm database'.
+                               format(device_id))
+
+            # Overwrite with new data
+            data = dev_proxy.get('/', depth=0)
+            self._root_proxy.update(AlarmDbExternal.DEVICE_PATH.format(device_id), data)
+            self._modified = now
+
+        except KeyError:
+            if found:
+                raise
+            # Did not exist, add it now
+            root_proxy.add(AlarmDbExternal.ALARM_PATH, data)
+            self._created = now
+            self._modified = now
+
+    def remove(self, device_id):
+        """
+        Remove an ONU from the database
+
+        :param device_id: (str) Device ID of ONU to remove from database
+        """
+        self.log.debug('remove-device', device_id=device_id)
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        try:
+            # self._root_proxy.get(AlarmDbExternal.DEVICE_PATH.format(device_id))
+            self._root_proxy.remove(AlarmDbExternal.DEVICE_PATH.format(device_id))
+            self._modified = datetime.utcnow()
+
+        except KeyError:
+            # Did not exist, which is not a failure
+            pass
+
+        except Exception as e:
+            self.log.exception('remove-exception', device_id=device_id, e=e)
+            raise
+
+    @property
+    def _root_proxy(self):
+        return self._core.get_proxy('/')
+
+    def _device_proxy(self, device_id):
+        """
+        Return a config proxy to the OMCI Alarm_DB leaf for a given device
+
+        :param device_id: (str) ONU Device ID
+        :return: (ConfigProxy) Configuration proxy rooted at OMCI Alarm DB
+        :raises KeyError: If the device does not exist in the database
+        """
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        return self._core.get_proxy(AlarmDbExternal.DEVICE_PATH.format(device_id))
+
+    def _class_proxy(self, device_id, class_id, create=False):
+        """
+        Get a config proxy to a specific managed entity class
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) Class ID
+        :param create: (bool) If true, create default instance (and class)
+        :return: (ConfigProxy) Class configuration proxy
+
+        :raises DatabaseStateError: If database is not started
+        :raises KeyError: If Instance does not exist and 'create' is False
+        """
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError('class-id is 0..0xFFFF')
+
+        fmt = AlarmDbExternal.DEVICE_PATH + AlarmDbExternal.CLASS_PATH
+        path = fmt.format(device_id, class_id)
+
+        try:
+            return self._core.get_proxy(path)
+
+        except KeyError:
+            if not create:
+                self.log.error('class-proxy-does-not-exist', device_id=device_id,
+                               class_id=class_id)
+                raise
+
+        # Create class
+        data = AlarmClassData(class_id=class_id)
+        root_path = AlarmDbExternal.CLASSES_PATH.format(device_id)
+        self._root_proxy.add(root_path, data)
+
+        return self._core.get_proxy(path)
+
+    def _instance_proxy(self, device_id, class_id, instance_id, create=False):
+        """
+        Get a config proxy to a specific managed entity instance
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) Class ID
+        :param instance_id: (int) Instance ID
+        :param create: (bool) If true, create default instance (and class)
+        :return: (ConfigProxy) Instance configuration proxy
+
+        :raises DatabaseStateError: If database is not started
+        :raises KeyError: If Instance does not exist and 'create' is False
+        """
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError('class-id is 0..0xFFFF')
+
+        if not 0 <= instance_id <= 0xFFFF:
+            raise ValueError('instance-id is 0..0xFFFF')
+
+        fmt = AlarmDbExternal.DEVICE_PATH + AlarmDbExternal.INSTANCE_PATH
+        path = fmt.format(device_id, class_id, instance_id)
+
+        try:
+            return self._core.get_proxy(path)
+
+        except KeyError:
+            if not create:
+                self.log.error('instance-proxy-does-not-exist', device_id=device_id,
+                               class_id=class_id, instance_id=instance_id)
+                raise
+
+        # Create instance, first make sure class exists
+        self._class_proxy(device_id, class_id, create=True)
+
+        now = self._time_to_string(datetime.utcnow())
+        data = AlarmInstanceData(instance_id=instance_id, created=now, modified=now)
+        root_path = AlarmDbExternal.INSTANCES_PATH.format(device_id, class_id)
+        self._root_proxy.add(root_path, data)
+
+        return self._core.get_proxy(path)
+
+    def save_last_sync_time(self, device_id, value):
+        """
+        Save the Last Sync time to the database in an easy location to access
+
+        :param device_id: (str) ONU Device ID
+        :param value: (DateTime) Value to save
+        """
+        self.log.debug('save-last-sync-time', device_id=device_id, time=str(value))
+
+        try:
+            if not isinstance(value, datetime):
+                raise TypeError('Expected a datetime object, got {}'.
+                                format(type(value)))
+
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+
+            now = datetime.utcnow()
+            data.last_sync_time = self._time_to_string(value)
+
+            # Update
+            self._root_proxy.update(AlarmDbExternal.DEVICE_PATH.format(device_id),
+                                    data)
+            self._modified = now
+            self.log.debug('save-sync-time-complete', device_id=device_id)
+
+        except Exception as e:
+            self.log.exception('save-last-sync-exception', device_id=device_id, e=e)
+            raise
+
+    def get_last_sync_time(self, device_id):
+        """
+        Get the Last Sync Time saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (datetime) The value or None if not found
+        """
+        self.log.debug('get-last-sync-time', device_id=device_id)
+
+        try:
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+            return self._string_to_time(data.last_sync_time)
+
+        except KeyError:
+            return None     # OMCI MIB_DB entry has not yet been created
+
+        except Exception as e:
+            self.log.exception('get-last-sync-time-exception', e=e)
+            raise
+
+    def save_alarm_last_sync(self, device_id, value):
+        """
+        Save the Last Alarm Sequence value to the database in an easy location to access
+
+        :param device_id: (str) ONU Device ID
+        :param value: (int) Value to save
+        """
+        self.log.debug('save-last-sync', device_id=device_id, seq=str(value))
+
+        try:
+            if not isinstance(value, int):
+                raise TypeError('Expected an integer, got {}'.format(type(value)))
+
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+
+            now = datetime.utcnow()
+            data.last_alarm_sequence = int(value)
+
+            # Update
+            self._root_proxy.update(AlarmDbExternal.DEVICE_PATH.format(device_id),
+                                    data)
+            self._modified = now
+            self.log.debug('save-sequence-complete', device_id=device_id)
+
+        except Exception as e:
+            self.log.exception('save-last-sync-exception', device_id=device_id, e=e)
+            raise
+
+    def get_alarm_last_sync(self, device_id):
+        """
+        Get the Last Alarm Sequence value saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (int) The Value or None if not found
+        """
+        self.log.debug('get-last-sync', device_id=device_id)
+
+        try:
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+            return int(data.last_alarm_sequence)
+
+        except KeyError:
+            return None     # OMCI ALARM_DB entry has not yet been created
+
+        except Exception as e:
+            self.log.exception('get-last-alarm-exception', e=e)
+            raise
+
+    def _add_new_class(self, device_id, class_id, instance_id, attributes):
+        """
+        Create an entry for a new class in the external database
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+        """
+        self.log.debug('add', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+
+        now = self._time_to_string(datetime.utcnow())
+        attrs = [AlarmAttributeData(name=k,
+                                    value=self._attribute_to_string(v)) for k, v in attributes.items()]
+        class_data = AlarmClassData(class_id=class_id,
+                                    instances=[AlarmInstanceData(instance_id=instance_id,
+                                                                 created=now,
+                                                                 modified=now,
+                                                                 attributes=attrs)])
+
+        self._root_proxy.add(AlarmDbExternal.CLASSES_PATH.format(device_id), class_data)
+        self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+                       entity_id=instance_id, attributes=attributes)
+        return True
+
+    def _add_new_instance(self, device_id, class_id, instance_id, attributes):
+        """
+        Create an entry for an instance of an existing class in the external database
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+        """
+        self.log.debug('add', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+
+        now = self._time_to_string(datetime.utcnow())
+        attrs = [AlarmAttributeData(name=k,
+                                    value=self._attribute_to_string(v)) for k, v in attributes.items()]
+        instance_data = AlarmInstanceData(instance_id=instance_id,
+                                          created=now,
+                                          modified=now,
+                                          attributes=attrs)
+
+        self._root_proxy.add(AlarmDbExternal.INSTANCES_PATH.format(device_id, class_id),
+                             instance_data)
+
+        self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+                       entity_id=instance_id, attributes=attributes)
+        return True
+
+    def set(self, device_id, class_id, instance_id, attributes):
+        """
+        Set a database value.  This should only be called by the Alarm synchronizer
+        and its related tasks
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('set', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+        try:
+            if not isinstance(device_id, basestring):
+                raise TypeError('Device ID should be a string')
+
+            if not 0 <= class_id <= 0xFFFF:
+                raise ValueError("Invalid Class ID: {}, should be 0..65535".format(class_id))
+
+            if not 0 <= instance_id <= 0xFFFF:
+                raise ValueError("Invalid Instance ID: {}, should be 0..65535".format(instance_id))
+
+            if not isinstance(attributes, dict):
+                raise TypeError("Attributes should be a dictionary")
+
+            if not self._started:
+                raise DatabaseStateError('The Database is not currently active')
+
+            # Determine the best strategy to add the information
+            dev_proxy = self._device_proxy(device_id)
+
+            try:
+                class_data = dev_proxy.get(AlarmDbExternal.CLASS_PATH.format(class_id), deep=True)
+
+                inst_data = next((inst for inst in class_data.instances
+                                 if inst.instance_id == instance_id), None)
+
+                if inst_data is None:
+                    return self._add_new_instance(device_id, class_id, instance_id, attributes)
+
+                # Possibly adding to or updating an existing instance; compare
+                # the new attribute values against what is already stored
+
+                exist_attr_indexes = dict()
+                for index, attr in enumerate(inst_data.attributes):
+                    exist_attr_indexes[attr.name] = index
+
+                modified = False
+                str_value = ''
+                new_attributes = []
+
+                for k, v in attributes.items():
+                    try:
+                        str_value = self._attribute_to_string(v)
+                        new_attributes.append(AlarmAttributeData(name=k, value=str_value))
+
+                    except Exception as e:
+                        self.log.exception('save-error', e=e, class_id=class_id,
+                                           attr=k, value_type=type(v))
+                        continue    # Do not compare against a stale str_value
+
+                    if k not in exist_attr_indexes or \
+                            inst_data.attributes[exist_attr_indexes[k]].value != str_value:
+                        modified = True
+
+                if modified:
+                    now = datetime.utcnow()
+                    new_data = AlarmInstanceData(instance_id=instance_id,
+                                                 created=inst_data.created,
+                                                 modified=self._time_to_string(now),
+                                                 attributes=new_attributes)
+                    dev_proxy.remove(AlarmDbExternal.INSTANCE_PATH.format(class_id, instance_id))
+                    self._root_proxy.add(AlarmDbExternal.INSTANCES_PATH.format(device_id,
+                                                                               class_id), new_data)
+
+                self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+                               entity_id=instance_id, attributes=attributes, modified=modified)
+                return modified
+
+            except KeyError:
+                # Here if the class-id does not yet exist in the database
+                return self._add_new_class(device_id, class_id, instance_id,
+                                           attributes)
+        except Exception as e:
+            self.log.exception('set-exception', device_id=device_id, class_id=class_id,
+                               instance_id=instance_id, attributes=attributes, e=e)
+            raise
+
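+    # Illustrative sketch (assumed device/class IDs): the set() method above
+    # picks a strategy based on what is already stored:
+    #
+    #   db.set('onu-1', 0x0101, 0, attrs)  # class not in DB -> _add_new_class()
+    #   db.set('onu-1', 0x0101, 1, attrs)  # new instance    -> _add_new_instance()
+    #   db.set('onu-1', 0x0101, 0, attrs)  # existing        -> replaced only if modified
+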
+    def delete(self, device_id, class_id, entity_id):
+        """
+        Delete an entity from the database if it exists.  If all instances
+        of a class are deleted, the class is deleted as well.
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME Entity ID
+
+        :returns: (bool) True if the instance was found and deleted. False
+                         if it did not exist.
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('delete', device_id=device_id, class_id=class_id,
+                       entity_id=entity_id)
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError('class-id is 0..0xFFFF')
+
+        if not 0 <= entity_id <= 0xFFFF:
+            raise ValueError('instance-id is 0..0xFFFF')
+
+        try:
+            # Remove instance
+            self._instance_proxy(device_id, class_id, entity_id).remove('/')
+            now = datetime.utcnow()
+
+            # If resulting class has no instance, remove it as well
+            class_proxy = self._class_proxy(device_id, class_id)
+            class_data = class_proxy.get('/', depth=1)
+
+            if len(class_data.instances) == 0:
+                class_proxy.remove('/')
+
+            self._modified = now
+            return True
+
+        except KeyError:
+            return False    # Not found
+
+        except Exception as e:
+            self.log.exception('delete-exception', device_id=device_id, e=e)
+            raise
+
+    def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+        """
+        Get database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param device_id: (str) ONU Device ID
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+        :param attributes: (list/set or str) Managed Entity instance's attributes
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises KeyError: If the requested device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('query', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+        try:
+            if class_id is None:
+                # Get full device info
+                dev_data = self._device_proxy(device_id).get('/', depth=-1)
+                data = self._device_to_dict(dev_data)
+
+            elif instance_id is None:
+                # Get all instances of the class
+                try:
+                    cls_data = self._class_proxy(device_id, class_id).get('/', depth=-1)
+                    data = self._class_to_dict(cls_data)
+
+                except KeyError:
+                    data = dict()
+
+            else:
+                # Get all attributes of a specific ME
+                try:
+                    inst_data = self._instance_proxy(device_id, class_id, instance_id).\
+                        get('/', depth=-1)
+
+                    if attributes is None:
+                        # All Attributes
+                        data = self._instance_to_dict(inst_data)
+
+                    else:
+                        # Specific attribute(s)
+                        if isinstance(attributes, basestring):
+                            attributes = {attributes}
+
+                        data = {
+                            attr.name: self._string_to_attribute(attr.value)
+                            for attr in inst_data.attributes if attr.name in attributes}
+
+                except KeyError:
+                    data = dict()
+
+            return data
+
+        except KeyError:
+            self.log.warn('query-no-device', device_id=device_id)
+            raise
+
+        except Exception as e:
+            self.log.exception('query-exception', device_id=device_id, e=e)
+            raise
+
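+    # Illustrative behaviour sketch (assumed IDs): query() above narrows its
+    # result by the arguments supplied:
+    #
+    #   db.query('onu-1')                   # full device dictionary
+    #   db.query('onu-1', class_id=0x0101)  # {} when the class is not in the DB
+    #   db.query('missing-onu')             # raises KeyError (no such device)
+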
+    def _instance_to_dict(self, instance):
+        if not isinstance(instance, AlarmInstanceData):
+            raise TypeError('{} is not of type AlarmInstanceData'.format(type(instance)))
+
+        data = {
+            INSTANCE_ID_KEY: instance.instance_id,
+            CREATED_KEY: self._string_to_time(instance.created),
+            MODIFIED_KEY: self._string_to_time(instance.modified),
+            ATTRIBUTES_KEY: dict()
+        }
+        for attribute in instance.attributes:
+            data[ATTRIBUTES_KEY][attribute.name] = self._string_to_attribute(attribute.value)
+        return data
+
+    def _class_to_dict(self, val):
+        if not isinstance(val, AlarmClassData):
+            raise TypeError('{} is not of type AlarmClassData'.format(type(val)))
+
+        data = {
+            CLASS_ID_KEY: val.class_id,
+        }
+        for instance in val.instances:
+            data[instance.instance_id] = self._instance_to_dict(instance)
+        return data
+
+    def _device_to_dict(self, val):
+        if not isinstance(val, AlarmDeviceData):
+            raise TypeError('{} is not of type AlarmDeviceData'.format(type(val)))
+
+        data = {
+            DEVICE_ID_KEY: val.device_id,
+            CREATED_KEY: self._string_to_time(val.created),
+            VERSION_KEY: val.version,
+            ME_KEY: dict(),
+            MSG_TYPE_KEY: set()
+        }
+        for class_data in val.classes:
+            data[class_data.class_id] = self._class_to_dict(class_data)
+        for managed_entity in val.managed_entities:
+            data[ME_KEY][managed_entity.class_id] = managed_entity.name
+
+        for msg_type in val.message_types:
+            data[MSG_TYPE_KEY].add(msg_type.message_type)
+
+        return data
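+
+    # Illustrative shape of the dictionary returned above (assumed values):
+    #
+    #   {DEVICE_ID_KEY: 'onu-1', CREATED_KEY: datetime(...), VERSION_KEY: 1,
+    #    ME_KEY: {256: 'OntG'}, MSG_TYPE_KEY: {4, 5},
+    #    0x0101: {CLASS_ID_KEY: 0x0101, 0: {...instance dict...}}}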
diff --git a/python/extensions/omci/database/mib_db_api.py b/python/extensions/omci/database/mib_db_api.py
new file mode 100644
index 0000000..eb93323
--- /dev/null
+++ b/python/extensions/omci/database/mib_db_api.py
@@ -0,0 +1,245 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+OpenOMCI MIB Database API
+"""
+
+import structlog
+from datetime import datetime
+
+CREATED_KEY = 'created'
+MODIFIED_KEY = 'modified'
+MDS_KEY = 'mib_data_sync'
+LAST_SYNC_KEY = 'last_mib_sync'
+VERSION_KEY = 'version'
+DEVICE_ID_KEY = 'device_id'
+CLASS_ID_KEY = 'class_id'
+INSTANCE_ID_KEY = 'instance_id'
+ATTRIBUTES_KEY = 'attributes'
+ME_KEY = 'managed_entities'
+MSG_TYPE_KEY = 'message_types'
+
+
+class DatabaseStateError(Exception):
+    def __init__(self, *args):
+        Exception.__init__(self, *args)
+
+
+class MibDbApi(object):
+    """
+    MIB Database API Base Class
+
+    Derive the ME MIB Database implementation from this API.  For an example
+    implementation, look at the mib_db_dict.py implementation
+    """
+    def __init__(self, omci_agent):
+        """
+        Class initializer
+        :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+        """
+        self.log = structlog.get_logger()
+        self._omci_agent = omci_agent
+        self._started = False
+
+        now = datetime.utcnow()
+        self._created = now
+        self._modified = now
+
+    def start(self):
+        """
+        Start up/restore the database. For in-memory, this will be a no-op. For an
+        external DB, it may need to create the DB and fetch create/modified values
+        """
+        if not self._started:
+            self._started = True
+        # For a derived class that is a persistent DB, Restore DB (connect,
+        # get created/modified times, ....) or something along those lines.
+        # Minimal restore could just be getting ONU device IDs so they are cached
+        # locally. Maximum restore would be a full in-memory version of the database
+        # for fast 'GET' request support.
+        # Remember to restore the '_created' and '_modified' times (above) as well
+        # from the database
+
+    def stop(self):
+        """
+        Shut down the database. For in-memory, this will be a no-op. For an
+        external DB, it may need to flush any pending data and close connections
+        """
+        if self._started:
+            self._started = False
+
+    @property
+    def active(self):
+        """
+        Is the database active
+        :return: (bool) True if active
+        """
+        return self._started
+
+    @property
+    def created(self):
+        """
+        Date (UTC) that the database was created
+        :return: (datetime) creation date
+        """
+        return self._created
+
+    @property
+    def modified(self):
+        """
+        Date (UTC) that the database last added or removed a device
+        or updated a device's ME information
+        :return: (datetime) last modification date
+        """
+        return self._modified
+
+    def add(self, device_id, overwrite=False):
+        """
+        Add a new ONU to database
+
+        :param device_id: (str) Device ID of ONU to add
+        :param overwrite: (bool) Overwrite existing entry if found.
+
+        :raises KeyError: If device already exists and 'overwrite' is False
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def remove(self, device_id):
+        """
+        Remove an ONU from the database
+
+        :param device_id: (str) Device ID of ONU to remove from database
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def set(self, device_id, class_id, entity_id, attributes):
+        """
+        Set/Create a database value.  This should only be called by the MIB synchronizer
+        and its related tasks
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def delete(self, device_id, class_id, entity_id):
+        """
+        Delete an entity from the database if it exists
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME Entity ID
+
+        :returns: (bool) True if the instance was found and deleted. False
+                         if it did not exist.
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+        """
+        Get database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param device_id: (str) ONU Device ID
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+        :param attributes: (list/set or str) Managed Entity instance's attributes
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises KeyError: If the requested device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def on_mib_reset(self, device_id):
+        """
+        Reset/clear the database for a specific Device
+
+        :param device_id: (str) ONU Device ID
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        # Your derived class should clear out all MIB data and update the
+        # modified stats appropriately
+        raise NotImplementedError('Implement this in your derived class')
+
+    def save_mib_data_sync(self, device_id, value):
+        """
+        Save the MIB Data Sync to the database in an easily accessed location
+
+        :param device_id: (str) ONU Device ID
+        :param value: (int) Value to save
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def get_mib_data_sync(self, device_id):
+        """
+        Get the MIB Data Sync value last saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (int) The Value or None if not found
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def save_last_sync(self, device_id, value):
+        """
+        Save the Last Sync time to the database in an easily accessed location
+
+        :param device_id: (str) ONU Device ID
+        :param value: (DateTime) Value to save
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def get_last_sync(self, device_id):
+        """
+        Get the Last Sync Time saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (datetime) The Value or None if not found
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def update_supported_managed_entities(self, device_id, managed_entities):
+        """
+        Update the supported OMCI Managed Entities for this device
+
+        :param device_id: (str) ONU Device ID
+        :param managed_entities: (set) Managed Entity class IDs
+        """
+        raise NotImplementedError('Implement this in your derived class')
+
+    def update_supported_message_types(self, device_id, msg_types):
+        """
+        Update the supported OMCI Message Types for this device
+
+        :param device_id: (str) ONU Device ID
+        :param msg_types: (set) Message Type values (ints)
+        """
+        raise NotImplementedError('Implement this in your derived class')
diff --git a/python/extensions/omci/database/mib_db_dict.py b/python/extensions/omci/database/mib_db_dict.py
new file mode 100644
index 0000000..6a7de8f
--- /dev/null
+++ b/python/extensions/omci/database/mib_db_dict.py
@@ -0,0 +1,524 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import copy
+from mib_db_api import *
+import json
+
+
+class MibDbVolatileDict(MibDbApi):
+    """
+    A very simple in-memory database for ME storage. Data is not persistent
+    across reboots.
+
+    In Phase 2, this DB will be instantiated on a per-ONU basis but act as if
+    it is shared for all ONUs. This class will be updated with an external
+    key-value store (or other appropriate database) in Voltha 1.3 Sprint 3
+
+    This class can be used for unit tests
+    """
+    CURRENT_VERSION = 1
+
+    def __init__(self, omci_agent):
+        """
+        Class initializer
+        :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+        """
+        super(MibDbVolatileDict, self).__init__(omci_agent)
+        self._data = dict()   # device_id -> ME ID -> Inst ID -> Attr Name -> Values
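+        # Illustrative layout sketch (assumed IDs/attribute name): alongside the
+        # metadata keys (CREATED_KEY, MDS_KEY, ...), entries nest as:
+        #
+        #   self._data['onu-1'][0x0100][0][ATTRIBUTES_KEY]['vendor_id'] = 'ABCD'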
+
+    def start(self):
+        """
+        Start up/restore the database. For in-memory, this will be a no-op. For an
+        external DB, it may need to create the DB and fetch create/modified values
+        """
+        super(MibDbVolatileDict, self).start()
+        # TODO: Delete this method if nothing else is done except calling the base class
+
+    def stop(self):
+        """
+        Shut down the database. For in-memory, this will be a no-op. For an
+        external DB, it may need to flush any pending data and close connections
+        """
+        super(MibDbVolatileDict, self).stop()
+        # TODO: Delete this method if nothing else is done except calling the base class
+
+    def add(self, device_id, overwrite=False):
+        """
+        Add a new ONU to database
+
+        :param device_id: (str) Device ID of ONU to add
+        :param overwrite: (bool) Overwrite existing entry if found.
+
+        :raises KeyError: If device already exists and 'overwrite' is False
+        """
+        self.log.debug('add-device', device_id=device_id, overwrite=overwrite)
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not overwrite and device_id in self._data:
+            raise KeyError('Device {} already exists in the database'
+                           .format(device_id))
+
+        now = datetime.utcnow()
+        self._data[device_id] = {
+            DEVICE_ID_KEY: device_id,
+            CREATED_KEY: now,
+            LAST_SYNC_KEY: None,
+            MDS_KEY: 0,
+            VERSION_KEY: MibDbVolatileDict.CURRENT_VERSION,
+            ME_KEY: dict(),
+            MSG_TYPE_KEY: set()
+        }
+
+    def remove(self, device_id):
+        """
+        Remove an ONU from the database
+
+        :param device_id: (str) Device ID of ONU to remove from database
+        """
+        self.log.debug('remove-device', device_id=device_id)
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if device_id in self._data:
+            del self._data[device_id]
+            self._modified = datetime.utcnow()
+
+    def on_mib_reset(self, device_id):
+        """
+        Reset/clear the database for a specific Device
+
+        :param device_id: (str) ONU Device ID
+        :raises DatabaseStateError: If the database is not enabled
+        :raises KeyError: If the device does not exist in the database
+        """
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        device_db = self._data[device_id]
+        self._modified = datetime.utcnow()
+
+        self._data[device_id] = {
+            DEVICE_ID_KEY: device_id,
+            CREATED_KEY: device_db[CREATED_KEY],
+            LAST_SYNC_KEY: device_db[LAST_SYNC_KEY],
+            MDS_KEY: 0,
+            VERSION_KEY: MibDbVolatileDict.CURRENT_VERSION,
+            ME_KEY: device_db[ME_KEY],
+            MSG_TYPE_KEY: device_db[MSG_TYPE_KEY]
+        }
+
+    def save_mib_data_sync(self, device_id, value):
+        """
+        Save the MIB Data Sync to the database in an easily accessed location
+
+        :param device_id: (str) ONU Device ID
+        :param value: (int) Value to save
+        """
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not isinstance(value, int):
+            raise TypeError('MIB Data Sync should be an integer')
+
+        if not 0 <= value <= 255:
+            raise ValueError('Invalid MIB-data-sync value {}.  Must be 0..255'.
+                             format(value))
+
+        self._data[device_id][MDS_KEY] = value
+        self._modified = datetime.utcnow()
+
+    def get_mib_data_sync(self, device_id):
+        """
+        Get the MIB Data Sync value last saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (int) The Value or None if not found
+        """
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if device_id not in self._data:
+            return None
+
+        return self._data[device_id].get(MDS_KEY)
+
+    def save_last_sync(self, device_id, value):
+        """
+        Save the Last Sync time to the database in an easily accessed location
+
+        :param device_id: (str) ONU Device ID
+        :param value: (DateTime) Value to save
+        """
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not isinstance(value, datetime):
+            raise TypeError('Expected a datetime object, got {}'.
+                            format(type(value)))
+
+        self._data[device_id][LAST_SYNC_KEY] = value
+        self._modified = datetime.utcnow()
+
+    def get_last_sync(self, device_id):
+        """
+        Get the Last Sync Time saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (datetime) The Value or None if not found
+        """
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if device_id not in self._data:
+            return None
+
+        return self._data[device_id].get(LAST_SYNC_KEY)
+
+    def set(self, device_id, class_id, instance_id, attributes):
+        """
+        Set a database value.  This should only be called by the MIB synchronizer
+        and its related tasks
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError("Invalid Class ID: {}, should be 0..65535".format(class_id))
+
+        if not 0 <= instance_id <= 0xFFFF:
+            raise ValueError("Invalid Instance ID: {}, should be 0..65535".format(instance_id))
+
+        if not isinstance(attributes, dict):
+            raise TypeError("Attributes should be a dictionary")
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        now = datetime.utcnow()
+        try:
+            device_db = self._data[device_id]
+            class_db = device_db.get(class_id)
+            created = False
+
+            if class_db is None:
+                device_db[class_id] = {CLASS_ID_KEY: class_id}
+
+                class_db = device_db[class_id]
+                self._modified = now
+                created = True
+
+            instance_db = class_db.get(instance_id)
+            if instance_db is None:
+                class_db[instance_id] = {
+                    INSTANCE_ID_KEY: instance_id,
+                    CREATED_KEY: now,
+                    MODIFIED_KEY: now,
+                    ATTRIBUTES_KEY: dict()
+                }
+                instance_db = class_db[instance_id]
+                self._modified = now
+                created = True
+
+            changed = False
+
+            me_map = self._omci_agent.get_device(device_id).me_map
+            entity = me_map.get(class_id)
+
+            for attribute, value in attributes.items():
+                assert isinstance(attribute, basestring)
+                assert value is not None, "Attribute '{}' value cannot be 'None'".\
+                    format(attribute)
+
+                db_value = instance_db[ATTRIBUTES_KEY].get(attribute) \
+                    if ATTRIBUTES_KEY in instance_db else None
+
+                if entity is not None and isinstance(value, basestring):
+                    from scapy.fields import StrFixedLenField
+                    attr_index = entity.attribute_name_to_index_map[attribute]
+                    eca = entity.attributes[attr_index]
+                    field = eca.field
+
+                    if isinstance(field, StrFixedLenField):
+                        from scapy.base_classes import Packet_metaclass
+                        if isinstance(field.default, Packet_metaclass) \
+                                and hasattr(field.default, 'json_from_value'):
+                            # Value/hex of Packet Class to string
+                            value = field.default.json_from_value(value)
+
+                if entity is not None and attribute in entity.attribute_name_to_index_map:
+                    attr_index = entity.attribute_name_to_index_map[attribute]
+                    eca = entity.attributes[attr_index]
+                    field = eca.field
+
+                    if hasattr(field, 'to_json'):
+                        value = field.to_json(value, db_value)
+
+                # Complex packet types may have an attribute encoded as an object; this
+                # can be checked by seeing if a to_json() conversion callable is
+                # defined
+                if hasattr(value, 'to_json'):
+                    value = value.to_json()
+
+                # Other complex packet types may be a repeated list field (FieldListField)
+                elif isinstance(value, (list, dict)):
+                    value = json.dumps(value, separators=(',', ':'))
+
+                assert db_value is None or isinstance(value, type(db_value)), \
+                    "Value type for attribute '{}' is changing from '{}' to '{}'".\
+                    format(attribute, type(db_value), type(value))
+
+                if db_value is None or db_value != value:
+                    instance_db[ATTRIBUTES_KEY][attribute] = value
+                    changed = True
+
+            if changed:
+                instance_db[MODIFIED_KEY] = now
+                self._modified = now
+
+            return changed or created
+
+        except Exception as e:
+            self.log.error('set-failure', e=e, class_id=class_id,
+                           instance_id=instance_id, attributes=attributes)
+            raise
+
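+    # Illustrative note (assumed device/class/attribute values): complex values
+    # are normalized before the comparison above; for example, a list attribute
+    # is JSON-encoded with compact separators:
+    #
+    #   db.set('onu-1', 0x0110, 1, {'interval_data': [0, 1, 2]})
+    #   # stored internally as the string '[0,1,2]'
+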
+    def delete(self, device_id, class_id, instance_id):
+        """
+        Delete an entity from the database if it exists.  If all instances
+        of a class are deleted, the class is deleted as well.
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+
+        :returns: (bool) True if the instance was found and deleted. False
+                         if it did not exist.
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError('class-id is 0..0xFFFF')
+
+        if not 0 <= instance_id <= 0xFFFF:
+            raise ValueError('instance-id is 0..0xFFFF')
+
+        try:
+            device_db = self._data[device_id]
+            class_db = device_db.get(class_id)
+
+            if class_db is None:
+                return False
+
+            instance_db = class_db.get(instance_id)
+            if instance_db is None:
+                return False
+
+            now = datetime.utcnow()
+            del class_db[instance_id]
+
+            if len(class_db) == 1:      # Only 'CLASS_ID_KEY' remains
+                del device_db[class_id]
+
+            self._modified = now
+            return True
+
+        except Exception as e:
+            self.log.error('delete-failure', e=e)
+            raise
+
+    def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+        """
+        Get database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param device_id: (str) ONU Device ID
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+        :param attributes: (list/set or str) Managed Entity instance's attributes
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises KeyError: If the requested device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('query', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        device_db = self._data[device_id]
+        if class_id is None:
+            return self._fix_dev_json_attributes(copy.copy(device_db), device_id)
+
+        if not isinstance(class_id, int):
+            raise TypeError('Class ID should be an integer')
+
+        me_map = self._omci_agent.get_device(device_id).me_map
+        entity = me_map.get(class_id)
+
+        class_db = device_db.get(class_id, dict())
+        if instance_id is None or len(class_db) == 0:
+            return self._fix_cls_json_attributes(copy.copy(class_db), entity)
+
+        if not isinstance(instance_id, int):
+            raise TypeError('Instance ID should be an integer')
+
+        instance_db = class_db.get(instance_id, dict())
+        if attributes is None or len(instance_db) == 0:
+            return self._fix_inst_json_attributes(copy.copy(instance_db), entity)
+
+        if not isinstance(attributes, (basestring, list, set)):
+            raise TypeError('Attributes should be a string or list/set of strings')
+
+        if not isinstance(attributes, (list, set)):
+            attributes = [attributes]
+
+        results = {attr: val for attr, val in instance_db[ATTRIBUTES_KEY].iteritems()
+                   if attr in attributes}
+
+        for attr, attr_data in results.items():
+            attr_index = entity.attribute_name_to_index_map[attr] \
+                if entity is not None and attr in entity.attribute_name_to_index_map else None
+            eca = entity.attributes[attr_index] if attr_index is not None else None
+            results[attr] = self._fix_attr_json_attribute(copy.copy(attr_data), eca)
+
+        return results
+
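+    # Illustrative query sketch (assumed IDs/attribute name):
+    #
+    #   db.query('onu-1')                          # full device snapshot
+    #   db.query('onu-1', 0x0100)                  # every instance of a class
+    #   db.query('onu-1', 0x0100, 0)               # a single instance
+    #   db.query('onu-1', 0x0100, 0, 'vendor_id')  # only the named attribute(s)
+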
+    #########################################################################
+    # Following routines are used to fix-up JSON encoded complex data. A
+    # nice side effect is that the values returned will be a deep-copy of
+    # the class/instance/attribute data of what is in the database. Note
+    # That other database values (created, modified, ...) will still reference
+    # back to the original DB.
+
+    def _fix_dev_json_attributes(self, dev_data, device_id):
+        for cls_id, cls_data in dev_data.items():
+            if isinstance(cls_id, int):
+                me_map = self._omci_agent.get_device(device_id).me_map
+                entity = me_map.get(cls_id)
+                dev_data[cls_id] = self._fix_cls_json_attributes(copy.copy(cls_data), entity)
+        return dev_data
+
+    def _fix_cls_json_attributes(self, cls_data, entity):
+        for inst_id, inst_data in cls_data.items():
+            if isinstance(inst_id, int):
+                cls_data[inst_id] = self._fix_inst_json_attributes(copy.copy(inst_data), entity)
+        return cls_data
+
+    def _fix_inst_json_attributes(self, inst_data, entity):
+        if ATTRIBUTES_KEY in inst_data:
+            for attr, attr_data in inst_data[ATTRIBUTES_KEY].items():
+                attr_index = entity.attribute_name_to_index_map[attr] \
+                    if entity is not None and attr in entity.attribute_name_to_index_map else None
+                eca = entity.attributes[attr_index] if attr_index is not None else None
+                inst_data[ATTRIBUTES_KEY][attr] = self._fix_attr_json_attribute(copy.copy(attr_data), eca)
+        return inst_data
+
+    def _fix_attr_json_attribute(self, attr_data, eca):
+
+        try:
+            if eca is not None:
+                field = eca.field
+                if hasattr(field, 'load_json'):
+                    value = field.load_json(attr_data)
+                    return value
+
+            return json.loads(attr_data) if isinstance(attr_data, basestring) else attr_data
+
+        except ValueError:
+            return attr_data
+
+        except Exception as e:
+            self.log.exception('json-fixup-failed', e=e)
+            return attr_data
+
+    def update_supported_managed_entities(self, device_id, managed_entities):
+        """
+        Update the supported OMCI Managed Entities for this device
+
+        :param device_id: (str) ONU Device ID
+        :param managed_entities: (set) Managed Entity class IDs
+        """
+        now = datetime.utcnow()
+        try:
+            device_db = self._data[device_id]
+
+            entities = {class_id: self._managed_entity_to_name(device_id, class_id)
+                        for class_id in managed_entities}
+
+            device_db[ME_KEY] = entities
+            self._modified = now
+
+        except Exception as e:
+            self.log.error('set-me-failure', e=e)
+            raise
+
+    def _managed_entity_to_name(self, device_id, class_id):
+        me_map = self._omci_agent.get_device(device_id).me_map
+        entity = me_map.get(class_id)
+
+        return entity.__name__ if entity is not None else 'UnknownManagedEntity'
+
+    def update_supported_message_types(self, device_id, msg_types):
+        """
+        Update the supported OMCI Message Types for this device
+
+        :param device_id: (str) ONU Device ID
+        :param msg_types: (set) Message Type values (ints)
+        """
+        now = datetime.utcnow()
+        try:
+            msg_type_set = {msg_type.value for msg_type in msg_types}
+            self._data[device_id][MSG_TYPE_KEY] = msg_type_set
+            self._modified = now
+
+        except Exception as e:
+            self.log.error('set-me-failure', e=e)
+            raise
diff --git a/python/extensions/omci/database/mib_db_ext.py b/python/extensions/omci/database/mib_db_ext.py
new file mode 100644
index 0000000..6b49e2b
--- /dev/null
+++ b/python/extensions/omci/database/mib_db_ext.py
@@ -0,0 +1,1049 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from mib_db_api import *
+from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, \
+    MibDeviceData, MibAttributeData, MessageType, ManagedEntity
+from voltha.extensions.omci.omci_entities import *
+from voltha.extensions.omci.omci_fields import *
+from scapy.fields import StrField, FieldListField, PacketField
+
+
+class MibDbStatistic(object):
+    """
+    For debug/tuning purposes.
+
+    With etcd around the v1.5 time frame, seeing the following:
+
+        o Creates:  Avg:  57.1 mS, Min:  76 mS, Max: 511 mS    (146 samples)
+        o Sets:     Avg: 303.9 mS, Min: 126 mS, Max: 689 mS    (103 samples)
+        o Gets:     Avg:   3.3 mS, Min:   0 mS, Max:   8 mS    (  9 samples)
+        o Deletes:  No samples
+    """
+    def __init__(self, name):
+        self._name = name
+        self._count = 0
+        self._total_time = 0        # Total milliseconds
+        self._min_time = 99999999
+        self._max_time = 0
+
+    def get_statistics(self):
+        return {
+            'name': self._name,
+            'count': self._count,
+            'total_time': self._total_time,
+            'min_time': self._min_time,
+            'max_time': self._max_time,
+            'avg_time': self._total_time / self._count if self._count > 0 else 0
+        }
+
+    def clear_statistics(self):
+        self._count = 0
+        self._total_time = 0        # Total milliseconds
+        self._min_time = 99999999
+        self._max_time = 0
+
+    def increment(self, time):
+        self._count += 1
+        self._total_time += time        # Total milliseconds
+        if self._min_time > time:
+            self._min_time = time
+        if self._max_time < time:
+            self._max_time = time
+
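+# Illustrative usage sketch (not part of this change):
+#
+#   stat = MibDbStatistic('get')
+#   stat.increment(12)       # record a 12 mS operation
+#   stat.get_statistics()    # -> {'name': 'get', 'count': 1, 'avg_time': 12, ...}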
+
+class MibDbExternal(MibDbApi):
+    """
+    A persistent external OpenOMCI MIB Database
+    """
+    CURRENT_VERSION = 1                       # VOLTHA v1.3.0 release
+
+    _TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
+
+    # Paths from root proxy
+    MIB_PATH = '/omci_mibs'
+    DEVICE_PATH = MIB_PATH + '/{}'            # .format(device_id)
+
+    # Classes, Instances, and Attributes as lists from root proxy
+    CLASSES_PATH = DEVICE_PATH + '/classes'                                # .format(device_id)
+    INSTANCES_PATH = DEVICE_PATH + '/classes/{}/instances'                 # .format(device_id, class_id)
+    ATTRIBUTES_PATH = DEVICE_PATH + '/classes/{}/instances/{}/attributes'  # .format(device_id, class_id, instance_id)
+
+    # Single Class, Instance, and Attribute as objects from device proxy
+    CLASS_PATH = '/classes/{}'                                 # .format(class_id)
+    INSTANCE_PATH = '/classes/{}/instances/{}'                 # .format(class_id, instance_id)
+    ATTRIBUTE_PATH = '/classes/{}/instances/{}/attributes/{}'  # .format(class_id, instance_id
+                                                               #         attribute_name)
+
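+    # Illustrative path sketch (assumed device/class/instance IDs):
+    #
+    #   CLASSES_PATH.format('onu-1')    -> '/omci_mibs/onu-1/classes'
+    #   INSTANCE_PATH.format(0x101, 0)  -> '/classes/257/instances/0'
+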
+    def __init__(self, omci_agent):
+        """
+        Class initializer
+        :param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
+        """
+        super(MibDbExternal, self).__init__(omci_agent)
+        self._core = omci_agent.core
+        # Some statistics to help with debug/tuning/...
+        self._statistics = {
+            'get': MibDbStatistic('get'),
+            'set': MibDbStatistic('set'),
+            'create': MibDbStatistic('create'),
+            'delete': MibDbStatistic('delete')
+        }
+
+    def start(self):
+        """
+        Start up/restore the database
+        """
+        self.log.debug('start')
+
+        if not self._started:
+            super(MibDbExternal, self).start()
+            root_proxy = self._core.get_proxy('/')
+
+            try:
+                base = root_proxy.get(MibDbExternal.MIB_PATH)
+                self.log.info('db-exists', num_devices=len(base))
+
+            except Exception as e:
+                self.log.exception('start-failure', e=e)
+                raise
+
+    def stop(self):
+        """
+        Shut down the database
+        """
+        self.log.debug('stop')
+
+        if self._started:
+            super(MibDbExternal, self).stop()
+            # TODO: Delete this method if nothing else is done except calling the base class
+
+    def _time_to_string(self, time):
+        return time.strftime(MibDbExternal._TIME_FORMAT) if time is not None else ''
+
+    def _string_to_time(self, time):
+        return datetime.strptime(time, MibDbExternal._TIME_FORMAT) if len(time) else None
+
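+    # Illustrative sketch: the two helpers above round-trip timestamps through
+    # _TIME_FORMAT, e.g. datetime(2018, 4, 15, 10, 30, 0, 123456) is stored as
+    # '20180415-103000.123456', and an empty string decodes back to None.
+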
+    def _attribute_to_string(self, device_id, class_id, attr_name, value, old_value=None):
+        """
+        Convert an ME's attribute value to string representation
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) Class ID
+        :param attr_name: (str) Attribute Name (see EntityClasses)
+        :param value: (various) Attribute Value
+
+        :return: (str) String representation of the value
+        :raises KeyError: Device, Class ID, or Attribute does not exist
+        """
+        try:
+            me_map = self._omci_agent.get_device(device_id).me_map
+
+            if class_id in me_map:
+                entity = me_map[class_id]
+                attr_index = entity.attribute_name_to_index_map[attr_name]
+                eca = entity.attributes[attr_index]
+                field = eca.field
+            else:
+                # Here for auto-defined MEs (ones not defined in ME Map)
+                from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
+                field = StrFixedLenField(UNKNOWN_CLASS_ATTRIBUTE_KEY, None, 24)
+
+            if isinstance(field, StrFixedLenField):
+                from scapy.base_classes import Packet_metaclass
+                if hasattr(value, 'to_json') and not isinstance(value, basestring):
+                    # Packet Class to string
+                    str_value = value.to_json()
+                elif isinstance(field.default, Packet_metaclass) \
+                        and hasattr(field.default, 'json_from_value'):
+                    # Value/hex of Packet Class to string
+                    str_value = field.default.json_from_value(value)
+                else:
+                    str_value = str(value)
+
+            elif isinstance(field, (StrField, MACField, IPField)):
+                #  For StrField, value is an str already
+                #  For MACField, value is a string in ':' delimited form
+                #  For IPField, value is a string in '.' delimited form
+                str_value = str(value)
+
+            elif isinstance(field, (ByteField, ShortField, IntField, LongField)):
+                #  For ByteField, ShortField, IntField, and LongField value is an int
+                str_value = str(value)
+
+            elif isinstance(field, BitField):
+                # For BitField, value is a long
+                #
+                str_value = str(value)
+
+            elif hasattr(field, 'to_json'):
+                str_value = field.to_json(value, old_value)
+
+            elif isinstance(field, FieldListField):
+                str_value = json.dumps(value, separators=(',', ':'))
+
+            else:
+                self.log.warning('default-conversion', type=type(field),
+                                 class_id=class_id, attribute=attr_name, value=str(value))
+                str_value = str(value)
+
+            return str_value
+
+        except Exception as e:
+            self.log.exception('attr-to-string', device_id=device_id,
+                               class_id=class_id, attr=attr_name,
+                               value=value, e=e)
+            raise
+
+    def _string_to_attribute(self, device_id, class_id, attr_name, str_value):
+        """
+        Convert an ME's attribute value-string to its Scapy decode equivalent
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) Class ID
+        :param attr_name: (str) Attribute Name (see EntityClasses)
+        :param str_value: (str) Attribute Value in string form
+
+        :return: (various) The attribute value decoded from its string form
+        :raises KeyError: Device, Class ID, or Attribute does not exist
+        """
+        try:
+            me_map = self._omci_agent.get_device(device_id).me_map
+
+            if class_id in me_map:
+                entity = me_map[class_id]
+                attr_index = entity.attribute_name_to_index_map[attr_name]
+                eca = entity.attributes[attr_index]
+                field = eca.field
+            else:
+                # Here for auto-defined MEs (ones not defined in ME Map)
+                from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
+                field = StrFixedLenField(UNKNOWN_CLASS_ATTRIBUTE_KEY, None, 24)
+
+            if isinstance(field, StrFixedLenField):
+                from scapy.base_classes import Packet_metaclass
+                default = field.default
+                if isinstance(default, Packet_metaclass) and \
+                        hasattr(default, 'to_json'):
+                    value = json.loads(str_value)
+                else:
+                    value = str_value
+
+            elif isinstance(field, MACField):
+                value = str_value
+
+            elif isinstance(field, IPField):
+                value = str_value
+
+            elif isinstance(field, (ByteField, ShortField, IntField, LongField)):
+                if str_value.lower() in ('true', 'false'):
+                    str_value = '1' if str_value.lower() == 'true' else '0'
+                value = int(str_value)
+
+            elif isinstance(field, BitField):
+                value = long(str_value)
+
+            elif hasattr(field, 'load_json'):
+                value = field.load_json(str_value)
+
+            elif isinstance(field, FieldListField):
+                value = json.loads(str_value)
+
+            else:
+                self.log.warning('default-conversion', type=type(field),
+                                 class_id=class_id, attribute=attr_name, value=str_value)
+                value = None
+
+            return value
+
+        except Exception as e:
+            self.log.exception('string-to-attr', device_id=device_id,
+                               class_id=class_id, attr=attr_name,
+                               value=str_value, e=e)
+            raise
+
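+    # Illustrative round-trip sketch (assumed class/attribute): an int-backed
+    # attribute is persisted as a string and decoded on the way back out:
+    #
+    #   s = db._attribute_to_string('onu-1', 0x0100, 'battery_backup', 1)  # '1'
+    #   v = db._string_to_attribute('onu-1', 0x0100, 'battery_backup', s)  # 1
+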
+    def add(self, device_id, overwrite=False):
+        """
+        Add a new ONU to database
+
+        :param device_id: (str) Device ID of ONU to add
+        :param overwrite: (bool) Overwrite existing entry if found.
+
+        :raises KeyError: If device already exists and 'overwrite' is False
+        """
+        self.log.debug('add-device', device_id=device_id, overwrite=overwrite)
+
+        now = datetime.utcnow()
+        found = False
+        root_proxy = self._core.get_proxy('/')
+
+        data = MibDeviceData(device_id=device_id,
+                             created=self._time_to_string(now),
+                             last_sync_time='',
+                             mib_data_sync=0,
+                             version=MibDbExternal.CURRENT_VERSION)
+        try:
+            dev_proxy = self._device_proxy(device_id)
+            found = True
+
+            if not overwrite:
+                # Device already exists
+                raise KeyError('Device with ID {} already exists in MIB database'.
+                               format(device_id))
+
+            # Overwrite with new data
+            data = dev_proxy.get('/', depth=0)
+            self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id), data)
+            self._modified = now
+
+        except KeyError:
+            if found:
+                raise
+            # Did not exist, add it now
+            root_proxy.add(MibDbExternal.MIB_PATH, data)
+            self._created = now
+            self._modified = now
+
+    def remove(self, device_id):
+        """
+        Remove an ONU from the database
+
+        :param device_id: (str) Device ID of ONU to remove from database
+        """
+        self.log.debug('remove-device', device_id=device_id)
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        try:
+            self._root_proxy.remove(MibDbExternal.DEVICE_PATH.format(device_id))
+            self._modified = datetime.utcnow()
+
+        except KeyError:
+            # Did not exist, which is not a failure
+            pass
+
+        except Exception as e:
+            self.log.exception('remove-exception', device_id=device_id, e=e)
+            raise
+
+    @property
+    def _root_proxy(self):
+        return self._core.get_proxy('/')
+
+    def _device_proxy(self, device_id):
+        """
+        Return a config proxy to the OMCI MIB_DB leaf for a given device
+
+        :param device_id: (str) ONU Device ID
+        :return: (ConfigProxy) Configuration proxy rooted at OMCI MIB DB
+        :raises KeyError: If the device does not exist in the database
+        """
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        return self._core.get_proxy(MibDbExternal.DEVICE_PATH.format(device_id))
+
+    def _class_proxy(self, device_id, class_id, create=False):
+        """
+        Get a config proxy to a specific managed entity class
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) Class ID
+        :param create: (bool) If true, create default instance (and class)
+        :return: (ConfigProxy) Class configuration proxy
+
+        :raises DatabaseStateError: If database is not started
+        :raises KeyError: If Instance does not exist and 'create' is False
+        """
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError('class-id is 0..0xFFFF')
+
+        fmt = MibDbExternal.DEVICE_PATH + MibDbExternal.CLASS_PATH
+        path = fmt.format(device_id, class_id)
+
+        try:
+            return self._core.get_proxy(path)
+
+        except KeyError:
+            if not create:
+                # This can occur right after a MIB Reset if the ONU publishes AVCs right
+                # away, and during the MIB audit resync for MEs the ONU creates in
+                # response to an OLT-created ME.  Fail here since these cases occur
+                # during a verification 'query' and not during ME creation on resync.
+                # Calling code should handle the exception if it is expected on occasion.
+                self.log.debug('class-proxy-does-not-exist', device_id=device_id,
+                               class_id=class_id)
+                raise
+
+        # Create class
+        data = MibClassData(class_id=class_id)
+        root_path = MibDbExternal.CLASSES_PATH.format(device_id)
+        self._root_proxy.add(root_path, data)
+
+        return self._core.get_proxy(path)
+
+    def _instance_proxy(self, device_id, class_id, instance_id, create=False):
+        """
+        Get a config proxy to a specific managed entity instance
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) Class ID
+        :param instance_id: (int) Instance ID
+        :param create: (bool) If true, create default instance (and class)
+        :return: (ConfigProxy) Instance configuration proxy
+
+        :raises DatabaseStateError: If database is not started
+        :raises KeyError: If Instance does not exist and 'create' is False
+        """
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError('class-id must be 0..0xFFFF')
+
+        if not 0 <= instance_id <= 0xFFFF:
+            raise ValueError('instance-id must be 0..0xFFFF')
+
+        fmt = MibDbExternal.DEVICE_PATH + MibDbExternal.INSTANCE_PATH
+        path = fmt.format(device_id, class_id, instance_id)
+
+        try:
+            return self._core.get_proxy(path)
+
+        except KeyError:
+            if not create:
+                # This can occur right after a MIB Reset if the ONU publishes AVCs right away,
+                # and during the MIB audit/resync for ONU-created MEs in response to an OLT-
+                # created ME.  Fail here, since these lookups occur during a verification
+                # 'query' and not during ME creation on resync. Calling code should handle
+                # the exception if it is expected to occur on occasion.
+                self.log.info('instance-proxy-does-not-exist', device_id=device_id,
+                              class_id=class_id, instance_id=instance_id)
+                raise
+
+        # Create instance, first make sure class exists
+        self._class_proxy(device_id, class_id, create=True)
+
+        now = self._time_to_string(datetime.utcnow())
+        data = MibInstanceData(instance_id=instance_id, created=now, modified=now)
+        root_path = MibDbExternal.INSTANCES_PATH.format(device_id, class_id)
+        self._root_proxy.add(root_path, data)
+
+        return self._core.get_proxy(path)
+
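+    # Hedged usage sketch for the two proxy helpers above (the class and
+    # instance IDs are illustrative; both helpers raise KeyError when
+    # create=False and the node does not exist):
+    #
+    #   class_proxy = db._class_proxy(device_id, 6, create=True)
+    #   inst_proxy = db._instance_proxy(device_id, 6, 0x101, create=True)
+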
+    def on_mib_reset(self, device_id):
+        """
+        Reset/clear the database for a specific Device
+
+        :param device_id: (str) ONU Device ID
+        :raises DatabaseStateError: If the database is not enabled
+        :raises KeyError: If the device does not exist in the database
+        """
+        self.log.debug('on-mib-reset', device_id=device_id)
+
+        try:
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=2)
+
+            # Wipe out any existing class IDs
+            class_ids = [c.class_id for c in data.classes]
+
+            if len(class_ids):
+                for class_id in class_ids:
+                    device_proxy.remove(MibDbExternal.CLASS_PATH.format(class_id))
+
+            # Reset MIB Data Sync to zero
+            now = datetime.utcnow()
+            data = MibDeviceData(device_id=device_id,
+                                 created=data.created,
+                                 last_sync_time=data.last_sync_time,
+                                 mib_data_sync=0,
+                                 version=MibDbExternal.CURRENT_VERSION)
+            # Update
+            self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+                                    data)
+            self._modified = now
+            self.log.debug('mib-reset-complete', device_id=device_id)
+
+        except Exception as e:
+            self.log.exception('mib-reset-exception', device_id=device_id, e=e)
+            raise
+
+    def save_mib_data_sync(self, device_id, value):
+        """
+        Save the MIB Data Sync to the database in an easy location to access
+
+        :param device_id: (str) ONU Device ID
+        :param value: (int) Value to save
+        """
+        self.log.debug('save-mds', device_id=device_id, value=value)
+
+        try:
+            if not isinstance(value, int):
+                raise TypeError('MIB Data Sync is an integer')
+
+            if not 0 <= value <= 255:
+                raise ValueError('Invalid MIB-data-sync value {}.  Must be 0..255'.
+                                 format(value))
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+
+            now = datetime.utcnow()
+            data.mib_data_sync = value
+
+            # Update
+            self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+                                    data)
+            self._modified = now
+            self.log.debug('save-mds-complete', device_id=device_id)
+
+        except Exception as e:
+            self.log.exception('save-mds-exception', device_id=device_id, e=e)
+            raise
+
+    def get_mib_data_sync(self, device_id):
+        """
+        Get the MIB Data Sync value last saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (int) The value, or None if not found
+        """
+        self.log.debug('get-mds', device_id=device_id)
+
+        try:
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+            return int(data.mib_data_sync)
+
+        except KeyError:
+            return None     # OMCI MIB_DB entry has not yet been created
+
+        except Exception as e:
+            self.log.exception('get-mds-exception', device_id=device_id, e=e)
+            raise
+
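+    # Hedged usage sketch: MIB Data Sync save/get round-trip (the value is
+    # illustrative).  A device whose MIB_DB entry has not yet been created
+    # yields None instead of raising:
+    #
+    #   db.save_mib_data_sync(device_id, 42)
+    #   assert db.get_mib_data_sync(device_id) == 42
+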
+    def save_last_sync(self, device_id, value):
+        """
+        Save the Last Sync time to the database in an easy location to access
+
+        :param device_id: (str) ONU Device ID
+        :param value: (DateTime) Value to save
+        """
+        self.log.debug('save-last-sync', device_id=device_id, time=str(value))
+
+        try:
+            if not isinstance(value, datetime):
+                raise TypeError('Expected a datetime object, got {}'.
+                                format(type(value)))
+
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+
+            now = datetime.utcnow()
+            data.last_sync_time = self._time_to_string(value)
+
+            # Update
+            self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+                                    data)
+            self._modified = now
+            self.log.debug('save-last-sync-complete', device_id=device_id)
+
+        except Exception as e:
+            self.log.exception('save-last-sync-exception', device_id=device_id, e=e)
+            raise
+
+    def get_last_sync(self, device_id):
+        """
+        Get the Last Sync Time saved to the database for a device
+
+        :param device_id: (str) ONU Device ID
+        :return: (datetime) The value, or None if not found
+        """
+        self.log.debug('get-last-sync', device_id=device_id)
+
+        try:
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+            return self._string_to_time(data.last_sync_time)
+
+        except KeyError:
+            return None     # OMCI MIB_DB entry has not yet been created
+
+        except Exception as e:
+            self.log.exception('get-last-sync-exception', e=e)
+            raise
+
+    def _add_new_class(self, device_id, class_id, instance_id, attributes):
+        """
+        Create an entry for a new class in the external database
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+        """
+        self.log.debug('add', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+
+        now = self._time_to_string(datetime.utcnow())
+        attrs = []
+        for k, v in attributes.items():
+            if k == 'serial_number':
+                vendor_id = str(v[0:4])
+                vendor_specific = v[4:]
+                vendor_specific = str(vendor_specific.encode('hex'))
+                str_value = vendor_id + vendor_specific
+                attrs.append(MibAttributeData(name=k, value=str_value))
+            else:
+                str_value = self._attribute_to_string(device_id, class_id, k, v)
+                attrs.append(MibAttributeData(name=k, value=str_value))
+
+        class_data = MibClassData(class_id=class_id,
+                                  instances=[MibInstanceData(instance_id=instance_id,
+                                                             created=now,
+                                                             modified=now,
+                                                             attributes=attrs)])
+
+        self._root_proxy.add(MibDbExternal.CLASSES_PATH.format(device_id), class_data)
+        self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+                       entity_id=instance_id, attributes=attributes)
+        return True
+
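+    # Worked example of the serial_number special case above (Python 2, with
+    # illustrative bytes).  An OMCI serial number is 4 ASCII vendor-ID
+    # characters followed by 4 vendor-specific binary bytes, so it is stored
+    # as the vendor ID plus the hex encoding of the remainder:
+    #
+    #   v = 'BRCM' + '\x12\x34\x56\x78'
+    #   str(v[0:4]) + str(v[4:].encode('hex'))   # -> 'BRCM12345678'
+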
+    def _add_new_instance(self,  device_id, class_id, instance_id, attributes):
+        """
+        Create an entry for an instance of an existing class in the external database
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+        """
+        self.log.debug('add', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+
+        now = self._time_to_string(datetime.utcnow())
+        attrs = []
+        for k, v in attributes.items():
+            if k == 'serial_number':
+                vendor_id = str(v[0:4])
+                vendor_specific = v[4:]
+                vendor_specific = str(vendor_specific.encode('hex'))
+                str_value = vendor_id+vendor_specific
+                attrs.append(MibAttributeData(name=k, value=str_value))
+            else:
+                str_value = self._attribute_to_string(device_id, class_id, k, v)
+                attrs.append(MibAttributeData(name=k, value=str_value))
+
+        instance_data = MibInstanceData(instance_id=instance_id,
+                                        created=now,
+                                        modified=now,
+                                        attributes=attrs)
+
+        self._root_proxy.add(MibDbExternal.INSTANCES_PATH.format(device_id, class_id),
+                             instance_data)
+
+        self.log.debug('set-complete', device_id=device_id, class_id=class_id,
+                       entity_id=instance_id, attributes=attributes)
+        return True
+
+    def set(self, device_id, class_id, instance_id, attributes):
+        """
+        Set a database value.  This should only be called by the MIB synchronizer
+        and its related tasks
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) ME Entity ID
+        :param attributes: (dict) Attribute dictionary
+
+        :returns: (bool) True if the value was saved to the database. False if the
+                         value was identical to the current instance
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('set', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+        try:
+            if not isinstance(device_id, basestring):
+                raise TypeError('Device ID should be a string')
+
+            if not 0 <= class_id <= 0xFFFF:
+                raise ValueError("Invalid Class ID: {}, should be 0..65535".format(class_id))
+
+            if not 0 <= instance_id <= 0xFFFF:
+                raise ValueError("Invalid Instance ID: {}, should be 0..65535".format(instance_id))
+
+            if not isinstance(attributes, dict):
+                raise TypeError("Attributes should be a dictionary")
+
+            if not self._started:
+                raise DatabaseStateError('The Database is not currently active')
+
+            # Determine the best strategy to add the information
+            dev_proxy = self._device_proxy(device_id)
+
+            operation = 'set'
+            start_time = None
+            try:
+                class_data = dev_proxy.get(MibDbExternal.CLASS_PATH.format(class_id), deep=True)
+
+                inst_data = next((inst for inst in class_data.instances
+                                 if inst.instance_id == instance_id), None)
+
+                if inst_data is None:
+                    operation = 'create'
+                    start_time = datetime.utcnow()
+                    return self._add_new_instance(device_id, class_id, instance_id, attributes)
+
+                # Possibly adding to or updating an existing instance
+
+                modified = False
+                new_attributes = []
+                exist_attr_indexes = dict()
+                attr_len = len(inst_data.attributes)
+
+                for index in xrange(0, attr_len):
+                    name = inst_data.attributes[index].name
+                    value = inst_data.attributes[index].value
+                    exist_attr_indexes[name] = index
+                    new_attributes.append(MibAttributeData(name=name, value=value))
+
+                for k, v in attributes.items():
+                    try:
+                        old_value = None if k not in exist_attr_indexes \
+                            else new_attributes[exist_attr_indexes[k]].value
+
+                        str_value = self._attribute_to_string(device_id, class_id, k, v, old_value)
+
+                        if k not in exist_attr_indexes:
+                            new_attributes.append(MibAttributeData(name=k, value=str_value))
+                            modified = True
+
+                        elif new_attributes[exist_attr_indexes[k]].value != str_value:
+                            new_attributes[exist_attr_indexes[k]].value = str_value
+                            modified = True
+
+                    except Exception as e:
+                        self.log.exception('save-error', e=e, class_id=class_id,
+                                           attr=k, value_type=type(v))
+
+                if modified:
+                    now = datetime.utcnow()
+                    start_time = now
+                    new_data = MibInstanceData(instance_id=instance_id,
+                                               created=inst_data.created,
+                                               modified=self._time_to_string(now),
+                                               attributes=new_attributes)
+                    dev_proxy.remove(MibDbExternal.INSTANCE_PATH.format(class_id, instance_id))
+                    self._root_proxy.add(MibDbExternal.INSTANCES_PATH.format(device_id,
+                                                                             class_id), new_data)
+                return modified
+
+            except KeyError:
+                # Here if the class-id does not yet exist in the database
+                self.log.debug("adding-key-not-found", class_id=class_id)
+                return self._add_new_class(device_id, class_id, instance_id,
+                                           attributes)
+            finally:
+                if start_time is not None:
+                    diff = datetime.utcnow() - start_time
+                    # NOTE: Keep this at 'debug' when checked in; change it to 'info'
+                    #       manually during development testing.
+                    self.log.debug('db-{}-time'.format(operation), milliseconds=diff.microseconds/1000)
+                    self._statistics[operation].increment(diff.microseconds/1000)
+
+        except Exception as e:
+            self.log.exception('set-exception', device_id=device_id, class_id=class_id,
+                               instance_id=instance_id, attributes=attributes, e=e)
+            raise
+
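+    # Hedged usage sketch for set(); the class, instance, and attribute values
+    # are illustrative.  The boolean result tells the MIB synchronizer whether
+    # the database actually changed:
+    #
+    #   changed = db.set(device_id, class_id=257, instance_id=0,
+    #                    attributes={'version': 'B3956.P7'})
+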
+    def delete(self, device_id, class_id, entity_id):
+        """
+        Delete an entity from the database if it exists.  If all instances
+        of a class are deleted, the class is deleted as well.
+
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME Entity ID
+
+        :returns: (bool) True if the instance was found and deleted. False
+                         if it did not exist.
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('delete', device_id=device_id, class_id=class_id,
+                       entity_id=entity_id)
+
+        if not self._started:
+            raise DatabaseStateError('The Database is not currently active')
+
+        if not isinstance(device_id, basestring):
+            raise TypeError('Device ID should be a string')
+
+        if not 0 <= class_id <= 0xFFFF:
+            raise ValueError('class-id must be 0..0xFFFF')
+
+        if not 0 <= entity_id <= 0xFFFF:
+            raise ValueError('entity-id must be 0..0xFFFF')
+
+        start_time = datetime.utcnow()
+        try:
+            # Remove instance
+            self._instance_proxy(device_id, class_id, entity_id).remove('/')
+            now = datetime.utcnow()
+
+            # If resulting class has no instance, remove it as well
+            class_proxy = self._class_proxy(device_id, class_id)
+            class_data = class_proxy.get('/', depth=1)
+
+            if len(class_data.instances) == 0:
+                class_proxy.remove('/')
+
+            self._modified = now
+            return True
+
+        except KeyError:
+            return False    # Not found
+
+        except Exception as e:
+            self.log.exception('delete-exception', device_id=device_id, e=e)
+            raise
+
+        finally:
+            diff = datetime.utcnow() - start_time
+            # NOTE: Keep this at 'debug' when checked in; change it to 'info'
+            #       manually during development testing.
+            self.log.debug('db-delete-time', milliseconds=diff.microseconds/1000)
+            self._statistics['delete'].increment(diff.microseconds/1000)
+
+    def query(self, device_id, class_id=None, instance_id=None, attributes=None):
+        """
+        Get database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param device_id: (str) ONU Device ID
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+        :param attributes: (list/set or str) Managed Entity instance's attributes
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises KeyError: If the requested device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('query', device_id=device_id, class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+
+        start_time = datetime.utcnow()
+        end_time = None
+        try:
+            if class_id is None:
+                # Get full device info
+                dev_data = self._device_proxy(device_id).get('/', depth=-1)
+                end_time = datetime.utcnow()
+                data = self._device_to_dict(dev_data)
+
+            elif instance_id is None:
+                # Get all instances of the class
+                try:
+                    cls_data = self._class_proxy(device_id, class_id).get('/', depth=-1)
+                    end_time = datetime.utcnow()
+                    data = self._class_to_dict(device_id, cls_data)
+
+                except KeyError:
+                    data = dict()
+
+            else:
+                # Get all attributes of a specific ME
+                try:
+                    inst_data = self._instance_proxy(device_id, class_id, instance_id).\
+                        get('/', depth=-1)
+                    end_time = datetime.utcnow()
+
+                    if attributes is None:
+                        # All Attributes
+                        data = self._instance_to_dict(device_id, class_id, inst_data)
+
+                    else:
+                        # Specific attribute(s)
+                        if isinstance(attributes, basestring):
+                            attributes = {attributes}
+
+                        data = {
+                            attr.name: self._string_to_attribute(device_id,
+                                                                 class_id,
+                                                                 attr.name,
+                                                                 attr.value)
+                            for attr in inst_data.attributes if attr.name in attributes}
+
+                except KeyError:
+                    data = dict()
+
+            return data
+
+        except KeyError:
+            self.log.warn('query-no-device', device_id=device_id)
+            raise
+
+        except Exception as e:
+            self.log.exception('query-exception', device_id=device_id, e=e)
+            raise
+
+        finally:
+            if end_time is not None:
+                diff = end_time - start_time
+                # NOTE: Keep this at 'debug' when checked in; change it to 'info'
+                #       manually during development testing.
+                self.log.debug('db-get-time', milliseconds=diff.microseconds/1000, class_id=class_id,
+                               instance_id=instance_id)
+                self._statistics['get'].increment(diff.microseconds/1000)
+
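+    # Hedged usage sketch: query() narrows its result by how many arguments
+    # are supplied (the class/instance/attribute values are illustrative):
+    #
+    #   db.query(device_id)                    # entire device MIB as a dict
+    #   db.query(device_id, 6)                 # all instances of class 6
+    #   db.query(device_id, 6, 0x101)          # one instance, all attributes
+    #   db.query(device_id, 6, 0x101, 'type')  # a single attribute
+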
+    def _instance_to_dict(self, device_id, class_id, instance):
+        if not isinstance(instance, MibInstanceData):
+            raise TypeError('{} is not of type MibInstanceData'.format(type(instance)))
+
+        data = {
+            INSTANCE_ID_KEY: instance.instance_id,
+            CREATED_KEY: self._string_to_time(instance.created),
+            MODIFIED_KEY: self._string_to_time(instance.modified),
+            ATTRIBUTES_KEY: dict()
+        }
+        for attribute in instance.attributes:
+            data[ATTRIBUTES_KEY][attribute.name] = self._string_to_attribute(device_id,
+                                                                             class_id,
+                                                                             attribute.name,
+                                                                             attribute.value)
+        return data
+
+    def _class_to_dict(self, device_id, val):
+        if not isinstance(val, MibClassData):
+            raise TypeError('{} is not of type MibClassData'.format(type(val)))
+
+        data = {
+            CLASS_ID_KEY: val.class_id,
+        }
+        for instance in val.instances:
+            data[instance.instance_id] = self._instance_to_dict(device_id,
+                                                                val.class_id,
+                                                                instance)
+        return data
+
+    def _device_to_dict(self, val):
+        if not isinstance(val, MibDeviceData):
+            raise TypeError('{} is not of type MibDeviceData'.format(type(val)))
+
+        data = {
+            DEVICE_ID_KEY: val.device_id,
+            CREATED_KEY: self._string_to_time(val.created),
+            LAST_SYNC_KEY: self._string_to_time(val.last_sync_time),
+            MDS_KEY: val.mib_data_sync,
+            VERSION_KEY: val.version,
+            ME_KEY: dict(),
+            MSG_TYPE_KEY: set()
+        }
+        for class_data in val.classes:
+            data[class_data.class_id] = self._class_to_dict(val.device_id,
+                                                            class_data)
+        for managed_entity in val.managed_entities:
+            data[ME_KEY][managed_entity.class_id] = managed_entity.name
+
+        for msg_type in val.message_types:
+            data[MSG_TYPE_KEY].add(msg_type.message_type)
+
+        return data
+
+    def _managed_entity_to_name(self, device_id, class_id):
+        me_map = self._omci_agent.get_device(device_id).me_map
+        entity = me_map.get(class_id)
+
+        return entity.__name__ if entity is not None else 'UnknownManagedEntity'
+
+    def update_supported_managed_entities(self, device_id, managed_entities):
+        """
+        Update the supported OMCI Managed Entities for this device
+        :param device_id: (str) ONU Device ID
+        :param managed_entities: (set) Managed Entity class IDs
+        """
+        try:
+            me_list = [ManagedEntity(class_id=class_id,
+                                     name=self._managed_entity_to_name(device_id,
+                                                                       class_id))
+                       for class_id in managed_entities]
+
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+
+            now = datetime.utcnow()
+            data.managed_entities.extend(me_list)
+
+            # Update
+            self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+                                    data)
+            self._modified = now
+            self.log.debug('save-me-list-complete', device_id=device_id)
+
+        except Exception as e:
+            self.log.exception('add-me-failure', e=e, me_list=managed_entities)
+            raise
+
+    def update_supported_message_types(self, device_id, msg_types):
+        """
+        Update the supported OMCI message types for this device
+        :param device_id: (str) ONU Device ID
+        :param msg_types: (set) Message Type values (ints)
+        """
+        try:
+            msg_type_list = [MessageType(message_type=msg_type.value)
+                             for msg_type in msg_types]
+
+            device_proxy = self._device_proxy(device_id)
+            data = device_proxy.get(depth=0)
+
+            now = datetime.utcnow()
+            data.message_types.extend(msg_type_list)
+
+            # Update
+            self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
+                                    data)
+            self._modified = now
+            self.log.debug('save-msg-types-complete', device_id=device_id)
+
+        except Exception as e:
+            self.log.exception('add-msg-types-failure', e=e, msg_types=msg_types)
+            raise
diff --git a/python/extensions/omci/me_frame.py b/python/extensions/omci/me_frame.py
new file mode 100644
index 0000000..1724100
--- /dev/null
+++ b/python/extensions/omci/me_frame.py
@@ -0,0 +1,478 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+OMCI Managed Entity Message support base class
+"""
+from voltha.extensions.omci.omci import *
+
+# abbreviations
+OP = EntityOperations
+AA = AttributeAccess
+
+
+class MEFrame(object):
+    """Base class to help simplify Frame Creation"""
+    def __init__(self, entity_class, entity_id, data):
+        assert issubclass(entity_class, EntityClass), \
+            "'{}' must be a subclass of MEFrame".format(entity_class)
+        self.check_type(entity_id, int)
+
+        if not 0 <= entity_id <= 0xFFFF:
+            raise ValueError('entity_id should be 0..65535')
+
+        self.log = structlog.get_logger()
+        self._class = entity_class
+        self._entity_id = entity_id
+        self.data = data
+
+    def __str__(self):
+        return '{}: Entity_ID: {}, Data: {}'.\
+            format(self.entity_class_name, self._entity_id, self.data)
+
+    def __repr__(self):
+        return str(self)
+
+    @property
+    def entity_class(self):
+        """
+        The Entity Class for this ME
+        :return: (EntityClass) Entity class
+        """
+        return self._class
+
+    @property
+    def entity_class_name(self):
+        return self._class.__name__
+
+    @property
+    def entity_id(self):
+        """
+        The Entity ID for this ME frame
+        :return: (int) Entity ID (0..0xFFFF)
+        """
+        return self._entity_id
+
+    @staticmethod
+    def check_type(param, types):
+        if not isinstance(param, types):
+            raise TypeError("Parameter '{}' should be a {}".format(param, types))
+
+    def _check_operation(self, operation):
+        allowed = self.entity_class.mandatory_operations | self.entity_class.optional_operations
+        assert operation in allowed, "{} not allowed for '{}'".format(operation.name,
+                                                                      self.entity_class_name)
+
+    def _check_attributes(self, attributes, access):
+        keys = attributes.keys() if isinstance(attributes, dict) else attributes
+        for attr_name in keys:
+            # Bad attribute name (invalid or spelling error)?
+            index = self.entity_class.attribute_name_to_index_map.get(attr_name)
+            if index is None:
+                raise KeyError("Attribute '{}' is not valid for '{}'".
+                               format(attr_name, self.entity_class_name))
+            # Invalid access?
+            assert access in self.entity_class.attributes[index].access, \
+                "Access '{}' for attribute '{}' is not valid for '{}'".format(access.name,
+                                                                              attr_name,
+                                                                              self.entity_class_name)
+
+        if access.value in [AA.W.value, AA.SBC.value] and isinstance(attributes, dict):
+            for attr_name, value in attributes.iteritems():
+                index = self.entity_class.attribute_name_to_index_map.get(attr_name)
+                attribute = self.entity_class.attributes[index]
+                if not attribute.valid(value):
+                    raise ValueError("Invalid value '{}' for attribute '{}' of '{}".
+                                     format(value, attr_name, self.entity_class_name))
+
+    @staticmethod
+    def _attr_to_data(attributes):
+        """
+        Convert an object into the 'data' set or dictionary for get/set/create/delete
+        requests.
+
+        This method takes a 'string', 'list', or 'set' for get requests and
+        converts it to a 'set' of attributes.
+
+        For create/set requests a dictionary of attribute/value pairs is required
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, set, or dict can be provided. For create/set
+                           operations, a dictionary should be provided. For delete
+                           the attributes may be None since they are ignored.
+
+        :return: (set, dict) set for get/deletes, dict for create/set
+        """
+        if isinstance(attributes, basestring):
+            # data = [str(attributes)]
+            data = set()
+            data.add(str(attributes))
+
+        elif isinstance(attributes, list):
+            assert all(isinstance(attr, basestring) for attr in attributes),\
+                'attribute list must be strings'
+            data = {str(attr) for attr in attributes}
+            assert len(data) == len(attributes), 'Attributes were not unique'
+
+        elif isinstance(attributes, set):
+            assert all(isinstance(attr, basestring) for attr in attributes),\
+                'attribute set must be strings'
+            data = {str(attr) for attr in attributes}
+
+        elif isinstance(attributes, (dict, type(None))):
+            data = attributes
+
+        else:
+            raise TypeError("Unsupported attributes type '{}'".format(type(attributes)))
+
+        return data
+
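+    # Worked examples of the conversion above:
+    #
+    #   MEFrame._attr_to_data('version')            # -> {'version'}
+    #   MEFrame._attr_to_data(['type', 'version'])  # -> {'type', 'version'}
+    #   MEFrame._attr_to_data({'priority': 3})      # -> {'priority': 3}
+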
+    def create(self):
+        """
+        Create a Create request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        assert hasattr(self.entity_class, 'class_id'), 'class_id required for Create actions'
+        assert hasattr(self, 'entity_id'), 'entity_id required for Create actions'
+        assert hasattr(self, 'data'), 'data required for Create actions'
+
+        data = getattr(self, 'data')
+        MEFrame.check_type(data, dict)
+        assert len(data) > 0, 'No attributes supplied'
+
+        self._check_operation(OP.Create)
+        self._check_attributes(data, AA.Writable)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciCreate.message_id,
+            omci_message=OmciCreate(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                data=data
+            ))
+
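+    # Hedged usage sketch (the wrapper name and attribute are illustrative;
+    # concrete MEFrame subclasses are defined in omci_me.py):
+    #
+    #   frame = SomeMeFrame(entity_id=0x101, attributes={'some_attr': 1}).create()
+    #   # 'frame' is an OmciFrame ready to be sent over the OMCI-CC
+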
+    def delete(self):
+        """
+        Create a Delete request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        self._check_operation(OP.Delete)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciDelete.message_id,
+            omci_message=OmciDelete(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id')
+            ))
+
+    def set(self):
+        """
+        Create a Set request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        assert hasattr(self, 'data'), 'data required for Set actions'
+        data = getattr(self, 'data')
+        MEFrame.check_type(data, dict)
+        assert len(data) > 0, 'No attributes supplied'
+
+        self._check_operation(OP.Set)
+        self._check_attributes(data, AA.Writable)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciSet.message_id,
+            omci_message=OmciSet(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                attributes_mask=self.entity_class.mask_for(*data.keys()),
+                data=data
+            ))
+
+    def get(self):
+        """
+        Create a Get request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        assert hasattr(self, 'data'), 'data required for Get actions'
+        data = getattr(self, 'data')
+        MEFrame.check_type(data, (list, set, dict))
+        assert len(data) > 0, 'No attributes supplied'
+
+        mask_set = data.keys() if isinstance(data, dict) else data
+
+        self._check_operation(OP.Get)
+        self._check_attributes(mask_set, AA.Readable)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciGet.message_id,
+            omci_message=OmciGet(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                attributes_mask=self.entity_class.mask_for(*mask_set)
+            ))
+
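+    # Hedged usage sketch for get(); OntGFrame is one of the MEFrame
+    # subclasses defined in omci_me.py, and the attribute name is illustrative:
+    #
+    #   frame = OntGFrame(attributes={'vendor_id'}).get()
+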
+    def reboot(self, reboot_code=0):
+        """
+        Create a Reboot request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        self._check_operation(OP.Reboot)
+        assert 0 <= reboot_code <= 2, 'Reboot code must be 0..2'
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciReboot.message_id,
+            omci_message=OmciReboot(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                reboot_code=reboot_code
+            ))
+
+    def mib_reset(self):
+        """
+        Create a MIB Reset request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        self._check_operation(OP.MibReset)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciMibReset.message_id,
+            omci_message=OmciMibReset(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id')
+            ))
+
+    def mib_upload(self):
+        """
+        Create a MIB Upload request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        self._check_operation(OP.MibUpload)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciMibUpload.message_id,
+            omci_message=OmciMibUpload(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id')
+            ))
+
+    def mib_upload_next(self):
+        """
+        Create a MIB Upload Next request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        assert hasattr(self, 'data'), 'data required for MIB Upload Next actions'
+        data = getattr(self, 'data')
+        MEFrame.check_type(data, dict)
+        assert len(data) > 0, 'No attributes supplied'
+        assert 'mib_data_sync' in data, "'mib_data_sync' not in attributes list"
+
+        self._check_operation(OP.MibUploadNext)
+        self._check_attributes(data, AA.Writable)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciMibUploadNext.message_id,
+            omci_message=OmciMibUploadNext(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                command_sequence_number=data['mib_data_sync']
+            ))
+
+    def get_next(self):
+        """
+        Create a Get Next request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        assert hasattr(self, 'data'), 'data required for Get Next actions'
+        data = getattr(self, 'data')
+        MEFrame.check_type(data, dict)
+        assert len(data) == 1, 'Only one attribute should be specified'
+
+        mask_set = data.keys() if isinstance(data, dict) else data
+
+        self._check_operation(OP.GetNext)
+        self._check_attributes(mask_set, AA.Readable)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciGetNext.message_id,
+            omci_message=OmciGetNext(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                attributes_mask=self.entity_class.mask_for(*mask_set),
+                command_sequence_number=data.values()[0]
+            ))
+
+    def synchronize_time(self, time=None):
+        """
+        Create a Synchronize Time request frame for this ME
+        :param time: (DateTime) Time to set. If None, the current UTC time is used
+        :return: (OmciFrame) OMCI Frame
+        """
+        from datetime import datetime
+        self._check_operation(OP.SynchronizeTime)
+        dt = time or datetime.utcnow()
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciSynchronizeTime.message_id,
+            omci_message=OmciSynchronizeTime(
+                    entity_class=getattr(self.entity_class, 'class_id'),
+                    entity_id=getattr(self, 'entity_id'),
+                    year=dt.year,
+                    month=dt.month,
+                    day=dt.day,
+                    hour=dt.hour,
+                    minute=dt.minute,
+                    second=dt.second,
+            ))
+
+    def get_all_alarm(self, alarm_retrieval_mode):
+        """
+        Create a Get All Alarms request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        self._check_operation(OP.GetAllAlarms)
+        assert 0 <= alarm_retrieval_mode <= 1, 'Alarm retrieval mode must be 0..1'
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciGetAllAlarms.message_id,
+            omci_message=OmciGetAllAlarms(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                alarm_retrieval_mode=alarm_retrieval_mode
+            ))
+
+    def get_all_alarm_next(self, command_sequence_number):
+        """
+        Create a Get All Alarms Next request frame for this ME
+        :return: (OmciFrame) OMCI Frame
+        """
+        self._check_operation(OP.GetAllAlarmsNext)
+
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciGetAllAlarmsNext.message_id,
+            omci_message=OmciGetAllAlarmsNext(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                command_sequence_number=command_sequence_number
+            ))
+
+    def start_software_download(self, image_size, window_size):
+        """
+        Create Start Software Download message
+        :return: (OmciFrame) OMCI Frame
+        """
+        self.log.debug("--> start_software_download")
+        self._check_operation(OP.StartSoftwareDownload)
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciStartSoftwareDownload.message_id,
+            omci_message=OmciStartSoftwareDownload(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                window_size=window_size,
+                image_size=image_size,
+                instance_id=getattr(self, 'entity_id')
+            ))
+
+    def end_software_download(self, crc32, image_size):
+        """
+        Create End Software Download message
+        :return: (OmciFrame) OMCI Frame
+        """
+        self._check_operation(OP.EndSoftwareDownload)
+        return OmciFrame(
+            transaction_id=None,
+            message_type=OmciEndSoftwareDownload.message_id,
+            omci_message=OmciEndSoftwareDownload(
+                entity_class=getattr(self.entity_class, 'class_id'),
+                entity_id=getattr(self, 'entity_id'),
+                crc32=crc32,
+                image_size=image_size,
+                instance_id=getattr(self, 'entity_id')
+            ))
+
+    def download_section(self, is_last_section, section_number, data):
+        """
+        Create Download Section message
+        :param is_last_section: (bool) indicates the last section in the window
+        :param section_number: (int) current section number
+        :param data: (bytes) data to be sent in the section
+        :return: (OmciFrame) OMCI Frame
+        """
+        self.log.debug("--> download_section: ", section_number=section_number)
+        
+        self._check_operation(OP.DownloadSection)
+        if is_last_section:
+            return OmciFrame(
+                    transaction_id=None,
+                    message_type=OmciDownloadSectionLast.message_id,
+                    omci_message=OmciDownloadSectionLast(
+                        entity_class=getattr(self.entity_class, 'class_id'),
+                        entity_id=getattr(self, 'entity_id'),
+                        section_number=section_number,
+                        data=data
+                   ))
+        else:
+            return OmciFrame(
+                    transaction_id=None,
+                    message_type=OmciDownloadSection.message_id,
+                    omci_message=OmciDownloadSection(
+                        entity_class=getattr(self.entity_class, 'class_id'),
+                        entity_id=getattr(self, 'entity_id'),
+                        section_number=section_number,
+                        data=data
+                   ))
+
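+    # Hedged sketch of a caller driving the three builders above (the send()
+    # call, section numbering, and window handling are assumptions;
+    # is_last_section marks the final section of each window, which solicits
+    # a response):
+    #
+    #   yield omci_cc.send(me.start_software_download(len(image), window_size))
+    #   for n, chunk in enumerate(sections):
+    #       last_in_window = (n % window_size) == window_size - 1
+    #       yield omci_cc.send(me.download_section(last_in_window, n & 0xFF, chunk))
+    #   yield omci_cc.send(me.end_software_download(crc32, len(image)))
+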
+    def activate_image(self, activate_flag=0):
+        """
+        Activate Image message
+        :param activate_flag: (int) 0 - Activate image unconditionally
+                                    1 - Activate image only if no POTS/VoIP calls are in progress
+                                    2 - Activate image only if no emergency call is in progress
+        :return: (OmciFrame) OMCI Frame
+        """
+        self.log.debug("--> activate_image", entity=self.entity_id, flag=activate_flag)
+        return OmciFrame(
+                transaction_id=None,
+                message_type=OmciActivateImage.message_id,
+                omci_message=OmciActivateImage(
+                    entity_class=getattr(self.entity_class, 'class_id'),
+                    entity_id=getattr(self, 'entity_id'),
+                    activate_flag=activate_flag
+               ))
+
+    def commit_image(self):
+        """
+        Commit Image message
+        :return: (OmciFrame) OMCI Frame
+        """
+        self.log.debug("--> commit_image", entity=self.entity_id)
+        return OmciFrame(
+                transaction_id=None,
+                message_type=OmciCommitImage.message_id,
+                omci_message=OmciCommitImage(
+                    entity_class=getattr(self.entity_class, 'class_id'),
+                    entity_id=getattr(self, 'entity_id'),
+               ))
+
diff --git a/python/extensions/omci/omci.py b/python/extensions/omci/omci.py
new file mode 100644
index 0000000..5a94146
--- /dev/null
+++ b/python/extensions/omci/omci.py
@@ -0,0 +1,23 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Omci message generator and parser implementation using scapy
+"""
+
+from omci_frame import OmciFrame
+from omci_messages import *
+from omci_entities import *
diff --git a/python/extensions/omci/omci_cc.py b/python/extensions/omci/omci_cc.py
new file mode 100644
index 0000000..7d4d304
--- /dev/null
+++ b/python/extensions/omci/omci_cc.py
@@ -0,0 +1,1047 @@
+#
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+OMCI Message support
+"""
+
+import sys
+import arrow
+from twisted.internet import reactor, defer
+from twisted.internet.defer import TimeoutError, CancelledError, failure, fail, succeed, inlineCallbacks
+from common.frameio.frameio import hexify
+from voltha.extensions.omci.omci import *
+from voltha.extensions.omci.omci_me import OntGFrame, OntDataFrame, SoftwareImageFrame
+from voltha.extensions.omci.me_frame import MEFrame
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes
+from common.event_bus import EventBusClient
+from enum import IntEnum
+from binascii import hexlify
+
+
+# NOTE: shadows the hexify imported from common.frameio.frameio above
+def hexify(buffer):
+    """Return a hexadecimal string encoding of input buffer"""
+    return ''.join('%02x' % ord(c) for c in buffer)
+
+
+DEFAULT_OMCI_TIMEOUT = 3                           # Seconds
+MAX_OMCI_REQUEST_AGE = 60                          # Seconds
+DEFAULT_OMCI_DOWNLOAD_SECTION_SIZE = 31            # Bytes
+MAX_TABLE_ROW_COUNT = 512                          # Keep get-next logic reasonable
+
+CONNECTED_KEY = 'connected'
+TX_REQUEST_KEY = 'tx-request'
+RX_RESPONSE_KEY = 'rx-response'
+UNKNOWN_CLASS_ATTRIBUTE_KEY = 'voltha-unknown-blob'
+
+
+class OmciCCRxEvents(IntEnum):
+    AVC_Notification = 0
+    MIB_Upload = 1
+    MIB_Upload_Next = 2
+    Create = 3
+    Delete = 4
+    Set = 5
+    Alarm_Notification = 6
+    Test_Result = 7
+    MIB_Reset = 8
+    Connectivity = 9
+    Get_ALARM_Get = 10
+    Get_ALARM_Get_Next = 11
+
+
+# abbreviations
+OP = EntityOperations
+RxEvent = OmciCCRxEvents
+
+
+class OMCI_CC(object):
+    """ Handle OMCI Communication Channel specifics for Adtran ONUs"""
+
+    MIN_OMCI_TX_ID_LOW_PRIORITY = 0x0001  # 2 Octets max
+    MAX_OMCI_TX_ID_LOW_PRIORITY = 0x7FFF  # 2 Octets max
+    MIN_OMCI_TX_ID_HIGH_PRIORITY = 0x8000  # 2 Octets max
+    MAX_OMCI_TX_ID_HIGH_PRIORITY = 0xFFFF  # 2 Octets max
+    LOW_PRIORITY = 0
+    HIGH_PRIORITY = 1
+
+    # Offset into some tuples for pending lists and tx in progress
+    PENDING_DEFERRED = 0
+    PENDING_FRAME = 1
+    PENDING_TIMEOUT = 2
+    PENDING_RETRY = 3
+
+    REQUEST_TIMESTAMP = 0
+    REQUEST_DEFERRED = 1
+    REQUEST_FRAME = 2
+    REQUEST_TIMEOUT = 3
+    REQUEST_RETRY = 4
+    REQUEST_DELAYED_CALL = 5
+
+    _frame_to_event_type = {
+        OmciMibResetResponse.message_id: RxEvent.MIB_Reset,
+        OmciMibUploadResponse.message_id: RxEvent.MIB_Upload,
+        OmciMibUploadNextResponse.message_id: RxEvent.MIB_Upload_Next,
+        OmciCreateResponse.message_id: RxEvent.Create,
+        OmciDeleteResponse.message_id: RxEvent.Delete,
+        OmciSetResponse.message_id: RxEvent.Set,
+        OmciGetAllAlarmsResponse.message_id: RxEvent.Get_ALARM_Get,
+        OmciGetAllAlarmsNextResponse.message_id: RxEvent.Get_ALARM_Get_Next
+    }
+
+    def __init__(self, adapter_agent, device_id, me_map=None,
+                 clock=None):
+        self.log = structlog.get_logger(device_id=device_id)
+        self._adapter_agent = adapter_agent
+        self._device_id = device_id
+        self._proxy_address = None
+        self._enabled = False
+        self._extended_messaging = False
+        self._me_map = me_map
+        if clock is None:
+            self.reactor = reactor
+        else:
+            self.reactor = clock
+
+        # Support 2 levels of priority since only baseline message set supported
+        self._tx_tid = [OMCI_CC.MIN_OMCI_TX_ID_LOW_PRIORITY, OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY]
+        self._tx_request = [None, None]    # Tx in progress (timestamp, defer, frame, timeout, retry, delayedCall)
+        self._tx_request_deferred = [None, None]    # Tx in progress but held until child Rx/Tx can finish (e.g. OMCI tables)
+        self._pending = [list(), list()]   # pending queue (deferred, tx_frame, timeout, retry)
+        self._rx_response = [None, None]
+
+        # Statistics
+        self._tx_frames = 0
+        self._rx_frames = 0
+        self._rx_unknown_tid = 0      # Rx OMCI with no Tx TID match
+        self._rx_onu_frames = 0       # Autonomously generated ONU frames
+        self._rx_onu_discards = 0     # Autonomously generated ONU unknown message types
+        self._rx_timeouts = 0
+        self._rx_late = 0             # Frame response received after timeout on Tx
+        self._rx_unknown_me = 0       # Number of managed entities Rx without a decode definition
+        self._tx_errors = 0           # Exceptions during tx request
+        self._consecutive_errors = 0  # Rx & Tx errors in a row, a good RX resets this to 0
+        self._reply_min = sys.maxint  # Fastest successful tx -> rx
+        self._reply_max = 0           # Longest successful tx -> rx
+        self._reply_sum = 0.0         # Total seconds for successful tx->rx (float for average)
+        self._max_hp_tx_queue = 0     # Maximum size of high priority tx pending queue
+        self._max_lp_tx_queue = 0     # Maximum size of low priority tx pending queue
+
+        self.event_bus = EventBusClient()
+
+        # If a list of custom ME Entities classes were provided, insert them into
+        # main class_id to entity map.
+        # TODO: If this class becomes hidden from the ONU DA, move this to the OMCI State Machine runner
+
+    def __str__(self):
+        return "OMCISupport: {}".format(self._device_id)
+
+    def _get_priority_index(self, high_priority):
+        """ Centralized logic to help make extended message support easier in the future"""
+        return OMCI_CC.HIGH_PRIORITY if high_priority and not self._extended_messaging \
+            else OMCI_CC.LOW_PRIORITY
+
+    def _tid_is_high_priority(self, tid):
+        """ Centralized logic to help make extended message support easier in the future"""
+
+        return not self._extended_messaging and \
+            OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY <= tid <= OMCI_CC.MAX_OMCI_TX_ID_HIGH_PRIORITY
+
+    @staticmethod
+    def event_bus_topic(device_id, event):
+        """
+        Get the topic name for a given event Frame Type
+        :param device_id: (str) ONU Device ID
+        :param event: (OmciCCRxEvents) Type of event
+        :return: (str) Topic string
+        """
+        assert event in OmciCCRxEvents, \
+            'Event {} is not an OMCI-CC Rx Event'.format(event.name)
+
+        return 'omci-rx:{}:{}'.format(device_id, event.name)
+
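+    # Example of the topic string produced above (the device ID is
+    # illustrative):
+    #
+    #   OMCI_CC.event_bus_topic('0001130158f0', RxEvent.MIB_Upload)
+    #   # -> 'omci-rx:0001130158f0:MIB_Upload'
+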
+    @property
+    def enabled(self):
+        return self._enabled
+
+    @enabled.setter
+    def enabled(self, value):
+        """
+        Enable/disable the OMCI Communications Channel
+
+        :param value: (boolean) True to enable, False to disable
+        """
+        assert isinstance(value, bool), 'enabled is a boolean'
+
+        if self._enabled != value:
+            self._enabled = value
+            if self._enabled:
+                self._start()
+            else:
+                self._stop()
+
+    @property
+    def tx_frames(self):
+        return self._tx_frames
+
+    @property
+    def rx_frames(self):
+        return self._rx_frames
+
+    @property
+    def rx_unknown_tid(self):
+        return self._rx_unknown_tid         # Tx TID not found
+
+    @property
+    def rx_unknown_me(self):
+        return self._rx_unknown_me
+
+    @property
+    def rx_onu_frames(self):
+        return self._rx_onu_frames
+
+    @property
+    def rx_onu_discards(self):
+        return self._rx_onu_discards        # Attribute Value change autonomous overflows
+
+    @property
+    def rx_timeouts(self):
+        return self._rx_timeouts
+
+    @property
+    def rx_late(self):
+        return self._rx_late
+
+    @property
+    def tx_errors(self):
+        return self._tx_errors
+
+    @property
+    def consecutive_errors(self):
+        return self._consecutive_errors
+
+    @property
+    def reply_min(self):
+        return int(round(self._reply_min * 1000.0))     # Milliseconds
+
+    @property
+    def reply_max(self):
+        return int(round(self._reply_max * 1000.0))     # Milliseconds
+
+    @property
+    def reply_average(self):
+        avg = self._reply_sum / self._rx_frames if self._rx_frames > 0 else 0.0
+        return int(round(avg * 1000.0))     # Milliseconds
+
+    @property
+    def hp_tx_queue_len(self):
+        return len(self._pending[OMCI_CC.HIGH_PRIORITY])
+
+    @property
+    def lp_tx_queue_len(self):
+        return len(self._pending[OMCI_CC.LOW_PRIORITY])
+
+    @property
+    def max_hp_tx_queue(self):
+        return self._max_hp_tx_queue
+
+    @property
+    def max_lp_tx_queue(self):
+        return self._max_lp_tx_queue
+
+    def _start(self):
+        """
+        Start the OMCI Communications Channel
+        """
+        assert self._enabled, 'Start should only be called if enabled'
+        self.flush()
+
+        device = self._adapter_agent.get_device(self._device_id)
+        self._proxy_address = device.proxy_address
+
+    def _stop(self):
+        """
+        Stop the OMCI Communications Channel
+        """
+        assert not self._enabled, 'Stop should only be called if disabled'
+        self.flush()
+        self._proxy_address = None
+
+    def _receive_onu_message(self, rx_frame):
+        """ Autonomously generated ONU frame Rx handler"""
+        self.log.debug('rx-onu-frame', frame_type=type(rx_frame),
+                       frame=hexify(str(rx_frame)))
+
+        msg_type = rx_frame.fields['message_type']
+        self._rx_onu_frames += 1
+
+        msg = {TX_REQUEST_KEY: None,
+               RX_RESPONSE_KEY: rx_frame}
+
+        if msg_type == EntityOperations.AlarmNotification.value:
+            topic = OMCI_CC.event_bus_topic(self._device_id, RxEvent.Alarm_Notification)
+            self.reactor.callLater(0, self.event_bus.publish, topic, msg)
+
+        elif msg_type == EntityOperations.AttributeValueChange.value:
+            topic = OMCI_CC.event_bus_topic(self._device_id, RxEvent.AVC_Notification)
+            self.reactor.callLater(0, self.event_bus.publish, topic, msg)
+
+        elif msg_type == EntityOperations.TestResult.value:
+            topic = OMCI_CC.event_bus_topic(self._device_id, RxEvent.Test_Result)
+            self.reactor.callLater(0, self.event_bus.publish, topic, msg)
+
+        else:
+            self.log.warn('onu-unsupported-autonomous-message', type=msg_type)
+            self._rx_onu_discards += 1
+
+    def _update_rx_tx_stats(self, now, ts):
+        ts_diff = now - arrow.Arrow.utcfromtimestamp(ts)
+        secs = ts_diff.total_seconds()
+        self._reply_sum += secs
+        if secs < self._reply_min:
+            self._reply_min = secs
+        if secs > self._reply_max:
+            self._reply_max = secs
+        return secs
+
+    def receive_message(self, msg):
+        """
+        Receive an OMCI message from the proxy channel to the OLT.
+
+        Call this from your ONU Adapter on a new OMCI Rx on the proxy channel
+        :param msg: (str) OMCI binary message (used as input to Scapy packet decoder)
+        """
+        if not self.enabled:
+            return
+
+        try:
+            now = arrow.utcnow()
+            d = None
+
+            # NOTE: Since we may need to do an independent ME map on a per-ONU basis
+            #       save the current value of the entity_id_to_class_map, then
+            #       replace it with our custom one before decode, and then finally
+            #       restore it later. Tried other ways but really made the code messy.
+            saved_me_map = omci_entities.entity_id_to_class_map
+            omci_entities.entity_id_to_class_map = self._me_map
+
+            try:
+                rx_frame = msg if isinstance(msg, OmciFrame) else OmciFrame(msg)
+                rx_tid = rx_frame.fields['transaction_id']
+
+                if rx_tid == 0:
+                    return self._receive_onu_message(rx_frame)
+
+                # The ONU was previously unreachable if this is the very first Rx
+                # or if we have been seeing consecutive errors
+                if self._rx_frames == 0 or self._consecutive_errors != 0:
+                    self.reactor.callLater(0, self._publish_connectivity_event, True)
+
+                self._rx_frames += 1
+                self._consecutive_errors = 0
+
+            except KeyError as e:
+                # Unknown, Unsupported, or vendor-specific ME. Key is the unknown classID
+                self.log.debug('frame-decode-key-error', msg=hexlify(msg), e=e)
+                rx_frame = self._decode_unknown_me(msg)
+                self._rx_unknown_me += 1
+                rx_tid = rx_frame.fields.get('transaction_id')
+
+            except Exception as e:
+                self.log.exception('frame-decode', msg=hexlify(msg), e=e)
+                return
+
+            finally:
+                omci_entities.entity_id_to_class_map = saved_me_map     # Always restore it.
+
+            try:
+                high_priority = self._tid_is_high_priority(rx_tid)
+                index = self._get_priority_index(high_priority)
+
+                # (timestamp, defer, frame, timeout, retry, delayedCall)
+                last_tx_tuple = self._tx_request[index]
+
+                if last_tx_tuple is None or \
+                        last_tx_tuple[OMCI_CC.REQUEST_FRAME].fields.get('transaction_id') != rx_tid:
+                    # Possible late Rx on a message that timed-out. Do not fall through
+                    # and unpack a missing or mismatched request tuple below.
+                    self._rx_unknown_tid += 1
+                    self.log.warn('tx-message-missing', rx_id=rx_tid, msg=hexlify(msg))
+                    return
+
+                ts, d, tx_frame, timeout, retry, dc = last_tx_tuple
+                if dc is not None and not dc.cancelled and not dc.called:
+                    dc.cancel()
+                    # self.log.debug("cancel-timeout-called")
+
+                secs = self._update_rx_tx_stats(now, ts)
+
+                # Late arrival already serviced by a timeout?
+                if d.called:
+                    self._rx_late += 1
+                    return
+
+            except Exception as e:
+                self.log.exception('frame-match', msg=hexlify(msg), e=e)
+                if d is not None:
+                    return d.errback(failure.Failure(e))
+                return
+
+            # Extended processing needed. Note 'data' field will be None on some error
+            # status returns
+            omci_msg = rx_frame.fields['omci_message']
+
+            if isinstance(omci_msg, OmciGetResponse) and \
+                    omci_msg.fields.get('data') is not None and \
+                    'table_attribute_mask' in omci_msg.fields['data']:
+                # Yes, run in a separate generator
+                reactor.callLater(0, self._process_get_rx_frame, timeout, secs,
+                                  rx_frame, d, tx_frame, high_priority)
+            else:
+                # Publish Rx event to listeners in a different task
+                reactor.callLater(0, self._publish_rx_frame, tx_frame, rx_frame)
+
+                # begin success callback chain (will cancel timeout and queue next Tx message)
+                self._rx_response[index] = rx_frame
+                d.callback(rx_frame)
+
+        except Exception as e:
+            self.log.exception('rx-msg', e=e)
+
+    @inlineCallbacks
+    def _process_get_rx_frame(self, timeout, secs, rx_frame, d, tx_frame, high_priority):
+        """
+        Special handling for Get Requests that may require additional 'get_next' operations
+        if a table attribute was requested.
+        """
+        omci_msg = rx_frame.fields['omci_message']
+        rx_tid = rx_frame.fields.get('transaction_id')
+        high_priority = self._tid_is_high_priority(rx_tid)
+        frame_index = self._get_priority_index(high_priority)
+
+        if isinstance(omci_msg, OmciGetResponse) and 'table_attribute_mask' in omci_msg.fields['data']:
+
+            # save tx request for later so that below send/recv can finish
+            self._tx_request_deferred[frame_index] = self._tx_request[frame_index]
+            self._tx_request[frame_index] = None
+
+            try:
+                entity_class = omci_msg.fields['entity_class']
+                entity_id = omci_msg.fields['entity_id']
+                table_attributes = omci_msg.fields['data']['table_attribute_mask']
+
+                # Table attribute mask is encoded opposite of managed entity mask.
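+                # e.g. the attribute at index 1 maps to bit 0x0002 here, whereas
+                # EntityClass.mask_for() would encode that same attribute as 0x8000.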
+                if entity_class in self._me_map:
+                    ec = self._me_map[entity_class]
+                    for index in xrange(1, len(ec.attributes) + 1):
+                        attr_mask = 1 << index
+
+                        if attr_mask & table_attributes:
+                            self.log.debug('omcc-get-table-ec', ec=ec, index=index, attr_mask=attr_mask,
+                                           table_attributes=table_attributes)
+                            eca = ec.attributes[index]
+                            self.log.debug('omcc-get-table-attribute', table_name=eca.field.name)
+
+                            seq_no = 0
+                            data_buffer = ''
+                            count = omci_msg.fields['data'][eca.field.name + '_size']
+
+                            if count > MAX_TABLE_ROW_COUNT:
+                                self.log.error('omcc-get-table-huge', count=count, name=eca.field.name)
+                                raise ValueError('Huge Table Size: {}'.format(count))
+
+                            # Original timeout must be chopped up into each individual get-next request
+                            # in order for total transaction to complete within the timeframe of the
+                            # original get() timeout.
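+                            # e.g. if count=100 and OmciTableField.PDU_SIZE is 29, then
+                            # number_transactions = 1 + 128/29 = 5 (integer division),
+                            # so each get-next receives timeout/6 of the original budget.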
+                            number_transactions = 1 + (count + OmciTableField.PDU_SIZE - 1) / OmciTableField.PDU_SIZE
+                            timeout /= (1 + number_transactions)
+
+                            # Start the loop
+                            vals = []
+                            for offset in xrange(0, count, OmciTableField.PDU_SIZE):
+                                frame = MEFrame(ec, entity_id, {eca.field.name: seq_no}).get_next()
+                                seq_no += 1
+
+                                max_retries = 3
+                                results = yield self.send(frame, min(timeout / max_retries, secs * 3), max_retries)
+
+                                omci_getnext_msg = results.fields['omci_message']
+                                status = omci_getnext_msg.fields['success_code']
+
+                                if status != ReasonCodes.Success.value:
+                                    raise Exception('get-next-failure table=' + eca.field.name +
+                                                    ' entity_id=' + str(entity_id) +
+                                                    ' sqn=' + str(seq_no) + ' omci-status ' + str(status))
+
+                                # Extract the data
+                                num_octets = count - offset
+                                if num_octets > OmciTableField.PDU_SIZE:
+                                    num_octets = OmciTableField.PDU_SIZE
+
+                                data = omci_getnext_msg.fields['data'][eca.field.name]
+                                data_buffer += data[:num_octets]
+
+                            while data_buffer:
+                                data_buffer, val = eca.field.getfield(None, data_buffer)
+                                vals.append(val)
+
+                            omci_msg.fields['data'][eca.field.name] = vals
+                            del omci_msg.fields['data'][eca.field.name + '_size']
+                            self.log.debug('omcc-got-table-attribute-rows', table_name=eca.field.name,
+                                           row_count=len(vals))
+                del omci_msg.fields['data']['table_attribute_mask']
+
+            except IndexError as e:
+                # Must precede the generic handler; IndexError is a subclass of Exception
+                self.log.exception('get-next-index-error', e=e)
+                self._tx_request_deferred[frame_index] = None
+                d.errback(failure.Failure(e))
+                return
+
+            except Exception as e:
+                self.log.exception('get-next-error', e=e)
+                self._tx_request_deferred[frame_index] = None
+                d.errback(failure.Failure(e))
+                return
+
+            # Put it back so the outer Rx/Tx can finish
+            self._tx_request[frame_index] = self._tx_request_deferred[frame_index]
+            self._tx_request_deferred[frame_index] = None
+
+        # Publish Rx event to listeners in a different task
+        if not isinstance(omci_msg, OmciGetNextResponse):
+            reactor.callLater(0, self._publish_rx_frame, tx_frame, rx_frame)
+
+        self._rx_response[frame_index] = rx_frame
+        d.callback(rx_frame)
+        self.log.debug("finished-processing-get-rx-frame")
+
+    def _decode_unknown_me(self, msg):
+        """
+        Decode an ME for an unsupported class ID.  This should only occur for a subset
+        of message types (Get, Set, MIB Upload Next, ...) and they should only be
+        responses as well.
+
+        There are some items below that are commented out. For VOLTHA 2.0, it is
+        expected that any get, set, create, or delete for unique (often vendor) MEs
+        will be coded by the ONU adapter utilizing them and supplied to OpenOMCI as
+        vendor-specific MEs during device initialization.
+
+        :param msg: (str) Binary data
+        :return: (OmciFrame) resulting frame
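+
+        The first four octets of the message are the basic OMCI header used below:
+        transaction_id (2 octets), message_type (1 octet) and the framing/device
+        identifier (1 octet, 0x0a for the baseline message set).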
+        """
+        from struct import unpack
+
+        (tid, msg_type, framing) = unpack('!HBB', msg[0:4])
+
+        assert framing == 0xa, 'Only basic OMCI framing supported at this time'
+        msg = msg[4:]
+
+        # TODO: Commented out items below are future work (not expected for VOLTHA v2.0)
+        (msg_class, kwargs) = {
+            # OmciCreateResponse.message_id: (OmciCreateResponse, None),
+            # OmciDeleteResponse.message_id: (OmciDeleteResponse, None),
+            # OmciSetResponse.message_id: (OmciSetResponse, None),
+            # OmciGetResponse.message_id: (OmciGetResponse, None),
+            # OmciGetAllAlarmsNextResponse.message_id: (OmciGetAllAlarmsNextResponse, None),
+            OmciMibUploadNextResponse.message_id: (OmciMibUploadNextResponse,
+                                                   {
+                                                       'entity_class': unpack('!H', msg[0:2])[0],
+                                                       'entity_id': unpack('!H', msg[2:4])[0],
+                                                       'object_entity_class': unpack('!H', msg[4:6])[0],
+                                                       'object_entity_id': unpack('!H', msg[6:8])[0],
+                                                       'object_attributes_mask': unpack('!H', msg[8:10])[0],
+                                                       'object_data': {
+                                                           UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[10:-4])
+                                                       },
+                                                   }),
+            # OmciAlarmNotification.message_id: (OmciAlarmNotification, None),
+            OmciAttributeValueChange.message_id: (OmciAttributeValueChange,
+                                                   {
+                                                       'entity_class': unpack('!H', msg[0:2])[0],
+                                                       'entity_id': unpack('!H', msg[2:4])[0],
+                                                       'data': {
+                                                           UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[4:-8])
+                                                       },
+                                                   }),
+            # OmciTestResult.message_id: (OmciTestResult, None),
+        }.get(msg_type, (None, None))
+
+        if msg_class is None:
+            raise TypeError('Unsupported Message Type for Unknown Decode: {}'.format(msg_type))
+
+        return OmciFrame(transaction_id=tid, message_type=msg_type,
+                         omci_message=msg_class(**kwargs))
+
+    def _publish_rx_frame(self, tx_frame, rx_frame):
+        """
+        Notify listeners of successful response frame
+        :param tx_frame: (OmciFrame) Original request frame
+        :param rx_frame: (OmciFrame) Response frame
+        """
+        if self._enabled and isinstance(rx_frame, OmciFrame):
+            frame_type = rx_frame.fields['omci_message'].message_id
+            event_type = OMCI_CC._frame_to_event_type.get(frame_type)
+
+            if event_type is not None:
+                topic = OMCI_CC.event_bus_topic(self._device_id, event_type)
+                msg = {TX_REQUEST_KEY: tx_frame,
+                       RX_RESPONSE_KEY: rx_frame}
+
+                self.event_bus.publish(topic=topic, msg=msg)
+
+    def _publish_connectivity_event(self, connected):
+        """
+        Notify listeners of Rx/Tx connectivity over OMCI
+        :param connected: (bool) True if connectivity transitioned from unreachable
+                                 to reachable
+        """
+        if self._enabled:
+            topic = OMCI_CC.event_bus_topic(self._device_id,
+                                            RxEvent.Connectivity)
+            msg = {CONNECTED_KEY: connected}
+            self.event_bus.publish(topic=topic, msg=msg)
+
+    def flush(self):
+        """Flush/cancel in active or pending Tx requests"""
+        requests = []
+
+        for priority in {OMCI_CC.HIGH_PRIORITY, OMCI_CC.LOW_PRIORITY}:
+            next_frame, self._tx_request[priority] = self._tx_request[priority], None
+            if next_frame is not None:
+                requests.append((next_frame[OMCI_CC.REQUEST_DEFERRED], next_frame[OMCI_CC.REQUEST_DELAYED_CALL]))
+
+            requests += [(next_frame[OMCI_CC.PENDING_DEFERRED], None)
+                         for next_frame in self._pending[priority]]
+            self._pending[priority] = list()
+
+        # Cancel them...
+        def cleanup_unhandled_error(_):
+            pass    # So the cancel below does not flag an unhandled error
+
+        for d, dc in requests:
+            if d is not None and not d.called:
+                d.addErrback(cleanup_unhandled_error)
+                d.cancel()
+
+            if dc is not None and not dc.called and not dc.cancelled:
+                dc.cancel()
+
+    def _get_tx_tid(self, high_priority=False):
+        """
+        Get the next Transaction ID for a tx.  Note TID=0 is reserved
+        for autonomously generated messages from an ONU
+
+        :return: (int) TID
+        """
+        if self._extended_messaging or not high_priority:
+            index = OMCI_CC.LOW_PRIORITY
+            min_tid = OMCI_CC.MIN_OMCI_TX_ID_LOW_PRIORITY
+            max_tid = OMCI_CC.MAX_OMCI_TX_ID_LOW_PRIORITY
+        else:
+            index = OMCI_CC.HIGH_PRIORITY
+            min_tid = OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY
+            max_tid = OMCI_CC.MAX_OMCI_TX_ID_HIGH_PRIORITY
+
+        tx_tid, self._tx_tid[index] = self._tx_tid[index], self._tx_tid[index] + 1
+
+        if self._tx_tid[index] > max_tid:
+            self._tx_tid[index] = min_tid
+
+        return tx_tid
+
+    def _request_failure(self, value, tx_tid, high_priority):
+        """
+        Handle a transmit failure. Rx Timeouts are handled on the 'dc' deferred and
+        will call a different method that may retry if requested.  This routine
+        will be called after the final (if any) timeout or other error
+
+        :param value: (Failure) Twisted failure
+        :param tx_tid: (int) Associated Tx TID
+        """
+        index = self._get_priority_index(high_priority)
+
+        if self._tx_request[index] is not None:
+            tx_frame = self._tx_request[index][OMCI_CC.REQUEST_FRAME]
+            tx_frame_tid = tx_frame.fields['transaction_id']
+
+            if tx_frame_tid == tx_tid:
+                timeout = self._tx_request[index][OMCI_CC.REQUEST_TIMEOUT]
+                dc = self._tx_request[index][OMCI_CC.REQUEST_DELAYED_CALL]
+                self._tx_request[index] = None
+
+                if dc is not None and not dc.called and not dc.cancelled:
+                    dc.cancel()
+
+                if isinstance(value, failure.Failure):
+                    value.trap(CancelledError)
+                    self._rx_timeouts += 1
+                    self._consecutive_errors += 1
+                    if self._consecutive_errors == 1:
+                        reactor.callLater(0, self._publish_connectivity_event, False)
+
+                    self.log.debug('timeout', tx_id=tx_tid, timeout=timeout)
+                    value = failure.Failure(TimeoutError(timeout, "Deferred"))
+            else:
+                # Search the pending queue. This may be a cancel coming in from the
+                # original task that requested the Tx.  If found, remove it from the
+                # pending queue. Pending entries are (deferred, frame, timeout, retry)
+                # tuples, so match on the queued frame's TID.
+                for i, request in enumerate(self._pending[index]):
+                    frame = request[OMCI_CC.PENDING_FRAME]
+                    if frame is not None and frame.fields.get('transaction_id') == tx_tid:
+                        self._pending[index].pop(i)
+                        break
+
+        self._send_next_request(high_priority)
+        return value
+
+    def _request_success(self, rx_frame, high_priority):
+        """
+        Handle transmit success (a matching Rx was received)
+
+        :param rx_frame: (OmciFrame) OMCI response frame with matching TID
+        :return: (OmciFrame) OMCI response frame with matching TID
+        """
+        index = self._get_priority_index(high_priority)
+
+        if rx_frame is None:
+            rx_frame = self._rx_response[index]
+
+        rx_tid = rx_frame.fields.get('transaction_id')
+
+        if rx_tid is not None:
+            if self._tx_request[index] is not None:
+                tx_frame = self._tx_request[index][OMCI_CC.REQUEST_FRAME]
+                tx_tid = tx_frame.fields['transaction_id']
+
+                if rx_tid == tx_tid:
+                    # Remove this request. Next callback in chain initiates next Tx
+                    self._tx_request[index] = None
+                else:
+                    self._rx_late += 1
+            else:
+                self._rx_late += 1
+
+        self._send_next_request(high_priority)
+
+        # Return rx_frame (to next item in callback list)
+        return rx_frame
+
+    def _request_timeout(self, tx_tid, high_priority):
+        """
+        Tx Request timed out.  Resend immediately if the retry count is non-zero.  A
+        separate deferred (dc) is used for each actual Tx; it is not the deferred
+        (d) that is returned to the caller of the 'send()' method.
+
+        If the timeout of the transmitted frame was zero, this is just cleanup of
+        that transmit request and not necessarily a transmit timeout
+
+        :param tx_tid: (int) TID of frame
+        :param high_priority: (bool) True if high-priority queue
+        """
+        self.log.debug("_request_timeout", tx_tid=tx_tid)
+        index = self._get_priority_index(high_priority)
+
+        if self._tx_request[index] is not None:
+            # (0: timestamp, 1: defer, 2: frame, 3: timeout, 4: retry, 5: delayedCall)
+            ts, d, frame, timeout, retry, _dc = self._tx_request[index]
+
+            if frame.fields.get('transaction_id', 0) == tx_tid:
+                self._tx_request[index] = None
+
+                if timeout > 0:
+                    self._rx_timeouts += 1
+
+                    if retry > 0:
+                        # Push on front of TX pending queue so that it transmits next with the
+                        # original TID
+                        self._queue_frame(d, frame, timeout, retry - 1, high_priority, front=True)
+
+                    elif not d.called:
+                        d.errback(failure.Failure(TimeoutError(timeout, "Send OMCI TID -{}".format(tx_tid))))
+            else:
+                self.log.warn('timeout-but-not-the-tx-frame')  # Statement mainly for debugging
+
+        self._send_next_request(high_priority)
+
+    def _queue_frame(self, d, frame, timeout, retry, high_priority, front=False):
+        index = self._get_priority_index(high_priority)
+        tx_tuple = (d, frame, timeout, retry)        # Pending -> (deferred, tx_frame, timeout, retry)
+
+        if front:
+            self._pending[index].insert(0, tx_tuple)
+        else:
+            self._pending[index].append(tx_tuple)
+
+        # Monitor queue stats
+        qlen = len(self._pending[index])
+
+        if high_priority:
+            if self._max_hp_tx_queue < qlen:
+                self._max_hp_tx_queue = qlen
+
+        elif self._max_lp_tx_queue < qlen:
+            self._max_lp_tx_queue = qlen
+
+        self.log.debug("queue-size", index=index, pending_qlen=qlen)
+
+    def send(self, frame, timeout=DEFAULT_OMCI_TIMEOUT, retry=0, high_priority=False):
+        """
+        Queue the OMCI Frame for a transmit to the ONU via the proxy_channel
+
+        :param frame: (OMCIFrame) Message to send
+        :param timeout: (int) Rx Timeout. 0=No response needed
+        :param retry: (int) Additional retry attempts on channel failure, default=0
+        :param high_priority: (bool) High Priority requests
+        :return: (deferred) A deferred that fires when the response frame is received
+                            or if an error/timeout occurs
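+
+        Example: callers typically build the frame with an MEFrame helper and wait
+        on the returned deferred from within an @inlineCallbacks method, e.g.:
+            results = yield omci_cc.send(OntDataFrame().mib_reset(), timeout=10)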
+        """
+        if not self.enabled or self._proxy_address is None:
+            # TODO custom exceptions throughout this code would be helpful
+            self._tx_errors += 1
+            return fail(result=failure.Failure(Exception('OMCI is not enabled')))
+
+        timeout = float(timeout)
+        if timeout > float(MAX_OMCI_REQUEST_AGE):
+            self._tx_errors += 1
+            msg = 'Maximum timeout is {} seconds'.format(MAX_OMCI_REQUEST_AGE)
+            return fail(result=failure.Failure(Exception(msg)))
+
+        if not isinstance(frame, OmciFrame):
+            self._tx_errors += 1
+            msg = "Invalid frame class '{}'".format(type(frame))
+            return fail(result=failure.Failure(Exception(msg)))
+        try:
+            index = self._get_priority_index(high_priority)
+            tx_tid = frame.fields['transaction_id']
+
+            if tx_tid is None:
+                tx_tid = self._get_tx_tid(high_priority=high_priority)
+                frame.fields['transaction_id'] = tx_tid
+
+            assert tx_tid not in [p[OMCI_CC.PENDING_FRAME].fields.get('transaction_id')
+                                  for p in self._pending[index]], \
+                'TX TID {} already exists'.format(tx_tid)
+            assert tx_tid > 0, 'Invalid Tx TID: {}'.format(tx_tid)
+
+            # Queue it and request next Tx if tx channel is free
+            d = defer.Deferred()
+
+            self._queue_frame(d, frame, timeout, retry, high_priority, front=False)
+            self._send_next_request(high_priority)
+
+            if timeout == 0:
+                self.log.debug("send-timeout-zero", tx_tid=tx_tid)
+                self.reactor.callLater(0, d.callback, 'queued')
+
+            return d
+
+        except Exception as e:
+            self._tx_errors += 1
+            self._consecutive_errors += 1
+
+            if self._consecutive_errors == 1:
+                self.reactor.callLater(0, self._publish_connectivity_event, False)
+
+            self.log.exception('send-omci', e=e)
+            return fail(result=failure.Failure(e))
+
+    def _ok_to_send(self, tx_request, high_priority):
+        """
+        G.988 specifies not to issue a MIB upload or a Software download request
+        when a similar action is in progress on the other channel. To keep the
+        logic here simple, a new upload/download will not be allowed while an
+        upload/download is in progress on the other channel.
+
+        :param tx_request: (OmciFrame) Frame to send
+        :param high_priority: (bool) for queue selection
+        :return: True if okay to dequeue and send frame
+        """
+        other = self._get_priority_index(not high_priority)
+
+        if self._tx_request[other] is None:
+            return True
+
+        this_msg_type = tx_request.fields['message_type'] & 0x1f
+        not_allowed = {OP.MibUpload.value,
+                       OP.MibUploadNext.value,
+                       OP.StartSoftwareDownload.value,
+                       OP.DownloadSection.value,
+                       OP.EndSoftwareDownload.value}
+
+        if this_msg_type not in not_allowed:
+            return True
+
+        other_msg_type = self._tx_request[other][OMCI_CC.REQUEST_FRAME].fields['message_type'] & 0x1f
+        return other_msg_type not in not_allowed
+
+    def _send_next_request(self, high_priority):
+        """
+        Pull next tx request and send it
+
+        :param high_priority: (bool) True if this was a high priority request
+        :return: results, so callback chain continues if needed
+        """
+        index = self._get_priority_index(high_priority)
+
+        if self._tx_request[index] is None:  # TODO or self._tx_request[index][OMCI_CC.REQUEST_DEFERRED].called:
+            d = None
+            try:
+                if len(self._pending[index]) and \
+                        not self._ok_to_send(self._pending[index][0][OMCI_CC.PENDING_FRAME],
+                                             high_priority):
+                    reactor.callLater(0.05, self._send_next_request, high_priority)
+                    return
+
+                next_frame = self._pending[index].pop(0)
+
+                d = next_frame[OMCI_CC.PENDING_DEFERRED]
+                frame = next_frame[OMCI_CC.PENDING_FRAME]
+                timeout = next_frame[OMCI_CC.PENDING_TIMEOUT]
+                retry = next_frame[OMCI_CC.PENDING_RETRY]
+
+                tx_tid = frame.fields['transaction_id']
+
+                # NOTE: Since we may need to do an independent ME map on a per-ONU basis
+                #       save the current value of the entity_id_to_class_map, then
+                #       replace it with our custom one before decode, and then finally
+                #       restore it later. Tried other ways but really made the code messy.
+                saved_me_map = omci_entities.entity_id_to_class_map
+                omci_entities.entity_id_to_class_map = self._me_map
+
+                ts = arrow.utcnow().float_timestamp
+                try:
+                    self._rx_response[index] = None
+                    self._adapter_agent.send_proxied_message(self._proxy_address,
+                                                             hexify(str(frame)))
+                finally:
+                    omci_entities.entity_id_to_class_map = saved_me_map
+
+                self._tx_frames += 1
+
+                # Note: the 'd' deferred in the queued request we just got will
+                # already have its success callback queued (callLater -> 0) with a
+                # result of "queued".  Here we need to time it out internally so
+                # that we can perform cleanup appropriately. G.988 mentions that
+                # most ONUs will process a request in < 1 second.
+                dc_timeout = timeout if timeout > 0 else 1.0
+
+                # Timeout on internal deferred to support internal retries if requested
+                dc = self.reactor.callLater(dc_timeout, self._request_timeout, tx_tid, high_priority)
+
+                # (timestamp, defer, frame, timeout, retry, delayedCall)
+                self._tx_request[index] = (ts, d, frame, timeout, retry, dc)
+
+                if timeout > 0:
+                    d.addCallbacks(self._request_success, self._request_failure,
+                                   callbackArgs=(high_priority,),
+                                   errbackArgs=(tx_tid, high_priority))
+
+            except IndexError:
+                pass    # Nothing pending in this queue
+
+            except Exception as e:
+                self.log.exception('send-proxy-exception', e=e)
+                self._tx_request[index] = None
+                self.reactor.callLater(0, self._send_next_request, high_priority)
+
+                if d is not None:
+                    d.errback(failure.Failure(e))
+        else:
+            self.log.debug("tx-request-occupied", index=index)
+
+    ###################################################################################
+    # MIB Action shortcuts
+
+    def send_mib_reset(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        """
+        Perform a MIB Reset
+        """
+        self.log.debug('send-mib-reset')
+
+        frame = OntDataFrame().mib_reset()
+        return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+    def send_mib_upload(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        self.log.debug('send-mib-upload')
+
+        frame = OntDataFrame().mib_upload()
+        return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+    def send_mib_upload_next(self, seq_no, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        self.log.debug('send-mib-upload-next')
+
+        frame = OntDataFrame(sequence_number=seq_no).mib_upload_next()
+        return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+    def send_reboot(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        """
+        Send an ONU Device reboot request (ONU-G ME).
+
+        NOTICE: This method is being deprecated and replaced with a task that performs this function
+        """
+        self.log.debug('send-mib-reboot')
+
+        frame = OntGFrame().reboot()
+        return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+    def send_get_all_alarm(self, alarm_retrieval_mode=0, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        self.log.debug('send_get_alarm')
+
+        frame = OntDataFrame().get_all_alarm(alarm_retrieval_mode)
+        return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+    def send_get_all_alarm_next(self, seq_no, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        self.log.debug('send_get_alarm_next')
+
+        frame = OntDataFrame().get_all_alarm_next(seq_no)
+        return self.send(frame, timeout=timeout, high_priority=high_priority)
+
+    def send_start_software_download(self, image_inst_id, image_size, window_size, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        frame = SoftwareImageFrame(image_inst_id).start_software_download(image_size, window_size-1)
+        return self.send(frame, timeout, 3, high_priority=high_priority)
+        
+    def send_download_section(self, image_inst_id, section_num, data, size=DEFAULT_OMCI_DOWNLOAD_SECTION_SIZE, timeout=0, high_priority=False):
+        """
+        timeout=0 indicates that no response is needed
+        """
+        # self.log.debug("send_download_section", instance_id=image_inst_id, section=section_num, timeout=timeout)
+        if timeout > 0:
+            frame = SoftwareImageFrame(image_inst_id).download_section(True, section_num, data)
+        else:
+            frame = SoftwareImageFrame(image_inst_id).download_section(False, section_num, data)
+        return self.send(frame, timeout, high_priority=high_priority)
+
+
+    def send_end_software_download(self, image_inst_id, crc32, image_size, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        frame = SoftwareImageFrame(image_inst_id).end_software_download(crc32, image_size)
+        return self.send(frame, timeout, high_priority=high_priority)
+
+    def send_active_image(self, image_inst_id, flag=0, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        frame = SoftwareImageFrame(image_inst_id).activate_image(flag)
+        return self.send(frame, timeout, high_priority=high_priority)
+
+    def send_commit_image(self, image_inst_id, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
+        frame = SoftwareImageFrame(image_inst_id).commit_image()
+        return self.send(frame, timeout, high_priority=high_priority)
+
diff --git a/python/extensions/omci/omci_defs.py b/python/extensions/omci/omci_defs.py
new file mode 100644
index 0000000..64fefc5
--- /dev/null
+++ b/python/extensions/omci/omci_defs.py
@@ -0,0 +1,100 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from enum import Enum, IntEnum
+
+class OmciUninitializedFieldError(Exception):
+    pass
+
+
+class OmciInvalidTypeError(Exception):
+    pass
+
+def bitpos_from_mask(mask, lsb_pos=0, increment=1):
+    """
+    Turn a decimal value (bitmask) into a list of indices where each
+    index value corresponds to the bit position of a bit that was set (1)
+    in the mask. What numbers are assigned to the bit positions is controlled
+    by lsb_pos and increment, as explained below.
+    :param mask: a decimal value used as a bit mask
+    :param lsb_pos: The decimal value associated with the LSB bit
+    :param increment: If this is +i, then the bit next to LSB will take
+    the decimal value of lsb_pos + i.
+    :return: List of bit positions where the bit was set in mask
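+
+    Example: bitpos_from_mask(0xA0, lsb_pos=8, increment=-1) returns [1, 3],
+    since the set bits 7 and 5 map to positions 8-7=1 and 8-5=3. This is how
+    the byte-wise attribute-mask lookup tables in EntityClass are built.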
+    """
+    out = []
+    while mask:
+        if mask & 0x01:
+            out.append(lsb_pos)
+        lsb_pos += increment
+        mask >>= 1
+    return sorted(out)
+
+
+class AttributeAccess(Enum):
+    Readable = 1
+    R = 1
+    Writable = 2
+    W = 2
+    SetByCreate = 3
+    SBC = 3
+
+
+OmciNullPointer = 0xffff
+OmciSectionDataSize = 31
+
+class EntityOperations(Enum):
+    # keep these numbers in sync with the msg_type field per the OMCI spec
+    Create = 4
+    CreateComplete = 5
+    Delete = 6
+    Set = 8
+    Get = 9
+    GetComplete = 10
+    GetAllAlarms = 11
+    GetAllAlarmsNext = 12
+    MibUpload = 13
+    MibUploadNext = 14
+    MibReset = 15
+    AlarmNotification = 16
+    AttributeValueChange = 17
+    Test = 18
+    StartSoftwareDownload = 19
+    DownloadSection = 20
+    EndSoftwareDownload = 21
+    ActivateSoftware = 22
+    CommitSoftware = 23
+    SynchronizeTime = 24
+    Reboot = 25
+    GetNext = 26
+    TestResult = 27
+    GetCurrentData = 28
+    SetTable = 29       # Defined in Extended Message Set Only
+
+
+class ReasonCodes(IntEnum):
+    # OMCI Result and reason codes
+    Success = 0             # Command processed successfully
+    ProcessingError = 1     # Command processing error
+    NotSupported = 2        # Command not supported
+    ParameterError = 3      # Parameter error
+    UnknownEntity = 4       # Unknown managed entity
+    UnknownInstance = 5     # Unknown managed entity instance
+    DeviceBusy = 6          # Device busy
+    InstanceExists = 7      # Instance exists
+    AttributeFailure = 9    # Attribute(s) failed or unknown
+
+    OperationCancelled = 255  # Proprietary, defined for internal use
+
diff --git a/python/extensions/omci/omci_entities.py b/python/extensions/omci/omci_entities.py
new file mode 100644
index 0000000..3968224
--- /dev/null
+++ b/python/extensions/omci/omci_entities.py
@@ -0,0 +1,1564 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import inspect
+
+import sys
+from binascii import hexlify
+from bitstring import BitArray
+import json
+from scapy.fields import ByteField, ShortField, MACField, BitField, IPField
+from scapy.fields import IntField, StrFixedLenField, LongField, FieldListField, PacketLenField
+from scapy.packet import Packet
+
+from voltha.extensions.omci.omci_defs import OmciUninitializedFieldError, \
+    AttributeAccess, OmciNullPointer, EntityOperations, OmciInvalidTypeError
+from voltha.extensions.omci.omci_fields import OmciSerialNumberField, OmciTableField
+from voltha.extensions.omci.omci_defs import bitpos_from_mask
+
+class EntityClassAttribute(object):
+
+    def __init__(self, fld, access=set(), optional=False, range_check=None,
+                 avc=False, tca=False, counter=False, deprecated=False):
+        """
+        Initialize an Attribute for a Managed Entity Class
+
+        :param fld: (Field) Scapy field type
+        :param access: (AttributeAccess) Allowed access
+        :param optional: (boolean) If true, the attribute is optional, else mandatory
+        :param range_check: (callable) None, Lambda, or Function to validate value
+        :param avc: (boolean) If true, an AVC notification can occur for the attribute
+        :param tca: (boolean) If true, a threshold crossing alert alarm notification can occur
+                              for the attribute
+        :param counter: (boolean) If true, this attribute is a PM counter
+        :param deprecated: (boolean) If true, this attribute is deprecated and
+                           only 'read' operations (if any) are performed.
+        """
+        self._fld = fld
+        self._access = access
+        self._optional = optional
+        self._range_check = range_check
+        self._avc = avc
+        self._tca = tca
+        self._counter = counter
+        self._deprecated = deprecated
+
+    @property
+    def field(self):
+        return self._fld
+
+    @property
+    def access(self):
+        return self._access
+
+    @property
+    def optional(self):
+        return self._optional
+
+    @property
+    def is_counter(self):
+        return self._counter
+
+    @property
+    def range_check(self):
+        return self._range_check
+
+    @property
+    def avc_allowed(self):
+        return self._avc
+
+    @property
+    def deprecated(self):
+        return self._deprecated
+
+    _type_checker_map = {
+        'ByteField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFF,
+        'ShortField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFFFF,
+        'IntField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFFFFFFFF,
+        'LongField': lambda val: isinstance(val, (int, long)) and 0 <= val <= 0xFFFFFFFFFFFFFFFF,
+        'StrFixedLenField': lambda val: isinstance(val, basestring),
+        'MACField': lambda val: True,   # TODO: Add a constraint for this field type
+        'BitField': lambda val: True,   # TODO: Add a constraint for this field type
+        'IPField': lambda val: True,    # TODO: Add a constraint for this field type
+        'OmciTableField': lambda val: True,
+
+        # TODO: As additional Scapy field types are used, add constraints
+    }
+
+    def valid(self, value):
+        def _isa_lambda_function(v):
+            import inspect
+            return callable(v) and len(inspect.getargspec(v).args) == 1
+
+        field_type = self.field.__class__.__name__
+        type_check = EntityClassAttribute._type_checker_map.get(field_type,
+                                                                lambda val: True)
+
+        # TODO: Currently StrFixedLenField is used heavily for both bit fields as
+        #       and other 'byte/octet' related strings that are NOT textual. Until
+        #       all of these are corrected, 'StrFixedLenField' cannot test the type
+        #       of the value provided
+
+        if field_type != 'StrFixedLenField' and not type_check(value):
+            return False
+
+        if _isa_lambda_function(self.range_check):
+            return self.range_check(value)
+        return True
+
+
+class EntityClassMeta(type):
+    """
+    Metaclass for EntityClass to generate secondary class attributes
+    for class attributes of the derived classes.
+    """
+    def __init__(cls, name, bases, dct):
+        super(EntityClassMeta, cls).__init__(name, bases, dct)
+
+        # initialize attribute_name_to_index_map
+        cls.attribute_name_to_index_map = dict(
+            (a._fld.name, idx) for idx, a in enumerate(cls.attributes))
+
+
+class EntityClass(object):
+
+    class_id = 'to be filled by subclass'
+    attributes = []
+    mandatory_operations = set()
+    optional_operations = set()
+    notifications = set()
+    alarms = dict()       # Alarm Number -> Alarm Name
+    hidden = False        # If true, this entity is not reported by a MIB upload.
+                          # This flag is needed to be able to properly perform
+                          # MIB Audits.
+
+    # will be map of attr_name -> index in attributes, initialized by metaclass
+    attribute_name_to_index_map = None
+    __metaclass__ = EntityClassMeta
+
+    def __init__(self, **kw):
+        assert(isinstance(kw, dict))
+        for k, v in kw.iteritems():
+            assert(k in self.attribute_name_to_index_map)
+        self._data = kw
+
+    def serialize(self, mask=None, operation=None):
+        octets = ''
+
+        # generate ordered list of attribute indices needed to be processed
+        # if mask is provided, we use that explicitly
+        # if mask is not provided, we determine attributes from the self._data
+        # content also taking into account the type of operation in hand
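+        # e.g. OntData(mib_data_sync=5).serialize() with no mask emits just the
+        # 2-octet big-endian value '\x00\x05', since only that attribute is set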
+        if mask is not None:
+            attribute_indices = EntityClass.attribute_indices_from_mask(mask)
+        else:
+            attribute_indices = self.attribute_indices_from_data()
+
+        # Serialize each indexed field (ignoring entity id)
+        for index in attribute_indices:
+            eca = self.attributes[index]
+            field = eca.field
+            try:
+                value = self._data[field.name]
+
+                if not eca.valid(value):
+                    raise OmciInvalidTypeError(
+                        'Value "{}" for Entity field "{}" is not valid'.format(value,
+                                                                               field.name))
+            except KeyError:
+                raise OmciUninitializedFieldError(
+                    'Entity field "{}" not set'.format(field.name))
+
+            octets = field.addfield(None, octets, value)
+
+        return octets
+
+    def attribute_indices_from_data(self):
+        return sorted(
+            self.attribute_name_to_index_map[attr_name]
+            for attr_name in self._data.iterkeys())
+
+    byte1_mask_to_attr_indices = dict(
+        (m, bitpos_from_mask(m, 8, -1)) for m in range(256))
+    byte2_mask_to_attr_indices = dict(
+        (m, bitpos_from_mask(m, 16, -1)) for m in range(256))
+
+    @classmethod
+    def attribute_indices_from_mask(cls, mask):
+        # each bit in the 2-byte field denotes an attribute index; we use a
+        # lookup table to make lookup a bit faster
+        return \
+            cls.byte1_mask_to_attr_indices[(mask >> 8) & 0xff] + \
+            cls.byte2_mask_to_attr_indices[(mask & 0xff)]
+
+    @classmethod
+    def mask_for(cls, *attr_names):
+        """
+        Return the mask value corresponding to the given attribute names
+        :param attr_names: Attribute names
+        :return: integer mask value
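+
+        Example: for OntData (attributes: managed_entity_id, mib_data_sync),
+        OntData.mask_for('mib_data_sync') == 1 << (16 - 1) == 0x8000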
+        """
+        mask = 0
+        for attr_name in attr_names:
+            index = cls.attribute_name_to_index_map[attr_name]
+            mask |= (1 << (16 - index))
+        return mask
+
+
+# abbreviations
+ECA = EntityClassAttribute
+AA = AttributeAccess
+OP = EntityOperations
+
+
+class OntData(EntityClass):
+    class_id = 2
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: x == 0),
+        # Only 1 octet used if GET/SET operation
+        ECA(ShortField("mib_data_sync", 0), {AA.R, AA.W})
+    ]
+    mandatory_operations = {OP.Get, OP.Set,
+                            OP.GetAllAlarms, OP.GetAllAlarmsNext,
+                            OP.MibReset, OP.MibUpload, OP.MibUploadNext}
+
+
+class Cardholder(EntityClass):
+    class_id = 5
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: 0 <= x < 255 or 256 <= x < 511,
+            avc=True),
+        ECA(ByteField("actual_plugin_unit_type", None), {AA.R}),
+        ECA(ByteField("expected_plugin_unit_type", None), {AA.R, AA.W}),
+        ECA(ByteField("expected_port_count", None), {AA.R, AA.W},
+            optional=True),
+        ECA(StrFixedLenField("expected_equipment_id", None, 20), {AA.R, AA.W},
+            optional=True, avc=True),
+        ECA(StrFixedLenField("actual_equipment_id", None, 20), {AA.R},
+            optional=True),
+        ECA(ByteField("protection_profile_pointer", None), {AA.R},
+            optional=True),
+        ECA(ByteField("invoke_protection_switch", None), {AA.R, AA.W},
+            optional=True, range_check=lambda x: 0 <= x <= 3),
+        ECA(ByteField("arc", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1, optional=True, avc=True),
+        ECA(ByteField("arc_interval", 0), {AA.R, AA.W}, optional=True),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        0: 'Plug-in circuit pack missing',
+        1: 'Plug-in type mismatch alarm',
+        2: 'Improper card removal',
+        3: 'Plug-in equipment ID mismatch alarm',
+        4: 'Protection switch',
+    }
+
+
+class CircuitPack(EntityClass):
+    class_id = 6
+    attributes = [
+        ECA(StrFixedLenField("managed_entity_id", None, 22), {AA.R, AA.SBC},
+            range_check=lambda x: 0 <= x < 255 or 256 <= x < 511),
+        ECA(ByteField("type", None), {AA.R, AA.SBC}),
+        ECA(ByteField("number_of_ports", None), {AA.R}, optional=True),
+        ECA(OmciSerialNumberField("serial_number"), {AA.R}),
+        ECA(StrFixedLenField("version", None, 14), {AA.R}),
+        ECA(StrFixedLenField("vendor_id", None, 4), {AA.R}),
+        ECA(ByteField("administrative_state", None), {AA.R, AA.W}),
+        ECA(ByteField("operational_state", None), {AA.R}, optional=True, avc=True),
+        ECA(ByteField("bridged_or_ip_ind", None), {AA.R, AA.W}, optional=True,
+            range_check=lambda x: 0 <= x <= 2),
+        ECA(StrFixedLenField("equipment_id", None, 20), {AA.R}, optional=True),
+        ECA(ByteField("card_configuration", None), {AA.R, AA.W, AA.SBC},
+            optional=True),  # not really mandatory, see spec ITU-T G.988, 9.1.6
+        ECA(ByteField("total_tcont_buffer_number", None), {AA.R},
+            optional=True),  # not really mandatory, see spec ITU-T G.988, 9.1.6
+        ECA(ByteField("total_priority_queue_number", None), {AA.R},
+            optional=True),  # not really mandatory, see spec ITU-T G.988, 9.1.6
+        ECA(ByteField("total_traffic_scheduler_number", None), {AA.R},
+            optional=True),  # not really mandatory, see spec ITU-T G.988, 9.1.6
+        ECA(IntField("power_sched_override", None), {AA.R, AA.W},
+            optional=True)
+    ]
+    mandatory_operations = {OP.Get, OP.Set, OP.Reboot}
+    optional_operations = {OP.Create, OP.Delete, OP.Test}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        0: 'Equipment alarm',
+        1: 'Powering alarm',
+        2: 'Self-test failure',
+        3: 'Laser end of life',
+        4: 'Temperature yellow',
+        5: 'Temperature red',
+    }
+
+class SoftwareImage(EntityClass):
+    class_id = 7
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: 0 <= x/256 <= 254 or 0 <= x % 256 <= 1),
+        ECA(StrFixedLenField("version", None, 14), {AA.R}, avc=True),
+        ECA(ByteField("is_committed", None), {AA.R}, avc=True,
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("is_active", None), {AA.R}, avc=True,
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("is_valid", None), {AA.R}, avc=True,
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(StrFixedLenField("product_code", None, 25), {AA.R}, optional=True, avc=True),
+        ECA(StrFixedLenField("image_hash", None, 16), {AA.R}, optional=True, avc=True),
+    ]
+    mandatory_operations = {OP.Get, OP.StartSoftwareDownload, OP.DownloadSection,
+                            OP.EndSoftwareDownload, OP.ActivateSoftware,
+                            OP.CommitSoftware}
+    notifications = {OP.AttributeValueChange}
+
+
+class PptpEthernetUni(EntityClass):
+    class_id = 11
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(ByteField("expected_type", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 254),
+        ECA(ByteField("sensed_type", 0), {AA.R}, optional=True, avc=True),
+        # TODO: For sensed_type AVC, see note in AT&T OMCI Specification, V3.0, page 123
+        ECA(ByteField("autodetection_config", 0), {AA.R, AA.W},
+            range_check=lambda x: x in [0, 1, 2, 3, 4, 5,
+                                        0x10, 0x11, 0x12, 0x13, 0x14,
+                                        0x20, 0x30], optional=True),  # See ITU-T G.988
+        ECA(ByteField("ethernet_loopback_config", 0), {AA.R, AA.W},
+            range_check=lambda x: x in [0, 3]),
+        ECA(ByteField("administrative_state", 1), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("operational_state", 1), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1, optional=True, avc=True),
+        ECA(ByteField("config_ind", 0), {AA.R},
+            range_check=lambda x: x in [0, 1, 2, 3, 4, 0x11, 0x12, 0x13]),
+        ECA(ShortField("max_frame_size", 1518), {AA.R, AA.W}, optional=True),
+        ECA(ByteField("dte_dce_ind", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 2),
+        ECA(ShortField("pause_time", 0), {AA.R, AA.W}, optional=True),
+        ECA(ByteField("bridged_ip_ind", 2), {AA.R, AA.W},
+            optional=True, range_check=lambda x: 0 <= x <= 2),
+        ECA(ByteField("arc", 0), {AA.R, AA.W}, optional=True,
+            range_check=lambda x: 0 <= x <= 1, avc=True),
+        ECA(ByteField("arc_interval", 0), {AA.R, AA.W}, optional=True),
+        ECA(ByteField("pppoe_filter", 0), {AA.R, AA.W}, optional=True),
+        ECA(ByteField("power_control", 0), {AA.R, AA.W}, optional=True),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        0: 'LAN Loss Of Signal',
+    }
+
+
+class MacBridgeServiceProfile(EntityClass):
+    class_id = 45
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("spanning_tree_ind", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("learning_ind", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("port_bridging_ind", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("priority", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("max_age", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0x0600 <= x <= 0x2800),
+        ECA(ShortField("hello_time", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0x0100 <= x <= 0x0A00),
+        ECA(ShortField("forward_delay", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0x0400 <= x <= 0x1E00),
+        ECA(ByteField("unknown_mac_address_discard", None),
+            {AA.R, AA.W, AA.SBC}, range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("mac_learning_depth", None),
+            {AA.R, AA.W, AA.SBC}, optional=True),
+        ECA(ByteField("dynamic_filtering_ageing_time", None),
+            {AA.R, AA.W, AA.SBC}, optional=True),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class MacBridgePortConfigurationData(EntityClass):
+    class_id = 47
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ShortField("bridge_id_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("port_num", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("tp_type", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 1 <= x <= 12),
+        ECA(ShortField("tp_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("port_priority", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("port_path_cost", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("port_spanning_tree_in", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("encapsulation_methods", None), {AA.R, AA.W, AA.SBC},
+            optional=True, deprecated=True),
+        ECA(ByteField("lan_fcs_ind", None), {AA.R, AA.W, AA.SBC},
+            optional=True, deprecated=True),
+        ECA(MACField("port_mac_address", None), {AA.R}, optional=True),
+        ECA(ShortField("outbound_td_pointer", None), {AA.R, AA.W},
+            optional=True),
+        ECA(ShortField("inbound_td_pointer", None), {AA.R, AA.W},
+            optional=True),
+        # TODO:
+        ECA(ByteField("mac_learning_depth", 0), {AA.R, AA.W, AA.SBC},
+            optional=True),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Port blocking',
+    }
+
+
+class MacBridgePortFilterPreAssignTable(EntityClass):
+    class_id = 79
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ShortField("ipv4_multicast", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("ipv6_multicast", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("ipv4_broadcast", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("rarp", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("ipx", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("netbeui", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("appletalk", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("bridge_management_information", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("arp", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("pppoe_broadcast", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1)
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+
+
+class VlanTaggingFilterData(EntityClass):
+    class_id = 84
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(FieldListField("vlan_filter_list", None,
+                           ShortField('', 0), count_from=lambda _: 12),
+            {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("forward_operation", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0x00 <= x <= 0x21),
+        ECA(ByteField("number_of_entries", None), {AA.R, AA.W, AA.SBC})
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class Ieee8021pMapperServiceProfile(EntityClass):
+    class_id = 130
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ShortField("tp_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_0",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_1",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_2",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_3",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_4",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_5",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_6",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interwork_tp_pointer_for_p_bit_priority_7",
+                       OmciNullPointer), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("unmarked_frame_option", None),
+            {AA.R, AA.W, AA.SBC}, range_check=lambda x: 0 <= x <= 1),
+        ECA(StrFixedLenField("dscp_to_p_bit_mapping", None, length=24),
+            {AA.R, AA.W}),  # TODO: Would a custom 3-bit group bitfield work better?
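+        # (64 DSCP values x 3 bits = 192 bits = 24 octets; illustratively,
+        # with bitstring: BitArray(bytes=v)[3*i:3*i + 3].uint is the P-bit
+        # assigned to DSCP i.)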
+        ECA(ByteField("default_p_bit_marking", None),
+            {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("tp_type", None), {AA.R, AA.W, AA.SBC},
+            optional=True, range_check=lambda x: 0 <= x <= 8)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class OltG(EntityClass):
+    class_id = 131
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: x == 0),
+        ECA(StrFixedLenField("olt_vendor_id", None, 4), {AA.R, AA.W}),
+        ECA(StrFixedLenField("equipment_id", None, 20), {AA.R, AA.W}),
+        ECA(StrFixedLenField("version", None, 14), {AA.R, AA.W}),
+        ECA(StrFixedLenField("time_of_day", None, 14), {AA.R, AA.W})
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+
+
+class OntPowerShedding(EntityClass):
+    class_id = 133
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: x == 0),
+        ECA(ShortField("restore_power_time_reset_interval", 0),
+            {AA.R, AA.W}),
+        ECA(ShortField("data_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("voice_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("video_overlay_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("video_return_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("dsl_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("atm_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("ces_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("frame_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("sonet_class_shedding_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("shedding_status", None), {AA.R, AA.W}, optional=True,
+            avc=True),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+    notifications = {OP.AttributeValueChange}
+
+
+class IpHostConfigData(EntityClass):
+    class_id = 134
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(BitField("ip_options", 0, size=8), {AA.R, AA.W}),
+        ECA(MACField("mac_address", None), {AA.R}),
+        ECA(StrFixedLenField("onu_identifier", None, 25), {AA.R, AA.W}),
+        ECA(IPField("ip_address", None), {AA.R, AA.W}),
+        ECA(IPField("mask", None), {AA.R, AA.W}),
+        ECA(IPField("gateway", None), {AA.R, AA.W}),
+        ECA(IPField("primary_dns", None), {AA.R, AA.W}),
+        ECA(IPField("secondary_dns", None), {AA.R, AA.W}),
+        ECA(IPField("current_address", None), {AA.R}, avc=True),
+        ECA(IPField("current_mask", None), {AA.R}, avc=True),
+        ECA(IPField("current_gateway", None), {AA.R}, avc=True),
+        ECA(IPField("current_primary_dns", None), {AA.R}, avc=True),
+        ECA(IPField("current_secondary_dns", None), {AA.R}, avc=True),
+        ECA(StrFixedLenField("domain_name", None, 25), {AA.R}, avc=True),
+        ECA(StrFixedLenField("host_name", None, 25), {AA.R}, avc=True),
+        ECA(ShortField("relay_agent_options", None), {AA.R, AA.W},
+            optional=True),
+    ]
+    mandatory_operations = {OP.Get, OP.Set, OP.Test}
+    notifications = {OP.AttributeValueChange}
+
+
+class VlanTaggingOperation(Packet):
+    name = "VlanTaggingOperation"
+    fields_desc = [
+        BitField("filter_outer_priority", 0, 4),
+        BitField("filter_outer_vid", 0, 13),
+        BitField("filter_outer_tpid_de", 0, 3),
+        BitField("pad1", 0, 12),
+
+        BitField("filter_inner_priority", 0, 4),
+        BitField("filter_inner_vid", 0, 13),
+        BitField("filter_inner_tpid_de", 0, 3),
+        BitField("pad2", 0, 8),
+        BitField("filter_ether_type", 0, 4),
+
+        BitField("treatment_tags_to_remove", 0, 2),
+        BitField("pad3", 0, 10),
+        BitField("treatment_outer_priority", 0, 4),
+        BitField("treatment_outer_vid", 0, 13),
+        BitField("treatment_outer_tpid_de", 0, 3),
+
+        BitField("pad4", 0, 12),
+        BitField("treatment_inner_priority", 0, 4),
+        BitField("treatment_inner_vid", 0, 13),
+        BitField("treatment_inner_tpid_de", 0, 3),
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+    @staticmethod
+    def json_from_value(value):
+        bits = BitArray(hex=hexlify(value))
+        temp = VlanTaggingOperation(
+            filter_outer_priority=bits[0:4].uint,         # 4  <-size
+            filter_outer_vid=bits[4:17].uint,             # 13
+            filter_outer_tpid_de=bits[17:20].uint,        # 3
+                                                          # pad 12
+            filter_inner_priority=bits[32:36].uint,       # 4
+            filter_inner_vid=bits[36:49].uint,            # 13
+            filter_inner_tpid_de=bits[49:52].uint,        # 3
+                                                          # pad 8
+            filter_ether_type=bits[60:64].uint,           # 4
+            treatment_tags_to_remove=bits[64:66].uint,    # 2
+                                                          # pad 10
+            treatment_outer_priority=bits[76:80].uint,    # 4
+            treatment_outer_vid=bits[80:93].uint,         # 13
+            treatment_outer_tpid_de=bits[93:96].uint,     # 3
+                                                          # pad 12
+            treatment_inner_priority=bits[108:112].uint,  # 4
+            treatment_inner_vid=bits[112:125].uint,       # 13
+            treatment_inner_tpid_de=bits[125:128].uint,   # 3
+        )
+        return json.dumps(temp.fields, separators=(',', ':'))
+
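+    # index() concatenates the seven filter fields into a fixed-width string
+    # key (2+3+1+3+4+1+2 digits); an all-default rule yields sixteen zeros.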
+    def index(self):
+        return '{:02}'.format(self.fields.get('filter_outer_priority',0)) + \
+               '{:03}'.format(self.fields.get('filter_outer_vid',0)) + \
+               '{:01}'.format(self.fields.get('filter_outer_tpid_de',0)) + \
+               '{:03}'.format(self.fields.get('filter_inner_priority',0)) + \
+               '{:04}'.format(self.fields.get('filter_inner_vid',0)) + \
+               '{:01}'.format(self.fields.get('filter_inner_tpid_de',0)) + \
+               '{:02}'.format(self.fields.get('filter_ether_type',0))
+
+    def is_delete(self):
+        return self.fields.get('treatment_tags_to_remove',0) == 0x3 and \
+            self.fields.get('pad3',0) == 0x3ff and \
+            self.fields.get('treatment_outer_priority',0) == 0xf and \
+            self.fields.get('treatment_outer_vid',0) == 0x1fff and \
+            self.fields.get('treatment_outer_tpid_de',0) == 0x7 and \
+            self.fields.get('pad4',0) == 0xfff and \
+            self.fields.get('treatment_inner_priority',0) == 0xf and \
+            self.fields.get('treatment_inner_vid',0) == 0x1fff and \
+            self.fields.get('treatment_inner_tpid_de',0) == 0x7
+
+    def delete(self):
+        self.fields['treatment_tags_to_remove'] = 0x3
+        self.fields['pad3'] = 0x3ff
+        self.fields['treatment_outer_priority'] = 0xf
+        self.fields['treatment_outer_vid'] = 0x1fff
+        self.fields['treatment_outer_tpid_de'] = 0x7
+        self.fields['pad4'] = 0xfff
+        self.fields['treatment_inner_priority'] = 0xf
+        self.fields['treatment_inner_vid'] = 0x1fff
+        self.fields['treatment_inner_tpid_de'] = 0x7
+        return self
+
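+
+# Illustrative usage of VlanTaggingOperation (assumed, not from the original
+# source): build a rule, key it for table lookups, then mark it deleted.
+#
+#   op = VlanTaggingOperation(filter_inner_vid=100)
+#   key = op.index()        # 16-digit fixed-width key
+#   op.delete()             # overwrite treatment fields with delete pattern
+#   assert op.is_delete()
+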
+class ExtendedVlanTaggingOperationConfigurationData(EntityClass):
+    class_id = 171
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("association_type", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 11),
+        ECA(ShortField("received_vlan_tagging_operation_table_max_size", None),
+            {AA.R}),
+        ECA(ShortField("input_tpid", None), {AA.R, AA.W}),
+        ECA(ShortField("output_tpid", None), {AA.R, AA.W}),
+        ECA(ByteField("downstream_mode", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 8),
+        ECA(OmciTableField(
+            PacketLenField("received_frame_vlan_tagging_operation_table", None,
+                VlanTaggingOperation, length_from=lambda pkt: 16)), {AA.R, AA.W}),
+        ECA(ShortField("associated_me_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(FieldListField("dscp_to_p_bit_mapping", None,
+                           BitField('',  0, size=3), count_from=lambda _: 64),
+            {AA.R, AA.W}),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Set, OP.Get, OP.GetNext}
+    optional_operations = {OP.SetTable}
+
+
+class OntG(EntityClass):
+    class_id = 256
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: x == 0),
+        ECA(StrFixedLenField("vendor_id", None, 4), {AA.R}),
+        ECA(StrFixedLenField("version", None, 14), {AA.R}),
+        ECA(OmciSerialNumberField("serial_number"), {AA.R}),
+        ECA(ByteField("traffic_management_options", None), {AA.R},
+            range_check=lambda x: 0 <= x <= 2),
+        ECA(ByteField("vp_vc_cross_connection_option", 0), {AA.R},
+            optional=True, deprecated=True),
+        ECA(ByteField("battery_backup", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("administrative_state", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("operational_state", None), {AA.R}, optional=True,
+            range_check=lambda x: 0 <= x <= 1, avc=True),
+        ECA(ByteField("ont_survival_time", None), {AA.R}, optional=True),
+        ECA(StrFixedLenField("logical_onu_id", None, 24), {AA.R},
+            optional=True, avc=True),
+        ECA(StrFixedLenField("logical_password", None, 12), {AA.R},
+            optional=True, avc=True),
+        ECA(ByteField("credentials_status", None), {AA.R, AA.W},
+            optional=True, range_check=lambda x: 0 <= x <= 4),
+        ECA(BitField("extended_tc_layer_options", None, size=16), {AA.R},
+            optional=True),
+    ]
+    mandatory_operations = {
+        OP.Get, OP.Set, OP.Reboot, OP.Test, OP.SynchronizeTime}
+    notifications = {OP.TestResult, OP.AttributeValueChange,
+                     OP.AlarmNotification}
+    alarms = {
+        0: 'Equipment alarm',
+        1: 'Powering alarm',
+        2: 'Battery missing',
+        3: 'Battery failure',
+        4: 'Battery low',
+        5: 'Physical intrusion',
+        6: 'Self-test failure',
+        7: 'Dying gasp',
+        8: 'Temperature yellow',
+        9: 'Temperature red',
+        10: 'Voltage yellow',
+        11: 'Voltage red',
+        12: 'ONU manual power off',
+        13: 'Invalid image',
+        14: 'PSE overload yellow',
+        15: 'PSE overload red',
+    }
+
+
+class Ont2G(EntityClass):
+    class_id = 257
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: x == 0),
+        ECA(StrFixedLenField("equipment_id", None, 20), {AA.R}),
+        ECA(ByteField("omcc_version", None), {AA.R}, avc=True),
+        ECA(ShortField("vendor_product_code", None), {AA.R}),
+        ECA(ByteField("security_capability", None), {AA.R},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("security_mode", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("total_priority_queue_number", None), {AA.R}),
+        ECA(ByteField("total_traffic_scheduler_number", None), {AA.R}),
+        ECA(ByteField("mode", None), {AA.R}, deprecated=True),
+        ECA(ShortField("total_gem_port_id_number", None), {AA.R}),
+        ECA(IntField("sys_uptime", None), {AA.R}),
+        ECA(BitField("connectivity_capability", None, size=16), {AA.R}),
+        ECA(ByteField("current_connectivity_mode", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 7),
+        ECA(BitField("qos_configuration_flexibility", None, size=16),
+            {AA.R}, optional=True),
+        ECA(ShortField("priority_queue_scale_factor", None), {AA.R, AA.W},
+            optional=True),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+    notifications = {OP.AttributeValueChange}
+
+
+class Tcont(EntityClass):
+    class_id = 262
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(ShortField("alloc_id", None), {AA.R, AA.W}),
+        ECA(ByteField("mode_indicator", 1), {AA.R}, deprecated=True),
+        ECA(ByteField("policy", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 2),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+
+
+class AniG(EntityClass):
+    class_id = 263
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(ByteField("sr_indication", None), {AA.R}),
+        ECA(ShortField("total_tcont_number", None), {AA.R}),
+        ECA(ShortField("gem_block_length", None), {AA.R, AA.W}),
+        ECA(ByteField("piggyback_dba_reporting", None), {AA.R},
+            range_check=lambda x: 0 <= x <= 4),
+        ECA(ByteField("whole_ont_dba_reporting", None), {AA.R},
+            deprecated=True),
+        ECA(ByteField("sf_threshold", 5), {AA.R, AA.W}),
+        ECA(ByteField("sd_threshold", 9), {AA.R, AA.W}),
+        ECA(ByteField("arc", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1, avc=True),
+        ECA(ByteField("arc_interval", 0), {AA.R, AA.W}),
+        ECA(ShortField("optical_signal_level", None), {AA.R}),
+        ECA(ByteField("lower_optical_threshold", 0xFF), {AA.R, AA.W}),
+        ECA(ByteField("upper_optical_threshold", 0xFF), {AA.R, AA.W}),
+        ECA(ShortField("ont_response_time", None), {AA.R}),
+        ECA(ShortField("transmit_optical_level", None), {AA.R}),
+        ECA(ByteField("lower_transmit_power_threshold", 0x81), {AA.R, AA.W}),
+        ECA(ByteField("upper_transmit_power_threshold", 0x81), {AA.R, AA.W}),
+    ]
+    mandatory_operations = {OP.Get, OP.Set, OP.Test}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        0: 'Low received optical power',
+        1: 'High received optical power',
+        2: 'Signal fail',
+        3: 'Signal degrade',
+        4: 'Low transmit optical power',
+        5: 'High transmit optical power',
+        6: 'Laser bias current',
+    }
+
+
+class UniG(EntityClass):
+    class_id = 264
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(ShortField("configuration_option_status", None), {AA.R, AA.W},
+            deprecated=True),
+        ECA(ByteField("administrative_state", None), {AA.R, AA.W}),
+        ECA(ByteField("management_capability", None), {AA.R},
+            range_check=lambda x: 0 <= x <= 2),
+        ECA(ShortField("non_omci_management_identifier", None), {AA.R, AA.W}),
+        ECA(ShortField("relay_agent_options", None), {AA.R, AA.W},
+            optional=True),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+
+
+class GemInterworkingTp(EntityClass):
+    class_id = 266
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ShortField("gem_port_network_ctp_pointer", None),
+            {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("interworking_option", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 7),
+        ECA(ShortField("service_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interworking_tp_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("pptp_counter", None), {AA.R}, optional=True),
+        ECA(ByteField("operational_state", None), {AA.R}, optional=True,
+            range_check=lambda x: 0 <= x <= 1, avc=True),
+        ECA(ShortField("gal_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("gal_loopback_configuration", 0),
+            {AA.R, AA.W}, range_check=lambda x: 0 <= x <= 1),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        6: 'Operational state change',
+    }
+
+
+class GemPortNetworkCtp(EntityClass):
+    class_id = 268
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ShortField("port_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("tcont_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("direction", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 1 <= x <= 3),
+        ECA(ShortField("traffic_management_pointer_upstream", None),
+            {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("traffic_descriptor_profile_pointer", None),
+            {AA.R, AA.W, AA.SBC}, optional=True),
+        ECA(ByteField("uni_counter", None), {AA.R}, optional=True),
+        ECA(ShortField("priority_queue_pointer_downstream", None),
+            {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("encryption_state", None), {AA.R}, optional=True),
+        ECA(ShortField("traffic_desc_profile_pointer_downstream", None),
+            {AA.R, AA.W, AA.SBC}, optional=True),
+        ECA(ShortField("encryption_key_ring", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 3)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        5: 'End-to-end loss of continuity',
+    }
+
+
+class GalEthernetProfile(EntityClass):
+    class_id = 272
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ShortField("max_gem_payload_size", None), {AA.R, AA.W, AA.SBC}),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+
+
+class PriorityQueueG(EntityClass):
+    class_id = 277
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(ByteField("queue_configuration_option", None), {AA.R},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("maximum_queue_size", None), {AA.R}),
+        ECA(ShortField("allocated_queue_size", None), {AA.R, AA.W}),
+        ECA(ShortField("discard_block_counter_reset_interval", None), {AA.R, AA.W}),
+        ECA(ShortField("threshold_value_for_discarded_blocks", None), {AA.R, AA.W}),
+        ECA(IntField("related_port", None), {AA.R}),
+        ECA(ShortField("traffic_scheduler_pointer", 0), {AA.R, AA.W}),
+        ECA(ByteField("weight", 1), {AA.R, AA.W}),
+        ECA(ShortField("back_pressure_operation", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(IntField("back_pressure_time", 0), {AA.R, AA.W}),
+        ECA(ShortField("back_pressure_occur_queue_threshold", None), {AA.R, AA.W}),
+        ECA(ShortField("back_pressure_clear_queue_threshold", None), {AA.R, AA.W}),
+        # TODO: Custom field of 4 2-byte values would help below
+        ECA(LongField("packet_drop_queue_thresholds", None), {AA.R, AA.W},
+            optional=True),
+        ECA(ShortField("packet_drop_max_p", 0xFFFF), {AA.R, AA.W}, optional=True),
+        ECA(ByteField("queue_drop_w_q", 9), {AA.R, AA.W}, optional=True),
+        ECA(ByteField("drop_precedence_colour_marking", 0), {AA.R, AA.W},
+            optional=True, range_check=lambda x: 0 <= x <= 7),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Block loss',
+    }
+
+
+class TrafficSchedulerG(EntityClass):
+    class_id = 278
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(ShortField("tcont_pointer", None), {AA.R}),
+        ECA(ShortField("traffic_scheduler_pointer", None), {AA.R}),
+        ECA(ByteField("policy", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 2),
+        ECA(ByteField("priority_weight", 0), {AA.R, AA.W}),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+
+
+class MulticastGemInterworkingTp(EntityClass):
+    class_id = 281
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC},
+            range_check=lambda x: x != OmciNullPointer),
+        ECA(ShortField("gem_port_network_ctp_pointer", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interworking_option", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: x in [0, 1, 3, 5]),
+        ECA(ShortField("service_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("interworking_tp_pointer", 0), {AA.R, AA.W, AA.SBC},
+            deprecated=True),
+        ECA(ByteField("pptp_counter", None), {AA.R}),
+        ECA(ByteField("operational_state", None), {AA.R}, avc=True,
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("gal_profile_pointer", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("gal_loopback_configuration", None), {AA.R, AA.W, AA.SBC},
+            deprecated=True),
+        # TODO add multicast_address_table here (page 85 of spec.)
+        # ECA(...("multicast_address_table", None), {AA.R, AA.W})
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.GetNext, OP.Set}
+    optional_operations = {OP.SetTable}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        0: 'Deprecated',
+    }
+
+
+class AccessControlRow0(Packet):
+    name = "AccessControlRow0"
+    fields_desc = [
+        BitField("set_ctrl", 0, 2),
+        BitField("row_part_id", 0, 3),
+        BitField("test", 0, 1),
+        BitField("row_key", 0, 10),
+
+        ShortField("gem_port_id", None),
+        ShortField("vlan_id", None),
+        IPField("src_ip", None),
+        IPField("dst_ip_start", None),
+        IPField("dst_ip_end", None),
+        IntField("ipm_group_bw", None),
+        ShortField("reserved0", 0)
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AccessControlRow1(Packet):
+    name = "AccessControlRow1"
+    fields_desc = [
+        BitField("set_ctrl", 0, 2),
+        BitField("row_part_id", 0, 3),
+        BitField("test", 0, 1),
+        BitField("row_key", 0, 10),
+
+        StrFixedLenField("ipv6_src_addr_start_bytes", None, 12),
+        ShortField("preview_length", None),
+        ShortField("preview_repeat_time", None),
+        ShortField("preview_repeat_count", None),
+        ShortField("preview_reset_time", None),
+        ShortField("reserved1", 0)
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AccessControlRow2(Packet):
+    name = "AccessControlRow2"
+    fields_desc = [
+        BitField("set_ctrl", 0, 2),
+        BitField("row_part_id", 0, 3),
+        BitField("test", 0, 1),
+        BitField("row_key", 0, 10),
+
+        StrFixedLenField("ipv6_dst_addr_start_bytes", None, 12),
+        StrFixedLenField("reserved2", None, 10)
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+
+class DownstreamIgmpMulticastTci(Packet):
+    name = "DownstreamIgmpMulticastTci"
+    fields_desc = [
+        ByteField("ctrl_type", None),
+        ShortField("tci", None)
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+
+class MulticastOperationsProfile(EntityClass):
+    class_id = 309
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC},
+            range_check=lambda x: x != 0 and x != OmciNullPointer),
+        ECA(ByteField("igmp_version", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: x in [1, 2, 3, 16, 17]),
+        ECA(ByteField("igmp_function", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 2),
+        ECA(ByteField("immediate_leave", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("us_igmp_tci", None), {AA.R, AA.W, AA.SBC}, optional=True),
+        ECA(ByteField("us_igmp_tag_ctrl", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 3, optional=True),
+        ECA(IntField("us_igmp_rate", None), {AA.R, AA.W, AA.SBC}, optional=True),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "dynamic_access_control_list_table", None, 24), {AA.R, AA.W}),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "static_access_control_list_table", None, 24), {AA.R, AA.W}),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField("lost_groups_list_table", None, 10), {AA.R}),
+        ECA(ByteField("robustness", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("querier_ip", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("query_interval", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("querier_max_response_time", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("last_member_response_time", 10), {AA.R, AA.W}),
+        ECA(ByteField("unauthorized_join_behaviour", None), {AA.R, AA.W}),
+        ECA(StrFixedLenField("ds_igmp_mcast_tci", None, 3), {AA.R, AA.W, AA.SBC}, optional=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Set, OP.Get, OP.GetNext}
+    optional_operations = {OP.SetTable}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Lost multicast group',
+    }
+
+
+class MulticastServicePackage(Packet):
+    name = "MulticastServicePackage"
+    fields_desc = [
+        BitField("set_ctrl", 0, 2),
+        BitField("reserved0", 0, 4),
+        BitField("row_key", 0, 10),
+
+        ShortField("vid_uni", None),
+        ShortField("max_simultaneous_groups", None),
+        IntField("max_multicast_bw", None),
+        ShortField("mcast_operations_profile_pointer", None),
+        StrFixedLenField("reserved1", None, 8)
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AllowedPreviewGroupsRow0(Packet):
+    name = "AllowedPreviewGroupsRow0"
+    fields_desc = [
+        BitField("set_ctrl", 0, 2),
+        BitField("row_part_id", 0, 3),
+        BitField("reserved0", 0, 1),
+        BitField("row_key", 0, 10),
+
+        StrFixedLenField("ipv6_pad", 0, 12),
+        IPField("src_ip", None),
+        ShortField("vlan_id_ani", None),
+        ShortField("vlan_id_uni", None)
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+
+class AllowedPreviewGroupsRow1(Packet):
+    name = "AllowedPreviewGroupsRow1"
+    fields_desc = [
+        BitField("set_ctrl", 0, 2),
+        BitField("row_part_id", 0, 3),
+        BitField("reserved0", 0, 1),
+        BitField("row_key", 0, 10),
+
+        StrFixedLenField("ipv6_pad", 0, 12),
+        IPField("dst_ip", None),
+        ShortField("duration", None),
+        ShortField("time_left", None)
+    ]
+
+    def to_json(self):
+        return json.dumps(self.fields, separators=(',', ':'))
+
+
+class MulticastSubscriberConfigInfo(EntityClass):
+    class_id = 310
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("me_type", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ShortField("mcast_operations_profile_pointer", None),
+            {AA.R, AA.W, AA.SBC}),
+        ECA(ShortField("max_simultaneous_groups", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("max_multicast_bandwidth", None), {AA.R, AA.W, AA.SBC}),
+        ECA(ByteField("bandwidth_enforcement", None), {AA.R, AA.W, AA.SBC},
+            range_check=lambda x: 0 <= x <= 1),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "multicast_service_package_table", None, 20), {AA.R, AA.W}),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "allowed_preview_groups_table", None, 22), {AA.R, AA.W}),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Set, OP.Get, OP.GetNext,
+                            OP.SetTable}
+
+
+class VirtualEthernetInterfacePt(EntityClass):
+    class_id = 329
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: x != 0 and x != OmciNullPointer),
+        ECA(ByteField("administrative_state", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("operational_state", None), {AA.R}, avc=True,
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(StrFixedLenField(
+            "interdomain_name", None, 25), {AA.R, AA.W}, optional=True),
+        ECA(ShortField("tcp_udp_pointer", None), {AA.R, AA.W}, optional=True),
+        ECA(ShortField("iana_assigned_port", None), {AA.R}),
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        0: 'Connecting function fail',
+    }
+
+
+class Omci(EntityClass):
+    class_id = 287
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R},
+            range_check=lambda x: x == 0),
+
+        # TODO: Can this be expressed better in SCAPY, probably not?
+        # On the initial, Get request for either the me_type or message_type
+        # attributes, you will receive a 4 octet value (big endian) that is
+        # the number of octets to 'get-next' to fully load the desired
+        # attribute.  For a basic OMCI formatted message, that will be 29
+        # octets per get-request.
+        #
+        # For the me_type_table, these are 16-bit values (ME Class IDs)
+        #
+        # For the message_type_table, these are 8-bit values (Actions)
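+        #
+        # Illustrative arithmetic (example size assumed): if the initial Get
+        # reports 58 octets, retrieving the table takes
+        # ceil(58 / 29) == 2 get-next requests.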
+
+        ECA(FieldListField("me_type_table", None, ByteField('', 0),
+                           count_from=lambda _: 29), {AA.R}),
+        ECA(FieldListField("message_type_table", None, ByteField('', 0),
+                           count_from=lambda _: 29), {AA.R}),
+    ]
+    mandatory_operations = {OP.Get, OP.GetNext}
+
+
+class EnhSecurityControl(EntityClass):
+    class_id = 332
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(BitField("olt_crypto_capabilities", None, 16*8), {AA.W}),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "olt_random_challenge_table", None, 17), {AA.R, AA.W}),
+        ECA(ByteField("olt_challenge_status", 0), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("onu_selected_crypto_capabilities", None), {AA.R}),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "onu_random_challenge_table", None, 16), {AA.R}, avc=True),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "onu_authentication_result_table", None, 16), {AA.R}, avc=True),
+        # TODO: need to make table and add column data
+        ECA(StrFixedLenField(
+            "olt_authentication_result_table", None, 17), {AA.W}),
+        ECA(ByteField("olt_result_status", None), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("onu_authentication_status", None), {AA.R}, avc=True,
+            range_check=lambda x: 0 <= x <= 5),
+        ECA(StrFixedLenField(
+            "master_session_key_name", None, 16), {AA.R}),
+        ECA(StrFixedLenField(
+            "broadcast_key_table", None, 18), {AA.R, AA.W}),
+        ECA(ShortField("effective_key_length", None), {AA.R}),
+    ]
+    mandatory_operations = {OP.Set, OP.Get, OP.GetNext}
+    notifications = {OP.AttributeValueChange}
+
+
+class EthernetPMMonitoringHistoryData(EntityClass):
+    class_id = 24
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("fcs_errors", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("excessive_collision_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("late_collision_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("frames_too_long", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("buffer_overflows_on_rx", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("buffer_overflows_on_tx", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("single_collision_frame_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("multiple_collisions_frame_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("sqe_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("deferred_tx_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("internal_mac_tx_error_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("carrier_sense_error_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("alignment_error_counter", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("internal_mac_rx_error_counter", None), {AA.R}, tca=True, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'FCS errors',
+        1: 'Excessive collision counter',
+        2: 'Late collision counter',
+        3: 'Frames too long',
+        4: 'Buffer overflows on receive',
+        5: 'Buffer overflows on transmit',
+        6: 'Single collision frame counter',
+        7: 'Multiple collision frame counter',
+        8: 'SQE counter',
+        9: 'Deferred transmission counter',
+        10: 'Internal MAC transmit error counter',
+        11: 'Carrier sense error counter',
+        12: 'Alignment error counter',
+        13: 'Internal MAC receive error counter',
+    }
+
+
+class FecPerformanceMonitoringHistoryData(EntityClass):
+    class_id = 312
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("corrected_bytes", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("corrected_code_words", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("uncorrectable_code_words", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("total_code_words", None), {AA.R}, counter=True),
+        ECA(ShortField("fec_seconds", None), {AA.R}, tca=True, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Corrected bytes',
+        1: 'Corrected code words',
+        2: 'Uncorrectable code words',
+        4: 'FEC seconds',
+    }
+
+
+class EthernetFrameDownstreamPerformanceMonitoringHistoryData(EntityClass):
+    class_id = 321
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("drop_events", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("octets", None), {AA.R}, counter=True),
+        ECA(IntField("packets", None), {AA.R}, counter=True),
+        ECA(IntField("broadcast_packets", None), {AA.R}, counter=True),
+        ECA(IntField("multicast_packets", None), {AA.R}, counter=True),
+        ECA(IntField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("64_octets", None), {AA.R}, counter=True),
+        ECA(IntField("65_to_127_octets", None), {AA.R}, counter=True),
+        ECA(IntField("128_to_255_octets", None), {AA.R}, counter=True),
+        ECA(IntField("256_to_511_octets", None), {AA.R}, counter=True),
+        ECA(IntField("512_to_1023_octets", None), {AA.R}, counter=True),
+        ECA(IntField("1024_to_1518_octets", None), {AA.R}, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Drop events',
+        1: 'CRC errored packets',
+        2: 'Undersize packets',
+        3: 'Oversize packets',
+    }
+
+
+class EthernetFrameUpstreamPerformanceMonitoringHistoryData(EntityClass):
+    class_id = 322
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("drop_events", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("octets", None), {AA.R}, counter=True),
+        ECA(IntField("packets", None), {AA.R}, counter=True),
+        ECA(IntField("broadcast_packets", None), {AA.R}, counter=True),
+        ECA(IntField("multicast_packets", None), {AA.R}, counter=True),
+        ECA(IntField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("64_octets", None), {AA.R}, counter=True),
+        ECA(IntField("65_to_127_octets", None), {AA.R}, counter=True),
+        ECA(IntField("128_to_255_octets", None), {AA.R}, counter=True),
+        ECA(IntField("256_to_511_octets", None), {AA.R}, counter=True),
+        ECA(IntField("512_to_1023_octets", None), {AA.R}, counter=True),
+        ECA(IntField("1024_to_1518_octets", None), {AA.R}, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Drop events',
+        1: 'CRC errored packets',
+        2: 'Undersize packets',
+        3: 'Oversize packets',
+    }
+
+
+class VeipUni(EntityClass):
+    class_id = 329
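+    # NOTE: class_id 329 is also declared by VirtualEthernetInterfacePt
+    # above, so entity_id_to_class_map retains only one of the two classes.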
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R}),
+        ECA(ByteField("administrative_state", 1), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1),
+        ECA(ByteField("operational_state", 1), {AA.R, AA.W},
+            range_check=lambda x: 0 <= x <= 1, optional=True, avc=True),
+        ECA(StrFixedLenField("interdomain_name", None, 25), {AA.R, AA.W},
+            optional=True),
+        ECA(ShortField("tcp_udp_pointer", None), {AA.R, AA.W}, optional=True),
+        ECA(ShortField("iana_assigned_port", 0xFFFF), {AA.R})
+    ]
+    mandatory_operations = {OP.Get, OP.Set}
+    notifications = {OP.AttributeValueChange, OP.AlarmNotification}
+    alarms = {
+        0: 'Connecting function fail'
+    }
+
+
+class EthernetFrameExtendedPerformanceMonitoring(EntityClass):
+    class_id = 334
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        # 2-octet field -> Threshold data 1/2 ID
+        # 2-octet field -> Parent ME Class
+        # 2-octet field -> Parent ME Instance
+        # 2-octet field -> Accumulation disable
+        # 2-octet field -> TCA Disable
+        # 2-octet field -> Control fields bitmap
+        # 2-octet field -> TCI
+        # 2-octet field -> Reserved
+        ECA(FieldListField("control_block", None, ShortField('', 0),
+                           count_from=lambda _: 8), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("drop_events", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("octets", None), {AA.R}, counter=True),
+        ECA(IntField("packets", None), {AA.R}, counter=True),
+        ECA(IntField("broadcast_packets", None), {AA.R}, counter=True),
+        ECA(IntField("multicast_packets", None), {AA.R}, counter=True),
+        ECA(IntField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("64_octets", None), {AA.R}, counter=True),
+        ECA(IntField("65_to_127_octets", None), {AA.R}, counter=True),
+        ECA(IntField("128_to_255_octets", None), {AA.R}, counter=True),
+        ECA(IntField("256_to_511_octets", None), {AA.R}, counter=True),
+        ECA(IntField("512_to_1023_octets", None), {AA.R}, counter=True),
+        ECA(IntField("1024_to_1518_octets", None), {AA.R}, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    optional_operations = {OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Drop events',
+        1: 'CRC errored packets',
+        2: 'Undersize packets',
+        3: 'Oversize packets',
+    }
+
+
+class EthernetFrameExtendedPerformanceMonitoring64Bit(EntityClass):
+    class_id = 426
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        # 2-octet field -> Threshold data 1/2 ID
+        # 2-octet field -> Parent ME Class
+        # 2-octet field -> Parent ME Instance
+        # 2-octet field -> Accumulation disable
+        # 2-octet field -> TCA Disable
+        # 2-octet field -> Control fields bitmap
+        # 2-octet field -> TCI
+        # 2-octet field -> Reserved
+        ECA(FieldListField("control_block", None, ShortField('', 0),
+                           count_from=lambda _: 8), {AA.R, AA.W, AA.SBC}),
+        ECA(LongField("drop_events", None), {AA.R}, tca=True, counter=True),
+        ECA(LongField("octets", None), {AA.R}, counter=True),
+        ECA(LongField("packets", None), {AA.R}, counter=True),
+        ECA(LongField("broadcast_packets", None), {AA.R}, counter=True),
+        ECA(LongField("multicast_packets", None), {AA.R}, counter=True),
+        ECA(LongField("crc_errored_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(LongField("undersize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(LongField("oversize_packets", None), {AA.R}, tca=True, counter=True),
+        ECA(LongField("64_octets", None), {AA.R}, counter=True),
+        ECA(LongField("65_to_127_octets", None), {AA.R}, counter=True),
+        ECA(LongField("128_to_255_octets", None), {AA.R}, counter=True),
+        ECA(LongField("256_to_511_octets", None), {AA.R}, counter=True),
+        ECA(LongField("512_to_1023_octets", None), {AA.R}, counter=True),
+        ECA(LongField("1024_to_1518_octets", None), {AA.R}, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    optional_operations = {OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        0: 'Drop events',
+        1: 'CRC errored packets',
+        2: 'Undersize packets',
+        3: 'Oversize packets',
+    }
+
+
+class GemPortNetworkCtpMonitoringHistoryData(EntityClass):
+    class_id = 341
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("transmitted_gem_frames", None), {AA.R}, counter=True),
+        ECA(IntField("received_gem_frames", None), {AA.R}, counter=True),
+        ECA(LongField("received_payload_bytes", None), {AA.R}, counter=True),
+        ECA(LongField("transmitted_payload_bytes", None), {AA.R}, counter=True),
+        ECA(IntField("encryption_key_errors", None), {AA.R}, tca=True, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set, OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        1: 'Encryption key errors',
+    }
+
+
+class XgPonTcPerformanceMonitoringHistoryData(EntityClass):
+    class_id = 344
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("psbd_hec_error_count", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("xgtc_hec_error_count", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("unknown_profile_count", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("transmitted_xgem_frames", None), {AA.R}, counter=True),
+        ECA(IntField("fragment_xgem_frames", None), {AA.R}, counter=True),
+        ECA(IntField("xgem_hec_lost_words_count", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("xgem_key_errors", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("xgem_hec_error_count", None), {AA.R}, tca=True, counter=True)
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    optional_operations = {OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        1: 'PSBd HEC error count',
+        2: 'XGTC HEC error count',
+        3: 'Unknown profile count',
+        4: 'XGEM HEC loss count',
+        5: 'XGEM key errors',
+        6: 'XGEM HEC error count',
+    }
+
+
+class XgPonDownstreamPerformanceMonitoringHistoryData(EntityClass):
+    class_id = 345
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R},),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("ploam_mic_error_count", None), {AA.R}, tca=True, counter=True),
+        ECA(IntField("downstream_ploam_messages_count", None), {AA.R}, counter=True),
+        ECA(IntField("profile_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("ranging_time_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("deactivate_onu_id_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("disable_serial_number_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("request_registration_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("assign_alloc_id_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("key_control_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("sleep_allow_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("baseline_omci_messages_received_count", None), {AA.R}, counter=True),
+        ECA(IntField("extended_omci_messages_received_count", None), {AA.R}, counter=True),
+        ECA(IntField("assign_onu_id_messages_received", None), {AA.R}, counter=True),
+        ECA(IntField("omci_mic_error_count", None), {AA.R}, tca=True, counter=True),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    optional_operations = {OP.GetCurrentData}
+    notifications = {OP.AlarmNotification}
+    alarms = {
+        1: 'PLOAM MIC error count',
+        2: 'OMCI MIC error count',
+    }
+
+
+class XgPonUpstreamPerformanceMonitoringHistoryData(EntityClass):
+    class_id = 346
+    hidden = True
+    attributes = [
+        ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
+        ECA(ByteField("interval_end_time", None), {AA.R}),
+        ECA(ShortField("threshold_data_1_2_id", None), {AA.R, AA.W, AA.SBC}),
+        ECA(IntField("upstream_ploam_message_count", None), {AA.R}, counter=True),
+        ECA(IntField("serial_number_onu_message_count", None), {AA.R}, counter=True),
+        ECA(IntField("registration_message_count", None), {AA.R}, counter=True),
+        ECA(IntField("key_report_message_count", None), {AA.R}, counter=True),
+        ECA(IntField("acknowledge_message_count", None), {AA.R}, counter=True),
+        ECA(IntField("sleep_request_message_count", None), {AA.R}, counter=True),
+    ]
+    mandatory_operations = {OP.Create, OP.Delete, OP.Get, OP.Set}
+    optional_operations = {OP.GetCurrentData}
+
+
+# Lookup table mapping entity class name -> EntityClass subclass, built by
+# introspecting this module
+entity_classes_name_map = dict(
+    inspect.getmembers(sys.modules[__name__],
+    lambda o: inspect.isclass(o) and \
+              issubclass(o, EntityClass) and \
+              o is not EntityClass)
+)
+
+entity_classes = list(entity_classes_name_map.itervalues())
+entity_id_to_class_map = dict((c.class_id, c) for c in entity_classes)
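+
+# Illustrative usage sketch (assumes this module's import name; not part of
+# the original source):
+#
+#   me_class = entity_id_to_class_map[84]   # -> VlanTaggingFilterData
+#   assert OP.Create in me_class.mandatory_operations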
diff --git a/python/extensions/omci/omci_fields.py b/python/extensions/omci/omci_fields.py
new file mode 100644
index 0000000..b6ccf5e
--- /dev/null
+++ b/python/extensions/omci/omci_fields.py
@@ -0,0 +1,239 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import binascii
+import json
+from scapy.fields import Field, StrFixedLenField, PadField, IntField, \
+    FieldListField, ByteField, StrField, PacketField
+from scapy.packet import Raw
+
+class FixedLenField(PadField):
+    """
+    A PadField that limits parsing of its inner field to the pad size and
+    strips a trailing payload that is nothing but padding.
+    """
+    def __init__(self, fld, align, padwith='\x00'):
+        super(FixedLenField, self).__init__(fld, align, padwith)
+
+    def getfield(self, pkt, s):
+        remain, val = self._fld.getfield(pkt, s[:self._align])
+        if isinstance(val.payload, Raw) and \
+                not val.payload.load.replace(self._padwith, ''):
+            # raw payload is just padding
+            val.remove_payload()
+        return remain + s[self._align:], val
+
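+# Illustrative usage sketch (SomeRowPacket is a hypothetical packet class):
+# parse a row from a fixed 16-octet window, discarding all-zero padding.
+#
+#   fld = FixedLenField(PacketField('row', None, SomeRowPacket), align=16)
+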
+class StrCompoundField(Field):
+    __slots__ = ['flds']
+
+    def __init__(self, name, flds):
+        super(StrCompoundField, self).__init__(name=name, default=None, fmt='s')
+        self.flds = flds
+        for fld in self.flds:
+            assert not fld.holds_packets, 'compound field cannot have packet field members'
+
+    def addfield(self, pkt, s, val):
+        for fld in self.flds:
+            # run through a fake add/get cycle to consume the portion of
+            # the input value that belongs to this field
+            x, extracted = fld.getfield(pkt, fld.addfield(pkt, '', val))
+            l = len(extracted)
+            s = fld.addfield(pkt, s, val[0:l])
+            val = val[l:]
+        return s
+
+    def getfield(self, pkt, s):
+        data = ''
+        for fld in self.flds:
+            s, value = fld.getfield(pkt, s)
+            if not isinstance(value, str):
+                value = fld.i2repr(pkt, value)
+            data += value
+        return s, data
+
+class XStrFixedLenField(StrFixedLenField):
+    """
+    A StrFixedLenField whose value is presented in hexadecimal.
+    """
+    def i2m(self, pkt, x):
+        l = self.length_from(pkt) * 2
+        return None if x is None else binascii.a2b_hex(x)[0:l+1]
+
+    def m2i(self, pkt, x):
+        return None if x is None else binascii.b2a_hex(x)
+
+class MultipleTypeField(object):
+    """MultipleTypeField are used for fields that can be implemented by
+        various Field subclasses, depending on conditions on the packet.
+
+        It is initialized with `flds` and `default`.
+
+        `default` is the default field type, to be used when none of the
+        conditions matched the current packet.
+
+        `flds` is a list of tuples (`fld`, `cond`), where `fld` is a field
+        type, and `cond` a "condition" to determine if `fld` is the field
+        type that should be used.
+
+        `cond` is either:
+
+        - a callable `cond_pkt` that accepts one argument (the packet) and
+            returns True if `fld` should be used, False otherwise.
+
+          - a tuple (`cond_pkt`, `cond_pkt_val`), where `cond_pkt` is the same
+            as in the previous case and `cond_pkt_val` is a callable that
+            accepts two arguments (the packet, and the value to be set) and
+            returns True if `fld` should be used, False otherwise.
+
+        See scapy.layers.l2.ARP (type "help(ARP)" in Scapy) for an example of
+        use.
+    """
+
+    __slots__ = ["flds", "default", "name"]
+
+    def __init__(self, flds, default):
+        self.flds = flds
+        self.default = default
+        self.name = self.default.name
+
+    def _find_fld_pkt(self, pkt):
+        """Given a Packet instance `pkt`, returns the Field subclass to be
+            used. If you know the value to be set (e.g., in .addfield()), use
+            ._find_fld_pkt_val() instead.
+        """
+        for fld, cond in self.flds:
+            if isinstance(cond, tuple):
+                cond = cond[0]
+            if cond(pkt):
+                return fld
+        return self.default
+
+    def _find_fld_pkt_val(self, pkt, val):
+        """Given a Packet instance `pkt` and the value `val` to be set,
+            returns the Field subclass to be used.
+        """
+        for fld, cond in self.flds:
+            if isinstance(cond, tuple):
+                if cond[1](pkt, val):
+                    return fld
+            elif cond(pkt):
+                return fld
+        return self.default
+
+    def getfield(self, pkt, s):
+        return self._find_fld_pkt(pkt).getfield(pkt, s)
+
+    def addfield(self, pkt, s, val):
+        return self._find_fld_pkt_val(pkt, val).addfield(pkt, s, val)
+
+    def any2i(self, pkt, val):
+        return self._find_fld_pkt_val(pkt, val).any2i(pkt, val)
+
+    def h2i(self, pkt, val):
+        return self._find_fld_pkt_val(pkt, val).h2i(pkt, val)
+
+    def i2h(self, pkt, val):
+        return self._find_fld_pkt_val(pkt, val).i2h(pkt, val)
+
+    def i2m(self, pkt, val):
+        return self._find_fld_pkt_val(pkt, val).i2m(pkt, val)
+
+    def i2len(self, pkt, val):
+        return self._find_fld_pkt_val(pkt, val).i2len(pkt, val)
+
+    def i2repr(self, pkt, val):
+        return self._find_fld_pkt_val(pkt, val).i2repr(pkt, val)
+
+    def register_owner(self, cls):
+        for fld, _ in self.flds:
+            fld.owners.append(cls)
+        self.default.owners.append(cls)
+
+    def __getattr__(self, attr):
+        # Fall back to the default field for other attribute lookups; there
+        # is no packet context available here to select a variant field.
+        return getattr(self.default, attr)
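+
+# Minimal usage sketch (hypothetical field names, not part of the original
+# code): pick the wire format of a field based on the packet contents.
+#
+#     fld = MultipleTypeField(
+#         [(IntField('count', 0), lambda pkt: pkt.message_type == 0x29)],
+#         StrField('raw', None))
+#
+# 'count' and 'raw' are made-up names; OmciTableField below applies the same
+# pattern using the real OMCI get/get-next response message ids.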
+
+class OmciSerialNumberField(StrCompoundField):
+    def __init__(self, name, default=None):
+        assert default is None or (isinstance(default, str) and len(default) == 12), 'invalid default serial number'
+        vendor_default = default[0:4] if default is not None else None
+        vendor_serial_default = default[4:12] if default is not None else None
+        super(OmciSerialNumberField, self).__init__(name,
+            [StrFixedLenField('vendor_id', vendor_default, 4),
+            XStrFixedLenField('vendor_serial_number', vendor_serial_default, 4)])
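+
+# Example (illustrative value, not part of the original code): a 12-character
+# serial number such as 'BRCM01020304' splits into vendor_id 'BRCM' plus
+# vendor_serial_number '01020304' (8 hex characters packed into 4 bytes).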
+
+class OmciTableField(MultipleTypeField):
+    def __init__(self, tblfld):
+        assert isinstance(tblfld, PacketField)
+        assert hasattr(tblfld.cls, 'index'), 'No index() method defined for OmciTableField row object'
+        assert hasattr(tblfld.cls, 'is_delete'), 'No is_delete() method defined for OmciTableField row object'
+        super(OmciTableField, self).__init__(
+            [
+            (IntField('table_length', 0), (self.cond_pkt, self.cond_pkt_val)),
+            (PadField(StrField('me_type_table', None), OmciTableField.PDU_SIZE),
+                (self.cond_pkt2, self.cond_pkt_val2))
+            ], tblfld)
+
+    PDU_SIZE = 29 # Baseline message set raw get-next PDU size
+    OmciGetResponseMessageId = 0x29 # Ugh circular dependency
+    OmciGetNextResponseMessageId = 0x3a # Ugh circular dependency
+
+    def cond_pkt(self, pkt):
+        return pkt is not None and pkt.message_id == self.OmciGetResponseMessageId
+
+    def cond_pkt_val(self, pkt, val):
+        return pkt is not None and pkt.message_id == self.OmciGetResponseMessageId
+
+    def cond_pkt2(self, pkt):
+        return pkt is not None and pkt.message_id == self.OmciGetNextResponseMessageId
+
+    def cond_pkt_val2(self, pkt, val):
+        return pkt is not None and pkt.message_id == self.OmciGetNextResponseMessageId
+
+    def to_json(self, new_values, old_values_json):
+        if not isinstance(new_values, list):
+            # If setting a scalar, augment the old table
+            new_values = [new_values]
+        else:
+            # If setting a vector of new values, erase all old values
+            old_values_json = None
+
+        key_value_pairs = dict()
+
+        old_table = self.load_json(old_values_json)
+        for old in old_table:
+            index = old.index()
+            key_value_pairs[index] = old
+        for new in new_values:
+            index = new.index()
+            if new.is_delete():
+                del key_value_pairs[index]
+            else:
+                key_value_pairs[index] = new
+
+        new_table = []
+        for k, v in sorted(key_value_pairs.iteritems()):
+            assert isinstance(v, self.default.cls), 'object type for Omci Table row object invalid'
+            new_table.append(v.fields)
+
+        str_values = json.dumps(new_table, separators=(',', ':'))
+
+        return str_values
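+
+    # Worked example (illustrative, assuming a row class exposing index() and
+    # is_delete()): given old_values_json holding a one-row table,
+    # to_json(new_row, old_values_json) replaces or augments the row keyed by
+    # new_row.index(), while passing a list of rows discards the old table
+    # contents entirely.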
+
+    def load_json(self, json_str):
+        if json_str is None:
+            json_str = '[]'
+        json_values = json.loads(json_str)
+        key_value_pairs = dict()
+        for json_value in json_values:
+            v = self.default.cls(**json_value)
+            index = v.index()
+            key_value_pairs[index] = v
+        table = []
+        for k, v in sorted(key_value_pairs.iteritems()):
+            table.append(v)
+        return table
\ No newline at end of file
diff --git a/python/extensions/omci/omci_frame.py b/python/extensions/omci/omci_frame.py
new file mode 100644
index 0000000..c0d7d4a
--- /dev/null
+++ b/python/extensions/omci/omci_frame.py
@@ -0,0 +1,207 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from scapy.fields import ByteField, PacketField, IntField
+from scapy.fields import ShortField, ConditionalField
+from scapy.packet import Packet
+
+from voltha.extensions.omci.omci_fields import FixedLenField
+from voltha.extensions.omci.omci_messages import OmciCreate, OmciDelete, \
+    OmciDeleteResponse, OmciSet, OmciSetResponse, OmciGet, OmciGetResponse, \
+    OmciGetAllAlarms, OmciGetAllAlarmsResponse, OmciGetAllAlarmsNext, \
+    OmciMibResetResponse, OmciMibReset, OmciMibUploadNextResponse, \
+    OmciMibUploadNext, OmciMibUploadResponse, OmciMibUpload, \
+    OmciGetAllAlarmsNextResponse, OmciAttributeValueChange, \
+    OmciTestResult, OmciAlarmNotification, \
+    OmciReboot, OmciRebootResponse, OmciGetNext, OmciGetNextResponse, \
+    OmciSynchronizeTime, OmciSynchronizeTimeResponse, OmciGetCurrentData, \
+    OmciGetCurrentDataResponse, OmciStartSoftwareDownload, OmciStartSoftwareDownloadResponse, \
+    OmciDownloadSection, OmciDownloadSectionLast, OmciDownloadSectionResponse, \
+    OmciEndSoftwareDownload, OmciEndSoftwareDownloadResponse, \
+    OmciActivateImage, OmciActivateImageResponse, \
+    OmciCommitImage, OmciCommitImageResponse
+
+from voltha.extensions.omci.omci_messages import OmciCreateResponse
+
+
+class OmciFrame(Packet):
+    name = "OmciFrame"
+    fields_desc = [
+        ShortField("transaction_id", 0),
+        ByteField("message_type", None),
+        ByteField("omci", 0x0a),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciCreate), align=36),
+            lambda pkt: pkt.message_type == OmciCreate.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciCreateResponse), align=36),
+            lambda pkt: pkt.message_type == OmciCreateResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciDelete), align=36),
+            lambda pkt: pkt.message_type == OmciDelete.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciDeleteResponse), align=36),
+            lambda pkt: pkt.message_type == OmciDeleteResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciSet), align=36),
+            lambda pkt: pkt.message_type == OmciSet.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciSetResponse), align=36),
+            lambda pkt: pkt.message_type == OmciSetResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGet), align=36),
+            lambda pkt: pkt.message_type == OmciGet.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGetResponse), align=36),
+            lambda pkt: pkt.message_type == OmciGetResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGetAllAlarms), align=36),
+            lambda pkt: pkt.message_type == OmciGetAllAlarms.message_id),
+        ConditionalField(FixedLenField(
+            PacketField(
+                "omci_message", None, OmciGetAllAlarmsResponse), align=36),
+                lambda pkt:
+                pkt.message_type == OmciGetAllAlarmsResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGetAllAlarmsNext), align=36),
+            lambda pkt: pkt.message_type == OmciGetAllAlarmsNext.message_id),
+        ConditionalField(FixedLenField(
+            PacketField(
+                "omci_message", None, OmciGetAllAlarmsNextResponse), align=36),
+                lambda pkt:
+                pkt.message_type == OmciGetAllAlarmsNextResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciMibUpload), align=36),
+            lambda pkt: pkt.message_type == OmciMibUpload.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciMibUploadResponse), align=36),
+            lambda pkt: pkt.message_type == OmciMibUploadResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciMibUploadNext), align=36),
+            lambda pkt:
+                pkt.message_type == OmciMibUploadNext.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciMibUploadNextResponse), align=36),
+            lambda pkt: pkt.message_type == OmciMibUploadNextResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciMibReset), align=36),
+            lambda pkt: pkt.message_type == OmciMibReset.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciMibResetResponse), align=36),
+            lambda pkt: pkt.message_type == OmciMibResetResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciAlarmNotification), align=36),
+            lambda pkt: pkt.message_type == OmciAlarmNotification.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciAttributeValueChange), align=36),
+            lambda pkt: pkt.message_type == OmciAttributeValueChange.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciTestResult), align=36),
+            lambda pkt: pkt.message_type == OmciTestResult.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciReboot), align=36),
+            lambda pkt: pkt.message_type == OmciReboot.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciRebootResponse), align=36),
+            lambda pkt: pkt.message_type == OmciRebootResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGetNext), align=36),
+            lambda pkt: pkt.message_type == OmciGetNext.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGetNextResponse), align=36),
+            lambda pkt: pkt.message_type == OmciGetNextResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciSynchronizeTime), align=36),
+            lambda pkt: pkt.message_type == OmciSynchronizeTime.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciSynchronizeTimeResponse), align=36),
+            lambda pkt: pkt.message_type == OmciSynchronizeTimeResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGetCurrentData), align=36),
+            lambda pkt: pkt.message_type == OmciGetCurrentData.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciGetCurrentDataResponse), align=36),
+            lambda pkt: pkt.message_type == OmciGetCurrentDataResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciStartSoftwareDownload), align=36),
+            lambda pkt: pkt.message_type == OmciStartSoftwareDownload.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciStartSoftwareDownloadResponse), align=36),
+            lambda pkt: pkt.message_type == OmciStartSoftwareDownloadResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciDownloadSection), align=36),
+            lambda pkt: pkt.message_type == OmciDownloadSection.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciDownloadSectionLast), align=36),
+            lambda pkt: pkt.message_type == OmciDownloadSectionLast.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciDownloadSectionResponse), align=36),
+            lambda pkt: pkt.message_type == OmciDownloadSectionResponse.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciEndSoftwareDownload), align=36),
+            lambda pkt: pkt.message_type == OmciEndSoftwareDownload.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciEndSoftwareDownloadResponse), align=36),
+            lambda pkt: pkt.message_type == OmciEndSoftwareDownloadResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciActivateImage), align=36),
+            lambda pkt: pkt.message_type == OmciActivateImage.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciActivateImageResponse), align=36),
+            lambda pkt: pkt.message_type == OmciActivateImageResponse.message_id),
+
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciCommitImage), align=36),
+            lambda pkt: pkt.message_type == OmciCommitImage.message_id),
+        ConditionalField(FixedLenField(
+            PacketField("omci_message", None, OmciCommitImageResponse), align=36),
+            lambda pkt: pkt.message_type == OmciCommitImageResponse.message_id),
+
+        # TODO add entries for remaining OMCI message types
+
+        IntField("omci_trailer", 0x00000028)
+    ]
+
+    # We needed to patch the do_dissect(...) method of Packet, because
+    # it wiped out already dissected conditional fields with None if they
+    # referred to the same field name. We marked the only new line of code
+    # with "Extra condition added".
+    def do_dissect(self, s):
+        raw = s
+        self.raw_packet_cache_fields = {}
+        for f in self.fields_desc:
+            if not s:
+                break
+            s, fval = f.getfield(self, s)
+            # We need to track fields with mutable values to discard
+            # .raw_packet_cache when needed.
+            if f.islist or f.holds_packets:
+                self.raw_packet_cache_fields[f.name] = f.do_copy(fval)
+            # Extra condition added
+            if fval is not None or f.name not in self.fields:
+                self.fields[f.name] = fval
+        assert(raw.endswith(s))
+        self.raw_packet_cache = raw[:-len(s)] if s else raw
+        self.explicit = 1
+        return s
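+
+# Usage sketch (not part of the original code): dissecting raw baseline OMCI
+# bytes selects the omci_message payload class via message_type, e.g.
+#
+#     frame = OmciFrame(raw_bytes)   # raw_bytes: one baseline OMCI PDU
+#     if frame.message_type == OmciCreate.message_id:
+#         create_msg = frame.omci_message   # an OmciCreate instance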
diff --git a/python/extensions/omci/omci_me.py b/python/extensions/omci/omci_me.py
new file mode 100644
index 0000000..a8a2d05
--- /dev/null
+++ b/python/extensions/omci/omci_me.py
@@ -0,0 +1,939 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+OMCI Managed Entity Frame support
+"""
+from voltha.extensions.omci.omci import *
+from voltha.extensions.omci.me_frame import MEFrame
+
+
+class CardholderFrame(MEFrame):
+    """
+    This managed entity represents fixed equipment slot configuration
+    for the ONU
+    """
+    def __init__(self, single, slot_number, attributes):
+        """
+        :param single:(bool) True if the ONU is a single piece of integrated equipment,
+                             False if the ONU contains pluggable equipment modules
+        :param slot_number: (int) slot number (0..254)
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        # Validate
+        MEFrame.check_type(single, bool)
+        MEFrame.check_type(slot_number, int)
+        if not 0 <= slot_number <= 254:
+            raise ValueError('slot_number should be 0..254')
+
+        entity_id = 256 + slot_number if single else slot_number
+
+        super(CardholderFrame, self).__init__(Cardholder, entity_id,
+                                              MEFrame._attr_to_data(attributes))
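+
+# Example of the entity_id rule above (not part of the original code):
+#
+#     CardholderFrame(True, 5, None)    # integrated ONU   -> entity_id 261
+#     CardholderFrame(False, 5, None)   # pluggable module -> entity_id 5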
+
+
+class CircuitPackFrame(MEFrame):
+    """
+    This managed entity models a real or virtual circuit pack that is equipped in
+    a real or virtual ONU slot.
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Its value is the same as that
+                                of the cardholder managed entity containing this
+                                circuit pack instance. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(CircuitPackFrame, self).__init__(CircuitPack, entity_id,
+                                               MEFrame._attr_to_data(attributes))
+
+
+class ExtendedVlanTaggingOperationConfigurationDataFrame(MEFrame):
+    """
+    This managed entity organizes data associated with VLAN tagging. Regardless
+    of its point of attachment, the specified tagging operations refer to the
+    upstream direction.
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(ExtendedVlanTaggingOperationConfigurationDataFrame,
+              self).__init__(ExtendedVlanTaggingOperationConfigurationData,
+                             entity_id,
+                             MEFrame._attr_to_data(attributes))
+
+
+class IpHostConfigDataFrame(MEFrame):
+    """
+    The IP host config data configures IPv4 based services offered on the ONU.
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(IpHostConfigDataFrame, self).__init__(IpHostConfigData,
+                                                    entity_id,
+                                                    MEFrame._attr_to_data(attributes))
+
+
+class GalEthernetProfileFrame(MEFrame):
+    """
+    This managed entity organizes data that describe the GTC adaptation layer
+    processing functions of the ONU for Ethernet services.
+    """
+    def __init__(self, entity_id, max_gem_payload_size=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param max_gem_payload_size: (int) This attribute defines the maximum payload
+                                     size generated in the associated GEM interworking
+                                     termination point managed entity. (0..65535)
+        """
+        MEFrame.check_type(max_gem_payload_size, (int, type(None)))
+        if max_gem_payload_size is not None and not 0 <= max_gem_payload_size <= 0xFFFF:  # TODO: verify min/max
+            raise ValueError('max_gem_payload_size should be 0..0xFFFF')
+
+        data = None if max_gem_payload_size is None else\
+            {
+                'max_gem_payload_size': max_gem_payload_size
+            }
+        super(GalEthernetProfileFrame, self).__init__(GalEthernetProfile,
+                                                      entity_id,
+                                                      data)
+
+
+class GemInterworkingTpFrame(MEFrame):
+    """
+    An instance of this managed entity represents a point in the ONU where the
+    interworking of a bearer service (usually Ethernet) to the GEM layer takes
+    place.
+    """
+    def __init__(self, entity_id,
+                 gem_port_network_ctp_pointer=None,
+                 interworking_option=None,
+                 service_profile_pointer=None,
+                 interworking_tp_pointer=None,
+                 pptp_counter=None,
+                 gal_profile_pointer=None,
+                 attributes=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param gem_port_network_ctp_pointer: (int) This attribute points to an instance of
+                                                 the GEM port network CTP. (0..65535)
+
+        :param interworking_option: (int) This attribute identifies the type
+                                of non-GEM function that is being interworked.
+                                The options are:
+                                    0 Circuit-emulated TDM
+                                    1 MAC bridged LAN
+                                    2 Reserved
+                                    3 Reserved
+                                    4 Video return path
+                                    5 IEEE 802.1p mapper
+                                    6 Downstream broadcast
+                                    7 MPLS PW TDM service
+
+        :param service_profile_pointer: (int) This attribute points to an instance of
+                                              a service profile.
+                            CES service profile                 if interworking option = 0
+                            MAC bridge service profile          if interworking option = 1
+                            Video return path service profile   if interworking option = 4
+                            IEEE 802.1p mapper service profile  if interworking option = 5
+                            Null pointer                        if interworking option = 6
+                            CES service profile                 if interworking option = 7
+
+        :param interworking_tp_pointer: (int) This attribute is used for the circuit
+                                              emulation service and IEEE 802.1p mapper
+                                              service without a MAC bridge.
+
+        :param gal_profile_pointer: (int) This attribute points to an instance of
+                                              a service profile.
+
+        :param attributes: (basestring, list, set, dict) additional ME attributes
+                           not specifically specified as a parameter. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        # Validate
+        self.check_type(gem_port_network_ctp_pointer, (int, type(None)))
+        self.check_type(interworking_option, (int, type(None)))
+        self.check_type(service_profile_pointer, (int, type(None)))
+        self.check_type(interworking_tp_pointer, (int, type(None)))
+        self.check_type(pptp_counter, (int, type(None)))
+        self.check_type(gal_profile_pointer, (int, type(None)))
+
+        if gem_port_network_ctp_pointer is not None and not 0 <= gem_port_network_ctp_pointer <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('gem_port_network_ctp_pointer should be 0..0xFFFE')
+
+        if interworking_option is not None and not 0 <= interworking_option <= 7:
+            raise ValueError('interworking_option should be 0..7')
+
+        if service_profile_pointer is not None and not 0 <= service_profile_pointer <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('service_profile_pointer should be 0..0xFFFE')
+
+        if interworking_tp_pointer is not None and not 0 <= interworking_tp_pointer <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('interworking_tp_pointer should be 0..0xFFFE')
+
+        if pptp_counter is not None and not 0 <= pptp_counter <= 255:  # TODO: Verify max
+            raise ValueError('pptp_counter should be 0..255')
+
+        if gal_profile_pointer is not None and not 0 <= gal_profile_pointer <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('gal_profile_pointer should be 0..0xFFFE')
+
+        data = MEFrame._attr_to_data(attributes)
+
+        if gem_port_network_ctp_pointer is not None or \
+                interworking_option is not None or \
+                service_profile_pointer is not None or \
+                interworking_tp_pointer is not None or \
+                gal_profile_pointer is not None:
+
+            data = data or dict()
+
+            if gem_port_network_ctp_pointer is not None:
+                data['gem_port_network_ctp_pointer'] = gem_port_network_ctp_pointer
+
+            if interworking_option is not None:
+                data['interworking_option'] = interworking_option
+
+            if service_profile_pointer is not None:
+                data['service_profile_pointer'] = service_profile_pointer
+
+            if interworking_tp_pointer is not None:
+                data['interworking_tp_pointer'] = interworking_tp_pointer
+
+            if gal_profile_pointer is not None:
+                data['gal_profile_pointer'] = gal_profile_pointer
+
+        super(GemInterworkingTpFrame, self).__init__(GemInterworkingTp,
+                                                     entity_id,
+                                                     data)
+
+
+class GemPortNetworkCtpFrame(MEFrame):
+    """
+    This managed entity represents the termination of a GEM port on an ONU.
+    """
+    def __init__(self, entity_id, port_id=None, tcont_id=None,
+                 direction=None, upstream_tm=None, attributes=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param port_id: (int) This attribute is the port-ID of the GEM port associated
+                              with this CTP
+
+        :param tcont_id: (int) This attribute points to a T-CONT instance
+
+        :param direction: (string) Data direction.  Valid values are:
+                                   'upstream'       - UNI-to-ANI
+                                   'downstream'     - ANI-to-UNI
+                                   'bi-directional' - both directions
+
+        :param upstream_tm: (int) If the traffic management option attribute in
+                                  the ONU-G ME is 0 (priority controlled) or 2
+                                  (priority and rate controlled), this pointer
+                                  specifies the priority queue ME serving this GEM
+                                  port network CTP. If the traffic management
+                                  option attribute is 1 (rate controlled), this
+                                  attribute redundantly points to the T-CONT serving
+                                  this GEM port network CTP.
+
+        :param attributes: (basestring, list, set, dict) additional ME attributes
+                           not specifically specified as a parameter. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        _directions = {"upstream": 1, "downstream": 2, "bi-directional": 3}
+
+        # Validate
+        self.check_type(port_id, (int, type(None)))
+        self.check_type(tcont_id, (int, type(None)))
+        self.check_type(direction, (basestring, type(None)))
+        self.check_type(upstream_tm, (int, type(None)))
+
+        if port_id is not None and not 0 <= port_id <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('port_id should be 0..0xFFFE')
+
+        if tcont_id is not None and not 0 <= tcont_id <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('tcont_id should be 0..0xFFFE')
+
+        if direction is not None and str(direction).lower() not in _directions:
+            raise ValueError('direction should be one of {}'.format(_directions.keys()))
+
+        if upstream_tm is not None and not 0 <= upstream_tm <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('upstream_tm should be 0..0xFFFE')
+
+        data = MEFrame._attr_to_data(attributes)
+
+        if port_id is not None or tcont_id is not None or\
+                direction is not None or upstream_tm is not None:
+
+            data = data or dict()
+
+            if port_id is not None:
+                data['port_id'] = port_id
+            if tcont_id is not None:
+                data['tcont_pointer'] = tcont_id
+            if direction is not None:
+                data['direction'] = _directions[str(direction).lower()]
+            if upstream_tm is not None:
+                data['traffic_management_pointer_upstream'] = upstream_tm
+
+        super(GemPortNetworkCtpFrame, self).__init__(GemPortNetworkCtp,
+                                                     entity_id,
+                                                     data)
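+
+# Usage sketch (illustrative values, not part of the original code):
+#
+#     GemPortNetworkCtpFrame(entity_id=0x100,
+#                            port_id=1021,
+#                            tcont_id=0x8001,
+#                            direction='bi-directional',
+#                            upstream_tm=0x8000)
+#
+# builds the attribute data for a create/set of the GEM port network CTP.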
+
+
+class Ieee8021pMapperServiceProfileFrame(MEFrame):
+    """
+    This managed entity associates the priorities of IEEE 802.1p [IEEE
+    802.1D] priority tagged frames with specific connections.
+    """
+    def __init__(self, entity_id, tp_pointer=None, interwork_tp_pointers=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param tp_pointer: (int) This attribute points to an instance of the
+                                 associated termination point. (0..65535)
+
+        :param interwork_tp_pointers: (list) List of 1 to 8 interworking termination
+                                   point IDs. The first entry is assigned
+                                   to p-bit priority 0. If fewer than 8 IDs
+                                   are provided, the last ID is used for
+                                   the remaining priorities.
+        """
+        if tp_pointer is None and interwork_tp_pointers is None:
+            data = dict(
+                    tp_pointer=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_0=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_1=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_2=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_3=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_4=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_5=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_6=OmciNullPointer,
+                    interwork_tp_pointer_for_p_bit_priority_7=OmciNullPointer
+                )
+        else:
+            self.check_type(tp_pointer, (int, type(None)))
+            self.check_type(interwork_tp_pointers, (list, type(None)))
+
+            data = dict()
+
+            if tp_pointer is not None:
+                data['tp_pointer'] = tp_pointer
+
+            if interwork_tp_pointers is not None:
+                assert all(isinstance(tp, int) and 0 <= tp <= 0xFFFF
+                           for tp in interwork_tp_pointers),\
+                    'Interworking TP IDs must be 0..0xFFFF'
+                assert 1 <= len(interwork_tp_pointers) <= 8, \
+                    'Invalid number of Interworking TP IDs. Must be 1..8'
+
+                # Note: 'data' must not be re-created here, or a tp_pointer
+                # value set above would be discarded.
+                for pbit in range(0, len(interwork_tp_pointers)):
+                    data['interwork_tp_pointer_for_p_bit_priority_{}'.format(pbit)] = \
+                        interwork_tp_pointers[pbit]
+
+                for pbit in range(len(interwork_tp_pointers), 8):
+                    data['interwork_tp_pointer_for_p_bit_priority_{}'.format(pbit)] = \
+                        interwork_tp_pointers[len(interwork_tp_pointers) - 1]
+
+        super(Ieee8021pMapperServiceProfileFrame, self).__init__(Ieee8021pMapperServiceProfile,
+                                                                 entity_id,
+                                                                 data)
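+
+# Example of the p-bit fill rule above (not part of the original code):
+# interwork_tp_pointers=[0x100, 0x101] assigns 0x100 to p-bit priority 0,
+# 0x101 to priority 1, and repeats 0x101 for priorities 2..7.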
+
+
+class MacBridgePortConfigurationDataFrame(MEFrame):
+    """
+    This managed entity represents the ONU as equipment.
+    """
+    def __init__(self, entity_id, bridge_id_pointer=None, port_num=None,
+                 tp_type=None, tp_pointer=None, attributes=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param bridge_id_pointer: (int) This attribute points to an instance of the
+                                        MAC bridge service profile. (0..65535)
+
+        :param port_num: (int) This attribute is the bridge port number. (0..255)
+
+        :param tp_type: (int) This attribute identifies the type of termination point
+                              associated with this MAC bridge port. Valid values are:
+                        1  Physical path termination point Ethernet UNI
+                        2  Interworking VCC termination point
+                        3  IEEE 802.1p mapper service profile
+                        4  IP host config data or IPv6 host config data
+                        5  GEM interworking termination point
+                        6  Multicast GEM interworking termination point
+                        7  Physical path termination point xDSL UNI part 1
+                        8  Physical path termination point VDSL UNI
+                        9  Ethernet flow termination point
+                        10 Reserved
+                        11 Virtual Ethernet interface point
+                        12 Physical path termination point MoCA UNI
+
+        :param tp_pointer: (int) This attribute points to the termination point
+                                 associated with this MAC bridge port. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) additional ME attributes
+                           not specifically specified as a parameter. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        # Validate
+        self.check_type(bridge_id_pointer, (int, type(None)))
+        self.check_type(port_num, (int, type(None)))
+        self.check_type(tp_type, (int, type(None)))
+        self.check_type(tp_pointer, (int, type(None)))
+
+        if bridge_id_pointer is not None and not 0 <= bridge_id_pointer <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('bridge_id_pointer should be 0..0xFFFE')
+
+        if port_num is not None and not 0 <= port_num <= 255:
+            raise ValueError('port_num should be 0..255')       # TODO: Verify min,max
+
+        if tp_type is not None and not 1 <= tp_type <= 12:
+            raise ValueError('tp_type should be 1..12')
+
+        if tp_pointer is not None and not 0 <= tp_pointer <= 0xFFFE:  # TODO: Verify max
+            raise ValueError('tp_pointer should be 0..0xFFFE')
+
+        data = MEFrame._attr_to_data(attributes)
+
+        if bridge_id_pointer is not None or \
+                port_num is not None or \
+                tp_type is not None or \
+                tp_pointer is not None:
+
+            data = data or dict()
+
+            if bridge_id_pointer is not None:
+                data['bridge_id_pointer'] = bridge_id_pointer
+
+            if port_num is not None:
+                data['port_num'] = port_num
+
+            if tp_type is not None:
+                data['tp_type'] = tp_type
+
+            if tp_pointer is not None:
+                data['tp_pointer'] = tp_pointer
+
+        super(MacBridgePortConfigurationDataFrame, self).\
+            __init__(MacBridgePortConfigurationData, entity_id, data)
+
+
+class MacBridgeServiceProfileFrame(MEFrame):
+    """
+    This managed entity models a MAC bridge in its entirety; any number
+    of ports may be associated with the bridge through pointers to the
+    MAC bridge service profile managed entity.
+    """
+    def __init__(self, entity_id, attributes=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(MacBridgeServiceProfileFrame, self).__init__(MacBridgeServiceProfile,
+                                                           entity_id,
+                                                           MEFrame._attr_to_data(attributes))
+
+
+class OntGFrame(MEFrame):
+    """
+    This managed entity represents the ONU as equipment.
+    """
+    def __init__(self, attributes=None):
+        """
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(OntGFrame, self).__init__(OntG, 0,
+                                        MEFrame._attr_to_data(attributes))
+
+
+class Ont2GFrame(MEFrame):
+    """
+    This managed entity contains additional attributes associated with a PON ONU.
+    """
+    def __init__(self, attributes=None):
+        """
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        # Only one managed entity instance (Entity ID=0)
+        super(Ont2GFrame, self).__init__(Ont2G, 0,
+                                         MEFrame._attr_to_data(attributes))
+
+
+class PptpEthernetUniFrame(MEFrame):
+    """
+    This managed entity represents the point at an Ethernet UNI where the physical path
+    terminates and Ethernet physical level functions are performed.
+    """
+    def __init__(self, entity_id, attributes=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(PptpEthernetUniFrame, self).__init__(PptpEthernetUni, entity_id,
+                                                   MEFrame._attr_to_data(attributes))
+
+
+class VeipUniFrame(MEFrame):
+    """
+    This managed entity represents the point at which a virtual UNI interfaces
+    to a non-OMCI management domain. This is typically seen in RG+ONU
+    all-in-one type devices.
+    """
+    def __init__(self, entity_id, attributes=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For create/set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(VeipUniFrame, self).__init__(VeipUni, entity_id,
+                                           MEFrame._attr_to_data(attributes))
+
+
+class SoftwareImageFrame(MEFrame):
+    """
+    This managed entity models an executable software image stored in the ONU.
+    """
+    def __init__(self, entity_id):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+        """
+        super(SoftwareImageFrame, self).__init__(SoftwareImage, entity_id, None)
+
+
+class TcontFrame(MEFrame):
+    """
+    An instance of the traffic container managed entity T-CONT represents a
+    logical connection group associated with a G-PON PLOAM layer alloc-ID.
+    """
+    def __init__(self, entity_id, alloc_id=None, policy=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param alloc_id: (int) This attribute links the T-CONT with the alloc-ID
+                               assigned by the OLT in the assign_alloc-ID PLOAM
+                               message (0..0xFFF) or 0xFFFF to mark as free
+
+        :param policy: (int) This attribute indicates the T-CONT's traffic scheduling
+                             policy. Valid values:
+                                0 - Null
+                                1 - Strict priority
+                                2 - WRR - Weighted round robin
+        """
+        # Validate
+        self.check_type(alloc_id, (int, type(None)))
+        self.check_type(policy, (int, type(None)))
+
+        if alloc_id is not None and not (0 <= alloc_id <= 0xFFF or alloc_id == 0xFFFF):
+            raise ValueError('alloc_id should be 0..0xFFF or 0xFFFF to mark it as free')
+
+        if policy is not None and not 0 <= policy <= 2:
+            raise ValueError('policy should be 0..2')
+
+        if alloc_id is None and policy is None:
+            data = None
+        else:
+            data = dict()
+
+            if alloc_id is not None:
+                data['alloc_id'] = alloc_id
+
+            if policy is not None:
+                data['policy'] = policy
+
+        super(TcontFrame, self).__init__(Tcont, entity_id, data)
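+
+# Usage sketch (illustrative alloc-ID, not part of the original code):
+#
+#     TcontFrame(0x8001, alloc_id=1024, policy=1)   # strict priority
+#     TcontFrame(0x8001, alloc_id=0xFFFF)           # mark the T-CONT as free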
+
+
+class VlanTaggingFilterDataFrame(MEFrame):
+    """
+    This managed entity organizes data associated with VLAN filtering on a
+    MAC bridge port.
+    """
+    def __init__(self, entity_id, vlan_tcis=None, forward_operation=None):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. (0..65535)
+
+        :param vlan_tcis: (list) This attribute is a list of provisioned TCI values
+                                 for the bridge port. (0..0xFFFF)
+
+        :param forward_operation: (int) Tagging filter operation to perform (0..0x21).
+                                        See ITU-T G.988 for the operation codes.
+
+        """
+        # Validate
+        self.check_type(vlan_tcis, (list, type(None)))
+        self.check_type(forward_operation, (int, type(None)))
+
+        if forward_operation is not None and not 0 <= forward_operation <= 0x21:
+            raise ValueError('forward_operation should be 0..0x21')
+
+        if vlan_tcis is None and forward_operation is None:
+            data = None
+
+        else:
+            data = dict()
+
+            if vlan_tcis is not None:
+                num_tcis = len(vlan_tcis)
+
+                assert 0 <= num_tcis <= 12, 'Number of VLAN TCI values is 0..12'
+                assert all(isinstance(tci, int) and 0 <= tci <= 0xFFFF
+                           for tci in vlan_tcis), "VLAN TCI's are 0..0xFFFF"
+
+                if num_tcis > 0:
+                    vlan_filter_list = [0] * 12
+                    for index in range(0, num_tcis):
+                        vlan_filter_list[index] = vlan_tcis[index]
+
+                    data['vlan_filter_list'] = vlan_filter_list
+                data['number_of_entries'] = num_tcis
+
+            if forward_operation is not None:
+                assert 0 <= forward_operation <= 0x21, \
+                    'forwarding_operation must be 0x00..0x21'
+                data['forward_operation'] = forward_operation
+
+        super(VlanTaggingFilterDataFrame, self).__init__(VlanTaggingFilterData,
+                                                         entity_id,
+                                                         data)
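+
+# Usage sketch (illustrative TCI value, not part of the original code):
+#
+#     VlanTaggingFilterDataFrame(0x2102,
+#                                vlan_tcis=[0x0401],
+#                                forward_operation=0x10)
+#
+# pads vlan_filter_list out to 12 entries and sets number_of_entries to 1.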
+
+
+class OntDataFrame(MEFrame):
+    """
+    This managed entity models the MIB itself
+    """
+    def __init__(self, mib_data_sync=None, sequence_number=None, ignore_arc=None):
+        """
+        For 'get', 'MIB reset', and 'MIB upload', pass no value
+        For 'set' actions, pass a mib_data_sync value (0..255)
+        For 'MIB upload next' and 'Get all alarms next', pass a sequence_number value (0..65535)
+        For 'Get all alarms', set ignore_arc to True to get all alarms regardless
+                              of ARC status, or False to get all alarms not currently
+                              under ARC
+
+        :param mib_data_sync: (int) This attribute is used to check the alignment
+                                    of the MIB of the ONU with the corresponding MIB
+                                    in the OLT. (0..0xFF)
+        :param sequence_number: (int) This is used for MIB Upload Next (0..0xFFFF)
+        :param ignore_arc: (bool) None for all but 'get_all_alarm' commands
+        """
+        self.check_type(mib_data_sync, (int, type(None)))
+        if mib_data_sync is not None and not 0 <= mib_data_sync <= 0xFF:
+            raise ValueError('mib_data_sync should be 0..0xFF')
+
+        if sequence_number is not None and not 0 <= sequence_number <= 0xFFFF:
+            raise ValueError('sequence_number should be 0..0xFFFF')
+
+        if ignore_arc is not None and not isinstance(ignore_arc, bool):
+            raise TypeError('ignore_arc should be a boolean')
+
+        if mib_data_sync is not None:
+            # Note: Currently the Scapy decode/encode is 16-bits since we need
+            #       the data field that large in order to support MIB and Alarm Upload Next
+            #       commands.  Push our 8-bit MDS value into the upper 8-bits so that
+            #       it is encoded properly into the ONT_Data 'set' frame
+            data = {'mib_data_sync': mib_data_sync << 8}
+
+        elif sequence_number is not None:
+            data = {'mib_data_sync': sequence_number}
+
+        elif ignore_arc is not None:
+            data = {'mib_data_sync': 0 if ignore_arc else 1}
+
+        else:
+            data = {'mib_data_sync'}    # Attribute name set only, to make Gets happy
+
+        super(OntDataFrame, self).__init__(OntData, 0, data)
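+
+# Example of the MDS encoding note above (not part of the original code):
+# OntDataFrame(mib_data_sync=5) produces {'mib_data_sync': 0x0500}, pushing
+# the 8-bit MDS value into the upper byte of the 16-bit encoded field.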
+
+
+class OmciFrame(MEFrame):
+    """
+    This managed entity describes the ONU's general level of support for OMCI managed
+    entities and messages. This ME is not included in a MIB upload.
+    """
+    def __init__(self, me_type_table=None, message_type_table=None):
+        """
+        For 'get' requests, select the table you wish to count by
+        setting either me_type_table or message_type_table to
+        a boolean 'True' value.
+
+        For 'get-next' requests, set the sequence number for the
+        table you wish to retrieve by setting either me_type_table
+        or message_type_table to an integer value.
+        """
+        if not isinstance(me_type_table, (bool, int, type(None))):
+            raise TypeError('Parameters must be a boolean or integer')
+
+        if not isinstance(message_type_table, (bool, int, type(None))):
+            raise TypeError('Parameters must be a boolean or integer')
+
+        if me_type_table is not None:
+            if isinstance(me_type_table, bool):
+                data = {'me_type_table'}
+            else:
+                data = {'me_type_table': me_type_table}
+
+        elif message_type_table is not None:
+            if isinstance(message_type_table, bool):
+                data = {'message_type_table'}
+            else:
+                data = {'message_type_table': message_type_table}
+        else:
+            raise NotImplementedError('Unknown request')
+
+        super(OmciFrame, self).__init__(Omci, 0, data)
+
+
+class EthernetPMMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects some of the performance monitoring data for a physical
+    Ethernet interface
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance
+                                of the physical path termination point Ethernet UNI
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(EthernetPMMonitoringHistoryDataFrame, self).__init__(
+            EthernetPMMonitoringHistoryData,
+            entity_id,
+            MEFrame._attr_to_data(attributes))
+
+
+class FecPerformanceMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects performance monitoring data associated with PON
+    downstream FEC counters.
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance of
+                                the ANI-G
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(FecPerformanceMonitoringHistoryDataFrame, self).__init__(
+            FecPerformanceMonitoringHistoryData,
+            entity_id,
+            MEFrame._attr_to_data(attributes))
+
+
+class EthernetFrameDownstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects performance monitoring data associated with downstream
+    Ethernet frame delivery. It is based on the Etherstats group of [IETF RFC 2819].
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance of
+                                a MAC bridge port configuration data
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided, for
+                           deletes None may be specified.
+        """
+        super(EthernetFrameDownstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+            EthernetFrameDownstreamPerformanceMonitoringHistoryData,
+            entity_id,
+            MEFrame._attr_to_data(attributes))
+
+
+class EthernetFrameUpstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects performance monitoring data associated with upstream
+    Ethernet frame delivery. It is based on the Etherstats group of [IETF RFC 2819].
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance of
+                                a MAC bridge port configuration data.
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets,
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided; for
+                           deletes, None may be specified.
+        """
+        super(EthernetFrameUpstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+            EthernetFrameUpstreamPerformanceMonitoringHistoryData,
+            entity_id,
+            MEFrame._attr_to_data(attributes))
+
+
+class GemPortNetworkCtpMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects GEM frame performance monitoring data associated
+    with a GEM port network CTP
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance
+                                of the GEM port network CTP.
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets,
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided; for
+                           deletes, None may be specified.
+        """
+        super(GemPortNetworkCtpMonitoringHistoryDataFrame, self).__init__(
+            GemPortNetworkCtpMonitoringHistoryData,
+            entity_id,
+            MEFrame._attr_to_data(attributes))
+
+
+class XgPonTcPerformanceMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects performance monitoring data associated with
+    the XG-PON transmission convergence layer, as defined in [ITU-T G.987.3]
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance of
+                                the ANI-G.
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets,
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided; for
+                           deletes, None may be specified.
+        """
+        super(XgPonTcPerformanceMonitoringHistoryDataFrame, self).__init__(
+            XgPonTcPerformanceMonitoringHistoryData, entity_id,
+            MEFrame._attr_to_data(attributes))
+
+
+class XgPonDownstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects performance monitoring data associated with
+    the XG-PON transmission convergence layer, as defined in [ITU-T G.987.3]
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance of
+                                the ANI-G.
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets,
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided; for
+                           deletes, None may be specified.
+        """
+        super(XgPonDownstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+            XgPonDownstreamPerformanceMonitoringHistoryData,
+            entity_id,
+            MEFrame._attr_to_data(attributes))
+
+
+class XgPonUpstreamPerformanceMonitoringHistoryDataFrame(MEFrame):
+    """
+    This managed entity collects performance monitoring data associated with
+    the XG-PON transmission convergence layer, as defined in [ITU-T G.987.3]
+    """
+    def __init__(self, entity_id, attributes):
+        """
+        :param entity_id: (int) This attribute uniquely identifies each instance of
+                                this managed entity. Through an identical ID, this
+                                managed entity is implicitly linked to an instance of
+                                the ANI-G.
+
+        :param attributes: (basestring, list, set, dict) attributes. For gets,
+                           a string, list, or set can be provided. For set
+                           operations, a dictionary should be provided; for
+                           deletes, None may be specified.
+        """
+        super(XgPonUpstreamPerformanceMonitoringHistoryDataFrame, self).__init__(
+            XgPonUpstreamPerformanceMonitoringHistoryData,
+            entity_id,
+            MEFrame._attr_to_data(attributes))
diff --git a/python/extensions/omci/omci_messages.py b/python/extensions/omci/omci_messages.py
new file mode 100644
index 0000000..f6559a3
--- /dev/null
+++ b/python/extensions/omci/omci_messages.py
@@ -0,0 +1,551 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from scapy.fields import ByteField, ThreeBytesField, StrFixedLenField, ConditionalField, IntField, Field
+from scapy.fields import ShortField, BitField
+from scapy.packet import Packet
+
+from voltha.extensions.omci.omci_defs import AttributeAccess, OmciSectionDataSize
+from voltha.extensions.omci.omci_fields import OmciTableField
+import voltha.extensions.omci.omci_entities as omci_entities
+
+
+log = structlog.get_logger()
+
+
+class OmciData(Field):
+
+    __slots__ = Field.__slots__ + ['_entity_class']
+
+    def __init__(self, name, entity_class="entity_class"):
+        Field.__init__(self, name=name, default=None, fmt='s')
+        self._entity_class = entity_class
+
+    def addfield(self, pkt, s, val):
+        class_id = getattr(pkt, self._entity_class)
+        entity_class = omci_entities.entity_id_to_class_map.get(class_id)
+        for attribute in entity_class.attributes:
+            if AttributeAccess.SetByCreate not in attribute.access:
+                continue
+            if attribute.field.name == 'managed_entity_id':
+                continue
+            fld = attribute.field
+            s = fld.addfield(pkt, s, val.get(fld.name, fld.default))
+        return s
+
+    def getfield(self, pkt, s):
+        """Extract an internal value from a string"""
+        class_id = getattr(pkt, self._entity_class)
+        entity_class = omci_entities.entity_id_to_class_map.get(class_id)
+        data = {}
+        for attribute in entity_class.attributes:
+            if AttributeAccess.SetByCreate not in attribute.access:
+                continue
+            if attribute.field.name == 'managed_entity_id':
+                continue
+            fld = attribute.field
+            s, value = fld.getfield(pkt, s)
+            data[fld.name] = value
+        return s, data
+
+
+class OmciMaskedData(Field):
+
+    __slots__ = Field.__slots__ + ['_entity_class', '_attributes_mask']
+
+    def __init__(self, name, entity_class="entity_class",
+                 attributes_mask="attributes_mask"):
+        Field.__init__(self, name=name, default=None, fmt='s')
+        self._entity_class = entity_class
+        self._attributes_mask = attributes_mask
+
+    def addfield(self, pkt, s, val):
+        class_id = getattr(pkt, self._entity_class)
+        attribute_mask = getattr(pkt, self._attributes_mask)
+        entity_class = omci_entities.entity_id_to_class_map.get(class_id)
+        indices = entity_class.attribute_indices_from_mask(attribute_mask)
+        for index in indices:
+            fld = entity_class.attributes[index].field
+            s = fld.addfield(pkt, s, val[fld.name])
+        return s
+
+    def getfield(self, pkt, s):
+        """Extract an internal value from a string"""
+        class_id = getattr(pkt, self._entity_class)
+        attribute_mask = getattr(pkt, self._attributes_mask)
+        entity_class = omci_entities.entity_id_to_class_map[class_id]
+        indices = entity_class.attribute_indices_from_mask(attribute_mask)
+        data = {}
+        table_attribute_mask = 0
+        for index in indices:
+            try:
+                fld = entity_class.attributes[index].field
+            except IndexError as e:
+                log.error("attribute-decode-failure", attribute_index=index,
+                          entity_class=entity_class, e=e)
+                continue
+            try:
+                s, value = fld.getfield(pkt, s)
+            except Exception as _e:
+                raise
+            if isinstance(pkt, OmciGetResponse) and isinstance(fld, OmciTableField):
+                data[fld.name + '_size'] = value
+                table_attribute_mask = table_attribute_mask | (1 << index)
+            else:
+                data[fld.name] = value
+        if table_attribute_mask:
+            data['table_attribute_mask'] = table_attribute_mask
+        return s, data
+
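+# Decode sketch for OmciMaskedData.getfield() above (hedged; assumes the MSB of
+# the 16-bit attributes_mask corresponds to attribute index 1, as the mask
+# helpers in omci_entities appear to implement):
+#
+#   indices = entity_class.attribute_indices_from_mask(0x8000)  # e.g. [1]
+#
+# Table attributes are special-cased: on an OmciGetResponse only their size is
+# decoded, reported as '<name>_size', and their indices are accumulated into
+# 'table_attribute_mask' for a follow-up OmciGetNext sequence.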
+
+class OmciMessage(Packet):
+    name = "OmciMessage"
+    message_id = None  # OMCI message_type value, filled by derived classes
+    fields_desc = []
+
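+# A hedged note on the message_id values below: per the ITU-T G.988 message
+# type encoding (as we read it), bit 6 (0x40) is the AR flag set on requests
+# that expect an acknowledgement, bit 5 (0x20) is the AK flag set on responses,
+# and the low five bits identify the message type proper.  For example:
+#
+#   assert OmciCreate.message_id == 0x44          # AR=1, type 4 (Create)
+#   assert OmciCreateResponse.message_id == 0x24  # AK=1, type 4 (Create)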
+
+class OmciCreate(OmciMessage):
+    name = "OmciCreate"
+    message_id = 0x44
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        OmciData("data")
+    ]
+
+
+class OmciCreateResponse(OmciMessage):
+    name = "OmciCreateResponse"
+    message_id = 0x24
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", None),
+        ByteField("success_code", 0),
+        ShortField("parameter_error_attributes_mask", None),
+    ]
+
+
+class OmciDelete(OmciMessage):
+    name = "OmciDelete"
+    message_id = 0x46
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", None),
+    ]
+
+
+class OmciDeleteResponse(OmciMessage):
+    name = "OmciDeleteResponse"
+    message_id = 0x26
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", None),
+        ByteField("success_code", 0),
+    ]
+
+
+class OmciSet(OmciMessage):
+    name = "OmciSet"
+    message_id = 0x48
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ShortField("attributes_mask", None),
+        OmciMaskedData("data")
+    ]
+
+
+class OmciSetResponse(OmciMessage):
+    name = "OmciSetResponse"
+    message_id = 0x28
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", None),
+        ByteField("success_code", 0),
+        ShortField("unsupported_attributes_mask", None),
+        ShortField("failed_attributes_mask", None),
+    ]
+
+
+class OmciGet(OmciMessage):
+    name = "OmciGet"
+    message_id = 0x49
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ShortField("attributes_mask", None)
+    ]
+
+
+class OmciGetResponse(OmciMessage):
+    name = "OmciGetResponse"
+    message_id = 0x29
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ByteField("success_code", 0),
+        ShortField("attributes_mask", None),
+        ConditionalField(
+            OmciMaskedData("data"), lambda pkt: pkt.success_code == 0)
+    ]
+
+
+class OmciGetAllAlarms(OmciMessage):
+    name = "OmciGetAllAlarms"
+    message_id = 0x4b
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),  # Always 0 (ONT instance)
+        ByteField("alarm_retrieval_mode", 0)  # 0 or 1
+    ]
+
+
+class OmciGetAllAlarmsResponse(OmciMessage):
+    name = "OmciGetAllAlarmsResponse"
+    message_id = 0x2b
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),
+        ShortField("number_of_commands", None)
+    ]
+
+
+class OmciGetAllAlarmsNext(OmciMessage):
+    name = "OmciGetAllAlarmsNext"
+    message_id = 0x4c
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),
+        ShortField("command_sequence_number", None)
+    ]
+
+
+class OmciGetAllAlarmsNextResponse(OmciMessage):
+    name = "OmciGetAllAlarmsNextResponse"
+    message_id = 0x2c
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),
+        ShortField("alarmed_entity_class", None),
+        ShortField("alarmed_entity_id", 0),
+        BitField("alarm_bit_map", None, 224)
+    ]
+
+
+class OmciMibUpload(OmciMessage):
+    name = "OmciMibUpload"
+    message_id = 0x4d
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),
+    ]
+
+
+class OmciMibUploadResponse(OmciMessage):
+    name = "OmciMibUploadResponse"
+    message_id = 0x2d
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),
+        ShortField("number_of_commands", None)
+    ]
+
+
+class OmciMibUploadNext(OmciMessage):
+    name = "OmciMibUploadNext"
+    message_id = 0x4e
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),
+        ShortField("command_sequence_number", None)
+    ]
+
+
+class OmciMibUploadNextResponse(OmciMessage):
+    name = "OmciMibUploadNextResponse"
+    message_id = 0x2e
+    fields_desc = [
+        ShortField("entity_class", 2),  # Always 2 (ONT data)
+        ShortField("entity_id", 0),
+        ShortField("object_entity_class", None),
+        ShortField("object_entity_id", 0),
+        ShortField("object_attributes_mask", None),
+        OmciMaskedData("object_data", entity_class='object_entity_class',
+                       attributes_mask='object_attributes_mask')
+    ]
+
+
+class OmciMibReset(OmciMessage):
+    name = "OmciMibReset"
+    message_id = 0x4f
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0)
+    ]
+
+
+class OmciMibResetResponse(OmciMessage):
+    name = "OmciMibResetResponse"
+    message_id = 0x2f
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ByteField("success_code", 0)
+    ]
+
+
+class OmciAlarmNotification(OmciMessage):
+    name = "AlarmNotification"
+    message_id = 0x10
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        BitField("alarm_bit_map", 0, 224),
+        ThreeBytesField("zero_padding", 0),
+        ByteField("alarm_sequence_number", None)
+    ]
+
+
+class OmciAttributeValueChange(OmciMessage):
+    name = "AttributeValueChange"
+    message_id = 0x11
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ShortField("attributes_mask", None),
+        OmciMaskedData("data")
+    ]
+
+
+class OmciTestResult(OmciMessage):
+    name = "TestResult"
+    message_id = 0x1B
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0)
+        # ME Test specific message contents start here
+        # TODO: Can this be coded easily with scapy?
+    ]
+
+
+class OmciReboot(OmciMessage):
+    name = "OmciOnuReboot"
+    message_id = 0x59
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ByteField("reboot_code", 0)
+    ]
+
+
+class OmciRebootResponse(OmciMessage):
+    name = "OmciOnuRebootResponse"
+    message_id = 0x39
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ByteField("success_code", 0)
+    ]
+
+
+class OmciGetNext(OmciMessage):
+    name = "OmciGetNext"
+    message_id = 0x5A
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ShortField("attributes_mask", None),
+        ShortField("command_sequence_number", None)
+    ]
+
+
+class OmciGetNextResponse(OmciMessage):
+    name = "OmciGetNextResponse"
+    message_id = 0x3A
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ByteField("success_code", 0),
+        ShortField("attributes_mask", None),
+        ConditionalField(OmciMaskedData("data"),
+                         lambda pkt: pkt.success_code == 0)
+    ]
+
+
+class OmciSynchronizeTime(OmciMessage):
+    name = "OmciSynchronizeTime"
+    message_id = 0x58
+    fields_desc = [
+        ShortField("entity_class", 256),  # OntG
+        ShortField("entity_id", 0),
+        ShortField("year", 0),       # eg) 2018
+        ByteField("month", 0),       # 1..12
+        ByteField("day", 0),         # 1..31
+        ByteField("hour", 0),        # 0..23
+        ByteField("minute", 0),      # 0..59
+        ByteField("second", 0)       # 0..59
+    ]
+
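+# Construction sketch: populating an OmciSynchronizeTime request from the
+# current UTC time (standard library only; scapy fields are set via keywords):
+#
+#   from datetime import datetime
+#   dt = datetime.utcnow()
+#   sync = OmciSynchronizeTime(entity_id=0, year=dt.year, month=dt.month,
+#                              day=dt.day, hour=dt.hour, minute=dt.minute,
+#                              second=dt.second)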
+
+class OmciSynchronizeTimeResponse(OmciMessage):
+    name = "OmciSynchronizeTimeResponse"
+    message_id = 0x38
+    fields_desc = [
+        ShortField("entity_class", 256),  # OntG
+        ShortField("entity_id", 0),
+        ByteField("success_code", 0),
+        ConditionalField(ShortField("success_info", None),
+                         lambda pkt: pkt.success_code == 0)
+    ]
+
+
+class OmciGetCurrentData(OmciMessage):
+    name = "OmciGetCurrentData"
+    message_id = 0x5C
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ShortField("attributes_mask", None),
+    ]
+
+
+class OmciGetCurrentDataResponse(OmciMessage):
+    name = "OmciGetCurrentDataResponse"
+    message_id = 0x3C
+    fields_desc = [
+        ShortField("entity_class", None),
+        ShortField("entity_id", 0),
+        ByteField("success_code", 0),
+        ShortField("attributes_mask", None),
+        ShortField("unsupported_attributes_mask", None),
+        ShortField("failed_attributes_mask", None),
+        ConditionalField(
+            OmciMaskedData("data"), lambda pkt: pkt.success_code == 0)
+    ]
+
+class OmciStartSoftwareDownload(OmciMessage):
+    name = "OmciStartSoftwareDownload"
+    message_id = 0x53
+    fields_desc = [
+        ShortField("entity_class", 7),  # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("window_size", 0),
+        IntField("image_size", 0),
+        ByteField("image_number", 1),   # Always only 1 in parallel
+        ShortField("instance_id", None) # should be same as "entity_id"        
+    ]
+
+class OmciStartSoftwareDownloadResponse(OmciMessage):
+    name = "OmciStartSoftwareDownloadResponse"
+    message_id = 0x33
+    fields_desc = [
+        ShortField("entity_class", 7),  # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("result", 0),
+        ByteField("window_size", 0),
+        ByteField("image_number", 1),   # Always only 1 in parallel
+        ShortField("instance_id", None) # should be same as "entity_id"        
+    ]
+
+class OmciEndSoftwareDownload(OmciMessage):
+    name = "OmciEndSoftwareDownload"
+    message_id = 0x55
+    fields_desc = [
+        ShortField("entity_class", 7),  # Always 7 (Software image)
+        ShortField("entity_id", None),
+        IntField("crc32", 0),
+        IntField("image_size", 0),
+        ByteField("image_number", 1),   # Always only 1 in parallel
+        ShortField("instance_id", None),# should be same as "entity_id"
+    ]
+
+class OmciEndSoftwareDownloadResponse(OmciMessage):
+    name = "OmciEndSoftwareDownloadResponse"
+    message_id = 0x35
+    fields_desc = [
+        ShortField("entity_class", 7),  # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("result", 0),
+        ByteField("image_number", 1),    # Always only 1 in parallel
+        ShortField("instance_id", None), # should be same as "entity_id"
+        ByteField("result0", 0)          # same as "result"
+    ]
+
+class OmciDownloadSection(OmciMessage):
+    name = "OmciDownloadSection"
+    message_id = 0x14
+    fields_desc = [
+        ShortField("entity_class", 7),   # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("section_number", 0),  # Always only 1 in parallel
+        StrFixedLenField("data", 0, length=OmciSectionDataSize) # section data
+    ]
+
+class OmciDownloadSectionLast(OmciMessage):
+    name = "OmciDownloadSection"
+    message_id = 0x54
+    fields_desc = [
+        ShortField("entity_class", 7),   # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("section_number", 0),  # Always only 1 in parallel
+        StrFixedLenField("data", 0, length=OmciSectionDataSize) # section data
+    ]
+
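+# Note (per our reading of the G.988 software download procedure): intermediate
+# sections of a window use OmciDownloadSection (0x14), whose AR bit is clear,
+# so they are not individually acknowledged; the last section of each window is
+# sent as OmciDownloadSectionLast (0x54) with AR set and is answered by the
+# OmciDownloadSectionResponse defined below.
+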
+class OmciDownloadSectionResponse(OmciMessage):
+    name = "OmciDownloadSectionResponse"
+    message_id = 0x34
+    fields_desc = [
+        ShortField("entity_class", 7),   # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("result", 0),
+        ByteField("section_number", 0),  # Always only 1 in parallel
+    ]
+
+class OmciActivateImage(OmciMessage):
+    name = "OmciActivateImage"
+    message_id = 0x56
+    fields_desc = [
+        ShortField("entity_class", 7),   # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("activate_flag", 0)    # Activate image unconditionally
+    ]
+
+class OmciActivateImageResponse(OmciMessage):
+    name = "OmciActivateImageResponse"
+    message_id = 0x36
+    fields_desc = [
+        ShortField("entity_class", 7),   # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("result", 0)           # Activate image unconditionally
+    ]
+
+class OmciCommitImage(OmciMessage):
+    name = "OmciCommitImage"
+    message_id = 0x57
+    fields_desc = [
+        ShortField("entity_class", 7),   # Always 7 (Software image)
+        ShortField("entity_id", None),
+    ]
+
+class OmciCommitImageResponse(OmciMessage):
+    name = "OmciCommitImageResponse"
+    message_id = 0x37
+    fields_desc = [
+        ShortField("entity_class", 7),   # Always 7 (Software image)
+        ShortField("entity_id", None),
+        ByteField("result", 0)           # Activate image unconditionally
+    ]
+
diff --git a/python/extensions/omci/onu_configuration.py b/python/extensions/omci/onu_configuration.py
new file mode 100644
index 0000000..2bd82ce
--- /dev/null
+++ b/python/extensions/omci/onu_configuration.py
@@ -0,0 +1,509 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+
+from voltha.protos.device_pb2 import Image
+from omci_entities import *
+from database.mib_db_api import *
+from enum import IntEnum
+
+
+class OMCCVersion(IntEnum):
+    Unknown                 = 0     # Unknown or unsupported version
+    G_984_4                 = 0x80  # (06/04)
+    G_984_4_2005_Amd_1      = 0x81  # Amd.1 (06/05)
+    G_984_4_2006_Amd_2      = 0x82  # Amd.2 (03/06)
+    G_984_4_2006_Amd_3      = 0x83  # Amd.3 (12/06)
+    G_984_4_2008            = 0x84  # (02/08)
+    G_984_4_2009_Amd_1      = 0x85  # Amd.1 (06/09)
+    G_984_4_2009_Amd_2_Base = 0x86  # Amd.2 (2009)  Baseline message set only, w/o the extended message set option
+    G_984_4_2009_Amd_2      = 0x96  # Amd.2 (2009)  Extended message set option +  baseline message set.
+    G_988_2010_Base         = 0xA0  # (2010) Baseline message set only, w/o the extended message set option
+    G_988_2011_Amd_1_Base   = 0xA1  # Amd.1 (2011) Baseline message set only
+    G_988_2012_Amd_2_Base   = 0xA2  # Amd.2 (2012) Baseline message set only
+    G_988_2012_Base         = 0xA3  # (2012) Baseline message set only
+    G_988_2010              = 0xB0  # (2010) Baseline and extended message set
+    G_988_2011_Amd_1        = 0xB1  # Amd.1 (2011) Baseline and extended message set
+    G_988_2012_Amd_2        = 0xB2  # Amd.2 (2012) Baseline and extended message set
+    G_988_2012              = 0xB3  # (2012) Baseline and extended message set
+
+    @staticmethod
+    def values():
+        return {OMCCVersion[member].value for member in OMCCVersion.__members__.keys()}
+
+    @staticmethod
+    def to_enum(value):
+        return next((v for k, v in OMCCVersion.__members__.items()
+                     if v.value == value), OMCCVersion.Unknown)
+
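+# Example (sketch): to_enum() maps a reported code point back to the enum, and
+# unrecognized code points fall back to OMCCVersion.Unknown:
+#
+#   assert OMCCVersion.to_enum(0xA3) == OMCCVersion.G_988_2012_Base
+#   assert OMCCVersion.to_enum(0x42) == OMCCVersion.Unknown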
+
+class OnuConfiguration(object):
+    """
+    Utility class to query the OMCI MIB Database for various ONU/OMCI configuration
+    values and capabilities. These capabilities revolve around read-only MEs discovered
+    during the MIB Upload process.
+
+    There is also an 'omci_onu_capabilities' State Machine and an
+    'onu_capabilities_task.py' OMCI Task that will query the ONU, via the
+    OMCI (ME #287) managed entity, for the full list of supported OMCI MEs
+    and the actions/message-types they support.
+
+    NOTE: Currently this class is optimized/tested for ONUs that support the
+          OpenOMCI implementation.
+    """
+    def __init__(self, omci_agent, device_id):
+        """
+        Initialize this instance of the OnuConfiguration class
+
+        :param omci_agent: (OpenOMCIAgent) agent reference
+        :param device_id: (str) ONU Device ID
+
+        :raises KeyError: If ONU Device is not registered with OpenOMCI
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+        self._device_id = device_id
+        self._onu_device = omci_agent.get_device(device_id)
+
+        # The capabilities
+        self._attributes = None
+        self.reset()
+
+    def _get_capability(self, attr, class_id, instance_id=None):
+        """
+        Get the OMCI capabilities for this device
+
+        :param attr: (str) OnuConfiguration attribute field
+        :param class_id: (int) ME Class ID
+        :param instance_id: (int) Instance ID. If not provided, all instances of the
+                            specified class ID are returned if present in the DB.
+
+        :return: (dict) Class and/or Instances. None is returned if the CLASS is not present
+        """
+        try:
+            assert self._onu_device.mib_synchronizer.last_mib_db_sync is not None, \
+                'MIB Database for ONU {} has never been synchronized'.format(self._device_id)
+
+            # Get the requested information
+            if self._attributes[attr] is None:
+                value = self._onu_device.query_mib(class_id, instance_id=instance_id)
+
+                if isinstance(value, dict) and len(value) > 0:
+                    self._attributes[attr] = value
+
+            return self._attributes[attr]
+
+        except Exception as e:
+            self.log.exception('onu-capabilities', e=e, class_id=class_id,
+                               instance_id=instance_id)
+            raise
+
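+    # Caching sketch: the first property access for a given ME queries the MIB
+    # database and caches the result in self._attributes; later accesses are
+    # served from that cache until reset() is called (hypothetical device):
+    #
+    #   config = OnuConfiguration(agent, 'onu-1')
+    #   sn = config.serial_number    # queries the MIB database
+    #   sn = config.serial_number    # served from the cache
+    #   config.reset()               # invalidate after reboot/PON loss
+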
+    def reset(self):
+        """
+        Reset the cached database entries to None.  This method should be
+        called after any communications loss to the ONU (reboot, PON down, ...)
+        in case a new software load with different capabilities is available.
+        """
+        self._attributes = {
+            '_ont_g': None,
+            '_ont_2g': None,
+            '_ani_g': None,
+            '_uni_g': None,
+            '_cardholder': None,
+            '_circuit_pack': None,
+            '_software': None,
+            '_pptp': None,
+            '_veip': None
+        }
+
+    @property
+    def version(self):
+        """
+        This attribute identifies the version of the ONU as defined by the vendor
+        """
+        ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+        if ontg is None or ATTRIBUTES_KEY not in ontg:
+            return None
+
+        return ontg[ATTRIBUTES_KEY].get('version')
+
+    @property
+    def serial_number(self):
+        """
+        The serial number is unique for each ONU
+        """
+        ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+        if ontg is None or ATTRIBUTES_KEY not in ontg:
+            return None
+
+        return ontg[ATTRIBUTES_KEY].get('serial_number')
+
+    @property
+    def traffic_management_option(self):
+        """
+        This attribute identifies the upstream traffic management function
+        implemented in the ONU. There are three options:
+
+            0 Priority controlled and flexibly scheduled upstream traffic. The traffic
+              scheduler and priority queue mechanism are used for upstream traffic.
+
+            1 Rate controlled upstream traffic. The maximum upstream traffic of each
+              individual connection is guaranteed by shaping.
+
+            2 Priority and rate controlled. The traffic scheduler and priority queue
+              mechanism are used for upstream traffic. The maximum upstream traffic
+              of each individual connection is guaranteed by shaping.
+        """
+        ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+        if ontg is None or ATTRIBUTES_KEY not in ontg:
+            return None
+
+        return ontg[ATTRIBUTES_KEY].get('traffic_management_options')
+
+    @property
+    def onu_survival_time(self):
+        """
+        This attribute indicates the minimum guaranteed time in milliseconds
+        between the loss of external power and the silence of the ONU. This does not
+        include survival time attributable to a backup battery. The value zero implies that
+        the actual time is not known.
+
+        Optional
+        """
+        ontg = self._get_capability('_ont_g', OntG.class_id, 0)
+        if ontg is None or ATTRIBUTES_KEY not in ontg:
+            return None
+
+        return ontg[ATTRIBUTES_KEY].get('ont_survival_time', 0)
+
+    @property
+    def equipment_id(self):
+        """
+        This attribute may be used to identify the specific type of ONU. In some
+        environments, this attribute may include the equipment CLEI code.
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('equipment_id')
+
+    @property
+    def omcc_version(self):
+        """
+        This attribute identifies the version of the OMCC protocol being used by the
+        ONU. This allows the OLT to manage a network with ONUs that support different
+        OMCC versions. Release levels of [ITU-T G.984.4] are supported with code
+        points of the form 0x8y and 0x9y, where y is a hexadecimal digit in the range
+        0..F. Support for continuing revisions of this Recommendation is defined
+        in the 0xAy (baseline-only) and 0xBy (baseline plus extended) ranges.
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return OMCCVersion.to_enum(ont2g[ATTRIBUTES_KEY].get('omcc_version', 0))
+
+    @property
+    def vendor_product_code(self):
+        """
+        This attribute contains a vendor-specific product code for the ONU
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('vendor_product_code')
+
+    @property
+    def total_priority_queues(self):
+        """
+        This attribute reports the total number of upstream priority queues
+        that are not associated with a circuit pack, but with the ONU in its entirety
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('total_priority_queue_number')
+
+    @property
+    def total_traffic_schedulers(self):
+        """
+        This attribute reports the total number of traffic schedulers that
+        are not associated with a circuit pack, but with the ONU in its entirety.
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('total_traffic_scheduler_number')
+
+    @property
+    def total_gem_ports(self):
+        """
+        This attribute reports the total number of GEM port-IDs supported
+        by the ONU.
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('total_gem_port_id_number')
+
+    @property
+    def uptime(self):
+        """
+        This attribute counts 10 ms intervals since the ONU was last initialized.
+        It rolls over to 0 when full.
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('sys_uptime')
+
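+    # Conversion sketch: the counter is in 10 ms ticks, so the time since the
+    # ONU last initialized is uptime / 100.0 seconds (the underlying attribute
+    # rolls over to 0 when full):
+    #
+    #   ticks = config.uptime or 0
+    #   seconds = ticks / 100.0
+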
+    @property
+    def connectivity_capability(self):
+        """
+        This attribute indicates the Ethernet connectivity models that the ONU
+        can support. The value 0 indicates that the capability is not supported; 1 signifies
+        support.
+
+             Bit    Model [Figure references are to ITU-T G.988]
+              1     (LSB) N:1 bridging, Figure 8.2.2-3
+              2     1:M mapping, Figure 8.2.2-4
+              3     1:P filtering, Figure 8.2.2-5
+              4     N:M bridge-mapping, Figure 8.2.2-6
+              5     1:MP map-filtering, Figure 8.2.2-7
+              6     N:P bridge-filtering, Figure 8.2.2-8
+              7     N:MP bridge-map-filtering, Figure 8.2.2-9
+            8...16  Reserved
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('connectivity_capability')
+
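+    # Decoding sketch (hypothetical value): a connectivity_capability of 0x0003
+    # would advertise N:1 bridging (bit 1, LSB) and 1:M mapping (bit 2):
+    #
+    #   caps = config.connectivity_capability or 0
+    #   supports_n_to_1 = bool(caps & 0x0001)
+    #   supports_1_to_m = bool(caps & 0x0002)
+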
+    @property
+    def qos_configuration_flexibility(self):
+        """
+        This attribute reports whether various managed entities in the
+        ONU are fixed by the ONU's architecture or whether they are configurable. For
+        backward compatibility, and if the ONU does not support this attribute, all such
+        attributes are understood to be hard-wired.
+
+            Bit      Interpretation when bit value = 1
+             1 (LSB) Priority queue ME: Port field of related port attribute is
+                     read-write and can point to any T-CONT or UNI port in the
+                     same slot
+             2       Priority queue ME: The traffic scheduler pointer is permitted
+                     to refer to any other traffic scheduler in the same slot
+             3       Traffic scheduler ME: T-CONT pointer is read-write
+             4       Traffic scheduler ME: Policy attribute is read-write
+             5       T-CONT ME: Policy attribute is read-write
+             6       Priority queue ME: Priority field of related port attribute is
+                     read-write
+             7..16   Reserved
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('qos_configuration_flexibility')
+
+    @property
+    def priority_queue_scale_factor(self):
+        """
+        This specifies the scale factor of several attributes of the priority
+        queue managed entity of section 5.2.8
+        """
+        ont2g = self._get_capability('_ont_2g', Ont2G.class_id, 0)
+        if ont2g is None or ATTRIBUTES_KEY not in ont2g:
+            return None
+
+        return ont2g[ATTRIBUTES_KEY].get('priority_queue_scale_factor', 1)
+
+    @property
+    def cardholder_entities(self):
+        """
+        Return a dictionary containing some overall information on the CardHolder
+        instances for this ONU.
+        """
+        ch = self._get_capability('_cardholder', Cardholder.class_id)
+        results = dict()
+
+        if ch is not None:
+            for inst, inst_data in ch.items():
+                if isinstance(inst, int):
+                    results[inst] = {
+                        'entity-id':              inst,
+                        'is-single-piece':        inst >= 256,
+                        'slot-number':            inst & 0xff,
+                        'actual-plug-in-type':    inst_data[ATTRIBUTES_KEY].get('actual_plugin_unit_type', 0),
+                        'actual-equipment-id':    inst_data[ATTRIBUTES_KEY].get('actual_equipment_id', 0),
+                        'protection-profile-ptr': inst_data[ATTRIBUTES_KEY].get('protection_profile_pointer', 0),
+                    }
+        return results if len(results) else None
+
+    @property
+    def circuitpack_entities(self):
+        """
+        Return a dictionary containing some overall information on the CircuitPack
+        instances for this ONU.
+        """
+        cp = self._get_capability('_circuit_pack', CircuitPack.class_id)
+        results = dict()
+
+        if cp is not None:
+            for inst, inst_data in cp.items():
+                if isinstance(inst, int):
+                    results[inst] = {
+                        'entity-id': inst,
+                        'number-of-ports': inst_data[ATTRIBUTES_KEY].get('number_of_ports', 0),
+                        'serial-number': inst_data[ATTRIBUTES_KEY].get('serial_number', 0),
+                        'version': inst_data[ATTRIBUTES_KEY].get('version', 0),
+                        'vendor-id': inst_data[ATTRIBUTES_KEY].get('vendor_id', 0),
+                        'total-tcont-count': inst_data[ATTRIBUTES_KEY].get('total_tcont_buffer_number', 0),
+                        'total-priority-queue-count': inst_data[ATTRIBUTES_KEY].get('total_priority_queue_number', 0),
+                        'total-traffic-sched-count': inst_data[ATTRIBUTES_KEY].get('total_traffic_scheduler_number', 0),
+                    }
+
+        return results if len(results) else None
+
+    @property
+    def software_images(self):
+        """
+        Get a list of software image information for the ONU.  The information is provided
+        so that it may be directly added to the protobuf Device information software list.
+        """
+        sw = self._get_capability('_software', SoftwareImage.class_id)
+        images = list()
+
+        if sw is not None:
+            for inst, inst_data in sw.items():
+                if isinstance(inst, int):
+                    is_active = inst_data[ATTRIBUTES_KEY].get('is_active', False)
+
+                    images.append(Image(name='running-revision' if is_active else 'candidate-revision',
+                                        version=str(inst_data[ATTRIBUTES_KEY].get('version',
+                                                                                  'Not Available').rstrip('\0')),
+                                        is_active=is_active,
+                                        is_committed=inst_data[ATTRIBUTES_KEY].get('is_committed',
+                                                                                   False),
+                                        is_valid=inst_data[ATTRIBUTES_KEY].get('is_valid',
+                                                                               False),
+                                        install_datetime='Not Available',
+                                        hash=str(inst_data[ATTRIBUTES_KEY].get('image_hash',
+                                                                               'Not Available').rstrip('\0'))))
+        return images if len(images) else None
+
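+    # Usage sketch (hypothetical protobuf Device instance): the returned Image
+    # objects are intended to be copied into the device's software image list,
+    # e.g.:
+    #
+    #   images = config.software_images
+    #   if images is not None:
+    #       device.images.image.extend(images)
+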
+    @property
+    def ani_g_entities(self):
+        """
+        This managed entity organizes data associated with each access network
+        interface supported by a G-PON ONU. The ONU automatically creates one
+        instance of this managed entity for each PON physical port.
+        """
+        ag = self._get_capability('_ani_g', AniG.class_id)
+        results = dict()
+
+        if ag is not None:
+            for inst, inst_data in ag.items():
+                if isinstance(inst, int):
+                    results[inst] = {
+                        'entity-id':               inst,
+                        'slot-number':             (inst >> 8) & 0xff,
+                        'port-number':             inst & 0xff,
+                        'total-tcont-count':       inst_data[ATTRIBUTES_KEY].get('total_tcont_number', 0),
+                        'piggyback-dba-reporting': inst_data[ATTRIBUTES_KEY].get('piggyback_dba_reporting', 0),
+                    }
+        return results if len(results) else None
+
+    @property
+    def uni_g_entities(self):
+        """
+        This managed entity organizes data associated with user network interfaces
+        (UNIs) supported by GEM. One instance of the UNI-G managed entity exists
+        for each UNI supported by the ONU.
+
+        The ONU automatically creates or deletes instances of this managed entity
+        upon the creation or deletion of a real or virtual circuit pack managed
+        entity, one per port.
+        """
+        ug = self._get_capability('_uni_g', UniG.class_id)
+        results = dict()
+
+        if ug is not None:
+            for inst, inst_data in ug.items():
+                if isinstance(inst, int):
+                    results[inst] = {
+                        'entity-id':               inst,
+                        'management-capability':   inst_data[ATTRIBUTES_KEY].get('management_capability', 0)
+                    }
+        return results if len(results) else None
+
+    @property
+    def pptp_entities(self):
+        """
+        Returns discovered PPTP Ethernet entities.  TODO more detail here
+        """
+        pptp = self._get_capability('_pptp', PptpEthernetUni.class_id)
+        results = dict()
+
+        if pptp is not None:
+            for inst, inst_data in pptp.items():
+                if isinstance(inst, int):
+                    results[inst] = {
+                        'entity-id':                inst,
+                        'expected-type':            inst_data[ATTRIBUTES_KEY].get('expected_type', 0),
+                        'sensed-type':              inst_data[ATTRIBUTES_KEY].get('sensed_type', 0),
+                        'autodetection-config':     inst_data[ATTRIBUTES_KEY].get('autodetection_config', 0),
+                        'ethernet-loopback-config': inst_data[ATTRIBUTES_KEY].get('ethernet_loopback_config', 0),
+                        'administrative-state':     inst_data[ATTRIBUTES_KEY].get('administrative_state', 0),
+                        'operational-state':        inst_data[ATTRIBUTES_KEY].get('operational_state', 0),
+                        'config-ind':               inst_data[ATTRIBUTES_KEY].get('config_ind', 0),
+                        'max-frame-size':           inst_data[ATTRIBUTES_KEY].get('max_frame_size', 0),
+                        'dte-dce-ind':              inst_data[ATTRIBUTES_KEY].get('dte_dce_ind', 0),
+                        'pause-time':               inst_data[ATTRIBUTES_KEY].get('pause_time', 0),
+                        'bridged-ip-ind':           inst_data[ATTRIBUTES_KEY].get('bridged_ip_ind', 0),
+                        'arc':                      inst_data[ATTRIBUTES_KEY].get('arc', 0),
+                        'arc-interval':             inst_data[ATTRIBUTES_KEY].get('arc_interval', 0),
+                        'pppoe-filter':             inst_data[ATTRIBUTES_KEY].get('ppoe_filter', 0),
+                        'power-control':            inst_data[ATTRIBUTES_KEY].get('power_control', 0)
+                    }
+        return results if len(results) else None
+
+    @property
+    def veip_entities(self):
+        """
+        Returns discovered VEIP entities.  TODO more detail here
+        """
+        veip = self._get_capability('_veip', VeipUni.class_id)
+        results = dict()
+
+        if veip is not None:
+            for inst, inst_data in veip.items():
+                if isinstance(inst, int):
+                    results[inst] = {
+                        'entity-id':                inst,
+                        'administrative-state':     inst_data[ATTRIBUTES_KEY].get('administrative_state', 0),
+                        'operational-state':        inst_data[ATTRIBUTES_KEY].get('operational_state', 0),
+                        'interdomain-name':         inst_data[ATTRIBUTES_KEY].get('interdomain_name', ""),
+                        'tcp-udp-pointer':          inst_data[ATTRIBUTES_KEY].get('tcp_udp_pointer', 0),
+                        'iana-assigned-port':       inst_data[ATTRIBUTES_KEY].get('iana_assigned_port', 0)
+                    }
+        return results if len(results) else None
diff --git a/python/extensions/omci/onu_device_entry.py b/python/extensions/omci/onu_device_entry.py
new file mode 100644
index 0000000..7a0c439
--- /dev/null
+++ b/python/extensions/omci/onu_device_entry.py
@@ -0,0 +1,635 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import structlog
+from copy import deepcopy
+from voltha.protos.device_pb2 import ImageDownload
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes
+import voltha.extensions.omci.omci_entities as omci_entities
+from voltha.extensions.omci.omci_cc import OMCI_CC
+from common.event_bus import EventBusClient
+from voltha.extensions.omci.tasks.task_runner import TaskRunner
+from voltha.extensions.omci.onu_configuration import OnuConfiguration
+from voltha.extensions.omci.tasks.reboot_task import OmciRebootRequest, RebootFlags
+from voltha.extensions.omci.tasks.omci_modify_request import OmciModifyRequest
+from voltha.extensions.omci.omci_me import OntGFrame
+from voltha.extensions.omci.state_machines.image_agent import ImageAgent
+
+from twisted.internet import reactor, defer
+from enum import IntEnum
+
+OP = EntityOperations
+RC = ReasonCodes
+
+ACTIVE_KEY = 'active'
+IN_SYNC_KEY = 'in-sync'
+LAST_IN_SYNC_KEY = 'last-in-sync-time'
+SUPPORTED_MESSAGE_ENTITY_KEY = 'managed-entities'
+SUPPORTED_MESSAGE_TYPES_KEY = 'message-type'
+
+
+class OnuDeviceEvents(IntEnum):
+    # Events of interest to Device Adapters and OpenOMCI State Machines
+    DeviceStatusEvent = 0       # OnuDeviceEntry running status changed
+    MibDatabaseSyncEvent = 1    # MIB database sync changed
+    OmciCapabilitiesEvent = 2   # OMCI ME and message type capabilities
+    AlarmDatabaseSyncEvent = 3  # Alarm database sync changed
+
+    # TODO: Add other events here as needed
+
+
+class OnuDeviceEntry(object):
+    """
+    An ONU Device entry in the MIB
+    """
+    def __init__(self, omci_agent, device_id, adapter_agent, custom_me_map,
+                 mib_db, alarm_db, support_classes, clock=None):
+        """
+        Class initializer
+
+        :param omci_agent: (OpenOMCIAgent) Reference to OpenOMCI Agent
+        :param device_id: (str) ONU Device ID
+        :param adapter_agent: (AdapterAgent) Adapter agent for ONU
+        :param custom_me_map: (dict) Additional/updated ME to add to class map
+        :param mib_db: (MibDbApi) MIB Database reference
+        :param alarm_db: (MibDbApi) Alarm Table/Database reference
+        :param support_classes: (dict) State machines and tasks for this ONU
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+
+        self._started = False
+        self._omci_agent = omci_agent         # OMCI AdapterAgent
+        self._device_id = device_id           # ONU Device ID
+        self._adapter_agent = adapter_agent
+        self._runner = TaskRunner(device_id, clock=clock)  # OMCI_CC Task runner
+        self._deferred = None
+        # self._img_download_deferred = None    # deferred of image file download from server
+        self._omci_upgrade_deferred = None    # deferred of ONU OMCI upgrading procedure
+        self._omci_activate_deferred = None   # deferred of ONU OMCI Software Image Activate
+        self._img_deferred = None             # deferred returned to caller of do_onu_software_download
+        self._first_in_sync = False
+        self._first_capabilities = False
+        self._timestamp = None
+        # self._image_download = None  # (voltha_pb2.ImageDownload)
+        self.reactor = clock if clock is not None else reactor
+
+        # OMCI related databases are on a per-agent basis. State machines and tasks
+        # are per ONU Vendor
+        #
+        self._support_classes = support_classes
+        self._configuration = None
+
+        try:
+            # MIB Synchronization state machine
+            self._mib_db_in_sync = False
+            mib_synchronizer_info = support_classes.get('mib-synchronizer')
+            advertise = mib_synchronizer_info['advertise-events']
+            self._mib_sync_sm = mib_synchronizer_info['state-machine'](self._omci_agent,
+                                                                       device_id,
+                                                                       mib_synchronizer_info['tasks'],
+                                                                       mib_db,
+                                                                       advertise_events=advertise)
+            # ONU OMCI Capabilities state machine
+            capabilities_info = support_classes.get('omci-capabilities')
+            advertise = capabilities_info['advertise-events']
+            self._capabilities_sm = capabilities_info['state-machine'](self._omci_agent,
+                                                                       device_id,
+                                                                       capabilities_info['tasks'],
+                                                                       advertise_events=advertise)
+            # ONU Performance Monitoring Intervals state machine
+            interval_info = support_classes.get('performance-intervals')
+            advertise = interval_info['advertise-events']
+            self._pm_intervals_sm = interval_info['state-machine'](self._omci_agent, device_id,
+                                                                   interval_info['tasks'],
+                                                                   advertise_events=advertise)
+
+            # ONU ALARM Synchronization state machine
+            self._alarm_db_in_sync = False
+            alarm_synchronizer_info = support_classes.get('alarm-synchronizer')
+            advertise = alarm_synchronizer_info['advertise-events']
+            self._alarm_sync_sm = alarm_synchronizer_info['state-machine'](self._omci_agent,
+                                                                           device_id,
+                                                                           alarm_synchronizer_info['tasks'],
+                                                                           alarm_db,
+                                                                           advertise_events=advertise)
+            # State machine of downloading image file from server
+            downloader_info = support_classes.get('image_downloader')
+            image_upgrader_info = support_classes.get('image_upgrader')
+            # image_activate_info = support_classes.get('image_activator')
+            advertise = downloader_info['advertise-event']
+            # self._img_download_sm = downloader_info['state-machine'](self._omci_agent, device_id, 
+            #                                                       downloader_info['tasks'],
+            #                                                       advertise_events=advertise)
+            self._image_agent = ImageAgent(self._omci_agent, device_id, 
+                                           downloader_info['state-machine'], downloader_info['tasks'], 
+                                           image_upgrader_info['state-machine'], image_upgrader_info['tasks'],
+                                           # image_activate_info['state-machine'],
+                                           advertise_events=advertise, clock=clock)
+
+            # self._omci_upgrade_sm = image_upgrader_info['state-machine'](device_id, advertise_events=advertise)
+
+        except Exception as e:
+            self.log.exception('state-machine-create-failed', e=e)
+            raise
+
+        # Put state machines in the order you wish to start them
+
+        self._state_machines = []
+        self._on_start_state_machines = [       # Run when 'start()' called
+            self._mib_sync_sm,
+            self._capabilities_sm,
+        ]
+        self._on_sync_state_machines = [        # Run after first in_sync event
+            self._alarm_sync_sm,
+        ]
+        self._on_capabilities_state_machines = [  # Run after first capabilities events
+            self._pm_intervals_sm
+        ]
+        self._custom_me_map = custom_me_map
+        self._me_map = omci_entities.entity_id_to_class_map.copy()
+
+        if custom_me_map is not None:
+            self._me_map.update(custom_me_map)
+
+        self.event_bus = EventBusClient()
+
+        # Create OMCI communications channel
+        self._omci_cc = OMCI_CC(adapter_agent, self.device_id, self._me_map, clock=clock)
+
+    @staticmethod
+    def event_bus_topic(device_id, event):
+        """
+        Get the topic name for a given event for this ONU Device
+        :param device_id: (str) ONU Device ID
+        :param event: (OnuDeviceEvents) Type of event
+        :return: (str) Topic string
+        """
+        assert event in OnuDeviceEvents, \
+            'Event {} is not an ONU Device Event'.format(event.name)
+        return 'omci-device:{}:{}'.format(device_id, event.name)
+
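+    # Example: event_bus_topic('onu-1', OnuDeviceEvents.MibDatabaseSyncEvent)
+    # returns 'omci-device:onu-1:MibDatabaseSyncEvent'.
+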
+    @property
+    def device_id(self):
+        return self._device_id
+
+    @property
+    def omci_cc(self):
+        return self._omci_cc
+
+    @property
+    def adapter_agent(self):
+        return self._adapter_agent
+
+    @property
+    def task_runner(self):
+        return self._runner
+
+    @property
+    def mib_synchronizer(self):
+        """
+        Reference to the OpenOMCI MIB Synchronization state machine for this ONU
+        """
+        return self._mib_sync_sm
+
+    @property
+    def omci_capabilities(self):
+        """
+        Reference to the OpenOMCI OMCI Capabilities state machine for this ONU
+        """
+        return self._capabilities_sm
+
+    @property
+    def pm_intervals_state_machine(self):
+        """
+        Reference to the OpenOMCI PM Intervals state machine for this ONU
+        """
+        return self._pm_intervals_sm
+
+    def set_pm_config(self, pm_config):
+        """
+        Set PM interval configuration
+
+        :param pm_config: (OnuPmIntervalMetrics) PM Interval configuration
+        """
+        self._pm_intervals_sm.set_pm_config(pm_config)
+
+    @property
+    def timestamp(self):
+        """Pollable Metrics last collected timestamp"""
+        return self._timestamp
+
+    @timestamp.setter
+    def timestamp(self, value):
+        self._timestamp = value
+
+    @property
+    def alarm_synchronizer(self):
+        """
+        Reference to the OpenOMCI Alarm Synchronization state machine for this ONU
+        """
+        return self._alarm_sync_sm
+
+    @property
+    def active(self):
+        """
+        Is the ONU device currently active/running
+        """
+        return self._started
+
+    @property
+    def custom_me_map(self):
+        """ Vendor-specific Managed Entity Map for this vendor's device"""
+        return self._custom_me_map
+
+    @property
+    def me_map(self):
+        """ Combined ME and Vendor-specific Managed Entity Map for this device"""
+        return self._me_map
+
+    def _cancel_deferred(self):
+        d, self._deferred = self._deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    @property
+    def mib_db_in_sync(self):
+        return self._mib_db_in_sync
+
+    @mib_db_in_sync.setter
+    def mib_db_in_sync(self, value):
+        if self._mib_db_in_sync != value:
+            # Save value
+            self._mib_db_in_sync = value
+
+            # Start up other state machines if needed
+            if self._first_in_sync:
+                self.first_in_sync_event()
+
+            # Notify any event listeners
+            topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+                                                   OnuDeviceEvents.MibDatabaseSyncEvent)
+            msg = {
+                IN_SYNC_KEY: self._mib_db_in_sync,
+                LAST_IN_SYNC_KEY: self.mib_synchronizer.last_mib_db_sync
+            }
+            self.event_bus.publish(topic=topic, msg=msg)
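+            # Subscriber sketch (the 'on_sync' handler name is hypothetical):
+            #
+            #   topic = OnuDeviceEntry.event_bus_topic(device_id,
+            #                                          OnuDeviceEvents.MibDatabaseSyncEvent)
+            #   EventBusClient().subscribe(topic,
+            #                              lambda _topic, msg: on_sync(msg[IN_SYNC_KEY]))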
+
+    @property
+    def alarm_db_in_sync(self):
+        return self._alarm_db_in_sync
+
+    @alarm_db_in_sync.setter
+    def alarm_db_in_sync(self, value):
+        if self._alarm_db_in_sync != value:
+            # Save value
+            self._alarm_db_in_sync = value
+
+            # Start up other state machines if needed
+            if self._first_in_sync:
+                self.first_in_sync_event()
+
+            # Notify any event listeners
+            topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+                                                   OnuDeviceEvents.AlarmDatabaseSyncEvent)
+            msg = {
+                IN_SYNC_KEY: self._alarm_db_in_sync
+            }
+            self.event_bus.publish(topic=topic, msg=msg)
+
+    @property
+    def configuration(self):
+        """
+        Get the OMCI Configuration object for this ONU.  This is a class that provides some
+        common database access functions for ONU capabilities and read-only configuration values.
+
+        :return: (OnuConfiguration)
+        """
+        return self._configuration
+
+    @property
+    def image_agent(self):
+        return self._image_agent
+
+    # @property
+    # def image_download(self):
+    #     return self._image_download
+        
+    def start(self):
+        """
+        Start the ONU Device Entry state machines
+        """
+        self.log.debug('OnuDeviceEntry.start', previous=self._started)
+        if self._started:
+            return
+
+        self._started = True
+        self._omci_cc.enabled = True
+        self._first_in_sync = True
+        self._first_capabilities = True
+        self._runner.start()
+        self._configuration = OnuConfiguration(self._omci_agent, self._device_id)
+
+        # Start MIB Sync and the other state machines that can run before the
+        # first MIB Synchronization event occurs. Start 'later' so that the
+        # ONU Device, OMCI DB, OMCI Agent, and others are fully initialized
+        # before the state machines begin.
+
+        self._state_machines = []
+
+        def start_state_machines(machines):
+            for sm in machines:
+                self._state_machines.append(sm)
+                sm.start()
+
+        self._deferred = reactor.callLater(0, start_state_machines,
+                                           self._on_start_state_machines)
+        # Notify any event listeners
+        self._publish_device_status_event()
+
+    def stop(self):
+        """
+        Stop the ONU Device Entry state machines
+        """
+        if not self._started:
+            return
+
+        self._started = False
+        self._cancel_deferred()
+        self._omci_cc.enabled = False
+
+        # Halt MIB Sync and other state machines
+        for sm in self._state_machines:
+            sm.stop()
+
+        self._state_machines = []
+
+        # Stop task runner
+        self._runner.stop()
+
+        # Notify any event listeners
+        self._publish_device_status_event()
+
+    def first_in_sync_event(self):
+        """
+        This event is called on the first MIB synchronization event after
+        OpenOMCI has been started. It is responsible for starting any
+        other state machines and for initiating an ONU Capabilities report
+        """
+        if self._first_in_sync:
+            self._first_in_sync = False
+
+            # Start up the ONU Capabilities task
+            self._configuration.reset()
+
+            # Ensure that the ONU-G Administrative lock is disabled
+            def failure(reason):
+                self.log.error('disable-admin-state-lock', reason=reason)
+
+            frame = OntGFrame(attributes={'administrative_state': 0}).set()
+            task = OmciModifyRequest(self._omci_agent, self.device_id, frame)
+            self.task_runner.queue_task(task).addErrback(failure)
+
+            # Start up any other remaining OpenOMCI state machines
+            def start_state_machines(machines):
+                for sm in machines:
+                    self._state_machines.append(sm)
+                    reactor.callLater(0, sm.start)
+
+            self._deferred = reactor.callLater(0, start_state_machines,
+                                               self._on_sync_state_machines)
+
+            # If an ongoing upgrade has not completed, restart it
+            if self._img_deferred is not None:
+                self._image_agent.onu_bootup()
+
+    def first_in_capabilities_event(self):
+        """
+        This event is called on the first capabilities event after
+        OpenOMCI has been started. It is responsible for starting any
+        other state machines, often ones whose tasks depend upon knowing
+        whether various MEs are supported
+        """
+        if self._first_capabilities:
+            self._first_capabilities = False
+
+            # Start up any other remaining OpenOMCI state machines
+            def start_state_machines(machines):
+                for sm in machines:
+                    self._state_machines.append(sm)
+                    reactor.callLater(0, sm.start)
+
+            self._deferred = reactor.callLater(0, start_state_machines,
+                                               self._on_capabilities_state_machines)
+
+    # def __on_omci_download_success(self, image_download):
+    #     self.log.debug("__on_omci_download_success", image=image_download)
+    #     self._omci_upgrade_deferred = None
+    #     # self._ret_deferred = None
+    #     self._omci_activate_deferred = self._image_agent.activate_onu_image(image_download.name)
+    #     self._omci_activate_deferred.addCallbacks(self.__on_omci_image_activate_success, 
+    #                                               self.__on_omci_image_activate_fail, errbackArgs=(image_name,))
+    #     return image_name
+        
+    # def __on_omci_download_fail(self, fail, image_name):
+    #     self.log.debug("__on_omci_download_fail", failure=fail, image_name=image_name)
+    #     self.reactor.callLater(0, self._img_deferred.errback, fail)
+    #     self._omci_upgrade_deferred = None
+    #     self._img_deferred = None
+
+    def __on_omci_image_activate_success(self, image_name):
+        self.log.debug("__on_omci_image_activate_success", image_name=image_name)
+        self._omci_activate_deferred = None
+        self._img_deferred.callback(image_name)
+        self._img_deferred = None
+        return image_name
+
+    def __on_omci_image_activate_fail(self, fail, image_name):
+        self.log.debug("__on_omci_image_activate_fail", faile=fail, image_name=image_name)
+        self._omci_activate_deferred = None
+        self._img_deferred.errback(fail)
+        self._img_deferred = None
+    
+    def _publish_device_status_event(self):
+        """
+        Publish the ONU Device start/stop status.
+        """
+        topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+                                               OnuDeviceEvents.DeviceStatusEvent)
+        msg = {ACTIVE_KEY: self._started}
+        self.event_bus.publish(topic=topic, msg=msg)
+
+    def publish_omci_capabilities_event(self):
+        """
+        Publish the ONU OMCI capabilities event.
+        """
+        if self._first_capabilities:
+            self.first_in_capabilities_event()
+
+        topic = OnuDeviceEntry.event_bus_topic(self.device_id,
+                                               OnuDeviceEvents.OmciCapabilitiesEvent)
+        msg = {
+            SUPPORTED_MESSAGE_ENTITY_KEY: self.omci_capabilities.supported_managed_entities,
+            SUPPORTED_MESSAGE_TYPES_KEY: self.omci_capabilities.supported_message_types
+        }
+        self.event_bus.publish(topic=topic, msg=msg)
+
+    def delete(self):
+        """
+        Stop the ONU Device's state machines and remove the ONU, and any related
+        OMCI state information, from the OpenOMCI Framework
+        """
+        self.stop()
+        self.mib_synchronizer.delete()
+
+        # OpenOMCI cleanup
+        if self._omci_agent is not None:
+            self._omci_agent.remove_device(self._device_id, cleanup=True)
+
+    def query_mib(self, class_id=None, instance_id=None, attributes=None):
+        """
+        Get MIB database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+        :param attributes: (list or str) Managed Entity instance's attributes
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('query', class_id=class_id, instance_id=instance_id,
+                       attributes=attributes)
+
+        return self.mib_synchronizer.query_mib(class_id=class_id, instance_id=instance_id,
+                                               attributes=attributes)
+
+    def query_mib_single_attribute(self, class_id, instance_id, attribute):
+        """
+        Get MIB database information for a single specific attribute
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+        :param attribute: (str) Managed Entity instance's attribute
+
+        :return: (varies) The value requested. If class/inst/attribute is
+                          not found, None is returned
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('query-single', class_id=class_id,
+                       instance_id=instance_id, attributes=attribute)
+        assert isinstance(attribute, basestring), \
+            'Only a single attribute value can be retrieved'
+
+        entry = self.mib_synchronizer.query_mib(class_id=class_id,
+                                                instance_id=instance_id,
+                                                attributes=attribute)
+
+        return entry[attribute] if attribute in entry else None
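+    # Example (ME class/instance values hypothetical):
+    #   state = device.query_mib_single_attribute(256, 0, 'administrative_state')
+    # yields the attribute value, or None when absent from the MIB database.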
+
+    def query_alarm_table(self, class_id=None, instance_id=None):
+        """
+        Get Alarm information
+
+        This method can be used to request information from the alarm database at
+        the level of detail requested
+
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self.log.debug('query', class_id=class_id, instance_id=instance_id)
+
+        return self.alarm_synchronizer.query_mib(class_id=class_id, instance_id=instance_id)
+
+    def reboot(self,
+               flags=RebootFlags.Reboot_Unconditionally,
+               timeout=OmciRebootRequest.DEFAULT_REBOOT_TIMEOUT):
+        """
+        Request a reboot of the ONU
+
+        :param flags: (RebootFlags) Reboot condition
+        :param timeout: (int) Reboot request timeout
+        :return: (deferred) Fires upon completion or error
+        """
+        assert self.active, 'This device is not active'
+
+        return self.task_runner.queue_task(OmciRebootRequest(self._omci_agent,
+                                                             self.device_id,
+                                                             flags=flags,
+                                                             timeout=timeout))
+
+    # def get_imagefile(self, local_name, local_dir, remote_url=None):
+    #     """
+    #     Return a Deferred that will be triggered if the file is locally available
+    #     or downloaded successfully
+    #     """
+    #     self.log.info('start download from {}'.format(remote_url))
+
+    #     # for debug purpose, start runner here to queue downloading task
+    #     # self._runner.start()
+
+    #     return self._image_agent.get_image(self._image_download)
+
+    def do_onu_software_download(self, image_dnld):
+        """
+        :param image_dnld: (ImageDownload) Image download instance
+        :return: (Deferred) Fires when the upgrade results in success or failure
+        """
+        self.log.debug('do_onu_software_download')
+        image_download = deepcopy(image_dnld)
+        # self._img_download_deferred = self._image_agent.get_image(self._image_download)
+        # self._img_download_deferred.addCallbacks(self.__on_download_success, self.__on_download_fail, errbackArgs=(self._image_download,))
+        # self._ret_deferred = defer.Deferred()
+        # return self._ret_deferred
+        return self._image_agent.get_image(image_download)
+
+    # def do_onu_software_switch(self):
+    def do_onu_image_activate(self, image_dnld_name):
+        """
+        Return a Deferred that will be triggered when switching software image results in success or failure
+        """
+        if self._img_deferred is None:
+            self.log.debug('do_onu_image_activate')
+            self._img_deferred = defer.Deferred()
+            self._omci_upgrade_deferred = self._image_agent.onu_omci_download(image_dnld_name)
+            self._omci_upgrade_deferred.addCallbacks(self.__on_omci_image_activate_success,
+                                                     self.__on_omci_image_activate_fail,
+                                                     errbackArgs=(image_dnld_name,))
+        return self._img_deferred
+
+    def cancel_onu_software_download(self, image_name):
+        self.log.debug('cancel_onu_software_download')
+        self._image_agent.cancel_download_image(image_name)
+        self._image_agent.cancel_upgrade_onu()
+        if self._img_deferred and not self._img_deferred.called:
+            self._img_deferred.cancel()
+        self._img_deferred = None
+        # self._image_download = None
+
+    def get_image_download_status(self, image_name):
+        return self._image_agent.get_image_status(image_name)
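+    # Upgrade flow sketch (adapter-side; 'image_dnld' is an ImageDownload):
+    #
+    #   d = device.do_onu_software_download(image_dnld)
+    #   d.addCallback(lambda _: device.do_onu_image_activate(image_dnld.name))
+    #
+    # Either phase can be aborted via cancel_onu_software_download(image_name).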
+        
diff --git a/python/extensions/omci/openomci_agent.py b/python/extensions/omci/openomci_agent.py
new file mode 100644
index 0000000..b47fbab
--- /dev/null
+++ b/python/extensions/omci/openomci_agent.py
@@ -0,0 +1,283 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from twisted.internet import reactor
+from voltha.extensions.omci.database.mib_db_dict import MibDbVolatileDict
+from voltha.extensions.omci.database.mib_db_ext import MibDbExternal
+from voltha.extensions.omci.state_machines.mib_sync import MibSynchronizer
+from voltha.extensions.omci.tasks.mib_upload import MibUploadTask
+from voltha.extensions.omci.tasks.get_mds_task import GetMdsTask
+from voltha.extensions.omci.tasks.mib_resync_task import MibResyncTask
+from voltha.extensions.omci.tasks.mib_reconcile_task import MibReconcileTask
+from voltha.extensions.omci.tasks.sync_time_task import SyncTimeTask
+from voltha.extensions.omci.state_machines.alarm_sync import AlarmSynchronizer
+from voltha.extensions.omci.tasks.alarm_resync_task import AlarmResyncTask
+from voltha.extensions.omci.database.alarm_db_ext import AlarmDbExternal
+from voltha.extensions.omci.tasks.interval_data_task import IntervalDataTask
+from voltha.extensions.omci.onu_device_entry import OnuDeviceEntry
+from voltha.extensions.omci.state_machines.omci_onu_capabilities import OnuOmciCapabilities
+from voltha.extensions.omci.tasks.onu_capabilities_task import OnuCapabilitiesTask
+from voltha.extensions.omci.state_machines.performance_intervals import PerformanceIntervals
+from voltha.extensions.omci.tasks.omci_create_pm_task import OmciCreatePMRequest
+from voltha.extensions.omci.tasks.omci_delete_pm_task import OmciDeletePMRequest
+from voltha.extensions.omci.state_machines.image_agent import ImageDownloadeSTM, OmciSoftwareImageDownloadSTM
+from voltha.extensions.omci.tasks.file_download_task import FileDownloadTask
+from voltha.extensions.omci.tasks.omci_sw_image_upgrade_task import OmciSwImageUpgradeTask
+
+OpenOmciAgentDefaults = {
+    'mib-synchronizer': {
+        'state-machine': MibSynchronizer,  # Implements the MIB synchronization state machine
+        'database': MibDbVolatileDict,     # Implements volatile ME MIB database
+        # 'database': MibDbExternal,         # Implements persistent ME MIB database
+        'advertise-events': True,          # Advertise events on OpenOMCI event bus
+        'tasks': {
+            'mib-upload': MibUploadTask,
+            'get-mds': GetMdsTask,
+            'mib-audit': GetMdsTask,
+            'mib-resync': MibResyncTask,
+            'mib-reconcile': MibReconcileTask
+        }
+    },
+    'omci-capabilities': {
+        'state-machine': OnuOmciCapabilities,   # Implements OMCI capabilities state machine
+        'advertise-events': False,              # Advertise events on OpenOMCI event bus
+        'tasks': {
+            'get-capabilities': OnuCapabilitiesTask  # Get supported ME and Commands
+        }
+    },
+    'performance-intervals': {
+        'state-machine': PerformanceIntervals,  # Implements PM Intervals State machine
+        'advertise-events': False,              # Advertise events on OpenOMCI event bus
+        'tasks': {
+            'sync-time': SyncTimeTask,
+            'collect-data': IntervalDataTask,
+            'create-pm': OmciCreatePMRequest,
+            'delete-pm': OmciDeletePMRequest,
+        },
+    },
+    'alarm-synchronizer': {
+        'state-machine': AlarmSynchronizer,    # Implements the Alarm sync state machine
+        'database': AlarmDbExternal,           # For any State storage needs
+        'advertise-events': True,              # Advertise events on OpenOMCI event bus
+        'tasks': {
+            'alarm-resync': AlarmResyncTask
+        }
+    },
+    'image_downloader': {
+        'state-machine': ImageDownloadeSTM,
+        'advertise-event': True,
+        'tasks': {
+            'download-file': FileDownloadTask
+        }
+    },
+    'image_upgrader': {
+        'state-machine': OmciSoftwareImageDownloadSTM,
+        'advertise-event': True,
+        'tasks': {
+            'omci_upgrade_task': OmciSwImageUpgradeTask
+        }
+    }
+    # 'image_activator': {
+    #     'state-machine': OmciSoftwareImageActivateSTM,
+    #     'advertise-event': True,
+    # }
+}
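+
+# A device adapter may tailor the defaults above before constructing the agent;
+# a minimal sketch (MyMibSynchronizer is a hypothetical subclass):
+#
+#   from copy import deepcopy
+#   support = deepcopy(OpenOmciAgentDefaults)
+#   support['mib-synchronizer']['state-machine'] = MyMibSynchronizer
+#   agent = OpenOMCIAgent(core, support_classes=support)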
+
+
+class OpenOMCIAgent(object):
+    """
+    OpenOMCI for VOLTHA
+
+    This will become the primary interface into OpenOMCI for ONU Device Adapters
+    in the VOLTHA v1.3 sprint 3 time frame.
+    """
+    def __init__(self, core, support_classes=OpenOmciAgentDefaults, clock=None):
+        """
+        Class initializer
+
+        :param core: (VolthaCore) VOLTHA Core
+        :param support_classes: (Dict) Classes to support OMCI
+        :param clock: (IReactorTime) Scheduler to use; defaults to the Twisted
+                      reactor (tests may pass a Clock)
+        """
+        self.log = structlog.get_logger()
+        self._core = core
+        self.reactor = clock if clock is not None else reactor
+        self._started = False
+        self._devices = dict()       # device-id -> DeviceEntry
+        self._event_bus = None
+
+        # OMCI related databases are maintained on a per-agent basis. State
+        # machines and tasks are per ONU vendor
+        #
+        # MIB Synchronization Database
+        self._mib_db = None
+        self._mib_database_cls = support_classes['mib-synchronizer']['database']
+
+        # Alarm Synchronization Database
+        self._alarm_db = None
+        self._alarm_database_cls = support_classes['alarm-synchronizer']['database']
+
+    @property
+    def core(self):
+        """ Return a reference to the VOLTHA Core component"""
+        return self._core
+
+    @property
+    def database_class(self):
+        return self._mib_database_cls
+
+    # TODO: Need to deprecate this. ImageAgent is using it and should not
+    @property
+    def database(self):
+        return self._mib_db
+        
+    def start(self):
+        """
+        Start OpenOMCI
+        """
+        if self._started:
+            return
+
+        self.log.debug('OpenOMCIAgent.start')
+        self._started = True
+
+        try:
+            # Create all databases as needed. This should be done before
+            # State machines are started for the first time
+
+            if self._mib_db is None:
+                self._mib_db = self._mib_database_cls(self)
+
+            if self._alarm_db is None:
+                self._alarm_db = self._alarm_database_cls(self)
+
+            # Start/restore databases
+
+            self._mib_db.start()
+            self._alarm_db.start()
+
+            for device in self._devices.itervalues():
+                device.start()
+
+        except Exception as e:
+            self.log.exception('startup', e=e)
+
+    def stop(self):
+        """
+        Shutdown OpenOMCI
+        """
+        if not self._started:
+            return
+
+        self.log.debug('stop')
+        self._started = False
+        self._event_bus = None
+
+        # ONUs OMCI shutdown
+        for device in self._devices.itervalues():
+            device.stop()
+
+        # DB shutdown
+        self._mib_db.stop()
+        self._alarm_db.stop()
+
+    def mk_event_bus(self):
+        """ Get the event bus for OpenOMCI"""
+        if self._event_bus is None:
+            from voltha.extensions.omci.openomci_event_bus import OpenOmciEventBus
+            self._event_bus = OpenOmciEventBus()
+
+        return self._event_bus
+
+    def advertise(self, event_type, data):
+        """
+        Advertise an OpenOMCI event on the Kafka bus
+        :param event_type: (int) Event Type (enumeration from OpenOMCI protobuf definitions)
+        :param data: (Message, dict, ...) Associated data (will be converted to a string)
+        """
+        if self._started:
+            try:
+                self.mk_event_bus().advertise(event_type, data)
+
+            except Exception as e:
+                self.log.exception('advertise-failure', e=e)
+
+    def add_device(self, device_id, adapter_agent, custom_me_map=None,
+                   support_classes=OpenOmciAgentDefaults):
+        """
+        Add a new ONU to be managed.
+
+        To provide vendor-specific or custom Managed Entities, create your own Entity
+        ID to class mapping dictionary.
+
+        Since ONU devices can be added at any time (even during Device Handler
+        startup), the ONU device handler is responsible for calling start()/stop()
+        for this object.
+
+        :param device_id: (str) Device ID of ONU to add
+        :param adapter_agent: (AdapterAgent) Adapter agent for ONU
+        :param custom_me_map: (dict) Additional/updated ME to add to class map
+        :param support_classes: (dict) State machines and tasks for this ONU
+
+        :return: (OnuDeviceEntry) The ONU device
+        """
+        self.log.debug('OpenOMCIAgent.add-device', device_id=device_id)
+
+        device = self._devices.get(device_id)
+
+        if device is None:
+            device = OnuDeviceEntry(self, device_id, adapter_agent, custom_me_map,
+                                    self._mib_db, self._alarm_db, support_classes, clock=self.reactor)
+
+            self._devices[device_id] = device
+
+        return device
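+    # Typical device-handler usage (sketch; the custom ME map is optional):
+    #
+    #   onu_device = agent.add_device(device_id, adapter_agent,
+    #                                 custom_me_map=my_vendor_me_map)
+    #   onu_device.start()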
+
+    def remove_device(self, device_id, cleanup=False):
+        """
+        Remove a managed ONU
+
+        :param device_id: (str) Device ID of ONU to remove
+        :param cleanup: (bool) If true, scrub any state related information
+        """
+        self.log.debug('remove-device', device_id=device_id, cleanup=cleanup)
+
+        device = self._devices.get(device_id)
+
+        if device is not None:
+            device.stop()
+
+            if cleanup:
+                del self._devices[device_id]
+
+    def device_ids(self):
+        """
+        Get an immutable set of device IDs managed by this OpenOMCI instance
+
+        :return: (frozenset) Set of device IDs (str)
+        """
+        return frozenset(self._devices.keys())
+
+    def get_device(self, device_id):
+        """
+        Get ONU device entry.  For external (non-OpenOMCI) users, the returned
+        ONU Device should be used for read-only activity.
+
+        :param device_id: (str) ONU Device ID
+
+        :return: (OnuDeviceEntry) ONU Device entry
+        :raises KeyError: If device does not exist
+        """
+        return self._devices[device_id]
diff --git a/python/extensions/omci/openomci_event_bus.py b/python/extensions/omci/openomci_event_bus.py
new file mode 100644
index 0000000..5c67865
--- /dev/null
+++ b/python/extensions/omci/openomci_event_bus.py
@@ -0,0 +1,54 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.protobuf.message import Message
+from simplejson import dumps
+from common.event_bus import EventBusClient
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEvent
+from voltha.protos.omci_alarm_db_pb2 import AlarmOpenOmciEvent
+from common.utils.json_format import MessageToDict
+
+
+class OpenOmciEventBus(object):
+    """ Event bus for publishing OpenOMCI related events. """
+    __slots__ = (
+        '_event_bus_client',  # The event bus client used to publish events.
+        '_topic'              # the topic to publish to
+    )
+
+    def __init__(self):
+        self._event_bus_client = EventBusClient()
+        self._topic = 'openomci-events'
+
+    @staticmethod
+    def message_to_dict(m):
+        return MessageToDict(m, True, True, False)
+
+    def advertise(self, event_type, data):
+        if isinstance(data, Message):
+            msg = dumps(MessageToDict(data, True, True))
+        elif isinstance(data, dict):
+            msg = dumps(data)
+        else:
+            msg = str(data)
+
+        event_func = AlarmOpenOmciEvent if 'AlarmSynchronizer' in msg \
+            else OpenOmciEvent
+        event = event_func(
+                type=event_type,
+                data=msg
+        )
+
+        self._event_bus_client.publish(self._topic, event)
diff --git a/python/extensions/omci/state_machines/__init__.py b/python/extensions/omci/state_machines/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/omci/state_machines/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/omci/state_machines/alarm_sync.py b/python/extensions/omci/state_machines/alarm_sync.py
new file mode 100644
index 0000000..c7b7d64
--- /dev/null
+++ b/python/extensions/omci/state_machines/alarm_sync.py
@@ -0,0 +1,670 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from datetime import datetime
+from transitions import Machine
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_cc import OmciCCRxEvents, OMCI_CC, RX_RESPONSE_KEY
+from voltha.extensions.omci.omci_messages import OmciGetAllAlarmsResponse
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.database.alarm_db_ext import AlarmDbExternal
+from voltha.extensions.omci.database.mib_db_api import ATTRIBUTES_KEY
+from voltha.extensions.omci.omci_entities import CircuitPack, PptpEthernetUni, OntG, AniG
+
+from common.event_bus import EventBusClient
+from voltha.protos.omci_alarm_db_pb2 import AlarmOpenOmciEventType
+
+RxEvent = OmciCCRxEvents
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class AlarmSynchronizer(object):
+    """
+    OpenOMCI Alarm Synchronizer state machine
+    """
+    DEFAULT_STATES = ['disabled', 'starting', 'auditing', 'in_sync']
+
+    DEFAULT_TRANSITIONS = [
+        {'trigger': 'start', 'source': 'disabled', 'dest': 'starting'},
+
+        {'trigger': 'audit_alarm', 'source': 'starting', 'dest': 'auditing'},
+        {'trigger': 'sync_alarm', 'source': 'starting', 'dest': 'in_sync'},
+
+        {'trigger': 'success', 'source': 'auditing', 'dest': 'in_sync'},
+        {'trigger': 'audit_alarm', 'source': 'auditing', 'dest': 'auditing'},
+        {'trigger': 'failure', 'source': 'auditing', 'dest': 'auditing'},
+
+        {'trigger': 'audit_alarm', 'source': 'in_sync', 'dest': 'auditing'},
+
+        # Do wildcard 'stop' trigger last so it covers all previous states
+        {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+    ]
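+    # Resulting flow: disabled -> starting -> (auditing <-> in_sync); the
+    # periodic 'audit_alarm' trigger re-enters 'auditing', and 'stop' forces
+    # 'disabled' from any state.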
+    DEFAULT_TIMEOUT_RETRY = 15     # Seconds to delay after task failure/timeout
+    DEFAULT_AUDIT_DELAY = 180      # Periodic tick to audit the ONU's alarm table
+
+    def __init__(self, agent, device_id, alarm_sync_tasks, db,
+                 advertise_events=False,
+                 states=DEFAULT_STATES,
+                 transitions=DEFAULT_TRANSITIONS,
+                 initial_state='disabled',
+                 timeout_delay=DEFAULT_TIMEOUT_RETRY,
+                 audit_delay=DEFAULT_AUDIT_DELAY):
+        """
+        Class initialization
+
+        :param agent: (OpenOmciAgent) Agent
+        :param device_id: (str) ONU Device ID
+        :param db: (MibDbApi) MIB/Alarm Database
+        :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+        :param alarm_sync_tasks: (dict) Tasks to run
+        :param states: (list) List of valid states
+        :param transitions: (dict) Dictionary of triggers and state changes
+        :param initial_state: (str) Initial state machine state
+        :param timeout_delay: (int/float) Number of seconds after a timeout to attempt
+                                          a retry (goes back to starting state)
+        :param audit_delay: (int) Seconds between Alarm audits while in sync. Set to
+                                  zero to disable audit. An operator can request
+                                  an audit manually by calling 'self.audit_alarm'
+        """
+
+        self.log = structlog.get_logger(device_id=device_id)
+
+        self._agent = agent
+        self._device_id = device_id
+        self._device = None
+        self._database = db
+        self._timeout_delay = timeout_delay
+        self._audit_delay = audit_delay
+        self._resync_task = alarm_sync_tasks['alarm-resync']
+        self._advertise_events = advertise_events
+        self._alarm_manager = None
+        self._onu_id = None
+        self._uni_ports = list()
+        self._ani_ports = list()
+
+        self._deferred = None
+        self._current_task = None
+        self._task_deferred = None
+        self._last_alarm_sequence_value = 0
+        self._device_in_db = False
+
+        self._event_bus = EventBusClient()
+        self._omci_cc_subscriptions = {               # RxEvent.enum -> Subscription Object
+            RxEvent.Get_ALARM_Get: None,
+            RxEvent.Alarm_Notification: None
+        }
+        self._omci_cc_sub_mapping = {
+            RxEvent.Get_ALARM_Get: self.on_alarm_update_response,
+            RxEvent.Alarm_Notification: self.on_alarm_notification
+        }
+
+        # Statistics and attributes
+        # TODO: add any others if it will support problem diagnosis
+
+        # Set up state machine to manage states
+        self.machine = Machine(model=self, states=states,
+                               transitions=transitions,
+                               initial=initial_state,
+                               queued=True,
+                               name='{}-{}'.format(self.__class__.__name__,
+                                                   device_id))
+
+    def _cancel_deferred(self):
+        d1, self._deferred = self._deferred, None
+        d2, self._task_deferred = self._task_deferred, None
+
+        for d in [d1, d2]:
+            try:
+                if d is not None and not d.called:
+                    d.cancel()
+            except:
+                pass
+
+    def __str__(self):
+        return 'Alarm Synchronizer: Device ID: {}, State:{}'.format(self._device_id, self.state)
+
+    def delete(self):
+        """
+        Cleanup any state information
+        """
+        self.stop()
+        db, self._database = self._database, None
+
+        if db is not None:
+            db.remove(self._device_id)
+
+    @property
+    def device_id(self):
+        return self._device_id
+
+    @property
+    def last_alarm_sequence(self):
+        return self._last_alarm_sequence_value
+
+    def reset_alarm_sequence(self):
+        if self._last_alarm_sequence_value != 0:
+            self._last_alarm_sequence_value = 0
+
+    def increment_alarm_sequence(self):
+        self._last_alarm_sequence_value += 1
+        if self._last_alarm_sequence_value > 255:
+            self._last_alarm_sequence_value = 1
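+    # Note: the sequence number wraps from 255 back to 1; 0 only reappears
+    # after reset_alarm_sequence() (i.e. following a Get All Alarms request).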
+
+    @property
+    def advertise_events(self):
+        return self._advertise_events
+
+    @advertise_events.setter
+    def advertise_events(self, value):
+        if not isinstance(value, bool):
+            raise TypeError('advertise_events must be a boolean')
+        self._advertise_events = value
+
+    def advertise(self, event, info):
+        """Advertise an event on the OpenOMCI event bus"""
+        if self._advertise_events:
+            self._agent.advertise(event,
+                                  {
+                                      'state-machine': self.machine.name,
+                                      'info': info,
+                                      'time': str(datetime.utcnow())
+                                  })
+
+    def set_alarm_params(self, mgr=None, onu_id=None, uni_ports=None, ani_ports=None):
+        if mgr is not None:
+            self._alarm_manager = mgr
+
+        if onu_id is not None:
+            self._onu_id = onu_id
+
+        if uni_ports is not None:
+            assert isinstance(uni_ports, list)
+            self._uni_ports = uni_ports
+
+        if ani_ports is not None:
+            assert isinstance(ani_ports, list)
+            self._ani_ports = ani_ports
+
+    def on_enter_disabled(self):
+        """
+        State machine is being stopped
+        """
+        self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+        self._cancel_deferred()
+
+        task, self._current_task = self._current_task, None
+        if task is not None:
+            task.stop()
+
+        # Drop Response and Autonomous notification subscriptions
+        for event, sub in self._omci_cc_subscriptions.iteritems():
+            if sub is not None:
+                self._omci_cc_subscriptions[event] = None
+                self._device.omci_cc.event_bus.unsubscribe(sub)
+
+    def _seed_database(self):
+        if not self._device_in_db:
+            try:
+                try:
+                    self._database.start()
+                    self._database.add(self._device_id)
+                    self.log.debug('seed-db-does-not-exist', device_id=self._device_id)
+
+                except KeyError:
+                    # Device already is in database
+                    self.log.debug('seed-db-exist', device_id=self._device_id)
+
+                self._device_in_db = True
+
+            except Exception as e:
+                self.log.exception('seed-database-failure', e=e)
+
+    def on_enter_starting(self):
+        """
+        Determine ONU status and start Alarm Synchronization tasks
+        """
+        self._device = self._agent.get_device(self._device_id)
+        self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+        # Make sure root of external Alarm Database exists
+        self._seed_database()
+
+        # Set up Response and Autonomous notification subscriptions
+        try:
+            for event, sub in self._omci_cc_sub_mapping.iteritems():
+                if self._omci_cc_subscriptions[event] is None:
+                    self._omci_cc_subscriptions[event] = \
+                        self._device.omci_cc.event_bus.subscribe(
+                            topic=OMCI_CC.event_bus_topic(self._device_id, event),
+                            callback=sub)
+
+        except Exception as e:
+            self.log.exception('omci-cc-subscription-setup', e=e)
+
+        # Schedule first audit if enabled
+        if self._audit_delay > 0:
+            # Note using the shorter timeout delay here since this is the first
+            # audit after startup
+            self._deferred = reactor.callLater(self._timeout_delay, self.audit_alarm)
+        else:
+            self._deferred = reactor.callLater(0, self.sync_alarm)
+
+    def on_enter_in_sync(self):
+        """
+        Schedule a tick to occur to in the future to request an audit
+        """
+        self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+        if self._audit_delay > 0:
+            # Schedule the next periodic audit using the regular audit delay
+            self._deferred = reactor.callLater(self._audit_delay, self.audit_alarm)
+
+    def on_enter_auditing(self):
+        """
+        Begin a full alarm table sync, comparing all alarms
+        """
+        self.advertise(AlarmOpenOmciEventType.state_change, self.state)
+
+        def success(results):
+            self.log.debug('alarm-diff-success')
+            self._current_task = None
+
+            # Any differences found between ONU and OpenOMCI Alarm tables?
+            if results is None:
+                self._device.alarm_db_in_sync = True
+                self._deferred = reactor.callLater(0, self.success)
+            else:
+                # Reconcile the alarm table and re-run audit
+                self.reconcile_alarm_table(results)
+                self._deferred = reactor.callLater(5, self.audit_alarm)
+
+        def failure(reason):
+            self.log.info('alarm-update-failure', reason=reason)
+            self._current_task = None
+            self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+        self._current_task = self._resync_task(self._agent, self._device_id)
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def reconcile_alarm_table(self, results):
+        self.log.debug('alarm-reconcile', state=self.state, results=results)
+
+        onu_only = results['onu-only']
+        olt_only = results['olt-only']
+        attr_diffs = results['attr-diffs']
+        onu_db = results['onu-db']
+        olt_db = results['olt-db']
+
+        if any(item is not None for item in (onu_only, olt_only, attr_diffs)):
+            self._device.alarm_db_in_sync = False
+
+        # Compare the differences.  During upload, if there are no alarms at all,
+        # then the ONU alarm table retrieved may be empty (instead of MEs with all
+        # bits cleared) depending upon the ONU's OMCI Stack.
+
+        if onu_only is not None:
+            self.process_onu_only_diffs(onu_only, onu_db)
+
+        if olt_only is not None:
+            self.process_olt_only_diffs(olt_only)
+
+        if attr_diffs is not None:
+            self.process_attr_diffs(attr_diffs, olt_db, onu_db)
+
+    def process_onu_only_diffs(self, onu_only, onu_db):
+        """
+        ONU-only alarms will typically occur when doing the first audit, as our
+        database is clear and we are seeding the alarm table. Save the entries
+        and, for any alarm bits that are set, raise the corresponding alarm.
+
+        :param onu_only: (list) Tuples with [0]=class ID, [1]=entity ID
+        :param onu_db: (dict) ONU Alarm database from the alarm audit upload
+        """
+        for cid_eid in onu_only:
+            class_id = cid_eid[0]
+            entity_id = cid_eid[1]
+            try:
+                bitmap = onu_db[class_id][entity_id][ATTRIBUTES_KEY][AlarmDbExternal.ALARM_BITMAP_KEY]
+                self.process_alarm_data(class_id, entity_id, bitmap, -1)
+
+            except KeyError as e:
+                self.log.error('alarm-not-found', class_id=class_id, entity_id=entity_id, e=e)
+
+    def process_olt_only_diffs(self, olt_only):
+        """
+        OLT only alarms may occur if the alarm(s) are no longer active on the ONU
+        and the notification was missed. Process this by sending a cleared bitmap
+        for any alarm in the OLT database only
+
+        :param olt_only: (list) Tuples with [0]=class ID, [1]=entity ID
+        """
+        for cid_eid in olt_only:
+            # First process the alarm clearing
+            self.process_alarm_data(cid_eid[0], cid_eid[1], 0, -1)
+            # Now remove from alarm DB so we match the ONU alarm table
+            self._database.delete(self._device_id, cid_eid[0], cid_eid[1])
+
+    def process_attr_diffs(self, attr_diffs, olt_db, onu_db):
+        """
+        Mismatch in alarm settings. Note that the attribute should always be the
+        alarm bitmap attribute (long).  For differences, the ONU is always right
+
+        :param attr_diffs: (list(int,int,str)) [0]=class ID, [1]=entity ID, [2]=attribute name
+        :param olt_db: (dict) OLT Alarm database snapshot from the alarm audit
+        :param onu_db: (dict) ONU Alarm database from the alarm audit upload
+        """
+        for cid_eid_attr in attr_diffs:
+            class_id = cid_eid_attr[0]
+            entity_id = cid_eid_attr[1]
+
+            try:
+                assert AlarmDbExternal.ALARM_BITMAP_KEY == cid_eid_attr[2]
+                bitmap = onu_db[class_id][entity_id][ATTRIBUTES_KEY][AlarmDbExternal.ALARM_BITMAP_KEY]
+                self.process_alarm_data(class_id, entity_id, bitmap, -1)
+
+            except KeyError as e:
+                self.log.error('alarm-not-found', class_id=class_id, entity_id=entity_id, e=e)
+
+    def on_alarm_update_response(self, _topic, msg):
+        """
+        Process a Get All Alarms response
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-alarm-update-response', state=self.state, msg=msg)
+
+        if self._omci_cc_subscriptions[RxEvent.Get_ALARM_Get]:
+            if self.state == 'disabled':
+                self.log.error('rx-in-invalid-state', state=self.state)
+                return
+
+            try:
+                response = msg.get(RX_RESPONSE_KEY)
+
+                if isinstance(response, OmciFrame) and \
+                        isinstance(response.fields.get('omci_message'), OmciGetAllAlarmsResponse):
+                    # ONU will reset its last alarm sequence number to 0 on receipt of the
+                    # Get All Alarms request
+                    self.log.debug('received-alarm-response')
+                    self.reset_alarm_sequence()
+
+            except Exception as e:
+                self.log.exception('upload-alarm-failure', e=e)
+
+    def on_alarm_notification(self, _topic, msg):
+        """
+        Process an alarm Notification
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with keys:
+                    TX_REQUEST_KEY  -> None (this is an autonomous msg)
+                    RX_RESPONSE_KEY -> OmciMessage (Alarm notification frame)
+        """
+        self.log.debug('on-alarm-notification', state=self.state, msg=msg)
+
+        alarm_msg = msg.get(RX_RESPONSE_KEY)
+        if alarm_msg is not None:
+            omci_msg = alarm_msg.fields['omci_message'].fields
+            class_id = omci_msg['entity_class']
+            seq_no = omci_msg['alarm_sequence_number']
+
+            # Validate that this ME supports alarm notifications
+            if class_id not in self._device.me_map or \
+                    OP.AlarmNotification not in self._device.me_map[class_id].notifications or \
+                    len(self._device.me_map[class_id].alarms) == 0:
+                self.log.warn('invalid-alarm-notification', class_id=class_id)
+                return
+
+            self.process_alarm_data(class_id,
+                                    omci_msg['entity_id'],
+                                    omci_msg['alarm_bit_map'],
+                                    seq_no)
+
+    def process_alarm_data(self, class_id, entity_id, bitmap, msg_seq_no):
+        """
+        Process new alarm data
+
+        :param class_id: (int)  Class ID of alarm
+        :param entity_id: (int) Entity ID of alarm
+        :param bitmap: (long) Alarm bitmap value
+        :param msg_seq_no: (int) Alarm sequence number. -1 if generated during an audit
+        """
+        if msg_seq_no > 0:
+            # increment alarm number & compare to alarm # in message
+            # Signal early audit if no match and audits are enabled
+            self.increment_alarm_sequence()
+
+            if self.last_alarm_sequence != msg_seq_no and self._audit_delay > 0:
+                self._deferred = reactor.callLater(0, self.audit_alarm)
+
+        key = AlarmDbExternal.ALARM_BITMAP_KEY
+        prev_entry = self._database.query(self._device_id, class_id, entity_id)
+        prev_bitmap = 0
+        try:
+            # The alarm bitmap is nested within the entry's attributes dict
+            if len(prev_entry):
+                prev_bitmap = long(prev_entry['attributes'][key])
+        except Exception as e:
+            self.log.exception('alarm-prev-entry-collection-failure', class_id=class_id,
+                               device_id=self._device_id, entity_id=entity_id, value=bitmap, e=e)
+        # Save current entry before going on
+        try:
+            self._database.set(self._device_id, class_id, entity_id, {key: bitmap})
+
+        except Exception as e:
+            self.log.exception('alarm-save-failure', class_id=class_id,
+                               device_id=self._device_id, entity_id=entity_id, value=bitmap, e=e)
+
+        if self._alarm_manager is not None:
+            # Generate the sets of alarm numbers raised in the current and previous bitmaps
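+            # (Alarm bit 0 is the most-significant bit of the 224-bit bitmap,
+            # hence the (223 - alarm_no) shift when testing each bit.)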
+            previously_raised = {alarm_no for alarm_no in xrange(224)
+                                 if prev_bitmap & (1L << (223-alarm_no)) != 0L}
+
+            currently_raised = {alarm_no for alarm_no in xrange(224)
+                                if bitmap & (1L << (223-alarm_no)) != 0L}
+
+            newly_cleared = previously_raised - currently_raised
+            newly_raised = currently_raised - previously_raised
+
+            # Generate the set/clear alarms now
+            for alarm_number in newly_cleared:
+                reactor.callLater(0, self.clear_alarm, class_id, entity_id, alarm_number)
+
+            for alarm_number in newly_raised:
+                reactor.callLater(0, self.raise_alarm, class_id, entity_id, alarm_number)
+
+    def get_alarm_description(self, class_id, alarm_number):
+        """
+        Get the alarm description, both as a printable-string and also a CamelCase value
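+
+        For example, a hypothetical alarm description 'low-rx-optical-power'
+        is returned as ('low-rx-optical-power', 'LowRxOpticalPower').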
+        """
+        if alarm_number in self._device.me_map[class_id].alarms:
+            description = self._device.me_map[class_id].alarms[alarm_number]
+        elif alarm_number <= 207:
+            description = 'Reserved alarm {}'.format(alarm_number)
+        else:
+            description = 'Vendor specific alarm {}'.format(alarm_number)
+
+        # For CamelCase, replace hyphens with spaces before camel casing the string
+        return description, description.replace('-', ' ').title().replace(' ', '')
+
+    def raise_alarm(self, class_id, entity_id, alarm_number):
+        """
+        Raise an alarm on the ONU
+
+        :param class_id: (int)  Class ID of the Alarm ME
+        :param entity_id: (int) Entity ID of the Alarm
+        :param alarm_number: (int) Alarm number (bit) that is alarmed
+        """
+        description, name = self.get_alarm_description(class_id, alarm_number)
+
+        self.log.warn('alarm-set', class_id=class_id, entity_id=entity_id,
+                      alarm_number=alarm_number, name=name, description=description)
+
+        if self._alarm_manager is not None:
+            alarm = self.omci_alarm_to_onu_alarm(class_id, entity_id, alarm_number)
+            if alarm is not None:
+                alarm.raise_alarm()
+
+    def clear_alarm(self, class_id, entity_id, alarm_number):
+        """
+        Lower/clear an alarm on the ONU
+
+        :param class_id: (int)  Class ID of the Alarm ME
+        :param entity_id: (int) Entity ID of the Alarm
+        :param alarm_number: (int) Alarm number (bit) that is alarmed
+        """
+        description, name = self.get_alarm_description(class_id, alarm_number)
+
+        self.log.info('alarm-cleared', class_id=class_id, entity_id=entity_id,
+                      alarm_number=alarm_number, name=name, description=description)
+
+        if self._alarm_manager is not None:
+            alarm = self.omci_alarm_to_onu_alarm(class_id, entity_id, alarm_number)
+            if alarm is not None:
+                alarm.clear_alarm()
+
+    def query_mib(self, class_id=None, instance_id=None):
+        """
+        Get Alarm database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises DatabaseStateError: If the database is not enabled or does not exist
+        """
+        from voltha.extensions.omci.database.mib_db_api import DatabaseStateError
+
+        self.log.debug('query', class_id=class_id, instance_id=instance_id)
+        if self._database is None:
+            raise DatabaseStateError('Database does not yet exist')
+
+        return self._database.query(self._device_id, class_id=class_id, instance_id=instance_id)
+
+    def omci_alarm_to_onu_alarm(self, class_id, entity_id, alarm_number):
+        """
+        Map an OMCI Alarm Notification alarm to the proper ONU Alarm Library alarm
+
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME Class instance ID
+        :param alarm_number: (int) Alarm Number
+        :return: (AlarmBase) Alarm library alarm or None if not supported/found
+        """
+        from voltha.extensions.alarms.onu.onu_dying_gasp_alarm import OnuDyingGaspAlarm
+        from voltha.extensions.alarms.onu.onu_los_alarm import OnuLosAlarm
+        from voltha.extensions.alarms.onu.onu_equipment_alarm import OnuEquipmentAlarm
+        from voltha.extensions.alarms.onu.onu_selftest_failure_alarm import OnuSelfTestFailureAlarm
+        from voltha.extensions.alarms.onu.onu_laser_eol_alarm import OnuLaserEolAlarm
+        from voltha.extensions.alarms.onu.onu_laser_bias_current_alarm import OnuLaserBiasAlarm
+        from voltha.extensions.alarms.onu.onu_temp_yellow_alarm import OnuTempYellowAlarm
+        from voltha.extensions.alarms.onu.onu_temp_red_alarm import OnuTempRedAlarm
+        from voltha.extensions.alarms.onu.onu_voltage_yellow_alarm import OnuVoltageYellowAlarm
+        from voltha.extensions.alarms.onu.onu_voltage_red_alarm import OnuVoltageRedAlarm
+        from voltha.extensions.alarms.onu.onu_low_rx_optical_power_alarm import OnuLowRxOpticalAlarm
+        from voltha.extensions.alarms.onu.onu_high_rx_optical_power_alarm import OnuHighRxOpticalAlarm
+        from voltha.extensions.alarms.onu.onu_low_tx_optical_power_alarm import OnuLowTxOpticalAlarm
+        from voltha.extensions.alarms.onu.onu_high_tx_optical_power_alarm import OnuHighTxOpticalAlarm
+
+        mgr = self._alarm_manager
+        if class_id in (CircuitPack.class_id, PptpEthernetUni.class_id):
+            intf_id = self.select_uni_port(class_id, entity_id)
+
+        elif class_id in (AniG.class_id, OntG.class_id):
+            intf_id = self.select_ani_port(class_id, entity_id)
+
+        else:
+            self.log.error('unsupported-class-id', class_id=class_id, alarm_number=alarm_number)
+            return
+
+        alarm_map = {
+            (CircuitPack.class_id, 0): OnuEquipmentAlarm,
+            (CircuitPack.class_id, 2): OnuSelfTestFailureAlarm,
+            (CircuitPack.class_id, 3): OnuLaserEolAlarm,
+            (CircuitPack.class_id, 4): OnuTempYellowAlarm,
+            (CircuitPack.class_id, 5): OnuTempRedAlarm,
+
+            (PptpEthernetUni.class_id, 0): OnuLosAlarm,
+
+            (OntG.class_id, 0): OnuEquipmentAlarm,
+            (OntG.class_id, 6): OnuSelfTestFailureAlarm,
+            (OntG.class_id, 7): OnuDyingGaspAlarm,
+            (OntG.class_id, 8): OnuTempYellowAlarm,
+            (OntG.class_id, 9): OnuTempRedAlarm,
+            (OntG.class_id, 10): OnuVoltageYellowAlarm,
+            (OntG.class_id, 11): OnuVoltageRedAlarm,
+
+            (AniG.class_id, 0): OnuLowRxOpticalAlarm,
+            (AniG.class_id, 1): OnuHighRxOpticalAlarm,
+            (AniG.class_id, 4): OnuLowTxOpticalAlarm,
+            (AniG.class_id, 5): OnuHighTxOpticalAlarm,
+            (AniG.class_id, 6): OnuLaserBiasAlarm,
+        }
+        alarm_cls = alarm_map.get((class_id, alarm_number))
+
+        return alarm_cls(mgr, self._onu_id, intf_id) if alarm_cls is not None else None
+
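+    # Illustrative mapping (values from the table above): an Alarm Notification
+    # for AniG instance 0, alarm number 0 resolves to
+    # OnuLowRxOpticalAlarm(mgr, self._onu_id, intf_id); unmapped pairs yield None.
+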
+    def select_uni_port(self, class_id, entity_id):
+        """
+        Select the best possible UNI Port (logical) interface number for this ME class and
+        entity ID.
+
+        This base implementation will assume that a UNI Port object has been registered
+        on startup and supports both an 'entity_id' and also 'logical_port_number'
+        property.  See both the Adtran and Broadcom OpenOMCI ONU DA for an example
+        of this UNI port object.
+
+        :param class_id: (int)  ME Class ID to which the alarm belongs
+        :param entity_id: (int) Instance ID
+
+        :return: (int) Logical Port number for the UNI port
+        """
+        # NOTE: Of the class IDs supported in this version of the code, only the
+        #       CircuitPack and PptpEthernetUni MEs will map to the UNI port
+        assert class_id in (CircuitPack.class_id, PptpEthernetUni.class_id)
+
+        return next((uni.logical_port_number for uni in self._uni_ports if
+                     uni.entity_id == entity_id), None)
+
+    def select_ani_port(self, class_id, _entity_id):
+        """
+        Select the best possible ANI Port (physical) interface number for this ME class and
+        entity ID.
+
+        Currently the base implementation assumes only a single PON port and it will be
+        chosen.  A future implementation may want to have a PON Port object (similar to
+        the Broadcom Open OMCI and Adtran ONU's UNI Port object) that provides a match
+        for entity ID.  This does assume that the PON port object supports a property
+        of 'port_number' to return the physical port number.
+
+        :param class_id: (int)  ME Class ID to which the alarm belongs
+        :param _entity_id: (int) Instance ID
+
+        :return: (int) Physical port number for the ANI port
+        """
+        # NOTE: Of the class IDs supported in this version of the code, only the AniG
+        #       MEs will map to the ANI port. For some of the OntG alarms (Dying Gasp),
+        #       the PON interface will also be selected.
+        assert class_id in (AniG.class_id, OntG.class_id)
+
+        return self._ani_ports[0].port_number if len(self._ani_ports) else None
diff --git a/python/extensions/omci/state_machines/image_agent.py b/python/extensions/omci/state_machines/image_agent.py
new file mode 100755
index 0000000..e6d5884
--- /dev/null
+++ b/python/extensions/omci/state_machines/image_agent.py
@@ -0,0 +1,1024 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import structlog
+from datetime import datetime, timedelta
+from binascii import crc32, hexlify
+from transitions import Machine
+from transitions.extensions.nesting import HierarchicalMachine as HMachine
+from twisted.python import failure
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred, CancelledError
+from common.event_bus import EventBusClient
+from voltha.protos.voltha_pb2 import ImageDownload
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes, AttributeAccess, OmciSectionDataSize
+from voltha.extensions.omci.omci_entities import SoftwareImage
+from voltha.extensions.omci.omci_cc import DEFAULT_OMCI_TIMEOUT
+from voltha.extensions.omci.omci_messages import OmciEndSoftwareDownloadResponse, OmciActivateImageResponse
+
+###################################################################################
+##              OLT out-of-band download image procedure
+###################################################################################
+
+class ImageDownloadeSTM(object):
+    DEFAULT_STATES = ['disabled', 'downloading', 'validating', 'done']
+    DEFAULT_TRANSITIONS = [
+        {'trigger': 'start', 'source': 'disabled', 'dest': 'downloading'},
+        {'trigger': 'stop',  'source': ['downloading', 'validating', 'done'], 'dest': 'disabled'},
+        {'trigger': 'dw_success', 'source': 'downloading', 'dest': 'validating'},
+        {'trigger': 'dw_fail', 'source': 'downloading', 'dest': 'done'},
+        {'trigger': 'validate_success', 'source': 'validating', 'dest': 'done'},
+    ]
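+    # A successful run walks disabled --start--> downloading --dw_success-->
+    # validating --validate_success--> done; 'dw_fail' short-circuits a failed
+    # download straight to 'done' and 'stop' returns the machine to 'disabled'.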
+    DEFAULT_TIMEOUT_RETRY = 1000      # Seconds before a download attempt is timed out
+
+    # def __init__(self, omci_agent, dev_id, local_name, local_dir, remote_url, download_task, 
+    def __init__(self, omci_agent, image_download, 
+                     download_task_cls, 
+                     states=DEFAULT_STATES,
+                     transitions=DEFAULT_TRANSITIONS,
+                     initial_state='disabled',
+                     timeout_delay=DEFAULT_TIMEOUT_RETRY,
+                     advertise_events=True, clock=None):
+        """
+        :param omci_agent: (OpenOMCIAgent)
+        :param image_download: (ImageDownload)
+                               ImageDownload.id  : device id
+                               ImageDownload.name: file name of the image
+                               ImageDownload.url : URL to download the image from the server
+                               ImageDownload.local_dir: local directory of the image file
+        """
+        self.log = structlog.get_logger(device_id=image_download.id)
+        self._agent = omci_agent
+        # self._imgdw = ImageDownload()
+        # self._imgdw.name = local_name
+        # self._imgdw.id   = dev_id
+        # self._imgdw.url  = remote_url
+        # self._imgdw.local_dir = local_dir
+        self._imgdw = image_download
+        # self._imgdw.state = ImageDownload.DOWNLOAD_UNKNOWN   # voltha_pb2
+
+        self._download_task_cls = download_task_cls
+        self._timeout_delay   = timeout_delay
+
+        self._current_task  = None
+        self._task_deferred = None
+        self._ret_deferred  = None
+        self._timeout_dc = None    # DelayedCall
+        self._advertise_events = advertise_events
+        self.reactor = clock if clock is not None else reactor 
+
+        self.log.debug("ImageDownloadeSTM", image_download=self._imgdw)
+        self.machine = Machine(model=self, states=states,
+                               transitions=transitions,
+                               initial=initial_state,
+                               queued=True,
+                               name='{}-{}'.format(self.__class__.__name__, self._imgdw.id))
+    # @property
+    # def name(self):
+    #     return self._imgdw.name
+
+    def _cancel_timeout(self):
+        d, self._timeout_dc = self._timeout_dc, None
+        if d is not None and not d.called:
+            d.cancel()
+
+    @property
+    def status(self):
+        return self._imgdw
+
+    @property
+    def deferred(self):
+        return self._ret_deferred
+        
+    def advertise(self, event, info):
+        """Advertise an event on the OpenOMCI event bus"""
+        if self._advertise_events:
+            self._agent.advertise(event,
+                                  {
+                                      'state-machine': self.machine.name,
+                                      'info': info,
+                                      'time': str(datetime.utcnow())
+                                  })
+
+    # def reset(self):
+    #     """
+    #     Reset the state machine to its initial state
+    #     It is used to clear a failed result from the last download
+    #     """
+    #     self.log.debug('reset download', image_download=self._imgdw)
+    #     if self._current_task is not None:
+    #         self._current_task.stop()
+            
+    #     self._cancel_deferred()
+        
+    #     if self._ret_deferred is not None:
+    #         self._ret_deferred.cancel()
+    #         self._ret_deferred = None
+
+    #     self.stop()
+    #     self._imgdw.state = ImageDownload.DOWNLOAD_UNKNOWN
+        
+    def get_file(self):
+        """
+          Return a Deferred object.
+          The caller registers callbacks on the Deferred to get notified once the image is available
+        """
+        # self.log.debug('get_file', image_download=self._imgdw)
+        if self._ret_deferred is None or self._ret_deferred.called:
+            self._ret_deferred = Deferred()
+            
+        if self._imgdw.state == ImageDownload.DOWNLOAD_SUCCEEDED:
+            self.log.debug('Image Available')
+            self.reactor.callLater(0, self._ret_deferred.callback, self._imgdw)
+        elif self._imgdw.state == ImageDownload.DOWNLOAD_FAILED or self._imgdw.state == ImageDownload.DOWNLOAD_UNSUPPORTED:
+            self.log.debug('Image does not exist')
+            self.reactor.callLater(0, self._ret_deferred.errback, failure.Failure(Exception('Image Download Failed ' + self._imgdw.name)))
+        elif self._imgdw.state == ImageDownload.DOWNLOAD_UNKNOWN or self._imgdw.state == ImageDownload.DOWNLOAD_REQUESTED:
+            self.log.debug('Start Image STM')
+            self._imgdw.state = ImageDownload.DOWNLOAD_STARTED
+            self.reactor.callLater(0, self.start)
+        else:
+            self.log.debug('NO action', state=self._imgdw.state)
+            
+        return self._ret_deferred
+            
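+    # Illustrative caller-side use (hypothetical names): get_file() may be called
+    # before, during, or after the download; the returned Deferred fires either way.
+    #
+    #     d = dwld_sm.get_file()
+    #     d.addCallbacks(lambda img: use_image(img.local_dir, img.name),
+    #                    lambda why: log.error('download-failed', reason=why))
+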
+    def timeout(self):
+        self.log.debug('Image Download Timeout', download_task=self._current_task)
+        if self._current_task:
+            self.reactor.callLater(0, self._current_task.stop)
+            self._current_task = None
+            
+    def on_enter_downloading(self):
+        self.log.debug("on_enter_downloading")
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        def success(results):
+            self.log.debug('image-download-success', results=results)
+            self._imgdw.state = ImageDownload.DOWNLOAD_SUCCEEDED
+            self._imgdw.reason = ImageDownload.NO_ERROR
+            self._current_task = None
+            self._task_deferred = None
+            self.dw_success()
+
+        def failed(reason):
+            self.log.info('image-download-failure', reason=reason)
+            if self._imgdw.state == ImageDownload.DOWNLOAD_STARTED:
+                self._imgdw.state = ImageDownload.DOWNLOAD_FAILED
+            # errbacks receive a twisted Failure, so test the wrapped exception
+            # with Failure.check() rather than isinstance()
+            if reason.check(CancelledError):
+                self._imgdw.reason = ImageDownload.CANCELLED
+            self._current_task = None
+            self._task_deferred = None
+            self.dw_fail()
+
+        self._device = self._agent.get_device(self._imgdw.id)
+        self._current_task = self._download_task_cls(self._agent, self._imgdw, self.reactor)
+
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failed)
+        self._imgdw.state = ImageDownload.DOWNLOAD_STARTED
+
+        if self._timeout_delay > 0:
+            self._timeout_dc = self.reactor.callLater(self._timeout_delay, self.timeout)
+
+    def on_enter_validating(self):
+        self.log.debug("on_enter_validating")
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self.validate_success()
+
+    def on_enter_done(self):
+        self.log.debug("on_enter_done")
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_timeout()
+
+        d, self._ret_deferred = self._ret_deferred, None
+        if d is not None:
+            if self._imgdw.state == ImageDownload.DOWNLOAD_SUCCEEDED:
+                self.reactor.callLater(0, d.callback, self._imgdw)
+            else:  # failed
+                if self._imgdw.reason == ImageDownload.CANCELLED:
+                    self.reactor.callLater(0, d.cancel)
+                else:
+                    self.reactor.callLater(0, d.errback, failure.Failure(Exception('Image Download Failed ' + self._imgdw.name)))
+            
+    def on_enter_disabled(self):
+        self.log.debug("on_enter_disabled")
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        self._cancel_timeout()
+        if self._current_task is not None:
+            self.reactor.callLater(0, self._current_task.stop)
+            self._current_task = None
+
+        if self._ret_deferred:
+            self.reactor.callLater(0, self._ret_deferred.cancel)
+            self._ret_deferred = None
+            
+        # remove local file fragments if download failed
+        file_path = self._imgdw.local_dir + '/' + self._imgdw.name
+        if self._imgdw.state != ImageDownload.DOWNLOAD_SUCCEEDED and os.path.exists(file_path):
+            os.remove(file_path)            
+        self._imgdw.state = ImageDownload.DOWNLOAD_UNKNOWN
+
+###################################################################################
+##              OMCI Software Image Download Procedure
+###################################################################################
+
+class OmciSoftwareImageDownloadSTM(object):
+    
+    OMCI_SWIMG_DOWNLOAD_TIMEOUT = 5400      # TODO: Seconds for the full download procedure, to avoid errors that cause an infinite download
+    OMCI_SWIMG_DOWNLOAD_WINDOW_SIZE = 32
+    OMCI_SWIMG_WINDOW_RETRY_MAX = 2
+    OMCI_SWIMG_ACTIVATE_RETRY_MAX = 2
+    OMCI_SWIMG_ACTIVATE_TRANSITIONS_TIMEOUT = 10      # Seconds to allow for activate/reboot state transitions
+
+    # def __init__(self, omci_agent, dev_id, img_path, 
+    def __init__(self, image_id, omci_agent, image_dnld,
+                     window_size=OMCI_SWIMG_DOWNLOAD_WINDOW_SIZE,
+                     timeout_delay=OMCI_SWIMG_DOWNLOAD_TIMEOUT,
+                     advertise_events=True,
+                     clock=None):
+        """
+        :param omci_agent: (OpenOMCIAgent)
+        :param image_dnld: (ImageDownload)
+                           ImageDownload.id  : device id
+                           ImageDownload.name: file name of the image
+                           ImageDownload.url : URL to download the image from the server
+                           ImageDownload.local_dir: local directory of the image file
+        :param window_size: (int) window size of the OMCI download procedure
+        """
+        self.log = structlog.get_logger(device_id=image_dnld.id)
+        self._omci_agent = omci_agent
+        self._image_download = image_dnld
+        self._timeout = timeout_delay
+        self._timeout_dc = None
+        self._window_size = window_size
+        self.reactor = clock if clock is not None else reactor
+        self._offset = 0
+        # self._win_section = 0
+        self._win_retry = 0
+        self._device_id = image_dnld.id
+        self._device = omci_agent.get_device(image_dnld.id)
+        self.__init_state_machine()
+        self._ret_deferred = None
+        self._image_id = image_id    # Target software image entity ID
+        self._image_file = image_dnld.local_dir + '/' + image_dnld.name
+        self._image_obj = open(self._image_file, mode='rb')
+        self._image_size = os.path.getsize(self._image_file)
+        self._crc32 = 0
+        self._win_crc32 = 0
+        self._win_data = None
+        self._current_deferred = None
+        self._result = None    # ReasonCodes
+        self.crctable = []
+        self._crctable_init = False
+        self._actimg_retry_max = OmciSoftwareImageDownloadSTM.OMCI_SWIMG_ACTIVATE_RETRY_MAX
+        self._actimg_retry = 0
+        self.log.debug("DownloadSTM", image=self._image_file, image_size=self._image_size)
+ 
+    def __init_state_machine(self):
+
+        #### Download Window Sub State Machine ####
+        OMCI_DOWNLOAD_WINDOW_STATE = ['init_window', 'sending_sections', 'window_success', 'window_failed']
+        OMCI_DOWNLOAD_WINDOW_TRANSITIONS = [
+            {'trigger': 'send_sections',  'source': 'init_window',     'dest': 'sending_sections'},
+            # {'trigger': 'send_section_last',  'source': 'start_section', 'dest': 'last_section'  },
+            {'trigger': 'rx_ack_success', 'source': 'sending_sections', 'dest': 'window_success' },
+            {'trigger': 'rx_ack_failed',  'source': 'sending_sections', 'dest': 'window_failed'  },
+            # {'trigger': 'retry_window',   'source': 'window_failed', 'dest': 'start_section'  },
+            {'trigger': 'reset_window',   'source': '*',               'dest': 'init_window'    }
+        ]    
+        self.win_machine = HMachine(model=self, 
+                                    states=OMCI_DOWNLOAD_WINDOW_STATE,
+                                    transitions=OMCI_DOWNLOAD_WINDOW_TRANSITIONS,
+                                    initial='init_window',
+                                    queued=True,
+                                    name='{}-window_section_machine'.format(self.__class__.__name__))
+
+        #### Software Activation Sub State Machine ####
+        OMCI_SWIMG_ACTIVATE_STATES = ['init_act', 'activating', 'busy', 'rebooting', 'committing', 'done', 'failed']
+        OMCI_SWIMG_ACTIVATE_TRANSITIONS = [
+            {'trigger': 'activate', 'source': ['init_act', 'busy'], 'dest': 'activating'},
+            {'trigger': 'onu_busy', 'source': 'activating', 'dest': 'busy'},
+            {'trigger': 'reboot',   'source': 'activating', 'dest': 'rebooting'},
+            {'trigger': 'do_commit', 'source': ['activating', 'rebooting'], 'dest': 'committing'},
+            # {'trigger': 'commit_ok', 'source': 'committing', 'dest': 'done'},
+            {'trigger': 'reset_actimg', 'source': ['activating', 'rebooting', 'committing', 'failed'], 'dest': 'init_act'},
+            # {'trigger': 'actimg_fail', 'source': ['init_act', 'activating', 'rebooting', 'committing'], 'dest': 'failed'}
+        ]
+        
+        self.activate_machine = HMachine(model=self, 
+                                         states=OMCI_SWIMG_ACTIVATE_STATES,
+                                         transitions=OMCI_SWIMG_ACTIVATE_TRANSITIONS,
+                                         initial='init_act',
+                                         queued=True,
+                                         name='{}-activate_machine'.format(self.__class__.__name__))
+                                   
+        #### Main State Machine ####
+        OMCI_SWIMG_DOWNLOAD_STATES = [ 'init_image', 'starting_image', 'ending_image', 'endimg_busy', 'done_image',
+                                      {'name': 'dwin', 'children': self.win_machine}, 
+                                      {'name': 'actimg', 'children': self.activate_machine}
+                                     ]
+        OMCI_SWIMG_DOWNLOAD_TRANSITIONS = [
+            {'trigger': 'start_image',      'source': 'init_image',     'dest': 'starting_image' },
+            {'trigger': 'download_window',  'source': 'starting_image', 'dest': 'dwin_init_window' },
+            {'trigger': 'download_success', 'source': 'dwin',           'dest': 'ending_image'   },
+            {'trigger': 'onu_busy',         'source': 'ending_image',   'dest': 'endimg_busy'    },
+            {'trigger': 'retry_endimg',     'source': 'endimg_busy',    'dest': 'ending_image'   },
+            {'trigger': 'end_img_success',  'source': 'ending_image',   'dest': 'actimg_init_act'  },
+            {'trigger': 'activate_done',    'source': 'actimg',         'dest': 'done_image'     },
+            {'trigger': 'download_fail',    'source': '*',              'dest': 'done_image'     },
+            {'trigger': 'reset_image',      'source': '*',              'dest': 'init_image'     },
+        ]
+        
+        self.img_machine = HMachine(model=self, 
+                                   states=OMCI_SWIMG_DOWNLOAD_STATES,
+                                   transitions=OMCI_SWIMG_DOWNLOAD_TRANSITIONS,
+                                   initial='init_image',
+                                   queued=True,
+                                   name='{}-image_download_machine'.format(self.__class__.__name__))
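+        # With the HierarchicalMachine, nested states are addressed as
+        # '<parent>_<child>': the 'download_window' trigger above lands in
+        # 'dwin_init_window', whose entry hook is on_enter_dwin_init_window().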
+
+    # @property
+    # def image_filename(self):
+    #     return self._image_file
+
+    # @image_filename.setter
+    # def image_filename(self, value):
+    #     if self._image_fd is not None:
+    #         self._image_fd.close()
+    #     self._image_filename = value
+    #     self._image_fd = open(self._image_filename, mode='rb')
+    #     self._image_size = os.path.getsize(self._image_filename)
+    #    print("Set image file: " + self._image_filename + " size: " + str(self._image_size))
+
+    def __omci_start_download_resp_success(self, rx_frame):
+        self.log.debug("__omci_download_resp_success")
+        self.download_window()
+        return rx_frame
+
+    def __omci_start_download_resp_fail(self, fail):
+        self.log.debug("__omci_download_resp_fail", failure=fail)
+        self._result = ReasonCodes.ProcessingError
+        self.download_fail()
+
+    def __omci_end_download_resp_success(self, rx_frame):
+        self.log.debug("__omci_end_download_resp_success")
+        if rx_frame.fields['message_type'] == OmciEndSoftwareDownloadResponse.message_id: # 0x35
+            omci_data = rx_frame.fields['omci_message']
+            if omci_data.fields['result'] == 0: 
+                self.log.debug('OMCI End Image OK')
+                self._result = ReasonCodes.Success
+                self.end_img_success()
+            elif omci_data.fields['result'] == 6: # Device Busy
+                self.log.debug('OMCI End Image Busy')
+                self.onu_busy()
+            else:
+                self.log.debug('OMCI End Image Failed', reason=omci_data.fields['result'])
+                # treat any other result as a failure so the state machine does
+                # not hang waiting for a timeout that is never scheduled here
+                self._result = ReasonCodes.ProcessingError
+                self.download_fail()
+        else:
+            self.log.debug('Received unexpected OMCI message', message_type=rx_frame.fields['message_type'])
+
+    def __omci_end_download_resp_fail(self, fail):
+        self.log.debug("__omci_end_download_resp_fail", failure=fail)
+        self._result = ReasonCodes.ProcessingError
+        self.download_fail()
+    
+    def __omci_send_window_resp_success(self, rx_frame, cur_state, datasize):
+        # self.log.debug("__omci_send_window_resp_success", current_state=cur_state)
+        self._offset += datasize
+        self._image_download.downloaded_bytes += datasize
+        self.rx_ack_success()
+
+    def __omci_send_window_resp_fail(self, fail, cur_state):
+        self.log.debug("__omci_send_window_resp_fail", current_state=cur_state)
+        self.rx_ack_failed()
+
+    def __activate_resp_success(self, rx_frame):
+        self._current_deferred = None
+        if rx_frame.fields['message_type'] == OmciActivateImageResponse.message_id: # 0x36
+            omci_data = rx_frame.fields['omci_message']
+            if omci_data.fields['result'] == 0:
+                self.log.debug("Activate software image success, rebooting ONU ...", device_id=self._device.device_id,
+                                state=self._image_download.image_state)
+                standby_image_id = 0 if self._image_id else 1
+                self._omci_agent.database.set(self._device.device_id, SoftwareImage.class_id, self._image_id, {"is_active": 1})
+                self._omci_agent.database.set(self._device.device_id, SoftwareImage.class_id, standby_image_id, {"is_active": 0})
+                self.reboot()
+            elif omci_data.fields['result'] == 6: # Device Busy
+                self.log.debug('OMCI Activate Image Busy')
+                self.onu_busy()
+            else:
+                self.log.debug('OMCI Activate Image Failed', reason=omci_data.fields['result'])
+        else:
+            self.log.debug('Received unexpected OMCI message', message_type=rx_frame.fields['message_type'])
+                
+    def __activate_fail(self, fail):
+        self.log.debug("Activate software image failed", faile=fail)
+        self._current_deferred = None
+        self._result = ReasonCodes.ProcessingError
+        self.activate_done()
+        
+    def __commit_success(self, rx_frame):
+        self.log.debug("Commit software success", device_id=self._device_id)
+        self._current_deferred = None
+        standby_image_id = 0 if self._image_id else 1
+        self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._image_id, {"is_committed": 1})
+        self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, standby_image_id, {"is_committed": 0})
+        self._image_download.image_state = ImageDownload.IMAGE_ACTIVE
+        self._result = ReasonCodes.Success
+        self.activate_done()
+
+    def __commit_fail(self, fail):
+        self.log.debug("Commit software image failed", faile=fail)
+        self._current_deferred = None
+        self._result = ReasonCodes.ProcessingError
+        self._image_download.image_state = ImageDownload.IMAGE_REVERT
+        self.activate_done()
+
+#    @property
+#    def image_id(self):
+#        return self._image_id
+
+#    @image_id.setter
+#    def image_id(self, value):
+#        self._image_id = value
+
+    @property
+    def status(self):
+        return self._image_download
+
+    def start(self):
+        self.log.debug("OmciSoftwareImageDownloadSTM.start", current_state=self.state)
+        if self._ret_deferred is None:
+            self._ret_deferred = Deferred()
+        if self.state == 'init_image':
+            self.reactor.callLater(0, self.start_image)
+        return self._ret_deferred
+
+    def stop(self):
+        self.log.debug("OmciSoftwareImageDownloadSTM.stop", current_state=self.state)
+        self._result = ReasonCodes.OperationCancelled
+        self.download_fail()
+        
+    def on_enter_init_image(self):
+        self.log.debug("on_enter_init_image")
+        self._image_obj.seek(0)
+        self._offset = 0
+        # self._win_section = 0
+        self._win_retry   = 0
+        
+    def on_enter_starting_image(self):
+        self.log.debug("on_enter_starting_image")
+        self._image_download.downloaded_bytes = 0
+        self._current_deferred = self._device.omci_cc.send_start_software_download(self._image_id, self._image_size, self._window_size)
+        self._current_deferred.addCallbacks(self.__omci_start_download_resp_success, self.__omci_start_download_resp_fail)
+                                            # callbackArgs=(self.state,), errbackArgs=(self.state,))
+
+    def on_enter_dwin_init_window(self):
+        # self.log.debug("on_enter_dwin_init_window", offset=self._offset, image_size=self._image_size)
+        if self._offset < self._image_size:
+            self.send_sections()
+
+    def on_enter_dwin_sending_sections(self):
+        # self.log.debug("on_enter_dwin_sending_sections", offset=self._offset)
+
+        if (self._offset + self._window_size * OmciSectionDataSize) <= self._image_size:
+            sections = self._window_size
+            mod = 0
+            datasize = self._window_size * OmciSectionDataSize
+        else:
+            datasize = self._image_size - self._offset
+            sections = datasize / OmciSectionDataSize
+            mod = datasize % OmciSectionDataSize
+            sections = sections + 1 if mod > 0 else sections
+
+        # self.log.debug("on_enter_dwin_sending_sections", offset=self._offset, datasize=datasize, sections=sections)
+        if self._win_retry == 0:
+            self._win_data = self._image_obj.read(datasize)
+            self._win_crc32 = self.crc32(self._crc32, self._win_data)
+            # self.log.debug("CRC32", crc32=self._win_crc32, offset=self._offset)
+        else:
+            self.log.debug("Retry download window with crc32", offset=self._offset)
+            
+        sent = 0
+        for i in range(0, sections):
+            if i < sections - 1:
+                # self.log.debug("section data", data=hexlify(data[(self._offset+sent):(self._offset+sent+OmciSectionDataSize)]))
+                self._device.omci_cc.send_download_section(self._image_id, i,
+                                                           self._win_data[sent:sent+OmciSectionDataSize])
+                sent += OmciSectionDataSize
+            else:
+                last_size = OmciSectionDataSize if mod == 0 else mod
+                self._current_deferred = self._device.omci_cc.send_download_section(self._image_id, i,
+                                                           self._win_data[sent:sent+last_size],
+                                                           timeout=DEFAULT_OMCI_TIMEOUT)
+                self._current_deferred.addCallbacks(self.__omci_send_window_resp_success, self.__omci_send_window_resp_fail,
+                                                    callbackArgs=(self.state, datasize), errbackArgs=(self.state,))
+                sent += last_size
+                assert sent == datasize
+
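+    # Worked example (hypothetical sizes; assumes OmciSectionDataSize is 31 bytes,
+    # the baseline OMCI section payload): with image_size=100 and offset=62 the
+    # last window carries datasize = 100 - 62 = 38 bytes, sections = 38 / 31 = 1
+    # (Python 2 integer division), mod = 38 % 31 = 7, so sections becomes 2; only
+    # the final section of each window is acknowledged by the ONU.
+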
+    # def on_enter_dwin_last_section(self):
+    #     self._current_deferred = self._device.omci_cc.send_download_section, self._instance_id, self._win_section, data)
+    #     self._current_deferred.addCallbacks(self.__omci_resp_success, self.__omci_resp_fail,
+    #                                         callbackArgs=(self.state,), errbackArgs=(self.state,))
+
+    def on_enter_dwin_window_success(self):
+        # self.log.debug("on_enter_dwin_window_success")
+        self._crc32 = self._win_crc32 if self._win_crc32 != 0 else self._crc32
+        self._win_crc32 = 0
+        self._win_retry = 0
+        if self._offset < self._image_size:
+            self.reset_window()
+        else:
+            self.download_success()
+
+    def on_enter_dwin_window_failed(self):
+        self.log.debug("on_enter_dwin_window_fail: ", retry=self._win_retry)
+        if self._win_retry < self.OMCI_SWIMG_WINDOW_RETRY_MAX:
+            self._win_retry += 1
+            self.reset_window()
+        else:
+            self._result = ReasonCodes.ProcessingError
+            self.download_fail()
+
+    def on_enter_ending_image(self):
+        self.log.debug("on_enter_ending_image", crc32=self._crc32)
+        self._current_deferred = self._device.omci_cc.send_end_software_download(self._image_id, self._crc32, 
+                                                                                 self._image_size, timeout=18)
+        self._current_deferred.addCallbacks(self.__omci_end_download_resp_success, self.__omci_end_download_resp_fail)
+                                            # callbackArgs=(self.state,), errbackArgs=(self.state,))
+
+    def on_enter_endimg_busy(self):
+        self.log.debug("on_enter_endimg_busy")
+        self.reactor.callLater(3, self.retry_endimg)
+
+    def on_enter_actimg_init_act(self):
+        self.log.debug("on_enter_actimg_init_act", retry=self._actimg_retry, max_retry=self._actimg_retry_max)
+        # self._images[0] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed", "is_valid"])
+        # self._images[1] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed", "is_valid"])
+        # if (self._images[self._to_image]["is_active"] != 1 and self._images[self._to_image]["is_valid"] == 1):
+        if self._actimg_retry > self._actimg_retry_max:
+            self.log.debug("activate image failed: retry max", retries=self._actimg_retry)
+            self._result = ReasonCodes.ProcessingError
+            self.activate_done()
+        else:
+            self._image_download.image_state = ImageDownload.IMAGE_ACTIVATE
+            self.activate()
+            
+    def on_enter_actimg_activating(self):
+        self.log.debug("on_enter_actimg_activating")
+        img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 
+                                              self._image_id, ["is_active", "is_committed", "is_valid"])
+                                              
+        self.log.debug("on_enter_actimg_activating", instance=self._image_id, state=img)
+        if img["is_active"] == 0:
+            #if img["is_valid"] == 1:
+            self._current_deferred = self._device.omci_cc.send_active_image(self._image_id)
+            self._current_deferred.addCallbacks(self.__activate_resp_success, self.__activate_fail)
+            #else:
+            #    self.fail()
+        else:
+            self.do_commit()
+
+    def on_enter_actimg_busy(self):
+        self.log.debug("on_enter_actimg_busy")
+        self.reactor.callLater(3, self.activate)
+        
+    def __on_reboot_timeout(self):
+        self.log.debug("on_reboot_timeout")
+        self._timeout_dc = None
+        self._result = ReasonCodes.ProcessingError
+        self.activate_done()
+        
+    def on_enter_actimg_rebooting(self):
+        self.log.debug("on_enter_actimg_rebooting")
+        if self._timeout_dc is None:
+            self._timeout_dc = self.reactor.callLater(self._timeout, self.__on_reboot_timeout)
+
+    def on_exit_actimg_rebooting(self):
+        self.log.debug("on_exit_actimg_rebooting", timeout=self._timeout_dc)
+        if self._timeout_dc and self._timeout_dc.active():
+            self._timeout_dc.cancel()
+            self._timeout_dc = None
+    
+    def on_enter_actimg_committing(self):
+        # self.log.debug("on_enter_committing")
+        img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 
+                                              self._image_id, ["is_active", "is_committed", "is_valid"])
+        self.log.debug("on_enter_actimg_committing", instance=self._image_id, state=img)
+        if img['is_active'] == 0:
+            self._actimg_retry += 1
+            self.log.debug("do retry", retry=self._actimg_retry)
+            self.reset_actimg()
+        else:
+            self._actimg_retry = 0
+            self._current_deferred = self._device.omci_cc.send_commit_image(self._image_id)
+            self._current_deferred.addCallbacks(self.__commit_success, self.__commit_fail)
+
+    def on_enter_done_image(self):
+        self.log.debug("on_enter_done_image", result=self._result)
+        if self._result == ReasonCodes.Success:
+            self.reactor.callLater(0, self._ret_deferred.callback, self._image_download) # (str(self._instance_id))
+        else:
+            self._ret_deferred.errback(failure.Failure(Exception('ONU Software Download Failed, instance ' + str(self._image_id))))
+
+    def __crc_GenTable32(self):
+        if self._crctable_init:
+            return
+            
+        #  x32 + x26 + x23 + x22 + x16 + x12 + x11 + x10 + x8 + x7 + x5 + x4 + x2 + x + 1   
+        pn32 = [0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26]
+        poly = 0
+        for i in pn32:
+            poly |= (1 << i)
+
+        for i in range(0, 256):
+            _accum = (i << 24) & 0xFFFFFFFF
+            for j in range(0, 8):
+                if _accum & (1 << 31):
+                    _accum = ((_accum << 1) ^ poly) & 0xFFFFFFFF
+                else:
+                    _accum = (_accum << 1) & 0xFFFFFFFF
+            # self.crctable[i] = accum
+            self.crctable.append(_accum)
+        self._crctable_init = True
+            
+    def crc32(self, accum, data):
+        self.__crc_GenTable32()
+        _accum = ~accum & 0xFFFFFFFF
+        num = len(data)
+        for i in range(0, num):
+            _accum = self.crctable[((_accum >> 24) ^ ord(data[i])) & 0xFF] ^ ((_accum << 8) & 0xFFFFFFFF)
+
+        return ~_accum & 0xFFFFFFFF
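+
+    # The table-driven CRC above is MSB-first with the standard CRC-32 polynomial
+    # and inverts the accumulator on entry and exit, so window CRCs chain by
+    # feeding each result back in (a sketch mirroring the caller above):
+    #
+    #     crc = 0
+    #     for window in windows:            # hypothetical iterable of byte strings
+    #         crc = stm.crc32(crc, window)  # pass the previous result back in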
+
+###################################################################################
+##              OMCI Software Image Activation/Committing Procedure
+###################################################################################
+'''
+class OmciSoftwareImageActivateSTM(object):
+    OMCI_SWIMG_ACTIVATE_STATES = ['starting', 'activating', 'busy', 'rebooting', 'committing', 'done', 'failed']
+    OMCI_SWIMG_ACTIVATE_TRANSITIONS = [
+        {'trigger': 'activate', 'source': ['starting', 'busy'], 'dest': 'activating'},
+        {'trigger': 'onu_busy', 'source': 'activating', 'dest': 'busy'},
+        {'trigger': 'reboot',   'source': 'activating', 'dest': 'rebooting'},
+        {'trigger': 'do_commit', 'source': ['activating', 'rebooting'], 'dest': 'committing'},
+        {'trigger': 'commit_ok', 'source': 'committing', 'dest': 'done'},
+        {'trigger': 'reset',    'source': ['activating', 'rebooting', 'committing', 'failed'], 'dest': 'starting'},
+        {'trigger': 'fail',     'source': ['starting', 'activating', 'rebooting', 'committing'], 'dest': 'failed'}
+    ]
+    OMCI_SWIMG_ACTIVATE_TRANSITIONS_TIMEOUT = 10      # Seconds to delay after task failure/timeout
+    OMCI_SWIMG_ACTIVATE_RETRY_MAX           = 2
+    def __init__(self, omci_agent, dev_id, target_img_entity_id, image_download,
+                     states=OMCI_SWIMG_ACTIVATE_STATES,
+                     transitions=OMCI_SWIMG_ACTIVATE_TRANSITIONS,
+                     initial_state='disabled',
+                     timeout_delay=OMCI_SWIMG_ACTIVATE_TRANSITIONS_TIMEOUT,
+                     advertise_events=True,
+                     clock=None):
+        self.log = structlog.get_logger(device_id=dev_id)
+        self._omci_agent = omci_agent
+        self._device_id  = dev_id
+        self._device = omci_agent.get_device(dev_id)
+        self._to_image = target_img_entity_id
+        self._from_image = 0 if self._to_image == 1 else 1
+        self._image_download = image_download
+        # self._images = dict()
+        self._timeout = timeout_delay
+        self._timeout_dc = None
+        self.reactor = clock if clock is not None else reactor
+        self._retry_max = OmciSoftwareImageActivateSTM.OMCI_SWIMG_ACTIVATE_RETRY_MAX
+        self._retry = 0
+        self._deferred = None
+        self.ret_deferred = None
+        self.machine = Machine(model=self, 
+                               states=states,
+                               transitions=transitions,
+                               initial='starting',
+                               queued=True,
+                               name='{}-image_activate_machine'.format(self.__class__.__name__))
+        self.log.debug("OmciSoftwareImageActivateSTM", target=self._to_image)
+
+    def __activate_resp_success(self, rx_frame):
+        if rx_frame.fields['message_type'] == 0x36:  # (OmciActivateImageResponse)
+            omci_data = rx_frame.fields['omci_message']
+            if omci_data.fields['result'] == 0:
+                self.log.debug("Activate software image success, rebooting ONU ...", device_id=self._device_id) 
+                self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._to_image, {"is_active": 1})
+                self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._from_image, {"is_active": 0})
+                self.reboot()
+            elif omci_data.fields['result'] == 6: # Device Busy
+                self.log.debug('OMCI Activate Image Busy')
+                self.onu_busy()
+            else:
+                self.log.debug('OMCI Activate Image Failed', reason=omci_data.fields['result'])
+        else:
+            self.log.debug('Received unexpected OMCI message', message_type=rx_frame.fields['message_type'])
+                
+    def __activate_fail(self, fail):
+        self.log.debug("Activate software image failed", faile=fail)
+        
+    def __commit_success(self, rx_frame):
+        self.log.debug("Commit software success", device_id=self._device_id)
+        self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._to_image, {"is_committed": 1})
+        self._omci_agent.database.set(self._device_id, SoftwareImage.class_id, self._from_image, {"is_committed": 0})
+        self.commit_ok()
+
+    def __commit_fail(self, fail):
+        self.log.debug("Commit software image failed", faile=fail)
+
+    @property
+    def status(self):
+        return self._image_download
+        
+    def start(self):
+        self.log.debug("Start switch software image", target=self._to_image)
+        # self._images[0] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed", "is_valid"])
+        # self._images[1] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed", "is_valid"])
+        # if (self._images[self._to_image]["is_active"] == 0 and self._images[self._to_image]["is_valid"] == 1):
+        self.ret_deferred = Deferred()
+        self._image_download.image_state = ImageDownload.IMAGE_ACTIVATE
+        self.reactor.callLater(0, self.activate)
+        return self.ret_deferred
+
+    def on_enter_starting(self):
+        # self.log.debug("on_enter_starting")
+        # self._images[0] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed", "is_valid"])
+        # self._images[1] = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed", "is_valid"])
+        # if (self._images[self._to_image]["is_active"] != 1 and self._images[self._to_image]["is_valid"] == 1):
+        if self._retry > self._retry_max:
+            self.log.debug("failed: retry max", retries=self._retry)
+            self.fail()
+        else:
+            self.activate()
+            
+    def on_enter_activating(self):
+        img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 
+                                              self._to_image, ["is_active", "is_committed", "is_valid"])
+                                              
+        self.log.debug("on_enter_activating", instance=self._to_image, state=img)
+        if img["is_active"] == 0:
+            #if img["is_valid"] == 1:
+            self._deferred = self._device.omci_cc.send_active_image(self._to_image)
+            self._deferred.addCallbacks(self.__activate_resp_success, self.__activate_fail)
+            #else:
+            #    self.fail()
+        else:
+            self.do_commit()
+
+    def on_enter_busy(self):
+        self.log.debug("on_enter_busy")
+        self.reactor.callLater(3, self.activate)
+        
+    def on_enter_rebooting(self):
+        self.log.debug("on_enter_rebooting")
+        if self._timeout_dc is None:
+            self._timeout_dc = self.reactor.callLater(self._timeout, self.fail)
+
+    def on_exit_rebooting(self):
+        self.log.debug("on_exit_rebooting")
+        if self._timeout_dc and self._timeout_dc.active():
+            self._timeout_dc.cancel()
+            self._timeout_dc = None
+    
+    def on_enter_committing(self):
+        # self.log.debug("on_enter_committing")
+        img = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 
+                                              self._to_image, ["is_active", "is_committed", "is_valid"])
+        self.log.debug("on_enter_committing", instance=self._to_image, state=img)
+        if (img['is_active'] == 0):
+            self._retry += 1
+            self.log.debug("do retry", retry=self._retry)
+            self.reset()
+        else:
+            self._retry = 0
+            self._deferred = self._device.omci_cc.send_commit_image(self._to_image)
+            self._deferred.addCallbacks(self.__commit_success, self.__commit_fail)
+
+    def on_enter_done(self):
+        self.log.debug("on_enter_done")
+        self._image_download.image_state = ImageDownload.IMAGE_ACTIVE
+        self.ret_deferred.callback(self._to_image)
+
+    def on_enter_failed(self):
+        self.log.debug("on_enter_failed")
+        self._image_download.image_state = ImageDownload.IMAGE_REVERT
+        self.ret_deferred.errback(failure.Failure(Exception('ONU Software Activating Failed, instance ' + str(self._to_image))))
+'''
+
+###################################################################################
+##              Image Agent for OLT/ONT software image handling
+###################################################################################
+class ImageAgent(object):
+    """
+        Image Agent supports multiple image download state machines running at
+        the same time, one per requested image name.
+    """
+
+    DEFAULT_LOCAL_ROOT = "/"
+    
+    # def __init__(self, omci_agent, dev_id, stm_cls, img_tasks, advertise_events=True):
+    def __init__(self, omci_agent, dev_id, 
+                     dwld_stm_cls, dwld_img_tasks, 
+                     upgrade_onu_stm_cls, upgrade_onu_tasks, 
+                     # image_activate_stm_cls, 
+                     advertise_events=True, local_dir=None, clock=None):
+        """
+        Class initialization
+
+        :param omci_agent: (OpenOmciAgent) Agent
+        :param dev_id    : (str) ONU Device ID
+        :param dwld_stm_cls          : (ImageDownloadeSTM) Image download state machine class
+        :param dwld_img_tasks        : (FileDownloadTask) file download task
+        :param upgrade_onu_stm_cls   : (OmciSoftwareImageDownloadSTM) ONU Image upgrade state machine class
+        :param upgrade_onu_tasks     : ({OmciSwImageUpgradeTask})
+        # :param image_activate_stm_cls: (OmciSoftwareImageActivateSTM)
+        """
+        
+        self.log = structlog.get_logger(device_id=dev_id)
+
+        self._omci_agent = omci_agent
+        self._device_id = dev_id
+        self._dwld_stm_cls  = dwld_stm_cls
+        # self._image_download_sm = None
+        self._images = dict()
+        self._download_task_cls = dwld_img_tasks['download-file'] 
+
+        self._omci_upgrade_sm_cls = upgrade_onu_stm_cls
+        self._omci_upgrade_task_cls = upgrade_onu_tasks['omci_upgrade_task']
+        self._omci_upgrade_task = None
+        self._omci_upgrade_deferred = None
+        
+        # self._omci_activate_img_sm_cls = image_activate_stm_cls
+        # self._omci_activate_img_sm = None
+        self.reactor = clock if clock is not None else reactor
+
+        self._advertise_events = advertise_events
+        # self._local_dir = None
+
+        self._device = None
+        # onu_dev = self._omci_agent.get_device(self._device_id)
+        # assert device
+        
+        # self._local_dir = DEFAULT_LOCAL_ROOT + onu_dev.adapter_agent.name
+        # self.log.debug("ImageAgent", local_dir=self._local_dir)
+        
+        
+    def __get_standby_image_instance(self):
+        instance_id = None
+        instance_0 = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 0, ["is_active", "is_committed"])
+        if instance_0['is_active'] == 1:
+            instance_id = 1
+        else:
+            instance_1 = self._omci_agent.database.query(self._device_id, SoftwareImage.class_id, 1, ["is_active", "is_committed"])
+            if instance_1['is_active'] == 1:
+                instance_id = 0
+        return instance_id
+
+    def __clear_task(self, arg):
+        self._omci_upgrade_task = None
+
+    # def get_image(self, name, local_dir, remote_url, timeout_delay=ImageDownloadeSTM.DEFAULT_TIMEOUT_RETRY):
+    def get_image(self, image_download, timeout_delay=ImageDownloadeSTM.DEFAULT_TIMEOUT_RETRY):
+
+        """
+        Get the named image from the server(s).
+
+        :param image_download: (voltha_pb2.ImageDownload)
+        :param timeout_delay : (number) timeout for the download task
+
+        :return: a Deferred that fires once the file is locally available or has
+                 been downloaded successfully; the caller registers callback and
+                 errback on the returned Deferred to get notified
+        """
+        self.log.debug("get_image", download=image_download)
+
+        # if self._local_dir is None:
+        #     onu_dev = self._omci_agent.get_device(self._device_id)
+        #     assert onu_dev
+        #     if image_download.local_dir is None:
+        #         self._local_dir = ImageAgent.DEFAULT_LOCAL_ROOT + onu_dev.adapter_agent.name
+        #     else:
+        #         self._local_dir = image_download.local_dir + '/' + onu_dev.adapter_agent.name
+            
+            # self.log.debug("ImageAgent", local_dir=self._local_dir)
+        #     image_download.local_dir = self._local_dir
+            
+        # if os.path.isfile(self._local_dir + '/' + image_download.name): # image file exists
+        #     d = Deferred()
+        #     self.reactor.callLater(0, d.callback, image_download)
+        #     self.log.debug("Image file exists")
+        #     return d
+
+        img_dnld_sm = self._images.get(image_download.name)
+        if img_dnld_sm is None:
+            img_dnld_sm = self._dwld_stm_cls(self._omci_agent, # self._device_id, name, local_dir, remote_url, 
+                                             image_download,
+                                             self._download_task_cls,
+                                             timeout_delay=timeout_delay,
+                                             clock=self.reactor
+                                            )
+            self._images[image_download.name] = img_dnld_sm
+
+        # if self._image_download_sm is None:
+        #     self._image_download_sm = self._dwld_stm_cls(self._omci_agent, # self._device_id, name, local_dir, remote_url, 
+        #                                                  image_download,
+        #                                                  self._download_task_cls,
+        #                                                  timeout_delay=timeout_delay,
+        #                                                  clock=self.reactor
+        #                                                 )
+        # else:
+        #     if self._image_download_sm.download_status.state != ImageDownload.DOWNLOAD_SUCCEEDED:
+        #         self._image_download_sm.reset()
+            
+        d = img_dnld_sm.get_file()
+        return d
+
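+    # Illustrative use (hypothetical names): one download state machine is kept
+    # per image name, so repeated get_image() calls for the same name share it.
+    #
+    #     d = agent.get_image(image_download)
+    #     d.addCallbacks(on_image_ready, on_image_failed)
+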
+    def cancel_download_image(self, name):
+        img_dnld_sm = self._images.pop(name, None)
+        if img_dnld_sm is not None:
+            img_dnld_sm.stop()
+            
+            
+    def onu_omci_download(self, image_dnld_name):
+        """
+        Start upgrading the ONU.
+
+        :param image_dnld_name: (str) name of a previously downloaded image
+        :return: a Deferred that fires after the upgrade succeeds or fails,
+                 or None if the image does not exist
+        """
+        self.log.debug("onu_omci_download", image=image_dnld_name)
+
+        image_dnld_sm = self._images.get(image_dnld_name)
+        if image_dnld_sm is None:
+            return None
+            
+        self._device = self._omci_agent.get_device(image_dnld_sm.status.id) if self._device is None else self._device
+
+        # if restart:
+        #     self.cancel_upgrade_onu()            
+            
+        if self._omci_upgrade_task is None:
+            img_id = self.__get_standby_image_instance()
+            self.log.debug("start task", image_Id=img_id, task=self._omci_upgrade_sm_cls)
+            self._omci_upgrade_task = self._omci_upgrade_task_cls(img_id, 
+                                                                  self._omci_upgrade_sm_cls, 
+                                                                  self._omci_agent, 
+                                                                  image_dnld_sm.status, clock=self.reactor)
+            self.log.debug("task created but not started")
+            # self._device.task_runner.start()
+            self._omci_upgrade_deferred = self._device.task_runner.queue_task(self._omci_upgrade_task)
+            self._omci_upgrade_deferred.addBoth(self.__clear_task)
+        return self._omci_upgrade_deferred
+
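+    # Illustrative sequence (hypothetical names): download the file first, then
+    # push it to the ONU over OMCI once it is locally available.
+    #
+    #     d = agent.get_image(image_download)
+    #     d.addCallback(lambda img: agent.onu_omci_download(img.name))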
+
+    def cancel_upgrade_onu(self):
+        self.log.debug("cancel_upgrade_onu")
+        if self._omci_upgrade_task is not None:
+            self.log.debug("cancel_upgrade_onu", running=self._omci_upgrade_task.running)
+            # if self._omci_upgrade_task.running:
+            self._omci_upgrade_task.stop()
+            self._omci_upgrade_task = None
+        if self._omci_upgrade_deferred is not None:
+            self.reactor.callLater(0, self._omci_upgrade_deferred.cancel)
+            self._omci_upgrade_deferred = None
+           
+
+    # def activate_onu_image(self, image_name):
+    #     self.log.debug("activate_onu_image", image=image_name)
+    #     img_dnld = self.get_image_status(image_name)
+    #     if img_dnld is None:
+    #         return None
+            
+    #     img_dnld.image_state = ImageDownload.IMAGE_INACTIVE    
+    #     if self._omci_activate_img_sm is None:
+    #         self._omci_activate_img_sm = self._omci_activate_img_sm_cls(self._omci_agent, self._device_id,
+    #                                                                     self.__get_standby_image_instance(), 
+    #                                                                     img_dnld, clock=self.reactor)
+    #         return self._omci_activate_img_sm.start()
+    #     else:
+    #         return None
+            
+    def onu_bootup(self):
+        if self._omci_upgrade_task is not None:
+            self._omci_upgrade_task.onu_bootup()
+
+    def get_image_status(self, image_name):
+        """
+          :return: (ImageDownload) status of the named image, or None if unknown
+        """
+        sm = self._images.get(image_name)
+        return sm.status if sm is not None else None
+    
diff --git a/python/extensions/omci/state_machines/mib_sync.py b/python/extensions/omci/state_machines/mib_sync.py
new file mode 100644
index 0000000..2a8b535
--- /dev/null
+++ b/python/extensions/omci/state_machines/mib_sync.py
@@ -0,0 +1,934 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from datetime import datetime, timedelta
+from transitions import Machine
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.database.mib_db_api import MDS_KEY
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes, \
+    AttributeAccess
+from voltha.extensions.omci.omci_cc import OmciCCRxEvents, OMCI_CC, TX_REQUEST_KEY, \
+    RX_RESPONSE_KEY
+from voltha.extensions.omci.onu_device_entry import OnuDeviceEvents, OnuDeviceEntry, \
+    SUPPORTED_MESSAGE_ENTITY_KEY, SUPPORTED_MESSAGE_TYPES_KEY
+from voltha.extensions.omci.omci_entities import OntData
+from common.event_bus import EventBusClient
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+
+RxEvent = OmciCCRxEvents
+DevEvent = OnuDeviceEvents
+OP = EntityOperations
+RC = ReasonCodes
+AA = AttributeAccess
+
+
+class MibSynchronizer(object):
+    """
+    OpenOMCI MIB Synchronizer state machine
+    """
+    DEFAULT_STATES = ['disabled', 'starting', 'uploading', 'examining_mds',
+                      'in_sync', 'out_of_sync', 'auditing', 'resynchronizing']
+
+    DEFAULT_TRANSITIONS = [
+        {'trigger': 'start', 'source': 'disabled', 'dest': 'starting'},
+
+        {'trigger': 'upload_mib', 'source': 'starting', 'dest': 'uploading'},
+        {'trigger': 'examine_mds', 'source': 'starting', 'dest': 'examining_mds'},
+
+        {'trigger': 'success', 'source': 'uploading', 'dest': 'in_sync'},
+
+        {'trigger': 'success', 'source': 'examining_mds', 'dest': 'in_sync'},
+        {'trigger': 'mismatch', 'source': 'examining_mds', 'dest': 'resynchronizing'},
+
+        {'trigger': 'audit_mib', 'source': 'in_sync', 'dest': 'auditing'},
+
+        {'trigger': 'success', 'source': 'out_of_sync', 'dest': 'in_sync'},
+        {'trigger': 'audit_mib', 'source': 'out_of_sync', 'dest': 'auditing'},
+
+        {'trigger': 'success', 'source': 'auditing', 'dest': 'in_sync'},
+        {'trigger': 'mismatch', 'source': 'auditing', 'dest': 'resynchronizing'},
+        {'trigger': 'force_resync', 'source': 'auditing', 'dest': 'resynchronizing'},
+
+        {'trigger': 'success', 'source': 'resynchronizing', 'dest': 'in_sync'},
+        {'trigger': 'diffs_found', 'source': 'resynchronizing', 'dest': 'out_of_sync'},
+
+        # Add a wildcard 'timeout' trigger that sends us back to 'starting'
+        {'trigger': 'timeout', 'source': '*', 'dest': 'starting'},
+
+        # Add the wildcard 'stop' trigger last so it covers all previous states
+        {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+    ]
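+
+    # Expected flow (a sketch; the on_enter_* handlers below schedule these
+    # triggers themselves via the reactor): a brand-new ONU takes
+    # start -> upload_mib -> success, while a previously synchronized ONU takes
+    # start -> examine_mds and then either success (MDS values match) or
+    # mismatch -> resynchronizing. While in_sync, audit_mib fires periodically.
+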
+    DEFAULT_TIMEOUT_RETRY = 5      # Seconds to delay after task failure/timeout
+    DEFAULT_AUDIT_DELAY = 60       # Periodic tick to audit the MIB Data Sync
+    DEFAULT_RESYNC_DELAY = 300     # Periodically force a resync
+
+    def __init__(self, agent, device_id, mib_sync_tasks, db,
+                 advertise_events=False,
+                 states=DEFAULT_STATES,
+                 transitions=DEFAULT_TRANSITIONS,
+                 initial_state='disabled',
+                 timeout_delay=DEFAULT_TIMEOUT_RETRY,
+                 audit_delay=DEFAULT_AUDIT_DELAY,
+                 resync_delay=DEFAULT_RESYNC_DELAY):
+        """
+        Class initialization
+
+        :param agent: (OpenOmciAgent) Agent
+        :param device_id: (str) ONU Device ID
+        :param mib_sync_tasks: (dict) Tasks to run
+        :param db: (MibDbVolatileDict) MIB Database
+        :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+        :param states: (list) List of valid states
+        :param transitions: (dict) Dictionary of triggers and state changes
+        :param initial_state: (str) Initial state machine state
+        :param timeout_delay: (int/float) Number of seconds after a timeout to attempt
+                                          a retry (goes back to starting state)
+        :param audit_delay: (int) Seconds between MIB audits while in sync. Set to
+                                  zero to disable audit. An operator can request
+                                  an audit manually by calling 'self.audit_mib'
+        :param resync_delay: (int) Seconds in sync before performing a forced MIB
+                                   resynchronization
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+
+        self._agent = agent
+        self._device_id = device_id
+        self._device = None
+        self._database = db
+        self._timeout_delay = timeout_delay
+        self._audit_delay = audit_delay
+        self._resync_delay = resync_delay
+
+        self._upload_task = mib_sync_tasks['mib-upload']
+        self._get_mds_task = mib_sync_tasks['get-mds']
+        self._audit_task = mib_sync_tasks['mib-audit']
+        self._resync_task = mib_sync_tasks['mib-resync']
+        self._reconcile_task = mib_sync_tasks['mib-reconcile']
+        self._advertise_events = advertise_events
+
+        self._deferred = None
+        self._current_task = None  # TODO: Support multiple running tasks after v.2.0 release
+        self._task_deferred = None
+        self._mib_data_sync = 0
+        self._last_mib_db_sync_value = None
+        self._device_in_db = False
+        self._next_resync = None
+
+        self._on_olt_only_diffs = None
+        self._on_onu_only_diffs = None
+        self._attr_diffs = None
+        self._audited_olt_db = None
+        self._audited_onu_db = None
+
+        self._event_bus = EventBusClient()
+        self._omci_cc_subscriptions = {               # RxEvent.enum -> Subscription Object
+            RxEvent.MIB_Reset: None,
+            RxEvent.AVC_Notification: None,
+            RxEvent.MIB_Upload: None,
+            RxEvent.MIB_Upload_Next: None,
+            RxEvent.Create: None,
+            RxEvent.Delete: None,
+            RxEvent.Set: None,
+        }
+        self._omci_cc_sub_mapping = {
+            RxEvent.MIB_Reset: self.on_mib_reset_response,
+            RxEvent.AVC_Notification: self.on_avc_notification,
+            RxEvent.MIB_Upload: self.on_mib_upload_response,
+            RxEvent.MIB_Upload_Next: self.on_mib_upload_next_response,
+            RxEvent.Create: self.on_create_response,
+            RxEvent.Delete: self.on_delete_response,
+            RxEvent.Set: self.on_set_response,
+        }
+        self._onu_dev_subscriptions = {               # DevEvent.enum -> Subscription Object
+            DevEvent.OmciCapabilitiesEvent: None
+        }
+        self._onu_dev_sub_mapping = {
+            DevEvent.OmciCapabilitiesEvent: self.on_capabilities_event
+        }
+
+        # Statistics and attributes
+        # TODO: add any others that would help with problem diagnosis
+
+        # Set up state machine to manage states
+        self.machine = Machine(model=self, states=states,
+                               transitions=transitions,
+                               initial=initial_state,
+                               queued=True,
+                               name='{}-{}'.format(self.__class__.__name__,
+                                                   device_id))
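+        # Note: with model=self, the transitions library attaches the trigger
+        # names above ('start', 'success', 'timeout', ...) as methods on this
+        # object and calls the matching on_enter_<state>() hook on each
+        # transition; queued=True serializes triggers fired from callbacks.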
+        try:
+            import logging
+            logging.getLogger('transitions').setLevel(logging.WARNING)
+        except Exception as e:
+            self.log.exception('log-level-failed', e=e)
+
+    def _cancel_deferred(self):
+        d1, self._deferred = self._deferred, None
+        d2, self._task_deferred = self._task_deferred, None
+
+        for d in [d1, d2]:
+            try:
+                if d is not None and not d.called:
+                    d.cancel()
+            except:
+                pass
+
+    def __str__(self):
+        return 'MIBSynchronizer: Device ID: {}, State:{}'.format(self._device_id, self.state)
+
+    def delete(self):
+        """
+        Cleanup any state information
+        """
+        self.stop()
+        db, self._database = self._database, None
+
+        if db is not None:
+            db.remove(self._device_id)
+
+    @property
+    def device_id(self):
+        return self._device_id
+
+    @property
+    def mib_data_sync(self):
+        return self._mib_data_sync
+
+    def increment_mib_data_sync(self):
+        self._mib_data_sync += 1
+        if self._mib_data_sync > 255:
+            self._mib_data_sync = 0
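+        # The MIB data sync attribute is a single byte (ITU-T G.988), hence
+        # values wrap back to 0 once the counter passes 255.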
+
+        if self._database is not None:
+            self._database.save_mib_data_sync(self._device_id,
+                                              self._mib_data_sync)
+
+    @property
+    def last_mib_db_sync(self):
+        return self._last_mib_db_sync_value
+
+    @last_mib_db_sync.setter
+    def last_mib_db_sync(self, value):
+        self._last_mib_db_sync_value = value
+        if self._database is not None:
+            self._database.save_last_sync(self.device_id, value)
+
+    @property
+    def is_new_onu(self):
+        """
+        Is this a new ONU (has never completed MIB synchronization)
+        :return: (bool) True if this ONU should be considered new
+        """
+        return self.last_mib_db_sync is None
+
+    @property
+    def advertise_events(self):
+        return self._advertise_events
+
+    @advertise_events.setter
+    def advertise_events(self, value):
+        if not isinstance(value, bool):
+            raise TypeError('advertise_events must be a boolean')
+        self._advertise_events = value
+
+    def advertise(self, event, info):
+        """Advertise an event on the OpenOMCI event bus"""
+        if self._advertise_events:
+            self._agent.advertise(event,
+                                  {
+                                      'state-machine': self.machine.name,
+                                      'info': info,
+                                      'time': str(datetime.utcnow())
+                                  })
+
+    def on_enter_disabled(self):
+        """
+        State machine is being stopped
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        self._cancel_deferred()
+        if self._device is not None:
+            self._device.mib_db_in_sync = False
+
+        task, self._current_task = self._current_task, None
+        if task is not None:
+            task.stop()
+
+        # Drop Response and Autonomous notification subscriptions
+        for event, sub in self._omci_cc_subscriptions.iteritems():
+            if sub is not None:
+                self._omci_cc_subscriptions[event] = None
+                self._device.omci_cc.event_bus.unsubscribe(sub)
+
+        for event, sub in self._onu_dev_subscriptions.iteritems():
+            if sub is not None:
+                self._onu_dev_subscriptions[event] = None
+                self._device.event_bus.unsubscribe(sub)
+
+        # TODO: Stop and remove any currently running or scheduled tasks
+        # TODO: Anything else?
+
+    def _seed_database(self):
+        if not self._device_in_db:
+            try:
+                try:
+                    self._database.start()
+                    self._database.add(self._device_id)
+                    self.log.debug('seed-db-does-not-exist', device_id=self._device_id)
+
+                except KeyError:
+                    # Device is already in the database
+                    self.log.debug('seed-db-exist', device_id=self._device_id)
+                    self._mib_data_sync = self._database.get_mib_data_sync(self._device_id)
+                    self._last_mib_db_sync_value = self._database.get_last_sync(self._device_id)
+
+                self._device_in_db = True
+
+            except Exception as e:
+                self.log.exception('seed-database-failure', e=e)
+
+    def on_enter_starting(self):
+        """
+        Determine ONU status and start/re-start MIB Synchronization tasks
+        """
+        self._device = self._agent.get_device(self._device_id)
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        # Make sure root of external MIB Database exists
+        self._seed_database()
+
+        # Set up Response and Autonomous notification subscriptions
+        try:
+            for event, sub in self._omci_cc_sub_mapping.iteritems():
+                if self._omci_cc_subscriptions[event] is None:
+                    self._omci_cc_subscriptions[event] = \
+                        self._device.omci_cc.event_bus.subscribe(
+                            topic=OMCI_CC.event_bus_topic(self._device_id, event),
+                            callback=sub)
+
+        except Exception as e:
+            self.log.exception('omci-cc-subscription-setup', e=e)
+
+        # Set up ONU device subscriptions
+        try:
+            for event, sub in self._onu_dev_sub_mapping.iteritems():
+                if self._onu_dev_subscriptions[event] is None:
+                    self._onu_dev_subscriptions[event] = \
+                        self._device.event_bus.subscribe(
+                                topic=OnuDeviceEntry.event_bus_topic(self._device_id, event),
+                                callback=sub)
+
+        except Exception as e:
+            self.log.exception('dev-subscription-setup', e=e)
+
+        # Clear any previous audit results
+        self._on_olt_only_diffs = None
+        self._on_onu_only_diffs = None
+        self._attr_diffs = None
+        self._audited_olt_db = None
+        self._audited_onu_db = None
+
+        # Determine if this ONU has ever synchronized
+        if self.is_new_onu:
+            # Start full MIB upload
+            self._deferred = reactor.callLater(0, self.upload_mib)
+
+        else:
+            # Examine the MIB Data Sync
+            self._deferred = reactor.callLater(0, self.examine_mds)
+
+    def on_enter_uploading(self):
+        """
+        Begin full MIB data upload, starting with a MIB RESET
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        def success(results):
+            self.log.debug('mib-upload-success', results=results)
+            self._current_task = None
+            self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+            self._deferred = reactor.callLater(0, self.success)
+
+        def failure(reason):
+            self.log.info('mib-upload-failure', reason=reason)
+            self._current_task = None
+            self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+        self._device.mib_db_in_sync = False
+        self._current_task = self._upload_task(self._agent, self._device_id)
+
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_examining_mds(self):
+        """
+        Create a simple task to fetch the MIB Data Sync value and
+        determine if the ONU value matches what is in the MIB database
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        self._mib_data_sync = self._database.get_mib_data_sync(self._device_id) or 0
+
+        def success(onu_mds_value):
+            self.log.debug('examine-mds-success', onu_mds_value=onu_mds_value, olt_mds_value=self.mib_data_sync)
+            self._current_task = None
+
+            # Examine MDS value
+            if self.mib_data_sync == onu_mds_value:
+                self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+                self._deferred = reactor.callLater(0, self.success)
+            else:
+                self._deferred = reactor.callLater(0, self.mismatch)
+
+        def failure(reason):
+            self.log.info('examine-mds-failure', reason=reason)
+            self._current_task = None
+            self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+        self._device.mib_db_in_sync = False
+        self._current_task = self._get_mds_task(self._agent, self._device_id)
+
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_in_sync(self):
+        """
+        The OLT/OpenOMCI MIB Database is in sync with the ONU MIB Database.
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self.last_mib_db_sync = datetime.utcnow()
+        self._device.mib_db_in_sync = True
+
+        if self._audit_delay > 0:
+            self._deferred = reactor.callLater(self._audit_delay, self.audit_mib)
+
+    def on_enter_out_of_sync(self):
+        """
+        The MIB in OpenOMCI and the ONU are out of sync.  This can happen if:
+
+           o the MIB_Data_Sync values are not equal, or
+           o the MIBs were compared and differences were found.
+
+        Schedule a task to reconcile the differences
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        # We are only out-of-sync if there were differences.  If we are here due
+        # to MDS value differences alone, still run the reconcile so we update
+        # the ONU's MDS value to match ours.
+
+        self._device.mib_db_in_sync = self._attr_diffs is None and \
+                                      self._on_onu_only_diffs is None and \
+                                      self._on_olt_only_diffs is None
+
+        def success(onu_mds_value):
+            self.log.debug('reconcile-success', mds_value=onu_mds_value)
+            self._current_task = None
+            self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+            self._deferred = reactor.callLater(0, self.success)
+
+        def failure(reason):
+            self.log.info('reconcile-failure', reason=reason)
+            self._current_task = None
+            self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+        diff_collection = {
+            'onu-only': self._on_onu_only_diffs,
+            'olt-only': self._on_olt_only_diffs,
+            'attributes': self._attr_diffs,
+            'olt-db': self._audited_olt_db,
+            'onu-db': self._audited_onu_db
+        }
+        # Clear out results since reconciliation task will be handling them
+        self._on_olt_only_diffs = None
+        self._on_onu_only_diffs = None
+        self._attr_diffs = None
+        self._audited_olt_db = None
+        self._audited_onu_db = None
+
+        self._current_task = self._reconcile_task(self._agent, self._device_id, diff_collection)
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_auditing(self):
+        """
+        Perform a MIB Audit.  If our last MIB resync was too long in the
+        past, perform a resynchronization anyway
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        if self._next_resync is None:
+            self.log.error('next-forced-resync-error', msg='Next Resync should always be valid at this point')
+            self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+            return      # Do not fall through and compare datetime against None
+
+        if datetime.utcnow() >= self._next_resync:
+            self._deferred = reactor.callLater(0, self.force_resync)
+        else:
+            def success(onu_mds_value):
+                self.log.debug('audit-success', onu_mds_value=onu_mds_value, olt_mds_value=self.mib_data_sync)
+                self._current_task = None
+
+                # Examine MDS value
+                if self.mib_data_sync == onu_mds_value:
+                    self._deferred = reactor.callLater(0, self.success)
+                else:
+                    self._device.mib_db_in_sync = False
+                    self._deferred = reactor.callLater(0, self.mismatch)
+
+            def failure(reason):
+                self.log.info('audit-failure', reason=reason)
+                self._current_task = None
+                self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+            self._current_task = self._audit_task(self._agent, self._device_id)
+            self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+            self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_resynchronizing(self):
+        """
+        Perform a resynchronization of the MIB database
+
+        First calculate any differences
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        def success(results):
+            self.log.debug('resync-success', results=results)
+
+            on_olt_only = results.get('on-olt-only')
+            on_onu_only = results.get('on-onu-only')
+            attr_diffs = results.get('attr-diffs')
+            olt_db = results.get('olt-db')
+            onu_db = results.get('onu-db')
+
+            self._current_task = None
+            self._on_olt_only_diffs = on_olt_only if on_olt_only and len(on_olt_only) else None
+            self._on_onu_only_diffs = on_onu_only if on_onu_only and len(on_onu_only) else None
+            self._attr_diffs = attr_diffs if attr_diffs and len(attr_diffs) else None
+            self._audited_olt_db = olt_db
+            self._audited_onu_db = onu_db
+
+            mds_equal = self.mib_data_sync == self._audited_onu_db[MDS_KEY]
+
+            if mds_equal and all(diff is None for diff in [self._on_olt_only_diffs,
+                                                           self._on_onu_only_diffs,
+                                                           self._attr_diffs]):
+                self._next_resync = datetime.utcnow() + timedelta(seconds=self._resync_delay)
+                self._deferred = reactor.callLater(0, self.success)
+            else:
+                self._deferred = reactor.callLater(0, self.diffs_found)
+
+        def failure(reason):
+            self.log.info('resync-failure', reason=reason)
+            self._current_task = None
+            self._deferred = reactor.callLater(self._timeout_delay, self.timeout)
+
+        self._current_task = self._resync_task(self._agent, self._device_id)
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_mib_reset_response(self, _topic, msg):
+        """
+        Called upon receipt of a MIB Reset Response for this ONU
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-mib-reset-response', state=self.state)
+        try:
+            response = msg[RX_RESPONSE_KEY]
+
+            # Check if expected in current mib_sync state
+            if self.state != 'uploading' or self._omci_cc_subscriptions[RxEvent.MIB_Reset] is None:
+                self.log.error('rx-in-invalid-state', state=self.state)
+
+            else:
+                now = datetime.utcnow()
+
+                if not isinstance(response, OmciFrame):
+                    raise TypeError('Response should be an OmciFrame')
+
+                omci_msg = response.fields['omci_message'].fields
+                status = omci_msg['success_code']
+
+                assert status == RC.Success, 'Unexpected MIB reset response status: {}'. \
+                    format(status)
+
+                self._device.mib_db_in_sync = False
+                self._mib_data_sync = 0
+                self._device._modified = now
+                self._database.on_mib_reset(self._device_id)
+
+        except KeyError:
+            pass            # NOP
+
+    def on_avc_notification(self, _topic, msg):
+        """
+        Process an Attribute Value Change Notification
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-avc-notification', state=self.state)
+
+        if self._omci_cc_subscriptions[RxEvent.AVC_Notification]:
+            try:
+                notification = msg[RX_RESPONSE_KEY]
+
+                if self.state == 'disabled':
+                    self.log.error('rx-in-invalid-state', state=self.state)
+
+                # Inspect the notification
+                omci_msg = notification.fields['omci_message'].fields
+                class_id = omci_msg['entity_class']
+                instance_id = omci_msg['entity_id']
+                data = omci_msg['data']
+                attributes = data.keys()
+
+                # Look up ME Instance in Database. Not-found can occur if a MIB
+                # reset has occurred
+                info = self._database.query(self.device_id, class_id, instance_id, attributes)
+                # TODO: Add old/new info to log message
+                self.log.debug('avc-change', class_id=class_id, instance_id=instance_id)
+
+                # Save the changed data to the MIB.
+                self._database.set(self.device_id, class_id, instance_id, data)
+
+                # Autonomous creation and deletion of managed entities do not
+                # result in an increment of the MIB data sync value. However,
+                # AVC's in response to a change by the Operator do incur an
+                # increment of the MIB Data Sync.  If here during uploading,
+                # we issued a MIB-Reset which may generate AVC.  (TODO: Focus testing during hardening)
+                if self.state == 'uploading':
+                    self.increment_mib_data_sync()
+
+            except KeyError:
+                pass            # NOP
+
+    def on_mib_upload_response(self, _topic, msg):
+        """
+        Process a MIB Upload response
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-mib-upload-response', state=self.state)
+
+        if self._omci_cc_subscriptions[RxEvent.MIB_Upload]:
+            # Check if expected in current mib_sync state
+            if self.state == 'resynchronizing':
+                # The resync task handles this
+                # TODO: Remove this subscription if we never do anything with the response
+                return
+
+            if self.state != 'uploading':
+                self.log.error('rx-in-invalid-state', state=self.state)
+
+    def on_mib_upload_next_response(self, _topic, msg):
+        """
+        Process a MIB Upload Next response
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-mib-upload-next-response', state=self.state)
+
+        if self._omci_cc_subscriptions[RxEvent.MIB_Upload_Next]:
+            try:
+                if self.state == 'resynchronizing':
+                    # The resync task handles this
+                    return
+
+                # Check if expected in current mib_sync state
+                if self.state != 'uploading':
+                    self.log.error('rx-in-invalid-state', state=self.state)
+
+                else:
+                    response = msg[RX_RESPONSE_KEY]
+
+                    # Extract entity instance information
+                    omci_msg = response.fields['omci_message'].fields
+
+                    class_id = omci_msg['object_entity_class']
+                    entity_id = omci_msg['object_entity_id']
+
+                    # Filter out the 'mib_data_sync' from the database. We save that at
+                    # the device level and do not want it showing up during a re-sync
+                    # during data compares
+
+                    if class_id == OntData.class_id:
+                        return
+
+                    attributes = {k: v for k, v in omci_msg['object_data'].items()}
+
+                    # Save to the database
+                    self._database.set(self._device_id, class_id, entity_id, attributes)
+
+            except KeyError:
+                pass            # NOP
+            except Exception as e:
+                self.log.exception('upload-next', e=e)
+
+    def on_create_response(self, _topic, msg):
+        """
+        Process a Create response
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-create-response', state=self.state)
+
+        if self._omci_cc_subscriptions[RxEvent.Create]:
+            if self.state in ['disabled', 'uploading']:
+                self.log.error('rx-in-invalid-state', state=self.state)
+                return
+            try:
+                request = msg[TX_REQUEST_KEY]
+                response = msg[RX_RESPONSE_KEY]
+                status = response.fields['omci_message'].fields['success_code']
+
+                if status != RC.Success and status != RC.InstanceExists:
+                    # TODO: Support offline ONTs in post VOLTHA v1.3.0
+                    omci_msg = response.fields['omci_message']
+                    self.log.warn('create-response-failure',
+                                  class_id=omci_msg.fields['entity_class'],
+                                  instance_id=omci_msg.fields['entity_id'],
+                                  status=omci_msg.fields['success_code'],
+                                  status_text=self._status_to_text(omci_msg.fields['success_code']),
+                                  parameter_error_attributes_mask=omci_msg.fields['parameter_error_attributes_mask'])
+                else:
+                    omci_msg = request.fields['omci_message'].fields
+                    class_id = omci_msg['entity_class']
+                    entity_id = omci_msg['entity_id']
+                    attributes = {k: v for k, v in omci_msg['data'].items()}
+
+                    # Save to the database
+                    created = self._database.set(self._device_id, class_id, entity_id, attributes)
+
+                    if created:
+                        self.increment_mib_data_sync()
+
+                    # If the ME contains set-by-create or writeable values that were
+                    # not specified in the create command, the ONU will have
+                    # initialized those fields
+
+                    if class_id in self._device.me_map:
+                        sbc_w_set = {attr.field.name for attr in self._device.me_map[class_id].attributes
+                                     if (AA.SBC in attr.access or AA.W in attr.access)
+                                     and attr.field.name != 'managed_entity_id'}
+
+                        missing = sbc_w_set - {k for k in attributes.iterkeys()}
+
+                        if len(missing):
+                            # Request the missing attributes
+                            self.update_sbc_w_items(class_id, entity_id, missing)
+
+            except KeyError:
+                pass            # NOP
+
+            except Exception as e:
+                self.log.exception('create', e=e)
+
+    def update_sbc_w_items(self, class_id, entity_id, missing_attributes):
+        """
+        Perform a get-request for Set-By-Create (SBC) or writable (w) attributes
+        that were not specified in the original Create request.
+
+        :param class_id: (int) Class ID
+        :param entity_id: (int) Instance ID
+        :param missing_attributes: (set) Missing SBC or Writable attribute
+        """
+        if len(missing_attributes) and class_id in self._device.me_map:
+            from voltha.extensions.omci.tasks.omci_get_request import OmciGetRequest
+
+            def success(results):
+                self._database.set(self._device_id, class_id, entity_id, results.attributes)
+
+            def failure(reason):
+                self.log.warn('update-sbc-w-failed', reason=reason, class_id=class_id,
+                              entity_id=entity_id, attributes=missing_attributes)
+
+            d = self._device.task_runner.queue_task(OmciGetRequest(self._agent, self._device_id,
+                                                                   self._device.me_map[class_id],
+                                                                   entity_id, missing_attributes,
+                                                                   allow_failure=True))
+            d.addCallbacks(success, failure)
+
+    def on_delete_response(self, _topic, msg):
+        """
+        Process a Delete response
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-delete-response', state=self.state)
+
+        if self._omci_cc_subscriptions[RxEvent.Delete]:
+            if self.state in ['disabled', 'uploading']:
+                self.log.error('rx-in-invalid-state', state=self.state)
+                return
+            try:
+                request = msg[TX_REQUEST_KEY]
+                response = msg[RX_RESPONSE_KEY]
+
+                if response.fields['omci_message'].fields['success_code'] != RC.Success:
+                    # TODO: Support offline ONTs in post VOLTHA v1.3.0
+                    omci_msg = response.fields['omci_message']
+                    self.log.warn('delete-response-failure',
+                                  class_id=omci_msg.fields['entity_class'],
+                                  instance_id=omci_msg.fields['entity_id'],
+                                  status=omci_msg.fields['success_code'],
+                                  status_text=self._status_to_text(omci_msg.fields['success_code']))
+                else:
+                    omci_msg = request.fields['omci_message'].fields
+                    class_id = omci_msg['entity_class']
+                    entity_id = omci_msg['entity_id']
+
+                    # Remove from the database
+                    deleted = self._database.delete(self._device_id, class_id, entity_id)
+
+                    if deleted:
+                        self.increment_mib_data_sync()
+
+            except KeyError:
+                pass            # NOP
+            except Exception as e:
+                self.log.exception('delete', e=e)
+
+    def on_set_response(self, _topic, msg):
+        """
+        Process a Set response
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-set-response', state=self.state)
+
+        if self._omci_cc_subscriptions[RxEvent.Set]:
+            if self.state in ['disabled', 'uploading']:
+                self.log.error('rx-in-invalid-state', state=self.state)
+                return
+            try:
+                request = msg[TX_REQUEST_KEY]
+                response = msg[RX_RESPONSE_KEY]
+
+                if response.fields['omci_message'].fields['success_code'] != RC.Success:
+                    # TODO: Support offline ONTs in post VOLTHA v1.3.0
+                    omci_msg = response.fields['omci_message']
+                    self.log.warn('set-response-failure',
+                                  class_id=omci_msg.fields['entity_class'],
+                                  instance_id=omci_msg.fields['entity_id'],
+                                  status=omci_msg.fields['success_code'],
+                                  status_text=self._status_to_text(omci_msg.fields['success_code']),
+                                  unsupported_attribute_mask=omci_msg.fields['unsupported_attributes_mask'],
+                                  failed_attribute_mask=omci_msg.fields['failed_attributes_mask'])
+                else:
+                    omci_msg = request.fields['omci_message'].fields
+                    class_id = omci_msg['entity_class']
+                    entity_id = omci_msg['entity_id']
+                    attributes = {k: v for k, v in omci_msg['data'].items()}
+
+                    # Save to the database (Do not save 'sets' of the mib-data-sync however)
+                    if class_id != OntData.class_id:
+                        modified = self._database.set(self._device_id, class_id, entity_id, attributes)
+                        if modified:
+                            self.increment_mib_data_sync()
+
+            except KeyError as _e:
+                pass            # NOP
+            except Exception as e:
+                self.log.exception('set', e=e)
+
+    def on_capabilities_event(self, _topic, msg):
+        """
+        Process an OMCI capabilities event
+
+        :param _topic: (str) OnuDeviceEntry Capabilities event
+        :param msg: (dict) Message Entities & Message Types supported
+        """
+        self._database.update_supported_managed_entities(self.device_id,
+                                                         msg[SUPPORTED_MESSAGE_ENTITY_KEY])
+        self._database.update_supported_message_types(self.device_id,
+                                                      msg[SUPPORTED_MESSAGE_TYPES_KEY])
+
+    def _status_to_text(self, success_code):
+        return {
+                RC.Success: "Success",
+                RC.ProcessingError: "Processing Error",
+                RC.NotSupported: "Not Supported",
+                RC.ParameterError: "Paremeter Error",
+                RC.UnknownEntity: "Unknown Entity",
+                RC.UnknownInstance: "Unknown Instance",
+                RC.DeviceBusy: "Device Busy",
+                RC.InstanceExists: "Instance Exists"
+            }.get(success_code, 'Unknown status code: {}'.format(success_code))
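+
+    # For example, _status_to_text(RC.DeviceBusy) yields 'Device Busy', while
+    # an unmapped code falls through to 'Unknown status code: <code>'.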
+
+    def query_mib(self, class_id=None, instance_id=None, attributes=None):
+        """
+        Get MIB database information.
+
+        This method can be used to request information from the database at the
+        level of detail requested
+
+        :param class_id:  (int) Managed Entity class ID
+        :param instance_id: (int) Managed Entity instance
+        :param attributes: (list or str) Managed Entity instance's attributes
+
+        :return: (dict) The value(s) requested. If class/inst/attribute is
+                        not found, an empty dictionary is returned
+        :raises DatabaseStateError: If the database is not enabled or does not exist
+        """
+        from voltha.extensions.omci.database.mib_db_api import DatabaseStateError
+
+        self.log.debug('query', class_id=class_id,
+                       instance_id=instance_id, attributes=attributes)
+        if self._database is None:
+            raise DatabaseStateError('Database does not yet exist')
+
+        return self._database.query(self._device_id, class_id=class_id,
+                                    instance_id=instance_id,
+                                    attributes=attributes)
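+
+    # Hedged usage sketch: fetch the MIB data sync attribute of the OntData
+    # instance (entity id 0 by convention) from the local MIB copy:
+    #
+    #     info = self.query_mib(class_id=OntData.class_id, instance_id=0,
+    #                           attributes='mib_data_sync')
+    #     # -> {} if the class/instance/attribute is not in the database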
+
+    def mib_set(self, class_id, entity_id, attributes):
+        """
+        Set attributes of an existing ME Class instance
+
+        This method is primarily used by other state machines to save ME specific
+        information to the persistent database. Access by objects external to the
+        OpenOMCI library is discouraged.
+
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME Class entity ID
+        :param attributes: (dict) attribute -> value pairs to set
+        """
+        # It must exist first (but attributes can be new)
+        if isinstance(attributes, dict) and len(attributes) and\
+                self.query_mib(class_id, entity_id) is not None:
+            self._database.set(self._device_id, class_id, entity_id, attributes)
+
+    def mib_delete(self, class_id, entity_id):
+        """
+        Delete an existing ME Class instance
+
+        This method is primarily used by other state machines to delete an ME
+        from the MIB database
+
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME Class entity ID
+
+        :raises KeyError: If device does not exist
+        :raises DatabaseStateError: If the database is not enabled
+        """
+        self._database.delete(self._device_id, class_id, entity_id)
diff --git a/python/extensions/omci/state_machines/omci_onu_capabilities.py b/python/extensions/omci/state_machines/omci_onu_capabilities.py
new file mode 100644
index 0000000..c13739e
--- /dev/null
+++ b/python/extensions/omci/state_machines/omci_onu_capabilities.py
@@ -0,0 +1,262 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from datetime import datetime
+from transitions import Machine
+from twisted.internet import reactor
+from voltha.extensions.omci.onu_device_entry import OnuDeviceEntry, OnuDeviceEvents, IN_SYNC_KEY
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+
+
+class OnuOmciCapabilities(object):
+    """
+    OpenOMCI ONU OMCI Capabilities State machine
+    """
+    DEFAULT_STATES = ['disabled', 'out_of_sync', 'in_sync', 'idle']
+
+    DEFAULT_TRANSITIONS = [
+        {'trigger': 'start', 'source': 'disabled', 'dest': 'out_of_sync'},
+        {'trigger': 'synchronized', 'source': 'out_of_sync', 'dest': 'in_sync'},
+
+        {'trigger': 'success', 'source': 'in_sync', 'dest': 'idle'},
+        {'trigger': 'failure', 'source': 'in_sync', 'dest': 'out_of_sync'},
+
+        {'trigger': 'not_synchronized', 'source': 'idle', 'dest': 'out_of_sync'},
+
+        # Add the wildcard 'stop' trigger last so it covers all previous states
+        {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+    ]
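+
+    # Expected flow (a sketch): start moves to 'out_of_sync', where the machine
+    # polls the MIB database sync status; 'synchronized' then moves to 'in_sync'
+    # and the get-capabilities task runs. On task success the machine idles
+    # until the database drops out of sync again ('not_synchronized').
+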
+    DEFAULT_RETRY = 10      # Seconds to delay after task failure/timeout/poll
+
+    def __init__(self, agent, device_id, tasks,
+                 advertise_events=False,
+                 states=DEFAULT_STATES,
+                 transitions=DEFAULT_TRANSITIONS,
+                 initial_state='disabled',
+                 timeout_delay=DEFAULT_RETRY):
+        """
+        Class initialization
+
+        :param agent: (OpenOmciAgent) Agent
+        :param device_id: (str) ONU Device ID
+        :param tasks: (dict) Tasks to run
+        :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+        :param states: (list) List of valid states
+        :param transitions: (dict) Dictionary of triggers and state changes
+        :param initial_state: (str) Initial state machine state
+        :param timeout_delay: (int/float) Number of seconds after a timeout or poll
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+
+        self._agent = agent
+        self._device_id = device_id
+        self._device = None
+        self._timeout_delay = timeout_delay
+
+        self._get_capabilities_task = tasks['get-capabilities']
+        self._advertise_events = advertise_events
+
+        self._deferred = None
+        self._current_task = None
+        self._task_deferred = None
+        self._supported_entities = frozenset()
+        self._supported_msg_types = frozenset()
+
+        self._subscriptions = {               # OnuDeviceEvents.enum -> Subscription Object
+            OnuDeviceEvents.MibDatabaseSyncEvent: None
+        }
+        self._sub_mapping = {
+            OnuDeviceEvents.MibDatabaseSyncEvent: self.on_mib_sync_event
+        }
+        # Statistics and attributes
+        # TODO: add any others that would help with problem diagnosis
+
+        # Set up state machine to manage states
+        self.machine = Machine(model=self, states=states,
+                               transitions=transitions,
+                               initial=initial_state,
+                               queued=True,
+                               name='{}-{}'.format(self.__class__.__name__,
+                                                   device_id))
+
+    def _cancel_deferred(self):
+        d1, self._deferred = self._deferred, None
+        d2, self._task_deferred = self._task_deferred, None
+
+        for d in [d1, d2]:
+            try:
+                if d is not None and not d.called:
+                    d.cancel()
+            except:
+                pass
+
+    def _cancel_tasks(self):
+        task, self._current_task = self._current_task, None
+        if task is not None:
+            task.stop()
+
+    def __str__(self):
+        return 'OnuOmciCapabilities: Device ID: {}, State:{}'.format(self._device_id, self.state)
+
+    def delete(self):
+        """
+        Cleanup any state information
+        """
+        self.stop()
+
+    @property
+    def device_id(self):
+        return self._device_id
+
+    @property
+    def supported_managed_entities(self):
+        """
+        Return a set of the Managed Entity class IDs supported on this ONU
+        None is returned if no MEs have been discovered
+
+        :return: (set of ints)
+        """
+        return self._supported_entities if len(self._supported_entities) else None
+
+    @property
+    def supported_message_types(self):
+        """
+        Return a set of the Message Types supported on this ONU
+        None is returned if no message types have been discovered
+
+        :return: (set of EntityOperations)
+        """
+        return self._supported_msg_types if len(self._supported_msg_types) else None
+
+    @property
+    def advertise_events(self):
+        return self._advertise_events
+
+    @advertise_events.setter
+    def advertise_events(self, value):
+        if not isinstance(value, bool):
+            raise TypeError('advertise_events must be a boolean')
+        self._advertise_events = value
+
+    def advertise(self, event, info):
+        """Advertise an event on the OpenOMCI event bus"""
+        if self._advertise_events:
+            self._agent.advertise(event,
+                                  {
+                                      'state-machine': self.machine.name,
+                                      'info': info,
+                                      'time': str(datetime.utcnow())
+                                  })
+
+    def on_enter_disabled(self):
+        """
+        State machine is being stopped
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+        self._cancel_tasks()
+
+        self._supported_entities = frozenset()
+        self._supported_msg_types = frozenset()
+
+        # Drop Response and Autonomous notification subscriptions
+        for event, sub in self._subscriptions.iteritems():
+            if sub is not None:
+                self._subscriptions[event] = None
+                self._device.event_bus.unsubscribe(sub)
+
+    def on_enter_out_of_sync(self):
+        """
+        State machine has just started or the MIB database has transitioned
+        to an out-of-synchronization state
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+        self._device = self._agent.get_device(self._device_id)
+
+        # Subscribe to events of interest
+        try:
+            for event, sub in self._sub_mapping.iteritems():
+                if self._subscriptions[event] is None:
+                    self._subscriptions[event] = \
+                        self._device.event_bus.subscribe(
+                            topic=OnuDeviceEntry.event_bus_topic(self._device_id,
+                                                                 event),
+                            callback=sub)
+
+        except Exception as e:
+            self.log.exception('subscription-setup', e=e)
+
+        # Poll periodically for in-sync in case the subscription event was
+        # missed or the database was already in sync
+        self._deferred = reactor.callLater(0, self.check_in_sync)
+
+    def check_in_sync(self):
+        if self._device.mib_db_in_sync:
+            self.synchronized()
+        else:
+            self._deferred = reactor.callLater(self._timeout_delay,
+                                               self.check_in_sync)
+
+    def on_enter_in_sync(self):
+        """
+        State machine has just transitioned to an in-synchronization state
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+
+        def success(results):
+            self.log.debug('capabilities-success', results=results)
+            self._supported_entities = self._current_task.supported_managed_entities
+            self._supported_msg_types = self._current_task.supported_message_types
+            self._current_task = None
+            self._deferred = reactor.callLater(0, self.success)
+
+        def failure(reason):
+            self.log.info('capabilities-failure', reason=reason)
+            self._supported_entities = frozenset()
+            self._supported_msg_types = frozenset()
+            self._current_task = None
+            self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+        # Schedule a task to read the ONU's OMCI capabilities
+        self._current_task = self._get_capabilities_task(self._agent, self._device_id)
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_idle(self):
+        """
+        Notify any subscribers for a capabilities event and wait until
+        stopped or ONU MIB database goes out of sync
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+        self._device.publish_omci_capabilities_event()
+
+    def on_mib_sync_event(self, _topic, msg):
+        """
+        Handle In-Sync/Out-of-Sync for the MIB database
+        :param _topic: (str) Subscription topic
+        :param msg: (dict) In-Sync event data
+        """
+        if self._subscriptions.get(OnuDeviceEvents.MibDatabaseSyncEvent) is None:
+            return
+
+        if msg[IN_SYNC_KEY]:
+            self.synchronized()
+        else:
+            self.not_synchronized()
diff --git a/python/extensions/omci/state_machines/performance_intervals.py b/python/extensions/omci/state_machines/performance_intervals.py
new file mode 100644
index 0000000..73e145b
--- /dev/null
+++ b/python/extensions/omci/state_machines/performance_intervals.py
@@ -0,0 +1,901 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+import arrow
+from transitions import Machine
+from datetime import datetime, timedelta
+from random import uniform, shuffle
+from twisted.internet import reactor
+from common.utils.indexpool import IndexPool
+from voltha.protos.omci_mib_db_pb2 import OpenOmciEventType
+from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes
+from voltha.extensions.omci.omci_cc import OmciCCRxEvents, OMCI_CC, TX_REQUEST_KEY, \
+    RX_RESPONSE_KEY
+from voltha.extensions.omci.database.mib_db_api import ATTRIBUTES_KEY
+from voltha.extensions.omci.tasks.omci_get_request import OmciGetRequest
+from voltha.extensions.omci.omci_entities import MacBridgePortConfigurationData
+from voltha.extensions.omci.omci_entities import EthernetPMMonitoringHistoryData, \
+    FecPerformanceMonitoringHistoryData, \
+    XgPonTcPerformanceMonitoringHistoryData, \
+    XgPonDownstreamPerformanceMonitoringHistoryData, \
+    XgPonUpstreamPerformanceMonitoringHistoryData, \
+    EthernetFrameUpstreamPerformanceMonitoringHistoryData, \
+    EthernetFrameDownstreamPerformanceMonitoringHistoryData, \
+    EthernetFrameExtendedPerformanceMonitoring, \
+    EthernetFrameExtendedPerformanceMonitoring64Bit, AniG
+
+
+RxEvent = OmciCCRxEvents
+OP = EntityOperations
+RC = ReasonCodes
+
+
+class PerformanceIntervals(object):
+    """
+    OpenOMCI ONU Performance Monitoring Intervals State machine
+
+    This state machine focuses on L2 Internet Data Service and Classical
+    PM (for the v2.0 release).
+    """
+    DEFAULT_STATES = ['disabled', 'starting', 'synchronize_time', 'idle', 'create_pm_me',
+                      'delete_pm_me', 'collect_data', 'rebooted', 'threshold_exceeded']
+
+    DEFAULT_TRANSITIONS = [
+        {'trigger': 'start', 'source': 'disabled', 'dest': 'starting'},
+        {'trigger': 'tick', 'source': 'starting', 'dest': 'synchronize_time'},
+
+        {'trigger': 'success', 'source': 'synchronize_time', 'dest': 'idle'},
+        {'trigger': 'failure', 'source': 'synchronize_time', 'dest': 'synchronize_time'},
+
+        {'trigger': 'tick', 'source': 'idle', 'dest': 'collect_data'},
+        {'trigger': 'add_me', 'source': 'idle', 'dest': 'create_pm_me'},
+        {'trigger': 'delete_me', 'source': 'idle', 'dest': 'delete_pm_me'},
+
+        {'trigger': 'success', 'source': 'create_pm_me', 'dest': 'idle'},
+        {'trigger': 'failure', 'source': 'create_pm_me', 'dest': 'idle'},
+
+        {'trigger': 'success', 'source': 'delete_pm_me', 'dest': 'idle'},
+        {'trigger': 'failure', 'source': 'delete_pm_me', 'dest': 'idle'},
+
+        {'trigger': 'success', 'source': 'collect_data', 'dest': 'idle'},
+        {'trigger': 'failure', 'source': 'collect_data', 'dest': 'idle'},
+
+        # TODO: Add rebooted event transitions to disabled or synchronize_time
+        # TODO: Need to capture Threshold Crossing Alarms appropriately
+
+        # Add the wildcard 'stop' trigger last so it covers all previous states
+        {'trigger': 'stop', 'source': '*', 'dest': 'disabled'},
+        {'trigger': 'reboot', 'source': '*', 'dest': 'rebooted'},
+    ]
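+
+    # Expected flow (a sketch): start -> 'starting', then a 'tick' moves to
+    # 'synchronize_time' to set the ONU clock. From 'idle', periodic 'tick'
+    # triggers drive interval collection (typically the standard 15-minute
+    # OMCI PM interval), while 'add_me'/'delete_me' detour through states
+    # that create or delete PM Managed Entities on the ONU.
+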
+    DEFAULT_RETRY = 10               # Seconds to delay after task failure/timeout/poll
+    DEFAULT_TICK_DELAY = 15          # Seconds between checks for collection tick
+    DEFAULT_INTERVAL_SKEW = 10 * 60  # Seconds to skew past interval boundary
+    DEFAULT_COLLECT_ATTEMPTS = 3     # Maximum number of collection fetch attempts
+    DEFAULT_CREATE_ATTEMPTS = 15     # Maximum number of attempts to create PM Managed Entities
+
+    def __init__(self, agent, device_id, tasks,
+                 advertise_events=False,
+                 states=DEFAULT_STATES,
+                 transitions=DEFAULT_TRANSITIONS,
+                 initial_state='disabled',
+                 timeout_delay=DEFAULT_RETRY,
+                 tick_delay=DEFAULT_TICK_DELAY,
+                 interval_skew=DEFAULT_INTERVAL_SKEW,
+                 collect_attempts=DEFAULT_COLLECT_ATTEMPTS,
+                 create_attempts=DEFAULT_CREATE_ATTEMPTS):
+        """
+        Class initialization
+
+        :param agent: (OpenOmciAgent) Agent
+        :param device_id: (str) ONU Device ID
+        :param tasks: (dict) Tasks to run
+        :param advertise_events: (bool) Advertise events on OpenOMCI Event Bus
+        :param states: (list) List of valid states
+        :param transitions: (dict) Dictionary of triggers and state changes
+        :param initial_state: (str) Initial state machine state
+        :param timeout_delay: (int/float) Number of seconds after a timeout to pause
+        :param tick_delay: (int/float) Collection poll check delay while idle
+        :param interval_skew: (int/float) Seconds to randomly skew the next interval
+                              collection to spread out requests for PM intervals
+        :param collect_attempts: (int) Max requests for a single PM interval before fail
+        :param create_attempts: (int) Max attempts to create PM Managed entities before stopping state machine
+        """
+        self.log = structlog.get_logger(device_id=device_id)
+
+        self._agent = agent
+        self._device_id = device_id
+        self._device = None
+        self._pm_config = None
+        self._timeout_delay = timeout_delay
+        self._tick_delay = tick_delay
+        self._interval_skew = interval_skew
+        self._collect_attempts = collect_attempts
+        self._create_attempts = create_attempts
+
+        self._sync_time_task = tasks['sync-time']
+        self._get_interval_task = tasks['collect-data']
+        self._create_pm_task = tasks['create-pm']
+        self._delete_pm_task = tasks['delete-pm']
+        self._advertise_events = advertise_events
+
+        self._omci_cc_subscriptions = {               # RxEvent.enum -> Subscription Object
+            RxEvent.MIB_Reset: None,
+            RxEvent.Create: None,
+            RxEvent.Delete: None
+        }
+        self._omci_cc_sub_mapping = {
+            RxEvent.MIB_Reset: self.on_mib_reset_response,
+            RxEvent.Create: self.on_create_response,
+            RxEvent.Delete: self.on_delete_response,
+        }
+        self._me_watch_list = {
+            MacBridgePortConfigurationData.class_id: {
+                'create-delete': self.add_remove_enet_frame_pm,
+                'instances': dict()  # BP entity_id -> (PM class_id, PM entity_id)
+            }
+        }
+        self._deferred = None
+        self._task_deferred = None
+        self._current_task = None
+        self._add_me_deferred = None
+        self._delete_me_deferred = None
+        self._next_interval = None
+        self._enet_entity_id = IndexPool(1024, 1)
+        self._add_pm_me_retry = 0
+
+        # (Class ID, Instance ID) -> Collect attempts remaining
+        self._pm_me_collect_retries = dict()
+        self._pm_me_extended_info = dict()
+        self._add_pm_me = dict()        # (pm cid, pm eid) -> (me cid, me eid, upstream)
+        self._del_pm_me = set()
+
+        # Pollable PM items
+        # Note that some items the KPI code extracts are not listed below, namely the
+        # administrative states, operational states, and sensed ethernet type. The
+        # values in the MIB database should be accurate for these items.
+
+        self._ani_g_items = ["optical_signal_level", "transmit_optical_level"]
+        self._next_poll_time = datetime.utcnow()
+        self._poll_interval = 60                    # TODO: Fixed at once a minute
+
+        # Statistics and attributes
+        # TODO: add any others if it will support problem diagnosis
+
+        # Set up state machine to manage states
+        self.machine = Machine(model=self, states=states,
+                               transitions=transitions,
+                               initial=initial_state,
+                               queued=True,
+                               ignore_invalid_triggers=True,
+                               name='{}-{}'.format(self.__class__.__name__,
+                                                   device_id))
+        try:
+            import logging
+            logging.getLogger('transitions').setLevel(logging.WARNING)
+        except Exception as e:
+            self.log.exception('log-level-failed', e=e)
+
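+    # A minimal construction sketch (agent, device_id, and the task classes are
+    # placeholders; the dictionary keys are the ones read above in __init__):
+    #
+    #   tasks = {'sync-time': SyncTimeTask,
+    #            'collect-data': IntervalDataTask,
+    #            'create-pm': OmciCreatePMRequest,
+    #            'delete-pm': OmciDeletePMRequest}
+    #   pm = PerformanceIntervals(agent, device_id, tasks)
+    #   pm.start()   # assuming a 'start' trigger exists in DEFAULT_TRANSITIONS
+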
+    def _cancel_deferred(self):
+        d1, self._deferred = self._deferred, None
+        d2, self._task_deferred = self._task_deferred, None
+        d3, self._add_me_deferred = self._add_me_deferred, None
+        d4, self._delete_me_deferred = self._delete_me_deferred, None
+
+        for d in [d1, d2, d3, d4]:
+            try:
+                if d is not None and not d.called:
+                    d.cancel()
+            except:
+                pass
+
+    def _cancel_tasks(self):
+        task, self._current_task = self._current_task, None
+        if task is not None:
+            task.stop()
+
+    def __str__(self):
+        return 'PerformanceIntervals: Device ID: {}, State:{}'.format(self._device_id,
+                                                                      self.state)
+
+    def delete(self):
+        """
+        Cleanup any state information
+        """
+        self.stop()
+
+    @property
+    def device_id(self):
+        return self._device_id
+
+    @property
+    def advertise_events(self):
+        return self._advertise_events
+
+    @advertise_events.setter
+    def advertise_events(self, value):
+        if not isinstance(value, bool):
+            raise TypeError('advertise_events must be a boolean')
+        self._advertise_events = value
+
+    def advertise(self, event, info):
+        """Advertise an event on the OpenOMCI event bus"""
+        if self._advertise_events:
+            self._agent.advertise(event,
+                                  {
+                                      'state-machine': self.machine.name,
+                                      'info': info,
+                                      'time': str(datetime.utcnow()),
+                                      'next': str(self._next_interval)
+                                  })
+
+    def set_pm_config(self, pm_config):
+        """
+        Set PM interval configuration
+
+        :param pm_config: (OnuPmIntervalMetrics) PM Interval configuration
+        :return:
+        """
+        self._pm_config = pm_config
+
+    def _me_is_supported(self, class_id):
+        """
+        Check to see if ONU supports this ME
+        :param class_id: (int) ME Class ID
+        :return: (bool) If ME is supported
+        """
+        supported = self._device.omci_capabilities.supported_managed_entities
+        return class_id in supported if supported is not None else False
+
+    def add_pm_me(self, pm_class_id, pm_entity_id, cid=0, eid=0, upstream=False):
+        """
+        Add a new Performance Monitoring ME.
+
+        The ME ID will be added to an internal list and the ME will be created
+        the next time the idle state is reached. An 'add_pm_me' trigger is
+        raised if the state machine is already in the idle state.
+
+        :param pm_class_id: (int) ME Class ID (1..0xFFFE)
+        :param pm_entity_id: (int) Instance ID (1..0xFFFE)
+        :param cid: (int) Class ID of entity monitored, may be None
+        :param eid: (int) Instance ID of entity monitored, may be None
+        :param upstream: (bool): Flag indicating if PM is for upstream traffic
+        """
+        if not isinstance(pm_class_id, int):
+            raise TypeError('PM ME Class ID is an integer')
+        if not 0 < pm_class_id < 0xFFFF:
+            raise ValueError('PM ME Class ID must be 1..65534')
+
+        # Check to see if ONU supports this ME
+        if not self._me_is_supported(pm_class_id):
+            self.log.warn('unsupported-PM-me', class_id=pm_class_id)
+            return
+
+        key = (pm_class_id, pm_entity_id)
+        entry = (cid, eid, upstream)
+
+        if key not in self._pm_me_collect_retries and key not in self._add_pm_me:
+            self._add_pm_me[key] = entry
+
+            if self._add_me_deferred is None:
+                self._add_me_deferred = reactor.callLater(0, self.add_me)
+
+        if key in self._del_pm_me:
+            self._del_pm_me.remove(key)
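+
+    # Usage sketch (hypothetical entity ID): request creation of an Ethernet
+    # PM history ME for UNI entity 0x101; the ME is created on the next pass
+    # through the idle state:
+    #
+    #   pm.add_pm_me(EthernetPMMonitoringHistoryData.class_id, 0x101)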
+
+    def delete_pm_me(self, class_id, entity_id):
+        """
+        Remove an existing Performance Monitoring ME.
+
+        The ME ID will be added to an internal list and the ME will be removed
+        the next time the idle state is reached. A 'delete_pm_me' trigger is
+        raised if the state machine is already in the idle state.
+
+        :param class_id: (int) ME Class ID (1..0xFFFE)
+        :param entity_id: (int) Instance ID (1..0xFFFE)
+        """
+        if not isinstance(class_id, int):
+            raise TypeError('PM ME Class ID is an integer')
+        if not 0 < class_id < 0xFFFF:
+            raise ValueError('PM ME Class ID must be 1..65534')
+
+        # Check to see if ONU supports this ME
+        if not self._me_is_supported(class_id):
+            self.log.warn('unsupported-PM-me', class_id=class_id)
+            return
+
+        key = (class_id, entity_id)
+
+        if key in self._pm_me_collect_retries and key not in self._del_pm_me:
+            self._del_pm_me.add(key)
+
+            if self._delete_me_deferred is None:
+                self._delete_me_deferred = reactor.callLater(0, self.delete_me)
+
+        if key in self._add_pm_me:
+            self._add_pm_me.pop(key)
+
+    def on_enter_disabled(self):
+        """
+        State machine is being stopped
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+        self._cancel_tasks()
+        self._next_interval = None
+
+        # Drop OMCI ME Response subscriptions
+        for event, sub in self._omci_cc_subscriptions.iteritems():
+            if sub is not None:
+                self._omci_cc_subscriptions[event] = None
+                self._device.omci_cc.event_bus.unsubscribe(sub)
+
+        # Manually remove ANI/PON and UNI PM interval MEs
+        config = self._device.configuration
+        anis = config.ani_g_entities
+        unis = config.uni_g_entities
+
+        if anis is not None:
+            for entity_id in anis.iterkeys():
+                self.delete_pm_me(FecPerformanceMonitoringHistoryData.class_id, entity_id)
+                self.delete_pm_me(XgPonTcPerformanceMonitoringHistoryData.class_id, entity_id)
+                self.delete_pm_me(XgPonDownstreamPerformanceMonitoringHistoryData.class_id, entity_id)
+                self.delete_pm_me(XgPonUpstreamPerformanceMonitoringHistoryData.class_id, entity_id)
+
+        if unis is not None:
+            for entity_id in config.uni_g_entities.iterkeys():
+                self.delete_pm_me(EthernetPMMonitoringHistoryData.class_id, entity_id)
+
+    def on_enter_starting(self):
+        """ Add the PON/ANI and UNI PM intervals"""
+        self.advertise(OpenOmciEventType.state_change, self.state)
+
+        self._device = self._agent.get_device(self._device_id)
+        self._cancel_deferred()
+
+        # Set up OMCI ME Response subscriptions
+        try:
+            for event, sub in self._omci_cc_sub_mapping.iteritems():
+                if self._omci_cc_subscriptions[event] is None:
+                    self._omci_cc_subscriptions[event] = \
+                        self._device.omci_cc.event_bus.subscribe(
+                            topic=OMCI_CC.event_bus_topic(self._device_id, event),
+                            callback=sub)
+
+        except Exception as e:
+            self.log.exception('omci-cc-subscription-setup', e=e)
+
+        try:
+            class_id = None     # Keep in scope for the exception log below
+            # Manually start some ANI/PON and UNI PM interval MEs
+            config = self._device.configuration
+            anis = config.ani_g_entities
+            unis = config.uni_g_entities
+
+            if anis is not None:
+                for entity_id in anis.iterkeys():
+                    self.add_pm_me(FecPerformanceMonitoringHistoryData.class_id,
+                                   entity_id)
+                    self.add_pm_me(XgPonTcPerformanceMonitoringHistoryData.class_id,
+                                   entity_id)
+                    self.add_pm_me(XgPonDownstreamPerformanceMonitoringHistoryData.class_id,
+                                   entity_id)
+                    self.add_pm_me(XgPonUpstreamPerformanceMonitoringHistoryData.class_id,
+                                   entity_id)
+
+            if unis is not None:
+                for entity_id in config.uni_g_entities.iterkeys():
+                    self.add_pm_me(EthernetPMMonitoringHistoryData.class_id, entity_id)
+
+            # Look for existing instances of dynamically created ME's that have PM
+            # associated with them and add them now
+            for class_id in self._me_watch_list.iterkeys():
+                instances = {k: v for k, v in
+                             self._device.query_mib(class_id=class_id).items()
+                             if isinstance(k, int)}
+
+                for entity_id, data in instances.items():
+                    method = self._me_watch_list[class_id]['create-delete']
+                    cid, eid = method(None, class_id, entity_id,
+                                      add=True, attributes=data[ATTRIBUTES_KEY])
+                    if cid > 0:
+                        # BP entity_id -> (PM class_id, PM entity_id)
+                        instances = self._me_watch_list[class_id]['instances']
+                        instances[entity_id] = (cid, eid)
+
+        except Exception as e:
+            self.log.exception('pm-me-setup', class_id=class_id, e=e)
+
+        # Go to the synchronize_time state
+        self._deferred = reactor.callLater(0, self.tick)
+
+    def on_enter_synchronize_time(self):
+        """
+        State machine has just transitioned to the synchronize_time state
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+
+        def success(_results):
+            self.log.debug('sync-time-success')
+            self._current_task = None
+            self._deferred = reactor.callLater(0, self.success)
+            # Calculate next interval time
+            self._next_interval = self.get_next_interval
+
+        def failure(reason):
+            self.log.info('sync-time-failure', reason=reason)
+            self._current_task = None
+            self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+        # Schedule a task to set the ONU time
+        self._current_task = self._sync_time_task(self._agent, self._device_id)
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_idle(self):
+        """
+        State machine has just transitioned to the idle state
+
+        In this state, any added PM MEs that need to be created will be.
+        TODO: some non-interval PM stats (if there are any) are collected here
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+
+        if len(self._del_pm_me) and self._delete_me_deferred is None:
+            self._delete_me_deferred = reactor.callLater(0, self.delete_me)
+
+        elif len(self._add_pm_me) and self._add_me_deferred is None:
+            self._add_me_deferred = reactor.callLater(0, self.add_me)
+
+        elif datetime.utcnow() >= self._next_poll_time:
+            def success(results):
+                self._device.timestamp = arrow.utcnow().float_timestamp
+                self._device.mib_synchronizer.mib_set(results.me_class.class_id,
+                                                      results.entity_id,
+                                                      results.attributes)
+                self._next_poll_time = datetime.utcnow() + timedelta(seconds=self._poll_interval)
+
+            def failure(reason):
+                self.log.info('poll-failure', reason=reason)
+                self._device.timestamp = None
+                return None
+
+            # Scan all ANI-G ports
+            ani_g_entities = self._device.configuration.ani_g_entities
+            ani_g_entities_ids = ani_g_entities.keys() if ani_g_entities is not None else None
+
+            if ani_g_entities_ids is not None and len(ani_g_entities_ids):
+                for entity_id in ani_g_entities_ids:
+                    task = OmciGetRequest(self._agent, self.device_id,
+                                          AniG, entity_id,
+                                          self._ani_g_items, allow_failure=True)
+                    self._task_deferred = self._device.task_runner.queue_task(task)
+                    self._task_deferred.addCallbacks(success, failure)
+            else:
+                self.log.warn('poll-pm-no-anis')
+                self._next_poll_time = datetime.utcnow() + timedelta(seconds=self._poll_interval)
+
+        # TODO: Compute a better mechanism than just polling here, perhaps based on
+        #       the next time to fetch data for 'any' interval
+        self._deferred = reactor.callLater(self._tick_delay, self.tick)
+
+    def on_enter_create_pm_me(self):
+        """
+        State machine has just transitioned to the create_pm_me state
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+        self._cancel_tasks()
+        mes, self._add_pm_me = self._add_pm_me, dict()
+
+        def success(results):
+            self.log.debug('create-me-success', results=results)
+
+            # The create request could have received an already-exists (instance
+            # exists) status code, which we also consider successful
+            for pm, me in mes.items():
+                self._pm_me_collect_retries[pm] = self.pm_collected(pm)
+                self._pm_me_extended_info[pm] = me
+
+            self._current_task = None
+            self._deferred = reactor.callLater(0, self.success)
+
+        def failure(reason):
+            self.log.info('create-me-failure', reason=reason, retries=self._add_pm_me_retry)
+            self._current_task = None
+            if self._add_pm_me_retry <= self._create_attempts:
+                for pm, me in mes.items():
+                    self._add_pm_me[pm] = me
+                self._add_pm_me_retry += 1
+                self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+            else:
+                # We cannot create any collection MEs; disable collection for this ONU
+                self.log.warn('unable-to-create-pm-me-disabling-collection',
+                              reason=reason, device_id=self._device_id)
+                self._deferred = reactor.callLater(self._timeout_delay, self.stop)
+
+        self._current_task = self._create_pm_task(self._agent, self._device_id, mes)
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_delete_pm_me(self):
+        """
+        State machine has just transitioned to the delete_pm_me state
+        """
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+        self._cancel_tasks()
+
+        mes, self._del_pm_me = self._del_pm_me, set()
+
+        def success(results):
+            self.log.debug('delete-me-success', results=results)
+            self._current_task = None
+            for me in mes:
+                self._pm_me_collect_retries.pop(me)
+
+            self._deferred = reactor.callLater(0, self.success)
+
+        def failure(reason):
+            self.log.info('delete-me-failure', reason=reason)
+            self._current_task = None
+            for me in mes:
+                self._del_pm_me.add(me)
+
+            self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+
+        self._current_task = self._delete_pm_task(self._agent, self._device_id, mes)
+        self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+        self._task_deferred.addCallbacks(success, failure)
+
+    def on_enter_collect_data(self):
+        """
+        State machine has just transitioned to the collect_data state
+        """
+
+        if self._next_interval is not None and self._next_interval > datetime.utcnow():
+            self.log.debug('wait-next-interval')
+            # Not ready for next interval, transition back to idle and we should get
+            # called again after a short delay
+            reactor.callLater(0, self.success)
+            return
+
+        self.advertise(OpenOmciEventType.state_change, self.state)
+        self._cancel_deferred()
+        self._cancel_tasks()
+        keys = self._pm_me_collect_retries.keys()
+        shuffle(keys)
+
+        for key in keys:
+            class_id = key[0]
+            entity_id = key[1]
+
+            self.log.debug("in-enter-collect-data", data_key=key,
+                           retries=self._pm_me_collect_retries[key])
+
+            # Collect the data ?
+            if self._pm_me_collect_retries[key] > 0:
+                def success(results):
+                    self.log.debug('collect-success', results=results,
+                                   class_id=results.get('class_id'),
+                                   entity_id=results.get('entity_id'))
+                    self._current_task = None
+                    self._pm_me_collect_retries[key] = 0
+                    self._deferred = reactor.callLater(0, self.success)
+                    return results
+
+                def failure(reason):
+                    self.log.info('collect-failure', reason=reason)
+                    self._current_task = None
+                    self._pm_me_collect_retries[key] -= 1
+                    self._deferred = reactor.callLater(self._timeout_delay, self.failure)
+                    return reason   # Halt callback processing
+
+                # start the task
+                if key in self._pm_me_extended_info:
+                    self.log.debug('collect-extended-info-found', data_key=key,
+                                   extended_info=self._pm_me_extended_info[key])
+                    parent_class_id, parent_entity_id, upstream = \
+                        self._pm_me_extended_info[key]
+                else:
+                    self.log.debug('collect-extended-info-not-found', data_key=key)
+                    parent_class_id = None
+                    parent_entity_id = None
+                    upstream = None
+
+                self._current_task = self._get_interval_task(self._agent, self._device_id,
+                                                             class_id, entity_id,
+                                                             parent_class_id=parent_class_id,
+                                                             parent_entity_id=parent_entity_id,
+                                                             upstream=upstream)
+                self._task_deferred = self._device.task_runner.queue_task(self._current_task)
+                self._task_deferred.addCallbacks(success, failure)
+                self._task_deferred.addCallback(self.publish_data)
+                return
+
+        # Here if all intervals have been collected (we are up to date)
+        self._next_interval = self.get_next_interval
+        self.log.debug('collect-calculate-next', next=self._next_interval)
+
+        self._pm_me_collect_retries = dict.fromkeys(self._pm_me_collect_retries, self._collect_attempts)
+        reactor.callLater(0, self.success)
+
+    def on_enter_threshold_exceeded(self):
+        """
+        State machine has just transitioned to the threshold_exceeded state
+        """
+        pass  # TODO: Not sure if we want this state. Need to get alarm synchronizer working first
+
+    @property
+    def get_next_interval(self):
+        """
+        Determine the time of the next interval collection for all of this
+        ONU's PM intervals. The earliest fetch time is at least 1 minute into
+        the next interval.
+
+        :return: (datetime) UTC time to get the next interval
+        """
+        now = datetime.utcnow()
+
+        # Get delta seconds to at least 1 minute into next interval
+        next_delta_secs = (16 - (now.minute % 15)) * 60
+        next_interval = now + timedelta(seconds=next_delta_secs)
+
+        # NOTE: The following causes an immediate collection right after initial
+        #       startup/MIB sync; comment it out to always wait until the next
+        #       interval boundary (it is primarily useful for debugging)
+        if self._next_interval is None:
+            return now     # Collect immediately
+
+        # Skew the next time up to the maximum specified
+        # TODO: May want to skew in a shorter range and select the minute
+        #       based off some device property value to make collection a
+        #       little more predictable on a per-ONU basis.
+        return next_interval + timedelta(seconds=uniform(0, self._interval_skew))
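+
+    # Worked example of the math above: at 12:07 UTC, now.minute % 15 == 7, so
+    # next_delta_secs = (16 - 7) * 60 = 540 and the base fetch time is 12:16,
+    # one minute into the 12:15..12:30 interval, plus up to interval_skew
+    # seconds of random skew.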
+
+    def pm_collected(self, key):
+        """
+        Query database and determine if PM data needs to be collected for this ME
+        """
+        class_id = key[0]
+        entity_id = key[1]
+
+        return self._collect_attempts        # TODO: Implement persistent storage
+
+    def publish_data(self, results):
+        """
+        Publish the PM interval results on the appropriate bus.  The results are
+        a dictionary with the following format.
+
+            'class-id':          (int) ME Class ID,
+            'entity-id':         (int) ME Entity ID,
+            'me-name':           (str) ME Class name,   # Mostly for debugging...
+            'interval-end-time': None,
+            'interval-utc-time': (DateTime) UTC time when retrieved from ONU,
+
+            Counters are added as they are retrieved, in the form
+            'counter-attribute-name': (int) value
+
+        :param results: (dict) PM results
+        """
+        self.log.debug('collect-publish', results=results)
+
+        if self._pm_config is not None:
+            self._pm_config.publish_metrics(results)
+
+        pass  # TODO: Save off last time interval fetched to persistent storage?
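+
+    # Illustrative sketch of the results dictionary described above (the
+    # counter names and values shown are hypothetical):
+    #
+    #   {'class-id': EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id,
+    #    'entity-id': 0x101,
+    #    'me-name': 'EthernetFrameUpstreamPerformanceMonitoringHistoryData',
+    #    'interval-end-time': None,
+    #    'interval-utc-time': datetime.utcnow(),
+    #    'drop_events': 0,
+    #    'octets': 1234}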
+
+    def on_mib_reset_response(self, _topic, msg):
+        """
+        Called upon receipt of a MIB Reset Response for this ONU
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-mib-reset-response', state=self.state)
+        try:
+            response = msg[RX_RESPONSE_KEY]
+            omci_msg = response.fields['omci_message'].fields
+            status = omci_msg['success_code']
+
+            if status == RC.Success:
+                for class_id in self._me_watch_list.iterkeys():
+                    # BP entity_id -> (PM class_id, PM entity_id)
+                    instances = self._me_watch_list[class_id]['instances']
+                    for _, me_pair in instances.items():
+                        self._me_watch_list[class_id]['create-delete'](None, me_pair[0],
+                                                                       me_pair[1], add=False)
+                    self._me_watch_list[class_id]['instances'] = dict()
+
+        except KeyError:
+            pass            # NOP
+
+    def on_create_response(self, _topic, msg):
+        """
+        Called upon receipt of a Create Response for this ONU.
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-create-response', state=self.state)
+
+        def valid_request(stat, c_id, e_id):
+            return self._omci_cc_subscriptions[RxEvent.Create] is not None\
+                and stat in (RC.Success, RC.InstanceExists) \
+                and c_id in self._me_watch_list.keys() \
+                and e_id not in self._me_watch_list[c_id]['instances']
+
+        response = msg[RX_RESPONSE_KEY]
+        omci = response.fields['omci_message'].fields
+        class_id = omci['entity_class']
+        entity_id = omci['entity_id']
+        status = omci['success_code']
+
+        if valid_request(status, class_id, entity_id):
+            request = msg[TX_REQUEST_KEY]
+            method = self._me_watch_list[class_id]['create-delete']
+            cid, eid = method(request, class_id, entity_id, add=True)
+
+            if cid > 0:
+                # BP entity_id -> (PM class_id, PM entity_id)
+                instances = self._me_watch_list[class_id]['instances']
+                instances[entity_id] = (cid, eid)
+
+    def on_delete_response(self, _topic, msg):
+        """
+        Called upon receipt of a Delete Response for this ONU
+
+        :param _topic: (str) OMCI-RX topic
+        :param msg: (dict) Dictionary with 'rx-response' and 'tx-request' (if any)
+        """
+        self.log.debug('on-delete-response', state=self.state)
+
+        def valid_request(stat, cid, eid):
+            return self._omci_cc_subscriptions[RxEvent.Delete] is not None\
+                and stat in (RC.Success, RC.UnknownInstance) \
+                and cid in self._me_watch_list.keys() \
+                and eid in self._me_watch_list[cid]['instances']
+
+        response = msg[RX_RESPONSE_KEY]
+        omci = response.fields['omci_message'].fields
+        class_id = omci['entity_class']
+        entity_id = omci['entity_id']
+        status = omci['success_code']
+
+        if valid_request(status, class_id, entity_id):
+            request = msg[TX_REQUEST_KEY]
+            method = self._me_watch_list[class_id]['create-delete']
+
+            method(request, class_id, entity_id, add=False)
+            # BP entity_id -> (PM class_id, PM entity_id)
+            instances = self._me_watch_list[class_id]['instances']
+            del instances[entity_id]
+
+    def get_pm_entity_id_for_add(self, pm_cid, eid):
+        """
+        Select the Entity ID to use for a specific PM Class ID. For extended
+        PM MEs, a new entity ID (>0) is allocated from a pool.
+
+        :param pm_cid: (int) PM ME Class ID to create/get an entity ID for
+        :param eid: (int) Reference class's entity ID; used as the PM entity ID
+                    for non-extended PM history MEs
+        :return: (int) Entity ID to use
+        """
+        if pm_cid in (EthernetFrameExtendedPerformanceMonitoring.class_id,
+                      EthernetFrameExtendedPerformanceMonitoring64Bit.class_id):
+            return self._enet_entity_id.get_next()
+        return eid
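+
+    # Example (sketch): for a classic history ME such as
+    # EthernetPMMonitoringHistoryData, the reference entity ID is returned
+    # unchanged, while the two extended Ethernet frame PM classes draw a fresh
+    # entity ID from the IndexPool created in __init__.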
+
+    def release_pm_entity_id(self, pm_cid, eid):
+        if pm_cid in (EthernetFrameExtendedPerformanceMonitoring.class_id,
+                      EthernetFrameExtendedPerformanceMonitoring64Bit.class_id):
+            try:
+                self._enet_entity_id.release(eid)
+            except:
+                pass
+
+    def add_remove_enet_frame_pm(self, request, class_id, entity_id,
+                                 add=True,
+                                 attributes=None):
+        """
+        Add/remove PM for the dynamic MAC Port configuration data.
+
+        This can be called in a variety of ways:
+
+           o If from an Response event from OMCI_CC, the request will contain
+             the original create/delete request. The class_id and entity_id will
+             be the MAC Data Configuration Data class and instance ID.
+             add = True if create, False if delete
+
+           o If starting up (and the associated ME is already created), the MAC
+             Data Configuration Data class and instance ID, and attributes are
+             provided. request = None and add = True
+
+           o If cleaning up (stopping), the PM ME class_id, entity_id are provided.
+             request = None and add = False
+
+        :return: (int, int) PM ME class_id and entity_id for which the add/remove
+                            was performed. Class and entity IDs are non-zero on success
+        """
+        pm_entity_id = 0
+        cid = 0
+        eid = 0
+        upstream = False
+
+        def tp_type_to_pm(tp):
+            # TODO: Support 64-bit extended Monitoring MEs.
+            # This will result in the need to maintain entity IDs of PMs differently
+            upstream_types = [  # EthernetFrameExtendedPerformanceMonitoring64Bit.class_id,
+                              EthernetFrameExtendedPerformanceMonitoring.class_id,
+                              EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id], True
+            downstream_types = [  # EthernetFrameExtendedPerformanceMonitoring64Bit.class_id,
+                                EthernetFrameExtendedPerformanceMonitoring.class_id,
+                                EthernetFrameDownstreamPerformanceMonitoringHistoryData.class_id], False
+            return {
+                1: downstream_types,
+                3: upstream_types,
+                5: downstream_types,
+                6: downstream_types,
+            }.get(tp, None)
+
+        if request is not None:
+            assert class_id == MacBridgePortConfigurationData.class_id
+
+            # Is this associated with the ANI or the UNI side of the bridge?
+            # For VOLTHA v2.0, only high-speed internet data service is supported
+            attributes = request.fields['omci_message'].fields['data']
+            pm_class_ids, upstream = tp_type_to_pm(attributes['tp_type'])
+            cid = request.fields['omci_message'].fields['entity_class']
+            eid = request.fields['omci_message'].fields['entity_id']
+            if not add:
+                instances = self._me_watch_list[cid]['instances']
+                _, pm_entity_id = instances.get(eid, (None, None))
+
+        elif add:
+            assert class_id == MacBridgePortConfigurationData.class_id
+            assert isinstance(attributes, dict)
+
+            # Is this associated with the ANI or the UNI side of the bridge?
+            pm_class_ids, upstream = tp_type_to_pm(attributes.get('tp_type'))
+            cid = class_id
+            eid = entity_id
+
+        else:
+            assert class_id in (EthernetFrameUpstreamPerformanceMonitoringHistoryData.class_id,
+                                EthernetFrameDownstreamPerformanceMonitoringHistoryData.class_id,
+                                EthernetFrameExtendedPerformanceMonitoring.class_id,
+                                EthernetFrameExtendedPerformanceMonitoring64Bit.class_id)
+            pm_class_ids = [class_id]
+
+        if pm_class_ids is None:
+            return 0, 0      # Unable to select a supported ME for this ONU
+
+        if add:
+            for pm_class_id in pm_class_ids:
+                if self._me_is_supported(pm_class_id):
+                    pm_entity_id = self.get_pm_entity_id_for_add(pm_class_id, eid)
+                    self.add_pm_me(pm_class_id, pm_entity_id, cid=cid, eid=eid,
+                                   upstream=upstream)
+                    return pm_class_id, pm_entity_id
+        else:
+            for pm_class_id in pm_class_ids:
+                if self._me_is_supported(pm_class_id):
+                    self.delete_pm_me(pm_class_id, pm_entity_id)
+                    self.release_pm_entity_id(pm_class_id, pm_entity_id)
+                    return pm_class_id, pm_entity_id
+
+        return 0, 0
diff --git a/python/extensions/omci/tasks/__init__.py b/python/extensions/omci/tasks/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/extensions/omci/tasks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/extensions/omci/tasks/alarm_resync_task.py b/python/extensions/omci/tasks/alarm_resync_task.py
new file mode 100644
index 0000000..a16f3a2
--- /dev/null
+++ b/python/extensions/omci/tasks/alarm_resync_task.py
@@ -0,0 +1,393 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, returnValue
+from twisted.internet import reactor
+from common.utils.asleep import asleep
+from voltha.extensions.omci.database.mib_db_dict import *
+from voltha.extensions.omci.omci_defs import AttributeAccess
+from voltha.extensions.omci.database.alarm_db_ext import AlarmDbExternal
+
+AA = AttributeAccess
+
+
+class AlarmCopyException(Exception):
+    pass
+
+
+class AlarmDownloadException(Exception):
+    pass
+
+
+class AlarmResyncException(Exception):
+    pass
+
+
+class AlarmResyncTask(Task):
+    """
+    OpenOMCI ALARM resynchronization Task
+
+    This task gets a copy of the ALARM table and compares it to a copy of
+    the local database. When the ALARM Upload command is sent to the ONU,
+    the ONU should make a copy and source the data requested from that copy.
+    The ONU can still source AVCs and the OLT can still send config
+    commands to the actual ONU.
+    """
+    task_priority = Task.DEFAULT_PRIORITY
+    name = "ALARM Resynchronization Task"
+
+    max_retries = 3
+    retry_delay = 7
+
+    max_alarm_upload_next_retries = 3
+    alarm_upload_next_delay = 10          # Max * delay < 60 seconds
+
+    def __init__(self, omci_agent, device_id):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        """
+        super(AlarmResyncTask, self).__init__(AlarmResyncTask.name,
+                                              omci_agent,
+                                              device_id,
+                                              priority=AlarmResyncTask.task_priority,
+                                              exclusive=False)
+        self._local_deferred = None
+        self._device = omci_agent.get_device(device_id)
+        self._db_active = MibDbVolatileDict(omci_agent)
+        self._db_active.start()
+
+    def cancel_deferred(self):
+        super(AlarmResyncTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start ALARM Re-Synchronization task
+        """
+        super(AlarmResyncTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_alarm_resync)
+        self._db_active.start()
+        self._db_active.add(self.device_id)
+
+    def stop(self):
+        """
+        Shutdown ALARM Re-Synchronization task
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        self._device = None
+        self._db_active.stop()
+        self._db_active = None
+        super(AlarmResyncTask, self).stop()
+
+    @inlineCallbacks
+    def perform_alarm_resync(self):
+        """
+        Perform the ALARM Resynchronization sequence
+
+        The sequence to be performed is:
+            - get a copy of the current ALARM database
+
+            - perform ALARM upload commands to get ONU's database and save this
+              to a local DB.
+        During the alarm upload process, the maximum time between alarm upload next
+        requests is 1 minute.
+        """
+        self.log.debug('perform-alarm-resync')
+
+        try:
+            self.strobe_watchdog()
+            results = yield self.snapshot_alarm()
+            olt_db_copy = results[0]
+            number_of_commands = results[1]
+
+            if olt_db_copy is None:
+                e = AlarmCopyException('Failed to get local database copy')
+                self.deferred.errback(failure.Failure(e))
+            else:
+                # Start the ALARM upload sequence, save alarms to the table
+                self.strobe_watchdog()
+
+                if number_of_commands > 0:
+                    commands_retrieved = yield self.upload_alarm(number_of_commands)
+                else:
+                    commands_retrieved = 0
+
+                if commands_retrieved != number_of_commands:
+                    e = AlarmDownloadException('Only retrieved {} of {} instances'.
+                                               format(commands_retrieved, number_of_commands))
+                    self.deferred.errback(failure.Failure(e))
+                else:
+                    # Compare the databases
+                    onu_db_copy = self._db_active.query(self.device_id)
+
+                    on_olt_only, on_onu_only, attr_diffs = \
+                        self.compare_mibs(olt_db_copy, onu_db_copy)
+
+                    on_olt_only_diffs = on_olt_only if len(on_olt_only) else None
+                    on_onu_only_diffs = on_onu_only if len(on_onu_only) else None
+                    attr_diffs = attr_diffs if len(attr_diffs) else None
+
+                    if all(diff is None for diff in [on_olt_only_diffs, on_onu_only_diffs, attr_diffs]):
+                        results = None
+                    else:
+                        results = {
+                            'onu-only': on_onu_only_diffs,
+                            'olt-only': on_olt_only_diffs,
+                            'attr-diffs': attr_diffs,
+                            'onu-db': onu_db_copy,
+                            'olt-db': olt_db_copy
+                        }
+                    self.deferred.callback(results)
+
+        except Exception as e:
+            self.log.exception('resync', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+    @inlineCallbacks
+    def snapshot_alarm(self):
+        """
+        Snapshot the ALARM on the ONU and create a copy of our local ALARM database
+
+        :return: (tuple) (olt_db_copy, command_sequence_number)
+        """
+        olt_db_copy = None
+        command_sequence_number = None
+
+        try:
+            max_tries = AlarmResyncTask.max_retries - 1
+
+            for retries in xrange(0, max_tries + 1):
+                # Send ALARM Upload so ONU snapshots its ALARM
+                try:
+                    command_sequence_number = yield self.send_alarm_upload()
+                    self.strobe_watchdog()
+
+                    if command_sequence_number is None:
+                        if retries >= max_tries:
+                            olt_db_copy = None
+                            break
+
+                except TimeoutError as e:
+                    self.log.warn('timeout', e=e)
+                    if retries >= max_tries:
+                        raise
+
+                    self.strobe_watchdog()
+                    yield asleep(AlarmResyncTask.retry_delay)
+                    continue
+
+                # Get a snapshot of the local alarm table
+                olt_db_copy = self._device.query_alarm_table()
+                # if we made it this far, no need to keep trying
+                break
+
+        except Exception as e:
+            self.log.exception('alarm-resync', e=e)
+            raise
+
+        # Handle initial failures
+
+        if olt_db_copy is None or command_sequence_number is None:
+            raise AlarmCopyException('Failed to snapshot ALARM copy after {} retries'.
+                                     format(AlarmResyncTask.max_retries))
+
+        returnValue((olt_db_copy, command_sequence_number))
+
+    @inlineCallbacks
+    def send_alarm_upload(self):
+        """
+        Perform ALARM upload command and get the number of entries to retrieve
+
+        :return: (int) Number of commands to execute or None on error
+        """
+        ########################################
+        # Begin ALARM Upload
+        try:
+            results = yield self._device.omci_cc.send_get_all_alarm()
+            self.strobe_watchdog()
+            command_sequence_number = results.fields['omci_message'].fields['number_of_commands']
+
+            if command_sequence_number < 0:
+                raise ValueError('Number of commands was {}'.format(command_sequence_number))
+
+            returnValue(command_sequence_number)
+
+        except TimeoutError as e:
+            self.log.warn('alarm-resync-get-timeout', e=e)
+            raise
+
+    @inlineCallbacks
+    def upload_alarm(self, command_sequence_number):
+        ########################################
+        # Begin ALARM Upload
+        seq_no = None
+
+        for seq_no in xrange(command_sequence_number):
+            max_tries = AlarmResyncTask.max_alarm_upload_next_retries
+
+            for retries in xrange(0, max_tries):
+                try:
+                    response = yield self._device.omci_cc.send_get_all_alarm_next(seq_no)
+                    self.strobe_watchdog()
+
+                    omci_msg = response.fields['omci_message'].fields
+                    alarm_class_id = omci_msg['alarmed_entity_class']
+                    alarm_entity_id = omci_msg['alarmed_entity_id']
+
+                    alarm_bit_map = omci_msg['alarm_bit_map']
+                    attributes = {AlarmDbExternal.ALARM_BITMAP_KEY: alarm_bit_map}
+
+                    # Save to the database
+                    self._db_active.set(self.device_id, alarm_class_id,
+                                        alarm_entity_id, attributes)
+                    break
+
+                except TimeoutError:
+                    self.log.warn('alarm-resync-timeout', seq_no=seq_no,
+                                  command_sequence_number=command_sequence_number)
+
+                    if retries < max_tries - 1:
+                        yield asleep(AlarmResyncTask.alarm_upload_next_delay)
+                        self.strobe_watchdog()
+                    else:
+                        raise
+
+                except Exception as e:
+                    self.log.exception('resync', e=e, seq_no=seq_no,
+                                       command_sequence_number=command_sequence_number)
+
+        returnValue(seq_no + 1)     # seq_no is zero-based
+
+    def compare_mibs(self, db_copy, db_active):
+        """
+        Compare our db_copy with the ONU's active copy
+
+        :param db_copy: (dict) OpenOMCI's copy of the database
+        :param db_active: (dict) ONU's database snapshot
+        :return: (list, list, list) OLT-only, ONU-only, and attribute differences
+        """
+        self.strobe_watchdog()
+
+        # Class & Entities only in local copy (OpenOMCI)
+        on_olt_only = self.get_lhs_only_dict(db_copy, db_active)
+
+        # Class & Entities only on remote (ONU)
+        on_onu_only = self.get_lhs_only_dict(db_active, db_copy)
+
+        # Class & Entities on both local & remote, but one or more attributes
+        # are different on the ONU.  This is the value that the local (OpenOMCI)
+        # thinks should be on the remote (ONU)
+
+        me_map = self.omci_agent.get_device(self.device_id).me_map
+        attr_diffs = self.get_attribute_diffs(db_copy, db_active, me_map)
+
+        return on_olt_only, on_onu_only, attr_diffs
+
+    def get_lhs_only_dict(self, lhs, rhs):
+        """
+        Compare two MIB database dictionaries and return the ME Class ID and
+        instances that are unique to the lhs dictionary. Both parameters
+        should be in the common MIB Database output dictionary format that
+        is returned by the mib 'query' command.
+
+        :param lhs: (dict) Left-hand-side argument
+        :param rhs: (dict) Right-hand-side argument
+
+        :return: (list(int,int)) List of (class_id, inst_id) tuples
+        """
+        results = list()
+
+        for cls_id, cls_data in lhs.items():
+            # Get unique classes
+            #
+            # Skip keys that are not class IDs
+            if not isinstance(cls_id, int):
+                continue
+
+            if cls_id not in rhs:
+                results.extend([(cls_id, inst_id) for inst_id in cls_data.keys()
+                                if isinstance(inst_id, int)])
+            else:
+                # Get unique instances of a class
+                lhs_cls = cls_data
+                rhs_cls = rhs[cls_id]
+
+                for inst_id, _ in lhs_cls.items():
+                    # Skip keys that are not instance IDs
+                    if isinstance(inst_id, int) and inst_id not in rhs_cls:
+                        results.append((cls_id, inst_id))
+
+        return results
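+
+    # Quick illustration (hypothetical data): with
+    #   lhs = {5: {1: {...}, 2: {...}}} and rhs = {5: {1: {...}}}
+    # instance 2 of class 5 exists only on the left, so the result is [(5, 2)].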
+
+    def get_attribute_diffs(self, omci_copy, onu_copy, me_map):
+        """
+        Compare two OMCI MIBs and return the ME class and instance IDs that exist
+        on both the local copy and the remote ONU that have different attribute
+        values. Both parameters should be in the common MIB Database output
+        dictionary format that is returned by the mib 'query' command.
+
+        :param omci_copy: (dict) OpenOMCI copy (OLT-side) of the MIB Database
+        :param onu_copy: (dict) The ONU's latest copy of its database
+        :param me_map: (dict) ME Class ID MAP for this ONU
+
+        :return: (list(int,int,str)) List of (class_id, inst_id, attribute) tuples
+                                     identifying the specific ME instances whose
+                                     attributes differ
+        """
+        results = list()
+
+        # Get class ID's that are in both
+        class_ids = {cls_id for cls_id, _ in omci_copy.items()
+                     if isinstance(cls_id, int) and cls_id in onu_copy}
+
+        for cls_id in class_ids:
+            # Get unique instances of a class
+            olt_cls = omci_copy[cls_id]
+            onu_cls = onu_copy[cls_id]
+
+            # Get set of common instance IDs
+            inst_ids = {inst_id for inst_id, _ in olt_cls.items()
+                        if isinstance(inst_id, int) and inst_id in onu_cls}
+
+            for inst_id in inst_ids:
+                omci_attributes = {k for k in olt_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+                onu_attributes = {k for k in onu_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+
+                # Get attributes that exist in one database, but not the other
+                sym_diffs = (omci_attributes ^ onu_attributes)
+                results.extend([(cls_id, inst_id, attr) for attr in sym_diffs])
+
+                # Get common attributes with different values
+                common_attributes = (omci_attributes & onu_attributes)
+                results.extend([(cls_id, inst_id, attr) for attr in common_attributes
+                               if olt_cls[inst_id][ATTRIBUTES_KEY][attr] !=
+                                onu_cls[inst_id][ATTRIBUTES_KEY][attr]])
+        return results
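+
+    # Quick illustration (hypothetical data): with
+    #   omci_copy = {5: {1: {ATTRIBUTES_KEY: {'state': 1, 'mode': 0}}}}
+    #   onu_copy  = {5: {1: {ATTRIBUTES_KEY: {'state': 2}}}}
+    # 'mode' exists on only one side (symmetric difference) and 'state' has
+    # different values, so the result is [(5, 1, 'mode'), (5, 1, 'state')].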
diff --git a/python/extensions/omci/tasks/file_download_task.py b/python/extensions/omci/tasks/file_download_task.py
new file mode 100755
index 0000000..63da427
--- /dev/null
+++ b/python/extensions/omci/tasks/file_download_task.py
@@ -0,0 +1,108 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, AlreadyCalledError
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes
+import requests
+import os
+
+class FileDownloadTask(Task):
+    name = "Image File Download Task"
+    CHUNK_SIZE = 1024
+    
+    def __init__(self, omci_agent, img_dnld, clock=None):
+        super(FileDownloadTask, self).__init__(FileDownloadTask.name, omci_agent, img_dnld.id,
+                                               exclusive=False,
+                                               watchdog_timeout=45)
+        self._image_download = img_dnld
+        self.reactor = clock if clock is not None else reactor
+        self._local_deferred = None
+
+    @inlineCallbacks
+    def perform_download_data(self):
+        try:
+            r = requests.get(self._image_download.url, stream=True)
+            with open(os.path.join(self._image_download.local_dir,
+                                   self._image_download.name), 'wb') as f:
+                for chunk in r.iter_content(chunk_size=FileDownloadTask.CHUNK_SIZE):
+                    self.strobe_watchdog()
+                    if chunk:   # filter out keep-alive chunks
+                        yield f.write(chunk)
+                        self._image_download.file_size += len(chunk)
+            self.deferred.callback(self._image_download)
+        except Exception as e:
+            self.deferred.errback(failure.Failure(e))
+
+    def start(self):
+        super(FileDownloadTask, self).start()
+        if not os.path.exists(self._image_download.local_dir):
+            os.makedirs(self._image_download.local_dir)
+
+        self.strobe_watchdog()
+        self._image_download.file_size = 0
+        self._local_deferred = self.reactor.callLater(0, self.perform_download_data)
+
+    def cancel_deferred(self):
+        self.log.debug('FileDownloadTask cancel_deferred')
+        super(FileDownloadTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
diff --git a/python/extensions/omci/tasks/get_mds_task.py b/python/extensions/omci/tasks/get_mds_task.py
new file mode 100644
index 0000000..1560c83
--- /dev/null
+++ b/python/extensions/omci/tasks/get_mds_task.py
@@ -0,0 +1,112 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
+from voltha.extensions.omci.omci_me import OntDataFrame
+from voltha.extensions.omci.omci_defs import ReasonCodes as RC
+
+
+class GetMdsTask(Task):
+    """
+    OpenOMCI Get MIB Data Sync value task
+
+    On successful completion, this task will call the 'callback' method of the
+    deferred returned by the start method and return the value of the MIB
+    Data Sync attribute of the ONT Data ME
+    """
+    task_priority = Task.DEFAULT_PRIORITY
+    name = "Get MDS Task"
+
+    def __init__(self, omci_agent, device_id):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        """
+        super(GetMdsTask, self).__init__(GetMdsTask.name,
+                                         omci_agent,
+                                         device_id,
+                                         priority=GetMdsTask.task_priority)
+        self._local_deferred = None
+
+    def cancel_deferred(self):
+        super(GetMdsTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start MIB Synchronization tasks
+        """
+        super(GetMdsTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_get_mds)
+
+    def stop(self):
+        """
+        Shutdown MIB Synchronization tasks
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        super(GetMdsTask, self).stop()
+
+    @inlineCallbacks
+    def perform_get_mds(self):
+        """
+        Get the 'mib_data_sync' attribute of the ONU
+        """
+        self.log.debug('perform-get-mds')
+
+        try:
+            device = self.omci_agent.get_device(self.device_id)
+
+            #########################################
+            # Request (MDS supplied value does not matter for a 'get' request)
+
+            self.strobe_watchdog()
+            results = yield device.omci_cc.send(OntDataFrame().get())
+
+            omci_msg = results.fields['omci_message'].fields
+            status = omci_msg['success_code']
+
+            # Note: Currently the data reported by the Scapy decode is 16-bits since we need
+            #       the data field that large in order to support MIB and Alarm Upload Next
+            #       commands.  Select only the first 8-bits since that is the size of the MIB
+            #       Data Sync attribute
+            mds = (omci_msg['data']['mib_data_sync'] >> 8) & 0xFF \
+                if 'data' in omci_msg and 'mib_data_sync' in omci_msg['data'] else -1
+
+            self.log.debug('ont-data-mds', status=status, mib_data_sync=mds)
+
+            assert status == RC.Success, 'Unexpected Response Status: {}'.format(status)
+
+            # Successful if here
+            self.deferred.callback(mds)
+
+        except TimeoutError as e:
+            self.log.warn('get-mds-timeout', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+        except Exception as e:
+            self.log.exception('get-mds', e=e)
+            self.deferred.errback(failure.Failure(e))
diff --git a/python/extensions/omci/tasks/interval_data_task.py b/python/extensions/omci/tasks/interval_data_task.py
new file mode 100644
index 0000000..d41c1d0
--- /dev/null
+++ b/python/extensions/omci/tasks/interval_data_task.py
@@ -0,0 +1,198 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from datetime import datetime
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
+from voltha.extensions.omci.omci_defs import ReasonCodes
+from voltha.extensions.omci.omci_frame import OmciFrame, OmciGet
+
+
+class IntervalDataTaskFailure(Exception):
+    pass
+
+
+class IntervalDataTask(Task):
+    """
+    OpenOMCI Performance Interval Get Request
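+
+    A construction sketch (IDs are illustrative; ME class 24 is the Ethernet
+    PM history data ME in ITU-T G.988):
+
+        task = IntervalDataTask(omci_agent, device_id,
+                                class_id=24, entity_id=0x101)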
+    """
+    task_priority = Task.DEFAULT_PRIORITY
+    name = "Interval Data Task"
+    max_payload = 29
+
+    def __init__(self, omci_agent, device_id, class_id, entity_id,
+                 max_get_response_payload=max_payload,
+                 parent_class_id=None,
+                 parent_entity_id=None,
+                 upstream=None):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param class_id: (int) ME Class ID
+        :param entity_id: (int) ME entity ID
+        :param max_get_response_payload: (int) Maximum number of octets in a
+                                               single GET response frame
+        """
+        super(IntervalDataTask, self).__init__(IntervalDataTask.name,
+                                               omci_agent,
+                                               device_id,
+                                               priority=IntervalDataTask.task_priority,
+                                               exclusive=False)
+        self._local_deferred = None
+        self._class_id = class_id
+        self._entity_id = entity_id
+
+        self._parent_class_id = parent_class_id
+        self._parent_entity_id = parent_entity_id
+        self._upstream = upstream
+
+        me_map = self.omci_agent.get_device(self.device_id).me_map
+        if self._class_id not in me_map:
+            msg = "The requested ME Class () does not exist in the ONU's ME Map".format(self._class_id)
+            self.log.warn('unknown-pm-me', msg=msg)
+            raise IntervalDataTaskFailure(msg)
+
+        self._entity = me_map[self._class_id]
+        self._counter_attributes = self.get_counter_attributes_names_and_size()
+        self._max_payload = max_get_response_payload
+
+    def cancel_deferred(self):
+        super(IntervalDataTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start the tasks
+        """
+        super(IntervalDataTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_get_interval)
+
+    def stop(self):
+        """
+        Shutdown the tasks
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        super(IntervalDataTask, self).stop()
+
+    def get_counter_attributes_names_and_size(self):
+        """
+        Get all of the counter attributes names and the amount of storage they take
+
+        :return: (dict) Attribute name -> length
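+
+        Illustrative shape of the result (attribute names depend on the ME;
+        these are from the Ethernet PM history data ME):
+
+            {'fcs_errors': 4, 'excessive_collision_counter': 4, ...}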
+        """
+        return {name: self._entity.attributes[attr_index].field.sz
+                for name, attr_index in self._entity.attribute_name_to_index_map.items()
+                if self._entity.attributes[attr_index].is_counter}
+
+    @inlineCallbacks
+    def perform_get_interval(self):
+        """
+        Sync the time
+        """
+        self.log.info('perform-get-interval', class_id=self._class_id,
+                      entity_id=self._entity_id)
+
+        device = self.omci_agent.get_device(self.device_id)
+        attr_names = self._counter_attributes.keys()
+
+        final_results = {
+            'class_id': self._class_id,
+            'entity_id': self._entity_id,
+            'me_name': self._entity.__name__,   # Mostly for debugging...
+            'interval_utc_time': None,
+            'parent_class_id': self._parent_class_id,
+            'parent_entity_id': self._parent_entity_id,
+            'upstream': self._upstream
+            # Counters added here as they are retrieved
+        }
+        last_end_time = None
+
+        while len(attr_names) > 0:
+            # Get as many attributes as will fit. Always include the 1 octet
+            # Interval End Time Attribute and 2 octets for the Entity ID
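+            #
+            # Illustrative arithmetic (not from the standard): with the default
+            # max_payload of 29 octets, remaining_payload starts at 26, so up to
+            # six 4-octet counters fit into a single GET request.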
+
+            remaining_payload = self._max_payload - 3
+            attributes = list()
+            for name in attr_names:
+                if self._counter_attributes[name] > remaining_payload:
+                    break
+
+                attributes.append(name)
+                remaining_payload -= self._counter_attributes[name]
+
+            attr_names = attr_names[len(attributes):]
+            attributes.append('interval_end_time')
+
+            frame = OmciFrame(
+                transaction_id=None,
+                message_type=OmciGet.message_id,
+                omci_message=OmciGet(
+                    entity_class=self._class_id,
+                    entity_id=self._entity_id,
+                    attributes_mask=self._entity.mask_for(*attributes)
+                )
+            )
+            self.log.debug('interval-get-request', class_id=self._class_id,
+                           entity_id=self._entity_id)
+            try:
+                self.strobe_watchdog()
+                results = yield device.omci_cc.send(frame)
+
+                omci_msg = results.fields['omci_message'].fields
+                status = omci_msg['success_code']
+                end_time = omci_msg['data'].get('interval_end_time')
+
+                self.log.debug('interval-get-results', class_id=self._class_id,
+                               entity_id=self._entity_id, status=status,
+                               end_time=end_time)
+
+                if status != ReasonCodes.Success:
+                    raise IntervalDataTaskFailure('Unexpected Response Status: {}, Class ID: {}'.
+                                                  format(status, self._class_id))
+                if last_end_time is None:
+                    last_end_time = end_time
+
+                elif end_time != last_end_time:
+                    msg = 'Interval End Time Changed during retrieval from {} to {}'\
+                        .format(last_end_time, end_time)
+                    self.log.info('interval-roll-over', msg=msg, class_id=self._class_id)
+                    raise IntervalDataTaskFailure(msg)
+
+                final_results['interval_utc_time'] = datetime.utcnow()
+                for attribute in attributes:
+                    final_results[attribute] = omci_msg['data'].get(attribute)
+
+            except TimeoutError as e:
+                self.log.warn('interval-get-timeout', e=e, class_id=self._class_id,
+                              entity_id=self._entity_id, attributes=attributes)
+                self.deferred.errback(failure.Failure(e))
+                return
+
+            except Exception as e:
+                self.log.exception('interval-get-failure', e=e, class_id=self._class_id)
+                self.deferred.errback(failure.Failure(e))
+                return
+
+        # Successful if here
+        self.deferred.callback(final_results)
diff --git a/python/extensions/omci/tasks/mib_reconcile_task.py b/python/extensions/omci/tasks/mib_reconcile_task.py
new file mode 100644
index 0000000..38e29dc
--- /dev/null
+++ b/python/extensions/omci/tasks/mib_reconcile_task.py
@@ -0,0 +1,693 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from common.utils.asleep import asleep
+from voltha.extensions.omci.tasks.task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, returnValue, TimeoutError
+from voltha.extensions.omci.omci_defs import *
+from voltha.extensions.omci.omci_me import OntDataFrame
+from voltha.extensions.omci.omci_frame import OmciFrame, OmciDelete, OmciCreate, OmciSet
+from voltha.extensions.omci.database.mib_db_api import ATTRIBUTES_KEY
+
+OP = EntityOperations
+RC = ReasonCodes
+AA = AttributeAccess
+
+
+class MibReconcileException(Exception):
+    pass
+
+
+class MibPartialSuccessException(Exception):
+    pass
+
+
+class MibReconcileTask(Task):
+    """
+    OpenOMCI MIB Reconcile Task
+
+    This task attempts to resynchronize the MIB. It queries the current
+    database/ONU to verify the differences still exist before correcting
+    them.
+    """
+    task_priority = 240
+    name = "MIB Reconcile Task"
+    max_sequential_db_updates = 5   # Be kind, rewind
+    db_update_pause = 0.05          # 50mS
+
+    def __init__(self, omci_agent, device_id, diffs):
+        """
+        Class initialization
+
+        :param omci_agent: (OpenOMCIAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param diffs: (dict) Dictionary of what was found to be invalid
+        """
+        super(MibReconcileTask, self).__init__(MibReconcileTask.name,
+                                               omci_agent,
+                                               device_id,
+                                               priority=MibReconcileTask.task_priority,
+                                               exclusive=False)
+        self._local_deferred = None
+        self._diffs = diffs
+        self._device = None
+        self._sync_sm = None
+        self._db_updates = 0    # For tracking sequential blocking consul/etcd updates
+
+    def cancel_deferred(self):
+        super(MibReconcileTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start MIB Reconcile task
+        """
+        super(MibReconcileTask, self).start()
+
+        self._device = self.omci_agent.get_device(self.device_id)
+
+        if self._device is None:
+            e = MibReconcileException('Device {} no longer exists'.format(self.device_id))
+            self.deferred.errback(failure.Failure(e))
+            return
+
+        self._sync_sm = self._device.mib_synchronizer
+
+        if self._sync_sm is None:
+            e = MibReconcileException('Device {} MIB State machine no longer exists'.format(self.device_id))
+            self.deferred.errback(failure.Failure(e))
+            return
+
+        self._local_deferred = reactor.callLater(0, self.perform_mib_reconcile)
+
+    def stop(self):
+        """
+        Shutdown MIB Reconcile task
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        self._device = None
+        super(MibReconcileTask, self).stop()
+
+    @inlineCallbacks
+    def perform_mib_reconcile(self):
+        """
+        Perform the MIB Reconciliation sequence.
+
+        The sequence to reconcile will be to clean up ONU only MEs, followed by
+        OLT/OpenOMCI-only MEs, and then finally correct common MEs with differing
+        attributes.
+        """
+        self.log.debug('perform-mib-reconcile')
+
+        try:
+            successes = 0
+            failures = 0
+
+            if self._diffs['onu-only'] is not None and len(self._diffs['onu-only']):
+                results = yield self.fix_onu_only(self._diffs['onu-only'],
+                                                  self._diffs['onu-db'])
+                self.log.debug('onu-only-results', good=results[0], bad=results[1])
+                successes += results[0]
+                failures += results[1]
+
+            if self._diffs['olt-only'] is not None and len(self._diffs['olt-only']):
+                results = yield self.fix_olt_only(self._diffs['olt-only'],
+                                                  self._diffs['onu-db'],
+                                                  self._diffs['olt-db'])
+                self.log.debug('olt-only-results', good=results[0], bad=results[1])
+                successes += results[0]
+                failures += results[1]
+
+            if self._diffs['attributes'] is not None and len(self._diffs['attributes']):
+                results = yield self.fix_attributes_only(self._diffs['attributes'],
+                                                         self._diffs['onu-db'],
+                                                         self._diffs['olt-db'])
+                self.log.debug('attributes-results', good=results[0], bad=results[1])
+                successes += results[0]
+                failures += results[1]
+
+            # Success? Update MIB-data-sync
+            if failures == 0:
+                results = yield self.update_mib_data_sync()
+                successes += results[0]
+                failures += results[1]
+
+            # Send back final status
+            if failures > 0:
+                msg = '{} Successful updates, {} failures'.format(successes, failures)
+                error = MibPartialSuccessException(msg) if successes \
+                    else MibReconcileException(msg)
+                self.deferred.errback(failure.Failure(error))
+            else:
+                self.deferred.callback('{} Successful updates'.format(successes))
+
+        except Exception as e:
+            if not self.deferred.called:
+                self.log.exception('reconcile', e=e)
+                self.deferred.errback(failure.Failure(e))
+
+    @inlineCallbacks
+    def fix_onu_only(self, onu, onu_db):
+        """
+        Fix ME's that were only found on the ONU. For ONU only MEs there are
+        the following things that will be checked.
+
+            o ME's that do not have an OpenOMCI class decoder. These are stored
+              as binary blobs in the MIB database. Since we do not ever set them
+              (since no encoder as well), just store them in the OLT/OpenOMCI MIB
+              Database.
+
+            o For ME's that are created by the ONU (no create/delete access), the
+              MEs 'may' be due to a firmware upgrade and reboot, or in response to
+              an OLT creating another ME entity which in turn caused the ONU to
+              create this ME.  Place these 'new' MEs into the database.
+
+            o For ME's that are created by the OLT/OpenOMCI, delete them from the
+              ONU
+
+        :param onu: (list(int,int)) List of tuples where (class_id, inst_id)
+        :param onu_db: (dict) ONU Database snapshot at time of audit
+
+        :return: (int, int) successes, failures
+        """
+        successes = 0
+        failures = 0
+        me_map = self._device.me_map
+
+        ####################################################################
+        # First the undecodables and onu-created (treated the same)
+        undecodable = self._undecodable(onu, me_map)
+        onu_created = self._onu_created(onu, me_map)
+
+        if len(undecodable) or len(onu_created):
+            results = yield self.fix_onu_only_save_to_db(undecodable, onu_created, onu_db)
+            successes += results[0]
+            failures += results[1]
+
+        ####################################################################
+        # Last the OLT created values, remove these from the ONU
+
+        olt_created = self._olt_created(onu, me_map)
+        if len(olt_created):
+            results = yield self.fix_onu_only_remove_from_onu(olt_created)
+            successes += results[0]
+            failures += results[1]
+
+        returnValue((successes, failures))
+
+    @inlineCallbacks
+    def fix_onu_only_save_to_db(self, undecodable, onu_created, onu_db):
+        """
+        Entries are in the ONU database and need to be saved to the OLT/OpenOMCI database.
+
+        Note that some, perhaps all, of these instances could be ONU create
+        in response to the OLT creating some other ME instance. So treat
+        the Database operation as a create.
+        """
+        successes = 0
+        failures = 0
+
+        for cid, eid in undecodable + onu_created:
+            if self.deferred.called:        # Check if task canceled
+                break
+            try:
+                # If the entry is in the current MIB, an audit issue or some other
+                # MIB operation put it into the database; declare it a failure so
+                # we audit again
+                try:
+                    olt_entry = self._sync_sm.query_mib(class_id=cid, instance_id=eid)
+
+                except KeyError:        # Common for ONU created MEs during audit
+                    olt_entry = None
+
+                if olt_entry is not None and len(olt_entry):
+                    self.log.debug('onu-only-in-current', cid=cid, eid=eid)
+                    failures += 1     # Mark as failure so we audit again
+
+                elif cid not in onu_db:
+                    self.log.warn('onu-only-not-in-audit', cid=cid, eid=eid)
+                    failures += 1
+
+                else:
+                    entry = onu_db[cid][eid]
+                    self.strobe_watchdog()
+                    self._sync_sm.mib_set(cid, eid, entry[ATTRIBUTES_KEY])
+                    successes += 1
+
+                    # If we do nothing but DB updates for a LOT of MEs, we are
+                    # blocking other async twisted tasks, be kind and pause
+                    self._db_updates += 1
+
+                    if self._db_updates >= MibReconcileTask.max_sequential_db_updates:
+                        self._db_updates = 0
+                        self._local_deferred = yield asleep(MibReconcileTask.db_update_pause)
+
+            except Exception as e:
+                self.log.warn('onu-only-error', e=e)
+                failures += 1
+
+        returnValue((successes, failures))
+
+    @inlineCallbacks
+    def fix_onu_only_remove_from_onu(self, olt_created):
+        """ On ONU, but no longer on OLT/OpenOMCI, delete it """
+        successes = 0
+        failures = 0
+
+        for cid, eid in olt_created:
+            if self.deferred.called:        # Check if task canceled
+                break
+            try:
+                # If the entry is in the current MIB there was an audit issue;
+                # declare it an error and the next audit should clear it up
+                try:
+                    current_entry = self._sync_sm.query_mib(class_id=cid, instance_id=eid)
+
+                except KeyError:
+                    # Expected if no other entities with same class present in MIB
+                    current_entry = None
+
+                if current_entry is not None and len(current_entry):
+                    self.log.debug('onu-only-in-current', cid=cid, eid=eid)
+                    failures += 1
+
+                else:
+                    # Delete it from the ONU. Assume success
+                    frame = OmciFrame(transaction_id=None,
+                                      message_type=OmciDelete.message_id,
+                                      omci_message=OmciDelete(entity_class=cid, entity_id=eid))
+
+                    self._local_deferred = yield self._device.omci_cc.send(frame)
+                    self.check_status_and_state(self._local_deferred, 'onu-attribute-update')
+                    successes += 1
+                    self._db_updates = 0
+
+            except Exception as e:
+                self.log.warn('olt-only-error', e=e)
+                failures += 1
+                self.strobe_watchdog()
+
+        returnValue((successes, failures))
+
+    @inlineCallbacks
+    def fix_olt_only(self, olt, onu_db, olt_db):
+        """
+        Fix ME's that were only found on the OLT. For OLT only MEs there are
+        the following things that will be checked.
+
+            o ME's that do not have an OpenOMCI class decoder. These are stored
+              as binary blobs in the MIB database. Since the OLT will never
+              create these (all are learned from ONU), it is assumed the ONU
+              has removed them for some purpose. So delete them from the OLT
+              database.
+
+            o For ME's that are created by the ONU (no create/delete access), the
+              MEs 'may' not be on the ONU because of a reboot, or because an OLT
+              created ME was deleted and the ONU gratuitously removed this one as
+              well.  So delete them from the OLT database.
+
+            o For ME's that are created by the OLT/OpenOMCI, delete them from the
+              ONU
+
+        :param olt: (list(int,int)) List of tuples where (class_id, inst_id)
+        :param onu_db: (dict) ONU Database snapshot at time of audit
+        :param olt_db: (dict) OLT Database snapshot at time of audit
+
+        :return: (int, int) successes, failures
+        """
+        successes = 0
+        failures = 0
+        me_map = self._device.me_map
+
+        ####################################################################
+        # First the undecodables and onu-created (treated the same) remove
+        # from OpenOMCI database
+        undecodable = self._undecodable(olt, me_map)
+        onu_created = self._onu_created(olt, me_map)
+
+        if len(undecodable) or len(onu_created):
+            good, bad = self.fix_olt_only_remove_from_db(undecodable, onu_created)
+            successes += good
+            failures += bad
+
+        ####################################################################
+        # Last the OLT created
+
+        olt_created = self._olt_created(olt, me_map)
+        if len(olt_created):
+            results = yield self.fix_olt_only_create_on_onu(olt_created, me_map)
+            successes += results[0]
+            failures += results[1]
+
+        returnValue((successes, failures))
+
+    def fix_olt_only_remove_from_db(self, undecodable, onu_created):
+        """ On OLT, but not on ONU and are ONU created, delete from OLT/OpenOMCI DB """
+        successes = 0
+        failures = 0
+
+        for cid, eid in undecodable + onu_created:
+            if self.deferred.called:        # Check if task canceled
+                break
+            try:
+                # Delete it. If already deleted (KeyError), then that is okay
+                self._sync_sm.mib_delete(cid, eid)
+                self.strobe_watchdog()
+
+            except KeyError:
+                successes += 1      # Not found in DB anymore, assume success
+
+            except Exception as e:
+                self.log.warn('olt-only-db-error', cid=cid, eid=eid, e=e)
+                failures += 1
+
+        return successes, failures
+
+    @inlineCallbacks
+    def fix_olt_only_create_on_onu(self, olt_created, me_map):
+        """ Found on OLT and created by OLT, so create on ONU"""
+        successes = 0
+        failures = 0
+
+        for cid, eid in olt_created:
+            if self.deferred.called:        # Check if task canceled
+                break
+
+            try:
+                # Get current entry, use it if found
+                olt_entry = self._sync_sm.query_mib(class_id=cid, instance_id=eid)
+                me_entry = me_map[cid]
+
+                if olt_entry is None or len(olt_entry) == 0:
+                    successes += 1      # Deleted before task got to run
+                else:
+                    # Create it in the ONU. Only set-by-create attributes allowed
+                    sbc_data = {k: v for k, v in olt_entry[ATTRIBUTES_KEY].items()
+                                if AA.SetByCreate in
+                                next((attr.access for attr in me_entry.attributes
+                                      if attr.field.name == k), set())}
+
+                    frame = OmciFrame(transaction_id=None,
+                                      message_type=OmciCreate.message_id,
+                                      omci_message=OmciCreate(entity_class=cid,
+                                                              entity_id=eid,
+                                                              data=sbc_data))
+
+                    self._local_deferred = yield self._device.omci_cc.send(frame)
+                    self.check_status_and_state(self._local_deferred, 'olt-create-sbc')
+                    successes += 1
+                    self._db_updates = 0
+
+                    # Try any writeable attributes now (but not set-by-create)
+                    writeable_data = {k: v for k, v in olt_entry[ATTRIBUTES_KEY].items()
+                                      if AA.Writable in
+                                      next((attr.access for attr in me_entry.attributes
+                                            if attr.field.name == k), set())
+                                      and AA.SetByCreate not in
+                                      next((attr.access for attr in me_entry.attributes
+                                            if attr.field.name == k), set())}
+
+                    if len(writeable_data):
+                        attributes_mask = me_entry.mask_for(*writeable_data.keys())
+                        frame = OmciFrame(transaction_id=None,
+                                          message_type=OmciSet.message_id,
+                                          omci_message=OmciSet(entity_class=cid,
+                                                               entity_id=eid,
+                                                               attributes_mask=attributes_mask,
+                                                               data=writeable_data))
+
+                        self._local_deferred = yield self._device.omci_cc.send(frame)
+                        self.check_status_and_state(self._local_deferred, 'olt-set-writeable')
+                        successes += 1
+
+            except Exception as e:
+                self.log.exception('olt-only-fix', e=e, cid=cid, eid=eid)
+                failures += 1
+                self.strobe_watchdog()
+
+        returnValue((successes, failures))
+
+    @inlineCallbacks
+    def fix_attributes_only(self, attrs, onu_db, olt_db):
+        """
+        Fix ME's that were found on both the ONU and OLT, but had differing
+        attribute values.  There are several cases to handle here
+
+            o For ME's created on the ONU that have write attributes that
+              only exist in the ONU's database, copy these to the OLT/OpenOMCI
+              database
+
+            o For all other writeable attributes, the OLT value takes precedence
+
+        :param attrs: (list(int,int,str)) List of tuples where (class_id, inst_id, attribute)
+                                          points to the specific ME instance where attributes
+                                          are different
+        :param onu_db: (dict) ONU Database snapshot at time of audit
+        :param olt_db: (dict) OLT Database snapshot at time of audit
+
+        :return: (int, int) successes, failures
+        """
+        successes = 0
+        failures = 0
+        me_map = self._device.me_map
+
+        # Collect up attributes on a per CID/EID basis.  This will result in
+        # the minimal number of operations either to the database or over
+        # the OMCI-CC to the ONU
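+        #
+        # Example (illustrative): [(11, 0x101, 'a'), (11, 0x101, 'b'), (45, 1, 'c')]
+        # collapses to {(11, 0x101): {'a', 'b'}, (45, 1): {'c'}}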
+
+        attr_map = dict()
+        for cid, eid, attribute in attrs:
+            if (cid, eid) not in attr_map:
+                attr_map[(cid, eid)] = {attribute}
+            else:
+                attr_map[(cid, eid)].add(attribute)
+
+        for entity_pair, attributes in attr_map.items():
+            cid = entity_pair[0]
+            eid = entity_pair[1]
+
+            # Skip MEs we cannot encode/decode
+            if cid not in me_map:
+                self.log.warn('no-me-map-decoder', class_id=cid)
+                failures += 1
+                continue
+
+            if self.deferred.called:        # Check if task canceled
+                break
+
+            # Build up MIB set commands and ONU Set (via OMCI) commands
+            # based on the attributes
+            me_entry = me_map[cid]
+            mib_data_to_save = dict()
+            onu_data_to_set = dict()
+            olt_attributes = olt_db[cid][eid][ATTRIBUTES_KEY]
+            onu_attributes = onu_db[cid][eid][ATTRIBUTES_KEY]
+
+            for attribute in attributes:
+                map_access = next((attr.access for attr in me_entry.attributes
+                                   if attr.field.name == attribute), set())
+                writeable = AA.Writable in map_access or AA.SetByCreate in map_access
+
+                # If only in ONU database snapshot, save it to OLT
+                if attribute in onu_attributes and attribute not in olt_attributes:
+                    # On onu only
+                    mib_data_to_save[attribute] = onu_attributes[attribute]
+
+                elif writeable:
+                    # On olt only or in both. Either way OLT wins
+                    onu_data_to_set[attribute] = olt_attributes[attribute]
+
+            # Now do the bulk operations.  For both, check to see if the target
+            # is still the same as when the audit was performed.  If it is, do
+            # the commit.  If not, mark it as a failure so an expedited audit
+            # will occur and check again.
+
+            if len(mib_data_to_save):
+                results = yield self.fix_attributes_only_in_mib(cid, eid, mib_data_to_save)
+                successes += results[0]
+                failures += results[1]
+
+            if len(onu_data_to_set):
+                results = yield self.fix_attributes_only_on_olt(cid, eid, onu_data_to_set, olt_db, me_entry)
+                successes += results[0]
+                failures += results[1]
+
+        returnValue((successes, failures))
+
+    @inlineCallbacks
+    def fix_attributes_only_in_mib(self, cid, eid, mib_data):
+        successes = 0
+        failures = 0
+        try:
+            # Get the current entry and verify it is unchanged since the audit
+            # (during the audit these attributes were missing from our DB)
+            attributes = mib_data.keys()
+            current_entry = self._device.query_mib(cid, eid, attributes)
+
+            if current_entry is not None and len(current_entry):
+                clashes = {k: v for k, v in current_entry.items()
+                           if k in attributes and v != mib_data[k]}
+
+                if len(clashes):
+                    raise ValueError('Existing DB entry for {}/{} attributes clash with audit data. Clash: {}'.
+                                     format(cid, eid, clashes))
+
+            self._sync_sm.mib_set(cid, eid, mib_data)
+            successes += len(mib_data)
+            self.strobe_watchdog()
+
+            # If we do nothing but DB updates for a LOT of MEs, we are
+            # blocking other async twisted tasks, be kind and yield
+            self._db_updates += 1
+            if self._db_updates >= MibReconcileTask.max_sequential_db_updates:
+                self._db_updates = 0
+                self._local_deferred = yield asleep(MibReconcileTask.db_update_pause)
+
+        except ValueError as e:
+            self.log.debug('attribute-changed', e=e)
+            failures += len(mib_data)
+
+        except Exception as e:
+            self.log.exception('attribute-only-fix-mib', e=e, cid=cid, eid=eid)
+            failures += len(mib_data)
+
+        returnValue((successes, failures))
+
+    @inlineCallbacks
+    def fix_attributes_only_on_olt(self, cid, eid, onu_data, olt_db, me_entry):
+        successes = 0
+        failures = 0
+
+        try:
+            # On olt only or in both. Either way OLT wins, first verify that
+            # the OLT version is still the same data that we want to
+            # update on the ONU. Verify the data for the OLT is the same as
+            # at time of audit
+            olt_db_entries = {k: v for k, v in olt_db[cid][eid][ATTRIBUTES_KEY].items()
+                              if k in onu_data.keys()}
+            current_entries = self._sync_sm.query_mib(class_id=cid, instance_id=eid,
+                                                      attributes=onu_data.keys())
+
+            still_the_same = all(current_entries.get(k) == v for k, v in olt_db_entries.items())
+            if not still_the_same:
+                returnValue((0, len(onu_data)))    # Wait for it to stabilize
+
+            # OLT data still matches, do the set operations now
+            # while len(onu_data):
+            attributes_mask = me_entry.mask_for(*onu_data.keys())
+            frame = OmciFrame(transaction_id=None,
+                              message_type=OmciSet.message_id,
+                              omci_message=OmciSet(entity_class=cid,
+                                                   entity_id=eid,
+                                                   attributes_mask=attributes_mask,
+                                                   data=onu_data))
+
+            results = yield self._device.omci_cc.send(frame)
+            self.check_status_and_state(results, 'onu-attribute-update')
+            successes += len(onu_data)
+            self._db_updates = 0
+
+        except Exception as e:
+            self.log.exception('attribute-only-fix-onu', e=e, cid=cid, eid=eid)
+            failures += len(onu_data)
+            self.strobe_watchdog()
+
+        returnValue((successes, failures))
+
+    @inlineCallbacks
+    def update_mib_data_sync(self):
+        """
+        As the final step of MIB resynchronization, the OLT sets the MIB data sync
+        attribute of the ONU data ME to some suitable value of its own choice. It
+        then sets its own record of the same attribute to the same value,
+        incremented by 1, as described for this attribute in ITU-T G.988.
+
+        :return: (int, int) success, failure counts
+        """
+        # Get MDS to set, do not use zero
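+        #
+        # Example (illustrative): if the local mib_data_sync is currently 0 it is
+        # first bumped to 1; the ONT Data set request below then carries mds=1 and
+        # the MIB Synchronizer records the new value in the database on success.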
+
+        new_mds_value = self._sync_sm.mib_data_sync
+        if new_mds_value == 0:
+            self._sync_sm.increment_mib_data_sync()
+            new_mds_value = self._sync_sm.mib_data_sync
+
+        # Update it.  The set response will be sent on the OMCI-CC pub/sub bus
+        # and the MIB Synchronizer will update this MDS value in the database
+        # if successful.
+        try:
+            frame = OntDataFrame(mib_data_sync=new_mds_value).set()
+
+            results = yield self._device.omci_cc.send(frame)
+            self.check_status_and_state(results, 'ont-data-mbs-update')
+            returnValue((1, 0))
+
+        except TimeoutError as e:
+            self.log.debug('ont-data-send-timeout', e=e)
+            returnValue((0, 1))
+
+        except Exception as e:
+            self.log.exception('ont-data-send', e=e, mds=new_mds_value)
+            returnValue((0, 1))
+
+    def check_status_and_state(self, results, operation=''):
+        """
+        Check the results of an OMCI response.  An exception is thrown
+        if the task was cancelled or an error was detected.
+
+        :param results: (OmciFrame) OMCI Response frame
+        :param operation: (str) what operation was being performed
+        :return: True if successful, False if the entity existed (already created)
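+
+        A typical call site within this task (sketch):
+
+            results = yield self._device.omci_cc.send(frame)
+            self.check_status_and_state(results, 'onu-attribute-update')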
+        """
+        omci_msg = results.fields['omci_message'].fields
+        status = omci_msg['success_code']
+        error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
+        failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
+        unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
+        self.strobe_watchdog()
+
+        self.log.debug(operation, status=status, error_mask=error_mask,
+                       failed_mask=failed_mask, unsupported_mask=unsupported_mask)
+
+        if status == RC.Success:
+            return True
+
+        elif status == RC.InstanceExists:
+            return False
+
+        msg = '{} failed with a status of {}, error_mask: {}, failed_mask: {}, unsupported_mask: {}'.\
+            format(operation, status, error_mask, failed_mask, unsupported_mask)
+
+        raise MibReconcileException(msg)
+
+    def _undecodable(self, cid_eid_list, me_map):
+        return [(cid, eid) for cid, eid in cid_eid_list if cid not in me_map]
+
+    def _onu_created(self, cid_eid_list, me_map):
+        return [(cid, eid) for cid, eid in cid_eid_list if cid in me_map and
+                (OP.Create not in me_map[cid].mandatory_operations and
+                 OP.Create not in me_map[cid].optional_operations)]
+
+    def _olt_created(self, cid_eid_list, me_map):
+        return [(cid, eid) for cid, eid in cid_eid_list if cid in me_map and
+                (OP.Create in me_map[cid].mandatory_operations or
+                 OP.Create in me_map[cid].optional_operations)]
diff --git a/python/extensions/omci/tasks/mib_resync_task.py b/python/extensions/omci/tasks/mib_resync_task.py
new file mode 100644
index 0000000..ef9c531
--- /dev/null
+++ b/python/extensions/omci/tasks/mib_resync_task.py
@@ -0,0 +1,427 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, returnValue
+from twisted.internet import reactor
+from common.utils.asleep import asleep
+from voltha.extensions.omci.database.mib_db_dict import *
+from voltha.extensions.omci.omci_entities import OntData
+from voltha.extensions.omci.omci_defs import AttributeAccess, EntityOperations
+
+AA = AttributeAccess
+OP = EntityOperations
+
+class MibCopyException(Exception):
+    pass
+
+
+class MibDownloadException(Exception):
+    pass
+
+
+class MibResyncException(Exception):
+    pass
+
+
+class MibResyncTask(Task):
+    """
+    OpenOMCI MIB resynchronization Task
+
+    This task should get a copy of the MIB and compare it to a copy of the
+    database. When the MIB Upload command is sent to the ONU, the ONU should
+    make a snapshot and source the data requested from that snapshot. The ONU
+    can still source AVCs and the OLT can still send config commands to the
+    actual (live) MIB during this time.
+    """
+    task_priority = 240
+    name = "MIB Resynchronization Task"
+
+    max_db_copy_retries = 3
+    db_copy_retry_delay = 7
+
+    max_mib_upload_next_retries = 3
+    mib_upload_next_delay = 10          # Max * delay < 60 seconds
+    watchdog_timeout = 15               # Should be > max delay
+
+    def __init__(self, omci_agent, device_id):
+        """
+        Class initialization
+
+        :param omci_agent: (OpenOMCIAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        """
+        super(MibResyncTask, self).__init__(MibResyncTask.name,
+                                            omci_agent,
+                                            device_id,
+                                            priority=MibResyncTask.task_priority,
+                                            exclusive=False)
+        self._local_deferred = None
+        self._device = omci_agent.get_device(device_id)
+        self._db_active = MibDbVolatileDict(omci_agent)
+        self._db_active.start()
+
+    def cancel_deferred(self):
+        super(MibResyncTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start MIB Re-Synchronization task
+        """
+        super(MibResyncTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_mib_resync)
+        self._db_active.start()
+        self._db_active.add(self.device_id)
+
+    def stop(self):
+        """
+        Shutdown MIB Re-Synchronization task
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        self._device = None
+        self._db_active.stop()
+        self._db_active = None
+        super(MibResyncTask, self).stop()
+
+    @inlineCallbacks
+    def perform_mib_resync(self):
+        """
+        Perform the MIB Resynchronization sequence
+
+        The sequence to be performed is:
+            - get a copy of the current MIB database (db_copy)
+
+            - perform MIB upload commands to get ONU's database and save this
+              to a local DB (db_active). Note that the ONU can still receive
+              create/delete/set/get operations from the operator and source
+              AVC notifications as well during this period.
+
+            - Compare the information in the db_copy to the db_active
+
+        During the mib upload process, the maximum time between mib upload next
+        requests is 1 minute.
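+
+        On success, the returned deferred fires with a dict of this shape
+        (difference entries are None when no differences were found):
+
+            {'on-olt-only': ..., 'on-onu-only': ..., 'attr-diffs': ...,
+             'olt-db': <local snapshot>, 'onu-db': <uploaded copy>}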
+        """
+        self.log.debug('perform-mib-resync')
+
+        try:
+            results = yield self.snapshot_mib()
+            db_copy = results[0]
+
+            if db_copy is None:
+                e = MibCopyException('Failed to get local database copy')
+                self.deferred.errback(failure.Failure(e))
+
+            else:
+                number_of_commands = results[1]
+
+                # Start the MIB upload sequence
+                self.strobe_watchdog()
+                commands_retrieved = yield self.upload_mib(number_of_commands)
+
+                if commands_retrieved < number_of_commands:
+                    e = MibDownloadException('Only retrieved {} of {} instances'.
+                                             format(commands_retrieved, number_of_commands))
+                    self.deferred.errback(failure.Failure(e))
+                else:
+                    # Compare the databases
+                    active_copy = self._db_active.query(self.device_id)
+                    on_olt_only, on_onu_only, attr_diffs = \
+                        self.compare_mibs(db_copy, active_copy)
+
+                    self.deferred.callback(
+                            {
+                                'on-olt-only': on_olt_only if len(on_olt_only) else None,
+                                'on-onu-only': on_onu_only if len(on_onu_only) else None,
+                                'attr-diffs': attr_diffs if len(attr_diffs) else None,
+                                'olt-db': db_copy,
+                                'onu-db': active_copy
+                            })
+
+        except Exception as e:
+            self.log.exception('resync', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+    @inlineCallbacks
+    def snapshot_mib(self):
+        """
+        Snapshot the MIB on the ONU and create a copy of our local MIB database
+
+        :return: (pair) (db_copy, number_of_commands)
+        """
+        db_copy = None
+        number_of_commands = None
+
+        try:
+            max_tries = MibResyncTask.max_db_copy_retries - 1
+
+            for retries in xrange(0, max_tries + 1):
+                # Send MIB Upload so ONU snapshots its MIB
+                try:
+                    self.strobe_watchdog()
+                    number_of_commands = yield self.send_mib_upload()
+
+                    if number_of_commands is None:
+                        if retries >= max_tries:
+                            db_copy = None
+                            break
+
+                except (TimeoutError, ValueError) as e:
+                    self.log.warn('timeout-or-value-error', e=e)
+                    if retries >= max_tries:
+                        raise
+
+                    self.strobe_watchdog()
+                    yield asleep(MibResyncTask.db_copy_retry_delay)
+                    continue
+
+                # Get a snapshot of the local MIB database
+                db_copy = self._device.query_mib()
+                # if we made it this far, no need to keep trying
+                break
+
+        except Exception as e:
+            self.log.exception('mib-resync', e=e)
+            raise
+
+        # Handle initial failures
+
+        if db_copy is None or number_of_commands is None:
+            raise MibCopyException('Failed to snapshot MIB copy after {} retries'.
+                                   format(MibResyncTask.max_db_copy_retries))
+
+        returnValue((db_copy, number_of_commands))
+
+    @inlineCallbacks
+    def send_mib_upload(self):
+        """
+        Perform MIB upload command and get the number of entries to retrieve
+
+        :return: (int) Number of commands to execute or None on error
+        """
+        ########################################
+        # Begin MIB Upload
+        try:
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send_mib_upload()
+
+            number_of_commands = results.fields['omci_message'].fields['number_of_commands']
+
+            if number_of_commands is None or number_of_commands <= 0:
+                raise ValueError('Number of commands was {}'.format(number_of_commands))
+
+            returnValue(number_of_commands)
+
+        except TimeoutError as e:
+            self.log.warn('mib-resync-get-timeout', e=e)
+            raise
+
+    @inlineCallbacks
+    def upload_mib(self, number_of_commands):
+        ########################################
+        # Begin MIB Upload
+        seq_no = None
+
+        for seq_no in xrange(number_of_commands):
+            max_tries = MibResyncTask.max_mib_upload_next_retries
+
+            for retries in xrange(0, max_tries):
+                try:
+                    self.strobe_watchdog()
+                    response = yield self._device.omci_cc.send_mib_upload_next(seq_no)
+
+                    omci_msg = response.fields['omci_message'].fields
+                    class_id = omci_msg['object_entity_class']
+                    entity_id = omci_msg['object_entity_id']
+
+                    # Filter out the 'mib_data_sync' ME from the database. We save that
+                    # at the device level and do not want it showing up during the data
+                    # comparison of a re-sync
+                    if class_id == OntData.class_id:
+                        break
+
+                    # The T&W ONU reports an ME with class ID 0 but only on audit. Perhaps others do as well.
+                    if class_id == 0 or class_id > 0xFFFF:
+                        self.log.warn('invalid-class-id', class_id=class_id)
+                        break
+
+                    attributes = {k: v for k, v in omci_msg['object_data'].items()}
+
+                    # Save to the database
+                    self._db_active.set(self.device_id, class_id, entity_id, attributes)
+                    break
+
+                except TimeoutError:
+                    self.log.warn('mib-resync-timeout', seq_no=seq_no,
+                                  number_of_commands=number_of_commands)
+
+                    if retries < max_tries - 1:
+                        self.strobe_watchdog()
+                        yield asleep(MibResyncTask.mib_upload_next_delay)
+                    else:
+                        raise
+
+                except Exception as e:
+                    self.log.exception('resync', e=e, seq_no=seq_no,
+                                       number_of_commands=number_of_commands)
+
+        returnValue(seq_no + 1)     # seq_no is zero based.
+
+    def compare_mibs(self, db_copy, db_active):
+        """
+        Compare our db_copy with the ONU's active copy
+
+        :param db_copy: (dict) OpenOMCI's copy of the database
+        :param db_active: (dict) ONU's database snapshot
+        :return: (list(int,int)), (list(int,int)), (list(int,int,str))  Differences
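+
+        Illustrative result (hypothetical IDs and attribute name):
+
+            ([(130, 1)], [(257, 0)], [(11, 0x101, 'administrative_state')])
+
+        i.e. ME 130/1 exists only on the OLT side, ME 257/0 only on the ONU,
+        and ME 11 instance 0x101 differs in 'administrative_state'.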
+        """
+        self.strobe_watchdog()
+        me_map = self.omci_agent.get_device(self.device_id).me_map
+
+        # Class & Entities only in local copy (OpenOMCI)
+        on_olt_temp = self.get_lhs_only_dict(db_copy, db_active)
+
+        # Remove any entries that are not reported during an upload (but could
+        # be in our database copy). Retain undecodable class IDs.
+        on_olt_only = [(cid, eid) for cid, eid in on_olt_temp
+                       if cid not in me_map or not me_map[cid].hidden]
+
+        # Further reduce the on_olt_only MEs reported in an audit to not
+        # include missed MEs that are ONU created. Not all ONUs report MEs
+        # that are ONU created unless we are doing the initial MIB upload.
+        # Adtran ONUs do report them; T&W and a few others may not.
+        on_olt_only = [(cid, eid) for cid, eid in on_olt_only if cid in me_map and
+                       (OP.Create in me_map[cid].mandatory_operations or
+                        OP.Create in me_map[cid].optional_operations)]
+
+        # Class & Entities only on remote (ONU)
+        on_onu_only = self.get_lhs_only_dict(db_active, db_copy)
+
+        # Class & Entities on both local & remote, but one or more attributes
+        # are different on the ONU.  This is the value that the local (OpenOMCI)
+        # thinks should be on the remote (ONU)
+
+        attr_diffs = self.get_attribute_diffs(db_copy, db_active, me_map)
+
+        # TODO: Note that certain MEs are excluded from the MIB upload.  In particular,
+        #       instances of some general purpose MEs, such as the Managed Entity ME and
+        #       the Attribute ME, are not included in the MIB upload.  Table attributes
+        #       are also not included in the MIB upload (and we do not yet support
+        #       tables in this OpenOMCI implementation, as of VOLTHA v1.3.0).
+
+        return on_olt_only, on_onu_only, attr_diffs
+
+    def get_lhs_only_dict(self, lhs, rhs):
+        """
+        Compare two MIB database dictionaries and return the ME Class ID and
+        instances that are unique to the lhs dictionary. Both parameters
+        should be in the common MIB Database output dictionary format that
+        is returned by the mib 'query' command.
+
+        :param lhs: (dict) Left-hand-side argument.
+        :param rhs: (dict) Right-hand-side argument
+
+        return: (list(int,int)) List of tuples where (class_id, inst_id)
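+
+        Example (illustrative; result order may vary):
+            lhs = {1: {1: {...}, 2: {...}}, 2: {1: {...}}}
+            rhs = {1: {1: {...}}}
+            result -> [(1, 2), (2, 1)]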
+        """
+        results = list()
+
+        for cls_id, cls_data in lhs.items():
+            # Get unique classes
+            #
+            # Skip keys that are not class IDs
+            if not isinstance(cls_id, int):
+                continue
+
+            if cls_id not in rhs:
+                results.extend([(cls_id, inst_id) for inst_id in cls_data.keys()
+                                if isinstance(inst_id, int)])
+            else:
+                # Get unique instances of a class
+                lhs_cls = cls_data
+                rhs_cls = rhs[cls_id]
+
+                for inst_id, _ in lhs_cls.items():
+                    # Skip keys that are not instance IDs
+                    if isinstance(inst_id, int) and inst_id not in rhs_cls:
+                        results.extend([(cls_id, inst_id)])
+
+        return results
+
+    def get_attribute_diffs(self, omci_copy, onu_copy, me_map):
+        """
+        Compare two OMCI MIBs and return the ME class and instance IDs that exists
+        on both the local copy and the remote ONU that have different attribute
+        values. Both parameters should be in the common MIB Database output
+        dictionary format that is returned by the mib 'query' command.
+
+        :param omci_copy: (dict) OpenOMCI copy (OLT-side) of the MIB Database
+        :param onu_copy: (dict) the ONU's latest (active) copy of its database
+        :param me_map: (dict) ME Class ID MAP for this ONU
+
+        return: (list(int,int,str)) List of tuples where (class_id, inst_id, attribute)
+                                    points to the specific ME instance where attributes
+                                    are different
+        """
+        results = list()
+        ro_set = {AA.R}
+
+        # Get class ID's that are in both
+        class_ids = {cls_id for cls_id, _ in omci_copy.items()
+                     if isinstance(cls_id, int) and cls_id in onu_copy}
+
+        for cls_id in class_ids:
+            # Get unique instances of a class
+            olt_cls = omci_copy[cls_id]
+            onu_cls = onu_copy[cls_id]
+
+            # Weed out read-only attributes. Attributes on the ONU may be read-only;
+            # these will only show up in the OpenOMCI (OLT-side) database if the value
+            # changed and an AVC Notification was sourced by the ONU
+            # TODO: These class IDs could be calculated once at ONU startup (at device add)
+            if cls_id in me_map:
+                ro_attrs = {attr.field.name for attr in me_map[cls_id].attributes
+                            if attr.access == ro_set}
+            else:
+                # Here if partially defined ME (not defined in ME Map)
+                from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
+                ro_attrs = {UNKNOWN_CLASS_ATTRIBUTE_KEY}
+
+            # Get set of common instance IDs
+            inst_ids = {inst_id for inst_id, _ in olt_cls.items()
+                        if isinstance(inst_id, int) and inst_id in onu_cls}
+
+            for inst_id in inst_ids:
+                omci_attributes = {k for k in olt_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+                onu_attributes = {k for k in onu_cls[inst_id][ATTRIBUTES_KEY].iterkeys()}
+
+                # Get attributes that exist in one database, but not the other
+                sym_diffs = (omci_attributes ^ onu_attributes) - ro_attrs
+                results.extend([(cls_id, inst_id, attr) for attr in sym_diffs])
+
+                # Get common attributes with different values
+                common_attributes = (omci_attributes & onu_attributes) - ro_attrs
+                results.extend([(cls_id, inst_id, attr) for attr in common_attributes
+                               if olt_cls[inst_id][ATTRIBUTES_KEY][attr] !=
+                                onu_cls[inst_id][ATTRIBUTES_KEY][attr]])
+        return results
diff --git a/python/extensions/omci/tasks/mib_upload.py b/python/extensions/omci/tasks/mib_upload.py
new file mode 100644
index 0000000..4afd234
--- /dev/null
+++ b/python/extensions/omci/tasks/mib_upload.py
@@ -0,0 +1,158 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, AlreadyCalledError
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes
+
+
+class MibUploadFailure(Exception):
+    """
+    This error is raised by default when the upload fails
+    """
+
+
+class MibUploadTask(Task):
+    """
+    OpenOMCI MIB upload task
+
+    On successful completion, this task will call the 'callback' method of the
+    deferred returned by the start method. Only a textual message is provided
+    as the successful result, and it reports the number of ME entities
+    retrieved.
+
+    Note that the MIB Synchronization State Machine will get event subscription
+    information for the MIB Reset and MIB Upload Next requests and it is the
+    MIB Synchronization State Machine that actually populates the MIB Database.
+    """
+    task_priority = 250
+    name = "MIB Upload Task"
+
+    def __init__(self, omci_agent, device_id):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        """
+        super(MibUploadTask, self).__init__(MibUploadTask.name,
+                                            omci_agent,
+                                            device_id,
+                                            priority=MibUploadTask.task_priority)
+        self._local_deferred = None
+
+    def cancel_deferred(self):
+        super(MibUploadTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start MIB Synchronization tasks
+        """
+        super(MibUploadTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_mib_upload)
+
+    def stop(self):
+        """
+        Shutdown MIB Synchronization tasks
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        super(MibUploadTask, self).stop()
+
+    @inlineCallbacks
+    def perform_mib_upload(self):
+        """
+        Perform the MIB Upload sequence
+        """
+        self.log.debug('perform-mib-upload')
+
+        seq_no = 0
+        number_of_commands = 0
+
+        try:
+            device = self.omci_agent.get_device(self.device_id)
+
+            #########################################
+            # MIB Reset
+            self.strobe_watchdog()
+            results = yield device.omci_cc.send_mib_reset()
+
+            status = results.fields['omci_message'].fields['success_code']
+            if status != ReasonCodes.Success.value:
+                raise MibUploadFailure('MIB Reset request failed with status code: {}'.
+                                       format(status))
+
+            ########################################
+            # Begin MIB Upload
+            self.strobe_watchdog()
+            results = yield device.omci_cc.send_mib_upload()
+
+            number_of_commands = results.fields['omci_message'].fields['number_of_commands']
+
+            for seq_no in xrange(number_of_commands):
+                if not device.active or not device.omci_cc.enabled:
+                    raise MibUploadFailure('OMCI and/or ONU is not active')
+
+                for retry in range(0, 3):
+                    try:
+                        self.log.debug('mib-upload-next-request', seq_no=seq_no,
+                                       retry=retry,
+                                       number_of_commands=number_of_commands)
+                        self.strobe_watchdog()
+                        yield device.omci_cc.send_mib_upload_next(seq_no)
+
+                        self.log.debug('mib-upload-next-success', seq_no=seq_no,
+                                       number_of_commands=number_of_commands)
+                        break
+
+                    except TimeoutError as e:
+                        from common.utils.asleep import asleep
+                        self.log.warn('mib-upload-timeout', e=e, seq_no=seq_no,
+                                      number_of_commands=number_of_commands)
+                        if retry >= 2:
+                            raise MibUploadFailure('Upload timeout failure on req {} of {}'.
+                                                   format(seq_no + 1, number_of_commands))
+                        self.strobe_watchdog()
+                        yield asleep(0.3)
+
+            # Successful if here
+            self.log.info('mib-synchronized')
+            self.deferred.callback('success, loaded {} ME Instances'.
+                                   format(number_of_commands))
+
+        except TimeoutError as e:
+            self.log.warn('mib-upload-timeout-on-reset', e=e, seq_no=seq_no,
+                          number_of_commands=number_of_commands)
+            self.deferred.errback(failure.Failure(e))
+
+        except AlreadyCalledError:
+            # Can occur if task canceled due to MIB Sync state change
+            self.log.debug('already-called-exception', seq_no=seq_no,
+                           number_of_commands=number_of_commands)
+            assert self.deferred.called, \
+                'Unexpected AlreadyCalledError exception: seq: {} of {}'.format(seq_no,
+                                                                                number_of_commands)
+        except Exception as e:
+            self.log.exception('mib-upload', e=e)
+            self.deferred.errback(failure.Failure(e))
diff --git a/python/extensions/omci/tasks/omci_create_pm_task.py b/python/extensions/omci/tasks/omci_create_pm_task.py
new file mode 100644
index 0000000..355e26a
--- /dev/null
+++ b/python/extensions/omci/tasks/omci_create_pm_task.py
@@ -0,0 +1,150 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, TimeoutError
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_messages import OmciCreate
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class CreatePMException(Exception):
+    pass
+
+
+class OmciCreatePMRequest(Task):
+    """
+    OpenOMCI routine to create the requested PM Interval MEs
+
+    TODO: Support of thresholding crossing alarms will be in a future VOLTHA release
+    """
+    task_priority = Task.DEFAULT_PRIORITY
+    name = "ONU OMCI Create PM ME Task"
+
+    def __init__(self, omci_agent, device_id, me_dict, exclusive=False):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param me_dict: (dict) (pm cid, pm eid) -> (me cid, me eid, upstream)
+        :param exclusive: (bool) True if this Create request Task exclusively owns
+                                 the OMCI-CC while running. Default: False
+        """
+        super(OmciCreatePMRequest, self).__init__(OmciCreatePMRequest.name,
+                                                  omci_agent,
+                                                  device_id,
+                                                  priority=OmciCreatePMRequest.task_priority,
+                                                  exclusive=exclusive)
+        self._device = omci_agent.get_device(device_id)
+        self._me_dict = me_dict
+        self._local_deferred = None
+
+    def cancel_deferred(self):
+        super(OmciCreatePMRequest, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """ Start task """
+        super(OmciCreatePMRequest, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_create)
+
+    @inlineCallbacks
+    def perform_create(self):
+        """ Perform the create requests """
+
+        try:
+            for pm, me in self._me_dict.items():
+                pm_class_id = pm[0]
+                pm_entity_id = pm[1]
+                me_class_id = me[0]
+                me_entity_id = me[1]
+                upstream = me[2]
+                self.log.debug('create-pm-me', class_id=pm_class_id, entity_id=pm_entity_id)
+
+                if me_class_id == 0:
+                    # Typical/common PM interval format
+                    frame = OmciFrame(
+                        transaction_id=None,  # OMCI-CC will set
+                        message_type=OmciCreate.message_id,
+                        omci_message=OmciCreate(
+                            entity_class=pm_class_id,
+                            entity_id=pm_entity_id,
+                            data=dict()
+                        )
+                    )
+                else:
+                    # Extended PM interval format. See ITU-T G.988 Section 9.3.32.
+                    #    Bit 1 - continuous accumulation if set, 15-minute interval if unset
+                    #    Bit 2 - directionality (0=upstream, 1=downstream)
+                    #    Bit 3..14 - Reserved
+                    #    Bit 15 - Use P bits of TCI field to filter
+                    #    Bit 16 - Use VID bits of TCI field to filter
+                    bitmap = 0 if upstream else 1 << 1
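+                    # e.g. upstream yields bitmap 0x0000; downstream yields 0x0002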
+
+                    data = {'control_block': [
+                        0,             # Threshold data 1/2 ID
+                        me_class_id,   # Parent ME Class
+                        me_entity_id,  # Parent ME Instance
+                        0,             # Accumulation disable
+                        0,             # TCA Disable
+                        bitmap,        # Control fields bitmap
+                        0,             # TCI
+                        0              # Reserved
+                    ]}
+                    frame = OmciFrame(
+                        transaction_id=None,  # OMCI-CC will set
+                        message_type=OmciCreate.message_id,
+                        omci_message=OmciCreate(
+                            entity_class=pm_class_id,
+                            entity_id=pm_entity_id,
+                            data=data
+                        )
+                    )
+                self.strobe_watchdog()
+                try:
+                    results = yield self._device.omci_cc.send(frame)
+                except TimeoutError:
+                    self.log.warning('perform-create-timeout', me_class_id=me_class_id, me_entity_id=me_entity_id,
+                                     pm_class_id=pm_class_id, pm_entity_id=pm_entity_id)
+                    raise
+
+                status = results.fields['omci_message'].fields['success_code']
+                self.log.debug('perform-create-status', status=status)
+
+                # Did it fail?
+                if status != RC.Success.value and status != RC.InstanceExists.value:
+                    msg = 'ME: {}, entity: {} failed with status {}'.format(pm_class_id,
+                                                                            pm_entity_id,
+                                                                            status)
+                    raise CreatePMException(msg)
+
+                self.log.debug('create-pm-success', class_id=pm_class_id,
+                               entity_id=pm_entity_id)
+
+            self.deferred.callback(self)
+
+        except Exception as e:
+            self.log.exception('perform-create', e=e)
+            self.deferred.errback(failure.Failure(e))
diff --git a/python/extensions/omci/tasks/omci_delete_pm_task.py b/python/extensions/omci/tasks/omci_delete_pm_task.py
new file mode 100644
index 0000000..adf1ce2
--- /dev/null
+++ b/python/extensions/omci/tasks/omci_delete_pm_task.py
@@ -0,0 +1,108 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_messages import OmciDelete
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class DeletePMException(Exception):
+    pass
+
+
+class OmciDeletePMRequest(Task):
+    """
+    OpenOMCI routine to delete the requested PM Interval MEs
+    """
+    task_priority = Task.DEFAULT_PRIORITY
+    name = "ONU OMCI Delete PM ME Task"
+
+    def __init__(self, omci_agent, device_id, me_set, exclusive=False):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param me_set: (set) Tuples of class_id / entity_id to delete
+        :param exclusive: (bool) True if this Delete request Task exclusively owns
+                                 the OMCI-CC while running. Default: False
+        """
+        super(OmciDeletePMRequest, self).__init__(OmciDeletePMRequest.name,
+                                                  omci_agent,
+                                                  device_id,
+                                                  priority=OmciDeletePMRequest.task_priority,
+                                                  exclusive=exclusive)
+        self._device = omci_agent.get_device(device_id)
+        self._me_tuples = me_set
+        self._local_deferred = None
+
+    def cancel_deferred(self):
+        super(OmciDeletePMRequest, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """ Start task """
+        super(OmciDeletePMRequest, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_delete)
+
+    @inlineCallbacks
+    def perform_delete(self):
+        """ Perform the delete requests """
+        self.log.debug('perform-delete')
+
+        try:
+            for me in self._me_tuples:
+                class_id = me[0]
+                entity_id = me[1]
+
+                frame = OmciFrame(
+                    transaction_id=None,
+                    message_type=OmciDelete.message_id,
+                    omci_message=OmciDelete(
+                        entity_class=class_id,
+                        entity_id=entity_id
+                    )
+                )
+                self.strobe_watchdog()
+                results = yield self._device.omci_cc.send(frame)
+
+                status = results.fields['omci_message'].fields['success_code']
+                self.log.debug('perform-delete-status', status=status)
+
+                # Did it fail? If the instance does not exist, that is not an error
+                if status != RC.Success.value and status != RC.UnknownInstance.value:
+                    msg = 'ME: {}, entity: {} failed with status {}'.format(class_id,
+                                                                            entity_id,
+                                                                            status)
+                    raise DeletePMException(msg)
+
+                self.log.debug('delete-pm-success', class_id=class_id,
+                               entity_id=entity_id)
+            self.deferred.callback(self)
+
+        except Exception as e:
+            self.log.exception('perform-delete', e=e)
+            self.deferred.errback(failure.Failure(e))
diff --git a/python/extensions/omci/tasks/omci_get_request.py b/python/extensions/omci/tasks/omci_get_request.py
new file mode 100644
index 0000000..690df1c
--- /dev/null
+++ b/python/extensions/omci/tasks/omci_get_request.py
@@ -0,0 +1,356 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import failure, inlineCallbacks, TimeoutError, returnValue
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import MEFrame
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_cc import DEFAULT_OMCI_TIMEOUT
+from voltha.extensions.omci.omci_messages import OmciGet
+from voltha.extensions.omci.omci_fields import OmciTableField
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class GetException(Exception):
+    pass
+
+
+class OmciGetRequest(Task):
+    """
+    OpenOMCI Get an OMCI ME Instance Attributes
+
+    Upon completion, the Task deferred callback is invoked with a reference of
+    this Task object.
+
+    The Task has an initializer option (allow_failure) that will retry each
+    requested attribute individually if the original request fails with a
+    status code of 9 (Attributes failed or unknown). This status means that an
+    attribute is not supported by the ONU, or that a mandatory/optional
+    attribute could not be returned by the ONU even if it is supported, for
+    example because of a range or type violation.
+    """
+    task_priority = 128
+    name = "ONU OMCI Get Task"
+
+    def __init__(self, omci_agent, device_id, entity_class, entity_id, attributes,
+                 exclusive=True, allow_failure=False):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param entity_class: (EntityClass) ME Class to retrieve
+        :param entity_id: (int) ME Class instance ID to retrieve
+        :param attributes: (list or set) Name of attributes to retrieve
+        :param exclusive: (bool) True if this GET request Task exclusively owns
+                                 the OMCI-CC while running. Default: True
+        :param allow_failure: (bool) If true, attempt to get all valid attributes
+                                     if the original request receives an error
+                                     code of 9 (Attributes failed or unknown).
+        """
+        super(OmciGetRequest, self).__init__(OmciGetRequest.name,
+                                             omci_agent,
+                                             device_id,
+                                             priority=OmciGetRequest.task_priority,
+                                             exclusive=exclusive)
+        self._device = omci_agent.get_device(device_id)
+        self._entity_class = entity_class
+        self._entity_id = entity_id
+        self._attributes = attributes
+        self._allow_failure = allow_failure
+        self._failed_or_unknown_attributes = set()
+        self._results = None
+        self._local_deferred = None
+
+    def cancel_deferred(self):
+        super(OmciGetRequest, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    @property
+    def me_class(self):
+        """The OMCI Managed Entity Class associated with this request"""
+        return self._entity_class
+
+    @property
+    def entity_id(self):
+        """The ME Entity ID associated with this request"""
+        return self._entity_id
+
+    @property
+    def attributes(self):
+        """
+        Return a dictionary of attributes for the request if the Get was
+        successfully completed.  None otherwise
+        """
+        if self._results is None:
+            return None
+
+        omci_msg = self._results.fields['omci_message'].fields
+        return omci_msg['data'] if 'data' in omci_msg else None
+
+    @property
+    def success_code(self):
+        """
+        Return the OMCI success/reason code for the Get Response.
+        """
+        if self._results is None:
+            return None
+
+        return self._results.fields['omci_message'].fields['success_code']
+
+    @property
+    def raw_results(self):
+        """
+        Return the raw Get Response OMCIFrame
+        """
+        return self._results
+
+    def start(self):
+        """
+        Start the Get request task
+        """
+        super(OmciGetRequest, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_get_omci)
+
+    @property
+    def failed_or_unknown_attributes(self):
+        """
+        Returns the set of attributes that failed or were unknown in the
+        original Get request, which resulted in an initial status code of 9
+        (Attributes failed or unknown).
+
+        :return: (set of str) attributes
+        """
+        return self._failed_or_unknown_attributes
+
+    @inlineCallbacks
+    def perform_get_omci(self):
+        """
+        Perform the initial get request
+        """
+        self.log.info('perform-get', entity_class=self._entity_class,
+                      entity_id=self._entity_id, attributes=self._attributes)
+        try:
+            # If one or more attributes is a table attribute, get it separately
+            def is_table_attr(attr):
+                index = self._entity_class.attribute_name_to_index_map[attr]
+                attr_def = self._entity_class.attributes[index]
+                return isinstance(attr_def.field, OmciTableField)
+
+            first_attributes = {attr for attr in self._attributes if not is_table_attr(attr)}
+            table_attributes = {attr for attr in self._attributes if is_table_attr(attr)}
+
+            frame = MEFrame(self._entity_class, self._entity_id, first_attributes).get()
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame)
+
+            status = results.fields['omci_message'].fields['success_code']
+            self.log.debug('perform-get-status', status=status)
+
+            # Success?
+            if status == RC.Success.value:
+                self._results = results
+                results_omci = results.fields['omci_message'].fields
+
+                # Were all attributes fetched?
+                missing_attr = frame.fields['omci_message'].fields['attributes_mask'] ^ \
+                    results_omci['attributes_mask']
+
+                if missing_attr > 0 or len(table_attributes) > 0:
+                    self.log.info('perform-get-missing', num_missing=missing_attr,
+                                  table_attr=table_attributes)
+                    self.strobe_watchdog()
+                    self._local_deferred = reactor.callLater(0,
+                                                             self.perform_get_missing_attributes,
+                                                             missing_attr,
+                                                             table_attributes)
+                    returnValue(self._local_deferred)
+
+            elif status == RC.AttributeFailure.value:
+                # What failed?  Note if only one attribute was attempted, then
+                # that is an overall failure
+
+                if not self._allow_failure or len(self._attributes) <= 1:
+                    raise GetException('Get failed with status code: {}'.
+                                       format(RC.AttributeFailure.value))
+
+                self.strobe_watchdog()
+                self._local_deferred = reactor.callLater(0,
+                                                         self.perform_get_failed_attributes,
+                                                         results,
+                                                         self._attributes)
+                returnValue(self._local_deferred)
+
+            else:
+                raise GetException('Get failed with status code: {}'.format(status))
+
+            self.log.debug('get-completed')
+            self.deferred.callback(self)
+
+        except TimeoutError as e:
+            self.deferred.errback(failure.Failure(e))
+
+        except Exception as e:
+            self.log.exception('perform-get', e=e, class_id=self._entity_class,
+                               entity_id=self._entity_id, attributes=self._attributes)
+            self.deferred.errback(failure.Failure(e))
+
+    @inlineCallbacks
+    def perform_get_missing_attributes(self, missing_attr, table_attributes):
+        """
+        This method is called when the original Get request completes with success
+        but not all attributes were returned.  This can happen if one or more of the
+        attributes would have exceeded the space available in the OMCI frame.
+
+        This routine iterates through the missing attributes and attempts to retrieve
+        the ones that were missing.
+
+        :param missing_attr: (int) Missing attributes bitmask
+        :param table_attributes: (set) Attributes that need table get/get-next support
+        """
+        self.log.debug('perform-get-missing', attrs=missing_attr, tbl=table_attributes)
+
+        # Retrieve missing attributes first (if any)
+        results_omci = self._results.fields['omci_message'].fields
+
+        for index in xrange(16):
+            attr_mask = 1 << index
+
+            if attr_mask & missing_attr:
+                # Get this attribute
+                frame = OmciFrame(
+                    transaction_id=None,  # OMCI-CC will set
+                    message_type=OmciGet.message_id,
+                    omci_message=OmciGet(
+                        entity_class=self._entity_class.class_id,
+                        entity_id=self._entity_id,
+                        attributes_mask=attr_mask
+                    )
+                )
+                try:
+                    self.strobe_watchdog()
+                    get_results = yield self._device.omci_cc.send(frame)
+
+                    get_omci = get_results.fields['omci_message'].fields
+                    if get_omci['success_code'] != RC.Success.value:
+                        continue
+
+                    assert attr_mask == get_omci['attributes_mask'], 'wrong attribute'
+                    results_omci['attributes_mask'] |= attr_mask
+
+                    if results_omci.get('data') is None:
+                        results_omci['data'] = dict()
+
+                    results_omci['data'].update(get_omci['data'])
+
+                except TimeoutError:
+                    self.log.debug('missing-timeout')
+
+                except Exception as e:
+                    self.log.exception('missing-failure', e=e)
+
+        # Now any table attributes. OMCI_CC handles background get/get-next sequencing
+        for tbl_attr in table_attributes:
+            attr_mask = self._entity_class.mask_for(tbl_attr)
+            frame = OmciFrame(
+                    transaction_id=None,  # OMCI-CC will set
+                    message_type=OmciGet.message_id,
+                    omci_message=OmciGet(
+                            entity_class=self._entity_class.class_id,
+                            entity_id=self._entity_id,
+                            attributes_mask=attr_mask
+                    )
+            )
+            try:
+                timeout = 2 * DEFAULT_OMCI_TIMEOUT  # Multiple frames expected
+                self.strobe_watchdog()
+                get_results = yield self._device.omci_cc.send(frame,
+                                                              timeout=timeout)
+                self.strobe_watchdog()
+                get_omci = get_results.fields['omci_message'].fields
+                if get_omci['success_code'] != RC.Success.value:
+                    continue
+
+                if results_omci.get('data') is None:
+                    results_omci['data'] = dict()
+
+                results_omci['data'].update(get_omci['data'])
+
+            except TimeoutError:
+                self.log.debug('tbl-attr-timeout')
+
+            except Exception as e:
+                self.log.exception('tbl-attr-failure', e=e)
+
+        self.deferred.callback(self)
+
+    @inlineCallbacks
+    def perform_get_failed_attributes(self, tmp_results, attributes):
+        """
+        Called when the original Get request fails with status code 9 (Attributes
+        failed or unknown).  Each attribute is retried individually and any
+        successful results are merged into the partial response.
+
+        :param tmp_results: (OmciFrame) Partial results from the original Get request
+        :param attributes: (list or set) Names of the attributes to retry
+        """
+        self.log.debug('perform-get-failed', attrs=attributes)
+
+        for attr in attributes:
+            try:
+                frame = MEFrame(self._entity_class, self._entity_id, {attr}).get()
+
+                self.strobe_watchdog()
+                results = yield self._device.omci_cc.send(frame)
+
+                status = results.fields['omci_message'].fields['success_code']
+
+                if status == RC.AttributeFailure.value:
+                    self.log.debug('unknown-or-invalid-attribute', attr=attr, status=status)
+                    self._failed_or_unknown_attributes.add(attr)
+
+                elif status != RC.Success.value:
+                    self.log.warn('invalid-get', class_id=self._entity_class,
+                                  attribute=attr, status=status)
+                    self._failed_or_unknown_attributes.add(attr)
+
+                else:
+                    # Add to partial results and correct the status
+                    tmp_results.fields['omci_message'].fields['success_code'] = status
+                    tmp_results.fields['omci_message'].fields['attributes_mask'] |= \
+                        results.fields['omci_message'].fields['attributes_mask']
+
+                    if tmp_results.fields['omci_message'].fields.get('data') is None:
+                        tmp_results.fields['omci_message'].fields['data'] = dict()
+
+                    tmp_results.fields['omci_message'].fields['data'][attr] = \
+                        results.fields['omci_message'].fields['data'][attr]
+
+            except TimeoutError:
+                self.log.debug('attr-timeout')
+
+            except Exception as e:
+                self.log.exception('attr-failure', e=e)
+
+        self._results = tmp_results
+        self.deferred.callback(self)
diff --git a/python/extensions/omci/tasks/omci_modify_request.py b/python/extensions/omci/tasks/omci_modify_request.py
new file mode 100644
index 0000000..da7bff5
--- /dev/null
+++ b/python/extensions/omci/tasks/omci_modify_request.py
@@ -0,0 +1,171 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, returnValue
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import MEFrame
+from voltha.extensions.omci.omci_frame import OmciFrame
+from voltha.extensions.omci.omci_messages import OmciCreate, OmciSet, OmciDelete
+from voltha.extensions.omci.omci_entities import EntityClass
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class ModifyException(Exception):
+    pass
+
+
+class OmciModifyRequest(Task):
+    """
+    OpenOMCI Generic Create, Set, or Delete Frame support Task.
+
+    This task allows an ONU adapter to send a Create, Set, or Delete request from
+    any point in its code while properly using the OMCI-CC channel.  Direct access
+    to the OMCI-CC object to send requests by an ONU adapter is highly discouraged.
+    """
+    task_priority = 128
+    name = "ONU OMCI Modify Task"
+
+    def __init__(self, omci_agent, device_id, frame, priority=task_priority, exclusive=False):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param frame: (OmciFrame) Frame to send
+        :param priority: (int) OpenOMCI Task priority (0..255); 255 is the highest
+        :param exclusive: (bool) True if this request Task exclusively owns the
+                                 OMCI-CC while running. Default: False
+        """
+        super(OmciModifyRequest, self).__init__(OmciModifyRequest.name,
+                                                omci_agent,
+                                                device_id,
+                                                priority=priority,
+                                                exclusive=exclusive)
+        self._device = omci_agent.get_device(device_id)
+        self._frame = frame
+        self._results = None
+        self._local_deferred = None
+
+        # Validate message type
+        self._msg_type = frame.fields['message_type']
+        if self._msg_type not in (OmciCreate.message_id, OmciSet.message_id, OmciDelete.message_id):
+            raise TypeError('Invalid Message type: {}, must be Create, Set, or Delete'.
+                            format(self._msg_type))
+
+    def cancel_deferred(self):
+        super(OmciModifyRequest, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    @property
+    def success_code(self):
+        """
+        Return the OMCI success/reason code for the response.
+        """
+        if self._results is None:
+            return None
+
+        return self._results.fields['omci_message'].fields['success_code']
+
+    @property
+    def illegal_attributes_mask(self):
+        """
+        For Create & Set requests, a failure may indicate that one or more
+        attributes have an illegal value.  This property returns the attribute
+        mask of any illegal attributes
+
+        :return: None if not a create/set request, otherwise the attribute mask
+                 of illegal attributes
+        """
+        if self._results is None:
+            return None
+
+        omci_msg = self._results.fields['omci_message'].fields
+
+        if self._msg_type == OmciCreate.message_id:
+            if self.success_code != RC.ParameterError.value:
+                return 0
+            return omci_msg['parameter_error_attributes_mask']
+
+        elif self._msg_type == OmciSet.message_id:
+            if self.success_code != RC.AttributeFailure.value:
+                return 0
+            return omci_msg['failed_attributes_mask']
+
+        return None
+
+    @property
+    def unsupported_attributes_mask(self):
+        """
+        For Set requests, a failure may indicate that one or more attributes
+        are not supported by this ONU. This property returns those unsupported
+        attributes
+
+        :return: None if not a set request, otherwise the attribute mask of any
+                 unsupported attributes
+        """
+        if self._msg_type != OmciSet.message_id or self._results is None:
+            return None
+
+        if self.success_code != RC.AttributeFailure.value:
+            return 0
+
+        return self._results.fields['omci_message'].fields['unsupported_attributes_mask']
+
+    @property
+    def raw_results(self):
+        """
+        Return the raw Response OMCIFrame
+        """
+        return self._results
+
+    def start(self):
+        """
+        Start the modify request task
+        """
+        super(OmciModifyRequest, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_omci)
+
+    @inlineCallbacks
+    def perform_omci(self):
+        """
+        Perform the request
+        """
+        self.log.debug('perform-request')
+
+        try:
+            self.strobe_watchdog()
+            self._results = yield self._device.omci_cc.send(self._frame)
+
+            status = self._results.fields['omci_message'].fields['success_code']
+            self.log.debug('response-status', status=status)
+
+            # Success?
+            if status in (RC.Success.value, RC.InstanceExists.value):
+                self.deferred.callback(self)
+            else:
+                raise ModifyException('Failed with status {}'.format(status))
+
+        except Exception as e:
+            self.log.exception('perform-modify', e=e)
+            self.deferred.errback(failure.Failure(e))
diff --git a/python/extensions/omci/tasks/omci_sw_image_upgrade_task.py b/python/extensions/omci/tasks/omci_sw_image_upgrade_task.py
new file mode 100644
index 0000000..5eaa87c
--- /dev/null
+++ b/python/extensions/omci/tasks/omci_sw_image_upgrade_task.py
@@ -0,0 +1,64 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from task import Task
+from twisted.internet import reactor
+from voltha.protos.voltha_pb2 import ImageDownload
+
+class OmciSwImageUpgradeTask(Task):
+    name = "OMCI Software Image Upgrade Task"
+
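+    # Usage sketch (hypothetical: 'UpgradeStateMachine' stands in for the
+    # concrete upgrade state-machine class, which is injected so tests can
+    # substitute a fake; 'device' is the device entry for this ONU):
+    #
+    #     task = OmciSwImageUpgradeTask(img_id, UpgradeStateMachine,
+    #                                   omci_agent, image_download)
+    #     d = device.task_runner.queue_task(task)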
+
+    def __init__(self, img_id, omci_upgrade_sm_cls, omci_agent, image_download, clock=None):
+        super(OmciSwImageUpgradeTask, self).__init__(OmciSwImageUpgradeTask.name, omci_agent, image_download.id,
+                                                     exclusive=False,
+                                                     watchdog_timeout=45)
+        self.log.debug("OmciSwImageUpgradeTask create ", image_id=img_id)
+        self._image_id = img_id
+        self._omci_upgrade_sm_cls = omci_upgrade_sm_cls
+        # self._omci_agent = omci_agent
+        self._image_download = image_download
+        self.reactor = clock if clock is not None else reactor
+        self._omci_upgrade_sm = None
+        self.log.debug("OmciSwImageUpgradeTask create end", image_id=img_id)
+
+    @property 
+    def status(self):
+        return self._image_download
+        
+    def start(self):
+        self.log.debug("OmciSwImageUpgradeTask start")
+        super(OmciSwImageUpgradeTask, self).start()
+        if self._omci_upgrade_sm is None:
+            self._omci_upgrade_sm = self._omci_upgrade_sm_cls(self._image_id, self.omci_agent, self._image_download, clock=self.reactor)
+            d = self._omci_upgrade_sm.start()
+            d.chainDeferred(self.deferred)
+        #else:
+        #    if restart:
+        #        self._omci_upgrade_sm.reset_image()
+
+    def stop(self):
+        self.log.debug("OmciSwImageUpgradeTask stop")
+        if self._omci_upgrade_sm is not None:
+            self._omci_upgrade_sm.stop()
+            self._omci_upgrade_sm = None
+    
+    def onu_bootup(self):
+        self.log.debug("onu_bootup", state=self._omci_upgrade_sm.status.image_state);
+        if self._omci_upgrade_sm is not None \
+            and self._omci_upgrade_sm.status.image_state == ImageDownload.IMAGE_ACTIVATE:
+            self._omci_upgrade_sm.do_commit()
+    
diff --git a/python/extensions/omci/tasks/onu_capabilities_task.py b/python/extensions/omci/tasks/onu_capabilities_task.py
new file mode 100644
index 0000000..048382c
--- /dev/null
+++ b/python/extensions/omci/tasks/onu_capabilities_task.py
@@ -0,0 +1,282 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from binascii import hexlify
+from twisted.internet.defer import inlineCallbacks, failure, returnValue
+from twisted.internet import reactor
+from voltha.extensions.omci.omci_defs import ReasonCodes
+from voltha.extensions.omci.omci_me import OmciFrame
+from voltha.extensions.omci.omci import EntityOperations
+
+
+class GetNextException(Exception):
+    pass
+
+
+class GetCapabilitiesFailure(Exception):
+    pass
+
+
+class OnuCapabilitiesTask(Task):
+    """
+    OpenOMCI MIB Capabilities Task
+
+    This task requests information on supported MEs via the OMCI managed
+    entity (ME #287).
+
+    This task should be run after MIB Synchronization and before any MIB
+    Downloads to the ONU.
+
+    Upon completion, the Task deferred callback is invoked with a dictionary
+    containing the supported managed entities and message types.
+
+    results = {
+                'supported-managed-entities': {set of supported managed entities},
+                'supported-message-types': {set of supported message types}
+              }
+    """
+    task_priority = 240
+    name = "ONU Capabilities Task"
+
+    max_mib_get_next_retries = 3
+    mib_get_next_delay = 5
+    DEFAULT_OCTETS_PER_MESSAGE = 29
+
+    def __init__(self, omci_agent, device_id, omci_pdu_size=DEFAULT_OCTETS_PER_MESSAGE):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param omci_pdu_size: (int) OMCI Data payload size (not counting any trailers)
+        """
+        super(OnuCapabilitiesTask, self).__init__(OnuCapabilitiesTask.name,
+                                                  omci_agent,
+                                                  device_id,
+                                                  priority=OnuCapabilitiesTask.task_priority)
+        self._local_deferred = None
+        self._device = omci_agent.get_device(device_id)
+        self._pdu_size = omci_pdu_size
+        self._supported_entities = set()
+        self._supported_msg_types = set()
+
+    def cancel_deferred(self):
+        super(OnuCapabilitiesTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    @property
+    def supported_managed_entities(self):
+        """
+        Return a set of the Managed Entity class IDs supported on this ONU
+
+        None is returned if no MEs have been discovered
+
+        :return: (set of ints)
+        """
+        return frozenset(self._supported_entities) if len(self._supported_entities) else None
+
+    @property
+    def supported_message_types(self):
+        """
+        Return a set of the Message Types supported on this ONU
+
+        None is returned if no message types have been discovered
+
+        :return: (set of EntityOperations)
+        """
+        return frozenset(self._supported_msg_types) if len(self._supported_msg_types) else None
+
+    def start(self):
+        """
+        Start MIB Capabilities task
+        """
+        super(OnuCapabilitiesTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_get_capabilities)
+
+    def stop(self):
+        """
+        Shutdown MIB Capabilities task
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        self._device = None
+        super(OnuCapabilitiesTask, self).stop()
+
+    @inlineCallbacks
+    def perform_get_capabilities(self):
+        """
+        Perform the MIB Capabilities sequence.
+
+        The sequence is to perform a Get request with the attribute mask set
+        to 'me_type_table'.  The response to this request carries the size of
+        the table in octets, which determines the number of get-next sequences
+        required.
+
+        Then a loop is entered and get-next commands are sent for each sequence
+        required.
+        """
+        self.log.debug('perform-get')
+
+        try:
+            self.strobe_watchdog()
+            self._supported_entities = yield self.get_supported_entities()
+
+            self.strobe_watchdog()
+            self._supported_msg_types = yield self.get_supported_message_types()
+
+            self.log.debug('get-success',
+                           supported_entities=self.supported_managed_entities,
+                           supported_msg_types=self.supported_message_types)
+            results = {
+                'supported-managed-entities': self.supported_managed_entities,
+                'supported-message-types': self.supported_message_types
+            }
+            self.deferred.callback(results)
+
+        except Exception as e:
+            self.log.exception('perform-get', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+    def get_count_from_data_buffer(self, data):
+        """
+        Extract the 4-octet buffer length from the OMCI PDU contents.
+
+        For example, a buffer beginning with the octets 00:00:01:00 decodes
+        to a count of 256.
+        """
+        self.log.debug('get-count-buffer', data=hexlify(data))
+        return int(hexlify(data[:4]), 16)
+
+    @inlineCallbacks
+    def get_supported_entities(self):
+        """
+        Get the supported ME Types for this ONU.
+        """
+        try:
+            # Get the number of requests needed
+            frame = OmciFrame(me_type_table=True).get()
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame)
+
+            omci_msg = results.fields['omci_message']
+            status = omci_msg.fields['success_code']
+
+            if status != ReasonCodes.Success.value:
+                raise GetCapabilitiesFailure('Get count of supported entities failed with status code: {}'.
+                                             format(status))
+            data = omci_msg.fields['data']['me_type_table']
+            count = self.get_count_from_data_buffer(bytearray(data))
+
+            seq_no = 0
+            data_buffer = bytearray(0)
+            self.log.debug('me-type-count', octets=count, data=hexlify(data))
+
+            # Start the loop
+            for offset in xrange(0, count, self._pdu_size):
+                frame = OmciFrame(me_type_table=seq_no).get_next()
+                seq_no += 1
+                self.strobe_watchdog()
+                results = yield self._device.omci_cc.send(frame)
+
+                omci_msg = results.fields['omci_message']
+                status = omci_msg.fields['success_code']
+
+                if status != ReasonCodes.Success.value:
+                    raise GetCapabilitiesFailure(
+                        'Get supported entities request at offset {} of {} failed with status code: {}'.
+                        format(offset + 1, count, status))
+
+                # Extract the data
+                num_octets = count - offset
+                if num_octets > self._pdu_size:
+                    num_octets = self._pdu_size
+
+                data = omci_msg.fields['data']['me_type_table']
+                data_buffer += bytearray(data[:num_octets])
+
+            me_types = {(data_buffer[x] << 8) + data_buffer[x + 1]
+                        for x in xrange(0, len(data_buffer), 2)}
+            returnValue(me_types)
+
+        except Exception as e:
+            self.log.exception('get-entities', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+    @inlineCallbacks
+    def get_supported_message_types(self):
+        """
+        Get the supported Message Types (actions) for this ONU.
+        """
+        try:
+            # Get the number of requests needed
+            frame = OmciFrame(message_type_table=True).get()
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame)
+
+            omci_msg = results.fields['omci_message']
+            status = omci_msg.fields['success_code']
+
+            if status != ReasonCodes.Success.value:
+                raise GetCapabilitiesFailure('Get count of supported msg types failed with status code: {}'.
+                                             format(status))
+
+            data = omci_msg.fields['data']['message_type_table']
+            count = self.get_count_from_data_buffer(bytearray(data))
+
+            seq_no = 0
+            data_buffer = list()
+            self.log.debug('me-type-count', octets=count, data=hexlify(data))
+
+            # Start the loop
+            for offset in xrange(0, count, self._pdu_size):
+                frame = OmciFrame(message_type_table=seq_no).get_next()
+                seq_no += 1
+                self.strobe_watchdog()
+                results = yield self._device.omci_cc.send(frame)
+
+                omci_msg = results.fields['omci_message']
+                status = omci_msg.fields['success_code']
+
+                if status != ReasonCodes.Success.value:
+                    raise GetCapabilitiesFailure(
+                        'Get supported msg types request at offset {} of {} failed with status code: {}'.
+                        format(offset + 1, count, status))
+
+                # Extract the data
+                num_octets = count - offset
+                if num_octets > self._pdu_size:
+                    num_octets = self._pdu_size
+
+                data = omci_msg.fields['data']['message_type_table']
+                data_buffer += data[:num_octets]
+
+            def buffer_to_message_type(value):
+                """
+                Convert an integer value to the appropriate EntityOperations enumeration
+                :param value: (int) Message type value (4..29)
+                :return: (EntityOperations) Enumeration, None on failure
+                """
+                return next((v for k, v in EntityOperations.__members__.items()
+                             if v.value == value), None)
+
+            msg_types = {buffer_to_message_type(v) for v in data_buffer if v is not None}
+            returnValue({msg_type for msg_type in msg_types if msg_type is not None})
+
+        except Exception as e:
+            self.log.exception('get-msg-types', e=e)
+            self.deferred.errback(failure.Failure(e))
diff --git a/python/extensions/omci/tasks/reboot_task.py b/python/extensions/omci/tasks/reboot_task.py
new file mode 100644
index 0000000..316e23b
--- /dev/null
+++ b/python/extensions/omci/tasks/reboot_task.py
@@ -0,0 +1,125 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from enum import IntEnum
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, failure, TimeoutError
+from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
+from voltha.extensions.omci.omci_me import OntGFrame
+from voltha.extensions.omci.omci_cc import DEFAULT_OMCI_TIMEOUT
+
+RC = ReasonCodes
+OP = EntityOperations
+
+
+class RebootException(Exception):
+    pass
+
+
+class DeviceBusy(Exception):
+    pass
+
+
+class RebootFlags(IntEnum):
+    Reboot_Unconditionally = 0
+    Reboot_If_No_POTS_VoIP_In_Progress = 1
+    Reboot_If_No_Emergency_Call_In_Progress = 2
+
+
+class OmciRebootRequest(Task):
+    """
+    OpenOMCI routine to request reboot of an ONU
+    """
+    task_priority = Task.MAX_PRIORITY
+    name = "ONU OMCI Reboot Task"
+    # adopt the global default
+    DEFAULT_REBOOT_TIMEOUT = DEFAULT_OMCI_TIMEOUT
+
+    def __init__(self, omci_agent, device_id,
+                 flags=RebootFlags.Reboot_Unconditionally,
+                 timeout=DEFAULT_REBOOT_TIMEOUT):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param flags: (RebootFlags) Reboot condition
+        :param timeout: (int or float) OMCI request timeout in seconds
+        """
+        super(OmciRebootRequest, self).__init__(OmciRebootRequest.name,
+                                                omci_agent,
+                                                device_id,
+                                                priority=OmciRebootRequest.task_priority,
+                                                exclusive=True)
+        self._device = omci_agent.get_device(device_id)
+        self._flags = flags
+        self._timeout = timeout
+        self._local_deferred = None
+
+    def cancel_deferred(self):
+        super(OmciRebootRequest, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """ Start task """
+        super(OmciRebootRequest, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_reboot)
+
+    @inlineCallbacks
+    def perform_reboot(self):
+        """
+        Perform the reboot requests
+
+        Depending on the ONU implementation, a response may not be returned. For this
+        reason, a timeout is considered successful.
+        """
+        self.log.info('perform-reboot')
+
+        try:
+            frame = OntGFrame().reboot(reboot_code=self._flags)
+            self.strobe_watchdog()
+            results = yield self._device.omci_cc.send(frame, timeout=self._timeout)
+
+            status = results.fields['omci_message'].fields['success_code']
+            self.log.debug('reboot-status', status=status)
+
+            # Did the reboot request fail?
+            if status != RC.Success.value:
+                if self._flags != RebootFlags.Reboot_Unconditionally and\
+                        status == RC.DeviceBusy.value:
+                    raise DeviceBusy('ONU is busy, try again later')
+                else:
+                    msg = 'Reboot request failed with status {}'.format(status)
+                    raise RebootException(msg)
+
+            self.log.info('reboot-success')
+            self.deferred.callback(self)
+
+        except TimeoutError:
+            self.log.info('timeout', msg='Request timeout is not considered an error')
+            self.deferred.callback(None)
+
+        except DeviceBusy as e:
+            self.log.warn('perform-reboot', msg=e)
+            self.deferred.errback(failure.Failure(e))
+
+        except Exception as e:
+            self.log.exception('perform-reboot', e=e)
+            self.deferred.errback(failure.Failure(e))
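
A minimal usage sketch for the reboot task, assuming the adapter already holds
an 'omci_agent' and a per-device TaskRunner (added later in this change); the
helper name and the 10s back-off are illustrative only.

    from twisted.internet import reactor

    def request_onu_reboot(omci_agent, device_id, task_runner):
        # Queue the (exclusive, max-priority) reboot task on the device runner
        task = OmciRebootRequest(omci_agent, device_id,
                                 flags=RebootFlags.Reboot_Unconditionally)
        d = task_runner.queue_task(task)

        def on_busy(fail):
            if fail.check(DeviceBusy):
                # ONU reported busy; retry after an arbitrary 10s back-off
                reactor.callLater(10, request_onu_reboot,
                                  omci_agent, device_id, task_runner)
                return None
            return fail

        return d.addErrback(on_busy)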
diff --git a/python/extensions/omci/tasks/sync_time_task.py b/python/extensions/omci/tasks/sync_time_task.py
new file mode 100644
index 0000000..b5b1dc9
--- /dev/null
+++ b/python/extensions/omci/tasks/sync_time_task.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from task import Task
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
+from voltha.extensions.omci.omci_me import OntGFrame
+from voltha.extensions.omci.omci_defs import ReasonCodes as RC
+from datetime import datetime
+
+
+class SyncTimeTask(Task):
+    """
+    OpenOMCI - Synchronize the ONU time with server
+    """
+    task_priority = Task.DEFAULT_PRIORITY + 10
+    name = "Sync Time Task"
+
+    def __init__(self, omci_agent, device_id, use_utc=True):
+        """
+        Class initialization
+
+        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
+        :param device_id: (str) ONU Device ID
+        :param use_utc: (bool) Use UTC time if True, otherwise local time
+        """
+        super(SyncTimeTask, self).__init__(SyncTimeTask.name,
+                                           omci_agent,
+                                           device_id,
+                                           priority=SyncTimeTask.task_priority,
+                                           exclusive=False)
+        self._local_deferred = None
+        self._use_utc = use_utc
+
+    def cancel_deferred(self):
+        super(SyncTimeTask, self).cancel_deferred()
+
+        d, self._local_deferred = self._local_deferred, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def start(self):
+        """
+        Start the tasks
+        """
+        super(SyncTimeTask, self).start()
+        self._local_deferred = reactor.callLater(0, self.perform_sync_time)
+
+    def stop(self):
+        """
+        Shutdown the tasks
+        """
+        self.log.debug('stopping')
+
+        self.cancel_deferred()
+        super(SyncTimeTask, self).stop()
+
+    @inlineCallbacks
+    def perform_sync_time(self):
+        """
+        Sync the time
+        """
+        self.log.debug('perform-sync-time')
+
+        try:
+            device = self.omci_agent.get_device(self.device_id)
+
+            #########################################
+            # ONT-G (ME #256)
+            dt = datetime.utcnow() if self._use_utc else datetime.now()
+
+            results = yield device.omci_cc.send(OntGFrame().synchronize_time(dt))
+
+            omci_msg = results.fields['omci_message'].fields
+            status = omci_msg['success_code']
+            self.log.debug('sync-time', status=status)
+
+            if status == RC.Success:
+                self.log.info('sync-time', success_info=omci_msg['success_info'] & 0x0f)
+
+            assert status == RC.Success, 'Unexpected Response Status: {}'.format(status)
+
+            # Successful if here
+            self.deferred.callback(results)
+
+        except TimeoutError as e:
+            self.log.warn('sync-time-timeout', e=e)
+            self.deferred.errback(failure.Failure(e))
+
+        except Exception as e:
+            self.log.exception('sync-time', e=e)
+            self.deferred.errback(failure.Failure(e))
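
SyncTimeTask is non-exclusive, so it can be queued alongside other work. A
brief sketch, assuming the same 'omci_agent'/'task_runner' pair and a
module-level structlog 'log' as used elsewhere in this change:

    d = task_runner.queue_task(SyncTimeTask(omci_agent, device_id, use_utc=True))
    d.addErrback(lambda fail: log.warn('sync-time-failed',
                                       reason=fail.getErrorMessage()))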
diff --git a/python/extensions/omci/tasks/task.py b/python/extensions/omci/tasks/task.py
new file mode 100644
index 0000000..36020c0
--- /dev/null
+++ b/python/extensions/omci/tasks/task.py
@@ -0,0 +1,188 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from twisted.internet import defer, reactor
+from twisted.internet.defer import failure
+
+
+class WatchdogTimeoutFailure(Exception):
+    """Task callback/errback not called properly before watchdog expiration"""
+    pass
+
+
+class Task(object):
+    """
+    OpenOMCI Base Task implementation
+
+    An OMCI task can be one or more OMCI requests, comparisons, or whatever
+    is needed to do a specific unit of work that needs to be run to completion
+    successfully.
+
+    On successful completion, the task should call the 'callback' method of
+    the deferred and pass back whatever is meaningful to the user/state-machine
+    that launched it.
+
+    On failure, the 'errback' routine should be called with an appropriate
+    Failure object.
+    """
+    DEFAULT_PRIORITY = 128
+    MIN_PRIORITY = 0
+    MAX_PRIORITY = 255
+    DEFAULT_WATCHDOG_SECS = 10          # 10 seconds
+    MIN_WATCHDOG_SECS = 3               # 3 seconds
+    MAX_WATCHDOG_SECS = 60              # 60 seconds
+
+    _next_task_id = 0
+
+    def __init__(self, name, omci_agent, device_id, priority=DEFAULT_PRIORITY,
+                 exclusive=True, watchdog_timeout=DEFAULT_WATCHDOG_SECS):
+        """
+        Class initialization
+
+        :param name: (str) Task Name
+        :param device_id: (str) ONU Device ID
+        :param priority: (int) Task priority (0..255) 255 Highest
+        :param exclusive: (bool) If True, this task needs exclusive access to the
+                                 OMCI Communications channel when it runs
+        :param watchdog_timeout: (int or float) Watchdog timeout (seconds) after
+                                 task start. For longer-running tasks, call
+                                 'strobe_watchdog()' periodically to reschedule.
+        """
+        assert Task.MIN_PRIORITY <= priority <= Task.MAX_PRIORITY, \
+            'Priority should be {}..{}'.format(Task.MIN_PRIORITY, Task.MAX_PRIORITY)
+
+        assert Task.MIN_WATCHDOG_SECS <= watchdog_timeout <= Task.MAX_WATCHDOG_SECS, \
+            'Watchdog timeout should be {}..{} seconds'.format(
+                Task.MIN_WATCHDOG_SECS, Task.MAX_WATCHDOG_SECS)
+
+        Task._next_task_id += 1
+        self._task_id = Task._next_task_id
+        self.log = structlog.get_logger(device_id=device_id, name=name,
+                                        task_id=self._task_id)
+        self.name = name
+        self.device_id = device_id
+        self.omci_agent = omci_agent
+        self._running = False
+        self._exclusive = exclusive
+        self._deferred = defer.Deferred()       # Fires upon completion
+        self._watchdog = None
+        self._watchdog_timeout = watchdog_timeout
+        self._priority = priority
+
+    def __str__(self):
+        return 'Task: {}, ID:{}, Priority: {}, Exclusive: {}, Watchdog: {}'.format(
+            self.name, self.task_id, self.priority, self.exclusive, self.watchdog_timeout)
+
+    @property
+    def priority(self):
+        return self._priority
+
+    @property
+    def task_id(self):
+        return self._task_id
+
+    @property
+    def exclusive(self):
+        return self._exclusive
+
+    @property
+    def watchdog_timeout(self):
+        return self._watchdog_timeout
+
+    @property
+    def deferred(self):
+        return self._deferred
+
+    @property
+    def running(self):
+        # Is the Task running?
+        #
+        # Can be useful for tasks that use inline callbacks to detect
+        # if the task has been canceled.
+        #
+        return self._running
+
+    def cancel_deferred(self):
+        d1, self._deferred = self._deferred, None
+        d2, self._watchdog = self._watchdog, None
+
+        for d in [d1, d2]:
+            try:
+                if d is not None and not d.called:
+                    d.cancel()
+            except:
+                pass
+
+    def start(self):
+        """
+        Start task operations
+        """
+        self.log.debug('starting')
+        assert self._deferred is not None and not self._deferred.called, \
+            'Cannot re-use the same task'
+        self._running = True
+        self.strobe_watchdog()
+
+    def stop(self):
+        """
+        Stop task synchronization
+        """
+        self.log.debug('stopping')
+        self._running = False
+        self.cancel_deferred()
+        self.omci_agent = None      # Should only start/stop once
+
+    def task_cleanup(self):
+        """
+        This method should only be called from the TaskRunner's callback/errback
+        that is added when the task is initially queued. It is responsible for
+        clearing the 'running' flag and canceling the watchdog timer
+        """
+        self._running = False
+        d, self._watchdog = self._watchdog, None
+        try:
+            if d is not None and not d.called:
+                d.cancel()
+        except:
+            pass
+
+    def strobe_watchdog(self):
+        """
+        Signal that we have not hung/deadlocked
+        """
+        # Create if first time (called at Task start)
+
+        def watchdog_timeout():
+            # Task may have hung (blocked) or failed to call proper success/error
+            # completion callback/errback
+            if not self.deferred.called:
+                err_msg = 'Task {}:{} watchdog timeout'.format(self.name, self.task_id)
+                self.log.error("task-watchdog-timeout", running=self.running,
+                               timeout=self.watchdog_timeout, error=err_msg)
+
+                self.deferred.errback(failure.Failure(WatchdogTimeoutFailure(err_msg)))
+                self.deferred.cancel()
+
+        if self._watchdog is not None:
+            if self._watchdog.called:
+                # Too late, timeout failure in progress
+                self.log.warn('task-watchdog-tripped', running=self.running,
+                              timeout=self.watchdog_timeout)
+                return
+
+            d, self._watchdog = self._watchdog, None
+            d.cancel()
+
+        # Schedule/re-schedule the watchdog timer
+        self._watchdog = reactor.callLater(self.watchdog_timeout, watchdog_timeout)
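
To make the callback/errback contract concrete, a minimal subclass sketch under
the conventions above; ExampleNoopTask is illustrative and not part of this
change:

    from twisted.internet import defer, reactor
    from twisted.internet.defer import failure, inlineCallbacks

    class ExampleNoopTask(Task):
        """A do-nothing task showing the expected lifecycle"""
        name = "Example No-op Task"

        def __init__(self, omci_agent, device_id):
            super(ExampleNoopTask, self).__init__(ExampleNoopTask.name,
                                                  omci_agent, device_id,
                                                  exclusive=False)

        def start(self):
            super(ExampleNoopTask, self).start()
            reactor.callLater(0, self.perform_work)

        @inlineCallbacks
        def perform_work(self):
            try:
                yield defer.succeed(None)     # real OMCI requests would go here
                self.strobe_watchdog()        # keep the watchdog alive if slow
                self.deferred.callback(self)  # report success to the runner
            except Exception as e:
                self.deferred.errback(failure.Failure(e))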
diff --git a/python/extensions/omci/tasks/task_runner.py b/python/extensions/omci/tasks/task_runner.py
new file mode 100644
index 0000000..eb7a252
--- /dev/null
+++ b/python/extensions/omci/tasks/task_runner.py
@@ -0,0 +1,285 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+from twisted.internet import reactor
+
+
+class TaskRunner(object):
+    """
+    Control the number of running tasks utilizing the OMCI communications
+    channel (OMCI_CC)
+    """
+    def __init__(self, device_id, clock=None):
+        self.log = structlog.get_logger(device_id=device_id)
+        self._pending_queue = dict()   # task-priority -> [tasks]
+        self._running_queue = dict()   # task-id -> task
+        self._active = False
+
+        self._successful_tasks = 0
+        self._failed_tasks = 0
+        self._watchdog_timeouts = 0
+        self._last_watchdog_failure_task = ''
+        self.reactor = clock if clock is not None else reactor
+
+    def __str__(self):
+        return 'TaskRunner: Pending: {}, Running: {}'.format(self.pending_tasks,
+                                                             self.running_tasks)
+
+    @property
+    def active(self):
+        return self._active
+
+    @property
+    def pending_tasks(self):
+        """
+        Get the number of tasks pending to run
+        """
+        count = 0
+        for tasks in self._pending_queue.itervalues():
+            count += len(tasks)
+        return count
+
+    @property
+    def running_tasks(self):
+        """
+        Get the number of tasks currently running
+        """
+        return len(self._running_queue)
+
+    @property
+    def successful_tasks_completed(self):
+        return self._successful_tasks
+
+    @property
+    def failed_tasks(self):
+        return self._failed_tasks
+
+    @property
+    def watchdog_timeouts(self):
+        return self._watchdog_timeouts
+
+    @property
+    def last_watchdog_failure_task(self):
+        """ Task name of last tasks to fail due to watchdog"""
+        return self._last_watchdog_failure_task
+
+    # TODO: add properties for various stats as needed
+
+    def start(self):
+        """
+        Start the Task runner
+        """
+        self.log.debug('starting', active=self._active)
+
+        if not self._active:
+            assert len(self._running_queue) == 0, 'Running task queue not empty'
+            self._active = True
+            self._run_next_task()
+
+    def stop(self):
+        """
+        Stop the Task runner, first stopping any tasks and flushing the queue
+        """
+        self.log.debug('stopping', active=self._active)
+
+        if self._active:
+            self._active = False
+
+            pq, self._pending_queue = self._pending_queue, dict()
+            rq, self._running_queue = self._running_queue, dict()
+
+            # Stop running tasks
+            for task in rq.itervalues():
+                try:
+                    task.stop()
+                except:
+                    pass
+
+            # Cancel pending tasks (pq maps priority -> list of tasks)
+            for tasks in pq.itervalues():
+                for task in tasks:
+                    try:
+                        task.deferred.cancel()
+                    except:
+                        pass
+
+    def _run_next_task(self):
+        """
+        Search for the next task to run, if one can be run now
+        """
+        self.log.debug('run-next', active=self._active,
+                       num_running=len(self._running_queue),
+                       num_pending=len(self._pending_queue))
+
+        if self._active and len(self._pending_queue) > 0:
+            # Cannot run a new task if a running one needs the OMCI_CC exclusively
+
+            if any(task.exclusive for task in self._running_queue.itervalues()):
+                self.log.debug('exclusive-running')
+                return    # An exclusive task is already running
+
+            try:
+                priorities = sorted(self._pending_queue.iterkeys(), reverse=True)
+                highest_priority = priorities[0] if len(priorities) else None
+
+                if highest_priority is not None:
+                    queue = self._pending_queue[highest_priority]
+                    next_task = queue[0] if len(queue) else None
+
+                    if next_task is not None:
+                        if next_task.exclusive and len(self._running_queue) > 0:
+                            self.log.debug('next-is-exclusive', task=str(next_task))
+                            return  # Next task to run needs exclusive access
+
+                        queue.pop(0)
+                        if len(queue) == 0:
+                            del self._pending_queue[highest_priority]
+
+                        self.log.debug('starting-task', task=str(next_task),
+                                       running=len(self._running_queue),
+                                       pending=len(self._pending_queue))
+
+                        self._running_queue[next_task.task_id] = next_task
+                        self.reactor.callLater(0, next_task.start)
+
+                # Run again if others are waiting
+                if len(self._pending_queue):
+                    self._run_next_task()
+
+            except Exception as e:
+                self.log.exception('run-next', e=e)
+
+    def _on_task_success(self, results, task):
+        """
+        A task completed successfully callback
+        :param results: deferred results
+        :param task: (Task) The task that succeeded
+        :return: deferred results
+        """
+        self.log.debug('task-success', task_id=str(task),
+                       running=len(self._running_queue),
+                       pending=len(self._pending_queue))
+        try:
+            assert task is not None and task.task_id in self._running_queue,\
+                'Task not found in running queue'
+
+            task.task_cleanup()
+            self._successful_tasks += 1
+            del self._running_queue[task.task_id]
+
+        except Exception as e:
+            self.log.exception('task-error', task=str(task), e=e)
+
+        finally:
+            reactor.callLater(0, self._run_next_task)
+
+        return results
+
+    def _on_task_failure(self, failure, task):
+        """
+        A task completed with failure callback
+        :param failure: (Failure) Failure results
+        :param task: (Task) The task that failed
+        :return: (Failure) Failure results
+        """
+        from voltha.extensions.omci.tasks.task import WatchdogTimeoutFailure
+
+        self.log.debug('task-failure', task_id=str(task),
+                       running=len(self._running_queue),
+                       pending=len(self._pending_queue))
+        try:
+            assert task is not None and task.task_id in self._running_queue,\
+                'Task not found in running queue'
+
+            task.task_cleanup()
+            self._failed_tasks += 1
+            del self._running_queue[task.task_id]
+
+            if isinstance(failure.value, WatchdogTimeoutFailure):
+                self._watchdog_timeouts += 1
+                self._last_watchdog_failure_task = task.name
+
+        except Exception as e:
+            # The task may never have started (e.g. it was canceled while
+            # still pending), so look for it in the pending queue as well
+
+            for priority, tasks in self._pending_queue.iteritems():
+                found = next((t for t in tasks if t.task_id == task.task_id), None)
+
+                if found is not None:
+                    self._pending_queue[task.priority].remove(task)
+                    if len(self._pending_queue[task.priority]) == 0:
+                        del self._pending_queue[task.priority]
+                    return failure
+
+            self.log.exception('task-error', task=str(task), e=e)
+            raise
+
+        finally:
+            reactor.callLater(0, self._run_next_task)
+
+        return failure
+
+    def queue_task(self, task):
+        """
+        Place a task on the queue to run
+
+        :param task: (Task) task to run
+        :return: (deferred) Deferred that will fire on task completion
+        """
+        self.log.debug('queue-task', active=self._active, task=str(task),
+                       running=len(self._running_queue),
+                       pending=len(self._pending_queue))
+
+        if task.priority not in self._pending_queue:
+            self._pending_queue[task.priority] = []
+
+        task.deferred.addCallbacks(self._on_task_success, self._on_task_failure,
+                                   callbackArgs=[task], errbackArgs=[task])
+
+        self._pending_queue[task.priority].append(task)
+        self._run_next_task()
+
+        return task.deferred
+
+    def cancel_task(self, task_id):
+        """
+        Cancel a pending or running task.  The cancel method will be called
+        for the task's deferred
+
+        :param task_id: (int) Task identifier
+        """
+        task = self._running_queue.get(task_id, None)
+
+        if task is not None:
+            try:
+                task.stop()
+            except Exception as e:
+                self.log.exception('stop-error', task=str(task), e=e)
+
+            reactor.callLater(0, self._run_next_task)
+
+        else:
+            for priority, tasks in self._pending_queue.iteritems():
+                task = next((t for t in tasks if t.task_id == task_id), None)
+
+                if task is not None:
+                    try:
+                        task.deferred.cancel()
+                    except Exception as e:
+                        self.log.exception('cancel-error', task=str(task), e=e)
+                    return
+
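
Putting the pieces together, priorities and exclusivity behave as sketched
below; 'omci_agent' and the task imports are assumed from the files above:

    # One runner per ONU device
    runner = TaskRunner(device_id='onu-1')
    runner.start()

    # Priority 138, non-exclusive: may run alongside other tasks
    runner.queue_task(SyncTimeTask(omci_agent, 'onu-1'))

    # Priority 255, exclusive: runs only when nothing else holds the OMCI_CC
    runner.queue_task(OmciRebootRequest(omci_agent, 'onu-1'))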
diff --git a/python/kafka/__init__.py b/python/kafka/__init__.py
new file mode 100644
index 0000000..58aca1e
--- /dev/null
+++ b/python/kafka/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/kafka/adapter_proxy.py b/python/kafka/adapter_proxy.py
new file mode 100644
index 0000000..ddb11da
--- /dev/null
+++ b/python/kafka/adapter_proxy.py
@@ -0,0 +1,110 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Agent to play gateway between adapters.
+"""
+
+import structlog
+from uuid import uuid4
+from twisted.internet.defer import inlineCallbacks, returnValue
+from container_proxy import ContainerProxy
+from voltha.protos import third_party
+from voltha.protos.inter_container_pb2 import InterAdapterHeader, \
+    InterAdapterMessage
+import time
+
+_ = third_party
+log = structlog.get_logger()
+
+
+class AdapterProxy(ContainerProxy):
+
+    def __init__(self, kafka_proxy, core_topic, my_listening_topic):
+        super(AdapterProxy, self).__init__(kafka_proxy,
+                                           core_topic,
+                                           my_listening_topic)
+
+    def _to_string(self, unicode_str):
+        if unicode_str is not None:
+            if type(unicode_str) == unicode:
+                return unicode_str.encode('ascii', 'ignore')
+            else:
+                return unicode_str
+        else:
+            return ""
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def send_inter_adapter_message(self,
+                                   msg,
+                                   type,
+                                   from_adapter,
+                                   to_adapter,
+                                   to_device_id=None,
+                                   proxy_device_id=None,
+                                   message_id=None):
+        """
+        Sends a message directly to an adapter. This is typically used to send
+        proxied messages from one adapter to another.  An initial ACK response
+        is sent back to the invoking adapter.  If there is subsequent response
+        to be sent back (async) then the adapter receiving this request will
+        use this same API to send back the async response.
+        :param msg: GRPC message to send
+        :param type: InterAdapterMessageType of the message to send
+        :param from_adapter: Name of the adapter making the request.
+        :param to_adapter: Name of the remote adapter.
+        :param to_device_id: The ID of the device to which the message is
+        intended. If it's None, the message is not intended for a specific
+        device. Its interpretation is adapter specific.
+        :param proxy_device_id: The ID of the device that will proxy the
+        message. If it's None, there is no specific device to proxy the
+        message. Its interpretation is adapter specific.
+        :param message_id: A unique number for this transaction that the
+        adapter may use to correlate a request and an async response.
+        """
+
+        try:
+            # validate params
+            assert msg
+            assert from_adapter
+            assert to_adapter
+
+            # Build the inter adapter message
+            h = InterAdapterHeader()
+            h.type = type
+            h.from_topic = self._to_string(from_adapter)
+            h.to_topic = self._to_string(to_adapter)
+            h.to_device_id = self._to_string(to_device_id)
+            h.proxy_device_id = self._to_string(proxy_device_id)
+
+            if message_id:
+                h.id = self._to_string(message_id)
+            else:
+                h.id = uuid4().hex
+
+            h.timestamp = int(round(time.time() * 1000))
+            iaMsg = InterAdapterMessage()
+            iaMsg.header.CopyFrom(h)
+            iaMsg.body.Pack(msg)
+
+            log.debug("sending-inter-adapter-message", header=iaMsg.header)
+            res = yield self.invoke(rpc="process_inter_adapter_message",
+                                    to_topic=iaMsg.header.to_topic,
+                                    msg=iaMsg)
+            returnValue(res)
+        except Exception as e:
+            log.exception("error-sending-request", e=e)
diff --git a/python/kafka/adapter_request_facade.py b/python/kafka/adapter_request_facade.py
new file mode 100644
index 0000000..7bf41e5
--- /dev/null
+++ b/python/kafka/adapter_request_facade.py
@@ -0,0 +1,337 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+This facade handles kafka-formatted messages from the Core, extracts the kafka
+formatting and forwards the request to the concrete handler.
+"""
+import structlog
+from twisted.internet.defer import inlineCallbacks
+from zope.interface import implementer
+from twisted.internet import reactor
+
+from afkak.consumer import OFFSET_LATEST, OFFSET_EARLIEST
+from voltha.adapters.interface import IAdapterInterface
+from voltha.protos.inter_container_pb2 import IntType, InterAdapterMessage, StrType, Error, ErrorCode
+from voltha.protos.device_pb2 import Device, ImageDownload
+from voltha.protos.openflow_13_pb2 import FlowChanges, FlowGroups, Flows, \
+    FlowGroupChanges, ofp_packet_out
+from kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+    get_messaging_proxy, KAFKA_OFFSET_LATEST, KAFKA_OFFSET_EARLIEST
+
+log = structlog.get_logger()
+
+
+class MacAddressError(BaseException):
+    def __init__(self, error):
+        self.error = error
+
+
+class IDError(BaseException):
+    def __init__(self, error):
+        self.error = error
+
+
+@implementer(IAdapterInterface)
+class AdapterRequestFacade(object):
+    """
+    Gate-keeper between CORE and device adapters.
+
+    On one side it interacts with Core's internal model and update/dispatch
+    mechanisms.
+
+    On the other side, it interacts with the adapters' standard interface as
+    defined in IAdapterInterface
+    """
+
+    def __init__(self, adapter):
+        self.adapter = adapter
+
+    @inlineCallbacks
+    def start(self):
+        log.debug('starting')
+
+    @inlineCallbacks
+    def stop(self):
+        log.debug('stopping')
+
+    @inlineCallbacks
+    def createKafkaDeviceTopic(self, deviceId):
+        log.debug("subscribing-to-topic", device_id=deviceId)
+        kafka_proxy = get_messaging_proxy()
+        device_topic = kafka_proxy.get_default_topic() + "_" + deviceId
+        # yield kafka_proxy.create_topic(topic=device_topic)
+        yield kafka_proxy.subscribe(topic=device_topic, group_id=device_topic, target_cls=self, offset=KAFKA_OFFSET_EARLIEST)
+        log.debug("subscribed-to-topic", topic=device_topic)
+
+    def adopt_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+
+            # Start the creation of a device specific topic to handle all
+            # subsequent requests from the Core. This adapter instance will
+            # handle all requests for that device.
+            reactor.callLater(0, self.createKafkaDeviceTopic, d.id)
+
+            result = self.adapter.adopt_device(d)
+            # return True, self.adapter.adopt_device(d)
+
+            return True, result
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+
+    def get_ofp_device_info(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return True, self.adapter.get_ofp_device_info(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+
+    def get_ofp_port_info(self, device, port_no):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        p = IntType()
+        if port_no:
+            port_no.Unpack(p)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="port-no-invalid")
+
+        return True, self.adapter.get_ofp_port_info(d, p.val)
+
+    def reconcile_device(self, device):
+        return self.adapter.reconcile_device(device)
+
+    def abandon_device(self, device):
+        return self.adapter.abandon_device(device)
+
+    def disable_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return True, self.adapter.disable_device(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+
+    def reenable_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return True, self.adapter.reenable_device(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+
+    def reboot_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return (True, self.adapter.reboot_device(d))
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+
+    def download_image(self, device, request):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        img = ImageDownload()
+        if request:
+            request.Unpack(img)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="request-invalid")
+
+        return True, self.adapter.download_image(d, img)
+
+    def get_image_download_status(self, device, request):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        img = ImageDownload()
+        if request:
+            request.Unpack(img)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="request-invalid")
+
+        return True, self.adapter.get_image_download_status(d, img)
+
+    def cancel_image_download(self, device, request):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        img = ImageDownload()
+        if request:
+            request.Unpack(img)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="request-invalid")
+
+        return True, self.adapter.cancel_image_download(d, img)
+
+    def activate_image_update(self, device, request):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        img = ImageDownload()
+        if request:
+            request.Unpack(img)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="request-invalid")
+
+        return True, self.adapter.activate_image_update(d, img)
+
+    def revert_image_update(self, device, request):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        img = ImageDownload()
+        if request:
+            request.Unpack(img)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="request-invalid")
+
+        return True, self.adapter.revert_image_update(d, img)
+
+    def self_test(self, device):
+        return self.adapter.self_test_device(device)
+
+    def delete_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            result = self.adapter.delete_device(d)
+            # return (True, self.adapter.delete_device(d))
+
+            # Before we return, delete the device specific topic as we will no
+            # longer receive requests from the Core for that device
+            kafka_proxy = get_messaging_proxy()
+            device_topic = kafka_proxy.get_default_topic() + "/" + d.id
+            kafka_proxy.unsubscribe(topic=device_topic)
+
+            return (True, result)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+
+    def get_device_details(self, device):
+        return self.adapter.get_device_details(device)
+
+    def update_flows_bulk(self, device, flows, groups):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        f = Flows()
+        if flows:
+            flows.Unpack(f)
+
+        g = FlowGroups()
+        if groups:
+            groups.Unpack(g)
+
+        return (True, self.adapter.update_flows_bulk(d, f, g))
+
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
+        f = FlowChanges()
+        if flow_changes:
+            flow_changes.Unpack(f)
+
+        g = FlowGroupChanges()
+        if group_changes:
+            group_changes.Unpack(g)
+
+        return (True, self.adapter.update_flows_incrementally(d, f, g))
+
+    def suppress_alarm(self, filter):
+        return self.adapter.suppress_alarm(filter)
+
+    def unsuppress_alarm(self, filter):
+        return self.adapter.unsuppress_alarm(filter)
+
+    def process_inter_adapter_message(self, msg):
+        m = InterAdapterMessage()
+        if msg:
+            msg.Unpack(m)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="msg-invalid")
+
+        return (True, self.adapter.process_inter_adapter_message(m))
+
+    def receive_packet_out(self, deviceId, outPort, packet):
+        try:
+            d_id = StrType()
+            if deviceId:
+                deviceId.Unpack(d_id)
+            else:
+                return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                    reason="deviceid-invalid")
+
+            op = IntType()
+            if outPort:
+                outPort.Unpack(op)
+            else:
+                return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                    reason="outport-invalid")
+
+            p = ofp_packet_out()
+            if packet:
+                packet.Unpack(p)
+            else:
+                return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                    reason="packet-invalid")
+
+            return (True, self.adapter.receive_packet_out(d_id.val, op.val, p))
+        except Exception as e:
+            log.exception("error-processing-receive_packet_out", e=e)
+
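
A sketch of how the facade attaches to the messaging layer, reusing the
subscribe() signature seen in createKafkaDeviceTopic above; 'my_adapter' and
the group id are illustrative:

    # Assumes the adapter's main() has already started the IKafkaMessagingProxy
    # singleton, so get_messaging_proxy() returns a live proxy
    facade = AdapterRequestFacade(adapter=my_adapter)
    kafka_proxy = get_messaging_proxy()
    kafka_proxy.subscribe(topic=kafka_proxy.get_default_topic(),
                          group_id='brcm_openomci_onu',
                          target_cls=facade)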
diff --git a/python/kafka/container_proxy.py b/python/kafka/container_proxy.py
new file mode 100644
index 0000000..d7f18b4
--- /dev/null
+++ b/python/kafka/container_proxy.py
@@ -0,0 +1,133 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+The superclass for all kafka proxies.
+"""
+
+import structlog
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python import failure
+from zope.interface import implementer
+
+from common.utils.deferred_utils import DeferredWithTimeout, \
+    TimeOutError
+from voltha.core.registry import IComponent
+
+log = structlog.get_logger()
+
+
+class KafkaMessagingError(BaseException):
+    def __init__(self, error):
+        self.error = error
+
+
+@implementer(IComponent)
+class ContainerProxy(object):
+
+    def __init__(self, kafka_proxy, core_topic, my_listening_topic):
+        self.kafka_proxy = kafka_proxy
+        self.listening_topic = my_listening_topic
+        self.core_topic = core_topic
+        self.default_timeout = 3
+
+    def start(self):
+        log.info('started')
+
+        return self
+
+    def stop(self):
+        log.info('stopped')
+
+    @classmethod
+    def wrap_request(cls, return_cls):
+        def real_wrapper(func):
+            @inlineCallbacks
+            def wrapper(*args, **kw):
+                try:
+                    (success, d) = yield func(*args, **kw)
+                    if success:
+                        log.debug("successful-response", func=func, val=d)
+                        if return_cls is not None:
+                            rc = return_cls()
+                            if d is not None:
+                                d.Unpack(rc)
+                            returnValue(rc)
+                        else:
+                            log.debug("successful-response-none", func=func,
+                                      val=None)
+                            returnValue(None)
+                    else:
+                        log.warn("unsuccessful-request", func=func, args=args,
+                                 kw=kw)
+                        returnValue(d)
+                except Exception as e:
+                    log.exception("request-wrapper-exception", func=func, e=e)
+                    raise
+
+            return wrapper
+
+        return real_wrapper
+
+    @inlineCallbacks
+    def invoke(self, rpc, to_topic=None, reply_topic=None, **kwargs):
+        @inlineCallbacks
+        def _send_request(rpc, m_callback, to_topic, reply_topic, **kwargs):
+            try:
+                log.debug("sending-request",
+                          rpc=rpc,
+                          to_topic=to_topic,
+                          reply_topic=reply_topic)
+                if to_topic is None:
+                    to_topic = self.core_topic
+                if reply_topic is None:
+                    reply_topic = self.listening_topic
+                result = yield self.kafka_proxy.send_request(rpc=rpc,
+                                                             to_topic=to_topic,
+                                                             reply_topic=reply_topic,
+                                                             callback=None,
+                                                             **kwargs)
+                if not m_callback.called:
+                    m_callback.callback(result)
+                else:
+                    log.debug('timeout-already-occurred', rpc=rpc)
+            except Exception as e:
+                log.exception("Failure-sending-request", rpc=rpc, kw=kwargs)
+                if not m_callback.called:
+                    m_callback.errback(failure.Failure())
+
+        # We are going to resend the request on the to_topic if there is a
+        # timeout error. This time the timeout will be longer.  If the second
+        # request times out then we will send the request to the default
+        # core_topic.
+        timeouts = [self.default_timeout,
+                    self.default_timeout*2,
+                    self.default_timeout]
+        retry = 0
+        max_retry = 2
+        for timeout in timeouts:
+            cb = DeferredWithTimeout(timeout=timeout)
+            _send_request(rpc, cb, to_topic, reply_topic, **kwargs)
+            try:
+                res = yield cb
+                returnValue(res)
+            except TimeOutError as e:
+                log.warn('invoke-timeout', e=e)
+                if retry == max_retry:
+                    raise e
+                retry += 1
+                if retry == max_retry:
+                    to_topic = self.core_topic
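
A subclass sketch showing how wrap_request and invoke compose: the decorator
unpacks the Any payload of a successful reply into the given protobuf type.
The GetHealthStatus RPC and HealthStatus message are hypothetical stand-ins:

    from twisted.internet.defer import inlineCallbacks, returnValue

    class ExampleProxy(ContainerProxy):

        @ContainerProxy.wrap_request(HealthStatus)  # hypothetical proto type
        @inlineCallbacks
        def get_health(self):
            # invoke() retries on timeout, falling back to the core topic
            res = yield self.invoke(rpc="GetHealthStatus")
            returnValue(res)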
diff --git a/python/kafka/core_proxy.py b/python/kafka/core_proxy.py
new file mode 100644
index 0000000..4f4579b
--- /dev/null
+++ b/python/kafka/core_proxy.py
@@ -0,0 +1,344 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Agent to play gateway between CORE and an adapter.
+"""
+import structlog
+from google.protobuf.message import Message
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from container_proxy import ContainerProxy
+from voltha.protos.common_pb2 import ID, ConnectStatus, OperStatus
+from voltha.protos.inter_container_pb2 import StrType, BoolType, IntType, Packet
+from voltha.protos.device_pb2 import Device, Ports
+from voltha.protos.voltha_pb2 import CoreInstance
+
+log = structlog.get_logger()
+
+
+def createSubTopic(*args):
+    return '_'.join(args)
+
+
+class CoreProxy(ContainerProxy):
+
+    def __init__(self, kafka_proxy, core_topic, my_listening_topic):
+        super(CoreProxy, self).__init__(kafka_proxy, core_topic,
+                                        my_listening_topic)
+
+    @ContainerProxy.wrap_request(CoreInstance)
+    @inlineCallbacks
+    def register(self, adapter, deviceTypes):
+        log.debug("register")
+        try:
+            res = yield self.invoke(rpc="Register",
+                                    adapter=adapter,
+                                    deviceTypes=deviceTypes)
+            log.info("registration-returned", res=res)
+            returnValue(res)
+        except Exception as e:
+            log.exception("registration-exception", e=e)
+            raise
+
+    @ContainerProxy.wrap_request(Device)
+    @inlineCallbacks
+    def get_device(self, device_id):
+        log.debug("get-device")
+        id = ID()
+        id.id = device_id
+        # Once we have a device being managed, all communications between the
+        # the adapter and the core occurs over a topic associated with that
+        # device
+        to_topic = createSubTopic(self.core_topic, device_id)
+        reply_topic = createSubTopic(self.listening_topic, device_id)
+        res = yield self.invoke(rpc="GetDevice",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_id=id)
+        returnValue(res)
+
+    @ContainerProxy.wrap_request(Device)
+    @inlineCallbacks
+    def get_child_device(self, parent_device_id, **kwargs):
+        raise NotImplementedError()
+
+    @ContainerProxy.wrap_request(Ports)
+    @inlineCallbacks
+    def get_ports(self, device_id, port_type):
+        id = ID()
+        id.id = device_id
+        p_type = IntType()
+        p_type.val = port_type
+        to_topic = createSubTopic(self.core_topic, device_id)
+        reply_topic = createSubTopic(self.listening_topic, device_id)
+        res = yield self.invoke(rpc="GetPorts",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_id=id,
+                                port_type=p_type)
+        returnValue(res)
+
+    def get_child_devices(self, parent_device_id):
+        raise NotImplementedError()
+
+    def get_child_device_with_proxy_address(self, proxy_address):
+        raise NotImplementedError()
+
+    def _to_proto(self, **kwargs):
+        encoded = {}
+        for k, v in kwargs.iteritems():
+            if isinstance(v, Message):
+                encoded[k] = v
+            elif type(v) == int:
+                i_proto = IntType()
+                i_proto.val = v
+                encoded[k] = i_proto
+            elif type(v) == str:
+                s_proto = StrType()
+                s_proto.val = v
+                encoded[k] = s_proto
+            elif type(v) == bool:
+                b_proto = BoolType()
+                b_proto.val = v
+                encoded[k] = b_proto
+        return encoded
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def child_device_detected(self,
+                              parent_device_id,
+                              parent_port_no,
+                              child_device_type,
+                              channel_id,
+                              **kw):
+        id = ID()
+        id.id = parent_device_id
+        ppn = IntType()
+        ppn.val = parent_port_no
+        cdt = StrType()
+        cdt.val = child_device_type
+        channel = IntType()
+        channel.val = channel_id
+        to_topic = createSubTopic(self.core_topic, parent_device_id)
+        reply_topic = createSubTopic(self.listening_topic, parent_device_id)
+        args = self._to_proto(**kw)
+        res = yield self.invoke(rpc="ChildDeviceDetected",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                parent_device_id=id,
+                                parent_port_no=ppn,
+                                child_device_type=cdt,
+                                channel_id=channel,
+                                **args)
+        returnValue(res)
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def device_update(self, device):
+        log.debug("device_update")
+        to_topic = createSubTopic(self.core_topic, device.id)
+        reply_topic = createSubTopic(self.listening_topic, device.id)
+        res = yield self.invoke(rpc="DeviceUpdate",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device=device)
+        returnValue(res)
+
+    def child_device_removed(self, parent_device_id, child_device_id):
+        raise NotImplementedError()
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def device_state_update(self, device_id,
+                            oper_status=None,
+                            connect_status=None):
+        id = ID()
+        id.id = device_id
+        o_status = IntType()
+        # OperStatus.UNKNOWN is 0 (falsy), hence the explicit equality check
+        if oper_status or oper_status == OperStatus.UNKNOWN:
+            o_status.val = oper_status
+        else:
+            o_status.val = -1
+        c_status = IntType()
+        if connect_status or connect_status == ConnectStatus.UNKNOWN:
+            c_status.val = connect_status
+        else:
+            c_status.val = -1
+
+        to_topic = createSubTopic(self.core_topic, device_id)
+        reply_topic = createSubTopic(self.listening_topic, device_id)
+        res = yield self.invoke(rpc="DeviceStateUpdate",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_id=id,
+                                oper_status=o_status,
+                                connect_status=c_status)
+        returnValue(res)
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def children_state_update(self, device_id,
+                              oper_status=None,
+                              connect_status=None):
+        id = ID()
+        id.id = device_id
+        o_status = IntType()
+        if oper_status or oper_status == OperStatus.UNKNOWN:
+            o_status.val = oper_status
+        else:
+            o_status.val = -1
+        c_status = IntType()
+        if connect_status or connect_status == ConnectStatus.UNKNOWN:
+            c_status.val = connect_status
+        else:
+            c_status.val = -1
+
+        to_topic = createSubTopic(self.core_topic, device_id)
+        reply_topic = createSubTopic(self.listening_topic, device_id)
+        res = yield self.invoke(rpc="ChildrenStateUpdate",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_id=id,
+                                oper_status=o_status,
+                                connect_status=c_status)
+        returnValue(res)
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def port_state_update(self,
+                          device_id,
+                          port_type,
+                          port_no,
+                          oper_status):
+        id = ID()
+        id.id = device_id
+        pt = IntType()
+        pt.val = port_type
+        pNo = IntType()
+        pNo.val = port_no
+        o_status = IntType()
+        o_status.val = oper_status
+
+        to_topic = createSubTopic(self.core_topic, device_id)
+        reply_topic = createSubTopic(self.listening_topic, device_id)
+        res = yield self.invoke(rpc="PortStateUpdate",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_id=id,
+                                port_type=pt,
+                                port_no=pNo,
+                                oper_status=o_status)
+        returnValue(res)
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def child_devices_state_update(self, parent_device_id,
+                                   oper_status=None,
+                                   connect_status=None):
+
+        id = ID()
+        id.id = parent_device_id
+        o_status = IntType()
+        if oper_status or oper_status == OperStatus.UNKNOWN:
+            o_status.val = oper_status
+        else:
+            o_status.val = -1
+        c_status = IntType()
+        if connect_status or connect_status == ConnectStatus.UNKNOWN:
+            c_status.val = connect_status
+        else:
+            c_status.val = -1
+
+        to_topic = createSubTopic(self.core_topic, parent_device_id)
+        reply_topic = createSubTopic(self.listening_topic, parent_device_id)
+        res = yield self.invoke(rpc="child_devices_state_update",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                parent_device_id=id,
+                                oper_status=o_status,
+                                connect_status=c_status)
+        returnValue(res)
+
+    def child_devices_removed(self, parent_device_id):
+        raise NotImplementedError()
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def device_pm_config_update(self, device_pm_config, init=False):
+        log.debug("device_pm_config_update")
+        b = BoolType()
+        b.val = init
+        to_topic = createSubTopic(self.core_topic, device_pm_config.id)
+        reply_topic = createSubTopic(self.listening_topic, device_pm_config.id)
+        res = yield self.invoke(rpc="DevicePMConfigUpdate",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_pm_config=device_pm_config,
+                                init=b)
+        returnValue(res)
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def port_created(self, device_id, port):
+        log.debug("port_created")
+        proto_id = ID()
+        proto_id.id = device_id
+        to_topic = createSubTopic(self.core_topic, device_id)
+        reply_topic = createSubTopic(self.listening_topic, device_id)
+        res = yield self.invoke(rpc="PortCreated",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_id=proto_id,
+                                port=port)
+        returnValue(res)
+
+    def port_removed(self, device_id, port):
+        raise NotImplementedError()
+
+    def ports_enabled(self, device_id):
+        raise NotImplementedError()
+
+    def ports_disabled(self, device_id):
+        raise NotImplementedError()
+
+    def ports_oper_status_update(self, device_id, oper_status):
+        raise NotImplementedError()
+
+    def image_download_update(self, img_dnld):
+        raise NotImplementedError()
+
+    def image_download_deleted(self, img_dnld):
+        raise NotImplementedError()
+
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def send_packet_in(self, device_id, port, packet):
+        log.debug("send_packet_in", device_id=device_id)
+        proto_id = ID()
+        proto_id.id = device_id
+        p = IntType()
+        p.val = port
+        pac = Packet()
+        pac.payload = packet
+        to_topic = createSubTopic(self.core_topic, device_id)
+        reply_topic = createSubTopic(self.listening_topic, device_id)
+        res = yield self.invoke(rpc="PacketIn",
+                                to_topic=to_topic,
+                                reply_topic=reply_topic,
+                                device_id=proto_id,
+                                port=p,
+                                packet=pac)
+        returnValue(res)
diff --git a/python/kafka/event_bus_publisher.py b/python/kafka/event_bus_publisher.py
new file mode 100644
index 0000000..8020bf5
--- /dev/null
+++ b/python/kafka/event_bus_publisher.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A gateway between the internal event bus and the Kafka publisher proxy.
+It publishes selected topics and messages posted to the Voltha-internal
+event bus out to the external world.
+"""
+import structlog
+from google.protobuf.json_format import MessageToDict
+from google.protobuf.message import Message
+from simplejson import dumps
+
+from common.event_bus import EventBusClient
+
+log = structlog.get_logger()
+
+
+class EventBusPublisher(object):
+
+    def __init__(self, kafka_proxy, config):
+        self.kafka_proxy = kafka_proxy
+        self.config = config
+        self.topic_mappings = config.get('topic_mappings', {})
+        self.event_bus = EventBusClient()
+        self.subscriptions = None
+
+    def start(self):
+        log.debug('starting')
+        self.subscriptions = list()
+        self._setup_subscriptions(self.topic_mappings)
+        log.info('started')
+        return self
+
+    def stop(self):
+        try:
+            log.debug('stopping-event-bus')
+            if self.subscriptions:
+                for subscription in self.subscriptions:
+                    self.event_bus.unsubscribe(subscription)
+            log.info('stopped-event-bus')
+        except Exception as e:
+            log.exception('failed-stopping-event-bus', e=e)
+            return
+
+    def _setup_subscriptions(self, mappings):
+
+        for event_bus_topic, mapping in mappings.iteritems():
+
+            kafka_topic = mapping.get('kafka_topic', None)
+
+            if kafka_topic is None:
+                log.error('no-kafka-topic-in-config',
+                          event_bus_topic=event_bus_topic,
+                          mapping=mapping)
+                continue
+
+            self.subscriptions.append(self.event_bus.subscribe(
+                event_bus_topic,
+                # to avoid Python late-binding to the last registered
+                # kafka_topic, we force instant binding with the default arg
+                lambda _, m, k=kafka_topic: self.forward(k, m)))
+
+            log.info('event-to-kafka', kafka_topic=kafka_topic,
+                     event_bus_topic=event_bus_topic)
+
+    def forward(self, kafka_topic, msg):
+        try:
+            # convert to JSON string if msg is a protobuf msg
+            if isinstance(msg, Message):
+                msg = dumps(MessageToDict(msg, True, True))
+            log.debug('forward-event-bus-publisher')
+            self.kafka_proxy.send_message(kafka_topic, msg)
+        except Exception as e:
+            log.exception('failed-forward-event-bus-publisher', e=e)
+
diff --git a/python/kafka/kafka_inter_container_library.py b/python/kafka/kafka_inter_container_library.py
new file mode 100644
index 0000000..cf51684
--- /dev/null
+++ b/python/kafka/kafka_inter_container_library.py
@@ -0,0 +1,570 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+from uuid import uuid4
+
+import structlog
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, \
+    DeferredQueue, gatherResults
+from zope.interface import implementer
+
+from common.utils import asleep
+from voltha.core.registry import IComponent
+from kafka_proxy import KafkaProxy, get_kafka_proxy
+from voltha.protos.inter_container_pb2 import MessageType, Argument, \
+    InterContainerRequestBody, InterContainerMessage, Header, \
+    InterContainerResponseBody
+
+log = structlog.get_logger()
+
+KAFKA_OFFSET_LATEST = 'latest'
+KAFKA_OFFSET_EARLIEST = 'earliest'
+
+
+class KafkaMessagingError(Exception):
+    def __init__(self, error):
+        self.error = error
+
+
+@implementer(IComponent)
+class IKafkaMessagingProxy(object):
+    _kafka_messaging_instance = None
+
+    def __init__(self,
+                 kafka_host_port,
+                 kv_store,
+                 default_topic,
+                 group_id_prefix,
+                 target_cls):
+        """
+        Initialize the kafka proxy.  This is a singleton (may change to
+        non-singleton if performance is better)
+        :param kafka_host_port: Kafka host and port
+        :param kv_store: Key-Value store
+        :param default_topic: Default topic to subscribe to
+        :param group_id_prefix: Prefix used to build the default consumer
+        group id
+        :param target_cls: Target class - a method of that class is invoked
+        when a message is received on the default_topic
+        """
+        # raise an exception if the singleton already exists
+        if IKafkaMessagingProxy._kafka_messaging_instance:
+            raise Exception(
+                'Singleton exists for {}'.format(IKafkaMessagingProxy))
+
+        log.debug("Initializing-KafkaProxy")
+        self.kafka_host_port = kafka_host_port
+        self.kv_store = kv_store
+        self.default_topic = default_topic
+        self.default_group_id = "_".join((group_id_prefix, default_topic))
+        self.target_cls = target_cls
+        self.topic_target_cls_map = {}
+        self.topic_callback_map = {}
+        self.subscribers = {}
+        self.kafka_proxy = None
+        self.transaction_id_deferred_map = {}
+        self.received_msg_queue = DeferredQueue()
+        self.stopped = False
+
+        self.init_time = 0
+        self.init_received_time = 0
+
+        self.init_resp_time = 0
+        self.init_received_resp_time = 0
+
+        self.num_messages = 0
+        self.total_time = 0
+        self.num_responses = 0
+        self.total_time_responses = 0
+        log.debug("KafkaProxy-initialized")
+
+    def start(self):
+        try:
+            log.debug("KafkaProxy-starting")
+
+            # Get the kafka proxy instance.  If it does not exist then
+            # create it
+            self.kafka_proxy = get_kafka_proxy()
+            if self.kafka_proxy is None:
+                KafkaProxy(kafka_endpoint=self.kafka_host_port).start()
+                self.kafka_proxy = get_kafka_proxy()
+
+            # Subscribe the default topic and target_cls
+            self.topic_target_cls_map[self.default_topic] = self.target_cls
+
+            # Start the queue to handle incoming messages
+            reactor.callLater(0, self._received_message_processing_loop)
+
+            # Subscribe using the default topic and default group id.  Whenever
+            # a message is received on that topic the target_cls will be
+            # invoked.
+            reactor.callLater(0, self.subscribe,
+                              topic=self.default_topic,
+                              target_cls=self.target_cls,
+                              group_id=self.default_group_id)
+
+            # Setup the singleton instance
+            IKafkaMessagingProxy._kafka_messaging_instance = self
+            log.debug("KafkaProxy-started")
+        except Exception as e:
+            log.exception("Failed-to-start-proxy", e=e)
+
+    def stop(self):
+        """
+        Invoked to stop the kafka proxy
+        :return: None on success, Exception on failure
+        """
+        log.debug("Stopping-messaging-proxy ...")
+        try:
+            # Stop the kafka proxy.  This will stop all the consumers
+            # and producers
+            self.stopped = True
+            self.kafka_proxy.stop()
+            log.debug("Messaging-proxy-stopped.")
+        except Exception as e:
+            log.exception("Exception-when-stopping-messaging-proxy:", e=e)
+
+    def get_target_cls(self):
+        return self.target_cls
+
+    def get_default_topic(self):
+        return self.default_topic
+
+    @inlineCallbacks
+    def _subscribe_group_consumer(self, group_id, topic, offset, callback=None,
+                                  target_cls=None):
+        try:
+            log.debug("subscribing-to-topic-start", topic=topic)
+            yield self.kafka_proxy.subscribe(topic,
+                                             self._enqueue_received_group_message,
+                                             group_id, offset)
+
+            if target_cls is not None and callback is None:
+                # Scenario #1
+                if topic not in self.topic_target_cls_map:
+                    self.topic_target_cls_map[topic] = target_cls
+            elif target_cls is None and callback is not None:
+                # Scenario #2
+                log.debug("custom-callback", topic=topic,
+                          callback_map=self.topic_callback_map)
+                if topic not in self.topic_callback_map:
+                    self.topic_callback_map[topic] = [callback]
+                else:
+                    self.topic_callback_map[topic].extend([callback])
+            else:
+                log.warn("invalid-parameters")
+
+            returnValue(True)
+        except Exception as e:
+            log.exception("Exception-during-subscription", e=e)
+            returnValue(False)
+
+    @inlineCallbacks
+    def subscribe(self, topic, callback=None, target_cls=None,
+                  max_retry=3, group_id=None, offset=KAFKA_OFFSET_LATEST):
+        """
+        Scenario 1:  invoked to subscribe to a specific topic with a
+        target_cls to invoke when a message is received on that topic.  This
+        handles the case of request/response where this library performs the
+        heavy lifting. In this case the m_callback must to be None
+
+        Scenario 2:  invoked to subscribe to a specific topic with a
+        specific callback to invoke when a message is received on that topic.
+        This handles the case where the caller wants to process the message
+        received itself. In this case the target_cls must to be None
+
+        :param topic: topic to subscribe to
+        :param callback: Callback to invoke when a message is received on
+        the topic. Either one of callback or target_cls needs can be none
+        :param target_cls:  Target class to use when a message is
+        received on the topic. There can only be 1 target_cls per topic.
+        Either one of callback or target_cls needs can be none
+        :param max_retry:  the number of retries before reporting failure
+        to subscribe.  This caters for scenario where the kafka topic is not
+        ready.
+        :param group_id:  The ID of the group the consumer is subscribing to
+        :param offset: The topic offset on the kafka bus from where message consumption will start
+        :return: True on success, False on failure
+        """
+        RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+
+        def _backoff(msg, retries):
+            wait_time = RETRY_BACKOFF[min(retries,
+                                          len(RETRY_BACKOFF) - 1)]
+            log.info(msg, retry_in=wait_time)
+            return asleep.asleep(wait_time)
+
+        log.debug("subscribing", topic=topic, group_id=group_id,
+                  callback=callback, target=target_cls)
+
+        retry = 0
+        subscribed = False
+        if group_id is None:
+            group_id = self.default_group_id
+        while not subscribed:
+            subscribed = yield self._subscribe_group_consumer(group_id, topic,
+                                                              callback=callback,
+                                                              target_cls=target_cls,
+                                                              offset=offset)
+            if subscribed:
+                returnValue(True)
+            elif retry > max_retry:
+                returnValue(False)
+            else:
+                _backoff("subscription-not-complete", retry)
+                retry += 1
+
+    def unsubscribe(self, topic, callback=None, target_cls=None):
+        """
+        Invoked to unsubscribe from a topic
+        :param topic: topic to unsubscribe from
+        :param callback:  the callback used when subscribing to the topic, if any
+        :param target_cls: the target class used when subscribing to the topic, if any
+        :return: None on success or Exception on failure
+        """
+        log.debug("Unsubscribing-to-topic", topic=topic)
+
+        try:
+            self.kafka_proxy.unsubscribe(topic,
+                                         self._enqueue_received_group_message)
+
+            if callback is None and target_cls is None:
+                log.error("both-call-and-target-cls-cannot-be-none",
+                          topic=topic)
+                raise KafkaMessagingError(
+                    error="both-call-and-target-cls-cannot-be-none")
+
+            if target_cls is not None and topic in self.topic_target_cls_map:
+                del self.topic_target_cls_map[topic]
+
+            if callback is not None and topic in self.topic_callback_map:
+                index = 0
+                for cb in self.topic_callback_map[topic]:
+                    if cb == callback:
+                        break
+                    index += 1
+                if index < len(self.topic_callback_map[topic]):
+                    self.topic_callback_map[topic].pop(index)
+
+                if len(self.topic_callback_map[topic]) == 0:
+                    del self.topic_callback_map[topic]
+        except Exception as e:
+            log.exception("Exception-when-unsubscribing-to-topic", topic=topic,
+                          e=e)
+            return e
+
+    @inlineCallbacks
+    def _enqueue_received_group_message(self, msg):
+        """
+        Internal method to continuously queue all received messages
+        irrespective of topic
+        :param msg: Received message
+        :return: None on success, Exception on failure
+        """
+        try:
+            log.debug("received-msg", msg=msg)
+            yield self.received_msg_queue.put(msg)
+        except Exception as e:
+            log.exception("Failed-enqueueing-received-message", e=e)
+
+    @inlineCallbacks
+    def _received_message_processing_loop(self):
+        """
+        Internal method to continuously process all received messages one
+        at a time
+        :return: None on success, Exception on failure
+        """
+        while True:
+            try:
+                message = yield self.received_msg_queue.get()
+                yield self._process_message(message)
+                if self.stopped:
+                    break
+            except Exception as e:
+                log.exception("Failed-dequeueing-received-message", e=e)
+
+    def _to_string(self, unicode_str):
+        if unicode_str is not None:
+            if isinstance(unicode_str, unicode):
+                return unicode_str.encode('ascii', 'ignore')
+            else:
+                return unicode_str
+        else:
+            return None
+
+    def _format_request(self,
+                        rpc,
+                        to_topic,
+                        reply_topic,
+                        **kwargs):
+        """
+        Format a request to send over kafka
+        :param rpc: Requested remote API
+        :param to_topic: Topic to send the request
+        :param reply_topic: Topic to receive the resulting response, if any
+        :param kwargs: Dictionary of key-value pairs to pass as arguments to
+        the remote rpc API.
+        :return: An InterContainerMessage message type on success or None on
+        failure
+        """
+        try:
+            transaction_id = uuid4().hex
+            request = InterContainerMessage()
+            request_body = InterContainerRequestBody()
+            request.header.id = transaction_id
+            request.header.type = MessageType.Value("REQUEST")
+            request.header.from_topic = reply_topic
+            request.header.to_topic = to_topic
+
+            response_required = False
+            if reply_topic:
+                request_body.reply_to_topic = reply_topic
+                request_body.response_required = True
+                response_required = True
+
+            request.header.timestamp = int(round(time.time() * 1000))
+            request_body.rpc = rpc
+            for a, b in kwargs.iteritems():
+                arg = Argument()
+                arg.key = a
+                try:
+                    arg.value.Pack(b)
+                    request_body.args.extend([arg])
+                except Exception as e:
+                    log.exception("Failed-parsing-value", e=e)
+            request.body.Pack(request_body)
+            return request, transaction_id, response_required
+        except Exception as e:
+            log.exception("formatting-request-failed",
+                          rpc=rpc,
+                          to_topic=to_topic,
+                          reply_topic=reply_topic,
+                          args=kwargs)
+            return None, None, None
+
+    def _format_response(self, msg_header, msg_body, status):
+        """
+        Format a response
+        :param msg_header: The header portion of a received request
+        :param msg_body: The response body
+        :param status: True if this represents a successful response
+        :return: an InterContainerMessage message type
+        """
+        try:
+            assert isinstance(msg_header, Header)
+            response = InterContainerMessage()
+            response_body = InterContainerResponseBody()
+            response.header.id = msg_header.id
+            response.header.timestamp = int(
+                round(time.time() * 1000))
+            response.header.type = MessageType.Value("RESPONSE")
+            response.header.from_topic = msg_header.to_topic
+            response.header.to_topic = msg_header.from_topic
+            if msg_body is not None:
+                response_body.result.Pack(msg_body)
+            response_body.success = status
+            response.body.Pack(response_body)
+            return response
+        except Exception as e:
+            log.exception("formatting-response-failed", header=msg_header,
+                          body=msg_body, status=status, e=e)
+            return None
+
+    def _parse_response(self, msg):
+        try:
+            message = InterContainerMessage()
+            message.ParseFromString(msg)
+            resp = InterContainerResponseBody()
+            if message.body.Is(InterContainerResponseBody.DESCRIPTOR):
+                message.body.Unpack(resp)
+            else:
+                log.debug("unsupported-msg", msg_type=type(message.body))
+                return None
+            log.debug("parsed-response", input=message, output=resp)
+            return resp
+        except Exception as e:
+            log.exception("parsing-response-failed", msg=msg, e=e)
+            return None
+
+    @inlineCallbacks
+    def _process_message(self, m):
+        """
+        Default internal method invoked for every batch of messages received
+        from Kafka.
+        """
+
+        def _toDict(args):
+            """
+            Convert a repeated Argument type into a python dictionary
+            :param args: Repeated core_adapter.Argument type
+            :return: a python dictionary
+            """
+            if args is None:
+                return None
+            result = {}
+            for arg in args:
+                assert isinstance(arg, Argument)
+                result[arg.key] = arg.value
+            return result
+
+        current_time = int(round(time.time() * 1000))
+        # log.debug("Got Message", message=m)
+        try:
+            val = m.value()
+            # val = m.message.value
+            # print m.topic
+
+            # Go over customized callbacks first
+            m_topic = m.topic()
+            if m_topic in self.topic_callback_map:
+                for c in self.topic_callback_map[m_topic]:
+                    yield c(val)
+
+            #  Check whether we need to process request/response scenario
+            if m_topic not in self.topic_target_cls_map:
+                return
+
+            # Process request/response scenario
+            message = InterContainerMessage()
+            message.ParseFromString(val)
+
+            if message.header.type == MessageType.Value("REQUEST"):
+                # Get the target class for that specific topic
+                targetted_topic = self._to_string(message.header.to_topic)
+                msg_body = InterContainerRequestBody()
+                if message.body.Is(InterContainerRequestBody.DESCRIPTOR):
+                    message.body.Unpack(msg_body)
+                else:
+                    log.debug("unsupported-msg", msg_type=type(message.body))
+                    return
+                if targetted_topic in self.topic_target_cls_map:
+                    if msg_body.args:
+                        log.debug("message-body-args-present", body=msg_body)
+                        (status, res) = yield getattr(
+                            self.topic_target_cls_map[targetted_topic],
+                            self._to_string(msg_body.rpc))(
+                            **_toDict(msg_body.args))
+                    else:
+                        log.debug("message-body-args-absent", body=msg_body,
+                                  rpc=msg_body.rpc)
+                        (status, res) = yield getattr(
+                            self.topic_target_cls_map[targetted_topic],
+                            self._to_string(msg_body.rpc))()
+                    if msg_body.response_required:
+                        response = self._format_response(
+                            msg_header=message.header,
+                            msg_body=res,
+                            status=status,
+                        )
+                        if response is not None:
+                            res_topic = self._to_string(
+                                response.header.to_topic)
+                            self._send_kafka_message(res_topic, response)
+
+                        log.debug("Response-sent", response=response.body,
+                                  to_topic=res_topic)
+            elif message.header.type == MessageType.Value("RESPONSE"):
+                trns_id = self._to_string(message.header.id)
+                if trns_id in self.transaction_id_deferred_map:
+                    resp = self._parse_response(val)
+
+                    self.transaction_id_deferred_map[trns_id].callback(resp)
+            else:
+                log.error("!!INVALID-TRANSACTION-TYPE!!")
+
+        except Exception as e:
+            log.exception("Failed-to-process-message", message=m, e=e)
+
+    @inlineCallbacks
+    def _send_kafka_message(self, topic, msg):
+        try:
+            yield self.kafka_proxy.send_message(topic, msg.SerializeToString())
+        except Exception as e:
+            log.exception("Failed-sending-message", message=msg, e=e)
+
+    @inlineCallbacks
+    def send_request(self,
+                     rpc,
+                     to_topic,
+                     reply_topic=None,
+                     callback=None,
+                     **kwargs):
+        """
+        Invoked to send a message to a remote container and receive a
+        response if required.
+        :param rpc: The remote API to invoke
+        :param to_topic: Send the message to this kafka topic
+        :param reply_topic: If not None then a response is expected on this
+        topic.  If set to None then no response is required.
+        :param callback: Callback to invoke when a response is received.
+        :param kwargs: Key-value pairs representing arguments to pass to the
+        rpc remote API.
+        :return: None if no response is required; otherwise the response is
+        delivered via the callback, if one is supplied, or returned as a
+        (status, result) tuple
+        """
+        try:
+            # Ensure all strings are not unicode encoded
+            rpc = self._to_string(rpc)
+            to_topic = self._to_string(to_topic)
+            reply_topic = self._to_string(reply_topic)
+
+            request, transaction_id, response_required = \
+                self._format_request(
+                    rpc=rpc,
+                    to_topic=to_topic,
+                    reply_topic=reply_topic,
+                    **kwargs)
+
+            if request is None:
+                return
+
+            # Add the transaction to the transaction map before sending the
+            # request.  This will guarantee the eventual response will be
+            # processed.
+            wait_for_result = None
+            if response_required:
+                wait_for_result = Deferred()
+                self.transaction_id_deferred_map[
+                    self._to_string(request.header.id)] = wait_for_result
+
+            yield self._send_kafka_message(to_topic, request)
+            log.debug("message-sent", to_topic=to_topic,
+                      from_topic=reply_topic)
+
+            if response_required:
+                res = yield wait_for_result
+
+                # Remove the transaction from the transaction map now that a
+                # response has arrived, even a failed one, so the map does
+                # not leak entries.
+                del self.transaction_id_deferred_map[transaction_id]
+
+                if res is None or not res.success:
+                    raise KafkaMessagingError(
+                        error="Failed-response:{}".format(res))
+
+                log.debug("send-message-response", rpc=rpc, result=res)
+
+                if callback:
+                    callback((res.success, res.result))
+                else:
+                    returnValue((res.success, res.result))
+        except Exception as e:
+            log.exception("Exception-sending-request", e=e)
+            raise KafkaMessagingError(error=e)
+
+
+# Common method to get the singleton instance of the kafka proxy class
+def get_messaging_proxy():
+    return IKafkaMessagingProxy._kafka_messaging_instance
diff --git a/python/kafka/kafka_proxy.py b/python/kafka/kafka_proxy.py
new file mode 100644
index 0000000..64da9a8
--- /dev/null
+++ b/python/kafka/kafka_proxy.py
@@ -0,0 +1,338 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from confluent_kafka import Producer as _kafkaProducer
+from structlog import get_logger
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.threads import deferToThread
+from zope.interface import implementer
+
+from common.utils.consulhelpers import get_endpoint_from_consul
+from event_bus_publisher import EventBusPublisher
+from voltha.core.registry import IComponent
+from confluent_kafka import Consumer, KafkaError
+import threading
+
+log = get_logger()
+
+
+@implementer(IComponent)
+class KafkaProxy(object):
+    """
+    This is a singleton kafka proxy class that hides the kafka client details.
+    This proxy uses confluent-kafka-python as the kafka client. Since that
+    client is not a Twisted client, requests to it are wrapped with
+    twisted.internet.threads.deferToThread to avoid any potential blocking of
+    the Twisted event loop.
+    """
+    _kafka_instance = None
+
+    def __init__(self,
+                 consul_endpoint='localhost:8500',
+                 kafka_endpoint='localhost:9092',
+                 ack_timeout=1000,
+                 max_req_attempts=10,
+                 consumer_poll_timeout=10,
+                 config=None):
+
+        # raise an exception if the singleton already exists
+        if KafkaProxy._kafka_instance:
+            raise Exception('Singleton exist for :{}'.format(KafkaProxy))
+
+        log.debug('initializing', endpoint=kafka_endpoint)
+        self.ack_timeout = ack_timeout
+        self.max_req_attempts = max_req_attempts
+        self.consul_endpoint = consul_endpoint
+        self.kafka_endpoint = kafka_endpoint
+        self.config = config if config is not None else {}
+        self.kclient = None
+        self.kproducer = None
+        self.event_bus_publisher = None
+        self.stopping = False
+        self.faulty = False
+        self.consumer_poll_timeout = consumer_poll_timeout
+        self.topic_consumer_map = {}
+        self.topic_callbacks_map = {}
+        self.topic_any_map_lock = threading.Lock()
+        log.debug('initialized', endpoint=kafka_endpoint)
+
+    @inlineCallbacks
+    def start(self):
+        log.debug('starting')
+        self._get_kafka_producer()
+        KafkaProxy._kafka_instance = self
+        self.event_bus_publisher = yield EventBusPublisher(
+            self, self.config.get('event_bus_publisher', {})).start()
+        log.info('started')
+        self.faulty = False
+        self.stopping = False
+        returnValue(self)
+
+    @inlineCallbacks
+    def stop(self):
+        try:
+            log.debug('stopping-kafka-proxy')
+            self.stopping = True
+            try:
+                if self.kclient:
+                    yield self.kclient.close()
+                    self.kclient = None
+                    log.debug('stopped-kclient-kafka-proxy')
+            except Exception as e:
+                log.exception('failed-stopped-kclient-kafka-proxy', e=e)
+
+            try:
+                if self.kproducer:
+                    yield self.kproducer.flush()
+                    self.kproducer = None
+                    log.debug('stopped-kproducer-kafka-proxy')
+            except Exception as e:
+                log.exception('failed-stopped-kproducer-kafka-proxy', e=e)
+
+            # Stop all consumers
+            try:
+                self.topic_any_map_lock.acquire()
+                log.debug('stopping-consumers-kafka-proxy')
+                for _, c in self.topic_consumer_map.iteritems():
+                    yield deferToThread(c.close)
+                self.topic_consumer_map.clear()
+                self.topic_callbacks_map.clear()
+                log.debug('stopped-consumers-kafka-proxy')
+            except Exception as e:
+                log.exception('failed-stopped-consumers-kafka-proxy', e=e)
+            finally:
+                self.topic_any_map_lock.release()
+                log.debug('stopping-consumers-kafka-proxy-released-lock')
+
+            # try:
+            #    if self.event_bus_publisher:
+            #        yield self.event_bus_publisher.stop()
+            #        self.event_bus_publisher = None
+            #        log.debug('stopped-event-bus-publisher-kafka-proxy')
+            # except Exception, e:
+            #    log.debug('failed-stopped-event-bus-publisher-kafka-proxy')
+            #    pass
+
+            log.debug('stopped-kafka-proxy')
+
+        except Exception as e:
+            self.kclient = None
+            self.kproducer = None
+            # self.event_bus_publisher = None
+            log.exception('failed-stopped-kafka-proxy', e=e)
+
+    def _get_kafka_producer(self):
+        try:
+            if self.kafka_endpoint.startswith('@'):
+                try:
+                    _k_endpoint = get_endpoint_from_consul(self.consul_endpoint,
+                                                           self.kafka_endpoint[
+                                                           1:])
+                    log.debug('found-kafka-service', endpoint=_k_endpoint)
+
+                except Exception as e:
+                    log.exception('no-kafka-service-in-consul', e=e)
+
+                    self.kproducer = None
+                    self.kclient = None
+                    return
+            else:
+                _k_endpoint = self.kafka_endpoint
+            self.kproducer = _kafkaProducer(
+                {'bootstrap.servers': _k_endpoint}
+            )
+        except Exception as e:
+            log.exception('failed-get-kafka-producer', e=e)
+            return
+
+    @inlineCallbacks
+    def _wait_for_messages(self, consumer, topic):
+        while True:
+            try:
+                msg = yield deferToThread(consumer.poll,
+                                          self.consumer_poll_timeout)
+
+                if self.stopping:
+                    log.debug("stop-request-recieved", topic=topic)
+                    break
+
+                if msg is None:
+                    continue
+                if msg.error():
+                    # This typically is received when there are no more messages
+                    # to read from kafka. Ignore.
+                    continue
+
+                # Invoke callbacks
+                for cb in self.topic_callbacks_map[topic]:
+                    yield cb(msg)
+            except Exception as e:
+                log.debug("exception-receiving-msg", topic=topic, e=e)
+
+    @inlineCallbacks
+    def subscribe(self, topic, callback, groupId, offset='latest'):
+        """
+        subscribe allows a caller to subscribe to a given kafka topic.  This API
+        always creates a group consumer.
+        :param topic - the topic to subscribe to
+        :param callback - the callback to invoke whenever a message is received
+        on that topic
+        :param groupId - the groupId for this consumer.  In the current
+        implementation there is a one-to-one mapping between a topic and a
+        groupId.  In other words, once a groupId is used for a given topic then
+        we won't be able to create another groupId for the same topic.
+        :param offset:  the kafka offset from where the consumer will start
+        consuming messages
+        """
+        try:
+            self.topic_any_map_lock.acquire()
+            if topic in self.topic_consumer_map:
+                # Just add the callback
+                if topic in self.topic_callbacks_map:
+                    self.topic_callbacks_map[topic].append(callback)
+                else:
+                    self.topic_callbacks_map[topic] = [callback]
+                return
+
+            # Create consumer for that topic
+            c = Consumer({
+                'bootstrap.servers': self.kafka_endpoint,
+                'group.id': groupId,
+                'auto.offset.reset': offset
+            })
+            yield deferToThread(c.subscribe, [topic])
+            # c.subscribe([topic])
+            self.topic_consumer_map[topic] = c
+            self.topic_callbacks_map[topic] = [callback]
+            # Start the consumer
+            reactor.callLater(0, self._wait_for_messages, c, topic)
+        except Exception as e:
+            log.exception("topic-subscription-error", e=e)
+        finally:
+            self.topic_any_map_lock.release()
+
+    @inlineCallbacks
+    def unsubscribe(self, topic, callback):
+        """
+        Unsubscribe from a given topic.  Since there may be multiple callers
+        consuming from the same topic, the callback is used as a differentiator
+        to ensure only the relevant caller gets unsubscribed.  The kafka
+        consumer will be closed when no callbacks remain.
+        :param topic: topic to unsubscribe from
+        :param callback: callback the caller used when subscribing to the topic.
+        If multiple callers have subscribed to a topic using the same callback
+        then the first callback on the list will be removed.
+        :return: None
+        """
+        try:
+            self.topic_any_map_lock.acquire()
+            log.debug("unsubscribing-to-topic", topic=topic)
+            if topic in self.topic_callbacks_map:
+                index = 0
+                for cb in self.topic_callbacks_map[topic]:
+                    if cb == callback:
+                        break
+                    index += 1
+                if index < len(self.topic_callbacks_map[topic]):
+                    self.topic_callbacks_map[topic].pop(index)
+
+                if len(self.topic_callbacks_map[topic]) == 0:
+                    # Stop the consumer
+                    if topic in self.topic_consumer_map:
+                        yield deferToThread(
+                            self.topic_consumer_map[topic].close)
+                        del self.topic_consumer_map[topic]
+                    del self.topic_callbacks_map[topic]
+                    log.debug("unsubscribed-to-topic", topic=topic)
+                else:
+                    log.debug("consumers-for-topic-still-exist", topic=topic,
+                              num=len(self.topic_callbacks_map[topic]))
+        except Exception, e:
+            log.exception("topic-unsubscription-error", e=e)
+        finally:
+            self.topic_any_map_lock.release()
+            log.debug("unsubscribing-to-topic-release-lock", topic=topic)
+
+    @inlineCallbacks
+    def send_message(self, topic, msg, key=None):
+        assert topic is not None
+        assert msg is not None
+
+        # first check whether we have a kafka producer.  If there is none
+        # then try to get one - this happens only when we try to lookup the
+        # kafka service from consul
+        try:
+            if self.faulty is False:
+
+                if self.kproducer is None:
+                    self._get_kafka_producer()
+                    # Let the next message request retry if this is still failing
+                    if self.kproducer is None:
+                        log.error('no-kafka-producer',
+                                  endpoint=self.kafka_endpoint)
+                        return
+
+                log.debug('sending-kafka-msg', topic=topic, kafka_msg=msg)
+
+                if self.kproducer is not None and self.event_bus_publisher and self.faulty is False:
+                    d = deferToThread(self.kproducer.produce, topic, msg, key)
+                    yield d
+                    log.debug('sent-kafka-msg', topic=topic, kafka_msg=msg)
+                    # send a lightweight poll to avoid an exception after 100k messages.
+                    d1 = deferToThread(self.kproducer.poll, 0)
+                    yield d1
+                else:
+                    return
+
+        except Exception as e:
+            self.faulty = True
+            log.error('failed-to-send-kafka-msg', topic=topic, kafka_msg=msg,
+                      e=e)
+
+            # set the kafka producer to None.  This is needed if the
+            # kafka docker went down and comes back up with a different
+            # port number.
+            if self.stopping is False:
+                log.debug('stopping-kafka-proxy')
+                try:
+                    self.stopping = True
+                    self.stop()
+                    self.stopping = False
+                    self.faulty = False
+                    log.debug('stopped-kafka-proxy')
+                except Exception as e:
+                    log.exception('failed-stopping-kafka-proxy', e=e)
+            else:
+                log.info('already-stopping-kafka-proxy')
+
+            return
+
+    def is_faulty(self):
+        return self.faulty
+
+
+# Common method to get the singleton instance of the kafka proxy class
+def get_kafka_proxy():
+    return KafkaProxy._kafka_instance
diff --git a/python/protos/Makefile b/python/protos/Makefile
new file mode 100644
index 0000000..0fad970
--- /dev/null
+++ b/python/protos/Makefile
@@ -0,0 +1,101 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Makefile to build all protobuf and gRPC related artifacts
+
+default: third_party build
+
+PROTO_FILES := $(wildcard ../../protos/*.proto)
+PROTO_GOOGLE_API := $(wildcard third_party/google/api/*.proto)
+PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
+PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GRPC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2_grpc.py,$(f)))
+PROTO_DESC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,.desc,$(f)))
+
+PROTOC_PREFIX := /usr/local
+PROTOC_LIBDIR := $(PROTOC_PREFIX)/lib
+
+PROTOC := $(PROTOC_PREFIX)/bin/protoc
+
+PROTOC_VERSION := "3.3.0"
+PROTOC_DOWNLOAD_PREFIX := "https://github.com/google/protobuf/releases/download"
+PROTOC_DIR := protobuf-$(PROTOC_VERSION)
+PROTOC_TARBALL := protobuf-python-$(PROTOC_VERSION).tar.gz
+PROTOC_DOWNLOAD_URI := $(PROTOC_DOWNLOAD_PREFIX)/v$(PROTOC_VERSION)/$(PROTOC_TARBALL)
+PROTOC_BUILD_TMP_DIR := "/tmp/protobuf-build-$(shell uname -s | tr '[:upper:]' '[:lower:]')"
+
+# Google API needs to be built from within the third party directory
+#
+third_party: google_api
+google_api:
+	@echo "Building protocol buffer artifacts from third_party google api"
+	cd third_party ; \
+	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
+	    -I. \
+	    --python_out=. \
+	    --grpc_python_out=. \
+	    --descriptor_set_out=google/api/annotations.desc \
+	    --include_imports \
+	    --include_source_info \
+        google/api/annotations.proto google/api/http.proto
+
+build: $(PROTOC) $(PROTO_PB2_FILES)
+
+%_pb2.py: %.proto Makefile
+	@echo "Building protocol buffer artifacts from $<"
+	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
+	    -I../../protos \
+	    -I./third_party \
+	    --python_out=. \
+	    --grpc_python_out=. \
+	    --descriptor_set_out=./$(basename $(notdir $<)).desc \
+	    --include_imports \
+	    --include_source_info \
+	    $<
+
+clean:
+	rm -f *.desc *_pb2* \
+		$(PROTO_PB2_GOOGLE_API) \
+		$(PROTO_PB2_GRPC_GOOGLE_API)\
+		$(PROTO_DESC_GOOGLE_API)
+
+$(PROTOC):
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+	@echo "It looks like you don't have protocol buffer tools installed."
+	@echo "To install the protocol buffer toolchain, you can run:"
+	@echo "    make install-protoc"
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+
+install-protoc: $(PROTOC)
+	@echo "Downloading and installing protocol buffer support."
+	@echo "Installation will require sodo priviledges"
+	@echo "This will take a few minutes."
+	mkdir -p $(PROTOC_BUILD_TMP_DIR)
+	@echo "We ask for sudo credentials now so we can install at the end"; \
+	sudo echo "Thanks"; \
+	    cd $(PROTOC_BUILD_TMP_DIR); \
+	    wget $(PROTOC_DOWNLOAD_URI); \
+	    tar xzvf $(PROTOC_TARBALL); \
+	    cd $(PROTOC_DIR); \
+	    ./configure --prefix=$(PROTOC_PREFIX); \
+	    make; \
+	    sudo make install
+
+uninstall-protoc:
+	cd $(PROTOC_BUILD_TMP_DIR)/$(PROTOC_DIR); \
+	    sudo make uninstall
+
diff --git a/python/protos/third_party/__init__.py b/python/protos/third_party/__init__.py
new file mode 100644
index 0000000..3b654af
--- /dev/null
+++ b/python/protos/third_party/__init__.py
@@ -0,0 +1,53 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+This helps load http_pb2 and annotations_pb2.
+Without this, the Python importer will not be able to process the lines:
+from google.api import http_pb2 or
+from google.api import annotations_pb2
+(Without importing these, the protobuf loader will not recognize http options
+in the protobuf definitions.)
+"""
+
+from importlib import import_module
+import os
+import sys
+
+
+class GoogleApiImporter(object):
+
+    def find_module(self, full_name, path=None):
+        if full_name == 'google.api':
+            self.path = [os.path.dirname(__file__)]
+            return self
+
+    def load_module(self, name):
+        if name in sys.modules:
+            return sys.modules[name]
+        full_name = 'voltha.protos.third_party.' + name
+        import_module(full_name)
+        module = sys.modules[full_name]
+        sys.modules[name] = module
+        return module
+
+
+sys.meta_path.append(GoogleApiImporter())
+try:
+    from google.api import http_pb2, annotations_pb2
+    _ = http_pb2, annotations_pb2
+except AssertionError:
+    pass
diff --git a/python/protos/third_party/google/LICENSE b/python/protos/third_party/google/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/python/protos/third_party/google/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/python/protos/third_party/google/__init__.py b/python/protos/third_party/google/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/protos/third_party/google/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/protos/third_party/google/api/__init__.py b/python/protos/third_party/google/api/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/protos/third_party/google/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/protos/third_party/google/api/annotations.proto b/python/protos/third_party/google/api/annotations.proto
new file mode 100644
index 0000000..cbd18b8
--- /dev/null
+++ b/python/protos/third_party/google/api/annotations.proto
@@ -0,0 +1,29 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+
+extend google.protobuf.MethodOptions {
+  // See `HttpRule`.
+  HttpRule http = 72295728;
+}
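+
+// Illustrative (hypothetical) usage of this annotation on an RPC method;
+// the service and field names below are invented for the example:
+//
+//   service Library {
+//     rpc GetShelf(GetShelfRequest) returns (Shelf) {
+//       option (google.api.http) = { get: "/v1/{name=shelves/*}" };
+//     }
+//   }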
diff --git a/python/protos/third_party/google/api/http.proto b/python/protos/third_party/google/api/http.proto
new file mode 100644
index 0000000..ce07aa1
--- /dev/null
+++ b/python/protos/third_party/google/api/http.proto
@@ -0,0 +1,127 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API
+// methods. The mapping determines what portions of the request message are
+// populated from the path, query parameters, or body of the HTTP request.  The
+// mapping is typically specified as a `google.api.http` annotation; see
+// "google/api/annotations.proto" for details.
+//
+// The mapping consists of a mandatory field specifying a path template and an
+// optional `body` field specifying what data is represented in the HTTP request
+// body. The field name for the path indicates the HTTP method. Example:
+//
+// ```
+// package google.storage.v2;
+//
+// import "google/api/annotations.proto";
+//
+// service Storage {
+//   rpc CreateObject(CreateObjectRequest) returns (Object) {
+//     option (google.api.http) {
+//       post: "/v2/{bucket_name=buckets/*}/objects"
+//       body: "object"
+//     };
+//   };
+// }
+// ```
+//
+// Here `bucket_name` and `object` bind to fields of the request message
+// `CreateObjectRequest`.
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+//    omitted. If omitted, the request is assumed to have no HTTP body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+//    request) can be classified into three types:
+//     (a) Matched in the URL template.
+//     (b) Covered by body (if body is `*`, everything except (a) fields;
+//         else everything under the body field)
+//     (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
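+//
+// Applied to the `CreateObject` example above: `bucket_name` is a type (a)
+// field bound from the URL, the fields of `object` are type (b) fields taken
+// from the request body, and any remaining leaf fields of
+// `CreateObjectRequest` are type (c) fields, populated from URL query
+// parameters.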
+//
+// The syntax of the path template is as follows:
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// `*` matches a single path component, `**` zero or more path components, and
+// `LITERAL` a constant.  A `Variable` can match an entire path as specified
+// again by a template; this nested template must not contain further variables.
+// If no template is given with a variable, it matches a single path component.
+// The notation `{var}` is henceforth equivalent to `{var=*}`.
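+//
+// As an illustrative (non-normative) example, the template
+//
+//     /v1/{name=shelves/*}/books/{book_id}
+//
+// matches the path `/v1/shelves/shelf1/books/book2`, binding the field
+// `name` to `shelves/shelf1` and the field `book_id` to `book2`; the field
+// names here are hypothetical.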
+//
+// Use CustomHttpPattern to specify any HTTP method that is not included in the
+// pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for
+// a given URL path rule. The wild-card rule is useful for services that provide
+// content to Web (HTML) clients.
+message HttpRule {
+
+  // Determines the URL pattern that is matched by this rule. The pattern can
+  // be used with any of the {get|put|post|delete|patch} methods. A custom
+  // method can be defined using the 'custom' field.
+  oneof pattern {
+    // Used for listing and getting information about resources.
+    string get = 2;
+
+    // Used for updating a resource.
+    string put = 3;
+
+    // Used for creating a resource.
+    string post = 4;
+
+    // Used for deleting a resource.
+    string delete = 5;
+
+    // Used for partially updating a resource.
+    string patch = 6;
+
+    // Custom pattern is used for defining custom verbs.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP body, or
+  // `*` for mapping all fields not captured by the path pattern to the HTTP
+  // body.
+  string body = 7;
+
+  // Additional HTTP bindings for the selector. Nested bindings must not
+  // specify a selector and must not contain additional bindings.
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
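+
+// As a hypothetical sketch, a custom "WATCH" verb could be bound to a method
+// with the following option (the field name `resource` is invented for the
+// example):
+//
+//   option (google.api.http) = {
+//     custom: { kind: "WATCH" path: "/v1/{resource=**}" }
+//   };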
diff --git a/python/requirements.txt b/python/requirements.txt
new file mode 100644
index 0000000..56cb356
--- /dev/null
+++ b/python/requirements.txt
@@ -0,0 +1,69 @@
+argparse==1.2.1
+arrow==0.10.0
+bitstring==3.1.5
+cmd2==0.7.0
+colorama==0.3.9
+confluent-kafka==0.11.5
+cython==0.24.1
+decorator==4.1.2
+docker-py==1.10.6
+fluent-logger==0.6.0
+grpc==0.3.post19
+grpcio==1.3.5
+grpcio-tools==1.3.5
+hash_ring==1.3.1
+hexdump==3.3
+jinja2==2.8
+jsonpatch==1.16
+kafka_python==1.3.5
+klein==17.10.0
+kubernetes==5.0.0
+netaddr==0.7.19
+networkx==2.0
+nose==1.3.7
+nose-exclude==0.5.0
+nose-testconfig==0.10
+mock==2.0.0
+netifaces==0.10.6
+pcapy==0.11.1
+pep8==1.7.1
+pep8-naming>=0.3.3
+protobuf==3.3.0
+protobuf-to-dict==0.1.0
+pyflakes==1.6.0
+pylint==1.7.6
+#pypcap>=1.1.5
+pyOpenSSL==17.3.0
+PyYAML==3.12
+requests==2.18.4
+scapy==2.3.3
+service-identity==17.0.0
+simplejson==3.12.0
+jsonschema==2.6.0
+six==1.11.0
+structlog==17.2.0
+termcolor==1.1.0
+transitions==0.6.4
+treq==17.8.0
+Twisted==17.9.0
+txaioetcd==0.3.0
+urllib3==1.22
+pyang==1.7.3
+lxml==3.6.4
+nosexcover==1.0.11
+zmq==0.0.0
+pyzmq==16.0.3
+txZMQ==0.8.0
+ncclient==0.5.3
+xmltodict==0.11.0
+dicttoxml==1.7.4
+etcd3==0.7.0
+pyparsing==2.2.0
+packaging==17.1
+
+# python-consul>=0.6.1: we need the pre-release version for now, because 0.6.1
+# does not yet support Twisted. Once Twisted support is released (expected as
+# version 0.6.2), this git dependency can be replaced with a normal pin.
+git+https://github.com/cablehead/python-consul.git
+
+# Twisted Python kafka client
+git+https://github.com/ciena/afkak.git