VOL-1452 Changes to make runnable with old openolt code

Updates mostly to make the openolt adapter talk to the core.
Adding logical devices still has to be implemented.

Change-Id: I3140af196eb38d8beb225a864b1fc42fe5242329
diff --git a/python/adapters/kafka/kafka_inter_container_library.py b/python/adapters/kafka/kafka_inter_container_library.py
index 5cad2e8..a4719d4 100644
--- a/python/adapters/kafka/kafka_inter_container_library.py
+++ b/python/adapters/kafka/kafka_inter_container_library.py
@@ -24,9 +24,9 @@
 from zope.interface import implementer
 
 from python.common.utils import asleep
-from python.common.utils.registry import IComponent
+from voltha.common.utils.registry import IComponent
 from kafka_proxy import KafkaProxy, get_kafka_proxy
-from python.protos.inter_container_pb2 import MessageType, Argument, \
+from voltha.protos.inter_container_pb2 import MessageType, Argument, \
     InterContainerRequestBody, InterContainerMessage, Header, \
     InterContainerResponseBody
 
diff --git a/python/adapters/kafka/kafka_proxy.py b/python/adapters/kafka/kafka_proxy.py
index cefc590..8c4eeb8 100644
--- a/python/adapters/kafka/kafka_proxy.py
+++ b/python/adapters/kafka/kafka_proxy.py
@@ -24,7 +24,7 @@
 
 from python.common.utils.consulhelpers import get_endpoint_from_consul
 from event_bus_publisher import EventBusPublisher
-from python.common.utils.registry import IComponent
+from voltha.common.utils.registry import IComponent
 from confluent_kafka import Consumer, KafkaError
 import threading
 
diff --git a/python/adapters/openolt/main.py b/python/adapters/openolt/main.py
index 273ff13..3af6421 100755
--- a/python/adapters/openolt/main.py
+++ b/python/adapters/openolt/main.py
@@ -29,22 +29,22 @@
 from twisted.internet.task import LoopingCall
 from zope.interface import implementer
 
-from python.common.structlog_setup import setup_logging, update_logging
-from python.common.utils.asleep import asleep
-from python.common.utils.deferred_utils import TimeOutError
-from python.common.utils.dockerhelpers import get_my_containers_name
-from python.common.utils.nethelpers import get_my_primary_local_ipv4, \
+from voltha.common.structlog_setup import setup_logging, update_logging
+from voltha.common.utils.asleep import asleep
+from voltha.common.utils.deferred_utils import TimeOutError
+from voltha.common.utils.dockerhelpers import get_my_containers_name
+from voltha.common.utils.nethelpers import get_my_primary_local_ipv4, \
     get_my_primary_interface
-from python.common.utils.registry import registry, IComponent
-from python.adapters.kafka.adapter_proxy import AdapterProxy
-from python.adapters.kafka.adapter_request_facade import AdapterRequestFacade
-from python.adapters.kafka.core_proxy import CoreProxy
-from python.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+from voltha.common.utils.registry import registry, IComponent
+from voltha.adapters.kafka.adapter_proxy import AdapterProxy
+from voltha.adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from voltha.adapters.kafka.core_proxy import CoreProxy
+from voltha.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
     get_messaging_proxy
-from python.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from voltha.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
 from openolt import OpenoltAdapter
-from python.protos import third_party
-from python.protos.adapter_pb2 import AdapterConfig
+from voltha.protos import third_party
+from voltha.protos.adapter_pb2 import AdapterConfig
 
 _ = third_party
 
@@ -241,6 +241,7 @@
         dir = os.path.dirname(os.path.abspath(__file__))
         path = os.path.join(dir, path)
     path = os.path.abspath(path)
+
     with open(path) as fd:
         config = yaml.load(fd)
     return config
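For reference, the load_config() context above resolves a relative path against the adapter's own directory before parsing the YAML. The same pattern as a self-contained sketch (a hypothetical standalone function; yaml.safe_load stands in for the tree's yaml.load call):

import os
import yaml

def load_config(path):
    # Resolve a relative './file.yml' against this module's directory,
    # as load_config() in main.py does.
    if path.startswith('.'):
        base = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(base, path)
    path = os.path.abspath(path)
    with open(path) as fd:
        # safe_load parses plain YAML without instantiating arbitrary
        # Python objects; the adapter itself calls yaml.load.
        return yaml.safe_load(fd)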
diff --git a/python/adapters/openolt/openolt.py b/python/adapters/openolt/openolt.py
index 2c87730..3a410ae 100644
--- a/python/adapters/openolt/openolt.py
+++ b/python/adapters/openolt/openolt.py
@@ -32,45 +32,51 @@
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.task import LoopingCall
 
-from python.adapters.common.frameio.frameio import BpfProgramFilter, hexify
-from python.adapters.iadapter import OltAdapter
-from python.common.utils.asleep import asleep
-from python.common.utils.registry import registry
-from python.adapters.kafka.kafka_proxy import get_kafka_proxy
-from python.protos import openolt_pb2
-from python.protos import third_party
-from python.protos.common_pb2 import OperStatus, ConnectStatus
-from python.protos.common_pb2 import LogLevel
-from python.protos.common_pb2 import OperationResp
-from python.protos.inter_container_pb2 import SwitchCapability, PortCapability, \
+from voltha.adapters.common.frameio.frameio import BpfProgramFilter, hexify
+from voltha.adapters.iadapter import OltAdapter
+from voltha.common.utils.asleep import asleep
+from voltha.common.utils.registry import registry
+from voltha.adapters.kafka.kafka_proxy import get_kafka_proxy
+from voltha.protos import openolt_pb2
+from voltha.protos import third_party
+from voltha.protos.common_pb2 import OperStatus, ConnectStatus
+from voltha.protos.common_pb2 import LogLevel
+from voltha.protos.common_pb2 import OperationResp
+from voltha.protos.inter_container_pb2 import SwitchCapability, PortCapability, \
     InterAdapterMessageType, InterAdapterResponseBody
-from python.protos.device_pb2 import Port, PmConfig, PmConfigs, \
+from voltha.protos.device_pb2 import Port, PmConfig, PmConfigs, \
     DeviceType, DeviceTypes
-from python.protos.adapter_pb2 import Adapter
-from python.protos.adapter_pb2 import AdapterConfig
-
+from voltha.protos.adapter_pb2 import Adapter
+from voltha.protos.adapter_pb2 import AdapterConfig
+from voltha.adapters.openolt.openolt_flow_mgr import OpenOltFlowMgr
+from voltha.adapters.openolt.openolt_alarms import OpenOltAlarmMgr
+from voltha.adapters.openolt.openolt_statistics import OpenOltStatisticsMgr
+from voltha.adapters.openolt.openolt_bw import OpenOltBW
+from voltha.adapters.openolt.openolt_platform import OpenOltPlatform
+from voltha.adapters.openolt.openolt_resource_manager import OpenOltResourceMgr
+from voltha.adapters.openolt.openolt_device import OpenoltDevice
  
-from python.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
-from python.protos.logical_device_pb2 import LogicalPort
-from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+from voltha.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from voltha.protos.logical_device_pb2 import LogicalPort
+from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
     OFPPF_1GB_FD, \
     OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
     ofp_switch_features, ofp_desc
-from python.protos.openflow_13_pb2 import ofp_port
-from python.protos.ponsim_pb2 import FlowTable, PonSimFrame, PonSimMetricsRequest, PonSimStub
+from voltha.protos.openflow_13_pb2 import ofp_port
+from voltha.protos.ponsim_pb2 import FlowTable, PonSimFrame, PonSimMetricsRequest, PonSimStub
 
 _ = third_party
 log = structlog.get_logger()
-#OpenOltDefaults = {
-#    'support_classes': {
-#        'platform': OpenOltPlatform,
-#        'resource_mgr': OpenOltResourceMgr,
-#        'flow_mgr': OpenOltFlowMgr,
-#        'alarm_mgr': OpenOltAlarmMgr,
-#        'stats_mgr': OpenOltStatisticsMgr,
-#        'bw_mgr': OpenOltBW
-#    }
-#}
+OpenOltDefaults = {
+    'support_classes': {
+        'platform': OpenOltPlatform,
+        'resource_mgr': OpenOltResourceMgr,
+        'flow_mgr': OpenOltFlowMgr,
+        'alarm_mgr': OpenOltAlarmMgr,
+        'stats_mgr': OpenOltStatisticsMgr,
+        'bw_mgr': OpenOltBW
+    }
+}
 
 class AdapterPmMetrics:
     def __init__(self, device):
@@ -416,6 +422,7 @@
         self.pm_metrics = None
         self.alarms = None
         self.frames = None
+        self.num_devices = 0
 
     @inlineCallbacks
     def get_channel(self):
@@ -450,30 +457,32 @@
         ports = yield self.core_proxy.get_ports(self.device_id,
                                                 Port.ETHERNET_NNI)
         returnValue(ports)
+
+    def init_device(self, kwargs):
+        self.device = OpenoltDevice(**kwargs)
 
     @inlineCallbacks
     def activate(self, device):
         try:
             self.log.info('activating')
-	    print (dir(device))
             if not device.host_and_port:
                 device.oper_status = OperStatus.FAILED
                 device.reason = 'No host_and_port field provided'
                 self.core_proxy.device_update(device)
                 return
-	    """        
+
 	    kwargs = {
             	'support_classes': OpenOltDefaults['support_classes'],
-                'adapter_agent': self.adapter_proxy,
+                'adapter_agent': self.core_proxy,
                 'device': device,
                 'device_num': self.num_devices + 1
             }
             try:
-                self.devices[device.id] = OpenoltDevice(**kwargs)
+                yield self.init_device(kwargs)
             except Exception as e:
                 log.error('Failed to adopt OpenOLT device', error=e)
                 # TODO set status to ERROR so that is clear something went wrong
-                del self.devices[device.id]
+                #del self.devices[device.id]
                 raise
             else:
                 self.num_devices += 1
@@ -537,6 +546,7 @@
 
             # Start collecting stats from the device after a brief pause
             self.start_kpi_collection(device.id)
+            """
         except Exception as e:
             log.exception("Exception-activating", e=e)
 
diff --git a/python/adapters/openolt/openolt_alarms.py b/python/adapters/openolt/openolt_alarms.py
index 764a013..aa227d3 100644
--- a/python/adapters/openolt/openolt_alarms.py
+++ b/python/adapters/openolt/openolt_alarms.py
@@ -30,7 +30,7 @@
 from voltha.extensions.alarms.onu.onu_window_drift_alarm import OnuWindowDriftAlarm
 from voltha.extensions.alarms.onu.onu_activation_fail_alarm import OnuActivationFailAlarm
 
-import protos.openolt_pb2 as openolt_pb2
+import voltha.protos.openolt_pb2 as openolt_pb2
 import voltha.protos.device_pb2 as device_pb2
 
 
diff --git a/python/adapters/openolt/openolt_device.py b/python/adapters/openolt/openolt_device.py
index 2779dc6..d3080ba 100644
--- a/python/adapters/openolt/openolt_device.py
+++ b/python/adapters/openolt/openolt_device.py
@@ -19,29 +19,28 @@
 import socket
 import re
 import structlog
+import time
 from twisted.internet import reactor
 from scapy.layers.l2 import Ether, Dot1Q
 from transitions import Machine
 
-#from voltha.adapters.openolt.protos import openolt_pb2_grpc, openolt_pb2
-from python.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
-from python.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
+from voltha.protos import openolt_pb2_grpc, openolt_pb2
 
-from python.adapters.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
+from voltha.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
 
-from python.common.utils.nethelpers import mac_str_to_tuple
-from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+from voltha.common.utils.nethelpers import mac_str_to_tuple
+from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
     OFPPS_LINK_DOWN, OFPPF_1GB_FD, \
     OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
     ofp_switch_features, ofp_port, ofp_port_stats, ofp_desc
-from python.common.utils.registry import registry
-from python.protos import openolt_pb2
-from python.protos import third_party
-from python.protos.common_pb2 import AdminStatus, OperStatus, ConnectStatus
-from python.protos.common_pb2 import LogLevel
-from python.protos.device_pb2 import Port, Device
+from voltha.common.utils.registry import registry
+from voltha.protos import openolt_pb2
+from voltha.protos import third_party
+from voltha.protos.common_pb2 import AdminState, OperStatus, ConnectStatus
+from voltha.protos.common_pb2 import LogLevel
+from voltha.protos.device_pb2 import Port, Device
 
-from python.protos.logical_device_pb2 import LogicalDevice, LogicalPort
+from voltha.protos.logical_device_pb2 import LogicalDevice, LogicalPort
 
 class OpenoltDevice(object):
     """
@@ -96,14 +95,14 @@
         self.alarm_mgr_class = kwargs['support_classes']['alarm_mgr']
         self.stats_mgr_class = kwargs['support_classes']['stats_mgr']
         self.bw_mgr_class = kwargs['support_classes']['bw_mgr']
         is_reconciliation = kwargs.get('reconciliation', False)
         self.device_id = device.id
         self.host_and_port = device.host_and_port
         self.extra_args = device.extra_args
         self.log = structlog.get_logger(id=self.device_id,
                                         ip=self.host_and_port)
-        self.proxy = registry('core').get_proxy('/')
+        #self.proxy = registry('core').get_proxy('/')
 
         self.log.info('openolt-device-init')
 
@@ -119,7 +118,7 @@
             device.root = True
             device.connect_status = ConnectStatus.UNREACHABLE
             device.oper_status = OperStatus.ACTIVATING
-            self.adapter_agent.update_device(device)
+            self.adapter_agent.device_update(device)
 
         # If logical device does exist use it, else create one after connecting to device
         if device.parent_id:
@@ -185,7 +184,7 @@
 
         device = self.adapter_agent.get_device(self.device_id)
         device.serial_number = serial_number
-        self.adapter_agent.update_device(device)
+        self.adapter_agent.device_update(device)
 
         self.dpid = dpid
         self.serial_number = serial_number
@@ -208,6 +207,7 @@
 
     def do_state_init(self, event):
         # Initialize gRPC
+        self.log.debug('host-and-port', host_and_port=self.host_and_port)
         self.channel = grpc.insecure_channel(self.host_and_port)
         self.channel_ready_future = grpc.channel_ready_future(self.channel)
 
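do_state_init() above builds an insecure gRPC channel to the OLT and a readiness future for it. A sketch of that setup in isolation (the address is illustrative):

import grpc

channel = grpc.insecure_channel('192.168.1.100:9191')
ready = grpc.channel_ready_future(channel)
try:
    ready.result(timeout=10)   # block until the endpoint is reachable
    print('channel ready')
except grpc.FutureTimeoutError:
    print('OLT endpoint not reachable yet')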
@@ -227,6 +227,7 @@
             # property instead. The Jinkins error will happon on the reason of
             # Exception in thread Thread-1 (most likely raised # during
             # interpreter shutdown)
+            self.log.debug('starting indications thread')
             self.indications_thread_handle.setDaemon(True)
             self.indications_thread_handle.start()
         except Exception as e:
@@ -239,13 +240,31 @@
 
         self.stub = openolt_pb2_grpc.OpenoltStub(self.channel)
 
-        device_info = self.stub.GetDeviceInfo(openolt_pb2.Empty())
+        delay = 1
+        while True:
+            try:
+                device_info = self.stub.GetDeviceInfo(openolt_pb2.Empty())
+                break
+            except Exception as e:
+                reraise = True
+                if delay > 120:
+                    self.log.error("gRPC failure too many times")
+                else:
+                    self.log.warn("gRPC failure, retry in %ds: %s"
+                                  % (delay, repr(e)))
+                    time.sleep(delay)
+                    delay += delay
+                    reraise = False
+
+                if reraise:
+                    raise
+
         self.log.info('Device connected', device_info=device_info)
 
         self.create_logical_device(device_info)
 
         device.serial_number = self.serial_number
         self.resource_mgr = self.resource_mgr_class(self.device_id,
                                                     self.host_and_port,
                                                     self.extra_args,
@@ -271,7 +290,7 @@
         # TODO: check for uptime and reboot if too long (VOL-1192)
 
         device.connect_status = ConnectStatus.REACHABLE
-        self.adapter_agent.update_device(device)
+        self.adapter_agent.device_update(device)
 
     def do_state_up(self, event):
         self.log.debug("do_state_up")
@@ -281,7 +300,7 @@
         # Update phys OF device
         device.parent_id = self.logical_device_id
         device.oper_status = OperStatus.ACTIVE
-        self.adapter_agent.update_device(device)
+        self.adapter_agent.device_update(device)
 
     def do_state_down(self, event):
         self.log.debug("do_state_down")
@@ -327,7 +346,7 @@
         device.oper_status = oper_state
         device.connect_status = connect_state
 
-        reactor.callLater(2, self.adapter_agent.update_device, device)
+        reactor.callLater(2, self.adapter_agent.device_update, device)
 
     # def post_up(self, event):
     #     self.log.debug('post-up')
@@ -471,7 +490,7 @@
         else:
             if onu_device.connect_status != ConnectStatus.REACHABLE:
                 onu_device.connect_status = ConnectStatus.REACHABLE
-                self.adapter_agent.update_device(onu_device)
+                self.adapter_agent.device_update(onu_device)
 
             onu_id = onu_device.proxy_address.onu_id
             if onu_device.oper_status == OperStatus.DISCOVERED \
@@ -491,7 +510,7 @@
                               onu_id=onu_id, serial_number=serial_number_str)
 
                 onu_device.oper_status = OperStatus.DISCOVERED
-                self.adapter_agent.update_device(onu_device)
+                self.adapter_agent.device_update(onu_device)
                 try:
                     self.activate_onu(intf_id, onu_id, serial_number,
                                       serial_number_str)
@@ -576,14 +595,14 @@
 
             if onu_device.connect_status != ConnectStatus.UNREACHABLE:
                 onu_device.connect_status = ConnectStatus.UNREACHABLE
-                self.adapter_agent.update_device(onu_device)
+                self.adapter_agent.device_update(onu_device)
 
             # Move to discovered state
             self.log.debug('onu-oper-state-is-down')
 
             if onu_device.oper_status != OperStatus.DISCOVERED:
                 onu_device.oper_status = OperStatus.DISCOVERED
-                self.adapter_agent.update_device(onu_device)
+                self.adapter_agent.device_update(onu_device)
             # Set port oper state to Discovered
             self.onu_ports_down(onu_device, OperStatus.DISCOVERED)
 
@@ -594,7 +613,7 @@
 
             if onu_device.connect_status != ConnectStatus.REACHABLE:
                 onu_device.connect_status = ConnectStatus.REACHABLE
-                self.adapter_agent.update_device(onu_device)
+                self.adapter_agent.device_update(onu_device)
 
             if onu_device.oper_status != OperStatus.DISCOVERED:
                 self.log.debug("ignore onu indication",
@@ -994,8 +1013,6 @@
         try:
             self.stub.ReenableOlt(openolt_pb2.Empty())
 
-            self.log.info('enabling-all-ports', device_id=self.device_id)
-            self.adapter_agent.enable_all_ports(self.device_id)
         except Exception as e:
             self.log.error('Failure to reenable openolt device', error=e)
         else:
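The retry loop added to do_state_connected() doubles its delay after each failure and gives up once the delay passes 120 seconds. The same backoff logic as a self-contained sketch (connect is a stand-in for the stub's GetDeviceInfo call):

import time

def call_with_backoff(connect, max_delay=120):
    delay = 1
    while True:
        try:
            return connect()
        except Exception as e:
            if delay > max_delay:
                # the "gRPC failure too many times" branch: give up
                raise
            print('gRPC failure, retry in %ds: %r' % (delay, e))
            time.sleep(delay)
            delay += delay  # exponential backoff: 1, 2, 4, ... seconds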
diff --git a/python/compose/adapters-openolt.yml b/python/compose/adapters-openolt.yml
index da3b0c3..a076f07 100644
--- a/python/compose/adapters-openolt.yml
+++ b/python/compose/adapters-openolt.yml
@@ -23,7 +23,7 @@
         max-size: "10m"
         max-file: "3"
     command: [
-      "/voltha/python/adapters/openolt/main.py",
+      "/voltha/voltha/adapters/openolt/main.py",
       "-v",
       "--name=openolt",
       "--kafka_adapter=${DOCKER_HOST_IP}:9092",
diff --git a/python/core/__init__.py b/python/core/__init__.py
new file mode 100644
index 0000000..7de7115
--- /dev/null
+++ b/python/core/__init__.py
@@ -0,0 +1,16 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
diff --git a/python/core/config/__init__.py b/python/core/config/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/python/core/config/__init__.py
diff --git a/python/core/config/config_backend.py b/python/core/config/config_backend.py
new file mode 100644
index 0000000..d906348
--- /dev/null
+++ b/python/core/config/config_backend.py
@@ -0,0 +1,289 @@
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from consul import Consul, ConsulException
+from common.utils.asleep import asleep
+from requests import ConnectionError
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+import etcd3
+import structlog
+
+
+class ConsulStore(object):
+    """ Config kv store for consul with a cache for quicker subsequent reads
+
+        TODO: This will block the reactor. Should either change
+        whole call stack to yield or put the put/delete transactions into a
+        queue to write later with twisted. Will need a transaction
+        log to ensure we don't lose anything.
+        Making the whole callstack yield is troublesome because other tasks can
+        come in on the side and start modifying things which could be bad.
+    """
+
+    CONNECT_RETRY_INTERVAL_SEC = 1
+    RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+
+    def __init__(self, host, port, path_prefix):
+
+        self.log = structlog.get_logger()
+        self._consul = Consul(host=host, port=port)
+        self.host = host
+        self.port = port
+        self._path_prefix = path_prefix
+        self._cache = {}
+        self.retries = 0
+
+    def make_path(self, key):
+        return '{}/{}'.format(self._path_prefix, key)
+
+    def __getitem__(self, key):
+        if key in self._cache:
+            return self._cache[key]
+        value = self._kv_get(self.make_path(key))
+        if value is not None:
+            # consul turns empty strings to None, so we do the reverse here
+            self._cache[key] = value['Value'] or ''
+            return value['Value'] or ''
+        else:
+            raise KeyError(key)
+
+    def __contains__(self, key):
+        if key in self._cache:
+            return True
+        value = self._kv_get(self.make_path(key))
+        if value is not None:
+            self._cache[key] = value['Value']
+            return True
+        else:
+            return False
+
+    def __setitem__(self, key, value):
+        try:
+            assert isinstance(value, basestring)
+            self._cache[key] = value
+            self._kv_put(self.make_path(key), value)
+        except Exception, e:
+            self.log.exception('cannot-set-item', e=e)
+
+    def __delitem__(self, key):
+        self._cache.pop(key, None)
+        self._kv_delete(self.make_path(key))
+
+    @inlineCallbacks
+    def _backoff(self, msg):
+        wait_time = self.RETRY_BACKOFF[min(self.retries,
+                                           len(self.RETRY_BACKOFF) - 1)]
+        self.retries += 1
+        self.log.error(msg, retry_in=wait_time)
+        yield asleep(wait_time)
+
+    def _redo_consul_connection(self):
+        self._consul = Consul(host=self.host, port=self.port)
+        self._cache.clear()
+
+    def _clear_backoff(self):
+        if self.retries:
+            self.log.info('reconnected-to-consul', after_retries=self.retries)
+            self.retries = 0
+
+    def _get_consul(self):
+        return self._consul
+
+    # Proxy methods for consul with retry support
+    def _kv_get(self, *args, **kw):
+        return self._retry('GET', *args, **kw)
+
+    def _kv_put(self, *args, **kw):
+        return self._retry('PUT', *args, **kw)
+
+    def _kv_delete(self, *args, **kw):
+        return self._retry('DELETE', *args, **kw)
+
+    def _retry(self, operation, *args, **kw):
+        while 1:
+            try:
+                consul = self._get_consul()
+                self.log.debug('consul', consul=consul, operation=operation,
+                         args=args)
+                if operation == 'GET':
+                    index, result = consul.kv.get(*args, **kw)
+                elif operation == 'PUT':
+                    result = consul.kv.put(*args, **kw)
+                elif operation == 'DELETE':
+                    result = consul.kv.delete(*args, **kw)
+                else:
+                    # Default case - consider operation as a function call
+                    result = operation(*args, **kw)
+                self._clear_backoff()
+                break
+            except ConsulException, e:
+                self.log.exception('consul-not-up', e=e)
+                self._backoff('consul-not-up')
+            except ConnectionError, e:
+                self.log.exception('cannot-connect-to-consul', e=e)
+                self._backoff('cannot-connect-to-consul')
+            except Exception, e:
+                self.log.exception(e)
+                self._backoff('unknown-error')
+            self._redo_consul_connection()
+
+        return result
+
+
+class EtcdStore(object):
+    """ Config kv store for etcd with a cache for quicker subsequent reads
+
+        TODO: This will block the reactor. Should either change
+        whole call stack to yield or put the put/delete transactions into a
+        queue to write later with twisted. Will need a transaction
+        log to ensure we don't lose anything.
+        Making the whole callstack yield is troublesome because other tasks can
+        come in on the side and start modifying things which could be bad.
+    """
+
+    CONNECT_RETRY_INTERVAL_SEC = 1
+    RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+
+    def __init__(self, host, port, path_prefix):
+
+        self.log = structlog.get_logger()
+        self._etcd = etcd3.client(host=host, port=port)
+        self.host = host
+        self.port = port
+        self._path_prefix = path_prefix
+        self._cache = {}
+        self.retries = 0
+
+    def make_path(self, key):
+        return '{}/{}'.format(self._path_prefix, key)
+
+    def __getitem__(self, key):
+        if key in self._cache:
+            return self._cache[key]
+        (value, meta) = self._kv_get(self.make_path(key))
+        if value is not None:
+            self._cache[key] = value
+            return value
+        else:
+            raise KeyError(key)
+
+    def __contains__(self, key):
+        if key in self._cache:
+            return True
+        (value, meta) = self._kv_get(self.make_path(key))
+        if value is not None:
+            self._cache[key] = value
+            return True
+        else:
+            return False
+
+    def __setitem__(self, key, value):
+        try:
+            assert isinstance(value, basestring)
+            self._cache[key] = value
+            self._kv_put(self.make_path(key), value)
+        except Exception, e:
+            self.log.exception('cannot-set-item', e=e)
+
+    def __delitem__(self, key):
+        self._cache.pop(key, None)
+        self._kv_delete(self.make_path(key))
+
+    @inlineCallbacks
+    def _backoff(self, msg):
+        wait_time = self.RETRY_BACKOFF[min(self.retries,
+                                           len(self.RETRY_BACKOFF) - 1)]
+        self.retries += 1
+        self.log.error(msg, retry_in=wait_time)
+        yield asleep(wait_time)
+
+    def _redo_etcd_connection(self):
+        self._etcd = etcd3.client(host=self.host, port=self.port)
+        self._cache.clear()
+
+    def _clear_backoff(self):
+        if self.retries:
+            self.log.info('reconnected-to-etcd', after_retries=self.retries)
+            self.retries = 0
+
+    def _get_etcd(self):
+        return self._etcd
+
+    # Proxy methods for etcd with retry support
+    def _kv_get(self, *args, **kw):
+        return self._retry('GET', *args, **kw)
+
+    def _kv_put(self, *args, **kw):
+        return self._retry('PUT', *args, **kw)
+
+    def _kv_delete(self, *args, **kw):
+        return self._retry('DELETE', *args, **kw)
+
+    def _retry(self, operation, *args, **kw):
+
+        # etcd data sometimes contains non-utf8 sequences, replace
+        self.log.debug('backend-op',
+                  operation=operation,
+                  args=map(lambda x : unicode(x,'utf8','replace'), args),
+                  kw=kw)
+
+        while 1:
+            try:
+                etcd = self._get_etcd()
+                self.log.debug('etcd', etcd=etcd, operation=operation,
+                    args=map(lambda x : unicode(x,'utf8','replace'), args))
+                if operation == 'GET':
+                    (value, meta) = etcd.get(*args, **kw)
+                    result = (value, meta)
+                elif operation == 'PUT':
+                    result = etcd.put(*args, **kw)
+                elif operation == 'DELETE':
+                    result = etcd.delete(*args, **kw)
+                else:
+                    # Default case - consider operation as a function call
+                    result = operation(*args, **kw)
+                self._clear_backoff()
+                break
+            except Exception, e:
+                self.log.exception(e)
+                self._backoff('unknown-error-with-etcd')
+            self._redo_etcd_connection()
+
+        return result
+
+
+def load_backend(store_id, store_prefix, args):
+    """ Return the kv store backend based on the command line arguments
+    """
+
+    def load_consul_store():
+        instance_core_store_prefix = '{}/{}'.format(store_prefix, store_id)
+
+        host, port = args.consul.split(':', 1)
+        return ConsulStore(host, int(port), instance_core_store_prefix)
+
+    def load_etcd_store():
+        instance_core_store_prefix = '{}/{}'.format(store_prefix, store_id)
+
+        host, port = args.etcd.split(':', 1)
+        return EtcdStore(host, int(port), instance_core_store_prefix)
+
+    loaders = {
+        'none': lambda: None,
+        'consul': load_consul_store,
+        'etcd': load_etcd_store
+    }
+
+    return loaders[args.backend]()
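Two ideas in config_backend.py are worth isolating: reads are served from a write-through cache in front of the kv store, and retry waits come from a fixed table whose last entry caps the delay. A minimal sketch with a dict standing in for the consul/etcd clients:

RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]

def backoff_delay(retries):
    # Indexing past the end of the table keeps returning the max delay.
    return RETRY_BACKOFF[min(retries, len(RETRY_BACKOFF) - 1)]

class DictKv(object):
    # stand-in for the Consul/etcd3 clients wrapped by the real stores
    def __init__(self):
        self.data = {}

    def get(self, key):
        return self.data.get(key)

    def put(self, key, value):
        self.data[key] = value

class CachedStore(object):
    def __init__(self, kv):
        self._kv = kv
        self._cache = {}

    def __getitem__(self, key):
        if key in self._cache:  # fast path, no kv round trip
            return self._cache[key]
        value = self._kv.get(key)
        if value is None:
            raise KeyError(key)
        self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        self._cache[key] = value  # cache first, then write through
        self._kv.put(key, value)

store = CachedStore(DictKv())
store['service/voltha/data/config'] = 'x'
assert store['service/voltha/data/config'] == 'x'
assert backoff_delay(10) == 5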
diff --git a/python/core/flow_decomposer.py b/python/core/flow_decomposer.py
new file mode 100644
index 0000000..faf3141
--- /dev/null
+++ b/python/core/flow_decomposer.py
@@ -0,0 +1,1010 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A mix-in class implementing flow decomposition
+"""
+from collections import OrderedDict
+from copy import copy, deepcopy
+from hashlib import md5
+
+import structlog
+
+from voltha.protos import third_party
+from voltha.protos import openflow_13_pb2 as ofp
+from common.tech_profile import tech_profile
+_ = third_party
+log = structlog.get_logger()
+
+
+# aliases
+ofb_field = ofp.ofp_oxm_ofb_field
+action = ofp.ofp_action
+
+# OFPAT_* shortcuts
+OUTPUT = ofp.OFPAT_OUTPUT
+COPY_TTL_OUT = ofp.OFPAT_COPY_TTL_OUT
+COPY_TTL_IN = ofp.OFPAT_COPY_TTL_IN
+SET_MPLS_TTL = ofp.OFPAT_SET_MPLS_TTL
+DEC_MPLS_TTL = ofp.OFPAT_DEC_MPLS_TTL
+PUSH_VLAN = ofp.OFPAT_PUSH_VLAN
+POP_VLAN = ofp.OFPAT_POP_VLAN
+PUSH_MPLS = ofp.OFPAT_PUSH_MPLS
+POP_MPLS = ofp.OFPAT_POP_MPLS
+SET_QUEUE = ofp.OFPAT_SET_QUEUE
+GROUP = ofp.OFPAT_GROUP
+SET_NW_TTL = ofp.OFPAT_SET_NW_TTL
+NW_TTL = ofp.OFPAT_DEC_NW_TTL
+SET_FIELD = ofp.OFPAT_SET_FIELD
+PUSH_PBB = ofp.OFPAT_PUSH_PBB
+POP_PBB = ofp.OFPAT_POP_PBB
+EXPERIMENTER = ofp.OFPAT_EXPERIMENTER
+
+# OFPXMT_OFB_* shortcuts (incomplete)
+IN_PORT = ofp.OFPXMT_OFB_IN_PORT
+IN_PHY_PORT = ofp.OFPXMT_OFB_IN_PHY_PORT
+METADATA = ofp.OFPXMT_OFB_METADATA
+ETH_DST = ofp.OFPXMT_OFB_ETH_DST
+ETH_SRC = ofp.OFPXMT_OFB_ETH_SRC
+ETH_TYPE = ofp.OFPXMT_OFB_ETH_TYPE
+VLAN_VID = ofp.OFPXMT_OFB_VLAN_VID
+VLAN_PCP = ofp.OFPXMT_OFB_VLAN_PCP
+IP_DSCP = ofp.OFPXMT_OFB_IP_DSCP
+IP_ECN = ofp.OFPXMT_OFB_IP_ECN
+IP_PROTO = ofp.OFPXMT_OFB_IP_PROTO
+IPV4_SRC = ofp.OFPXMT_OFB_IPV4_SRC
+IPV4_DST = ofp.OFPXMT_OFB_IPV4_DST
+TCP_SRC = ofp.OFPXMT_OFB_TCP_SRC
+TCP_DST = ofp.OFPXMT_OFB_TCP_DST
+UDP_SRC = ofp.OFPXMT_OFB_UDP_SRC
+UDP_DST = ofp.OFPXMT_OFB_UDP_DST
+SCTP_SRC = ofp.OFPXMT_OFB_SCTP_SRC
+SCTP_DST = ofp.OFPXMT_OFB_SCTP_DST
+ICMPV4_TYPE = ofp.OFPXMT_OFB_ICMPV4_TYPE
+ICMPV4_CODE = ofp.OFPXMT_OFB_ICMPV4_CODE
+ARP_OP = ofp.OFPXMT_OFB_ARP_OP
+ARP_SPA = ofp.OFPXMT_OFB_ARP_SPA
+ARP_TPA = ofp.OFPXMT_OFB_ARP_TPA
+ARP_SHA = ofp.OFPXMT_OFB_ARP_SHA
+ARP_THA = ofp.OFPXMT_OFB_ARP_THA
+IPV6_SRC = ofp.OFPXMT_OFB_IPV6_SRC
+IPV6_DST = ofp.OFPXMT_OFB_IPV6_DST
+IPV6_FLABEL = ofp.OFPXMT_OFB_IPV6_FLABEL
+ICMPV6_TYPE = ofp.OFPXMT_OFB_ICMPV6_TYPE
+ICMPV6_CODE = ofp.OFPXMT_OFB_ICMPV6_CODE
+IPV6_ND_TARGET = ofp.OFPXMT_OFB_IPV6_ND_TARGET
+OFB_IPV6_ND_SLL = ofp.OFPXMT_OFB_IPV6_ND_SLL
+IPV6_ND_TLL = ofp.OFPXMT_OFB_IPV6_ND_TLL
+MPLS_LABEL = ofp.OFPXMT_OFB_MPLS_LABEL
+MPLS_TC = ofp.OFPXMT_OFB_MPLS_TC
+MPLS_BOS = ofp.OFPXMT_OFB_MPLS_BOS
+PBB_ISID = ofp.OFPXMT_OFB_PBB_ISID
+TUNNEL_ID = ofp.OFPXMT_OFB_TUNNEL_ID
+IPV6_EXTHDR = ofp.OFPXMT_OFB_IPV6_EXTHDR
+
+# ofp_action_* shortcuts
+
+def output(port, max_len=ofp.OFPCML_MAX):
+    return action(
+        type=OUTPUT,
+        output=ofp.ofp_action_output(port=port, max_len=max_len)
+    )
+
+def mpls_ttl(ttl):
+    return action(
+        type=SET_MPLS_TTL,
+        mpls_ttl=ofp.ofp_action_mpls_ttl(mpls_ttl=ttl)
+    )
+
+def push_vlan(eth_type):
+    return action(
+        type=PUSH_VLAN,
+        push=ofp.ofp_action_push(ethertype=eth_type)
+    )
+
+def pop_vlan():
+    return action(
+        type=POP_VLAN
+    )
+
+def pop_mpls(eth_type):
+    return action(
+        type=POP_MPLS,
+        pop_mpls=ofp.ofp_action_pop_mpls(ethertype=eth_type)
+    )
+
+def group(group_id):
+    return action(
+        type=GROUP,
+        group=ofp.ofp_action_group(group_id=group_id)
+    )
+
+def nw_ttl(nw_ttl):
+    return action(
+        type=NW_TTL,
+        nw_ttl=ofp.ofp_action_nw_ttl(nw_ttl=nw_ttl)
+    )
+
+def set_field(field):
+    return action(
+        type=SET_FIELD,
+        set_field=ofp.ofp_action_set_field(
+            field=ofp.ofp_oxm_field(
+                oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+                ofb_field=field))
+    )
+
+def experimenter(experimenter, data):
+    return action(
+        type=EXPERIMENTER,
+        experimenter=ofp.ofp_action_experimenter(
+            experimenter=experimenter, data=data)
+    )
+
+
+# ofb_field generators (incomplete set)
+
+def in_port(_in_port):
+    return ofb_field(type=IN_PORT, port=_in_port)
+
+def in_phy_port(_in_phy_port):
+    return ofb_field(type=IN_PHY_PORT, port=_in_phy_port)
+
+def metadata(_table_metadata):
+    return ofb_field(type=METADATA, table_metadata=_table_metadata)
+
+def eth_dst(_eth_dst):
+    return ofb_field(type=ETH_DST, eth_dst=_eth_dst)
+
+def eth_src(_eth_src):
+    return ofb_field(type=ETH_SRC, eth_src=_eth_src)
+
+def eth_type(_eth_type):
+    return ofb_field(type=ETH_TYPE, eth_type=_eth_type)
+
+def vlan_vid(_vlan_vid):
+    return ofb_field(type=VLAN_VID, vlan_vid=_vlan_vid)
+
+def vlan_pcp(_vlan_pcp):
+    return ofb_field(type=VLAN_PCP, vlan_pcp=_vlan_pcp)
+
+def ip_dscp(_ip_dscp):
+    return ofb_field(type=IP_DSCP, ip_dscp=_ip_dscp)
+
+def ip_ecn(_ip_ecn):
+    return ofb_field(type=IP_ECN, ip_ecn=_ip_ecn)
+
+def ip_proto(_ip_proto):
+    return ofb_field(type=IP_PROTO, ip_proto=_ip_proto)
+
+def ipv4_src(_ipv4_src):
+    return ofb_field(type=IPV4_SRC, ipv4_src=_ipv4_src)
+
+def ipv4_dst(_ipv4_dst):
+    return ofb_field(type=IPV4_DST, ipv4_dst=_ipv4_dst)
+
+def tcp_src(_tcp_src):
+    return ofb_field(type=TCP_SRC, tcp_src=_tcp_src)
+
+def tcp_dst(_tcp_dst):
+    return ofb_field(type=TCP_DST, tcp_dst=_tcp_dst)
+
+def udp_src(_udp_src):
+    return ofb_field(type=UDP_SRC, udp_src=_udp_src)
+
+def udp_dst(_udp_dst):
+    return ofb_field(type=UDP_DST, udp_dst=_udp_dst)
+
+def sctp_src(_sctp_src):
+    return ofb_field(type=SCTP_SRC, sctp_src=_sctp_src)
+
+def sctp_dst(_sctp_dst):
+    return ofb_field(type=SCTP_DST, sctp_dst=_sctp_dst)
+
+def icmpv4_type(_icmpv4_type):
+    return ofb_field(type=ICMPV4_TYPE, icmpv4_type=_icmpv4_type)
+
+def icmpv4_code(_icmpv4_code):
+    return ofb_field(type=ICMPV4_CODE, icmpv4_code=_icmpv4_code)
+
+def arp_op(_arp_op):
+    return ofb_field(type=ARP_OP, arp_op=_arp_op)
+
+def arp_spa(_arp_spa):
+    return ofb_field(type=ARP_SPA, arp_spa=_arp_spa)
+
+def arp_tpa(_arp_tpa):
+    return ofb_field(type=ARP_TPA, arp_tpa=_arp_tpa)
+
+def arp_sha(_arp_sha):
+    return ofb_field(type=ARP_SHA, arp_sha=_arp_sha)
+
+def arp_tha(_arp_tha):
+    return ofb_field(type=ARP_THA, arp_tha=_arp_tha)
+
+def ipv6_src(_ipv6_src):
+    return ofb_field(type=IPV6_SRC, ipv6_src=_ipv6_src)
+
+def ipv6_dst(_ipv6_dst):
+    return ofb_field(type=IPV6_DST, ipv6_dst=_ipv6_dst)
+
+def ipv6_flabel(_ipv6_flabel):
+    return ofb_field(type=IPV6_FLABEL, ipv6_flabel=_ipv6_flabel)
+
+def icmpv6_type(_icmpv6_type):
+    return ofb_field(type=ICMPV6_TYPE, icmpv6_type=_icmpv6_type)
+
+def icmpv6_code(_icmpv6_code):
+    return ofb_field(type=ICMPV6_CODE, icmpv6_code=_icmpv6_code)
+
+def ipv6_nd_target(_ipv6_nd_target):
+    return ofb_field(type=IPV6_ND_TARGET, ipv6_nd_target=_ipv6_nd_target)
+
+def ofb_ipv6_nd_sll(_ofb_ipv6_nd_sll):
+    return ofb_field(type=OFB_IPV6_ND_SLL, ipv6_nd_ssl=_ofb_ipv6_nd_sll)
+
+def ipv6_nd_tll(_ipv6_nd_tll):
+    return ofb_field(type=IPV6_ND_TLL, ipv6_nd_tll=_ipv6_nd_tll)
+
+def mpls_label(_mpls_label):
+    return ofb_field(type=MPLS_LABEL, mpls_label=_mpls_label)
+
+def mpls_tc(_mpls_tc):
+    return ofb_field(type=MPLS_TC, mpls_tc=_mpls_tc)
+
+def mpls_bos(_mpls_bos):
+    return ofb_field(type=MPLS_BOS, mpls_bos=_mpls_bos)
+
+def pbb_isid(_pbb_isid):
+    return ofb_field(type=PBB_ISID, pbb_isid=_pbb_isid)
+
+def tunnel_id(_tunnel_id):
+    return ofb_field(type=TUNNEL_ID, tunnel_id=_tunnel_id)
+
+def ipv6_exthdr(_ipv6_exthdr):
+    return ofb_field(type=IPV6_EXTHDR, ipv6_exthdr=_ipv6_exthdr)
+
+
+# frequently used extractors:
+
+def get_actions(flow):
+    """Extract list of ofp_action objects from flow spec object"""
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    # we have the following hard assumptions for now
+    actions = []
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_APPLY_ACTIONS or instruction.type == ofp.OFPIT_WRITE_ACTIONS:
+            actions.extend(instruction.actions.actions)
+    return actions
+
+
+def get_ofb_fields(flow):
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    assert flow.match.type == ofp.OFPMT_OXM
+    ofb_fields = []
+    for field in flow.match.oxm_fields:
+        assert field.oxm_class == ofp.OFPXMC_OPENFLOW_BASIC
+        ofb_fields.append(field.ofb_field)
+    return ofb_fields
+
+def get_out_port(flow):
+    for action in get_actions(flow):
+        if action.type == OUTPUT:
+            return action.output.port
+    return None
+
+def get_in_port(flow):
+    for field in get_ofb_fields(flow):
+        if field.type == IN_PORT:
+            return field.port
+    return None
+
+def get_goto_table_id(flow):
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_GOTO_TABLE:
+            return instruction.goto_table.table_id
+    return None
+
+def get_metadata(flow):
+    ''' legacy get method (only want lower 32 bits) '''
+    for field in get_ofb_fields(flow):
+        if field.type == METADATA:
+            return field.table_metadata & 0xffffffff
+    return None
+
+def get_metadata_64_bit(flow):
+    for field in get_ofb_fields(flow):
+        if field.type == METADATA:
+            return field.table_metadata
+    return None
+
+
+def get_port_number_from_metadata(flow):
+    """
+    The port number (UNI on ONU) is in the lower 32-bits of metadata and
+    the inner_tag is in the upper 32-bits
+
+    This is set in the ONOS OltPipeline as a metadata field
+    """
+    md = get_metadata_64_bit(flow)
+
+    if md is None:
+        return None
+
+    if md <= 0xffffffff:
+        log.warn('onos-upgrade-suggested',
+                 metadata=md,
+                 message='Legacy MetaData detected from OltPipeline')
+        return md
+
+    return md & 0xffffffff
+
+
+def get_inner_tag_from_metadata(flow):
+    """
+    The port number (UNI on ONU) is in the lower 32-bits of metadata and
+    the inner_tag is in the upper 32-bits
+
+    This is set in the ONOS OltPipeline as a metadata field
+    """
+    md = get_metadata_64_bit(flow)
+
+    if md is None:
+        return None
+
+    if md <= 0xffffffff:
+        log.warn('onos-upgrade-suggested',
+                 metadata=md,
+                 message='Legacy MetaData detected from OltPipeline')
+        return md
+
+    return (md >> 32) & 0xffffffff
+
+
+# test and extract next table and group information
+def has_next_table(flow):
+    return get_goto_table_id(flow) is not None
+
+def get_group(flow):
+    for action in get_actions(flow):
+        if action.type == GROUP:
+            return action.group.group_id
+    return None
+
+def get_meter_ids_from_flow(flow):
+    meter_ids = list()
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_METER:
+            meter_ids.append(instruction.meter.meter_id)
+    return meter_ids
+
+def has_group(flow):
+    return get_group(flow) is not None
+
+def mk_oxm_fields(match_fields):
+    oxm_fields = [
+        ofp.ofp_oxm_field(
+            oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+            ofb_field=field
+        ) for field in match_fields
+        ]
+
+    return oxm_fields
+
+def mk_instructions_from_actions(actions):
+    instructions_action = ofp.ofp_instruction_actions()
+    instructions_action.actions.extend(actions)
+    instruction = ofp.ofp_instruction(type=ofp.OFPIT_APPLY_ACTIONS,
+                                      actions=instructions_action)
+    return [instruction]
+
+def mk_simple_flow_mod(match_fields, actions, command=ofp.OFPFC_ADD,
+                       next_table_id=None, meters=None, **kw):
+    """
+    Convenience function to generate an ofp_flow_mod message with an OXM
+    BASIC match composed from the match_fields, and a single APPLY_ACTIONS
+    instruction with a list of ofp_action objects.
+    :param match_fields: list(ofp_oxm_ofb_field)
+    :param actions: list(ofp_action)
+    :param command: one of OFPFC_*
+    :param next_table_id: optional table id for a GOTO_TABLE instruction
+    :param meters: optional list of meter ids to attach as METER instructions
+    :param kw: additional keyword-based params to ofp_flow_mod
+    :return: initialized ofp_flow_mod object
+    """
+    instructions = [
+        ofp.ofp_instruction(
+            type=ofp.OFPIT_APPLY_ACTIONS,
+            actions=ofp.ofp_instruction_actions(actions=actions)
+        )
+    ]
+
+    if meters is not None:
+        for meter_id in meters:
+            instructions.append(ofp.ofp_instruction(
+                type=ofp.OFPIT_METER,
+                meter=ofp.ofp_instruction_meter(meter_id=meter_id)
+            ))
+
+    if next_table_id is not None:
+        instructions.append(ofp.ofp_instruction(
+            type=ofp.OFPIT_GOTO_TABLE,
+            goto_table=ofp.ofp_instruction_goto_table(table_id=next_table_id)
+        ))
+
+    return ofp.ofp_flow_mod(
+        command=command,
+        match=ofp.ofp_match(
+            type=ofp.OFPMT_OXM,
+            oxm_fields=[
+                ofp.ofp_oxm_field(
+                    oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+                    ofb_field=field
+                ) for field in match_fields
+            ]
+        ),
+        instructions=instructions,
+        **kw
+    )
+
+
+def mk_multicast_group_mod(group_id, buckets, command=ofp.OFPGC_ADD):
+    group = ofp.ofp_group_mod(
+        command=command,
+        type=ofp.OFPGT_ALL,
+        group_id=group_id,
+        buckets=buckets
+    )
+    return group
+
+
+def hash_flow_stats(flow):
+    """
+    Return a 64-bit integer hash of the flow, computed over the attributes
+    'table_id', 'priority', 'flags', 'cookie', 'match' and the serialized
+    instructions
+    """
+    _instruction_string = ""
+    for _instruction in flow.instructions:
+        _instruction_string += _instruction.SerializeToString()
+
+    digest = md5('{},{},{},{},{},{}'.format(
+        flow.table_id,
+        flow.priority,
+        flow.flags,
+        flow.cookie,
+        flow.match.SerializeToString(),
+        _instruction_string
+    )).hexdigest()
+    return int(digest[:16], 16)
+
+
+def flow_stats_entry_from_flow_mod_message(mod):
+    flow = ofp.ofp_flow_stats(
+        table_id=mod.table_id,
+        priority=mod.priority,
+        idle_timeout=mod.idle_timeout,
+        hard_timeout=mod.hard_timeout,
+        flags=mod.flags,
+        cookie=mod.cookie,
+        match=mod.match,
+        instructions=mod.instructions
+    )
+    flow.id = hash_flow_stats(flow)
+    return flow
+
+
+def group_entry_from_group_mod(mod):
+    group = ofp.ofp_group_entry(
+        desc=ofp.ofp_group_desc(
+            type=mod.type,
+            group_id=mod.group_id,
+            buckets=mod.buckets
+        ),
+        stats=ofp.ofp_group_stats(
+            group_id=mod.group_id
+            # TODO do we need to instantiate bucket bins?
+        )
+    )
+    return group
+
+
+def mk_flow_stat(**kw):
+    return flow_stats_entry_from_flow_mod_message(mk_simple_flow_mod(**kw))
+
+
+def mk_group_stat(**kw):
+    return group_entry_from_group_mod(mk_multicast_group_mod(**kw))
+
+class RouteHop(object):
+    __slots__ = ('_device', '_ingress_port', '_egress_port')
+    def __init__(self, device, ingress_port, egress_port):
+        self._device = device
+        self._ingress_port = ingress_port
+        self._egress_port = egress_port
+    @property
+    def device(self): return self._device
+    @property
+    def ingress_port(self): return self._ingress_port
+    @property
+    def egress_port(self): return self._egress_port
+    def __eq__(self, other):
+        return (
+            self._device == other._device and
+            self._ingress_port == other._ingress_port and
+            self._egress_port == other._egress_port)
+    def __ne__(self, other):
+        return not self.__eq__(other)
+    def __str__(self):
+        return 'RouteHop device_id {}, ingress_port {}, egress_port {}'.format(
+            self._device.id, self._ingress_port, self._egress_port)
+
+class FlowDecomposer(object):
+
+    def __init__(self, *args, **kw):
+        self.logical_device_id = 'this shall be overwritten in derived class'
+        super(FlowDecomposer, self).__init__(*args, **kw)
+
+    # ~~~~~~~~~~~~~~~~~~~~ methods exposed *to* derived class ~~~~~~~~~~~~~~~~~
+
+    def decompose_rules(self, flows, groups):
+        """
+        Generate per-device flows and flow-groups from the flows and groups
+        defined on a logical device
+        :param flows: logical device flows
+        :param groups: logical device flow groups
+        :return: dict(device_id ->
+            (OrderedDict-of-device-flows, OrderedDict-of-device-flow-groups))
+        """
+
+        device_rules = deepcopy(self.get_all_default_rules())
+        group_map = dict((g.desc.group_id, g) for g in groups)
+
+        for flow in flows:
+            for device_id, (_flows, _groups) \
+                    in self.decompose_flow(flow, group_map).iteritems():
+                fl_lst, gr_lst = device_rules.setdefault(
+                    device_id, (OrderedDict(), OrderedDict()))
+                for _flow in _flows:
+                    if _flow.id not in fl_lst:
+                        fl_lst[_flow.id] = _flow
+                for _group in _groups:
+                    if _group.group_id not in gr_lst:
+                        gr_lst[_group.group_id] = _group
+        return device_rules
+
+    def decompose_flow(self, flow, group_map):
+        assert isinstance(flow, ofp.ofp_flow_stats)
+
+        ####################################################################
+        #
+        # limited, heuristics based implementation
+        # needs to be replaced, see https://jira.opencord.org/browse/CORD-841
+        #
+        ####################################################################
+
+        in_port_no = get_in_port(flow)
+        out_port_no = get_out_port(flow)  # may be None
+
+        device_rules = {}  # accumulator
+
+        route = self.get_route(in_port_no, out_port_no)
+        if route is None:
+            log.error('no-route', in_port_no=in_port_no,
+                      out_port_no=out_port_no, comment='deleting flow')
+            self.flow_delete(flow)
+            return device_rules
+
+        assert len(route) == 2
+        ingress_hop, egress_hop = route
+
+        def is_downstream():
+            return ingress_hop.device.root
+
+        def is_upstream():
+            return not is_downstream()
+
+        def update_devices_rules(flow, curr_device_rules, meter_ids=None, table_id=None):
+            actions = [action.type for action in get_actions(flow)]
+            if len(actions) == 1 and OUTPUT in actions:
+                # Transparent ONU and OLT case (No-L2-Modification flow)
+                child_device_flow_lst, _ = curr_device_rules.setdefault(
+                    ingress_hop.device.id, ([], []))
+                parent_device_flow_lst, _ = curr_device_rules.setdefault(
+                    egress_hop.device.id, ([], []))
+
+                child_device_flow_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                                     in_port(ingress_hop.ingress_port.port_no)
+                                 ] + [
+                                     field for field in get_ofb_fields(flow)
+                                     if field.type not in (IN_PORT,)
+                                 ],
+                    actions=[
+                        output(ingress_hop.egress_port.port_no)
+                    ]
+                ))
+
+                parent_device_flow_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                                     in_port(egress_hop.ingress_port.port_no),
+                                 ] + [
+                                     field for field in get_ofb_fields(flow)
+                                     if field.type not in (IN_PORT,)
+                                 ],
+                    actions=[
+                        output(egress_hop.egress_port.port_no)
+                    ],
+                    table_id=table_id,
+                    meters=meter_ids
+                ))
+
+            else:
+                fl_lst, _ = curr_device_rules.setdefault(
+                    egress_hop.device.id, ([], []))
+                fl_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                                     in_port(egress_hop.ingress_port.port_no)
+                                 ] + [
+                                     field for field in get_ofb_fields(flow)
+                                     if field.type not in (IN_PORT,)
+                                 ],
+                    actions=[
+                                action for action in get_actions(flow)
+                                if action.type != OUTPUT
+                            ] + [
+                                output(egress_hop.egress_port.port_no)
+                            ],
+                    table_id=table_id,
+                    meters=meter_ids
+                ))
+
+        if out_port_no is not None and \
+                (out_port_no & 0x7fffffff) == ofp.OFPP_CONTROLLER:
+
+            # UPSTREAM CONTROLLER-BOUND FLOW
+
+            # we assume that the ingress device is already pushing a
+            # customer-specific vlan (c-vid), based on its default flow
+            # rules so there is nothing else to do on the ONU
+
+            # on the olt, we need to push a new tag and set it to 4000
+            # which for now represents in-bound channel to the controller
+            # (via Voltha)
+            # TODO make the 4000 configurable
+            fl_lst, _ = device_rules.setdefault(
+                egress_hop.device.id, ([], []))
+
+            log.info('trap-flow', in_port_no=in_port_no,
+                     nni=self._nni_logical_port_no)
+
+            if in_port_no == self._nni_logical_port_no:
+                log.debug('trap-nni')
+                # Trap flow for NNI port
+                fl_lst.append(mk_flow_stat(
+                    priority=flow.priority,
+                    cookie=flow.cookie,
+                    match_fields=[
+                        in_port(egress_hop.egress_port.port_no)
+                    ] + [
+                        field for field in get_ofb_fields(flow)
+                        if field.type not in (IN_PORT,)
+                    ],
+                    actions=[
+                        action for action in get_actions(flow)
+                    ]
+                ))
+
+            else:
+                log.debug('trap-uni')
+                # Trap flow for UNI port
+
+                # in_port_no is None for wildcard input case, do not include
+                # upstream port for 4000 flow in input
+                if in_port_no is None:
+                    in_ports = self.get_wildcard_input_ports(exclude_port=
+                                                             egress_hop.egress_port.port_no)
+                else:
+                    in_ports = [in_port_no]
+
+                for input_port in in_ports:
+                    fl_lst.append(mk_flow_stat(        # Upstream flow
+                        priority=flow.priority,
+                        cookie=flow.cookie,
+                        match_fields=[
+                            in_port(egress_hop.ingress_port.port_no),
+                            vlan_vid(ofp.OFPVID_PRESENT | input_port)
+                        ] + [
+                            field for field in get_ofb_fields(flow)
+                            if field.type not in (IN_PORT, VLAN_VID)
+                        ],
+                        actions=[
+                            push_vlan(0x8100),
+                            set_field(vlan_vid(ofp.OFPVID_PRESENT | 4000)),
+                            output(egress_hop.egress_port.port_no)]
+                    ))
+                    fl_lst.append(mk_flow_stat(            # Downstream flow
+                        priority=flow.priority,
+                        match_fields=[
+                            in_port(egress_hop.egress_port.port_no),
+                            vlan_vid(ofp.OFPVID_PRESENT | 4000),
+                            vlan_pcp(0),
+                            metadata(input_port)
+                        ],
+                        actions=[
+                            pop_vlan(),
+                            output(egress_hop.ingress_port.port_no)]
+                    ))
+        else:
+            # NOT A CONTROLLER-BOUND FLOW
+            if is_upstream():
+
+                # We assume that anything that is upstream needs to get Q-in-Q
+                # treatment and that this is expressed via two flow rules,
+                # the first using the goto-statement. We also assume that the
+                # inner tag is applied at the ONU, while the outer tag is
+                # applied at the OLT
+                next_table_id = get_goto_table_id(flow)
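+                # a goto-table below DEFAULT_TECH_PROFILE_TABLE_ID is taken
+                # as the ONU-side half of the Q-in-Q pair; at or above it,
+                # the flow is assumed to carry tech-profile meters for the OLT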
+                if next_table_id is not None and next_table_id < tech_profile.DEFAULT_TECH_PROFILE_TABLE_ID:
+                    assert out_port_no is None
+                    fl_lst, _ = device_rules.setdefault(
+                        ingress_hop.device.id, ([], []))
+                    fl_lst.append(mk_flow_stat(
+                        priority=flow.priority,
+                        cookie=flow.cookie,
+                        match_fields=[
+                            in_port(ingress_hop.ingress_port.port_no)
+                        ] + [
+                            field for field in get_ofb_fields(flow)
+                            if field.type not in (IN_PORT,)
+                        ],
+                        actions=[
+                            action for action in get_actions(flow)
+                        ] + [
+                            output(ingress_hop.egress_port.port_no)
+                        ]
+                    ))
+
+                elif next_table_id is not None and next_table_id >= tech_profile.DEFAULT_TECH_PROFILE_TABLE_ID:
+                    assert out_port_no is not None
+                    meter_ids = get_meter_ids_from_flow(flow)
+                    update_devices_rules(flow, device_rules, meter_ids, next_table_id)
+                else:
+                    update_devices_rules(flow, device_rules)
+
+            else:  # downstream
+                next_table_id = get_goto_table_id(flow)
+                if next_table_id is not None and next_table_id < tech_profile.DEFAULT_TECH_PROFILE_TABLE_ID:
+                    assert out_port_no is None
+
+                    if get_metadata(flow) is not None:
+                        log.debug('creating-metadata-flow', flow=flow)
+                        # For downstream flows with dual-tags, recalculate route.
+                        port_number = get_port_number_from_metadata(flow)
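+                        # the 64-bit write-metadata is assumed to pack both
+                        # the egress port number and the inner vid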
+
+                        if port_number is not None:
+                            route = self.get_route(in_port_no, port_number)
+                            if route is None:
+                                log.error('no-route-double-tag', in_port_no=in_port_no,
+                                          out_port_no=port_number, comment='deleting flow',
+                                          metadata=get_metadata_64_bit(flow))
+                                self.flow_delete(flow)
+                                return device_rules
+                            assert len(route) == 2
+                            ingress_hop, egress_hop = route
+
+                        inner_tag = get_inner_tag_from_metadata(flow)
+
+                        if inner_tag is None:
+                            log.error('no-inner-tag-double-tag', in_port_no=in_port_no,
+                                      out_port_no=port_number, comment='deleting flow',
+                                      metadata=get_metadata_64_bit(flow))
+                            self.flow_delete(flow)
+                            return device_rules
+
+                        fl_lst, _ = device_rules.setdefault(
+                            ingress_hop.device.id, ([], []))
+                        fl_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(ingress_hop.ingress_port.port_no),
+                                metadata(inner_tag)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT, METADATA)
+                            ],
+                            actions=[
+                                action for action in get_actions(flow)
+                            ] + [
+                                output(ingress_hop.egress_port.port_no)
+                            ]
+                        ))
+                    else:
+                        log.debug('creating-standard-flow', flow=flow)
+                        fl_lst, _ = device_rules.setdefault(
+                            ingress_hop.device.id, ([], []))
+                        fl_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(ingress_hop.ingress_port.port_no)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT,)
+                            ],
+                            actions=[
+                                action for action in get_actions(flow)
+                            ] + [
+                                output(ingress_hop.egress_port.port_no)
+                            ]
+                        ))
+
+                elif out_port_no is not None:  # unicast case
+
+                    actions = [action.type for action in get_actions(flow)]
+                    # Transparent ONU and OLT case (No-L2-Modification flow)
+                    if len(actions) == 1 and OUTPUT in actions:
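+                        # replicate the flow on both the parent (OLT) and the
+                        # child (ONU) device, rewriting only the output port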
+                        parent_device_flow_lst, _ = device_rules.setdefault(
+                            ingress_hop.device.id, ([], []))
+                        child_device_flow_lst, _ = device_rules.setdefault(
+                            egress_hop.device.id, ([], []))
+
+                        parent_device_flow_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(ingress_hop.ingress_port.port_no)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT,)
+                            ],
+                            actions=[
+                                output(ingress_hop.egress_port.port_no)
+                            ]
+                        ))
+
+                        child_device_flow_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(egress_hop.ingress_port.port_no)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT,)
+                            ],
+                            actions=[
+                                output(egress_hop.egress_port.port_no)
+                            ]
+                        ))
+                    else:
+                        fl_lst, _ = device_rules.setdefault(
+                            egress_hop.device.id, ([], []))
+                        fl_lst.append(mk_flow_stat(
+                            priority=flow.priority,
+                            cookie=flow.cookie,
+                            match_fields=[
+                                in_port(egress_hop.ingress_port.port_no)
+                            ] + [
+                                field for field in get_ofb_fields(flow)
+                                if field.type not in (IN_PORT,)
+                            ],
+                            actions=[
+                                action for action in get_actions(flow)
+                                if action.type not in (OUTPUT,)
+                            ] + [
+                                output(egress_hop.egress_port.port_no)
+                            ],
+                            #table_id=flow.table_id,
+                            #meters=None if len(get_meter_ids_from_flow(flow)) == 0 else get_meter_ids_from_flow(flow)
+                        ))
+                else:
+                    grp_id = get_group(flow)
+
+                    if grp_id is not None: # multicast case
+                        fl_lst_olt, _ = device_rules.setdefault(
+                            ingress_hop.device.id, ([], []))
+                        # having no group yet is the same as having a group with
+                        # no buckets
+                        group = group_map.get(grp_id, ofp.ofp_group_entry())
+
+                        for bucket in group.desc.buckets:
+                            found_pop_vlan = False
+                            other_actions = []
+                            for action in bucket.actions:
+                                if action.type == POP_VLAN:
+                                    found_pop_vlan = True
+                                elif action.type == OUTPUT:
+                                    out_port_no = action.output.port
+                                else:
+                                    other_actions.append(action)
+                            # re-run route request to determine egress device and
+                            # ports
+                            route2 = self.get_route(in_port_no, out_port_no)
+                            if not route2 or len(route2) != 2:
+                                log.error('mc-no-route', in_port_no=in_port_no,
+                                    out_port_no=out_port_no, route2=route2,
+                                    comment='deleting flow')
+                                self.flow_delete(flow)
+                                continue
+
+                            ingress_hop2, egress_hop = route2
+
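+                            # every bucket must enter through the same OLT
+                            # port; a mismatch means the recomputed route is
+                            # inconsistent with the original one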
+                            if ingress_hop.ingress_port != ingress_hop2.ingress_port:
+                                log.error('mc-ingress-hop-hop2-mismatch',
+                                    ingress_hop=ingress_hop,
+                                    ingress_hop2=ingress_hop2,
+                                    in_port_no=in_port_no,
+                                    out_port_no=out_port_no,
+                                    comment='ignoring flow')
+                                continue
+
+                            fl_lst_olt.append(mk_flow_stat(
+                                priority=flow.priority,
+                                cookie=flow.cookie,
+                                match_fields=[
+                                    in_port(ingress_hop.ingress_port.port_no)
+                                ] + [
+                                    field for field in get_ofb_fields(flow)
+                                    if field.type not in (IN_PORT,)
+                                ],
+                                actions=[
+                                    action for action in get_actions(flow)
+                                    if action.type not in (GROUP,)
+                                ] + [
+                                    pop_vlan(),
+                                    output(egress_hop.ingress_port.port_no)
+                                ]
+                            ))
+
+                            fl_lst_onu, _ = device_rules.setdefault(
+                                egress_hop.device.id, ([], []))
+                            fl_lst_onu.append(mk_flow_stat(
+                                priority=flow.priority,
+                                cookie=flow.cookie,
+                                match_fields=[
+                                    in_port(egress_hop.ingress_port.port_no)
+                                ] + [
+                                    field for field in get_ofb_fields(flow)
+                                    if field.type not in (IN_PORT, VLAN_VID, VLAN_PCP)
+                                ],
+                                actions=other_actions + [
+                                    output(egress_hop.egress_port.port_no)
+                                ]
+                            ))
+                    else:
+                        raise NotImplementedError('undefined downstream case for flows')
+        return device_rules
+
+    # ~~~~~~~~~~~~ methods expected to be provided by derived class ~~~~~~~~~~~
+
+    def get_all_default_rules(self):
+        raise NotImplementedError('derived class must provide')
+
+    def get_default_rules(self, device_id):
+        raise NotImplementedError('derived class must provide')
+
+    def get_route(self, ingress_port_no, egress_port_no):
+        raise NotImplementedError('derived class must provide')
+
+    def get_wildcard_input_ports(self, exclude_port=None):
+        raise NotImplementedError('derived class must provide')
+
+    def flow_delete(self, mod):
+        raise NotImplementedError('derived class must provide')
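+
+    # A minimal sketch of a concrete subclass (illustrative names only,
+    # assuming the derived class keeps a precomputed `routes` dict and a
+    # `logical_ports` list):
+    #
+    #     def get_route(self, ingress_port_no, egress_port_no):
+    #         return self.routes.get((ingress_port_no, egress_port_no))
+    #
+    #     def get_wildcard_input_ports(self, exclude_port=None):
+    #         return [p.ofp_port.port_no for p in self.logical_ports
+    #                 if p.ofp_port.port_no != exclude_port]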
diff --git a/python/docker/Dockerfile.adapter_openolt b/python/docker/Dockerfile.adapter_openolt
index c1fc607..4101145 100644
--- a/python/docker/Dockerfile.adapter_openolt
+++ b/python/docker/Dockerfile.adapter_openolt
@@ -24,15 +24,41 @@
 # Bundle app source
 RUN mkdir /voltha && touch /voltha/__init__.py
 ENV PYTHONPATH=/voltha
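+
+# Lay the sources out under the new voltha.* package tree as well, so that
+# modules importing via voltha.* resolve inside the container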
+COPY common /voltha/voltha/common/
+COPY common /voltha/common/
+COPY adapters/common /voltha/common/
+COPY adapters/common /voltha/voltha/adapters/common/
+COPY adapters/kafka /voltha/voltha/adapters/kafka
+COPY adapters/*.py /voltha/voltha/adapters/
+COPY adapters/openolt /voltha/voltha/adapters/openolt
+COPY adapters/extensions /voltha/voltha/extensions
+COPY core /voltha/voltha/core/
+COPY common/utils/registry.py /voltha/voltha/
+RUN touch /voltha/voltha/__init__.py
+RUN touch /voltha/voltha/adapters/__init__.py
+
+# Two copies of the libraries for now, to support both import paths.
+# This can be simplified once the common libraries are unified in one repo.
 COPY common /voltha/python/common/
 COPY adapters/common /voltha/python/adapters/common/
 COPY adapters/kafka /voltha/python/adapters/kafka
 COPY adapters/*.py /voltha/python/adapters/
 COPY adapters/openolt /voltha/python/adapters/openolt
+COPY adapters/extensions /voltha/python/extensions
 RUN touch /voltha/python/__init__.py
 RUN touch /voltha/python/adapters/__init__.py
 
 # Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /voltha/voltha/protos
+COPY --from=protos /protos/google/api /voltha/voltha/protos/third_party/google/api
+COPY --from=openolt_protos /protos/voltha /voltha/voltha/protos
+COPY protos/third_party/__init__.py /voltha/voltha/protos/third_party
+COPY --from=openolt_protos /protos/voltha /voltha/voltha/adapters/openolt/protos/
+RUN touch /voltha/voltha/protos/__init__.py
+RUN touch /voltha/voltha/protos/third_party/google/__init__.py
+RUN touch /voltha/voltha/adapters/openolt/protos/__init__.py
+
+# Copy in the generated GRPC proto code
 COPY --from=protos /protos/voltha /voltha/python/protos
 COPY --from=protos /protos/google/api /voltha/python/protos/third_party/google/api
 COPY --from=openolt_protos /protos/voltha /voltha/python/protos
diff --git a/python/docker/Dockerfile.openolt_protos b/python/docker/Dockerfile.openolt_protos
index 44afb5f..71d8b56 100644
--- a/python/docker/Dockerfile.openolt_protos
+++ b/python/docker/Dockerfile.openolt_protos
@@ -23,8 +23,8 @@
 
 COPY protos/third_party/google/api/*.proto /protos/google/api/
 COPY docker/config/Makefile.protos /protos/google/api/Makefile.protos
-COPY protos/*.proto /protos/voltha/
 COPY --from=protos /protos/voltha/ /protos/voltha/
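+# local protos are copied after the ones from the protos stage so that
+# same-named files from this repo take precedence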
+COPY protos/*.proto /protos/voltha/
 COPY docker/config/Makefile.protos /protos/voltha/Makefile.protos
 
 WORKDIR /protos
diff --git a/python/protos/events.proto b/python/protos/events.proto
new file mode 100644
index 0000000..725bb8c
--- /dev/null
+++ b/python/protos/events.proto
@@ -0,0 +1,195 @@
+syntax = "proto3";
+
+option go_package = "github.com/opencord/voltha/protos/go/voltha";
+
+package voltha;
+
+import "meta.proto";
+import "google/api/annotations.proto";
+
+message ConfigEventType {
+    enum ConfigEventType {
+        add = 0; // A new config has been added
+        remove = 1; // A config has been removed
+        update = 2; // A config has been updated
+    }
+}
+
+message ConfigEvent {
+    ConfigEventType.ConfigEventType type = 1;
+
+    string hash = 2; // hash for this change, can be used for quick lookup
+    string data = 3; // the actual new data, in json format
+}
+
+message KpiEventType {
+    enum KpiEventType {
+        slice = 0; // slice: a set of path/metric data for same time-stamp
+        ts = 1; // time-series: array of data for same metric
+    }
+}
+
+/*
+ * Struct to convey a dictionary of metric metadata.
+ */
+message MetricMetaData {
+    string title = 1;   // Metric group or individual metric name
+    double ts = 2;      // UTC time-stamp of data (seconds since epoch) of
+                        // when the metric or metric group was collected.
+                        // If this is a 15-min historical group, it is the
+                        // time of the collection and reporting, not the
+                        // start or end of the 15-min group interval.
+
+    string logical_device_id = 3; // The ID of the VOLTHA logical device
+                                  // (equivalent to the DPID that ONOS has
+                                  // for the VOLTHA device, without the
+                                  // 'of:' prefix)
+    string serial_no = 4;         // The OLT, ONU, ... device serial number
+    string device_id = 5;         // The OLT, ONU, ... physical device ID
+
+    map<string, string> context = 6; // Name value pairs that provide additional
+                                     // context information on the metrics being
+                                     // reported.
+}
+
+/*
+ * Struct to convey a dictionary of metric->value pairs. Typically used in
+ * pure shared-timestamp or shared-timestamp + shared object prefix situations.
+ */
+message MetricValuePairs {
+
+    // Metric / value pairs.
+    map<string, float> metrics = 1;
+
+}
+
+/*
+ * Struct to group metadata for a metric (or group of metrics) with the key-value
+ * pairs of collected metrics
+ */
+message MetricInformation {
+    MetricMetaData metadata = 1;
+    map<string, float> metrics = 2;
+}
+
+/*
+ * Legacy KPI Event structure.  In mid-August, the KPI event format was updated
+ *                              to a more easily parsable format. See VOL-1140
+ *                              for more information.
+ */
+message KpiEvent {
+
+    KpiEventType.KpiEventType type = 1;
+
+    // Fields used when type == slice:
+
+    float ts = 2; // UTC time-stamp of data in slice mode (seconds since epoch)
+
+    map<string, MetricValuePairs> prefixes = 3;
+
+}
+
+message KpiEvent2 {
+    // Type of KPI Event
+    KpiEventType.KpiEventType type = 1;
+
+    // Fields used when type == slice:
+    double ts = 2;  // UTC time-stamp of data in slice mode (seconds since epoch)
+                    // of the time this entire KpiEvent was published to the kafka bus
+
+    repeated MetricInformation slice_data = 3;
+}
+
+/*
+ * Identifies the area of the system impacted by the alarm
+ */
+message AlarmEventType {
+    enum AlarmEventType {
+        COMMUNICATION = 0;
+        ENVIRONMENT = 1;
+        EQUIPMENT = 2;
+        SERVICE = 3;
+        PROCESSING = 4;
+        SECURITY = 5;
+    }
+}
+
+/*
+ * Identifies the functional category originating the alarm
+ */
+message AlarmEventCategory {
+    enum AlarmEventCategory {
+        PON = 0;
+        OLT = 1;
+        ONT = 2;
+        ONU = 3;
+        NNI = 4;
+    }
+}
+
+/*
+ * Active state of the alarm
+ */
+message AlarmEventState {
+    enum AlarmEventState {
+        RAISED = 0;
+        CLEARED = 1;
+    }
+}
+
+/*
+ * Identifies the overall impact of the alarm on the system
+ */
+message AlarmEventSeverity {
+    enum AlarmEventSeverity {
+        INDETERMINATE = 0;
+        WARNING = 1;
+        MINOR = 2;
+        MAJOR = 3;
+        CRITICAL = 4;
+    }
+}
+
+/*
+ * An alarm raised (or cleared) against a system resource
+ */
+message AlarmEvent {
+    // Unique ID for this alarm.  e.g. voltha.some_olt.1234
+    string id = 1;
+
+    // Refers to the area of the system impacted by the alarm
+    AlarmEventType.AlarmEventType type = 2;
+
+    // Refers to functional category of the alarm
+    AlarmEventCategory.AlarmEventCategory category = 3;
+
+    // Current active state of the alarm
+    AlarmEventState.AlarmEventState state = 4;
+
+    // Overall impact of the alarm on the system
+    AlarmEventSeverity.AlarmEventSeverity severity = 5;
+
+    // Timestamp at which the alarm was first raised
+    float raised_ts = 6;
+
+    // Timestamp at which the alarm was reported
+    float reported_ts = 7;
+
+    // Timestamp at which the alarm last changed after being raised
+    float changed_ts = 8;
+
+    // Identifier of the originating resource of the alarm
+    string resource_id = 9;
+
+    // Textual explanation of the alarm
+    string description = 10;
+
+    // Key/Value storage for extra information that may give context to the alarm
+    map<string, string> context = 11;
+
+    // logical device id
+    string logical_device_id = 12;
+
+    // Name identifying the type of the alarm
+    string alarm_type_name = 13;
+}
diff --git a/python/protos/openolt.proto b/python/protos/openolt.proto
index f6c1da1..ddc8f74 100644
--- a/python/protos/openolt.proto
+++ b/python/protos/openolt.proto
@@ -13,14 +13,8 @@
 // limitations under the License.
 
 syntax = "proto3";
-option go_package = "github.com/opencord/voltha-go/protos/voltha";
-
-package voltha;
-
+package openolt;
 import "google/api/annotations.proto";
-import "openflow_13.proto";
-
-
 
 service Openolt {
 
@@ -136,6 +130,20 @@
         };
     }
 
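+    // Provision or tear down T-CONTs (upstream transmission containers),
+    // including their scheduling and traffic shaping configuration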
+    rpc CreateTconts(Tconts) returns (Empty) {
+        option (google.api.http) = {
+            post: "/v1/CreateTconts"
+            body: "*"
+        };
+    }
+
+    rpc RemoveTconts(Tconts) returns (Empty) {
+        option (google.api.http) = {
+            post: "/v1/RemoveTconts"
+            body: "*"
+        };
+    }
+
     rpc EnableIndication(Empty) returns (stream Indication) {}
 }
 
@@ -209,6 +217,8 @@
     fixed32 intf_id = 1;
     fixed32 gemport_id = 2;
     fixed32 flow_id = 3;
+    fixed32 port_no = 6;
+    fixed64 cookie = 7;
     bytes pkt = 4;
 }
 
@@ -225,7 +235,6 @@
     fixed32 onu_id = 2;
     SerialNumber serial_number = 3;
     fixed32 pir = 4;   // peak information rate assigned to onu
-    fixed32 alloc_id = 5;
 }
 
 message OmciMsg {
@@ -237,6 +246,7 @@
 message OnuPacket {
     fixed32 intf_id = 1;
     fixed32 onu_id = 2;
+    fixed32 port_no = 4;
     bytes pkt = 3;
 }
 
@@ -250,6 +260,8 @@
     string model = 2;
     string hardware_version = 3;
     string firmware_version = 4;
+    string device_id = 16;
+    string device_serial_number = 17;
 
     // Total number of pon intf ports on the device
     fixed32 pon_ports = 12;
@@ -335,6 +347,7 @@
 message Flow {
     sfixed32 access_intf_id = 1;
     sfixed32 onu_id = 2;
+    sfixed32 uni_id = 11;
     fixed32 flow_id = 3;
     string flow_type = 4;	// upstream, downstream, broadcast, multicast
     sfixed32 alloc_id = 10;
@@ -343,6 +356,8 @@
     Classifier classifier = 7;
     Action action = 8;
     sfixed32 priority = 9;
+    fixed64 cookie = 12; // must be provided for any flow with trap_to_host action. Returned in PacketIndication
+    fixed32 port_no = 13; // must be provided for any flow with trap_to_host action. Returned in PacketIndication
 }
 
 message SerialNumber {
@@ -449,5 +464,93 @@
     fixed32 onu_id = 2;
 }
 
+enum Direction {
+    UPSTREAM = 0;
+    DOWNSTREAM = 1;
+    BIDIRECTIONAL = 2;
+}
+
+enum SchedulingPolicy {
+    WRR = 0;
+    StrictPriority = 1;
+    Hybrid = 2;
+}
+
+enum AdditionalBW {
+    AdditionalBW_None = 0;
+    AdditionalBW_NA = 1;
+    AdditionalBW_BestEffort = 2;
+    AdditionalBW_Auto = 3;
+}
+
+enum DiscardPolicy {
+    TailDrop = 0;
+    WTailDrop = 1;
+    Red = 2;
+    WRed = 3;
+}
+
+enum InferredAdditionBWIndication {
+    InferredAdditionBWIndication_None = 0;
+    InferredAdditionBWIndication_Assured = 1;
+    InferredAdditionBWIndication_BestEffort = 2;
+}
+
+message Scheduler {
+    Direction direction = 1;
+    AdditionalBW additional_bw = 2; // Valid only for "direction == Upstream".
+    fixed32 priority = 3;
+    fixed32 weight = 4;
+    SchedulingPolicy sched_policy = 5;
+}
+
+message TrafficShapingInfo {
+    fixed32 cir = 1;
+    fixed32 cbs = 2;
+    fixed32 pir = 3;
+    fixed32 pbs = 4;
+    fixed32 gir = 5; // only if "direction == Upstream"
+    InferredAdditionBWIndication add_bw_ind = 6; // only if "direction == Upstream"
+}
+
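+// T-CONT: a transmission container grouping an ONU's scheduler and traffic
+// shaping configuration; alloc_id applies to the upstream direction only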
+message Tcont {
+    Direction direction = 1;
+    fixed32 alloc_id = 2; // valid only if "direction == Upstream"
+    Scheduler scheduler = 3;
+    TrafficShapingInfo traffic_shaping_info = 4;
+}
+
+message Tconts {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    fixed32 uni_id = 4;
+    fixed32 port_no = 5;
+    repeated Tcont tconts = 3;
+}
+
+message TailDropDiscardConfig {
+    fixed32 queue_size = 1;
+}
+
+message RedDiscardConfig {
+    fixed32 min_threshold = 1;
+    fixed32 max_threshold = 2;
+    fixed32 max_probability = 3;
+}
+
+message WRedDiscardConfig {
+    RedDiscardConfig green = 1;
+    RedDiscardConfig yellow = 2;
+    RedDiscardConfig red = 3;
+}
+
+message DiscardConfig {
+    DiscardPolicy discard_policy = 1;
+    oneof discard_config {
+        TailDropDiscardConfig tail_drop_discard_config = 2;
+        RedDiscardConfig red_discard_config = 3;
+        WRedDiscardConfig wred_discard_config = 4;
+    }
+}
 
 message Empty {}