Initial commit moving openolt adapter from voltha-go to the new repo.
This version works with ponsim rather than openolt; this is temporary.
It is currently being updated to work with openolt.

Change-Id: I34a800c98f050140b367e2d474b7aa8b79f34b9a
Signed-off-by: William Kurkian <wkurkian@cisco.com>
diff --git a/python/common/__init__.py b/python/common/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/event_bus.py b/python/common/event_bus.py
new file mode 100644
index 0000000..e717c16
--- /dev/null
+++ b/python/common/event_bus.py
@@ -0,0 +1,194 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A simple internal pub/sub event bus with topics and filter-based registration.
+"""
+import re
+
+import structlog
+
+
+log = structlog.get_logger()
+
+
+class _Subscription(object):
+
+    __slots__ = ('bus', 'predicate', 'callback', 'topic')
+    def __init__(self, bus, predicate, callback, topic=None):
+        self.bus = bus
+        self.predicate = predicate
+        self.callback = callback
+        self.topic = topic
+
+
+class EventBus(object):
+
+    def __init__(self):
+        self.subscriptions = {}  # topic -> list of _Subscription objects
+                                 # topic None holds regexp based topic subs.
+        self.subs_topic_map = {} # to aid fast lookup when unsubscribing
+
+    def list_subscribers(self, topic=None):
+        if topic is None:
+            return sum(self.subscriptions.itervalues(), [])
+        else:
+            if topic in self.subscriptions:
+                return self.subscriptions[topic]
+            else:
+                return []
+
+    @staticmethod
+    def _get_topic_key(topic):
+        if isinstance(topic, str):
+            return topic
+        elif hasattr(topic, 'match'):
+            return None
+        else:
+            raise AttributeError('topic is neither a string nor a compiled regex')
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function signature def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        subscription = _Subscription(self, predicate, callback, topic)
+        topic_key = self._get_topic_key(topic)
+        self.subscriptions.setdefault(topic_key, []).append(subscription)
+        self.subs_topic_map[subscription] = topic_key
+        return subscription
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        topic_key = self.subs_topic_map[subscription]
+        self.subscriptions[topic_key].remove(subscription)
+
+    def publish(self, topic, msg):
+        """
+        Publish given message to all subscribers registered with topic taking
+        the predicate functions into account.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+        from copy import copy
+
+        def passes(msg, predicate):
+            try:
+                return predicate(msg)
+            except Exception, e:
+                return False  # failed predicate function treated as no match
+
+        # look up subscribers with explicit topic subscriptions; copy the list
+        # so that the regexp matches added below do not mutate the stored
+        # subscription list
+        subscribers = list(self.subscriptions.get(topic, []))
+
+        # add matching regexp topic subscribers
+        subscribers.extend(s for s in self.subscriptions.get(None, [])
+                           if s.topic.match(topic))
+
+        # iterate over a shallow-copy of subscribers
+        for candidate in copy(subscribers):
+            predicate = candidate.predicate
+            if predicate is None or passes(msg, predicate):
+                try:
+                    candidate.callback(topic, msg)
+                except Exception, e:
+                    log.exception('callback-failed', e=repr(e), topic=topic)
+
+
+
+default_bus = EventBus()
+
+
+class EventBusClient(object):
+    """
+    Primary interface to the EventBus. Usage:
+
+    Publish:
+    >>> events = EventBusClient()
+    >>> msg = dict(a=1, b='foo')
+    >>> events.publish('a.topic', msg)
+
+    Subscribe to get all messages on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event)
+
+    Subscribe to get messages matching predicate on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event, lambda msg: len(msg) < 100)
+
+    Use a DeferredQueue to buffer incoming messages
+    >>> queue = DeferredQueue()
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', lambda _, msg: queue.put(msg))
+
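+    Subscribe using a compiled regular expression as a topic filter (a brief
+    sketch based on the regexp topic support in EventBus above; the pattern
+    is illustrative):
+    >>> import re
+    >>> events = EventBusClient()
+    >>> events.subscribe(re.compile(r'a\..*'), got_event)
+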
+    """
+    def __init__(self, bus=None):
+        """
+        Obtain a client interface for the pub/sub event bus.
+        :param bus: An optional specific event bus, intended mainly for test
+        use. If not provided, the process default bus will be used, which is
+        the preferred usage (a process should not need more than one bus).
+        """
+        self.bus = bus or default_bus
+
+    def publish(self, topic, msg):
+        """
+        Publish given msg to given topic.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+        self.bus.publish(topic, msg)
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function with signature
+        def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        return self.bus.subscribe(topic, callback, predicate)
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        return self.bus.unsubscribe(subscription)
+
+    def list_subscribers(self, topic=None):
+        """
+        Return list of subscribers. If topic is provided, the list is filtered for
+        those subscribing to the topic.
+        :param topic: Optional topic
+        :return: List of subscriptions
+        """
+        return self.bus.list_subscribers(topic)
diff --git a/python/common/manhole.py b/python/common/manhole.py
new file mode 100644
index 0000000..c00c900
--- /dev/null
+++ b/python/common/manhole.py
@@ -0,0 +1,129 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import rlcompleter
+from pprint import pprint
+
+import structlog
+from twisted.conch import manhole_ssh
+from twisted.conch.manhole import ColoredManhole
+from twisted.conch.ssh import keys
+from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
+from twisted.cred.portal import Portal
+from twisted.internet import reactor
+
+log = structlog.get_logger()
+
+
+MANHOLE_SERVER_RSA_PRIVATE = './manhole_rsa_key'
+MANHOLE_SERVER_RSA_PUBLIC = './manhole_rsa_key.pub'
+
+
+def get_rsa_keys():
+    if not (os.path.exists(MANHOLE_SERVER_RSA_PUBLIC) and \
+                    os.path.exists(MANHOLE_SERVER_RSA_PRIVATE)):
+        # generate an RSA keypair
+        log.info('generate-rsa-keypair')
+        from Crypto.PublicKey import RSA
+        rsa_key = RSA.generate(1024)
+        public_key_str = rsa_key.publickey().exportKey(format='OpenSSH')
+        private_key_str = rsa_key.exportKey()
+
+        # save keys for next time
+        file(MANHOLE_SERVER_RSA_PUBLIC, 'w+b').write(public_key_str)
+        file(MANHOLE_SERVER_RSA_PRIVATE, 'w+b').write(private_key_str)
+        log.debug('saved-rsa-keypair', public=MANHOLE_SERVER_RSA_PUBLIC,
+                  private=MANHOLE_SERVER_RSA_PRIVATE)
+    else:
+        public_key_str = file(MANHOLE_SERVER_RSA_PUBLIC).read()
+        private_key_str = file(MANHOLE_SERVER_RSA_PRIVATE).read()
+    return public_key_str, private_key_str
+
+
+class ManholeWithCompleter(ColoredManhole):
+
+    def __init__(self, namespace):
+        namespace['manhole'] = self
+        super(ManholeWithCompleter, self).__init__(namespace)
+        self.last_tab = None
+        self.completer = rlcompleter.Completer(self.namespace)
+
+    def handle_TAB(self):
+        if self.last_tab != self.lineBuffer:
+            self.last_tab = self.lineBuffer
+            return
+
+        buffer = ''.join(self.lineBuffer)
+        completions = []
+        maxlen = 3
+        for c in xrange(1000):
+            candidate = self.completer.complete(buffer, c)
+            if not candidate:
+                break
+
+            if len(candidate) > maxlen:
+                maxlen = len(candidate)
+
+            completions.append(candidate)
+
+        if len(completions) == 1:
+            rest = completions[0][len(buffer):]
+            self.terminal.write(rest)
+            self.lineBufferIndex += len(rest)
+            self.lineBuffer.extend(rest)
+
+        elif len(completions):
+            maxlen += 3
+            numcols = self.width / maxlen
+            self.terminal.nextLine()
+            for idx, candidate in enumerate(completions):
+                self.terminal.write('%%-%ss' % maxlen % candidate)
+                if not ((idx + 1) % numcols):
+                    self.terminal.nextLine()
+            self.terminal.nextLine()
+            self.drawInputLine()
+
+
+class Manhole(object):
+
+    def __init__(self, port, pws, **kw):
+        kw.update(globals())
+        kw['pp'] = pprint
+
+        realm = manhole_ssh.TerminalRealm()
+        manhole = ManholeWithCompleter(kw)
+
+        def windowChanged(_, win_size):
+            manhole.terminalSize(*reversed(win_size[:2]))
+
+        realm.sessionFactory.windowChanged = windowChanged
+        realm.chainedProtocolFactory.protocolFactory = lambda _: manhole
+        portal = Portal(realm)
+        portal.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(**pws))
+        factory = manhole_ssh.ConchFactory(portal)
+        public_key_str, private_key_str = get_rsa_keys()
+        factory.publicKeys = {
+            'ssh-rsa': keys.Key.fromString(public_key_str)
+        }
+        factory.privateKeys = {
+            'ssh-rsa': keys.Key.fromString(private_key_str)
+        }
+        reactor.listenTCP(port, factory, interface='localhost')
+
+
+if __name__ == '__main__':
+    Manhole(12222, dict(admin='admin'))
+    reactor.run()
diff --git a/python/common/openflow/__init__.py b/python/common/openflow/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/openflow/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/openflow/utils.py b/python/common/openflow/utils.py
new file mode 100644
index 0000000..456ae06
--- /dev/null
+++ b/python/common/openflow/utils.py
@@ -0,0 +1,558 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import structlog
+
+from python.protos import openflow_13_pb2 as ofp
+from hashlib import md5
+
+log = structlog.get_logger()
+
+# aliases
+ofb_field = ofp.ofp_oxm_ofb_field
+action = ofp.ofp_action
+
+# OFPAT_* shortcuts
+OUTPUT = ofp.OFPAT_OUTPUT
+COPY_TTL_OUT = ofp.OFPAT_COPY_TTL_OUT
+COPY_TTL_IN = ofp.OFPAT_COPY_TTL_IN
+SET_MPLS_TTL = ofp.OFPAT_SET_MPLS_TTL
+DEC_MPLS_TTL = ofp.OFPAT_DEC_MPLS_TTL
+PUSH_VLAN = ofp.OFPAT_PUSH_VLAN
+POP_VLAN = ofp.OFPAT_POP_VLAN
+PUSH_MPLS = ofp.OFPAT_PUSH_MPLS
+POP_MPLS = ofp.OFPAT_POP_MPLS
+SET_QUEUE = ofp.OFPAT_SET_QUEUE
+GROUP = ofp.OFPAT_GROUP
+SET_NW_TTL = ofp.OFPAT_SET_NW_TTL
+NW_TTL = ofp.OFPAT_DEC_NW_TTL
+SET_FIELD = ofp.OFPAT_SET_FIELD
+PUSH_PBB = ofp.OFPAT_PUSH_PBB
+POP_PBB = ofp.OFPAT_POP_PBB
+EXPERIMENTER = ofp.OFPAT_EXPERIMENTER
+
+# OFPXMT_OFB_* shortcuts (incomplete)
+IN_PORT = ofp.OFPXMT_OFB_IN_PORT
+IN_PHY_PORT = ofp.OFPXMT_OFB_IN_PHY_PORT
+METADATA = ofp.OFPXMT_OFB_METADATA
+ETH_DST = ofp.OFPXMT_OFB_ETH_DST
+ETH_SRC = ofp.OFPXMT_OFB_ETH_SRC
+ETH_TYPE = ofp.OFPXMT_OFB_ETH_TYPE
+VLAN_VID = ofp.OFPXMT_OFB_VLAN_VID
+VLAN_PCP = ofp.OFPXMT_OFB_VLAN_PCP
+IP_DSCP = ofp.OFPXMT_OFB_IP_DSCP
+IP_ECN = ofp.OFPXMT_OFB_IP_ECN
+IP_PROTO = ofp.OFPXMT_OFB_IP_PROTO
+IPV4_SRC = ofp.OFPXMT_OFB_IPV4_SRC
+IPV4_DST = ofp.OFPXMT_OFB_IPV4_DST
+TCP_SRC = ofp.OFPXMT_OFB_TCP_SRC
+TCP_DST = ofp.OFPXMT_OFB_TCP_DST
+UDP_SRC = ofp.OFPXMT_OFB_UDP_SRC
+UDP_DST = ofp.OFPXMT_OFB_UDP_DST
+SCTP_SRC = ofp.OFPXMT_OFB_SCTP_SRC
+SCTP_DST = ofp.OFPXMT_OFB_SCTP_DST
+ICMPV4_TYPE = ofp.OFPXMT_OFB_ICMPV4_TYPE
+ICMPV4_CODE = ofp.OFPXMT_OFB_ICMPV4_CODE
+ARP_OP = ofp.OFPXMT_OFB_ARP_OP
+ARP_SPA = ofp.OFPXMT_OFB_ARP_SPA
+ARP_TPA = ofp.OFPXMT_OFB_ARP_TPA
+ARP_SHA = ofp.OFPXMT_OFB_ARP_SHA
+ARP_THA = ofp.OFPXMT_OFB_ARP_THA
+IPV6_SRC = ofp.OFPXMT_OFB_IPV6_SRC
+IPV6_DST = ofp.OFPXMT_OFB_IPV6_DST
+IPV6_FLABEL = ofp.OFPXMT_OFB_IPV6_FLABEL
+ICMPV6_TYPE = ofp.OFPXMT_OFB_ICMPV6_TYPE
+ICMPV6_CODE = ofp.OFPXMT_OFB_ICMPV6_CODE
+IPV6_ND_TARGET = ofp.OFPXMT_OFB_IPV6_ND_TARGET
+OFB_IPV6_ND_SLL = ofp.OFPXMT_OFB_IPV6_ND_SLL
+IPV6_ND_TLL = ofp.OFPXMT_OFB_IPV6_ND_TLL
+MPLS_LABEL = ofp.OFPXMT_OFB_MPLS_LABEL
+MPLS_TC = ofp.OFPXMT_OFB_MPLS_TC
+MPLS_BOS = ofp.OFPXMT_OFB_MPLS_BOS
+PBB_ISID = ofp.OFPXMT_OFB_PBB_ISID
+TUNNEL_ID = ofp.OFPXMT_OFB_TUNNEL_ID
+IPV6_EXTHDR = ofp.OFPXMT_OFB_IPV6_EXTHDR
+
+
+# ofp_action_* shortcuts
+
+def output(port, max_len=ofp.OFPCML_MAX):
+    return action(
+        type=OUTPUT,
+        output=ofp.ofp_action_output(port=port, max_len=max_len)
+    )
+
+
+def mpls_ttl(ttl):
+    return action(
+        type=SET_MPLS_TTL,
+        mpls_ttl=ofp.ofp_action_mpls_ttl(mpls_ttl=ttl)
+    )
+
+
+def push_vlan(eth_type):
+    return action(
+        type=PUSH_VLAN,
+        push=ofp.ofp_action_push(ethertype=eth_type)
+    )
+
+
+def pop_vlan():
+    return action(
+        type=POP_VLAN
+    )
+
+
+def pop_mpls(eth_type):
+    return action(
+        type=POP_MPLS,
+        pop_mpls=ofp.ofp_action_pop_mpls(ethertype=eth_type)
+    )
+
+
+def group(group_id):
+    return action(
+        type=GROUP,
+        group=ofp.ofp_action_group(group_id=group_id)
+    )
+
+
+def nw_ttl(nw_ttl):
+    return action(
+        type=NW_TTL,
+        nw_ttl=ofp.ofp_action_nw_ttl(nw_ttl=nw_ttl)
+    )
+
+
+def set_field(field):
+    return action(
+        type=SET_FIELD,
+        set_field=ofp.ofp_action_set_field(
+            field=ofp.ofp_oxm_field(
+                oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+                ofb_field=field))
+    )
+
+
+def experimenter(experimenter, data):
+    return action(
+        type=EXPERIMENTER,
+        experimenter=ofp.ofp_action_experimenter(
+            experimenter=experimenter, data=data)
+    )
+
+
+# ofb_field generators (incomplete set)
+
+def in_port(_in_port):
+    return ofb_field(type=IN_PORT, port=_in_port)
+
+
+def in_phy_port(_in_phy_port):
+    return ofb_field(type=IN_PHY_PORT, physical_port=_in_phy_port)
+
+
+def metadata(_table_metadata):
+    return ofb_field(type=METADATA, table_metadata=_table_metadata)
+
+
+def eth_dst(_eth_dst):
+    return ofb_field(type=ETH_DST, eth_dst=_eth_dst)
+
+
+def eth_src(_eth_src):
+    return ofb_field(type=ETH_SRC, eth_src=_eth_src)
+
+
+def eth_type(_eth_type):
+    return ofb_field(type=ETH_TYPE, eth_type=_eth_type)
+
+
+def vlan_vid(_vlan_vid):
+    return ofb_field(type=VLAN_VID, vlan_vid=_vlan_vid)
+
+
+def vlan_pcp(_vlan_pcp):
+    return ofb_field(type=VLAN_PCP, vlan_pcp=_vlan_pcp)
+
+
+def ip_dscp(_ip_dscp):
+    return ofb_field(type=IP_DSCP, ip_dscp=_ip_dscp)
+
+
+def ip_ecn(_ip_ecn):
+    return ofb_field(type=IP_ECN, ip_ecn=_ip_ecn)
+
+
+def ip_proto(_ip_proto):
+    return ofb_field(type=IP_PROTO, ip_proto=_ip_proto)
+
+
+def ipv4_src(_ipv4_src):
+    return ofb_field(type=IPV4_SRC, ipv4_src=_ipv4_src)
+
+
+def ipv4_dst(_ipv4_dst):
+    return ofb_field(type=IPV4_DST, ipv4_dst=_ipv4_dst)
+
+
+def tcp_src(_tcp_src):
+    return ofb_field(type=TCP_SRC, tcp_src=_tcp_src)
+
+
+def tcp_dst(_tcp_dst):
+    return ofb_field(type=TCP_DST, tcp_dst=_tcp_dst)
+
+
+def udp_src(_udp_src):
+    return ofb_field(type=UDP_SRC, udp_src=_udp_src)
+
+
+def udp_dst(_udp_dst):
+    return ofb_field(type=UDP_DST, udp_dst=_udp_dst)
+
+
+def sctp_src(_sctp_src):
+    return ofb_field(type=SCTP_SRC, sctp_src=_sctp_src)
+
+
+def sctp_dst(_sctp_dst):
+    return ofb_field(type=SCTP_DST, sctp_dst=_sctp_dst)
+
+
+def icmpv4_type(_icmpv4_type):
+    return ofb_field(type=ICMPV4_TYPE, icmpv4_type=_icmpv4_type)
+
+
+def icmpv4_code(_icmpv4_code):
+    return ofb_field(type=ICMPV4_CODE, icmpv4_code=_icmpv4_code)
+
+
+def arp_op(_arp_op):
+    return ofb_field(type=ARP_OP, arp_op=_arp_op)
+
+
+def arp_spa(_arp_spa):
+    return ofb_field(type=ARP_SPA, arp_spa=_arp_spa)
+
+
+def arp_tpa(_arp_tpa):
+    return ofb_field(type=ARP_TPA, arp_tpa=_arp_tpa)
+
+
+def arp_sha(_arp_sha):
+    return ofb_field(type=ARP_SHA, arp_sha=_arp_sha)
+
+
+def arp_tha(_arp_tha):
+    return ofb_field(type=ARP_THA, arp_tha=_arp_tha)
+
+
+def ipv6_src(_ipv6_src):
+    return ofb_field(type=IPV6_SRC, ipv6_src=_ipv6_src)
+
+
+def ipv6_dst(_ipv6_dst):
+    return ofb_field(type=IPV6_DST, ipv6_dst=_ipv6_dst)
+
+
+def ipv6_flabel(_ipv6_flabel):
+    return ofb_field(type=IPV6_FLABEL, ipv6_flabel=_ipv6_flabel)
+
+
+def icmpv6_type(_icmpv6_type):
+    return ofb_field(type=ICMPV6_TYPE, icmpv6_type=_icmpv6_type)
+
+
+def icmpv6_code(_icmpv6_code):
+    return ofb_field(type=ICMPV6_CODE, icmpv6_code=_icmpv6_code)
+
+
+def ipv6_nd_target(_ipv6_nd_target):
+    return ofb_field(type=IPV6_ND_TARGET, ipv6_nd_target=_ipv6_nd_target)
+
+
+def ofb_ipv6_nd_sll(_ofb_ipv6_nd_sll):
+    # the OFB_IPV6_ND_SLL value field is assumed to be ipv6_nd_ssl in the proto
+    return ofb_field(type=OFB_IPV6_ND_SLL, ipv6_nd_ssl=_ofb_ipv6_nd_sll)
+
+
+def ipv6_nd_tll(_ipv6_nd_tll):
+    return ofb_field(type=IPV6_ND_TLL, ipv6_nd_tll=_ipv6_nd_tll)
+
+
+def mpls_label(_mpls_label):
+    return ofb_field(type=MPLS_LABEL, mpls_label=_mpls_label)
+
+
+def mpls_tc(_mpls_tc):
+    return ofb_field(type=MPLS_TC, mpls_tc=_mpls_tc)
+
+
+def mpls_bos(_mpls_bos):
+    return ofb_field(type=MPLS_BOS, mpls_bos=_mpls_bos)
+
+
+def pbb_isid(_pbb_isid):
+    return ofb_field(type=PBB_ISID, pbb_isid=_pbb_isid)
+
+
+def tunnel_id(_tunnel_id):
+    return ofb_field(type=TUNNEL_ID, tunnel_id=_tunnel_id)
+
+
+def ipv6_exthdr(_ipv6_exthdr):
+    return ofb_field(type=IPV6_EXTHDR, ipv6_exthdr=_ipv6_exthdr)
+
+
+# frequently used extractors:
+
+def get_actions(flow):
+    """Extract list of ofp_action objects from flow spec object"""
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    # we have the following hard assumptions for now
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
+            return instruction.actions.actions
+
+
+def get_ofb_fields(flow):
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    assert flow.match.type == ofp.OFPMT_OXM
+    ofb_fields = []
+    for field in flow.match.oxm_fields:
+        assert field.oxm_class == ofp.OFPXMC_OPENFLOW_BASIC
+        ofb_fields.append(field.ofb_field)
+    return ofb_fields
+
+
+def get_out_port(flow):
+    for action in get_actions(flow):
+        if action.type == OUTPUT:
+            return action.output.port
+    return None
+
+
+def get_in_port(flow):
+    for field in get_ofb_fields(flow):
+        if field.type == IN_PORT:
+            return field.port
+    return None
+
+
+def get_goto_table_id(flow):
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_GOTO_TABLE:
+            return instruction.goto_table.table_id
+    return None
+
+
+def get_metadata(flow):
+    """Legacy get method (only wants the lower 32 bits)."""
+    for field in get_ofb_fields(flow):
+        if field.type == METADATA:
+            return field.table_metadata & 0xffffffff
+    return None
+
+
+def get_metadata_64_bit(flow):
+    for field in get_ofb_fields(flow):
+        if field.type == METADATA:
+            return field.table_metadata
+    return None
+
+
+def get_port_number_from_metadata(flow):
+    """
+    The port number (UNI on ONU) is in the lower 32-bits of metadata and
+    the inner_tag is in the upper 32-bits
+
+    This is set in the ONOS OltPipeline as a metadata field
+    """
+    md = get_metadata_64_bit(flow)
+
+    if md is None:
+        return None
+
+    if md <= 0xffffffff:
+        log.warn('onos-upgrade-suggested',
+                 metadata=md,
+                 message='Legacy MetaData detected from OltPipeline')
+        return md
+
+    return md & 0xffffffff
+
+
+def get_inner_tag_from_metadata(flow):
+    """
+    The port number (UNI on ONU) is in the lower 32-bits of metadata and
+    the inner_tag is in the upper 32-bits
+
+    This is set in the ONOS OltPipeline as a metadata field
+    """
+    md = get_metadata_64_bit(flow)
+
+    if md is None:
+        return None
+
+    if md <= 0xffffffff:
+        log.warn('onos-upgrade-suggested',
+                 metadata=md,
+                 message='Legacy MetaData detected from OltPipeline')
+        return md
+
+    return (md >> 32) & 0xffffffff
+
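+# A worked example of the metadata packing described above (illustrative
+# values only, not part of the original module). ONOS places the inner_tag in
+# the upper 32 bits and the UNI port number in the lower 32 bits:
+#
+#   md = (1024 << 32) | 16         # inner_tag=1024, uni port=16
+#   md & 0xffffffff                # -> 16   (get_port_number_from_metadata)
+#   (md >> 32) & 0xffffffff        # -> 1024 (get_inner_tag_from_metadata)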
+
+# test and extract next table and group information
+def has_next_table(flow):
+    return get_goto_table_id(flow) is not None
+
+
+def get_group(flow):
+    for action in get_actions(flow):
+        if action.type == GROUP:
+            return action.group.group_id
+    return None
+
+
+def has_group(flow):
+    return get_group(flow) is not None
+
+
+def mk_oxm_fields(match_fields):
+    oxm_fields = [
+        ofp.ofp_oxm_field(
+            oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+            ofb_field=field
+        ) for field in match_fields
+    ]
+
+    return oxm_fields
+
+
+def mk_instructions_from_actions(actions):
+    instructions_action = ofp.ofp_instruction_actions()
+    instructions_action.actions.extend(actions)
+    instruction = ofp.ofp_instruction(type=ofp.OFPIT_APPLY_ACTIONS,
+                                      actions=instructions_action)
+    return [instruction]
+
+
+def mk_simple_flow_mod(match_fields, actions, command=ofp.OFPFC_ADD,
+                       next_table_id=None, **kw):
+    """
+    Convenience function to generate an ofp_flow_mod message with an OXM BASIC
+    match composed from the match_fields, and a single APPLY_ACTIONS
+    instruction with a list of ofp_action objects.
+    :param match_fields: list(ofp_oxm_ofb_field)
+    :param actions: list(ofp_action)
+    :param command: one of OFPFC_*
+    :param next_table_id: optional table id for an added GOTO_TABLE instruction
+    :param kw: additional keyword-based params to ofp_flow_mod
+    :return: initialized ofp_flow_mod object
+    """
+    instructions = [
+        ofp.ofp_instruction(
+            type=ofp.OFPIT_APPLY_ACTIONS,
+            actions=ofp.ofp_instruction_actions(actions=actions)
+        )
+    ]
+    if next_table_id is not None:
+        instructions.append(ofp.ofp_instruction(
+            type=ofp.OFPIT_GOTO_TABLE,
+            goto_table=ofp.ofp_instruction_goto_table(table_id=next_table_id)
+        ))
+
+    return ofp.ofp_flow_mod(
+        command=command,
+        match=ofp.ofp_match(
+            type=ofp.OFPMT_OXM,
+            oxm_fields=[
+                ofp.ofp_oxm_field(
+                    oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
+                    ofb_field=field
+                ) for field in match_fields
+            ]
+        ),
+        instructions=instructions,
+        **kw
+    )
+
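+# Minimal usage sketch for mk_simple_flow_mod, kept as a comment so nothing is
+# executed on import (port numbers, EtherType and priority are illustrative):
+#
+#   flow_mod = mk_simple_flow_mod(
+#       match_fields=[in_port(1), eth_type(0x0800)],
+#       actions=[output(2)],
+#       priority=1000)
+#
+# The same keyword arguments can be passed to mk_flow_stat() below to obtain
+# the corresponding ofp_flow_stats entry with its hash-based id.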
+
+def mk_multicast_group_mod(group_id, buckets, command=ofp.OFPGC_ADD):
+    group = ofp.ofp_group_mod(
+        command=command,
+        type=ofp.OFPGT_ALL,
+        group_id=group_id,
+        buckets=buckets
+    )
+    return group
+
+
+def hash_flow_stats(flow):
+    """
+    Return unique 64-bit integer hash for flow covering the following
+    attributes: 'table_id', 'priority', 'flags', 'cookie', 'match', '_instruction_string'
+    """
+    _instruction_string = ""
+    for _instruction in flow.instructions:
+        _instruction_string += _instruction.SerializeToString()
+
+    hex = md5('{},{},{},{},{},{}'.format(
+        flow.table_id,
+        flow.priority,
+        flow.flags,
+        flow.cookie,
+        flow.match.SerializeToString(),
+        _instruction_string
+    )).hexdigest()
+    return int(hex[:16], 16)
+
+
+def flow_stats_entry_from_flow_mod_message(mod):
+    flow = ofp.ofp_flow_stats(
+        table_id=mod.table_id,
+        priority=mod.priority,
+        idle_timeout=mod.idle_timeout,
+        hard_timeout=mod.hard_timeout,
+        flags=mod.flags,
+        cookie=mod.cookie,
+        match=mod.match,
+        instructions=mod.instructions
+    )
+    flow.id = hash_flow_stats(flow)
+    return flow
+
+
+def group_entry_from_group_mod(mod):
+    group = ofp.ofp_group_entry(
+        desc=ofp.ofp_group_desc(
+            type=mod.type,
+            group_id=mod.group_id,
+            buckets=mod.buckets
+        ),
+        stats=ofp.ofp_group_stats(
+            group_id=mod.group_id
+            # TODO do we need to instantiate bucket bins?
+        )
+    )
+    return group
+
+
+def mk_flow_stat(**kw):
+    return flow_stats_entry_from_flow_mod_message(mk_simple_flow_mod(**kw))
+
+
+def mk_group_stat(**kw):
+    return group_entry_from_group_mod(mk_multicast_group_mod(**kw))
diff --git a/python/common/structlog_setup.py b/python/common/structlog_setup.py
new file mode 100644
index 0000000..cbbda89
--- /dev/null
+++ b/python/common/structlog_setup.py
@@ -0,0 +1,132 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Setting up proper logging for Voltha"""
+
+import logging
+import logging.config
+from collections import OrderedDict
+
+import structlog
+from structlog.stdlib import BoundLogger, INFO
+
+try:
+    from thread import get_ident as _get_ident
+except ImportError:
+    from dummy_thread import get_ident as _get_ident
+
+
+class StructuredLogRenderer(object):
+    def __call__(self, logger, name, event_dict):
+        # in order to keep the structured log data in event_dict and forward
+        # it as-is, we need to pass it into the logger framework as the first
+        # positional argument.
+        args = (event_dict,)
+        kwargs = {}
+        return args, kwargs
+
+
+class PlainRenderedOrderedDict(OrderedDict):
+    """Our special version of OrderedDict that renders into string as a dict,
+       to make the log stream output cleaner.
+    """
+    def __repr__(self, _repr_running={}):
+        'od.__repr__() <==> repr(od)'
+        call_key = id(self), _get_ident()
+        if call_key in _repr_running:
+            return '...'
+        _repr_running[call_key] = 1
+        try:
+            if not self:
+                return '{}'
+            return '{%s}' % ", ".join("%s: %s" % (k, v)
+                                      for k, v in self.items())
+        finally:
+            del _repr_running[call_key]
+
+
+def setup_logging(log_config, instance_id, verbosity_adjust=0):
+    """
+    Set up logging such that:
+    - The primary logging entry method is structlog
+      (see http://structlog.readthedocs.io/en/stable/index.html)
+    - By default, the logging backend is Python standard lib logger
+    """
+
+    def add_exc_info_flag_for_exception(_, name, event_dict):
+        if name == 'exception':
+            event_dict['exc_info'] = True
+        return event_dict
+
+    def add_instance_id(_, __, event_dict):
+        event_dict['instance_id'] = instance_id
+        return event_dict
+
+    # Configure standard logging
+    logging.config.dictConfig(log_config)
+    logging.root.level -= 10 * verbosity_adjust
+
+    processors = [
+        add_exc_info_flag_for_exception,
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+        add_instance_id,
+        StructuredLogRenderer(),
+    ]
+    structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(),
+                        context_class=PlainRenderedOrderedDict,
+                        wrapper_class=BoundLogger,
+                        processors=processors)
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("first-line")
+    return log
+
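+# Minimal usage sketch for setup_logging (illustrative only; a real deployment
+# would normally load this dict from its logging configuration file):
+#
+#   log_config = {
+#       'version': 1,
+#       'handlers': {'console': {'class': 'logging.StreamHandler'}},
+#       'root': {'handlers': ['console'], 'level': 'INFO'},
+#   }
+#   log = setup_logging(log_config, instance_id='openolt-1', verbosity_adjust=1)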
+
+def update_logging(instance_id, vcore_id):
+    """
+    Add the vcore id to the structured logger
+    :param instance_id: The assigned instance id
+    :param vcore_id: The assigned vcore id
+    :return: structured logger
+    """
+    def add_exc_info_flag_for_exception(_, name, event_dict):
+        if name == 'exception':
+            event_dict['exc_info'] = True
+        return event_dict
+
+    def add_instance_id(_, __, event_dict):
+        event_dict['instance_id'] = instance_id
+        return event_dict
+
+    def add_vcore_id(_, __, event_dict):
+        event_dict['vcore_id'] = vcore_id
+        return event_dict
+
+    processors = [
+        add_exc_info_flag_for_exception,
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+        add_instance_id,
+        add_vcore_id,
+        StructuredLogRenderer(),
+    ]
+    structlog.configure(processors=processors)
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("updated-logger")
+    return log
diff --git a/python/common/tech_profile/README.md b/python/common/tech_profile/README.md
new file mode 100644
index 0000000..12610a7
--- /dev/null
+++ b/python/common/tech_profile/README.md
@@ -0,0 +1,347 @@
+# Technology Profile Management
+## Overview
+Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is `/voltha/technology_profiles/<TECHNOLOGY>/<TID>`, where TID is the numeric ID of the technology profile and TECHNOLOGY specifies the technology being utilized by the adapter, e.g. xgspon. While the TECHNOLOGY key is a directory, the TID key should be set to the JSON data that represents the technology profile values.
+
+
+
+`NOTE`: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
+
+### Example:
+```sh
+/xgspon/64  {
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+```
+
+## Creating Technology Profiles
+A technology profile is a simple JSON object. This JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. jq can be a useful tool for validating a JSON object. Once a file is created with the JSON object, it can be stored in VOLTHA's key/value store using the standard etcd command line tool etcdctl or using an HTTP PUT operation using curl.
+
+Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster, you can access the etcd key/value store using kubectl via the pods named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it shouldn't matter which one is used.
+
+
+
+Assuming the Technology template is stored in a local file 4QueueHybridProfileMap1.json, the following commands could be used to `store` or `update` the technology template in the proper location in the etcd key/value store:
+```sh
+# Store a Technology template using etcdctl
+jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set /xgspon/64
+
+# Store a Technology template using curl
+curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/xgspon/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
+```
+
+In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compact the JSON. Using this tool is not necessary, and if you choose not to use the tool, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
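+
+For example, a profile file can be checked locally with jq before it is stored, or stored without jq by substituting cat, as noted above (the file name and key are the same illustrative ones used earlier):
+```sh
+# Validate and pretty print the profile; jq exits non-zero on invalid JSON
+jq . 4QueueHybridProfileMap1.json
+
+# Store the profile without jq, using cat instead
+cat 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set /xgspon/64
+```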
+
+
+
+## Listing Technical Profiles for a given Technology
+While both curl and etcdctl (via kubectl) can be used to list or view the available Technology profiles, etcdctl is easier, and thus will be used in the examples. For listing Technology profiles, etcdctl ls is used. It can be used in conjunction with the -r option to recursively list profiles.
+```sh
+# List all the Technology profiles for a Technology
+kubectl exec -i etcd-cluster-0000 -- etcdctl ls /xgspon
+
+# Example output
+/xgspon/64
+/xgspon/65
+```
+
+A specific Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes and is not required.)
+```sh
+# Display a specified Technology profile, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get /xgspon/64 | jq .
+
+# Example output
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+```
+
+## Deleting Technology Profiles
+A technology profile or a technology profile tree can be removed using etcdctl rm.
+
+```sh
+# Remove a specific technology profile
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm /xgspon/64
+
+# Remove all technology profile instances associated with Technology xgspon and ID 64 (including the profile ID key)
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r /xgspon/64
+```
+
+## Reference
+https://wiki.opencord.org/display/CORD/Technology+Profile+Management
+
diff --git a/python/common/tech_profile/__init__.py b/python/common/tech_profile/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/tech_profile/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/tech_profile/tech_profile.py b/python/common/tech_profile/tech_profile.py
new file mode 100644
index 0000000..abea364
--- /dev/null
+++ b/python/common/tech_profile/tech_profile.py
@@ -0,0 +1,583 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json
+import ast
+from collections import namedtuple
+import structlog
+from enum import Enum
+
+from voltha.core.config.config_backend import ConsulStore
+from voltha.core.config.config_backend import EtcdStore
+from voltha.registry import registry
+from voltha.adapters.openolt.protos import openolt_pb2
+
+# logger
+log = structlog.get_logger()
+
+DEFAULT_TECH_PROFILE_TABLE_ID = 64
+
+# Enums used while creating TechProfileInstance
+Direction = Enum('Direction', ['UPSTREAM', 'DOWNSTREAM', 'BIDIRECTIONAL'],
+                 start=0)
+SchedulingPolicy = Enum('SchedulingPolicy',
+                        ['WRR', 'StrictPriority', 'Hybrid'], start=0)
+AdditionalBW = Enum('AdditionalBW', ['None', 'NA', 'BestEffort', 'Auto'],
+                    start=0)
+DiscardPolicy = Enum('DiscardPolicy',
+                     ['TailDrop', 'WTailDrop', 'RED', 'WRED'], start=0)
+InferredAdditionBWIndication = Enum('InferredAdditionBWIndication',
+                                    ['None', 'NoneAssured', 'BestEffort'],
+                                    start=0)
+
+
+class InstanceControl(object):
+    # Default value constants
+    ONU_DEFAULT_INSTANCE = 'multi-instance'
+    UNI_DEFAULT_INSTANCE = 'single-instance'
+    DEFAULT_NUM_GEM_PORTS = 1
+    DEFAULT_GEM_PAYLOAD_SIZE = 'auto'
+
+    def __init__(self, onu=ONU_DEFAULT_INSTANCE,
+                 uni=UNI_DEFAULT_INSTANCE,
+                 num_gem_ports=DEFAULT_NUM_GEM_PORTS,
+                 max_gem_payload_size=DEFAULT_GEM_PAYLOAD_SIZE):
+        self.onu = onu
+        self.uni = uni
+        self.num_gem_ports = num_gem_ports
+        self.max_gem_payload_size = max_gem_payload_size
+
+
+class Scheduler(object):
+    # Default value constants
+    DEFAULT_ADDITIONAL_BW = 'auto'
+    DEFAULT_PRIORITY = 0
+    DEFAULT_WEIGHT = 0
+    DEFAULT_Q_SCHED_POLICY = 'hybrid'
+
+    def __init__(self, direction, additional_bw=DEFAULT_ADDITIONAL_BW,
+                 priority=DEFAULT_PRIORITY,
+                 weight=DEFAULT_WEIGHT,
+                 q_sched_policy=DEFAULT_Q_SCHED_POLICY):
+        self.direction = direction
+        self.additional_bw = additional_bw
+        self.priority = priority
+        self.weight = weight
+        self.q_sched_policy = q_sched_policy
+
+
+class GemPortAttribute(object):
+    # Default value constants
+    DEFAULT_AES_ENCRYPTION = 'True'
+    DEFAULT_PRIORITY_Q = 0
+    DEFAULT_WEIGHT = 0
+    DEFAULT_MAX_Q_SIZE = 'auto'
+    DEFAULT_DISCARD_POLICY = DiscardPolicy.TailDrop.name
+
+    def __init__(self, pbit_map, discard_config,
+                 aes_encryption=DEFAULT_AES_ENCRYPTION,
+                 scheduling_policy=SchedulingPolicy.WRR.name,
+                 priority_q=DEFAULT_PRIORITY_Q,
+                 weight=DEFAULT_WEIGHT,
+                 max_q_size=DEFAULT_MAX_Q_SIZE,
+                 discard_policy=DiscardPolicy.TailDrop.name):
+        self.max_q_size = max_q_size
+        self.pbit_map = pbit_map
+        self.aes_encryption = aes_encryption
+        self.scheduling_policy = scheduling_policy
+        self.priority_q = priority_q
+        self.weight = weight
+        self.discard_policy = discard_policy
+        self.discard_config = discard_config
+
+
+class DiscardConfig(object):
+    # Default value constants
+    DEFAULT_MIN_THRESHOLD = 0
+    DEFAULT_MAX_THRESHOLD = 0
+    DEFAULT_MAX_PROBABILITY = 0
+
+    def __init__(self, min_threshold=DEFAULT_MIN_THRESHOLD,
+                 max_threshold=DEFAULT_MAX_THRESHOLD,
+                 max_probability=DEFAULT_MAX_PROBABILITY):
+        self.min_threshold = min_threshold
+        self.max_threshold = max_threshold
+        self.max_probability = max_probability
+
+
+class TechProfile(object):
+    # Constants used in default tech profile
+    DEFAULT_TECH_PROFILE_NAME = 'Default_1tcont_1gem_Profile'
+    DEFAULT_VERSION = 1.0
+    DEFAULT_GEMPORTS_COUNT = 1
+    pbits = ['0b11111111']
+
+    # Tech profile path prefix in kv store
+    KV_STORE_TECH_PROFILE_PATH_PREFIX = 'service/voltha/technology_profiles'
+
+    # Tech profile path in kv store
+    TECH_PROFILE_PATH = '{}/{}'  # <technology>/<table_id>
+
+    # Tech profile instance path in kv store
+    # Format: <technology>/<table_id>/<uni_port_name>
+    TECH_PROFILE_INSTANCE_PATH = '{}/{}/{}'
+
+    # Tech-Profile JSON String Keys
+    NAME = 'name'
+    PROFILE_TYPE = 'profile_type'
+    VERSION = 'version'
+    NUM_GEM_PORTS = 'num_gem_ports'
+    INSTANCE_CONTROL = 'instance_control'
+    US_SCHEDULER = 'us_scheduler'
+    DS_SCHEDULER = 'ds_scheduler'
+    UPSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'upstream_gem_port_attribute_list'
+    DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'downstream_gem_port_attribute_list'
+    ONU = 'onu'
+    UNI = 'uni'
+    MAX_GEM_PAYLOAD_SIZE = 'max_gem_payload_size'
+    DIRECTION = 'direction'
+    ADDITIONAL_BW = 'additional_bw'
+    PRIORITY = 'priority'
+    Q_SCHED_POLICY = 'q_sched_policy'
+    WEIGHT = 'weight'
+    PBIT_MAP = 'pbit_map'
+    DISCARD_CONFIG = 'discard_config'
+    MAX_THRESHOLD = 'max_threshold'
+    MIN_THRESHOLD = 'min_threshold'
+    MAX_PROBABILITY = 'max_probability'
+    DISCARD_POLICY = 'discard_policy'
+    PRIORITY_Q = 'priority_q'
+    SCHEDULING_POLICY = 'scheduling_policy'
+    MAX_Q_SIZE = 'max_q_size'
+    AES_ENCRYPTION = 'aes_encryption'
+
+    def __init__(self, resource_mgr):
+        try:
+            self.args = registry('main').get_args()
+            self.resource_mgr = resource_mgr
+
+            if self.args.backend == 'etcd':
+                # KV store's IP Address and PORT
+                host, port = self.args.etcd.split(':', 1)
+                self._kv_store = EtcdStore(
+                    host, port, TechProfile.
+                    KV_STORE_TECH_PROFILE_PATH_PREFIX)
+            elif self.args.backend == 'consul':
+                # KV store's IP Address and PORT
+                host, port = self.args.consul.split(':', 1)
+                self._kv_store = ConsulStore(
+                    host, port, TechProfile.
+                    KV_STORE_TECH_PROFILE_PATH_PREFIX)
+
+            # self.tech_profile_instance_store = dict()
+        except Exception as e:
+            log.exception("exception-in-init")
+            raise Exception(e)
+
+    class DefaultTechProfile(object):
+        def __init__(self, name, **kwargs):
+            self.name = name
+            self.profile_type = kwargs[TechProfile.PROFILE_TYPE]
+            self.version = kwargs[TechProfile.VERSION]
+            self.num_gem_ports = kwargs[TechProfile.NUM_GEM_PORTS]
+            self.instance_control = kwargs[TechProfile.INSTANCE_CONTROL]
+            self.us_scheduler = kwargs[TechProfile.US_SCHEDULER]
+            self.ds_scheduler = kwargs[TechProfile.DS_SCHEDULER]
+            self.upstream_gem_port_attribute_list = kwargs[
+                TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+            self.downstream_gem_port_attribute_list = kwargs[
+                TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+
+        def to_json(self):
+            return json.dumps(self, default=lambda o: o.__dict__,
+                              indent=4)
+
+    def get_tp_path(self, table_id, uni_port_name):
+        return TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
+            self.resource_mgr.technology, table_id, uni_port_name)
+
+    def create_tech_profile_instance(self, table_id, uni_port_name, intf_id):
+        tech_profile_instance = None
+        try:
+            # Get tech profile from kv store
+            tech_profile = self._get_tech_profile_from_kv_store(table_id)
+            path = self.get_tp_path(table_id, uni_port_name)
+
+            if tech_profile is not None:
+                tech_profile = self._get_tech_profile(tech_profile)
+                log.debug(
+                    "Created-tech-profile-instance-with-values-from-kvstore")
+            else:
+                tech_profile = self._default_tech_profile()
+                log.debug(
+                    "Created-tech-profile-instance-with-default-values")
+
+            tech_profile_instance = TechProfileInstance(
+                uni_port_name, tech_profile, self.resource_mgr, intf_id)
+            self._add_tech_profile_instance(path,
+                                            tech_profile_instance.to_json())
+        except Exception as e:
+            log.exception("Create-tech-profile-instance-failed", exception=e)
+
+        return tech_profile_instance
+
+    def get_tech_profile_instance(self, table_id, uni_port_name):
+        # path to fetch tech profile instance json from kv store
+        path = TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
+            self.resource_mgr.technology, table_id, uni_port_name)
+
+        try:
+            tech_profile_instance = self._kv_store[path]
+            log.debug("Tech-profile-instance-present-in-kvstore", path=path,
+                      tech_profile_instance=tech_profile_instance)
+
+            # Parse JSON into an object with attributes corresponding to dict keys.
+            tech_profile_instance = json.loads(tech_profile_instance,
+                                               object_hook=lambda d:
+                                               namedtuple('tech_profile_instance',
+                                                          d.keys())(*d.values()))
+            log.debug("Tech-profile-instance-after-json-to-object-conversion", path=path,
+                      tech_profile_instance=tech_profile_instance)
+            return tech_profile_instance
+        except BaseException as e:
+            log.debug("Tech-profile-instance-not-present-in-kvstore",
+                      path=path, tech_profile_instance=None, exception=e)
+            return None
+
+    def delete_tech_profile_instance(self, tp_path):
+
+        try:
+            del self._kv_store[tp_path]
+            log.debug("Delete-tech-profile-instance-success", path=tp_path)
+            return True
+        except Exception as e:
+            log.debug("Delete-tech-profile-instance-failed", path=tp_path,
+                      exception=e)
+            return False
+
+    def _get_tech_profile_from_kv_store(self, table_id):
+        """
+        Get tech profile from kv store.
+
+        :param table_id: reference to get tech profile
+        :return: tech profile if present in kv store else None
+        """
+        # get tech profile from kv store
+        path = TechProfile.TECH_PROFILE_PATH.format(self.resource_mgr.technology,
+                                                    table_id)
+        try:
+            tech_profile = self._kv_store[path]
+            if tech_profile != '':
+                log.debug("Get-tech-profile-success", tech_profile=tech_profile)
+                return json.loads(tech_profile)
+                # return ast.literal_eval(tech_profile)
+        except KeyError as e:
+            log.info("Get-tech-profile-failed", exception=e)
+            return None
+
+    def _default_tech_profile(self):
+        # Default tech profile
+        upstream_gem_port_attribute_list = list()
+        downstream_gem_port_attribute_list = list()
+        for pbit in TechProfile.pbits:
+            upstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=pbit,
+                                 discard_config=DiscardConfig()))
+            downstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=pbit,
+                                 discard_config=DiscardConfig()))
+
+        return TechProfile.DefaultTechProfile(
+            TechProfile.DEFAULT_TECH_PROFILE_NAME,
+            profile_type=self.resource_mgr.technology,
+            version=TechProfile.DEFAULT_VERSION,
+            num_gem_ports=TechProfile.DEFAULT_GEMPORTS_COUNT,
+            instance_control=InstanceControl(),
+            us_scheduler=Scheduler(direction=Direction.UPSTREAM.name),
+            ds_scheduler=Scheduler(direction=Direction.DOWNSTREAM.name),
+            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
+            downstream_gem_port_attribute_list=
+            downstream_gem_port_attribute_list)
+
+    @staticmethod
+    def _get_tech_profile(tech_profile):
+        # Tech profile fetched from kv store
+        instance_control = tech_profile[TechProfile.INSTANCE_CONTROL]
+        instance_control = InstanceControl(
+            onu=instance_control[TechProfile.ONU],
+            uni=instance_control[TechProfile.UNI],
+            max_gem_payload_size=instance_control[
+                TechProfile.MAX_GEM_PAYLOAD_SIZE])
+
+        us_scheduler = tech_profile[TechProfile.US_SCHEDULER]
+        us_scheduler = Scheduler(direction=us_scheduler[TechProfile.DIRECTION],
+                                 additional_bw=us_scheduler[
+                                     TechProfile.ADDITIONAL_BW],
+                                 priority=us_scheduler[TechProfile.PRIORITY],
+                                 weight=us_scheduler[TechProfile.WEIGHT],
+                                 q_sched_policy=us_scheduler[
+                                     TechProfile.Q_SCHED_POLICY])
+        ds_scheduler = tech_profile[TechProfile.DS_SCHEDULER]
+        ds_scheduler = Scheduler(direction=ds_scheduler[TechProfile.DIRECTION],
+                                 additional_bw=ds_scheduler[
+                                     TechProfile.ADDITIONAL_BW],
+                                 priority=ds_scheduler[TechProfile.PRIORITY],
+                                 weight=ds_scheduler[TechProfile.WEIGHT],
+                                 q_sched_policy=ds_scheduler[
+                                     TechProfile.Q_SCHED_POLICY])
+
+        upstream_gem_port_attribute_list = list()
+        downstream_gem_port_attribute_list = list()
+        us_gemport_attr_list = tech_profile[
+            TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+        for i in range(len(us_gemport_attr_list)):
+            upstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=us_gemport_attr_list[i][TechProfile.PBIT_MAP],
+                                 discard_config=DiscardConfig(
+                                     max_threshold=
+                                     us_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_THRESHOLD],
+                                     min_threshold=
+                                     us_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MIN_THRESHOLD],
+                                     max_probability=
+                                     us_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_PROBABILITY]),
+                                 discard_policy=us_gemport_attr_list[i][
+                                     TechProfile.DISCARD_POLICY],
+                                 priority_q=us_gemport_attr_list[i][
+                                     TechProfile.PRIORITY_Q],
+                                 weight=us_gemport_attr_list[i][TechProfile.WEIGHT],
+                                 scheduling_policy=us_gemport_attr_list[i][
+                                     TechProfile.SCHEDULING_POLICY],
+                                 max_q_size=us_gemport_attr_list[i][
+                                     TechProfile.MAX_Q_SIZE],
+                                 aes_encryption=us_gemport_attr_list[i][
+                                     TechProfile.AES_ENCRYPTION]))
+
+        ds_gemport_attr_list = tech_profile[
+            TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+        for i in range(len(ds_gemport_attr_list)):
+            downstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=ds_gemport_attr_list[i][TechProfile.PBIT_MAP],
+                                 discard_config=DiscardConfig(
+                                     max_threshold=
+                                     ds_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_THRESHOLD],
+                                     min_threshold=
+                                     ds_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MIN_THRESHOLD],
+                                     max_probability=
+                                     ds_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_PROBABILITY]),
+                                 discard_policy=ds_gemport_attr_list[i][
+                                     TechProfile.DISCARD_POLICY],
+                                 priority_q=ds_gemport_attr_list[i][
+                                     TechProfile.PRIORITY_Q],
+                                 weight=ds_gemport_attr_list[i][TechProfile.WEIGHT],
+                                 scheduling_policy=ds_gemport_attr_list[i][
+                                     TechProfile.SCHEDULING_POLICY],
+                                 max_q_size=ds_gemport_attr_list[i][
+                                     TechProfile.MAX_Q_SIZE],
+                                 aes_encryption=ds_gemport_attr_list[i][
+                                     TechProfile.AES_ENCRYPTION]))
+
+        return TechProfile.DefaultTechProfile(
+            tech_profile[TechProfile.NAME],
+            profile_type=tech_profile[TechProfile.PROFILE_TYPE],
+            version=tech_profile[TechProfile.VERSION],
+            num_gem_ports=tech_profile[TechProfile.NUM_GEM_PORTS],
+            instance_control=instance_control,
+            us_scheduler=us_scheduler,
+            ds_scheduler=ds_scheduler,
+            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
+            downstream_gem_port_attribute_list=
+            downstream_gem_port_attribute_list)
+
+    def _add_tech_profile_instance(self, path, tech_profile_instance):
+        """
+        Add tech profile to kv store.
+
+        :param path: path to add tech profile
+        :param tech_profile_instance: tech profile instance to be added
+        """
+        try:
+            self._kv_store[path] = str(tech_profile_instance)
+            log.debug("Add-tech-profile-instance-success", path=path,
+                      tech_profile_instance=tech_profile_instance)
+            return True
+        except BaseException as e:
+            log.exception("Add-tech-profile-instance-failed", path=path,
+                          tech_profile_instance=tech_profile_instance,
+                          exception=e)
+        return False
+
+    @staticmethod
+    def get_us_scheduler(tech_profile_instance):
+        # upstream scheduler
+        us_scheduler = openolt_pb2.Scheduler(
+            direction=TechProfile.get_parameter(
+                'direction', tech_profile_instance.us_scheduler.
+                    direction),
+            additional_bw=TechProfile.get_parameter(
+                'additional_bw', tech_profile_instance.
+                    us_scheduler.additional_bw),
+            priority=tech_profile_instance.us_scheduler.priority,
+            weight=tech_profile_instance.us_scheduler.weight,
+            sched_policy=TechProfile.get_parameter(
+                'sched_policy', tech_profile_instance.
+                    us_scheduler.q_sched_policy))
+
+        return us_scheduler
+
+    @staticmethod
+    def get_ds_scheduler(tech_profile_instance):
+        ds_scheduler = openolt_pb2.Scheduler(
+            direction=TechProfile.get_parameter(
+                'direction', tech_profile_instance.ds_scheduler.
+                    direction),
+            additional_bw=TechProfile.get_parameter(
+                'additional_bw', tech_profile_instance.
+                    ds_scheduler.additional_bw),
+            priority=tech_profile_instance.ds_scheduler.priority,
+            weight=tech_profile_instance.ds_scheduler.weight,
+            sched_policy=TechProfile.get_parameter(
+                'sched_policy', tech_profile_instance.ds_scheduler.
+                    q_sched_policy))
+
+        return ds_scheduler
+
+    @staticmethod
+    def get_tconts(tech_profile_instance, us_scheduler=None, ds_scheduler=None):
+        if us_scheduler is None:
+            us_scheduler = TechProfile.get_us_scheduler(tech_profile_instance)
+        if ds_scheduler is None:
+            ds_scheduler = TechProfile.get_ds_scheduler(tech_profile_instance)
+
+        tconts = [openolt_pb2.Tcont(direction=TechProfile.get_parameter(
+            'direction',
+            tech_profile_instance.
+                us_scheduler.direction),
+            alloc_id=tech_profile_instance.
+                us_scheduler.alloc_id,
+            scheduler=us_scheduler),
+            openolt_pb2.Tcont(direction=TechProfile.get_parameter(
+                'direction',
+                tech_profile_instance.
+                    ds_scheduler.direction),
+                alloc_id=tech_profile_instance.
+                    ds_scheduler.alloc_id,
+                scheduler=ds_scheduler)]
+
+        return tconts
+
+    @staticmethod
+    def get_parameter(param_type, param_value):
+        parameter = None
+        try:
+            if param_type == 'direction':
+                for direction in openolt_pb2.Direction.keys():
+                    if param_value == direction:
+                        parameter = direction
+            elif param_type == 'discard_policy':
+                for discard_policy in openolt_pb2.DiscardPolicy.keys():
+                    if param_value == discard_policy:
+                        parameter = discard_policy
+            elif param_type == 'sched_policy':
+                for sched_policy in openolt_pb2.SchedulingPolicy.keys():
+                    if param_value == sched_policy:
+                        parameter = sched_policy
+            elif param_type == 'additional_bw':
+                for bw_component in openolt_pb2.AdditionalBW.keys():
+                    if param_value == bw_component:
+                        parameter = bw_component
+        except BaseException as e:
+            log.exception("get-parameter-failed", exception=e)
+        return parameter
+
+
+class TechProfileInstance(object):
+    def __init__(self, subscriber_identifier, tech_profile, resource_mgr,
+                 intf_id, num_of_tconts=1):
+        if tech_profile is not None:
+            self.subscriber_identifier = subscriber_identifier
+            self.num_of_tconts = num_of_tconts
+            self.num_of_gem_ports = tech_profile.num_gem_ports
+            self.name = tech_profile.name
+            self.profile_type = tech_profile.profile_type
+            self.version = tech_profile.version
+            self.instance_control = tech_profile.instance_control
+
+            # TODO: Fixed num_of_tconts to 1 per TP Instance.
+            # This may change in future
+            assert (num_of_tconts == 1)
+            # Get alloc id and gemport id using resource manager
+            alloc_id = resource_mgr.get_resource_id(intf_id,
+                                                    'ALLOC_ID',
+                                                    num_of_tconts)
+            gem_ports = resource_mgr.get_resource_id(intf_id,
+                                                     'GEMPORT_ID',
+                                                     self.num_of_gem_ports)
+
+            gemport_list = list()
+            if isinstance(gem_ports, int):
+                gemport_list.append(gem_ports)
+            elif isinstance(gem_ports, list):
+                for gem in gem_ports:
+                    gemport_list.append(gem)
+            else:
+                raise Exception("invalid-type")
+
+            self.us_scheduler = TechProfileInstance.IScheduler(
+                alloc_id, tech_profile.us_scheduler)
+            self.ds_scheduler = TechProfileInstance.IScheduler(
+                alloc_id, tech_profile.ds_scheduler)
+
+            self.upstream_gem_port_attribute_list = list()
+            self.downstream_gem_port_attribute_list = list()
+            for i in range(self.num_of_gem_ports):
+                self.upstream_gem_port_attribute_list.append(
+                    TechProfileInstance.IGemPortAttribute(
+                        gemport_list[i],
+                        tech_profile.upstream_gem_port_attribute_list[
+                            i]))
+                self.downstream_gem_port_attribute_list.append(
+                    TechProfileInstance.IGemPortAttribute(
+                        gemport_list[i],
+                        tech_profile.downstream_gem_port_attribute_list[
+                            i]))
+
+    class IScheduler(Scheduler):
+        def __init__(self, alloc_id, scheduler):
+            super(TechProfileInstance.IScheduler, self).__init__(
+                scheduler.direction, scheduler.additional_bw,
+                scheduler.priority,
+                scheduler.weight, scheduler.q_sched_policy)
+            self.alloc_id = alloc_id
+
+    class IGemPortAttribute(GemPortAttribute):
+        def __init__(self, gemport_id, gem_port_attribute):
+            super(TechProfileInstance.IGemPortAttribute, self).__init__(
+                gem_port_attribute.pbit_map, gem_port_attribute.discard_config,
+                gem_port_attribute.aes_encryption,
+                gem_port_attribute.scheduling_policy,
+                gem_port_attribute.priority_q, gem_port_attribute.weight,
+                gem_port_attribute.max_q_size,
+                gem_port_attribute.discard_policy)
+            self.gemport_id = gemport_id
+
+    def to_json(self):
+        return json.dumps(self, default=lambda o: o.__dict__,
+                          indent=4)
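+
+# Illustrative usage sketch (the manager instance name `tp_mgr`, the table id 64
+# and the `uni_port_name` value are hypothetical; only the method signatures
+# defined above are assumed):
+#   tp_instance = tp_mgr.get_tech_profile_instance(64, uni_port_name)
+#   if tp_instance is not None:
+#       tconts = TechProfile.get_tconts(tp_instance)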
diff --git a/python/common/utils/__init__.py b/python/common/utils/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/common/utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/common/utils/asleep.py b/python/common/utils/asleep.py
new file mode 100644
index 0000000..10d1ce3
--- /dev/null
+++ b/python/common/utils/asleep.py
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+""" Async sleep (asleep) method and other twisted goodies """
+
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+
+
+def asleep(dt):
+    """
+    Async (event driven) wait for given time period (in seconds)
+    :param dt: Delay in seconds
+    :return: Deferred to be fired with value None when time expires.
+    """
+    d = Deferred()
+    reactor.callLater(dt, lambda: d.callback(None))
+    return d
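+
+# Minimal usage sketch (illustrative only): inside an inlineCallbacks generator,
+# asleep() pauses without blocking the reactor.
+#   from twisted.internet.defer import inlineCallbacks
+#   @inlineCallbacks
+#   def poll_later():
+#       yield asleep(1.5)   # resumes roughly 1.5 seconds later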
diff --git a/python/common/utils/consulhelpers.py b/python/common/utils/consulhelpers.py
new file mode 100644
index 0000000..853143b
--- /dev/null
+++ b/python/common/utils/consulhelpers.py
@@ -0,0 +1,178 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some consul related convenience functions
+"""
+
+from structlog import get_logger
+from consul import Consul
+from random import randint
+from nethelpers import get_my_primary_local_ipv4
+
+log = get_logger()
+
+
+def connect_to_consult(consul_endpoint):
+    log.debug('getting-service-endpoint', consul=consul_endpoint)
+
+    host = consul_endpoint.split(':')[0].strip()
+    port = int(consul_endpoint.split(':')[1].strip())
+
+    return Consul(host=host, port=port)
+
+
+def verify_all_services_healthy(consul_endpoint, service_name=None,
+                                number_of_expected_services=None):
+    """
+    Verify in consul whether the registered services are healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of the service to check, optional
+    :param number_of_expected_services: number of services to check for, optional
+    :return: True if healthy, False otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+                    len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
+def get_all_services(consul_endpoint):
+    log.debug('getting-service-verify-health')
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.services()
+
+    return services
+
+
+def get_all_instances_of_service(consul_endpoint, service_name):
+    log.debug('getting-all-instances-of-service', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    for service in services:
+        log.debug('service',
+                  name=service['ServiceName'],
+                  serviceid=service['ServiceID'],
+                  serviceport=service['ServicePort'],
+                  createindex=service['CreateIndex'])
+
+    return services
+
+
+def get_endpoint_from_consul(consul_endpoint, service_name):
+    """
+    Get endpoint of service_name from consul.
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service for which endpoint
+                         needs to be found.
+    :return: service endpoint if available, else exit.
+    """
+    log.debug('getting-service-info', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    if len(services) == 0:
+        raise Exception(
+            'Cannot find service {} in consul'.format(service_name))
+
+    """ Get host IPV4 address
+    """
+    local_ipv4 = get_my_primary_local_ipv4()
+    """ If host IP address from where the request came in matches
+        the IP address of the requested service's host IP address,
+        pick the endpoint
+    """
+    for i in range(len(services)):
+        service = services[i]
+        if service['ServiceAddress'] == local_ipv4:
+            log.debug("picking address locally")
+            endpoint = '{}:{}'.format(service['ServiceAddress'],
+                                      service['ServicePort'])
+            return endpoint
+
+    """ If service is not available locally, picak a random
+        endpoint for the service from the list
+    """
+    service = services[randint(0, len(services) - 1)]
+    endpoint = '{}:{}'.format(service['ServiceAddress'],
+                              service['ServicePort'])
+
+    return endpoint
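+
+# Illustrative usage sketch (the consul address and service name are hypothetical
+# examples):
+#   endpoint = get_endpoint_from_consul('localhost:8500', 'kafka')
+#   # endpoint is a '<host>:<port>' string, e.g. '10.0.2.15:9092'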
+
+
+def get_healthy_instances(consul_endpoint, service_name=None,
+                          number_of_expected_services=None):
+    """
+    Verify in consul whether the registered services are healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of the service to check, optional
+    :param number_of_expected_services: number of services to check for, optional
+    :return: True if healthy, False otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+                    len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
+if __name__ == '__main__':
+    # print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
+    # print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
+    # print get_healthy_instances('10.100.198.220:8500')
+    get_all_instances_of_service('10.100.198.220:8500', 'voltha-grpc')
diff --git a/python/common/utils/deferred_utils.py b/python/common/utils/deferred_utils.py
new file mode 100644
index 0000000..3c55c1a
--- /dev/null
+++ b/python/common/utils/deferred_utils.py
@@ -0,0 +1,56 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.internet.error import AlreadyCalled
+
+
+class TimeOutError(Exception): pass
+
+
+class DeferredWithTimeout(Deferred):
+    """
+    Deferred with a timeout. If neither the callback nor the errback method
+    is called within the given time, the deferred's errback will be fired
+    with a TimeOutError() exception.
+
+    All other uses are the same as for Deferred().
+    """
+    def __init__(self, timeout=1.0):
+        Deferred.__init__(self)
+        self._timeout = timeout
+        self.timer = reactor.callLater(timeout, self.timed_out)
+
+    def timed_out(self):
+        self.errback(
+            TimeOutError('timed out after {} seconds'.format(self._timeout)))
+
+    def callback(self, result):
+        self._cancel_timer()
+        return Deferred.callback(self, result)
+
+    def errback(self, fail):
+        self._cancel_timer()
+        return Deferred.errback(self, fail)
+
+    def cancel(self):
+        self._cancel_timer()
+        return Deferred.cancel(self)
+
+    def _cancel_timer(self):
+        try:
+            self.timer.cancel()
+        except AlreadyCalled:
+            pass
+
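+# Minimal usage sketch (illustrative only): a request helper that fails with
+# TimeOutError if no response arrives within 5 seconds; the handler names are
+# hypothetical.
+#   d = DeferredWithTimeout(timeout=5.0)
+#   d.addCallbacks(handle_response, handle_timeout)
+#   # later, some producer calls d.callback(response) before the timer fires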
diff --git a/python/common/utils/dockerhelpers.py b/python/common/utils/dockerhelpers.py
new file mode 100644
index 0000000..4620aef
--- /dev/null
+++ b/python/common/utils/dockerhelpers.py
@@ -0,0 +1,75 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some docker related convenience functions
+"""
+from datetime import datetime
+from concurrent.futures import ThreadPoolExecutor
+
+import os
+import socket
+from structlog import get_logger
+
+from docker import Client, errors
+
+
+docker_socket = os.environ.get('DOCKER_SOCK', 'unix://tmp/docker.sock')
+log = get_logger()
+
+def get_my_containers_name():
+    """
+    Return the name of the docker container in which this process is running.
+    To look up the container name, we use the container ID extracted from the
+    $HOSTNAME environment variable (which is set by docker conventions).
+    :return: String with the docker container name (or None if any issue is
+             encountered)
+    """
+    my_container_id = os.environ.get('HOSTNAME', None)
+
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(my_container_id)
+
+    except Exception, e:
+        log.exception('failed', my_container_id=my_container_id, e=e)
+        raise
+
+    name = info['Name'].lstrip('/')
+
+    return name
+
+def get_all_running_containers():
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        containers = docker_cli.containers()
+
+    except Exception, e:
+        log.exception('failed', e=e)
+        raise
+
+    return containers
+
+def inspect_container(id):
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(id)
+    except Exception, e:
+        log.exception('failed-inspect-container', id=id, e=e)
+        raise
+
+    return info
+
diff --git a/python/common/utils/grpc_utils.py b/python/common/utils/grpc_utils.py
new file mode 100644
index 0000000..8df630e
--- /dev/null
+++ b/python/common/utils/grpc_utils.py
@@ -0,0 +1,109 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Utilities to handle gRPC server and client side code in a Twisted environment
+"""
+import structlog
+from concurrent.futures import Future
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.python.threadable import isInIOThread
+
+
+log = structlog.get_logger()
+
+
+def twisted_async(func):
+    """
+    This decorator can be used to implement a gRPC method on the twisted
+    thread, allowing asynchronous programming in Twisted while serving
+    a gRPC call.
+
+    gRPC methods normally are called on the futures.ThreadPool threads,
+    so these methods cannot directly use Twisted protocol constructs.
+    If the implementation of the methods needs to touch Twisted, it is
+    safer (or mandatory) to wrap the method with this decorator, which will
+    call the inner method on the reactor thread and ensure that the
+    result is passed back to the calling thread.
+
+    Example usage:
+
+    When implementing a gRPC server, typical pattern is:
+
+    class SpamService(SpamServicer):
+
+        def GetBadSpam(self, request, context):
+            '''this is called from a ThreadPoolExecutor thread'''
+            # generally unsafe to make Twisted calls
+
+        @twisted_async
+        def GetSpamSafely(self, request, context):
+            '''this method is now executed on the Twisted main thread'''
+            # safe to call any Twisted protocol functions
+
+        @twisted_async
+        @inlineCallbacks
+        def GetAsyncSpam(self, request, context):
+            '''this generator can use inlineCallbacks Twisted style'''
+            result = yield some_async_twisted_call(request)
+            returnValue(result)
+
+    """
+    def in_thread_wrapper(*args, **kw):
+
+        if isInIOThread():
+
+            return func(*args, **kw)
+
+        f = Future()
+
+        def twisted_wrapper():
+            try:
+                d = func(*args, **kw)
+                if isinstance(d, Deferred):
+
+                    def _done(result):
+                        f.set_result(result)
+                        f.done()
+
+                    def _error(e):
+                        f.set_exception(e)
+                        f.done()
+
+                    d.addCallback(_done)
+                    d.addErrback(_error)
+
+                else:
+                    f.set_result(d)
+                    f.done()
+
+            except Exception, e:
+                f.set_exception(e)
+                f.done()
+
+        reactor.callFromThread(twisted_wrapper)
+        try:
+            result = f.result()
+        except Exception, e:
+            log.exception(e=e, func=func, args=args, kw=kw)
+            raise
+
+        return result
+
+    return in_thread_wrapper
+
+
diff --git a/python/common/utils/id_generation.py b/python/common/utils/id_generation.py
new file mode 100644
index 0000000..e0fea1c
--- /dev/null
+++ b/python/common/utils/id_generation.py
@@ -0,0 +1,116 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# """ ID generation utils """
+
+from uuid import uuid4
+
+
+BROADCAST_CORE_ID = hex(0xFFFF)[2:]
+
+def get_next_core_id(current_id_in_hex_str):
+    """
+    :param current_id_in_hex_str: a hex string of the maximum core id 
+    assigned without the leading 0x characters
+    :return: current_id_in_hex_str + 1 in hex string 
+    """
+    if not current_id_in_hex_str or current_id_in_hex_str == '':
+        return '0001'
+    else:
+        return format(int(current_id_in_hex_str, 16) + 1, '04x')
+
+
+def create_cluster_logical_device_ids(core_id, switch_id):
+    """
+    Creates a logical device id and an OpenFlow datapath id that are unique
+    across the Voltha cluster.
+    The returned logical device id represents a 64-bit integer where the
+    lower 48 bits are the switch id and the upper 16 bits are the core id. For
+    the datapath id the core id is set to '0000' as it is not used for Voltha
+    core routing.
+    :param core_id: string
+    :param switch_id:int
+    :return: cluster logical device id and OpenFlow datapath id
+    """
+    switch_id = format(switch_id, '012x')
+    core_in_hex=format(int(core_id, 16), '04x')
+    ld_id = '{}{}'.format(core_in_hex[-4:], switch_id[-12:])
+    dpid_id = '{}{}'.format('0000', switch_id[-12:])
+    return ld_id, int(dpid_id, 16)
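+
+# Worked example (illustrative values): with core_id='0001' and switch_id=1,
+# ld_id is '0001000000000001' (core id in the upper 16 bits) and the datapath
+# id is int('0000000000000001', 16) == 1.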
+
+def is_broadcast_core_id(id):
+    assert id and len(id) == 16
+    return id[:4] == BROADCAST_CORE_ID
+
+def create_empty_broadcast_id():
+    """
+    Returns an empty broadcast id (ffff000000000000). The id is used to
+    dispatch xPON objects across all the Voltha instances.
+    :return: An empty broadcast id
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, '0'*12)
+
+def create_cluster_id():
+    """
+    Returns an id that is common across all Voltha instances. The id
+    is a string of 64 bits. The lower 48 bits refer to an id specific to that
+    object while the upper 16 bits refer to the broadcast core_id.
+    :return: A common id across all Voltha instances
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, uuid4().hex[:12])
+
+def create_cluster_device_id(core_id):
+    """
+    Creates a device id that is unique across the Voltha cluster.
+    The device id is a string of 64 bits. The lower 48 bits refer to the
+    device id while the upper 16 bits refer to the core id.
+    :param core_id: string
+    :return: cluster device id
+    """
+    return '{}{}'.format(format(int(core_id), '04x'), uuid4().hex[:12])
+
+
+def get_core_id_from_device_id(device_id):
+    # Device id is a string and the first 4 characters represent the core_id
+    assert device_id and len(device_id) == 16
+    # Return the leading 4 hex characters (the core_id)
+    return device_id[:4]
+
+
+def get_core_id_from_logical_device_id(logical_device_id):
+    """ 
+    Logical Device id is a string and the first 4 characters represent the 
+    core_id
+    :param logical_device_id: 
+    :return: core_id string
+    """
+    assert logical_device_id and len(logical_device_id) == 16
+    # Return the leading 4 hex characters (the core_id)
+    return logical_device_id[:4]
+
+
+def get_core_id_from_datapath_id(datapath_id):
+    """
+    datapath id is a uint64 where:
+        - low 48 bits -> switch_id
+        - high 16 bits -> core id
+    :param datapath_id: 
+    :return: core_id string
+    """
+    assert datapath_id
+    # Get the hex string (without any '0x' prefix or long-integer 'L' suffix)
+    id_in_hex_str = format(datapath_id, 'x')
+    assert len(id_in_hex_str) > 12
+    return id_in_hex_str[:-12]
diff --git a/python/common/utils/indexpool.py b/python/common/utils/indexpool.py
new file mode 100644
index 0000000..858cb3a
--- /dev/null
+++ b/python/common/utils/indexpool.py
@@ -0,0 +1,64 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from bitstring import BitArray
+import structlog
+
+log = structlog.get_logger()
+
+class IndexPool(object):
+    def __init__(self, max_entries, offset):
+        self.max_entries = max_entries
+        self.offset = offset
+        self.indices = BitArray(self.max_entries)
+
+    def get_next(self):
+        try:
+            _pos = self.indices.find('0b0')
+            self.indices.set(1, _pos)
+            return self.offset + _pos[0]
+        except IndexError:
+            log.info("exception-fail-to-allocate-id-all-bits-in-use")
+            return None
+
+    def allocate(self, index):
+        try:
+            _pos = index - self.offset
+            if not (0 <= _pos < self.max_entries):
+                log.info("{}-out-of-range".format(index))
+                return None
+            if self.indices[_pos]:
+                log.info("{}-is-already-allocated".format(index))
+                return None
+            self.indices.set(1, _pos)
+            return index
+
+        except IndexError:
+            return None
+
+    def release(self, index):
+        index -= self.offset
+        _pos = (index,)
+        try:
+            self.indices.set(0, _pos)
+        except IndexError:
+            log.info("bit-position-{}-out-of-range".format(index))
+
+    # Pre-allocate one index or several indices (passed as a tuple),
+    # setting the corresponding bits to 1.
+    def pre_allocate(self, index):
+        if isinstance(index, tuple):
+            _lst = list(index)
+            for i in range(len(_lst)):
+                _lst[i] -= self.offset
+            index = tuple(_lst)
+            self.indices.set(1, index)
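+
+# Minimal usage sketch (illustrative ranges): a pool of 8 ids starting at 1024.
+#   pool = IndexPool(max_entries=8, offset=1024)
+#   first = pool.get_next()      # -> 1024
+#   pool.allocate(1030)          # reserve a specific id
+#   pool.release(first)          # return it to the pool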
diff --git a/python/common/utils/json_format.py b/python/common/utils/json_format.py
new file mode 100644
index 0000000..c18d013
--- /dev/null
+++ b/python/common/utils/json_format.py
@@ -0,0 +1,105 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Monkey patched json_format to allow best effort decoding of Any fields.
+Use the additional flag (strict_any_handling=False) to trigger the
+best-effort behavior. Omit the flag, or just use the original json_format
+module for the strict behavior.
+"""
+
+from google.protobuf import json_format
+
+class _PatchedPrinter(json_format._Printer):
+
+    def __init__(self, including_default_value_fields=False,
+                 preserving_proto_field_name=False,
+                 strict_any_handling=False):
+        super(_PatchedPrinter, self).__init__(including_default_value_fields,
+                                              preserving_proto_field_name)
+        self.strict_any_handling = strict_any_handling
+
+    def _BestEffortAnyMessageToJsonObject(self, msg):
+        try:
+            res = self._AnyMessageToJsonObject(msg)
+        except TypeError:
+            res = self._RegularMessageToJsonObject(msg, {})
+        return res
+
+
+def MessageToDict(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+    """Converts protobuf message to a JSON dictionary.
+
+    Args:
+      message: The protocol buffers message instance to serialize.
+      including_default_value_fields: If True, singular primitive fields,
+          repeated fields, and map fields will always be serialized.  If
+          False, only serialize non-empty fields.  Singular message fields
+          and oneof fields are not affected by this option.
+      preserving_proto_field_name: If True, use the original proto field
+          names as defined in the .proto file. If False, convert the field
+          names to lowerCamelCase.
+      strict_any_handling: If True, conversion will error out (like in the
+          original method) if an Any field whose value type is not loaded
+          is encountered. If False, the conversion will leave the field
+          un-packed, but will otherwise continue.
+
+    Returns:
+      A dict representation of the JSON formatted protocol buffer message.
+    """
+    printer = _PatchedPrinter(including_default_value_fields,
+                              preserving_proto_field_name,
+                              strict_any_handling=strict_any_handling)
+    # pylint: disable=protected-access
+    return printer._MessageToJsonObject(message)
+
+
+def MessageToJson(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+  """Converts protobuf message to JSON format.
+
+  Args:
+    message: The protocol buffers message instance to serialize.
+    including_default_value_fields: If True, singular primitive fields,
+        repeated fields, and map fields will always be serialized.  If
+        False, only serialize non-empty fields.  Singular message fields
+        and oneof fields are not affected by this option.
+    preserving_proto_field_name: If True, use the original proto field
+        names as defined in the .proto file. If False, convert the field
+        names to lowerCamelCase.
+    strict_any_handling: If True, conversion will error out (like in the
+        original method) if an Any field whose value type is not loaded
+        is encountered. If False, the conversion will leave the field
+        un-packed, but will otherwise continue.
+
+  Returns:
+    A string containing the JSON formatted protocol buffer message.
+  """
+  printer = _PatchedPrinter(including_default_value_fields,
+                            preserving_proto_field_name,
+                            strict_any_handling=strict_any_handling)
+  return printer.ToJsonString(message)
+
+
+json_format._WKTJSONMETHODS['google.protobuf.Any'] = [
+    '_BestEffortAnyMessageToJsonObject',
+    '_ConvertAnyMessage'
+]
+
+json_format._Printer._BestEffortAnyMessageToJsonObject = \
+    json_format._Printer._AnyMessageToJsonObject
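+
+# Usage sketch (illustrative): given any protobuf message `msg`, decode it even
+# if it embeds an Any field whose type is not loaded:
+#   d = MessageToDict(msg, strict_any_handling=False)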
diff --git a/python/common/utils/message_queue.py b/python/common/utils/message_queue.py
new file mode 100644
index 0000000..2b4257a
--- /dev/null
+++ b/python/common/utils/message_queue.py
@@ -0,0 +1,89 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from twisted.internet.defer import Deferred
+from twisted.internet.defer import succeed
+
+
+class MessageQueue(object):
+    """
+    An event-driven queue, similar to twisted.internet.defer.DeferredQueue,
+    but which allows selective dequeuing based on a predicate function.
+    Unlike DeferredQueue, there is no limit on the backlog and no limit on
+    the queue size.
+    """
+
+    def __init__(self):
+        self.waiting = []  # tuples of (d, predicate)
+        self.queue = []  # messages piling up here if no one is waiting
+
+    def reset(self):
+        """
+        Purge all content as well as waiters (by errback-ing their entries).
+        :return: None
+        """
+        for d, _ in self.waiting:
+            d.errback(Exception('message queue reset() was called'))
+        self.waiting = []
+        self.queue = []
+
+    def _cancelGet(self, d):
+        """
+        Remove a deferred from our waiting list.
+        :param d: The deferred that has been canceled.
+        :return: None
+        """
+        for i in range(len(self.waiting)):
+            if self.waiting[i][0] is d:
+                self.waiting.pop(i)
+
+    def put(self, obj):
+        """
+        Add an object to this queue
+        :param obj: arbitrary object that will be added to the queue
+        :return: None
+        """
+
+        # if someone is waiting for this, return right away
+        for i in range(len(self.waiting)):
+            d, predicate = self.waiting[i]
+            if predicate is None or predicate(obj):
+                self.waiting.pop(i)
+                d.callback(obj)
+                return
+
+        # otherwise...
+        self.queue.append(obj)
+
+    def get(self, predicate=None):
+        """
+        Attempt to retrieve and remove an object from the queue that
+        matches the optional predicate.
+        :return: Deferred which fires with the next object available.
+        If predicate was provided, only objects for which
+        predicate(obj) is True will be considered.
+        """
+        for i in range(len(self.queue)):
+            msg = self.queue[i]
+            if predicate is None or predicate(msg):
+                self.queue.pop(i)
+                return succeed(msg)
+
+        # there were no matching entries if we got here, so we wait
+        d = Deferred(canceller=self._cancelGet)
+        self.waiting.append((d, predicate))
+        return d
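+
+# Minimal usage sketch (illustrative; the dict keys are hypothetical): wait only
+# for messages that match a predicate, anything else stays queued.
+#   q = MessageQueue()
+#   d = q.get(predicate=lambda msg: msg.get('type') == 'omci')
+#   q.put({'type': 'omci', 'payload': '...'})   # fires d with this dict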
+
+
diff --git a/python/common/utils/nethelpers.py b/python/common/utils/nethelpers.py
new file mode 100644
index 0000000..7df7f9f
--- /dev/null
+++ b/python/common/utils/nethelpers.py
@@ -0,0 +1,88 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some network related convenience functions
+"""
+
+from netifaces import AF_INET
+
+import netifaces as ni
+import netaddr
+
+
+def _get_all_interfaces():
+    m_interfaces = []
+    for iface in ni.interfaces():
+        m_interfaces.append((iface, ni.ifaddresses(iface)))
+    return m_interfaces
+
+
+def _get_my_primary_interface():
+    gateways = ni.gateways()
+    assert 'default' in gateways, \
+        ("No default gateway on host/container, "
+         "cannot determine primary interface")
+    default_gw_index = gateways['default'].keys()[0]
+    # gateways[default_gw_index] has the format (example):
+    # [('10.15.32.1', 'en0', True)]
+    interface_name = gateways[default_gw_index][0][1]
+    return interface_name
+
+
+def get_my_primary_local_ipv4(inter_core_subnet=None, ifname=None):
+    if not inter_core_subnet:
+        return _get_my_primary_local_ipv4(ifname)
+    # My IP should belong to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            _ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(inter_core_subnet)
+            if _ip >= m_network.first and _ip <= m_network.last:
+                return m_ip
+    return None
+
+
+def get_my_primary_interface(pon_subnet=None):
+    if not pon_subnet:
+        return _get_my_primary_interface()
+    # My interface should have an IP that belongs to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            m_ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(pon_subnet)
+            if m_ip >= m_network.first and m_ip <= m_network.last:
+                return iface
+    return None
+
+def mac_str_to_tuple(mac):
+    return tuple(int(d, 16) for d in mac.split(':'))
+
+def _get_my_primary_local_ipv4(ifname=None):
+    try:
+        ifname = get_my_primary_interface() if ifname is None else ifname
+        addresses = ni.ifaddresses(ifname)
+        ipv4 = addresses[AF_INET][0]['addr']
+        return ipv4
+    except Exception as e:
+        return None
+
+if __name__ == '__main__':
+    print get_my_primary_local_ipv4()
diff --git a/python/common/utils/ordered_weakvalue_dict.py b/python/common/utils/ordered_weakvalue_dict.py
new file mode 100644
index 0000000..9ea739a
--- /dev/null
+++ b/python/common/utils/ordered_weakvalue_dict.py
@@ -0,0 +1,48 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from _weakref import ref
+from weakref import KeyedRef
+from collections import OrderedDict
+
+
+class OrderedWeakValueDict(OrderedDict):
+    """
+    Modified OrderedDict to use weak references as values. Entries disappear
+    automatically if the referred value has no more strong reference pointing
+    to it.
+
+    Warning, this is not a complete implementation, only what is needed for
+    now. See test_ordered_weakvalue_dict.py for the tested behavior.
+    """
+    def __init__(self, *args, **kw):
+        def remove(wr, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                super(OrderedWeakValueDict, self).__delitem__(wr.key)
+        self._remove = remove
+        super(OrderedWeakValueDict, self).__init__(*args, **kw)
+
+    def __setitem__(self, key, value):
+        super(OrderedWeakValueDict, self).__setitem__(
+            key, KeyedRef(value, self._remove, key))
+
+    def __getitem__(self, key):
+        o = super(OrderedWeakValueDict, self).__getitem__(key)()
+        if o is None:
+            raise KeyError, key
+        else:
+            return o
+
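+# Minimal usage sketch (illustrative): entries vanish once the last strong
+# reference to the stored value goes away.
+#   class Thing(object): pass
+#   d = OrderedWeakValueDict()
+#   t = Thing()
+#   d['x'] = t          # held via a weak reference
+#   del t               # 'x' disappears from d shortly after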
diff --git a/python/common/utils/registry.py b/python/common/utils/registry.py
new file mode 100644
index 0000000..270bd71
--- /dev/null
+++ b/python/common/utils/registry.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Simple component registry to provide centralized access to any registered
+components.
+"""
+from collections import OrderedDict
+from zope.interface import Interface
+
+
+class IComponent(Interface):
+    """
+    A Voltha Component
+    """
+
+    def start():
+        """
+        Called once the component is instantiated. Can be used for async
+        initialization.
+        :return: (None or Deferred)
+        """
+
+    def stop():
+        """
+        Called once before the component is unloaded. Can be used for async
+        cleanup operations.
+        :return: (None or Deferred)
+        """
+
+
+class Registry(object):
+
+    def __init__(self):
+        self.components = OrderedDict()
+
+    def register(self, name, component):
+        assert IComponent.providedBy(component)
+        assert name not in self.components
+        self.components[name] = component
+        return component
+
+    def unregister(self, name):
+        if name in self.components:
+            del self.components[name]
+
+    def __call__(self, name):
+        return self.components[name]
+
+    def iterate(self):
+        return self.components.values()
+
+
+# public shared registry
+registry = Registry()
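+
+# Minimal usage sketch (illustrative; MyComponent is a hypothetical class that
+# provides IComponent):
+#   registry.register('my_component', MyComponent())
+#   registry('my_component').start()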