This commit consists of:
1) Update voltha.proto to remove duplication between the Voltha message and
the CoreInstance message.  Since all data will be stored in the clustered
KV store, it makes sense to use a cluster-wide proto message instead of a
core-specific one.  Each core will hold a subset of the data: only the items
it is actively or passively managing.
2) Add a Makefile to the adapters directory to clearly separate the adapters
build from the core build.  This is work in progress.
3) Add an initial README.md to the adapters directory showing how to run the
ponsim OLT and ONU adapters in containers (see the example below).
4) Minor cleanup, mostly around name consistency.
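
For reference, the adapter images and the containerized ponsim adapters can
be exercised as described in the new adapters/README.md (a sketch, assuming
a local docker and docker-compose environment):

    cd adapters
    . env.sh
    make build
    docker-compose -f ../compose/adapters-ponsim.yml up -d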

Change-Id: I55155c41b56e95877f8735b536873a87d6ca63b1
diff --git a/.gitignore b/.gitignore
index f70d370..bba1e09 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,6 +34,7 @@
 **/*_pb2.py
 **/*_pb2_grpc.py
 **/*.pb.go
+**/*.desc
 
 # Editors
 *.bak
@@ -55,5 +56,4 @@
 
 # Files copied over during make
 adapters/protos/*.proto
-adapters/protos/*.desc
 
diff --git a/Makefile b/Makefile
index 1761e50..b988f21 100644
--- a/Makefile
+++ b/Makefile
@@ -54,7 +54,7 @@
 	rw_core
 
 
-.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) rw_core ro_core
+.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) rw_core ro_core protos kafka db tests adapters
 
 # This should to be the first and default target in this Makefile
 help:
@@ -78,11 +78,7 @@
 	@echo "    CLEAN $(basename $@)"
 	$(Q)$(MAKE) -C $(basename $@) clean
 
-build: protoc protos 
-#build: protoc protos containers
-
-base:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-base:${TAG} -f adapters/docker/Dockerfile.base .
+build: containers
 
 containers: rw_core
 
@@ -94,17 +90,4 @@
 	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-rw-core:${TAG} -f docker/Dockerfile.rw_core_d .
 endif
 
-protoc:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} -f adapters/docker/Dockerfile.protoc .
-
-protos:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} -f adapters/docker/Dockerfile.protos .
-
-ponsim_adapter_olt:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-olt:${TAG} -f adapters/docker/Dockerfile.ponsim_adapter_olt .
-
-ponsim_adapter_onu:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-onu:${TAG} -f adapters/docker/Dockerfile.ponsim_adapter_onu .
-
-
 # end file
diff --git a/adapters/Makefile b/adapters/Makefile
new file mode 100644
index 0000000..9374284
--- /dev/null
+++ b/adapters/Makefile
@@ -0,0 +1,225 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifneq ($(VOLTHA_BUILD),docker)
+ifeq ($(VOLTHA_BASE)_set,_set)
+$(error To get started, please source the env.sh file)
+endif
+endif
+
+ifeq ($(TAG),)
+TAG := latest
+endif
+
+ifeq ($(TARGET_TAG),)
+TARGET_TAG := latest
+endif
+
+# If no DOCKER_HOST_IP is specified, grab a v4 IP address associated with
+# the default gateway
+ifeq ($(DOCKER_HOST_IP),)
+DOCKER_HOST_IP := $(shell ifconfig $$(netstat -rn | grep -E '^(default|0.0.0.0)' | head -1 | awk '{print $$NF}') | grep inet | awk '{print $$2}' | sed -e 's/addr://g')
+endif
+
+ifneq ($(http_proxy)$(https_proxy),)
+# Include proxies from the environment
+DOCKER_PROXY_ARGS = \
+       --build-arg http_proxy=$(http_proxy) \
+       --build-arg https_proxy=$(https_proxy) \
+       --build-arg ftp_proxy=$(ftp_proxy) \
+       --build-arg no_proxy=$(no_proxy) \
+       --build-arg HTTP_PROXY=$(HTTP_PROXY) \
+       --build-arg HTTPS_PROXY=$(HTTPS_PROXY) \
+       --build-arg FTP_PROXY=$(FTP_PROXY) \
+       --build-arg NO_PROXY=$(NO_PROXY)
+endif
+
+DOCKER_BUILD_ARGS = \
+	--build-arg TAG=$(TAG) \
+	--build-arg REGISTRY=$(REGISTRY) \
+	--build-arg REPOSITORY=$(REPOSITORY) \
+	$(DOCKER_PROXY_ARGS) $(DOCKER_CACHE_ARG) \
+	 --rm --force-rm \
+	$(DOCKER_BUILD_EXTRA_ARGS)
+
+VENVDIR := venv-$(shell uname -s | tr '[:upper:]' '[:lower:]')
+
+DOCKER_IMAGE_LIST = \
+	base \
+	protoc \
+	protos \
+	adapter-ponsim-olt \
+	adapter-ponsim-onu \
+
+# The following list was scavenged from the compose / stack files as well as
+# from the Dockerfiles.  If nothing else, it highlights that VOLTHA is not
+# using consistent versions for some of the containers.
+
+FETCH_BUILD_IMAGE_LIST = \
+       alpine:3.6 \
+       centos:7 \
+       centurylink/ca-certs:latest \
+       grpc/python:latest \
+       ubuntu:xenial
+
+FETCH_COMPOSE_IMAGE_LIST = \
+        wurstmeister/kafka:latest \
+        wurstmeister/zookeeper:latest
+
+# find k8s -type f | xargs grep image: | awk '{print $NF}' | sed -e 's/\"//g' | sed '/:.*$/!s/$/:latest/g' | sort -u | sed -e 's/^/       /g' -e 's/$/ \\/g'
+# Manually remove some images from this list as they don't reflect the new
+# naming conventions for the VOLTHA build.
+FETCH_K8S_IMAGE_LIST = \
+       wurstmeister/kafka:1.0.0 \
+       zookeeper:3.4.11
+
+FETCH_IMAGE_LIST = $(shell echo $(FETCH_BUILD_IMAGE_LIST) $(FETCH_COMPOSE_IMAGE_LIST) $(FETCH_K8S_IMAGE_LIST) | tr ' ' '\n' | sort -u)
+
+.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) flake8 base ponsim_olt ponsim_onu protos kafka common start stop tag push pull
+
+# This should be the first and default target in this Makefile
+help:
+	@echo "Usage: make [<target>]"
+	@echo "where available targets are:"
+	@echo
+	@echo "build        : Build the adapter protos and docker images.\n\
+               If this is the first time you are building, choose the \"make build\" option."
+	@echo "clean        : Remove files created by the build and tests"
+	@echo "distclean    : Remove venv directory"
+	@echo "fetch        : Pre-fetch artifacts for subsequent local builds"
+	@echo "help         : Print this help"
+	@echo "protoc       : Build a container with protoc installed"
+	@echo "protos       : Compile all grpc/protobuf files"
+	@echo "rebuild-venv : Rebuild local Python virtualenv from scratch"
+	@echo "venv         : Build local Python virtualenv if it does not exist yet"
+	@echo "containers   : Build all the docker containers"
+	@echo "base         : Build the base docker container used by all other dockers"
+	@echo "adapter_ponsim_olt       : Build the ponsim olt adapter docker container"
+	@echo "adapter_ponsim_onu       : Build the ponsim onu adapter docker container"
+	@echo "tag          : Tag a set of images"
+	@echo "push         : Push the docker images to an external repository"
+	@echo "pull         : Pull the docker images from a repository"
+	@echo
+
+## New directories can be added here
+#DIRS:=
+
+## If one directory depends on another directory that
+## dependency can be expressed here
+##
+## For example, if the Tibit directory depended on the eoam
+## directory being built first, then that can be expressed here.
+##  driver/tibit: eoam
+
+# Parallel Build
+$(DIRS):
+	@echo "    MK $@"
+	$(Q)$(MAKE) -C $@
+
+# Parallel Clean
+DIRS_CLEAN = $(addsuffix .clean,$(DIRS))
+$(DIRS_CLEAN):
+	@echo "    CLEAN $(basename $@)"
+	$(Q)$(MAKE) -C $(basename $@) clean
+
+# Parallel Flake8
+DIRS_FLAKE8 = $(addsuffix .flake8,$(DIRS))
+$(DIRS_FLAKE8):
+	@echo "    FLAKE8 $(basename $@)"
+	-$(Q)$(MAKE) -C $(basename $@) flake8
+
+build: protoc protos containers
+
+containers: base adapter_ponsim_olt adapter_ponsim_onu
+
+base:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-base:${TAG} -f docker/Dockerfile.base .
+
+adapter_ponsim_olt:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-ponsim-olt:${TAG} -f docker/Dockerfile.adapter_ponsim_olt .
+
+adapter_ponsim_onu:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-ponsim-onu:${TAG} -f docker/Dockerfile.adapter_ponsim_onu .
+
+tag: $(patsubst  %,%.tag,$(DOCKER_IMAGE_LIST))
+
+push: tag $(patsubst  %,%.push,$(DOCKER_IMAGE_LIST))
+
+pull: $(patsubst  %,%.pull,$(DOCKER_IMAGE_LIST))
+
+%.tag:
+	docker tag ${REGISTRY}${REPOSITORY}voltha-$(subst .tag,,$@):${TAG} ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .tag,,$@):${TARGET_TAG}
+
+%.push:
+	docker push ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .push,,$@):${TARGET_TAG}
+
+%.pull:
+	docker pull ${REGISTRY}${REPOSITORY}voltha-$(subst .pull,,$@):${TAG}
+
+protoc:
+ifeq ($(VOLTHA_BUILD),docker)
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} -f docker/Dockerfile.protoc .
+endif
+
+protos:
+ifneq ($(VOLTHA_BUILD),docker)
+	make -C protos
+else
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} -f docker/Dockerfile.protos .
+endif
+
+install-protoc:
+	make -C voltha/protos install-protoc
+
+clean:
+	find voltha -name '*.pyc' | xargs rm -f
+
+distclean: clean
+	rm -rf ${VENVDIR}
+
+fetch:
+	@bash -c ' \
+		for i in $(FETCH_IMAGE_LIST); do \
+			docker pull $$i; \
+		done'
+
+purge-venv:
+	rm -fr ${VENVDIR}
+
+rebuild-venv: purge-venv venv
+
+ifneq ($(VOLTHA_BUILD),docker)
+venv: ${VENVDIR}/.built
+else
+venv:
+endif
+
+${VENVDIR}/.built:
+	@ virtualenv ${VENVDIR}
+	@ . ${VENVDIR}/bin/activate && \
+	    pip install --upgrade pip; \
+	    if ! pip install -r requirements.txt; \
+	    then \
+	        echo "On MAC OS X, if the installation failed with an error \n'<openssl/opensslv.h>': file not found,"; \
+	        echo "see the BUILD.md file for a workaround"; \
+	    else \
+	        uname -s > ${VENVDIR}/.built; \
+	    fi
+
+
+flake8: $(DIRS_FLAKE8)
+
+# end file
diff --git a/adapters/README.md b/adapters/README.md
new file mode 100644
index 0000000..3eda078
--- /dev/null
+++ b/adapters/README.md
@@ -0,0 +1,33 @@
+# How to Build and Develop a Voltha Adapter
+
+The build and development environment for a Voltha Adapter is left to the developer's choice.
+
+### Build
+
+You can build the Voltha Adapter by running:
+
+```
+cd adapters
+. env.sh
+make build
+```
+
+The above generates a few docker images. An example is shown below:
+
+```
+$ docker images
+REPOSITORY                  TAG                                        IMAGE ID            CREATED             SIZE
+voltha-adapter-ponsim-onu   latest                                     3638b16b5262        36 seconds ago      774MB
+voltha-adapter-ponsim-olt   latest                                     9e98a3a8e1aa        58 seconds ago      775MB
+voltha-base                 latest                                     40ed93942a6a        23 minutes ago      771MB
+voltha-rw-core              latest                                     648be4bc594a        About an hour ago   29.1MB
+voltha-protos               latest                                     d458a391cc81        12 days ago         2.66MB
+```
+
+### Run the ponsim adapters 
+
+The simplest way to run the containerized adapters is to use the docker-compose command:
+
+```
+docker-compose -f ../compose/adapters-ponsim.yml up -d
+```
diff --git a/adapters/docker/Dockerfile.ponsim_adapter_olt b/adapters/docker/Dockerfile.adapter_ponsim_olt
similarity index 83%
rename from adapters/docker/Dockerfile.ponsim_adapter_olt
rename to adapters/docker/Dockerfile.adapter_ponsim_olt
index 5b714f1..209200d 100644
--- a/adapters/docker/Dockerfile.ponsim_adapter_olt
+++ b/adapters/docker/Dockerfile.adapter_ponsim_olt
@@ -23,18 +23,18 @@
 # Bundle app source
 RUN mkdir /adapters && touch /adapters/__init__.py
 ENV PYTHONPATH=/adapters
-COPY adapters/common /adapters/adapters/common
-COPY adapters/kafka /adapters/adapters/kafka
-COPY adapters/*.py /adapters/adapters/
+COPY common /adapters/adapters/common
+COPY kafka /adapters/adapters/kafka
+COPY ./*.py /adapters/adapters/
 #COPY pki /voltha/pki
-COPY adapters/ponsim_olt /adapters/adapters/ponsim_olt
+COPY ponsim_olt /adapters/adapters/ponsim_olt
 RUN touch /adapters/adapters/__init__.py
 
 
 # Copy in the generated GRPC proto code
 COPY --from=protos /protos/voltha /adapters/adapters/protos
 COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
-COPY adapters/protos/third_party/__init__.py /adapters/adapters/protos/third_party
+COPY protos/third_party/__init__.py /adapters/adapters/protos/third_party
 RUN touch /adapters/adapters/protos/__init__.py
 RUN touch /adapters/adapters/protos/third_party/google/__init__.py
 
diff --git a/adapters/docker/Dockerfile.ponsim_adapter_onu b/adapters/docker/Dockerfile.adapter_ponsim_onu
similarity index 83%
rename from adapters/docker/Dockerfile.ponsim_adapter_onu
rename to adapters/docker/Dockerfile.adapter_ponsim_onu
index 57cc113..d0d3e36 100644
--- a/adapters/docker/Dockerfile.ponsim_adapter_onu
+++ b/adapters/docker/Dockerfile.adapter_ponsim_onu
@@ -23,18 +23,18 @@
 # Bundle app source
 RUN mkdir /adapters && touch /adapters/__init__.py
 ENV PYTHONPATH=/adapters
-COPY adapters/common /adapters/adapters/common
-COPY adapters/kafka /adapters/adapters/kafka
-COPY adapters/*.py /adapters/adapters/
+COPY common /adapters/adapters/common
+COPY kafka /adapters/adapters/kafka
+COPY ./*.py /adapters/adapters/
 #COPY pki /voltha/pki
-COPY adapters/ponsim_onu /adapters/adapters/ponsim_onu
+COPY ponsim_onu /adapters/adapters/ponsim_onu
 RUN touch /adapters/adapters/__init__.py
 
 
 # Copy in the generated GRPC proto code
 COPY --from=protos /protos/voltha /adapters/adapters/protos
 COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
-COPY adapters/protos/third_party/__init__.py /adapters/adapters/protos/third_party
+COPY protos/third_party/__init__.py /adapters/adapters/protos/third_party
 RUN touch /adapters/adapters/protos/__init__.py
 RUN touch /adapters/adapters/protos/third_party/google/__init__.py
 
diff --git a/adapters/docker/Dockerfile.base b/adapters/docker/Dockerfile.base
index a50a3ee..1b912e0 100644
--- a/adapters/docker/Dockerfile.base
+++ b/adapters/docker/Dockerfile.base
@@ -20,7 +20,7 @@
 RUN apt-get update && \
     apt-get install -y python python-pip openssl iproute2 libpcap-dev wget
 
-COPY adapters/requirements.txt /tmp/requirements.txt
+COPY requirements.txt /tmp/requirements.txt
 
 # pip install cython enum34 six && \
 # Install app dependencies
diff --git a/adapters/env.sh b/adapters/env.sh
new file mode 100644
index 0000000..f4f9f97
--- /dev/null
+++ b/adapters/env.sh
@@ -0,0 +1,29 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# sourcing this file is needed to make local development and integration testing work
+export VOLTHA_BASE=$PWD
+
+# load local python virtualenv if exists, otherwise create it
+VENVDIR="venv-$(uname -s | tr '[:upper:]' '[:lower:]')"
+if [ ! -e "$VENVDIR/.built" ]; then
+    echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+    echo "Initializing OS-appropriate virtual env."
+    echo "This will take a few minutes."
+    echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+    make venv
+fi
+. $VENVDIR/bin/activate
+
+# add top-level voltha dir to pythonpath
+export PYTHONPATH=$VOLTHA_BASE/$VENVDIR/lib/python2.7/site-packages:$PYTHONPATH:$VOLTHA_BASE:$VOLTHA_BASE/protos/third_party
diff --git a/compose/ponsim-adapters.yml b/compose/adapters-ponsim.yml
similarity index 88%
rename from compose/ponsim-adapters.yml
rename to compose/adapters-ponsim.yml
index 0e4b9f8..dacf83c 100644
--- a/compose/ponsim-adapters.yml
+++ b/compose/adapters-ponsim.yml
@@ -15,8 +15,8 @@
 
 version: '2'
 services:
-  ponsim_olt_adapter:
-    image: "${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-olt${TAG}"
+  adapter_ponsim_olt:
+    image: "${REGISTRY}${REPOSITORY}voltha-adapter-ponsim-olt${TAG}"
     logging:
       driver: "json-file"
       options:
@@ -33,8 +33,8 @@
     networks:
     - default
 
-  ponsim_onu_adapter:
-    image: "${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-onu${TAG}"
+  adapter_ponsim_onu:
+    image: "${REGISTRY}${REPOSITORY}voltha-adapter-ponsim-onu${TAG}"
     logging:
       driver: "json-file"
       options:
diff --git a/compose/ponsim.yml b/compose/ponsim.yml
deleted file mode 100644
index cae0cb4..0000000
--- a/compose/ponsim.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-# Copyright 2018 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: '2'
-services:
-  ponsim_olt:
-    image: "${REGISTRY}${REPOSITORY}voltha-ponsim${TAG}"
-    logging:
-      driver: "json-file"
-      options:
-        max-size: "10m"
-        max-file: "3"
-    entrypoint:
-      - /app/ponsim
-      - -device_type
-      - "OLT"
-      - -onus
-      - "4"
-      - -internal_if
-      - "eth0"
-      - -external_if
-      - "eth0"
-      - -vcore_endpoint
-      - "vcore"
-      - -promiscuous
-      - -verbose
-    ports:
-      - "50060:50060"
-    networks:
-    - default
-
-  ponsim_onu:
-    image: "${REGISTRY}${REPOSITORY}voltha-ponsim${TAG}"
-    logging:
-      driver: "json-file"
-      options:
-        max-size: "10m"
-        max-file: "3"
-    entrypoint:
-      - /app/ponsim
-      - -device_type
-      - "ONU"
-      - -parent_addr
-      - "ponsim_olt"
-      - -grpc_port
-      - "50061"
-      - -internal_if
-      - "eth0"
-      - -external_if
-      - "lo"
-      - -promiscuous
-      - -verbose
-    ports:
-    - "50061:50061"
-    networks:
-    - default
-
-networks:
-  default:
-    driver: bridge
diff --git a/protos/voltha.proto b/protos/voltha.proto
index 92d59dd..6d37329 100644
--- a/protos/voltha.proto
+++ b/protos/voltha.proto
@@ -75,25 +75,15 @@
     string package_name = 2;
 }
 
-// Top-level (root) node for a Voltha Instance
+// CoreInstance represents a core instance.  Its data is held in memory while a core
+// is running and is not persisted.
 message CoreInstance {
     option (yang_message_rule) = CREATE_BOTH_GROUPING_AND_CONTAINER;
 
     string instance_id = 1  [(access) = READ_ONLY];
 
-    string version = 2 [(access) = READ_ONLY];
+    HealthStatus health = 2 [(child_node) = {}];
 
-    LogLevel.LogLevel log_level = 3;
-
-    HealthStatus health = 10 [(child_node) = {}];
-
-    repeated LogicalDevice logical_devices = 12 [(child_node) = {key: "id"}];
-
-    repeated Device devices = 13 [(child_node) = {key: "id"}];
-
-    repeated DeviceGroup device_groups = 15 [(child_node) = {key: "id"}];
-
-    repeated AlarmFilter alarm_filters = 16 [(child_node) = {key: "id"}];
 }
 
 message CoreInstances {
@@ -101,25 +91,23 @@
     repeated string items = 1;
 }
 
-// Voltha representing the entire Voltha cluster
+// Voltha represents the Voltha cluster data.  Each Core instance will hold a subset of
+// the entire cluster data.  However, some items (e.g. adapters) will be held by all cores
+// for better performance.
 message Voltha {
     option (yang_message_rule) = CREATE_BOTH_GROUPING_AND_CONTAINER;
 
     string version = 1 [(access) = READ_ONLY];
 
-    LogLevel.LogLevel log_level = 2;
+    repeated Adapter adapters = 2 [(child_node) = {key: "id"}];
 
-    repeated CoreInstance core_instances = 3 [(child_node) = {key: "instance_id"}];
+    repeated LogicalDevice logical_devices = 3 [(child_node) = {key: "id"}];
 
-    repeated Adapter adapters = 11 [(child_node) = {key: "id"}];
+    repeated Device devices = 4 [(child_node) = {key: "id"}];
 
-    repeated LogicalDevice logical_devices = 12 [(child_node) = {key: "id"}];
+    repeated DeviceGroup device_groups = 5 [(child_node) = {key: "id"}];
 
-    repeated Device devices = 13 [(child_node) = {key: "id"}];
-
-    repeated DeviceGroup device_groups = 15 [(child_node) = {key: "id"}];
-
-    repeated AlarmFilter alarm_filters = 16 [(child_node) = {key: "id"}];
+    repeated AlarmFilter alarm_filters = 6 [(child_node) = {key: "id"}];
 
     repeated
         omci.MibDeviceData omci_mib_database = 28
diff --git a/rw_core/core/core.go b/rw_core/core/core.go
index f9e42ef..06f3ca3 100644
--- a/rw_core/core/core.go
+++ b/rw_core/core/core.go
@@ -63,8 +63,8 @@
 	log.Info("starting-core")
 	core.startKafkaMessagingProxy(ctx)
 	log.Info("values", log.Fields{"kmp": core.kmp})
-	core.deviceMgr = NewDeviceManager(core.kmp, core.localDataProxy)
-	core.logicalDeviceMgr = NewLogicalDeviceManager(core.deviceMgr, core.kmp, core.localDataProxy)
+	core.deviceMgr = NewDeviceManager(core.kmp, core.clusterDataProxy)
+	core.logicalDeviceMgr = NewLogicalDeviceManager(core.deviceMgr, core.kmp, core.clusterDataProxy)
 	core.registerAdapterRequestHandler(ctx, core.deviceMgr, core.logicalDeviceMgr, core.localDataProxy, core.clusterDataProxy)
 	go core.startDeviceManager(ctx)
 	go core.startLogicalDeviceManager(ctx)
@@ -86,7 +86,7 @@
 	core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false)
 	log.Info("grpc-server-created")
 
-	core.grpcNBIAPIHanfler = NewAPIHandler(core.deviceMgr, core.logicalDeviceMgr, core.clusterDataProxy, core.localDataProxy)
+	core.grpcNBIAPIHanfler = NewAPIHandler(core.deviceMgr, core.logicalDeviceMgr)
 	//	Create a function to register the core GRPC service with the GRPC server
 	f := func(gs *grpc.Server) {
 		voltha.RegisterVolthaServiceServer(
diff --git a/rw_core/core/device_agent.go b/rw_core/core/device_agent.go
index aa13748..805dd21 100644
--- a/rw_core/core/device_agent.go
+++ b/rw_core/core/device_agent.go
@@ -28,15 +28,15 @@
 )
 
 type DeviceAgent struct {
-	deviceId       string
-	lastData       *voltha.Device
-	adapterProxy   *AdapterProxy
-	deviceMgr      *DeviceManager
-	localDataProxy *model.Proxy
-	exitChannel    chan int
+	deviceId         string
+	lastData         *voltha.Device
+	adapterProxy     *AdapterProxy
+	deviceMgr        *DeviceManager
+	clusterDataProxy *model.Proxy
+	exitChannel      chan int
 }
 
-func newDeviceAgent(ap *AdapterProxy, device *voltha.Device, deviceMgr *DeviceManager, ldProxy *model.Proxy) *DeviceAgent {
+func newDeviceAgent(ap *AdapterProxy, device *voltha.Device, deviceMgr *DeviceManager, cdProxy *model.Proxy) *DeviceAgent {
 	var agent DeviceAgent
 	device.Id = CreateDeviceId()
 	agent.deviceId = device.Id
@@ -44,14 +44,14 @@
 	agent.lastData = device
 	agent.deviceMgr = deviceMgr
 	agent.exitChannel = make(chan int, 1)
-	agent.localDataProxy = ldProxy
+	agent.clusterDataProxy = cdProxy
 	return &agent
 }
 
 func (agent *DeviceAgent) start(ctx context.Context) {
 	log.Debugw("starting-device-agent", log.Fields{"device": agent.lastData})
 	// Add the initial device to the local model
-	if added := agent.localDataProxy.Add("/devices", agent.lastData, ""); added == nil {
+	if added := agent.clusterDataProxy.Add("/devices", agent.lastData, ""); added == nil {
 		log.Errorw("failed-to-add-device", log.Fields{"deviceId": agent.deviceId})
 	}
 	log.Debug("device-agent-started")
@@ -72,7 +72,7 @@
 		cloned := reflect.ValueOf(device).Elem().Interface().(voltha.Device)
 		cloned.AdminState = voltha.AdminState_ENABLED
 		cloned.OperStatus = voltha.OperStatus_ACTIVATING
-		if afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
+		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		} else {
 			if err := agent.adapterProxy.AdoptDevice(ctx, &cloned); err != nil {
@@ -136,7 +136,7 @@
 	} else {
 		// store the changed data
 		cloned := (proto.Clone(device)).(*voltha.Device)
-		afterUpdate := agent.localDataProxy.Update("/devices/"+device.Id, cloned, false, "")
+		afterUpdate := agent.clusterDataProxy.Update("/devices/"+device.Id, cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", device.Id)
 		}
@@ -164,7 +164,7 @@
 		}
 		log.Debugw("DeviceStateUpdate-device", log.Fields{"device": cloned})
 		// Store the device
-		if afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
+		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
 		// Perform the state transition
@@ -187,7 +187,7 @@
 		cp := proto.Clone(pmConfigs)
 		cloned.PmConfigs = cp.(*voltha.PmConfigs)
 		// Store the device
-		afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, "")
+		afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
@@ -210,7 +210,7 @@
 		cp := proto.Clone(port)
 		cloned.Ports = append(cloned.Ports, cp.(*voltha.Port))
 		// Store the device
-		afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, "")
+		afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
@@ -250,7 +250,7 @@
 	log.Debugw("update-field-status", log.Fields{"device": storeDevice, "name": name, "updated": updated})
 	//	Save the data
 	cloned := reflect.ValueOf(storeDevice).Elem().Interface().(voltha.Device)
-	if afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
+	if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
 		log.Warnw("attribute-update-failed", log.Fields{"attribute": name, "value": value})
 	}
 	return
diff --git a/rw_core/core/device_manager.go b/rw_core/core/device_manager.go
index a6c0c8d..fd18c10 100644
--- a/rw_core/core/device_manager.go
+++ b/rw_core/core/device_manager.go
@@ -36,18 +36,18 @@
 	logicalDeviceMgr    *LogicalDeviceManager
 	kafkaProxy          *kafka.KafkaMessagingProxy
 	stateTransitions    *TransitionMap
-	localDataProxy      *model.Proxy
+	clusterDataProxy    *model.Proxy
 	exitChannel         chan int
 	lockDeviceAgentsMap sync.RWMutex
 }
 
-func NewDeviceManager(kafkaProxy *kafka.KafkaMessagingProxy, ldProxy *model.Proxy) *DeviceManager {
+func NewDeviceManager(kafkaProxy *kafka.KafkaMessagingProxy, cdProxy *model.Proxy) *DeviceManager {
 	var deviceMgr DeviceManager
 	deviceMgr.exitChannel = make(chan int, 1)
 	deviceMgr.deviceAgents = make(map[string]*DeviceAgent)
 	deviceMgr.adapterProxy = NewAdapterProxy(kafkaProxy)
 	deviceMgr.kafkaProxy = kafkaProxy
-	deviceMgr.localDataProxy = ldProxy
+	deviceMgr.clusterDataProxy = cdProxy
 	deviceMgr.lockDeviceAgentsMap = sync.RWMutex{}
 	return &deviceMgr
 }
@@ -98,7 +98,7 @@
 	log.Debugw("createDevice-start", log.Fields{"device": device, "aproxy": dMgr.adapterProxy})
 
 	// Create and start a device agent for that device
-	agent := newDeviceAgent(dMgr.adapterProxy, device, dMgr, dMgr.localDataProxy)
+	agent := newDeviceAgent(dMgr.adapterProxy, device, dMgr, dMgr.clusterDataProxy)
 	dMgr.addDeviceAgentToMap(agent)
 	agent.start(ctx)
 
@@ -122,7 +122,7 @@
 func (dMgr *DeviceManager) getDevice(id string) (*voltha.Device, error) {
 	log.Debugw("getDevice-start", log.Fields{"deviceid": id})
 
-	if device := dMgr.localDataProxy.Get("/devices/"+id, 1, false, ""); device == nil {
+	if device := dMgr.clusterDataProxy.Get("/devices/"+id, 1, false, ""); device == nil {
 		return nil, status.Errorf(codes.NotFound, "%s", id)
 	} else {
 		cloned := reflect.ValueOf(device).Elem().Interface().(voltha.Device)
@@ -136,7 +136,7 @@
 	dMgr.lockDeviceAgentsMap.Lock()
 	defer dMgr.lockDeviceAgentsMap.Unlock()
 	for _, agent := range dMgr.deviceAgents {
-		if device := dMgr.localDataProxy.Get("/devices/"+agent.deviceId, 1, false, ""); device != nil {
+		if device := dMgr.clusterDataProxy.Get("/devices/"+agent.deviceId, 1, false, ""); device != nil {
 			cloned := reflect.ValueOf(device).Elem().Interface().(voltha.Device)
 			result.Items = append(result.Items, &cloned)
 		}
@@ -215,7 +215,7 @@
 	childDevice.ProxyAddress = &voltha.Device_ProxyAddress{ChannelId: uint32(channelId)}
 
 	// Create and start a device agent for that device
-	agent := newDeviceAgent(dMgr.adapterProxy, childDevice, dMgr, dMgr.localDataProxy)
+	agent := newDeviceAgent(dMgr.adapterProxy, childDevice, dMgr, dMgr.clusterDataProxy)
 	dMgr.addDeviceAgentToMap(agent)
 	agent.start(nil)
 
diff --git a/rw_core/core/grpc_nbi_api_handler.go b/rw_core/core/grpc_nbi_api_handler.go
index 6af73cd..bd28322 100644
--- a/rw_core/core/grpc_nbi_api_handler.go
+++ b/rw_core/core/grpc_nbi_api_handler.go
@@ -21,7 +21,6 @@
 	"github.com/golang/protobuf/ptypes/empty"
 	da "github.com/opencord/voltha-go/common/core/northbound/grpc"
 	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/model"
 	"github.com/opencord/voltha-go/protos/common"
 	"github.com/opencord/voltha-go/protos/openflow_13"
 	"github.com/opencord/voltha-go/protos/voltha"
@@ -33,16 +32,12 @@
 type APIHandler struct {
 	deviceMgr        *DeviceManager
 	logicalDeviceMgr *LogicalDeviceManager
-	clusterDataProxy *model.Proxy
-	localDataProxy   *model.Proxy
 	da.DefaultAPIHandler
 }
 
-func NewAPIHandler(deviceMgr *DeviceManager, lDeviceMgr *LogicalDeviceManager, cdProxy *model.Proxy, ldProxy *model.Proxy) *APIHandler {
+func NewAPIHandler(deviceMgr *DeviceManager, lDeviceMgr *LogicalDeviceManager) *APIHandler {
 	handler := &APIHandler{deviceMgr: deviceMgr,
-		logicalDeviceMgr: lDeviceMgr,
-		clusterDataProxy: cdProxy,
-		localDataProxy:   ldProxy}
+		logicalDeviceMgr: lDeviceMgr}
 	return handler
 }
 func isTestMode(ctx context.Context) bool {
diff --git a/rw_core/core/id.go b/rw_core/core/id.go
index d5aebd5..b28151f 100644
--- a/rw_core/core/id.go
+++ b/rw_core/core/id.go
@@ -44,7 +44,7 @@
 	return val
 }
 
-// CreateLogicalPortId produces a random port ID for a logical device.   
+// CreateLogicalPortId produces a random port ID for a logical device.
 func CreateLogicalPortId() uint32 {
 	//	A logical port is a uint32
 	return m.Uint32()
diff --git a/rw_core/core/logical_device_agent.go b/rw_core/core/logical_device_agent.go
index 5a9562a..117c869 100644
--- a/rw_core/core/logical_device_agent.go
+++ b/rw_core/core/logical_device_agent.go
@@ -29,23 +29,23 @@
 )
 
 type LogicalDeviceAgent struct {
-	logicalDeviceId string
-	lastData        *voltha.LogicalDevice
-	rootDeviceId    string
-	deviceMgr       *DeviceManager
-	ldeviceMgr      *LogicalDeviceManager
-	localDataProxy  *model.Proxy
-	exitChannel     chan int
+	logicalDeviceId  string
+	lastData         *voltha.LogicalDevice
+	rootDeviceId     string
+	deviceMgr        *DeviceManager
+	ldeviceMgr       *LogicalDeviceManager
+	clusterDataProxy *model.Proxy
+	exitChannel      chan int
 }
 
 func NewLogicalDeviceAgent(id string, device *voltha.Device, ldeviceMgr *LogicalDeviceManager, deviceMgr *DeviceManager,
-	ldProxy *model.Proxy) *LogicalDeviceAgent {
+	cdProxy *model.Proxy) *LogicalDeviceAgent {
 	var agent LogicalDeviceAgent
 	agent.exitChannel = make(chan int, 1)
 	agent.logicalDeviceId = id
 	agent.rootDeviceId = device.Id
 	agent.deviceMgr = deviceMgr
-	agent.localDataProxy = ldProxy
+	agent.clusterDataProxy = cdProxy
 	agent.ldeviceMgr = ldeviceMgr
 	return &agent
 }
@@ -83,7 +83,7 @@
 		ld.Ports = append(ld.Ports, lp)
 	}
 	// Save the logical device
-	if added := agent.localDataProxy.Add("/logical_devices", ld, ""); added == nil {
+	if added := agent.clusterDataProxy.Add("/logical_devices", ld, ""); added == nil {
 		log.Errorw("failed-to-add-logical-device", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
 	} else {
 		log.Debugw("logicaldevice-created", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
@@ -108,7 +108,7 @@
 		cloned := reflect.ValueOf(ldevice).Elem().Interface().(voltha.LogicalDevice)
 		lp := (proto.Clone(portCap.Port)).(*voltha.LogicalPort)
 		cloned.Ports = append(cloned.Ports, lp)
-		afterUpdate := agent.localDataProxy.Update("/logical_devices/"+agent.logicalDeviceId, &cloned, false, "")
+		afterUpdate := agent.clusterDataProxy.Update("/logical_devices/"+agent.logicalDeviceId, &cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "failed-add-UNI-port:%s", agent.logicalDeviceId)
 		}
diff --git a/rw_core/core/logical_device_manager.go b/rw_core/core/logical_device_manager.go
index 61f96e8..aa22d57 100644
--- a/rw_core/core/logical_device_manager.go
+++ b/rw_core/core/logical_device_manager.go
@@ -34,18 +34,18 @@
 	deviceMgr                  *DeviceManager
 	adapterProxy               *AdapterProxy
 	kafkaProxy                 *kafka.KafkaMessagingProxy
-	localDataProxy             *model.Proxy
+	clusterDataProxy           *model.Proxy
 	exitChannel                chan int
 	lockLogicalDeviceAgentsMap sync.RWMutex
 }
 
-func NewLogicalDeviceManager(deviceMgr *DeviceManager, kafkaProxy *kafka.KafkaMessagingProxy, ldProxy *model.Proxy) *LogicalDeviceManager {
+func NewLogicalDeviceManager(deviceMgr *DeviceManager, kafkaProxy *kafka.KafkaMessagingProxy, cdProxy *model.Proxy) *LogicalDeviceManager {
 	var logicalDeviceMgr LogicalDeviceManager
 	logicalDeviceMgr.exitChannel = make(chan int, 1)
 	logicalDeviceMgr.logicalDeviceAgents = make(map[string]*LogicalDeviceAgent)
 	logicalDeviceMgr.deviceMgr = deviceMgr
 	logicalDeviceMgr.kafkaProxy = kafkaProxy
-	logicalDeviceMgr.localDataProxy = ldProxy
+	logicalDeviceMgr.clusterDataProxy = cdProxy
 	logicalDeviceMgr.lockLogicalDeviceAgentsMap = sync.RWMutex{}
 	return &logicalDeviceMgr
 }
@@ -80,7 +80,7 @@
 
 func (ldMgr *LogicalDeviceManager) getLogicalDevice(id string) (*voltha.LogicalDevice, error) {
 	log.Debugw("getlogicalDevice-start", log.Fields{"logicaldeviceid": id})
-	logicalDevice := ldMgr.localDataProxy.Get("/logical_devices/"+id, 1, false, "")
+	logicalDevice := ldMgr.clusterDataProxy.Get("/logical_devices/"+id, 1, false, "")
 	if logicalDevice != nil {
 		cloned := reflect.ValueOf(logicalDevice).Elem().Interface().(voltha.LogicalDevice)
 		return &cloned, nil
@@ -94,7 +94,7 @@
 	ldMgr.lockLogicalDeviceAgentsMap.Lock()
 	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
 	for _, agent := range ldMgr.logicalDeviceAgents {
-		logicalDevice := ldMgr.localDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 1, false, "")
+		logicalDevice := ldMgr.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 1, false, "")
 		if logicalDevice != nil {
 			cloned := reflect.ValueOf(logicalDevice).Elem().Interface().(voltha.LogicalDevice)
 			result.Items = append(result.Items, &cloned)
@@ -118,7 +118,7 @@
 	id := strings.Replace(macAddress, ":", "", -1)
 	log.Debugw("setting-logical-device-id", log.Fields{"logicaldeviceId": id})
 
-	agent := NewLogicalDeviceAgent(id, device, ldMgr, ldMgr.deviceMgr, ldMgr.localDataProxy)
+	agent := NewLogicalDeviceAgent(id, device, ldMgr, ldMgr.deviceMgr, ldMgr.clusterDataProxy)
 	ldMgr.addLogicalDeviceAgentToMap(agent)
 	go agent.Start(ctx)