Large update which includes the following:
- The introduction of a tools container which can be expanded as
  necessary. It connects to the voltha and kafka networks
- The introduction of the envoy proxy container
- The addition of an option to the CreateInstaller.sh script to rebuild
  the voltha VM to pick up any new code changes (see the usage sketch
  after this list)
- A fix to work around a dpkg issue where it didn't handle dependencies
  properly (ubuntu-core-launcher and snapd are now removed before the
  dpkg install)
- Addition of start and stop scripts for the voltha suite and the use of
  those scripts by the installer. The old per-service startup has been
  eliminated from the installer (see the usage sketch after this list)
- Increased the number of vCPUs used by the installer and the voltha VM
  to speed up installer creation.
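
For reference, a rough usage sketch of the new pieces follows; the
working directories, the swarm host name, and the combination of options
are assumptions based on the defaults in this change, not a definitive
procedure:

    # Rebuild the voltha VM while (re)creating the installer; this can
    # be combined with the existing "test" option
    ./CreateInstaller.sh rebuild

    # Start / stop the voltha suite (run from the voltha install
    # directory on the swarm)
    ./voltha-swarm-start.sh
    ./voltha-swarm-stop.sh

    # Reach the tools container; sshd is published on port 4022 and the
    # tools user/password are set up in Dockerfile.tools
    ssh -p 4022 tools@<swarm-host>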
Note that the envoy proxy has not yet been integrated into the voltha
suite; this commit adds the container and some preliminary configuration,
but integration of the proxy will be committed in a subsequent update.
The envoy stack can, however, be deployed standalone as sketched below.
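
For completeness, a standalone deployment of the envoy stackfile looks
roughly like this (it assumes the voltha_net overlay network already
exists, per the header comment in the stackfile):

    docker stack deploy -c compose/docker-compose-envoy-swarm.yml envoy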

Addressed comments from the reviewers

Change-Id: I5475f110ba955631baf05b0e34aa6a934ca69a24
diff --git a/Makefile b/Makefile
index 992c0f1..bdd4009 100644
--- a/Makefile
+++ b/Makefile
@@ -22,7 +22,7 @@
 
 VENVDIR := venv-$(shell uname -s | tr '[:upper:]' '[:lower:]')
 
-.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) flake8 docker-base voltha chameleon ofagent podder netconf shovel onos dashd vcli portainer grafana nginx consul registrator
+.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) flake8 docker-base voltha chameleon ofagent podder netconf shovel onos dashd vcli portainer grafana nginx consul registrator envoy
 
 # This should to be the first and default target in this Makefile
 help:
@@ -96,9 +96,9 @@
 production: protos prod-containers
 
 
-prod-containers: docker-base voltha chameleon ofagent netconf shovel dashd vcli grafana consul registrator
+prod-containers: docker-base voltha chameleon ofagent netconf shovel dashd vcli grafana consul registrator envoy registry
 
-containers: docker-base voltha chameleon ofagent podder netconf shovel onos tester config-push dashd vcli portainer grafana nginx consul registrator
+containers: docker-base voltha chameleon ofagent podder netconf shovel onos tester config-push dashd vcli portainer grafana nginx consul registrator tools envoy
 
 docker-base:
 	docker build -t cord/voltha-base -f docker/Dockerfile.base .
@@ -121,6 +121,12 @@
 podder:
 	docker build -t cord/podder -f docker/Dockerfile.podder .
 
+tools:
+	docker build -t voltha/tools -f docker/Dockerfile.tools .
+
+envoy:
+	docker build -t voltha/envoy -f docker/Dockerfile.envoy .
+
 netconf:
 	docker build -t cord/netconf -f docker/Dockerfile.netconf .
 
@@ -186,6 +192,8 @@
 	docker pull zookeeper:latest
 	docker pull nginx:latest
 	docker pull portainer/portainer:latest
+	docker pull lyft/envoy:latest
+	docker pull registry:2
 
 purge-venv:
 	rm -fr ${VENVDIR}
diff --git a/Vagrantfile b/Vagrantfile
index 8ea095d..6de0e0f 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -47,6 +47,7 @@
     d.vm.provision :shell, inline: "cd /cord/incubator/voltha && source env.sh && make install-protoc && chmod 777 /tmp/fluentd"
     d.vm.provider Provider do |v|
       v.memory = 6144
+      v.cpus = 4
     end
   end
 
diff --git a/compose/docker-compose-envoy-swarm.yml b/compose/docker-compose-envoy-swarm.yml
new file mode 100644
index 0000000..95d6580
--- /dev/null
+++ b/compose/docker-compose-envoy-swarm.yml
@@ -0,0 +1,32 @@
+#
+# This Docker stackfile deploys an envoy proxy container.
+#
+# The stackfile assumes that overlay network 'voltha_net' has already been
+# created. To deploy the stack, issue the command:
+#
+#     docker stack deploy -c docker-compose-envoy-swarm.yml envoy
+#
+
+version: "3"
+services:
+  envoy:
+    image: voltha/envoy:latest
+    deploy:
+      replicas: 1
+    environment:
+      DOCKER_HOST_IP: "${DOCKER_HOST_IP}"
+    entrypoint:
+      - /usr/local/bin/envoy
+      - -c /etc/envoy/front-proxy/voltha-grpc-proxy.json
+    networks:
+      - voltha-net
+    ports:
+      - "50556:50556"
+    volumes:
+      - /cord/incubator/voltha/envoy:/etc/envoy
+      
+networks:
+  voltha-net:
+    external:
+      name: voltha_net
+
diff --git a/docker/Dockerfile.envoy b/docker/Dockerfile.envoy
new file mode 100644
index 0000000..23debf8
--- /dev/null
+++ b/docker/Dockerfile.envoy
@@ -0,0 +1,8 @@
+FROM lyft/envoy:latest
+
+RUN apt-get update && apt-get -q install -y \
+    curl
+ADD envoy /etc/
+
+
+CMD /usr/local/bin/envoy -c /etc/envoy/front-proxy/voltha-grpc-proxy.json
diff --git a/docker/Dockerfile.tools b/docker/Dockerfile.tools
new file mode 100755
index 0000000..08956a2
--- /dev/null
+++ b/docker/Dockerfile.tools
@@ -0,0 +1,49 @@
+# Dockerfile for the voltha tools container
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM cord/voltha-base
+
+MAINTAINER Sergio Slobodrian <sslobodr@ciena.com>
+
+# Bundle app source
+#RUN mkdir /cli && touch /cli/__init__.py
+#ENV PYTHONPATH=/cli
+#COPY common /cli/common
+#COPY cli /cli/cli
+#COPY voltha /cli/voltha
+RUN useradd -b /home -d /home/tools tools -s /bin/bash
+RUN mkdir /home/tools
+COPY docker/config/bashrc /home/tools/.bashrc
+RUN chown -R tools.tools /home/tools
+RUN echo "tools:tools" | chpasswd
+RUN apt-get update && apt-get -y upgrade && apt-get -y install openssh-server kafkacat iputils-ping vim manpages iproute2 net-tools moreutils
+RUN mkdir /var/run/sshd
+RUN echo 'root:screencast' | chpasswd
+RUN sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+# SSH login fix. Otherwise user is kicked off after login
+RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+ENV NOTVISIBLE "in users profile"
+RUN echo "export VISIBLE=now" >> /etc/profile
+
+EXPOSE 22
+
+# Exposing process and default entry point
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+
+CMD ["/usr/sbin/sshd", "-D"]
diff --git a/docker/config/bashrc b/docker/config/bashrc
new file mode 100644
index 0000000..b964504
--- /dev/null
+++ b/docker/config/bashrc
@@ -0,0 +1,119 @@
+# ~/.bashrc: executed by bash(1) for non-login shells.
+# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
+# for examples
+
+# If not running interactively, don't do anything
+case $- in
+    *i*) ;;
+      *) return;;
+esac
+
+# don't put duplicate lines or lines starting with space in the history.
+# See bash(1) for more options
+HISTCONTROL=ignoreboth
+
+# append to the history file, don't overwrite it
+shopt -s histappend
+
+# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
+HISTSIZE=1000
+HISTFILESIZE=2000
+
+# check the window size after each command and, if necessary,
+# update the values of LINES and COLUMNS.
+shopt -s checkwinsize
+
+# If set, the pattern "**" used in a pathname expansion context will
+# match all files and zero or more directories and subdirectories.
+#shopt -s globstar
+
+# make less more friendly for non-text input files, see lesspipe(1)
+[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
+
+# set variable identifying the chroot you work in (used in the prompt below)
+if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
+    debian_chroot=$(cat /etc/debian_chroot)
+fi
+
+# set a fancy prompt (non-color, unless we know we "want" color)
+case "$TERM" in
+    xterm-color|*-256color) color_prompt=yes;;
+esac
+
+# uncomment for a colored prompt, if the terminal has the capability; turned
+# off by default to not distract the user: the focus in a terminal window
+# should be on the output of commands, not on the prompt
+#force_color_prompt=yes
+
+if [ -n "$force_color_prompt" ]; then
+    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
+	# We have color support; assume it's compliant with Ecma-48
+	# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
+	# a case would tend to support setf rather than setaf.)
+	color_prompt=yes
+    else
+	color_prompt=
+    fi
+fi
+
+if [ "$color_prompt" = yes ]; then
+    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+else
+    PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
+fi
+unset color_prompt force_color_prompt
+
+# If this is an xterm set the title to user@host:dir
+case "$TERM" in
+xterm*|rxvt*)
+    PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
+    ;;
+*)
+    ;;
+esac
+
+# enable color support of ls and also add handy aliases
+if [ -x /usr/bin/dircolors ]; then
+    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
+    alias ls='ls --color=auto'
+    #alias dir='dir --color=auto'
+    #alias vdir='vdir --color=auto'
+
+    alias grep='grep --color=auto'
+    alias fgrep='fgrep --color=auto'
+    alias egrep='egrep --color=auto'
+fi
+
+# colored GCC warnings and errors
+#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
+
+# some more ls aliases
+alias ll='ls -alF'
+alias la='ls -A'
+alias l='ls -CF'
+
+# Add an "alert" alias for long running commands.  Use like so:
+#   sleep 10; alert
+alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
+
+# Alias definitions.
+# You may want to put all your additions into a separate file like
+# ~/.bash_aliases, instead of adding them here directly.
+# See /usr/share/doc/bash-doc/examples in the bash-doc package.
+
+if [ -f ~/.bash_aliases ]; then
+    . ~/.bash_aliases
+fi
+
+# enable programmable completion features (you don't need to enable
+# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
+# sources /etc/bash.bashrc).
+if ! shopt -oq posix; then
+  if [ -f /usr/share/bash-completion/bash_completion ]; then
+    . /usr/share/bash-completion/bash_completion
+  elif [ -f /etc/bash_completion ]; then
+    . /etc/bash_completion
+  fi
+fi
+
+export EDITOR=vi
diff --git a/envoy/front-proxy/start_service.sh b/envoy/front-proxy/start_service.sh
new file mode 100644
index 0000000..cf98f2c
--- /dev/null
+++ b/envoy/front-proxy/start_service.sh
@@ -0,0 +1,2 @@
+python /code/service.py &
+envoy -c /etc/service-envoy.json
diff --git a/envoy/front-proxy/voltha-grpc-proxy.json b/envoy/front-proxy/voltha-grpc-proxy.json
new file mode 100644
index 0000000..fb0605e
--- /dev/null
+++ b/envoy/front-proxy/voltha-grpc-proxy.json
@@ -0,0 +1,184 @@
+{
+  "listeners": [
+    {
+      "address": "tcp://0.0.0.0:3000",
+      "filters": [
+        {
+          "type": "read",
+          "name": "http_connection_manager",
+          "config": {
+            "codec_type": "auto",
+            "stat_prefix": "ingress_http",
+            "route_config": {
+              "virtual_hosts": [
+                {
+                  "name": "backend",
+                  "domains": ["*"],
+                  "routes": [
+                    {
+                      "timeout_ms": 0,
+                      "prefix": "/docker/",
+		      "prefix_rewrite" : "/",
+                      "cluster": "portainer"
+                    },
+                    {
+                      "timeout_ms": 0,
+                      "prefix": "/docker",
+		      "prefix_rewrite" : "/",
+                      "cluster": "portainer"
+                    },
+                    {
+                      "timeout_ms": 0,
+                      "prefix": "/grafana",
+		      "prefix_rewrite" : "/",
+                      "cluster": "grafana"
+                    },
+                    {
+                      "timeout_ms": 0,
+                      "prefix": "/consul/",
+		      "prefix_rewrite" : "/",
+                      "cluster": "consul"
+                    },
+                    {
+                      "timeout_ms": 0,
+                      "prefix": "/consul",
+		      "prefix_rewrite" : "/",
+                      "cluster": "consul"
+                    },
+                    {
+                      "timeout_ms": 0,
+                      "prefix": "/v1",
+                      "cluster": "consul"
+                    },
+                    {
+                      "timeout_ms": 0,
+                      "prefix": "/ui",
+                      "cluster": "consul"
+                    }
+
+                  ]
+                }
+              ]
+            },
+            "filters": [
+              {
+                "type": "decoder",
+                "name": "router",
+                "config": {}
+              }
+            ]
+          }
+        }
+      ]
+    },
+    {
+      "address": "tcp://0.0.0.0:50555",
+      "filters": [
+        {
+          "type": "read",
+          "name": "http_connection_manager",
+          "config": {
+            "codec_type": "http2",
+            "stat_prefix": "ingress_http2",
+	    "access_log": [
+		{
+			"path": "/envoy/voltha_access_log.log"
+		}
+	    ],
+            "route_config": {
+              "virtual_hosts": [
+                {
+                  "name": "backend",
+                  "domains": ["*"],
+                  "routes": [
+                    {
+                      "timeout_ms": 0,
+		      "prefix": "/voltha.HealthService/GetHealthStatus",
+                      "cluster": "voltha-grpc"
+                    },
+                    {
+                      "timeout_ms": 0,
+		      "prefix": "/voltha.VolthaLocalService/CreateDevice",
+                      "cluster": "voltha-grpc"
+                    },
+                    {
+                      "timeout_ms": 0,
+		      "prefix": "/voltha.VolthaLocalService",
+                      "cluster": "voltha-grpc"
+                    },
+                    {
+                      "timeout_ms": 0,
+		      "prefix": "/voltha.",
+                      "cluster": "voltha-grpc"
+                    }
+                  ]
+                }
+              ]
+            },
+            "filters": [
+              {
+                "type": "decoder",
+                "name": "router",
+                "config": {}
+              }
+            ]
+          }
+        }
+      ]
+    }
+  ],
+  "admin": {
+    "access_log_path": "/envoy/access.log",
+    "address": "tcp://0.0.0.0:8001"
+  },
+  "cluster_manager": {
+    "clusters": [
+      {
+        "name": "voltha-grpc",
+        "connect_timeout_ms": 250,
+        "type": "static",
+        "lb_type": "round_robin",
+	"features": "http2",
+        "hosts": [
+          {
+            "url": "tcp://10.0.2.15:32786"
+          }
+        ]
+      },
+      {
+        "name": "portainer",
+        "connect_timeout_ms": 250,
+        "type": "static",
+        "lb_type": "round_robin",
+        "hosts": [
+          {
+            "url": "tcp://10.0.2.15:9000"
+          }
+        ]
+      },
+      {
+        "name": "consul",
+        "connect_timeout_ms": 250,
+        "type": "static",
+        "lb_type": "round_robin",
+        "hosts": [
+          {
+            "url": "tcp://10.0.2.15:8500"
+          }
+        ]
+      },
+      {
+        "name": "grafana",
+        "connect_timeout_ms": 250,
+        "type": "static",
+        "lb_type": "round_robin",
+        "hosts": [
+          {
+            "url": "tcp://10.0.2.15:8882"
+          }
+        ]
+      }
+    ]
+  }
+}
+
diff --git a/envoy/hot-restarter.py b/envoy/hot-restarter.py
new file mode 100755
index 0000000..bac54dd
--- /dev/null
+++ b/envoy/hot-restarter.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+import os
+import signal
+import sys
+import time
+
+restart_epoch = 0
+pid_list = []
+
+def force_kill_all_children():
+  """ Iterate through all known child processes and force kill them. In the future we might consider
+      possibly giving the child processes time to exit but this is fine for now. If someone force kills
+      us and does not clean the process tree this will leave child processes around unless they choose
+      to end themselves if their parent process dies. """
+
+  # First uninstall the SIGCHLD handler so that we don't get called again.
+  signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+
+  global pid_list
+  for pid in pid_list:
+    print "force killing PID={}".format(pid)
+    try:
+      os.kill(pid, signal.SIGKILL)
+    except:
+      print "error force killing PID={} continuing".format(pid)
+
+  pid_list = []
+
+
+def sigterm_handler(signum, frame):
+  """ Handler for SIGTERM. See force_kill_all_children() for further discussion. """
+
+  print "got SIGTERM"
+  force_kill_all_children()
+  sys.exit(0)
+
+
+def sighup_handler(signum, frame):
+  """ Handler for SIGUP. This signal is used to cause the restarter to fork and exec a new
+      child. """
+
+  print "got SIGHUP"
+  fork_and_exec()
+
+def sigusr1_handler(signum, frame):
+  """ Handler for SIGUSR1. Propagate SIGUSR1 to all of the child processes """
+
+  global pid_list
+  for pid in pid_list:
+    print "sending SIGUSR1 to PID={}".format(pid)
+    try:
+      os.kill(pid, signal.SIGUSR1)
+    except:
+      print "error in SIGUSR1 to PID={} continuing".format(pid)
+
+
+def sigchld_handler(signum, frame):
+  """ Handler for SIGCHLD. Iterates through all of our known child processes and figures out whether
+      the signal/exit was expected or not. Python doesn't have any of the native signal handlers
+      ability to get the child process info directly from the signal handler so we need to iterate
+      through all child processes and see what happened."""
+
+  print "got SIGCHLD"
+
+  kill_all_and_exit = False
+  global pid_list
+  pid_list_copy = list(pid_list)
+  for pid in pid_list_copy:
+    ret_pid, exit_status = os.waitpid(pid, os.WNOHANG)
+    if ret_pid == 0 and exit_status == 0:
+      # This child is still running.
+      continue
+
+    pid_list.remove(pid)
+
+    # Now we see how the child exited.
+    if os.WIFEXITED(exit_status):
+      exit_code = os.WEXITSTATUS(exit_status)
+      print "PID={} exited with code={}".format(ret_pid, exit_code)
+      if exit_code == 0:
+        # Normal exit. We assume this was on purpose.
+        pass
+      else:
+        # Something bad happened. We need to tear everything down so that whoever started the
+        # restarter can know about this situation and restart the whole thing.
+        kill_all_and_exit = True
+    elif os.WIFSIGNALED(exit_status):
+      print "PID={} was killed with signal={}".format(ret_pid, os.WTERMSIG(exit_status))
+      kill_all_and_exit = True
+    else:
+      kill_all_and_exit = True
+
+  if kill_all_and_exit:
+    print "Due to abnormal exit, force killing all child processes and exiting"
+    force_kill_all_children()
+
+  # Our last child died, so we have no purpose. Exit.
+  if not pid_list:
+    print "exiting due to lack of child processes"
+    sys.exit(1 if kill_all_and_exit else 0)
+
+
+def fork_and_exec():
+  """ This routine forks and execs a new child process and keeps track of its PID. Before we fork,
+      set the current restart epoch in an env variable that processes can read if they care. """
+
+  global restart_epoch
+  os.environ['RESTART_EPOCH'] = str(restart_epoch)
+  print "forking and execing new child process at epoch {}".format(restart_epoch)
+  restart_epoch += 1
+
+  child_pid = os.fork()
+  if child_pid == 0:
+    # Child process
+    os.execl(sys.argv[1], sys.argv[1])
+  else:
+    # Parent process
+    print "forked new child process with PID={}".format(child_pid)
+    pid_list.append(child_pid)
+
+
+def main():
+  """ Script main. This script is designed so that a process watcher like runit or monit can watch
+      this process and take corrective action if it ever goes away. """
+
+  print "starting hot-restarter with target: {}".format(sys.argv[1])
+
+  signal.signal(signal.SIGTERM, sigterm_handler)
+  signal.signal(signal.SIGHUP, sighup_handler)
+  signal.signal(signal.SIGCHLD, sigchld_handler)
+  signal.signal(signal.SIGUSR1, sigusr1_handler)
+
+  # Start the first child process and then go into an endless loop since everything else happens via
+  # signals.
+  fork_and_exec()
+  while True:
+    time.sleep(60)
+
+if __name__ == '__main__':
+  main()
diff --git a/envoy/start_envoy.sh b/envoy/start_envoy.sh
new file mode 100755
index 0000000..216e59f
--- /dev/null
+++ b/envoy/start_envoy.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+RESTART_EPOCH=0
+
+
+echo "Staring envoy re-starter"
+
+
+function fork_envoy()
+{
+    echo "Forking envoy"
+    /usr/local/bin/envoy -l debug -c envoy/front-proxy/voltha-grpc-proxy.json --restart-epoch $RESTART_EPOCH &
+    CUR_PID=$!
+    RESTART_EPOCH=`expr $RESTART_EPOCH + 1`
+    wait
+}
+
+function end_envoy()
+{
+        echo "Killing envoy"
+	kill -KILL $CUR_PID
+}
+
+trap fork_envoy SIGHUP
+trap end_envoy SIGTERM
+
+fork_envoy
+
+
diff --git a/install/CreateInstaller.sh b/install/CreateInstaller.sh
index 4a79b37..b9ae71a 100755
--- a/install/CreateInstaller.sh
+++ b/install/CreateInstaller.sh
@@ -13,6 +13,7 @@
 
 # Command line argument variables
 testMode="no"
+rebuildVoltha="no"
 
 
 
@@ -38,6 +39,10 @@
 				testMode="yes"
 				echo -e "${lBlue}Test mode is ${green}enabled${NC}"
 				;;
+			"rebuild" )
+				rebuildVoltha="yes"
+				echo -e "${lBlue}Voltha rebuild is ${green}enabled${NC}"
+				;;
 		esac
 	done
 }
@@ -231,9 +236,12 @@
 
 # Ensure that the voltha VM is running so that images can be secured
 echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
-vVM=`virsh list | grep voltha_voltha${uId}`
+vVm=`virsh list | grep "voltha_voltha${uId}"`
+#echo "vVm: $vVm"
+#echo "rebuildVoltha: $rebuildVoltha"
 
-if [ -z "$vVM" ]; then
+
+if [ -z "$vVm" -o "$rebuildVoltha" == "yes" ]; then
 	if [ "$testMode" == "yes" ]; then
 		./BuildVoltha.sh $1
 		rtrn=$?
diff --git a/install/Vagrantfile b/install/Vagrantfile
index bc1f9d2..65c3ce2 100644
--- a/install/Vagrantfile
+++ b/install/Vagrantfile
@@ -18,6 +18,7 @@
       d.vm.provision :shell, inline: "apt-get -y install python"
       d.vm.provider "libvirt" do |v|
         v.memory = 6144
+	v.cpus = 2
       end
     end
   end
diff --git a/install/ansible/roles/cluster-host/tasks/main.yml b/install/ansible/roles/cluster-host/tasks/main.yml
index 8035072..e3115ef 100644
--- a/install/ansible/roles/cluster-host/tasks/main.yml
+++ b/install/ansible/roles/cluster-host/tasks/main.yml
@@ -77,6 +77,14 @@
     links: yes
   tags: [cluster_host]
 
+- name: pre-emptive strike to avoid errors during package installation
+  apt:
+    name: "{{ item }}"
+    state: absent
+  with_items:
+    - ubuntu-core-launcher
+    - snapd
+  tags: [cluster_host]
 
 - name: Dependent software is installed (this can take about 10 Min, DONT'T PANIC, go for coffee instead)
   command: dpkg -R -i "{{ target_voltha_home }}/deb_files"
diff --git a/install/ansible/roles/installer/tasks/main.yml b/install/ansible/roles/installer/tasks/main.yml
index dc6bbcf..b5b7afa 100644
--- a/install/ansible/roles/installer/tasks/main.yml
+++ b/install/ansible/roles/installer/tasks/main.yml
@@ -33,9 +33,12 @@
   with_items:
     - install/installer.sh
     - install/install.cfg
+    - install/voltha-swarm-start.sh
+    - install/voltha-swarm-stop.sh
     - install/ansible
     - compose
     - nginx_config
+    - envoy
   tags: [installer]
 
 - name: Determine if test mode is active
@@ -85,6 +88,7 @@
     - deb_files
     - docker-py
     - netifaces
+    - envoy
   tags: [installer]
 - name: Installer files are owned by vinstall
   file:
@@ -95,5 +99,7 @@
   with_items:
     - installer.sh
     - install.cfg
+    - voltha-swarm-start.sh
+    - voltha-swarm-stop.sh
     - docker-compose-Linux-x86_64
   tags: [installer]
diff --git a/install/ansible/roles/voltha/files/consul_config/base_config.json b/install/ansible/roles/voltha/files/consul_config/base_config.json
deleted file mode 100644
index 217fc09..0000000
--- a/install/ansible/roles/voltha/files/consul_config/base_config.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-	"server": true,
-	"ui": true, 
-	"bootstrap_expect": 3,
-	"client_addr": "0.0.0.0",
-	"disable_update_check": true,
-	"retry_join": ["10.10.10.3", "10.10.10.4", "10.10.10.5"]
-}
-
diff --git a/install/ansible/roles/voltha/tasks/main.yml b/install/ansible/roles/voltha/tasks/main.yml
index 2fa5ccd..5633b76 100644
--- a/install/ansible/roles/voltha/tasks/main.yml
+++ b/install/ansible/roles/voltha/tasks/main.yml
@@ -31,7 +31,7 @@
   when: target == "cluster"
   tags: [voltha]
 
-- name: Installer files and directories are copied
+- name: Configuration files and directories are copied
   synchronize:
     src: "/home/vinstall/{{ item }}"
     dest: "{{ target_voltha_dir }}"
@@ -43,12 +43,15 @@
   with_items:
     - compose
     - nginx_config
+    - envoy
+    - voltha-swarm-start.sh
+    - voltha-swarm-stop.sh
   when: target == "cluster"
   tags: [voltha]
 
-- name: Installer directories are owned by voltha
+- name: Configuration directories are owned by voltha
   file:
-    path: /home/vinstall/{{ item }}
+    path: "{{ target_voltha_dir }}/{{ item }}"
     owner: voltha
     group: voltha
     recurse: yes
@@ -56,6 +59,20 @@
   with_items:
     - compose
     - nginx_config
+    - envoy
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: Script files are owned by voltha and executable
+  file:
+    path: "{{ target_voltha_dir }}/{{ item }}"
+    owner: voltha
+    group: voltha
+    mode: 0755
+    follow: no
+  with_items:
+    - voltha-swarm-start.sh
+    - voltha-swarm-stop.sh
   when: target == "cluster"
   tags: [voltha]
 
@@ -66,12 +83,14 @@
   when: target == "cluster"
   tags: [voltha]
 
-- name: Configuration files are on the cluster host
-  copy:
-    src: "files/consul_config"
-    dest: "{{ target_voltha_dir }}"
-  when: target == "cluster"
-  tags: [voltha]
+#- name: Configuration files are on the cluster host
+#  copy:
+#    src: "files/consul_config"
+#    dest: "{{ target_voltha_dir }}"
+#    owner: voltha
+#    group: voltha
+#  when: target == "cluster"
+#  tags: [voltha]
 
 - name: Docker containers for Voltha are pulled
   command: docker pull {{ docker_registry }}/{{ item }}
@@ -124,31 +143,22 @@
   with_items: "{{ voltha_containers }}"
   when: target == "installer"
   tags: [voltha]
+
 - name: Docker containers for Voltha are pushed
   command: docker push {{ docker_push_registry }}/{{ item }}
   with_items: "{{ voltha_containers }}"
   when: target == "installer"
   tags: [voltha]
+
 - name: Temporary registry push tags are removed
   command: docker rmi {{ docker_push_registry }}/{{ item }}
   with_items: "{{ voltha_containers }}"
   when: target == "installer"
   tags: [voltha]
 
-- name: voltha overlay network exists
-  command: docker network create --opt encrypted=true --driver overlay --subnet 10.10.12.0/24 voltha_net
+- name: voltha components are started
+  command: "{{ target_voltha_dir }}/voltha-swarm-start.sh"
   become: voltha
   when: target == "startup"
   tags: [voltha]
 
-- name: consul cluster is running
-  command: docker stack deploy -c {{ target_voltha_dir }}/compose/docker-compose-consul-cluster.yml consul
-  become: voltha
-  when: target == "startup"
-  tags: [voltha]
-
-- name: kafka is running
-  command: docker stack deploy -c {{ target_voltha_dir}}/compose/docker-compose-kafka-cluster.yml kafka
-  become: voltha
-  when: target == "startup"
-  tags: [voltha]
diff --git a/install/containers.cfg b/install/containers.cfg
index a9ffbbb..df0d264 100644
--- a/install/containers.cfg
+++ b/install/containers.cfg
@@ -13,3 +13,5 @@
   - wurstmeister/kafka:latest
   - zookeeper:latest
   - gliderlabs/registrator:master
+  - voltha/envoy:latest
+  - registry:2
diff --git a/install/vmTemplate.xml b/install/vmTemplate.xml
index b08cea6..d60ba56 100644
--- a/install/vmTemplate.xml
+++ b/install/vmTemplate.xml
@@ -2,7 +2,7 @@
   <name>{{ VMName }}</name>
   <memory unit='KiB'>1048576</memory>
   <currentMemory unit='KiB'>1048576</currentMemory>
-  <vcpu placement='static'>2</vcpu>
+  <vcpu placement='static'>4</vcpu>
   <os>
     <type arch='x86_64' machine='pc-i440fx-xenial'>hvm</type>
     <boot dev='hd'/>
diff --git a/install/voltha-swarm-start.sh b/install/voltha-swarm-start.sh
new file mode 100755
index 0000000..71245f1
--- /dev/null
+++ b/install/voltha-swarm-start.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+voltha_base_dir="/cord/incubator/voltha"
+
+docker network create --driver overlay --subnet=10.0.1.0/24 --opt encrypted=true voltha_net
+docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-kafka-cluster.yml kafka
+docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-consul-cluster.yml consul
+docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-voltha-swarm.yml voltha
+docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-vcli.yml cli
+docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-chameleon-swarm.yml chameleon
+docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-netconf-swarm.yml netconf
+docker service create -d --name tools --network voltha_net  --network kafka_net --publish "4022:22" voltha/tools
+
diff --git a/install/voltha-swarm-stop.sh b/install/voltha-swarm-stop.sh
new file mode 100644
index 0000000..8c21cfe
--- /dev/null
+++ b/install/voltha-swarm-stop.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+docker service rm chameleon_chameleon
+docker service rm netconf_netconf
+docker service rm cli_cli
+docker service rm voltha_voltha
+docker stack rm consul
+docker stack rm kafka
+docker service rm tools
+docker network rm voltha_net