VOL-595 - container image name consistency and general cleanup

Change-Id: Iccac1ccba61537cefa046118df139196e9e87713
diff --git a/compose/voltha-stack.yml.j2 b/compose/voltha-stack.yml.j2
new file mode 100644
index 0000000..09033d4
--- /dev/null
+++ b/compose/voltha-stack.yml.j2
@@ -0,0 +1,484 @@
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# SWARM_MANAGER_COUNT - The number of managers in the swarm cluster. This is
+#               used to limit the number of replicas of some of the
+#               services on smaller clusters.
+#               [ NOTE - currently this parameter is handled via a Jinja2
+#                 templating capability, because as of 17.09.0-ce Docker
+#                 cannot use an environment variable for the `replicas`
+#                 attribute on a container. ]
+# REGISTRY    - The docker registry from which images should be pulled.
+#               If specified, it should end in a `/`. Defaults to empty.
+# TAG         - The tag that should be used when referencing VOLTHA docker
+#               images, defaults to `latest`.
+# CONSUL_ROOT - The local path under which the consul/data and consul/config
+#               directories should be created. Defaults to `/var/local`.
+# VOLTHA_LOGS - The location into which `fluentd` should write logs.
+#               Defaults to `/var/log`.
+# RADIUS_ROOT - The location in which the `data/clients.conf` and 
+#               `data/users` configuration files can be found. Defaults
+#               to `/var/local`.
+# DOCKER_HOST_IP - The IP address of the Docker host. Currently referenced
+#               only by commented-out environment settings below.
+# ZK_TAG      - The tag that should be used when referencing the ZooKeeper
+#               docker image. Defaults to `3.4.11`.
+# KAFKA_TAG   - The tag that should be used when referencing the Kafka
+#               docker image. Defaults to `1.0.0`.
+# CONSUL_TAG  - The tag that should be used when referencing the Consul
+#               docker image. Defaults to `0.9.2`.
+# RADIUS_TAG  - The tag that should be used when referencing the FreeRADIUS
+#               docker image. Defaults to `latest`.
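+#
+# Example usage (a sketch only; the output file name and stack name are
+# illustrative, and any Jinja2-capable renderer will do):
+#
+#   SWARM_MANAGER_COUNT=3 python -c \
+#     'import os,sys,jinja2; print(jinja2.Template(sys.stdin.read()).render(dict(os.environ)))' \
+#     < voltha-stack.yml.j2 > voltha-stack.yml
+#   docker stack deploy -c voltha-stack.yml voltha
+#
+# Depending on the Docker release, the ${VAR} references in this file may
+# need to be resolved beforehand (e.g., with `envsubst`) if the
+# `docker stack deploy` CLI does not perform environment substitution itself.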
+version: "3"
+
+services:
+  zk1:
+    image: "${REGISTRY}zookeeper:${ZK_TAG:-3.4.11}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    networks:
+      - kafka-net
+    environment:
+      ZOO_MY_ID: 1
+      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zk2:2888:3888 server.3=zk3:2888:3888
+
+  zk2:
+    image: "${REGISTRY}zookeeper:${ZK_TAG:-3.4.11}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    networks:
+      - kafka-net
+    environment:
+      ZOO_MY_ID: 2
+      ZOO_SERVERS: server.1=zk1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zk3:2888:3888
+
+  zk3:
+    image: "${REGISTRY}zookeeper:${ZK_TAG:-3.4.11}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    networks:
+      - kafka-net
+    environment:
+      ZOO_MY_ID: 3
+      ZOO_SERVERS: server.1=zk1:2888:3888 server.2=zk2:2888:3888 server.3=0.0.0.0:2888:3888
+
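+  # Kafka is deployed in global mode, i.e., one broker per swarm node;
+  # each broker advertises port 9092 and coordinates through the ZooKeeper
+  # ensemble defined above.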
+  kafka:
+    deploy:
+      mode: global
+    image: "${REGISTRY}wurstmeister/kafka:${KAFKA_TAG:-1.0.0}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    environment:
+      KAFKA_ADVERTISED_PORT: 9092
+      KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
+      KAFKA_HEAP_OPTS: -Xmx256M -Xms128M
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    networks:
+      - kafka-net
+      - voltha-net
+    ports:
+      - 9092:9092
+
+  consul:
+    image: "${REGISTRY}consul:${CONSUL_TAG:-0.9.2}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+
+    # Deploy to all docker manager nodes
+    deploy:
+      mode: global
+      placement:
+        constraints:
+          - node.role == manager
+      restart_policy:
+        condition: on-failure
+    entrypoint:
+      - consul
+      - agent
+      - -server
+      - -bootstrap-expect={{ SWARM_MANAGER_COUNT | default(1) }}
+      - -config-dir=/consul/config
+      - -data-dir=/consul/data              # mandatory property
+      - -client=0.0.0.0
+      - -advertise={{ '{{' }} GetAllInterfaces | include "network" "172.29.20.0/29" | sort "size,address" | attr "address" {{ '}}' }}
+      - -ui
+      - -raft-protocol=3
+      - -rejoin
+      - -retry-join=172.29.20.2
+      - -retry-join=172.29.20.3
+      - -retry-join=172.29.20.4
+      - -retry-join=172.29.20.5
+      - -retry-join=172.29.20.6
+      - -retry-join=172.29.20.7
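+      # NOTE: the escaped braces on the -advertise argument above render to
+      # a literal Consul sockaddr template, which Consul evaluates at
+      # runtime to advertise its address on the consul-net subnet; the
+      # -retry-join addresses enumerate that same 172.29.20.0/29 subnet,
+      # defined at the bottom of this file.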
+    networks:
+      - consul-net
+      - voltha-net
+    ports:
+      - "8300:8300"
+      - "8400:8400"
+      - "8500:8500"
+      - "8600:8600/udp"
+{%- if CONSUL_ROOT is defined and CONSUL_ROOT %}
+    volumes:
+      - {{ CONSUL_ROOT }}/data:/consul/data
+      - {{ CONSUL_ROOT }}/config:/consul/config
+{%- endif %}
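+
+  # To verify that the Consul cluster has formed (a sketch; the exact
+  # container name depends on the stack name chosen at deploy time):
+  #
+  #   docker exec $(docker ps -q -f name=consul | head -1) consul members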
+
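+  # fluentdactv and fluentdstby are the active and standby fluentd log
+  # aggregators; the fluentd forwarders (below) stream logs to them, and
+  # each writes into the VOLTHA_LOGS directory when it is configured.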
+  fluentdactv:
+    image: "${REGISTRY}voltha/fluentd:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      replicas: 1
+      restart_policy:
+        condition: on-failure
+    environment:
+        FLUENTD_CONF: fluent-agg.conf
+    networks:
+      - voltha-net
+    ports:
+      - "24224"
+{%- if VOLTHA_LOGS is defined and VOLTHA_LOGS %}
+    volumes:
+      - "{{ VOLTHA_LOGS }}:/fluentd/log"
+{%- endif %}
+
+  fluentdstby:
+    image: "${REGISTRY}voltha/fluentd:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      replicas: 1
+      restart_policy:
+        condition: on-failure
+    environment:
+        FLUENTD_CONF: fluent-agg.conf
+    networks:
+      - voltha-net
+    ports:
+      - "24224"
+{%- if VOLTHA_LOGS is defined and VOLTHA_LOGS %}
+    volumes:
+      - "{{ VOLTHA_LOGS }}:/fluentd/log"
+{%- endif %}
+
+  # The fluentd containers are the forwarding tier of the data collection
+  # infrastructure; they relay logs to the active and standby aggregators
+  # (fluentdactv and fluentdstby) above.
+  fluentd:
+    image: "${REGISTRY}voltha/fluentd:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      mode: replicated
+      replicas: {{ SWARM_MANAGER_COUNT | default(1) }}
+      restart_policy:
+        condition: any
+    environment:
+        SERVICE_24224_NAME: "fluentd-intake"
+        FLUENTD_CONF: fluent.conf
+        WAIT_FOR: "fluentdactv:24224 fluentdstby:24224"
+        WAIT_FOR_TIMEOUT: 0
+    networks:
+      - voltha-net
+    ports:
+      - "24224"
+
+  # FreeRADIUS can be used to test VOLTHA's authentication
+  # sequence, i.e., EAPOL from a device behind an OLT through to
+  # RADIUS authentication on the back-end systems. By default
+  # no instances of FreeRADIUS are deployed.
+  freeradius:
+    deploy:
+      replicas: 0
+    image: "${REGISTRY}marcelmaatkamp/freeradius:${RADIUS_TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    ports:
+      - "1812:1812/udp"
+      - "1813:1813"
+      - "18120:18120"
+
+{%- if RADIUS_ROOT is defined and RADIUS_ROOT %}
+    # CONFIG: When deploying a FreeRADIUS instance, client and user
+    #      information must be configured for the service to operate
+    #      correctly.
+    #
+    volumes:
+      - {{ RADIUS_ROOT }}/data/clients.conf:/etc/raddb/clients.conf
+      - {{ RADIUS_ROOT }}/data/users:/etc/raddb/users
+{%- endif %}
+    networks:
+      - voltha-net
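+
+  # A minimal clients.conf entry, for illustration only (the subnet and
+  # secret are placeholders, not values shipped with the image):
+  #
+  #   client olt {
+  #     ipaddr = 172.29.19.0/24
+  #     secret = <shared-secret>
+  #   }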
+
+  # The cluster manager container calculates and serves ONOS cluster
+  # metadata via HTTP so that ONOS instances can form an HA cluster.
+  # The cluster manager must run on a manager node so that it can
+  # retrieve service information from the swarm managers.
+  onos_cluster_manager:
+    image: "${REGISTRY}voltha/unum:${TAG:-latest}"
+    deploy:
+      replicas: 1
+      placement:
+        constraints:
+          - node.role == manager
+    environment:
+      PERIOD: "10s"
+      LOG_LEVEL: "debug"
+      ORCHESTRATION: "swarm://"
+      LABELS: "org.voltha.onos.cluster:true"
+      NETWORK: "org.voltha.onos.cluster:true"
+    ports:
+      - 5411:5411
+    networks:
+      - voltha-net
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
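+    # The LABELS and NETWORK settings above select, via the
+    # org.voltha.onos.cluster label, the ONOS service instances and the
+    # overlay network (see the onos service and the voltha-net definition
+    # below) from which the cluster metadata is built.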
+
+  # ONOS is the SDN controller for the solution and handles AAA,
+  # IGMP proxy, and L2 DHCP requests, as well as managing flows
+  # down to the virtual devices (MCAST + OLT flows).
+  # Currently there is a single instance of ONOS, as some of the
+  # applications running under ONOS do not support HA.
+  onos:
+    deploy:
+      replicas: 1
+      labels:
+        org.voltha.onos.cluster: "true"
+    image: "${REGISTRY}voltha/onos:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    ports:
+      - 8101:8101 # ssh
+      - 6653:6653 # OF
+      - 8181:8181 # UI
+    environment:
+      EXTRA_JAVA_OPTS: "-Donos.cluster.metadata.uri=http://onos_cluster_manager:5411/config/"
+    networks:
+      - voltha-net
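+    # Example access (illustrative; credentials are defined by the ONOS
+    # image, typically karaf/karaf):
+    #
+    #   ssh -p 8101 karaf@<swarm-node-ip>       # Karaf CLI
+    #   http://<swarm-node-ip>:8181/onos/ui     # web UI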
+
+  # The VCORE container provides the core capabilities of VOLTHA,
+  # including interaction with the device adapters.
+  vcore:
+    image: "${REGISTRY}voltha/voltha:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      replicas: {{ SWARM_MANAGER_COUNT | default(1) }}
+    entrypoint:
+      - voltha/voltha/main.py
+      - -v
+      - --consul=consul:8500
+      - --fluentd=fluentd:24224
+      - --kafka=kafka
+      - --rest-port=8880
+      - --grpc-port=50556
+      - --instance-id-is-container-name
+      - --backend=consul
+      - --inter-core-subnet=172.29.19.0/24
+      - --pon-subnet=172.29.19.0/24
+    networks:
+      - voltha-net
+    ports:
+      - "8880:8880"
+      - "18880:18880"
+      - "50556:50556"
+    volumes:
+      - /var/run/docker.sock:/tmp/docker.sock
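+    # NOTE: the --inter-core-subnet and --pon-subnet values above match the
+    # voltha-net overlay subnet (172.29.19.0/24) defined at the bottom of
+    # this file.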
+
+  # The OpenFlow agent supports the OpenFlow protocol communication
+  # between ONOS and VOLTHA.
+  ofagent:
+    image: "${REGISTRY}voltha/ofagent:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      replicas: 1
+    entrypoint:
+      - /ofagent/ofagent/main.py
+      - -v
+      - --consul=consul:8500
+      - --fluentd=fluentd:24224
+      - --controller=onos:6653
+      - --grpc-endpoint=vcore:50556
+      - --instance-id-is-container-name
+    volumes:
+      - /var/run/docker.sock:/tmp/docker.sock
+    networks:
+      - voltha-net
+
+  # The VOLTHA container supports load balancing of requests to the
+  # VOLTHA components, as well as surfacing a REST API and mapping
+  # those requests to gRPC.
+  voltha:
+    image: "${REGISTRY}voltha/envoy:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      replicas: 1
+    #environment:
+    #  DOCKER_HOST_IP: "${DOCKER_HOST_IP}"
+    entrypoint:
+      - /usr/local/bin/envoyd
+      - -envoy-cfg-template
+      - "/envoy/voltha-grpc-proxy.template.json"
+      - -envoy-config
+      - "/envoy/voltha-grpc-proxy.json"
+    networks:
+      - voltha-net
+    ports:
+      - "50555:50555"
+      - "8882:8882"
+      - "8443:8443"
+      - "8001:8001"
+    volumes:
+      - /var/run/docker.sock:/tmp/docker.sock
+
+  # The CLI container provides a CLI to the VOLTHA capability
+  # that can be accessed via SSH.
+  cli:
+    image: "${REGISTRY}voltha/cli:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      replicas: {{ SWARM_MANAGER_COUNT | default(1) }}
+    #environment:
+    #  DOCKER_HOST_IP: "${DOCKER_HOST_IP}"
+    entrypoint:
+      - /cli/cli/setup.sh
+      - -C consul:8500
+      - -g voltha:50555
+      - -s voltha:18880
+      - -G
+    networks:
+      - voltha-net
+    ports:
+      - "5022:22"
+
+  # The NETCONF container provides a NETCONF API to be used
+  # with VOLTHA and maps that to gRPC requests.
+  netconf:
+    image: "${REGISTRY}voltha/netconf:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      mode: global
+    environment:
+      SERVICE_1830_NAME: "netconf-server"
+    entrypoint:
+      - /netconf/netconf/main.py
+      - -v
+      - --consul=consul:8500
+      - --fluentd=fluentd:24224
+      - --grpc-endpoint=voltha:50555
+      - --instance-id-is-container-name
+    networks:
+      - voltha-net
+    ports:
+      - "830:1830"
+    volumes:
+      - /var/run/docker.sock:/tmp/docker.sock
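+    # The 830:1830 port mapping above exposes the standard NETCONF-over-SSH
+    # port (830) on the host and forwards it to the server's unprivileged
+    # listener (1830) inside the container.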
+
+  # The tools container provides a bash command shell to which
+  # an operator can SSH; it has the same network connectivity
+  # as other VOLTHA containers and is thus a convenient
+  # troubleshooting tool.
+  tools:
+    image: "${REGISTRY}voltha/tools:${TAG:-latest}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: 3
+    deploy:
+      mode: replicated
+      replicas: 1
+      restart_policy:
+        condition: on-failure
+    ports:
+      - "4022:22"
+    networks:
+      - voltha-net
+      - kafka-net
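+    # Example access (illustrative; the user and credentials are defined
+    # by the voltha/tools image):
+    #
+    #   ssh -p 4022 <user>@<swarm-node-ip>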
+
+networks:
+  consul-net:
+    driver: overlay
+    driver_opts:
+      encrypted: "true"
+    ipam:
+      driver: default
+      config:
+        - subnet: 172.29.20.0/29
+
+  voltha-net:
+    driver: overlay
+    driver_opts:
+      encrypted: "true"
+    ipam:
+      driver: default
+      config:
+        - subnet: 172.29.19.0/24
+    labels:
+      org.voltha.onos.cluster: "true"
+
+  kafka-net:
+    driver: overlay
+    driver_opts:
+      encrypted: "true"