Update the Jenkins build platform to build only the images needed to run the tests

This prevents the Vagrant VM from running out of disk space.
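
For reference, a sketch of the CI flow after this change, taken from the updated
Jenkinsfile stages (same vagrant wrapper and paths as the existing pipeline):

    vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && make fetch-jenkins && make jenkins" voltha
    vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && docker-compose -f compose/docker-compose-docutests.yml up -d" voltha

Only the images required by the docutests compose file (voltha, chameleon,
ofagent, netconf, consul, registrator, plus the pulled kafka, zookeeper and
fluentd images) are built or fetched, instead of the full production set,
which also includes shovel, dashd, vcli, grafana, envoy and registry.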

Change-Id: I2c87edb3a2a5da7a0c48700bc4e6e11ce3ae88cc
diff --git a/Jenkinsfile b/Jenkinsfile
index 25ad933..95a0840 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -11,10 +11,10 @@
             sh 'vagrant ssh -c "rm -rf /cord/incubator/voltha/venv-linux"'
 
             stage 'Build voltha'
-            sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && make fetch && make build" voltha'
+            sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && make fetch-jenkins && make jenkins" voltha'
 
             stage 'Bring up voltha containers'
-            sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && docker-compose -f compose/docker-compose-system-test.yml up -d" voltha'
+            sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && docker-compose -f compose/docker-compose-docutests.yml up -d" voltha'
 
             stage 'Run Integration Tests'
             sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && make smoke-test" voltha'
diff --git a/Makefile b/Makefile
index bdd4009..9237434 100644
--- a/Makefile
+++ b/Makefile
@@ -95,6 +95,9 @@
 
 production: protos prod-containers
 
+jenkins: protos jenkins-containers
+
+jenkins-containers: docker-base voltha chameleon ofagent netconf consul registrator
 
 prod-containers: docker-base voltha chameleon ofagent netconf shovel dashd vcli grafana consul registrator envoy registry
 
@@ -182,6 +185,16 @@
 distclean: clean
 	rm -rf ${VENVDIR}
 
+
+fetch-jenkins:
+	docker pull consul:latest
+	docker pull fluent/fluentd:latest
+	docker pull gliderlabs/registrator:master
+	docker pull ubuntu:xenial
+	docker pull wurstmeister/kafka:latest
+	docker pull wurstmeister/zookeeper:latest
+	docker pull zookeeper:latest
+
 fetch:
 	docker pull consul:latest
 	docker pull fluent/fluentd:latest
diff --git a/compose/docker-compose-docutests.yml b/compose/docker-compose-docutests.yml
new file mode 100644
index 0000000..50e4e94
--- /dev/null
+++ b/compose/docker-compose-docutests.yml
@@ -0,0 +1,200 @@
+version: '2'
+services:
+  #
+  # Single-node zookeeper service
+  #
+  zookeeper:
+    image: wurstmeister/zookeeper
+    ports:
+    - 2181
+    environment:
+      SERVICE_2181_NAME: "zookeeper"
+  #
+  # Single-node kafka service
+  #
+  kafka:
+    image: wurstmeister/kafka
+    ports:
+    - 9092
+    environment:
+      KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      KAFKA_HEAP_OPTS: "-Xmx256M -Xms128M"
+      SERVICE_9092_NAME: "kafka"
+    depends_on:
+    - consul
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+  #
+  # Single-node consul agent
+  #
+  consul:
+    image: consul:latest
+    command: agent -server -bootstrap -client 0.0.0.0 -ui
+    ports:
+    - "8300:8300"
+    - "8400:8400"
+    - "8500:8500"
+    - "8600:8600/udp"
+    environment:
+      #SERVICE_53_IGNORE: "yes"
+      SERVICE_8300_IGNORE: "yes"
+      SERVICE_8400_IGNORE: "yes"
+      SERVICE_8500_NAME: "consul-rest"
+  #
+  # Registrator
+  #
+  registrator:
+    image: gliderlabs/registrator:latest
+    command: [
+      "-ip=${DOCKER_HOST_IP}",
+      "-retry-attempts", "100",
+      "-cleanup",
+      # "-internal",
+      "consul://consul:8500"
+    ]
+    links:
+    - consul
+    volumes:
+    - "/var/run/docker.sock:/tmp/docker.sock"
+
+  #
+  # Fluentd log server
+  #
+  fluentd:
+    image: fluent/fluentd
+    ports:
+    - "24224:24224"
+    volumes:
+    - "/tmp/fluentd:/fluentd/log"
+    environment:
+      SERVICE_24224_NAME: "fluentd-intake"
+
+  #
+  # Voltha server instance(s)
+  #
+  voltha:
+    image: cord/voltha
+    command: [
+      "/voltha/voltha/main.py",
+      "-v",
+      "--consul=${DOCKER_HOST_IP}:8500",
+      "--fluentd=fluentd:24224",
+      "--rest-port=8880",
+      "--grpc-port=50555",
+      "--kafka=@kafka",
+      "--instance-id-is-container-name",
+      "--interface=eth1",
+      "--backend=consul",
+      "-v"
+    ]
+    ports:
+    - 8880
+    - 50555
+    - 18880
+    depends_on:
+    - consul
+    links:
+    - consul
+    - fluentd
+    environment:
+      SERVICE_8880_NAME: "voltha-health"
+      SERVICE_8880_CHECK_HTTP: "/health"
+      SERVICE_8880_CHECK_INTERVAL: "5s"
+      SERVICE_8880_CHECK_TIMEOUT: "1s"
+      SERVICE_50555_NAME: "voltha-grpc"
+      SERVICE_18880_NAME: "voltha-sim-rest"
+    volumes:
+    - "/var/run/docker.sock:/tmp/docker.sock"
+    networks:
+    - default
+    - ponmgmt
+
+  #
+  # Chameleon server instance(s)
+  #
+  chameleon:
+    image: cord/chameleon
+    command: [
+      "/chameleon/chameleon/main.py",
+      "-v",
+      "--consul=consul:8500",
+      "--fluentd=fluentd:24224",
+      "--rest-port=8881",
+      "--grpc-endpoint=@voltha-grpc",
+      "--instance-id-is-container-name",
+      "-v"
+    ]
+    ports:
+    - 8881
+    depends_on:
+    - consul
+    - voltha
+    links:
+    - consul
+    - fluentd
+    environment:
+      SERVICE_8881_NAME: "chameleon-rest"
+    volumes:
+    - "/var/run/docker.sock:/tmp/docker.sock"
+  #
+  # ofagent server instance
+  #
+  ofagent:
+    image: cord/ofagent
+    command: [
+      "/ofagent/ofagent/main.py",
+      "-v",
+      "--consul=${DOCKER_HOST_IP}:8500",
+      "--fluentd=fluentd:24224",
+      "--controller=${DOCKER_HOST_IP}:6653",
+      "--grpc-endpoint=@voltha-grpc",
+      "--instance-id-is-container-name",
+      "-v"
+    ]
+    depends_on:
+    - consul
+    - voltha
+    links:
+    - consul
+    - fluentd
+    volumes:
+    - "/var/run/docker.sock:/tmp/docker.sock"
+    restart: unless-stopped
+
+  #
+  # Netconf server instance(s)
+  #
+  netconf:
+    image: cord/netconf
+    privileged: true
+    command: [
+      "/netconf/netconf/main.py",
+      "-v",
+      "--consul=${DOCKER_HOST_IP}:8500",
+      "--fluentd=fluentd:24224",
+      "--grpc-endpoint=@voltha-grpc",
+      "--instance-id-is-container-name",
+      "-v"
+    ]
+    ports:
+    - "830:1830"
+    depends_on:
+    - consul
+    - voltha
+    links:
+    - consul
+    - fluentd
+    environment:
+      SERVICE_1830_NAME: "netconf-server"
+    volumes:
+    - "/var/run/docker.sock:/tmp/docker.sock"
+
+networks:
+  default:
+    driver: bridge
+  ponmgmt:
+    driver: bridge
+    driver_opts:
+      com.docker.network.bridge.name: "ponmgmt"
diff --git a/tests/itests/docutests/build_md_test.py b/tests/itests/docutests/build_md_test.py
index 0750f9d..a5bf1ce 100644
--- a/tests/itests/docutests/build_md_test.py
+++ b/tests/itests/docutests/build_md_test.py
@@ -36,7 +36,7 @@
 LOCAL_CONSUL = "localhost:8500"
 LOCAL_CONSUL_URL = "http://%s" % LOCAL_CONSUL
 LOCAL_CONSUL_DNS = "@localhost -p 8600"
-DOCKER_COMPOSE_FILE = "compose/docker-compose-system-test.yml"
+DOCKER_COMPOSE_FILE = "compose/docker-compose-docutests.yml"
 DOCKER_COMPOSE_FILE_SERVICES_COUNT = 7
 
 command_defs = dict(
@@ -90,7 +90,8 @@
     consul_get_srv_voltha_health="curl -s {}/v1/catalog/service/voltha-health "
                                  "| jq -r .".format(LOCAL_CONSUL_URL),
     kafka_client_run="kafkacat -b {} -L",
-    kafka_client_heart_check="kafkacat -b {} -C -t voltha.heartbeat -c 1",
+    kafka_client_heart_check="kafkacat -o end -b {} -C -t voltha.heartbeat "
+                             "-c 5",
     consul_get_voltha_rest_a_record="dig {} voltha-health.service.consul"
         .format(LOCAL_CONSUL_DNS),
     consul_get_voltha_rest_ip="dig {} +short voltha-health.service.consul"
@@ -480,8 +481,11 @@
             print "Verify kafka client is receiving the heartbeat messages from voltha..."
             expected_pattern = ['heartbeat', 'voltha_instance']
             cmd = command_defs['kafka_client_heart_check'].format(kafka_endpoint)
-            kafka_client_output = run_long_running_command_with_timeout(cmd, 20)
+            print time.ctime()
+            kafka_client_output = run_long_running_command_with_timeout(cmd,
+                                                                        40)
 
+            print time.ctime()
             print kafka_client_output
             # TODO check that there are heartbeats
             # Verify the kafka client output