This update contains several changes:
- Used a more recent commit of Envoy, specifically the last one before
the breaking change, until the Envoy team fixes the issue.
- Exposed Envoy's admin interface to enable gathering of NBI
activity stats from the cluster (see the example after this list).
- Removed some unnecessary filters from Envoy to reduce the size of the
config file and make it easier to understand.
- Removed the envoy directory and its mount because everything
now comes packaged in the container.
- Fixed a bug in the cluster VM Vagrantfile that was causing kernel
panics on some architectures.
- Added a log collection script to facilitate problem reporting.
- Removed the nginx directory from the install since nginx isn't being
used in production.
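
With the admin port published (8001, per the compose change below), NBI
activity stats can be pulled from any cluster node. A minimal sketch,
assuming a node reachable as "voltha-node1" (the host name and the
"grpc" filter are illustrative only, not part of this change):

    # Query Envoy's admin /stats endpoint on the exposed port and keep
    # only the gRPC-related counters.
    curl -s http://voltha-node1:8001/stats | grep grpc

For problem reporting, the new install/get-logs.sh script can be run on
a swarm node; it gathers the service logs into logs.tar.bz2.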
Change-Id: Ifc73a4e177d0a2ac9c9d373d936988fa17d586d2
diff --git a/Makefile b/Makefile
index ef77149..7f64095 100644
--- a/Makefile
+++ b/Makefile
@@ -215,7 +215,7 @@
docker pull zookeeper:latest
docker pull nginx:latest
docker pull portainer/portainer:latest
- docker pull lyft/envoy:092f01381467dfc3324e5e9528c67ee5d65744fd
+ docker pull lyft/envoy:29361deae91575a1d46c7a21e913f19e75622ebe
docker pull registry:2
docker pull kamon/grafana_graphite:latest
diff --git a/compose/docker-compose-envoy-swarm.yml b/compose/docker-compose-envoy-swarm.yml
index 7d5eecd..5c14cfa 100644
--- a/compose/docker-compose-envoy-swarm.yml
+++ b/compose/docker-compose-envoy-swarm.yml
@@ -27,8 +27,9 @@
- "50555:50555"
- "8882:8882"
- "8443:8443"
+ - "8001:8001"
volumes:
- - /cord/incubator/voltha/envoy:/etc/envoy
+ - /var/run/docker.sock:/tmp/docker.sock
networks:
voltha-net:
diff --git a/docker/Dockerfile.envoy b/docker/Dockerfile.envoy
index 7049fc9..afe1977 100644
--- a/docker/Dockerfile.envoy
+++ b/docker/Dockerfile.envoy
@@ -1,10 +1,10 @@
-FROM lyft/envoy:092f01381467dfc3324e5e9528c67ee5d65744fd
+FROM lyft/envoy:29361deae91575a1d46c7a21e913f19e75622ebe
RUN apt-get update && apt-get -q install -y \
curl
-ADD envoy/front-proxy /envoy/
-ADD envoy/proto.pb /envoy/
-ADD pki /envoy/
-ADD envoy/go/envoyd/envoyd /usr/local/bin
+COPY envoy/front-proxy /envoy/
+COPY envoy/proto.pb /envoy/
+COPY pki /envoy/
+COPY envoy/go/envoyd/envoyd /usr/local/bin
CMD /usr/local/bin/envoy -c /envoy/front-proxy/voltha-grpc-proxy.json
diff --git a/envoy/front-proxy/voltha-grpc-proxy-no-http.template.json b/envoy/front-proxy/voltha-grpc-proxy-no-http.template.json
index 7b7461f..fcdc8a4 100644
--- a/envoy/front-proxy/voltha-grpc-proxy-no-http.template.json
+++ b/envoy/front-proxy/voltha-grpc-proxy-no-http.template.json
@@ -65,11 +65,6 @@
}
}
},
- {
- "type": "both",
- "name": "grpc_http1_bridge",
- "config": {}
- },
{
"type": "decoder",
"name": "router",
diff --git a/envoy/front-proxy/voltha-grpc-proxy-no-https.template.json b/envoy/front-proxy/voltha-grpc-proxy-no-https.template.json
index 7448323..22e7788 100644
--- a/envoy/front-proxy/voltha-grpc-proxy-no-https.template.json
+++ b/envoy/front-proxy/voltha-grpc-proxy-no-https.template.json
@@ -60,11 +60,6 @@
}
}
},
- {
- "type": "both",
- "name": "grpc_http1_bridge",
- "config": {}
- },
{
"type": "decoder",
"name": "router",
diff --git a/envoy/front-proxy/voltha-grpc-proxy.template.json b/envoy/front-proxy/voltha-grpc-proxy.template.json
index dffebc2..a8e41ee 100644
--- a/envoy/front-proxy/voltha-grpc-proxy.template.json
+++ b/envoy/front-proxy/voltha-grpc-proxy.template.json
@@ -65,11 +65,6 @@
}
}
},
- {
- "type": "both",
- "name": "grpc_http1_bridge",
- "config": {}
- },
{
"type": "decoder",
"name": "router",
@@ -140,11 +135,6 @@
}
}
},
- {
- "type": "both",
- "name": "grpc_http1_bridge",
- "config": {}
- },
{
"type": "decoder",
"name": "router",
diff --git a/install/Vagrantfile b/install/Vagrantfile
index bd98bfa..310dab6 100644
--- a/install/Vagrantfile
+++ b/install/Vagrantfile
@@ -18,10 +18,8 @@
d.vm.provider "libvirt" do |v|
v.memory = 6144
v.cpus = 2
- if settings['vProvider'] == "KVM"
- v.cpu_mode = 'host-passthrough'
- v.cpu_fallback = 'allow'
- end
+ v.cpu_mode = 'host-passthrough'
+ v.cpu_fallback = 'allow'
end
end
end
diff --git a/install/ansible/roles/installer/tasks/main.yml b/install/ansible/roles/installer/tasks/main.yml
index b5b7afa..cbf2206 100644
--- a/install/ansible/roles/installer/tasks/main.yml
+++ b/install/ansible/roles/installer/tasks/main.yml
@@ -2,25 +2,14 @@
apt_repository:
repo: 'ppa:ansible/ansible'
tags: [installer]
+
- name: Debian ansible is present
apt:
name: ansible
state: latest
force: yes
tags: [installer]
-#- name: Installer files and directories are copied
-# copy:
-# src: "{{ cord_home }}/incubator/voltha/{{ item }}"
-# dest: /home/vinstall
-# owner: vinstall
-# group: vinstall
-# follow: no
-# with_items:
-# - install/installer.sh
-# - install/install.cfg
-# - install/ansible
-# - compose
-# - nginx_config
+
- name: Installer files and directories are copied
synchronize:
src: "{{ cord_home }}/incubator/voltha/{{ item }}"
@@ -35,10 +24,9 @@
- install/install.cfg
- install/voltha-swarm-start.sh
- install/voltha-swarm-stop.sh
+ - install/get-logs.sh
- install/ansible
- compose
- - nginx_config
- - envoy
tags: [installer]
- name: Determine if test mode is active
@@ -46,33 +34,41 @@
local_action: stat path="{{ cord_home }}/incubator/voltha/install/.test"
register: file
ignore_errors: true
+
- name: Test mode file is copied
copy:
src: "{{ cord_home }}/incubator/voltha/install/.test"
dest: /home/vinstall
when: file.stat.exists
+
- name: The installer is made executable
file:
path: /home/vinstall/installer.sh
mode: 0744
tags: [installer]
+
- name: Python docker-py {{ docker_py_version }} package source is available
command: pip download -d /home/vinstall/docker-py "docker-py=={{ docker_py_version }}"
tags: [installer]
+
- name: Python netifaces {{ netifaces_version }} package source is available
command: pip download -d /home/vinstall/netifaces "netifaces=={{ netifaces_version }}"
tags: [installer]
+
- name: Deb file directory doesn't exist
file:
path: /home/vinstall/deb_files
state: absent
tags: [installer]
+
- name: Deb files are saved.
command: cp -r /var/cache/apt/archives /home/vinstall
tags: [installer]
+
- name: Deb file directory is renamed
command: mv /home/vinstall/archives /home/vinstall/deb_files
tags: [installer]
+
- name: Installer directories are owned by vinstall
file:
path: /home/vinstall/{{ item }}
@@ -83,13 +79,12 @@
with_items:
- ansible
- compose
- - nginx_config
- .ansible
- deb_files
- docker-py
- netifaces
- - envoy
tags: [installer]
+
- name: Installer files are owned by vinstall
file:
path: /home/vinstall/{{ item }}
@@ -101,5 +96,6 @@
- install.cfg
- voltha-swarm-start.sh
- voltha-swarm-stop.sh
+ - get-logs.sh
- docker-compose-Linux-x86_64
tags: [installer]
diff --git a/install/ansible/roles/voltha/tasks/main.yml b/install/ansible/roles/voltha/tasks/main.yml
index 76afee1..6852f82 100644
--- a/install/ansible/roles/voltha/tasks/main.yml
+++ b/install/ansible/roles/voltha/tasks/main.yml
@@ -43,9 +43,9 @@
with_items:
- compose
- nginx_config
- - envoy
- voltha-swarm-start.sh
- voltha-swarm-stop.sh
+ - get-logs.sh
when: target == "cluster"
tags: [voltha]
@@ -70,8 +70,6 @@
follow: no
with_items:
- compose
- - nginx_config
- - envoy
when: target == "cluster"
tags: [voltha]
@@ -85,46 +83,27 @@
with_items:
- voltha-swarm-start.sh
- voltha-swarm-stop.sh
+ - get-logs.sh
when: target == "cluster"
tags: [voltha]
-- name: Nginx statup script is executable
- file:
- path: "{{ target_voltha_dir }}/nginx_config/start_service.sh"
- mode: 0755
- when: target == "cluster"
- tags: [voltha]
-
-#- name: Configuration files are on the cluster host
-# copy:
-# src: "files/consul_config"
-# dest: "{{ target_voltha_dir }}"
-# owner: voltha
-# group: voltha
-# when: target == "cluster"
-# tags: [voltha]
-
- name: Docker containers for Voltha are pulled
command: docker pull {{ docker_registry }}/{{ item }}
with_items: "{{ voltha_containers }}"
when: target == "cluster"
tags: [voltha]
+
- name: Docker images are re-tagged to expected names
command: docker tag {{ docker_registry }}/{{ item }} {{ item }}
with_items: "{{ voltha_containers }}"
when: target == "cluster"
tags: [voltha]
+
- name: Docker images are re-tagged to cluster registry names
command: docker tag {{ docker_registry }}/{{ item }} {{ inventory_hostname }}:5001/{{ item }}
with_items: "{{ voltha_containers }}"
when: target == "cluster"
tags: [voltha]
-#- name: Old docker image tags are removed
-# command: docker rmi {{ docker_registry }}/{{ item }}
-# with_items: "{{ voltha_containers }}"
-# when: target == "cluster"
-# tags: [voltha]
-
# Update the insecure registry to reflect the current installer.
# The installer name can change depending on whether test mode
@@ -150,11 +129,6 @@
when: target == "installer"
tags: [voltha]
-#- name: TEMPORARY RULE TO INSTALL ZOOKEEPER
-# command: docker pull zookeeper
-# when: target == "installer"
-# tags: [voltha]
-
- name: Docker images are re-tagged to registry for push
command: docker tag {{ item }} {{ docker_push_registry }}/{{ item }}
with_items: "{{ voltha_containers }}"
diff --git a/install/get-logs.sh b/install/get-logs.sh
new file mode 100644
index 0000000..37d0755
--- /dev/null
+++ b/install/get-logs.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+# This script will collect all of the pertinent logs from a voltha
+# HA swarm cluster, then tar and bzip2 them to facilitate sending them
+# to the suspected issue owner.
+
+volthaDir="/cord/incubator/voltha"
+
+# Get the list of the other hosts that make up the cluster
+hosts=`docker node ls | tail -n +2 | awk '{print $2}' | grep -v "*"`
+
+# Create a temporary directory to hold all the collected logs
+mkdir ${volthaDir}/log_tmp
+pushd ${volthaDir}/log_tmp
+
+# Docker health in general.
+
+echo "Getting docker node ls"
+docker node ls > docker_node_ls.log 2>&1
+echo "Getting docker service ls"
+docker service ls > docker_service_ls.log 2>&1
+
+# Get the list of services so we can ps each one and collect its logs.
+svcs=`docker service ls | tail -n +2 | awk '{print $2}'`
+
+# Get the PS information
+for i in $svcs
+do
+ echo "Getting docker service ps $i"
+ docker service ps ${i} > docker_service_ps_${i} 2>&1
+done
+
+# Get the logs for each service
+for i in $svcs
+do
+ echo "Getting docker service logs $i"
+ docker service logs ${i} > docker_service_logs_${i} 2>&1 &
+done
+
+patience=10
+while [ ! -z "`jobs -p`" ]
+do
+ echo "*** Waiting on log collection to complete. Outstanding jobs: `jobs -p | wc -l`"
+ sleep 10
+ patience=`expr $patience - 1`
+ if [ $patience -eq 0 ]; then
+ echo "Log collection stuck, killing any active collectors"
+ for i in `jobs -p`
+ do
+ kill -s TERM $i
+ done
+ break
+ fi
+done
+
+# Get the image list from this host
+echo "Getting docker image ls from `hostname`"
+docker image ls > docker_image_ls_`hostname` 2>&1
+for i in $hosts
+do
+ echo "Getting docker image ls from $i"
+ ssh voltha@$i "docker image ls" > docker_image_ls_$i 2>&1
+done
+
+
+popd
+tar cjvf logs.tar.bz2 log_tmp/*
+rm -fr log_tmp
+
+
+