VOL-633: Update the single-node Voltha environment to support Kubernetes

Change-Id: Id105eb33463995401c61b365d222915d6c669aec
diff --git a/BUILD.md b/BUILD.md
index 04cdf23..9ff36fd 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -299,6 +299,72 @@
 exit # from vagrant box back to your native environment
 vagrant destroy -f
 ```
+### Single-node Kubernetes
+
+To run Voltha in a Kubernetes environment, the "voltha" development machine can be configured as the master of a single-node Kubernetes cluster.
+
+To install Kubernetes, execute the following Ansible playbook:
+```
+ansible-playbook /cord/incubator/voltha/ansible/kubernetes.yml -c local
+```
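+Once the playbook completes, the master node may take a minute or two to become ready. A quick sanity check:
+```
+kubectl get nodes
+```
+The single node should eventually report a STATUS of "Ready".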
+Run the following commands to create the "voltha" namespace:
+```
+cd /cord/incubator/voltha
+kubectl apply -f k8s/namespace.yml
+```
+Then follow the steps in one of the two sub-sections below, depending on whether a Consul or an Etcd KV store is to be used with Voltha.
+
+#### Single-node Kubernetes with Consul KV store
+
+To access the Consul UI, set up the ingress framework:
+```
+kubectl apply -f k8s/ingress/
+```
+Deploy the base components:
+```
+kubectl apply -f k8s/single-node/zookeeper.yml
+kubectl apply -f k8s/single-node/kafka.yml
+kubectl apply -f k8s/single-node/consul.yml
+kubectl apply -f k8s/single-node/fluentd.yml
+```
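+Before continuing, verify that the base pods reach the Running state:
+```
+kubectl get pods -n voltha
+```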
+The following steps will succeed only if the voltha images have been built:
+```
+kubectl apply -f k8s/single-node/vcore_for_consul.yml
+kubectl apply -f k8s/single-node/ofagent.yml
+kubectl apply -f k8s/envoy_for_consul.yml   # Note the file path
+kubectl apply -f k8s/single-node/vcli.yml
+kubectl apply -f k8s/single-node/netconf.yml
+```
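+These deployments use locally built images (imagePullPolicy: Never), so a missing image surfaces as an ErrImageNeverPull pod status. To list the voltha images present on the node:
+```
+docker images | grep voltha
+```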
+Deploy the monitoring components (note the file paths):
+```
+kubectl apply -f k8s/grafana.yml
+kubectl apply -f k8s/stats.yml
+```
+#### Single-node Kubernetes with Etcd KV store
+
+Deploy the base components:
+```
+kubectl apply -f k8s/single-node/zookeeper.yml
+kubectl apply -f k8s/single-node/kafka.yml
+kubectl apply -f k8s/operator/etcd/cluster_role.yml
+kubectl apply -f k8s/operator/etcd/cluster_role_binding.yml
+kubectl apply -f k8s/operator/etcd/operator.yml
+kubectl apply -f k8s/single-node/etcd_cluster.yml
+kubectl apply -f k8s/single-node/fluentd.yml
+```
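+The etcd operator creates the cluster asynchronously; watch for the EtcdCluster resource and its pods to appear:
+```
+kubectl get etcdclusters -n voltha
+kubectl get pods -n voltha
+```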
+The following steps will succeed only if the voltha images have been built:
+```
+kubectl apply -f k8s/single-node/vcore_for_etcd.yml
+kubectl apply -f k8s/single-node/ofagent.yml
+kubectl apply -f k8s/envoy_for_etcd.yml
+kubectl apply -f k8s/single-node/vcli.yml
+kubectl apply -f k8s/single-node/netconf.yml
+```
+Deploy the monitoring components (note the file paths):
+```
+kubectl apply -f k8s/grafana.yml
+kubectl apply -f k8s/stats.yml
+```
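+Once all pods are Running, the Voltha CLI can be reached over ssh via the vcli service, which maps port 5022 to the container's sshd. A sketch, assuming the CLI image's ssh user is "voltha":
+```
+ssh -p 5022 voltha@$(kubectl get svc vcli -n voltha -o jsonpath='{.spec.clusterIP}')
+```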
 
 # Testing
 
diff --git a/ansible/kubernetes.yml b/ansible/kubernetes.yml
new file mode 100644
index 0000000..140f736
--- /dev/null
+++ b/ansible/kubernetes.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+  remote_user: vagrant
+  serial: 1
+  roles:
+    - k8s-master
diff --git a/ansible/roles/k8s-master/tasks/main.yml b/ansible/roles/k8s-master/tasks/main.yml
new file mode 100644
index 0000000..6fe0456
--- /dev/null
+++ b/ansible/roles/k8s-master/tasks/main.yml
@@ -0,0 +1,74 @@
+- name: Install Kubernetes dependencies
+  become: yes
+  apt:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+    - apt-transport-https
+    - jq
+
+- name: Get apt signing key from Google Cloud
+  become: yes
+  apt_key:
+    url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
+    state: present
+
+- name: Add Kubernetes apt repository
+  become: yes
+  apt_repository:
+    repo: 'deb http://apt.kubernetes.io/ kubernetes-xenial main'
+    filename: kubernetes
+    state: present
+    update_cache: yes
+
+- name: Install Kubernetes
+  become: yes
+  apt:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - kubeadm=1.8.5-00
+    - kubectl=1.8.5-00
+    - kubelet=1.8.5-00
+    - kubernetes-cni=0.6.0-00
+
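+# The advertise address below is assumed to be the host-only IP of the
+# "voltha" development VM; adjust it if the Vagrant network is configured
+# differently.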
+- name: Initialize node as Kubernetes master
+  become: yes
+  command: "kubeadm init --apiserver-advertise-address=10.100.198.220"
+
+- name: Create .kube directory under home
+  become: yes
+  file:
+    dest: /home/ubuntu/.kube
+    mode: 0755
+    owner: ubuntu
+    group: ubuntu
+    state: directory
+
+- name: Copy config to home directory
+  become: yes
+  command: "sudo cp /etc/kubernetes/admin.conf /home/ubuntu/.kube/config"
+
+- name: Change ownership of ~/.kube/config
+  become: yes
+  file:
+    path: /home/ubuntu/.kube/config
+    mode: 0600
+    owner: ubuntu
+    group: ubuntu
+
+- name: Set proxy-mode flag in kube-proxy daemonset (workaround for https://github.com/kubernetes/kubernetes/issues/34101)
+  become: yes
+  shell: "kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system get ds -l 'k8s-app==kube-proxy' -o json | jq '.items[0].spec.template.spec.containers[0].command |= .+ [\"--proxy-mode=userspace\"]' | kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f - && kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system delete pods -l 'k8s-app==kube-proxy'"
+  register: proxy
+  until: proxy.rc == 0
+  retries: 60
+  delay: 10
+
+- name: Allow workloads on Kubernetes master
+  become: yes
+  command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-"
+
+- name: Install pod network
+  become: yes
+  command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /cord/incubator/voltha/k8s/weave-net-1.8.yml"
\ No newline at end of file
diff --git a/k8s/single-node/consul.yml b/k8s/single-node/consul.yml
new file mode 100644
index 0000000..ec980e8
--- /dev/null
+++ b/k8s/single-node/consul.yml
@@ -0,0 +1,106 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: consul
+  namespace: voltha
+  labels:
+    name: consul
+spec:
+  type: ClusterIP
+  clusterIP: None
+  ports:
+    - name: http
+      port: 8500
+      targetPort: 8500
+    - name: https
+      port: 8443
+      targetPort: 8443
+    - name: rpc
+      port: 8400
+      targetPort: 8400
+    - name: serflan-tcp
+      protocol: "TCP"
+      port: 8301
+      targetPort: 8301
+    - name: serflan-udp
+      protocol: "UDP"
+      port: 8301
+      targetPort: 8301
+    - name: serfwan-tcp
+      protocol: "TCP"
+      port: 8302
+      targetPort: 8302
+    - name: serfwan-udp
+      protocol: "UDP"
+      port: 8302
+      targetPort: 8302
+    - name: server
+      port: 8300
+      targetPort: 8300
+    - name: consuldns
+      port: 8600
+      targetPort: 8600
+  selector:
+    app: consul
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  name: consul
+  namespace: voltha
+spec:
+  serviceName: consul
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: consul
+      annotations:
+        cni: "weave"
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+        - name: consul
+          image: "consul:0.9.2"
+          env:
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          args:
+            - "agent"
+            - "-server"
+            - "-bootstrap"
+            - "-config-dir=/consul/config"
+            - "-data-dir=/consul/data"
+            - "-bind=0.0.0.0"
+            - "-client=0.0.0.0"
+            - "-ui"
+            - "-raft-protocol=3"
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                - /bin/sh
+                - -c
+                - consul leave
+          ports:
+            - containerPort: 8500
+              name: ui-port
+            - containerPort: 8400
+              name: alt-port
+            - containerPort: 53
+              name: udp-port
+            - containerPort: 8443
+              name: https-port
+            - containerPort: 8080
+              name: http-port
+            - containerPort: 8301
+              name: serflan
+            - containerPort: 8302
+              name: serfwan
+            - containerPort: 8600
+              name: consuldns
+            - containerPort: 8300
+              name: server
+
diff --git a/k8s/single-node/etcd_cluster.yml b/k8s/single-node/etcd_cluster.yml
new file mode 100644
index 0000000..b7e40c2
--- /dev/null
+++ b/k8s/single-node/etcd_cluster.yml
@@ -0,0 +1,9 @@
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdCluster"
+metadata:
+  name: etcd
+  namespace: voltha
+spec:
+  size: 1
+  version: "3.2.9"
+
diff --git a/k8s/single-node/fluentd.yml b/k8s/single-node/fluentd.yml
new file mode 100644
index 0000000..8a9c4df
--- /dev/null
+++ b/k8s/single-node/fluentd.yml
@@ -0,0 +1,43 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: fluentd
+  namespace: voltha
+spec:
+  clusterIP: None
+  selector:
+    app: fluentd
+  ports:
+  - protocol: TCP
+    port: 24224
+    targetPort: 24224
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: fluentd
+  namespace: voltha
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: fluentd
+      annotations:
+        cni: "weave"
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+      - name: fluentd
+        image: fluent/fluentd:v0.12.42
+        volumeMounts:
+        - name: fluentd-log
+          mountPath: /fluentd/log
+        ports:
+        - containerPort: 24224
+      volumes:
+      - name: fluentd-log
+        hostPath:
+          path: /tmp/fluentd
+          type: Directory
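+          # a hostPath of type "Directory" requires /tmp/fluentd to exist on
+          # the node beforehand; the pod will not start otherwise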
+
diff --git a/k8s/single-node/kafka.yml b/k8s/single-node/kafka.yml
new file mode 100644
index 0000000..2d11968
--- /dev/null
+++ b/k8s/single-node/kafka.yml
@@ -0,0 +1,45 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kafka
+  namespace: voltha
+spec:
+  clusterIP: None
+  selector:
+    app: kafka
+  ports:
+  - protocol: TCP
+    port: 9092
+    targetPort: 9092
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  name: kafka
+  namespace: voltha
+spec:
+  serviceName: kafka
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kafka
+  template:
+    metadata:
+      labels:
+        app: kafka
+      annotations:
+        cni: "weave"
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+      - name: kafka
+        image: wurstmeister/kafka:1.0.0
+        ports:
+        - containerPort: 9092
+        env:
+        - name: KAFKA_ADVERTISED_PORT
+          value: "9092"
+        - name: KAFKA_ZOOKEEPER_CONNECT
+          value: zoo:2181
+        - name: KAFKA_HEAP_OPTS
+          value: "-Xmx256M -Xms128M"
diff --git a/k8s/single-node/netconf.yml b/k8s/single-node/netconf.yml
new file mode 100644
index 0000000..acf6f00
--- /dev/null
+++ b/k8s/single-node/netconf.yml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: netconf
+  namespace: voltha
+spec:
+  selector:
+    app: netconf
+  clusterIP: None
+  ports:
+  - port: 830
+    targetPort: 830
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: netconf
+  namespace: voltha
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: netconf
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+      - name: netconf
+        image: voltha-netconf
+        imagePullPolicy: Never
+        ports:
+        - containerPort: 830
+        env:
+        - name: NETCONF_PORT
+          value: "830"
+        args:
+        - "/netconf/netconf/main.py"
+        - "-v"
+        - "--consul=consul:8500"
+        - "--fluentd=fluentd:24224"
+        - "--grpc-endpoint=voltha:50555"
diff --git a/k8s/single-node/ofagent.yml b/k8s/single-node/ofagent.yml
new file mode 100644
index 0000000..6311ac6
--- /dev/null
+++ b/k8s/single-node/ofagent.yml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: ofagent
+  namespace: voltha
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: ofagent
+      annotations:
+        cni: "weave"
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+      - name: ofagent
+        image: voltha-ofagent
+        imagePullPolicy: Never
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - "/ofagent/ofagent/main.py"
+        - "-v"
+        - "--consul=consul.$(NAMESPACE).svc.cluster.local:8500"
+        - "--fluentd=fluentd.$(NAMESPACE).svc.cluster.local:24224"
+        - "--controller=onos:6653"
+        - "--grpc-endpoint=vcore.$(NAMESPACE).svc.cluster.local:50556"
+        - "--enable-tls"
+        - "--key-file=/ofagent/pki/voltha.key"
+        - "--cert-file=/ofagent/pki/voltha.crt"
diff --git a/k8s/single-node/vcli.yml b/k8s/single-node/vcli.yml
new file mode 100644
index 0000000..848bd3a
--- /dev/null
+++ b/k8s/single-node/vcli.yml
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: vcli
+  namespace: voltha
+  labels:
+    name: vcli
+spec:
+  ports:
+    - name: ssh
+      port: 5022
+      targetPort: 22
+  selector:
+    app: vcli
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: vcli
+  namespace: voltha
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: vcli
+      annotations:
+        cni: "weave"
+    spec:
+      containers:
+        - name: vcli
+          image: voltha-cli
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          args:
+            - "/cli/cli/setup.sh"
+            - "-C consul:8500"
+            - "-g voltha:50555"
+            - "-s voltha:18880"
+            - "-G"
+          ports:
+            - containerPort: 22
+              name: ssh-port
+          imagePullPolicy: Never
+
+
diff --git a/k8s/single-node/vcore_for_consul.yml b/k8s/single-node/vcore_for_consul.yml
new file mode 100644
index 0000000..da35ffd
--- /dev/null
+++ b/k8s/single-node/vcore_for_consul.yml
@@ -0,0 +1,64 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: vcore
+  namespace: voltha
+  labels:
+    name: vcore
+spec:
+  clusterIP: None
+  ports:
+    - name: rest
+      port: 8880
+      targetPort: 8880
+    - name: mystery
+      port: 18880
+      targetPort: 18880
+    - name: grpc
+      port: 50556
+      targetPort: 50556
+  selector:
+    app: vcore
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: vcore
+  namespace: voltha
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: vcore
+      annotations:
+        cni: "weave"
+    spec:
+      containers:
+      - name: voltha
+        image: voltha-voltha
+        imagePullPolicy: Never
+        ports:
+        - containerPort: 8880
+          name: rest-port
+        - containerPort: 18880
+          name: mystery-port
+        - containerPort: 50556
+          name: grpc-port
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - "voltha/voltha/main.py"
+        - "-v"
+        - "--consul=consul.$(NAMESPACE).svc.cluster.local:8500"
+        - "--kafka=kafka.$(NAMESPACE).svc.cluster.local"
+        - "--fluentd=fluentd.$(NAMESPACE).svc.cluster.local:24224"
+        - "--rest-port=8880"
+        - "--grpc-port=50556"
+        - "--interface=eth1"
+        - "--backend=consul"
+        - "--pon-subnet=10.38.0.0/12"
+#        - "--ponsim-comm=grpc"
diff --git a/k8s/single-node/vcore_for_etcd.yml b/k8s/single-node/vcore_for_etcd.yml
new file mode 100644
index 0000000..6af13d5
--- /dev/null
+++ b/k8s/single-node/vcore_for_etcd.yml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: vcore
+  namespace: voltha
+  labels:
+    name: vcore
+spec:
+  clusterIP: None
+  ports:
+    - name: rest
+      port: 8880
+      targetPort: 8880
+    - name: mystery
+      port: 18880
+      targetPort: 18880
+    - name: grpc
+      port: 50556
+      targetPort: 50556
+  selector:
+    app: vcore
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: vcore
+  namespace: voltha
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: vcore
+      annotations:
+        cni: "weave"
+    spec:
+      containers:
+        - name: voltha
+          image: voltha-voltha
+          env:
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          args:
+            - "voltha/voltha/main.py"
+            - "-v"
+            - "--etcd=etcd.$(NAMESPACE).svc.cluster.local:2379"
+            - "--kafka=kafka.$(NAMESPACE).svc.cluster.local"
+            - "--fluentd=fluentd.$(NAMESPACE).svc.cluster.local:24224"
+            - "--rest-port=8880"
+            - "--grpc-port=50556"
+            - "--interface=eth1"
+            - "--backend=etcd"
+            - "--pon-subnet=10.38.0.0/12"
+#            - "--ponsim-comm=grpc"
+          ports:
+            - containerPort: 8880
+              name: rest-port
+            - containerPort: 18880
+              name: mystery-port
+            - containerPort: 50556
+              name: grpc-port
+          imagePullPolicy: Never
+
+
diff --git a/k8s/single-node/zookeeper.yml b/k8s/single-node/zookeeper.yml
new file mode 100644
index 0000000..9ced14a
--- /dev/null
+++ b/k8s/single-node/zookeeper.yml
@@ -0,0 +1,56 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: zoo
+  namespace: voltha
+spec:
+  clusterIP: None
+  selector:
+    app: zookeeper
+  ports:
+  - name: client
+    port: 2181
+    targetPort: 2181
+  - name: follower
+    port: 2888
+    targetPort: 2888
+  - name: leader
+    port: 3888
+    targetPort: 3888
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  name: zookeeper
+  namespace: voltha
+spec:
+  serviceName: zoo
+  replicas: 1
+  selector:
+    matchLabels:
+      app: zookeeper
+  template:
+    metadata:
+      labels:
+        app: zookeeper
+      annotations:
+        cni: "weave"
+    spec:
+      containers:
+      - name: zoo
+        image: zookeeper:3.4.11
+        ports:
+        - containerPort: 2181
+        - containerPort: 2888
+        - containerPort: 3888
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: ZOO_MY_ID
+          value: "1"
+#        - name: ZOO_SERVERS
+#          value: >
+#            server.1=zookeeper1-0.zoo1.$(NAMESPACE).svc.cluster.local:2888:3888
+
diff --git a/k8s/weave-net-1.8.yml b/k8s/weave-net-1.8.yml
new file mode 100644
index 0000000..c991cd1
--- /dev/null
+++ b/k8s/weave-net-1.8.yml
@@ -0,0 +1,243 @@
+apiVersion: v1
+kind: List
+items:
+  - apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      name: weave-net
+      annotations:
+        cloud.weave.works/launcher-info: |-
+          {
+            "original-request": {
+              "url": "/k8s/v1.8/net.yaml",
+              "date": "Mon Feb 12 2018 15:44:36 GMT+0000 (UTC)"
+            },
+            "email-address": "support@weave.works"
+          }
+      labels:
+        name: weave-net
+      namespace: kube-system
+  - apiVersion: rbac.authorization.k8s.io/v1beta1
+    kind: ClusterRole
+    metadata:
+      name: weave-net
+      annotations:
+        cloud.weave.works/launcher-info: |-
+          {
+            "original-request": {
+              "url": "/k8s/v1.8/net.yaml",
+              "date": "Mon Feb 12 2018 15:44:36 GMT+0000 (UTC)"
+            },
+            "email-address": "support@weave.works"
+          }
+      labels:
+        name: weave-net
+      namespace: kube-system
+    rules:
+      - apiGroups:
+          - ''
+        resources:
+          - pods
+          - namespaces
+          - nodes
+        verbs:
+          - get
+          - list
+          - watch
+      - apiGroups:
+          - networking.k8s.io
+        resources:
+          - networkpolicies
+        verbs:
+          - get
+          - list
+          - watch
+  - apiVersion: rbac.authorization.k8s.io/v1beta1
+    kind: ClusterRoleBinding
+    metadata:
+      name: weave-net
+      annotations:
+        cloud.weave.works/launcher-info: |-
+          {
+            "original-request": {
+              "url": "/k8s/v1.8/net.yaml",
+              "date": "Mon Feb 12 2018 15:44:36 GMT+0000 (UTC)"
+            },
+            "email-address": "support@weave.works"
+          }
+      labels:
+        name: weave-net
+      namespace: kube-system
+    roleRef:
+      kind: ClusterRole
+      name: weave-net
+      apiGroup: rbac.authorization.k8s.io
+    subjects:
+      - kind: ServiceAccount
+        name: weave-net
+        namespace: kube-system
+  - apiVersion: rbac.authorization.k8s.io/v1beta1
+    kind: Role
+    metadata:
+      name: weave-net
+      annotations:
+        cloud.weave.works/launcher-info: |-
+          {
+            "original-request": {
+              "url": "/k8s/v1.8/net.yaml",
+              "date": "Mon Feb 12 2018 15:44:36 GMT+0000 (UTC)"
+            },
+            "email-address": "support@weave.works"
+          }
+      labels:
+        name: weave-net
+      namespace: kube-system
+    rules:
+      - apiGroups:
+          - ''
+        resourceNames:
+          - weave-net
+        resources:
+          - configmaps
+        verbs:
+          - get
+          - update
+      - apiGroups:
+          - ''
+        resources:
+          - configmaps
+        verbs:
+          - create
+  - apiVersion: rbac.authorization.k8s.io/v1beta1
+    kind: RoleBinding
+    metadata:
+      name: weave-net
+      annotations:
+        cloud.weave.works/launcher-info: |-
+          {
+            "original-request": {
+              "url": "/k8s/v1.8/net.yaml",
+              "date": "Mon Feb 12 2018 15:44:36 GMT+0000 (UTC)"
+            },
+            "email-address": "support@weave.works"
+          }
+      labels:
+        name: weave-net
+      namespace: kube-system
+    roleRef:
+      kind: Role
+      name: weave-net
+      apiGroup: rbac.authorization.k8s.io
+    subjects:
+      - kind: ServiceAccount
+        name: weave-net
+        namespace: kube-system
+  - apiVersion: extensions/v1beta1
+    kind: DaemonSet
+    metadata:
+      name: weave-net
+      annotations:
+        cloud.weave.works/launcher-info: |-
+          {
+            "original-request": {
+              "url": "/k8s/v1.8/net.yaml",
+              "date": "Mon Feb 12 2018 15:44:36 GMT+0000 (UTC)"
+            },
+            "email-address": "support@weave.works"
+          }
+      labels:
+        name: weave-net
+      namespace: kube-system
+    spec:
+      template:
+        metadata:
+          labels:
+            name: weave-net
+        spec:
+          containers:
+            - name: weave
+              command:
+                - /home/weave/launch.sh
+              env:
+                - name: HOSTNAME
+                  valueFrom:
+                    fieldRef:
+                      apiVersion: v1
+                      fieldPath: spec.nodeName
+              image: 'weaveworks/weave-kube:2.2.0'
+              livenessProbe:
+                httpGet:
+                  host: 127.0.0.1
+                  path: /status
+                  port: 6784
+                initialDelaySeconds: 30
+              resources:
+                requests:
+                  cpu: 10m
+              securityContext:
+                privileged: true
+              volumeMounts:
+                - name: weavedb
+                  mountPath: /weavedb
+                - name: cni-bin
+                  mountPath: /host/opt
+                - name: cni-bin2
+                  mountPath: /host/home
+                - name: cni-conf
+                  mountPath: /host/etc
+                - name: dbus
+                  mountPath: /host/var/lib/dbus
+                - name: lib-modules
+                  mountPath: /lib/modules
+                - name: xtables-lock
+                  mountPath: /run/xtables.lock
+            - name: weave-npc
+              args: []
+              env:
+                - name: HOSTNAME
+                  valueFrom:
+                    fieldRef:
+                      apiVersion: v1
+                      fieldPath: spec.nodeName
+              image: 'weaveworks/weave-npc:2.2.0'
+              resources:
+                requests:
+                  cpu: 10m
+              securityContext:
+                privileged: true
+              volumeMounts:
+                - name: xtables-lock
+                  mountPath: /run/xtables.lock
+          hostNetwork: true
+          hostPID: true
+          restartPolicy: Always
+          securityContext:
+            seLinuxOptions: {}
+          serviceAccountName: weave-net
+          tolerations:
+            - effect: NoSchedule
+              operator: Exists
+          volumes:
+            - name: weavedb
+              hostPath:
+                path: /var/lib/weave
+            - name: cni-bin
+              hostPath:
+                path: /opt
+            - name: cni-bin2
+              hostPath:
+                path: /home
+            - name: cni-conf
+              hostPath:
+                path: /etc
+            - name: dbus
+              hostPath:
+                path: /var/lib/dbus
+            - name: lib-modules
+              hostPath:
+                path: /lib/modules
+            - name: xtables-lock
+              hostPath:
+                path: /run/xtables.lock
+      updateStrategy:
+        type: RollingUpdate