Merge "updating kubernetes conf file location output from setup script"
diff --git a/Makefile b/Makefile
index 9d532a4..058091f 100644
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,7 @@
 
 # Other repos with documentation that's included in the gitbook
 # edit the `git_refs` file with the commit/tag/branch that you want to use
-OTHER_REPO_DOCS ?= cord-tester fabric hippie-oss kubernetes-service olt-service onos-service openolt openstack rcord simpleexampleservice exampleservice vrouter xos xos-gui xos-tosca
+OTHER_REPO_DOCS ?= att-workflow-driver cord-tester fabric hippie-oss kubernetes-service olt-service onos-service openolt openstack rcord simpleexampleservice exampleservice vrouter xos xos-gui xos-tosca
 GENERATED_DOCS  ?= # should be 'swagger', but currently broken
 ALL_DOCS        ?= $(OTHER_REPO_DOCS) $(GENERATED_DOCS)
 
@@ -87,3 +87,13 @@
 swagger: xos
 	pushd repos/xos/docs; make swagger_docs; popd;
 
+# generate a list of git checksums suitable for updating git_refs
+freeze: repos
+	@for repo in $(OTHER_REPO_DOCS) ; do \
+	  GIT_SUBDIR=`grep "^$$repo " git_refs | awk '{print $$2}'` ;\
+	  cd "repos/$$repo" > /dev/null ;\
+	    HEAD_SHA=`git rev-parse HEAD` ;\
+	    printf "%-21s %-8s %-40s\n" $$repo $$GIT_SUBDIR $$HEAD_SHA ;\
+	  cd ../.. ;\
+	done
+
diff --git a/SUMMARY.md b/SUMMARY.md
index 5da0124..ffad806 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -20,8 +20,9 @@
     * [Fabric Software Setup](fabric-setup.md)
     * [Bringing Up CORD](profiles/intro.md)
         * [R-CORD](profiles/rcord/install.md)
-            * [OLT Setup](openolt/README.md)
-                * [Emulated OLT/ONU](profiles/rcord/emulate.md)
+            * [EdgeCore (OpenOLT driver) Setup](openolt/README.md)
+            * [Celestica / Microsemi Setup](profiles/rcord/celestica-olt-setup.md)
+            * [Emulated OLT/ONU](profiles/rcord/emulate.md)
         * [M-CORD](profiles/mcord/install.md)
             * [EnodeB Setup](profiles/mcord/enodeb-setup.md)
     * [Helm Reference](charts/helm.md)
@@ -32,20 +33,24 @@
         * [Hippie OSS](charts/hippie-oss.md)
         * [Base OpenStack](charts/base-openstack.md)
             * [VTN Setup](prereqs/vtn-setup.md)
+        * [R-CORD](charts/rcord.md)
         * [M-CORD](charts/mcord.md)
         * [XOSSH](charts/xossh.md)
+        * [Logging and Monitoring](charts/logging-monitoring.md)
+        * [Persistent Storage](charts/storage.md)
+        * [BBSim](charts/bbsim.md)
 * [Operations Guide](operating_cord/operating_cord.md)
     * [General Info](operating_cord/general.md)
         * [GUI](operating_cord/gui.md)
             * [Configuring the Service Graph](xos-gui/developer/service_graph.md)
-        * [REST API](operating_cord/rest_apis.md)
-        * [TOSCA](xos-tosca/README.md)
         * [XOSSH](xos/dev/xossh.md)
         * [XOS Internals](operating_cord/xos_internals.md)
             * [XOS Containers](xos/xos_internals.md)
             * [XOS Configuration](xos/modules/xosconfig.md)
     * [Configuring Profiles](operating_cord/profiles.md)
         * [R-CORD](profiles/rcord/configuration.md)
+            * Workflows
+                * [AT&T](profiles/rcord/workflows/att.md)
         * [M-CORD](profiles/mcord/configuration.md)
     * [Configuring Services](operating_cord/services.md)
         * [Fabric](fabric/README.md)
@@ -53,6 +58,12 @@
         * [RCORD](rcord/README.md)
         * [vOLT](olt-service/README.md)
         * [vRouter](vrouter/README.md)
+        * [AT&T Workflow Driver](att-workflow-driver/README.md)
+    * [Attach containers to external NICs](operating_cord/veth_intf.md)
+* [API Guide](api/api.md)
+    * [gRPC API Tutorial](xos/dev/grpc_api.md)
+    * [REST API](operating_cord/rest_apis.md)
+    * [TOSCA](xos-tosca/README.md)
 * [Development Guide](developer/developer.md)
     * [Getting the Source Code](developer/getting_the_code.md)
     * [Writing Models and Synchronizers](xos/intro.md)
diff --git a/api/api.md b/api/api.md
new file mode 100644
index 0000000..1948c3f
--- /dev/null
+++ b/api/api.md
@@ -0,0 +1,14 @@
+# API Guide
+
+This guide describes workflows for interacting with the API of the NEM. There
+are several different API mechanisms that the NEM supports. Some of them are
+used in a Northbound context, for services sitting on top of the NEM to
+interact with it, and some are used internally for components to communicate
+with each other.
+
+* [gRPC](/xos/dev/grpc_api.md). The gRPC API is used internally for synchronizers and for Chameleon to speak with the XOS core. It's also available as a Northbound API.
+
+* [REST](/operating_cord/rest_apis.md). The REST API is implemented by the Chameleon container. In addition to being a popular Northbound API, it's also used by the XOS GUI.
+
+* [TOSCA](/xos-tosca/README.md). TOSCA is implemented by the xos-tosca container and is typically used to configure and provision a
+   POD. Later sections of this guide give examples of TOSCA workflows used to provision and configure various
+   [profiles](/operating_cord/profiles.md) and [services](/operating_cord/services.md). TOSCA can also be used as a general-purpose runtime API.
diff --git a/book.json b/book.json
index fd3f609..0bf2058 100644
--- a/book.json
+++ b/book.json
@@ -4,6 +4,9 @@
   "structure": {
     "summary": "SUMMARY.md"
   },
+  "styles": {
+    "website": "styles/website.css"
+  },
   "variables": {
     "branch": "master"
   },
@@ -21,6 +24,10 @@
           "text": "Master (Devel)"
         },
         {
+          "value": "/cord-6.0",
+          "text": "6.0 (Stable)"
+        },
+        {
           "value": "/cord-5.0",
           "text": "5.0 (Stable)"
         },
@@ -29,10 +36,6 @@
           "text": "4.1 (Stable)"
         },
         {
-          "value": "/cord-4.0",
-          "text": "4.0 (Stable)"
-        },
-        {
           "value": "https://wiki.opencord.org/display/CORD/Building+and+Installing+CORD",
           "text": "3.0 and previous (wiki)"
         }
diff --git a/charts/bbsim.md b/charts/bbsim.md
new file mode 100644
index 0000000..cabc6c6
--- /dev/null
+++ b/charts/bbsim.md
@@ -0,0 +1,53 @@
+# BBSim Helm Chart
+
+This chart lets you install the broadband simulator (BBSim).
+Note that this chart depends on [kafka](kafka.md).
+
+```shell
+helm install -n bbsim bbsim
+```
+
+## Set a different number of ONUs
+
+You can configure the number of ONUs through a parameter in the installation:
+
+```shell
+helm install -n bbsim bbsim --set onus_per_pon_port={number_of_onus}
+```
+
+## Set a different mode
+
+By default BBSim will bring up a certain number of ONUs and then start sending
+authentication requests, via EAPOL, and DHCP requests.
+
+You can change the behaviour via:
+
+```shell
+helm install -n bbsim bbsim --set emulation_mode="{both|aaa|default}"
+```
+
+Where:
+
+- `both` stands for authentication and DHCP
+- `aaa` stands for authentication only
+- `default` will just activate the devices
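+
+For example, to bring up the simulator with authentication only:
+
+```shell
+helm install -n bbsim bbsim --set emulation_mode="aaa"
+```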
+
+## Start BBSim without Kafka
+
+Kafka is used to aggregate the logs in CORD's [logging](logging-monitoring.md)
+framework.
+
+If you want to start BBSim without pushing the logs to kafka, you can install it
+with:
+
+```shell
+helm install -n bbsim bbsim --set kafka_broker=""
+```
+
+## Provision the BBSim OLT in NEM
+
+You can use this file to bring up the BBSim OLT in NEM: [bbsim-16.yaml](https://github.com/opencord/pod-configs/blob/master/tosca-configs/bbsim/bbsim-16.yaml).
+
+Note that in that file there is a bit of configuration for the `dhcpl2relay` application
+in ONOS that instructs it to send DHCP packets back to the OLT. This may differ
+from a POD where you are sending those packets out of the fabric.
\ No newline at end of file
diff --git a/charts/helm.md b/charts/helm.md
index 8b1c2e3..addf574 100644
--- a/charts/helm.md
+++ b/charts/helm.md
@@ -82,35 +82,102 @@
 
 ### Add the CORD Repository to Helm
 
-If you don't want to download the repository, you can just add the OPENCord charts to your helm repo:
+If you don't want to download the repository, you can make the charts available
+to helm by adding the repo to the list of repos it can obtain charts from:
 
 ```shell
 helm repo add cord https://charts.opencord.org/master
 helm repo update
 ```
 
-If you decide to follow this route, the `cord/` prefix needs to be
-added to specify the repo to use. For example:
+If you decide to follow this route, you have to prefix each chart name with
+the repo name (in this case `cord/`) to specify which repo to obtain a chart
+from.
+
+For example:
 
 ```shell
 helm install -n xos-core xos-core
 ```
 
-will become
+would become:
 
 ```shell
 helm install -n xos-core cord/xos-core
 ```
 
-## CORD Example Values
+## Overriding chart values
 
-There is an `example` directory in the `helm-chart` repository.
-The files contained in that directory are examples of possible overrides
-to obtain a custom deployment.
+Occasionally you may need to [override and customize the default
+settings](https://docs.helm.sh/using_helm/#customizing-the-chart-before-installing)
+of a chart.
 
-For example, it is possible to deploy a single instance of `kafka`,
-for development purposes, by using this value file:
+This is done using a "values file", and is done most frequently during
+development or when customizing a deployment.
 
-```shell
-helm install --name cord-kafka incubator/kafka -f examples/kafka-single.yaml
+Development-specific and deployment example values files can be found in the
+[helm-charts/examples](https://gerrit.opencord.org/gitweb?p=helm-charts.git;a=tree;f=examples)
+directory.
+
+### Specifying a Docker registry
+
+Most charts specify a global value for the address of a Docker image registry.
+By default this is blank, assuming that images will be pulled from the global
+hub.docker.com registry:
+
+```yaml
+global:
+  registry: ''
 ```
+
+This would be overridden as follows - make sure to include the trailing `/`
+character to separate the registry from the name of the container:
+
+```yaml
+global:
+  registry: '10.90.0.101:30500/'
+```
+
+Note that setting this value will change the registry setting for every
+image in a chart.
+
+To handle building and pushing images to a registry, see the [development
+documentation](../developer/workflows.md#pushing-changes-to-a-remote-registry).
+
+If you want to change only the registry for one specific image, the easiest way
+is to modify the `repository` setting - for example:
+
+```yaml
+images:
+  xos_gui:
+    repository: 'xosproject/xos-gui'
+    tag: '2.1.0'
+    pullPolicy: 'Always'
+
+  xos_ws:
+    repository: 'xosproject/xos-ws'
+    tag: '2.0.0'
+    pullPolicy: 'Always'
+
+global:
+  registry: ''
+```
+
+You would modify the `repository` value for the specific image, but not the
+global `registry` value:
+
+```yaml
+images:
+  xos_gui:
+    repository: '10.90.0.101:30500/xosproject/xos-gui'
+    tag: '2.1.0'
+    pullPolicy: 'Always'
+
+  xos_ws:
+    repository: 'xosproject/xos-ws'
+    tag: '2.0.0'
+    pullPolicy: 'Always'
+
+global:
+  registry: ''
+```
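+
+To apply the override, pass the values file when installing the chart. This is
+a sketch; the values file name and the `xos-gui` chart name are illustrative:
+
+```shell
+helm install -f custom-registry.yaml -n xos-gui xos-gui
+```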
+
diff --git a/charts/kafka.md b/charts/kafka.md
index 464dfe5..26d4645 100644
--- a/charts/kafka.md
+++ b/charts/kafka.md
@@ -3,21 +3,54 @@
 The `kafka` helm chart is not maintained by CORD,
 but it is available online at: <https://github.com/kubernetes/charts/tree/master/incubator/kafka>
 
-To install kafka you can use:
+To install kafka using the `cord-kafka` name, run the following commands:
 
 ```shell
 helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
-helm install --name cord-kafka \
---set replicas=1 \
---set persistence.enabled=false \
---set zookeeper.servers=1 \
---set zookeeper.persistence.enabled=false \
-incubator/kafka
+helm install -f examples/kafka-single.yaml --version 0.8.8 -n cord-kafka incubator/kafka
 ```
+> NOTE: Historically there were two kafka buses deployed (another one named
+> `voltha-kafka`) but these have been consolidated.
 
-If you are experierencing problems with a multi instance installation of kafka,
-you can try to install a single instance of it:
+## Viewing events with kafkacat
+
+As a debugging tool you can deploy a container containing `kafkacat` and use
+that to listen for events:
 
 ```shell
-helm install --name cord-kafka incubator/kafka -f examples/kafka-single.yaml
-```
\ No newline at end of file
+cd helm-charts
+helm install -n kafkacat xos-tools/kafkacat
+```
+
+Once the container is up and running you can exec into the pod and run kafkacat
+to perform various diagnostic commands.
+
+```shell
+kubectl exec -it kafkacat-##########-##### bash
+```
+
+For a complete reference, please refer to the [`kafkacat`
+guide](https://github.com/edenhill/kafkacat).
+
+A few examples:
+
+- List available topics:
+  ```shell
+  kafkacat -b cord-kafka -L
+  ```
+
+- Listen for events on a particular topic:
+  ```shell
+  kafkacat -b cord-kafka -C -t <kafka-topic>
+  ```
+
+- Some example topics to listen on:
+
+  ```shell
+  kafkacat -b cord-kafka -C -t xos.log.core
+  kafkacat -b cord-kafka -C -t xos.gui_events
+  kafkacat -b cord-kafka -C -t voltha.events
+  kafkacat -b cord-kafka -C -t onu.events
+  kafkacat -b cord-kafka -C -t authentication.events
+  kafkacat -b cord-kafka -C -t dhcp.events
+  ```
diff --git a/charts/local-persistent-volume.md b/charts/local-persistent-volume.md
deleted file mode 100644
index b2bd8d8..0000000
--- a/charts/local-persistent-volume.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Local Persistent Volume Helm chart
-
-## Introduction
-
-The `local-persistent-volume` helm chart is a utility helm chart. It was
-created mainly to persist the `xos-core` DB data but this helm can be used
-to persist any data.
-
-It uses a relatively new kubernetes feature (it's a beta feature
-in Kubernetes 1.10.x) that allows us to define an independent persistent
-store in a kubernetes cluster.
-
-The helm chart mainly consists of the following kubernetes resources:
-
-- A storage class resource representing a local persistent volume
-- A persistent volume resource associated with the storage class and a specific directory on a specific node
-- A persistent volume claim resource that claims certain portion of the persistent volume on behalf of a pod
-
-The following variables are configurable in the helm chart:
-
-- `storageClassName`: The name of the storage class resource
-- `persistentVolumeName`: The name of the persistent volume resource
-- `pvClaimName`: The name of the persistent volume claim resource
-- `volumeHostName`: The name of the kubernetes node on which the data will be persisted
-- `hostLocalPath`: The directory or volume mount path on the chosen chosen node where data will be persisted
-- `pvStorageCapacity`: The capacity of the volume available to the persistent volume resource (e.g. 10Gi)
-
-Note: For this helm chart to work, the volume mount path or directory specified in the `hostLocalPath` variable needs to exist before the helm chart is deployed.
-
-## Standard Install
-
-```shell
-helm install -n local-store local-persistent-volume
-```
-
-## Standard Uninstall
-
-```shell
-helm delete --purge local-store
-```
diff --git a/charts/logging-monitoring.md b/charts/logging-monitoring.md
new file mode 100644
index 0000000..01dcfcb
--- /dev/null
+++ b/charts/logging-monitoring.md
@@ -0,0 +1,46 @@
+# Deploy Logging and Monitoring components
+
+To read more about logging and monitoring in CORD, please refer to [the design
+document](https://docs.google.com/document/d/1hCljvKzsNW9D2Y1cbvOTNOCbTy1AgH33zXvVjbicjH8/edit).
+
+There are currently two charts that deploy logging and monitoring
+functionality, `nem-monitoring` and `logging`.  Both of these charts depend on
+having [kafka](kafka.md) instances running in order to pass messages.
+
+
+## `nem-monitoring` charts
+
+```shell
+helm dep update nem-monitoring
+helm install -n nem-monitoring nem-monitoring
+```
+
+> NOTE: In order to display `voltha` KPIs you need to have `voltha`
+> and `cord-kafka` installed.
+
+### Monitoring Dashboards
+
+This chart exposes two dashboards:
+
+- [Grafana](http://docs.grafana.org/) on port `31300`
+- [Prometheus](https://prometheus.io/docs/) on port `31301`
+
+## `logging` charts
+
+```shell
+helm dep up logging
+helm install -n logging logging
+```
+
+For smaller developer/test environments without persistent storage, please use
+the `examples/logging-single.yaml` file to run the logging chart, which doesn't
+create PVCs.
+
+### Logging Dashboard
+
+The [Kibana](https://www.elastic.co/guide/en/kibana/current/index.html)
+dashboard can be found on port `30601`.
+
+To start using Kibana, you must create an index under *Management > Index
+Patterns*.  Create one with a name of `logstash-*`, then you can search for
+events in the *Discover* section.
diff --git a/charts/onos.md b/charts/onos.md
index 1a704db..942f1c0 100644
--- a/charts/onos.md
+++ b/charts/onos.md
@@ -1,48 +1,43 @@
 # Deploy ONOS
 
-The same chart can be used to deploy different flavors of ONOS, depending on the configuration applied (configurations available in the configs folder).
+## Configurations
 
-* **onos-fabric**: a specific version of ONOS used to control the Trellis fabric
-* **onos-voltha**: a specific version of ONOS used to control VOLTHA
-* **onos-vtn**: a speciic version of ONOS used to control VTN
-* **no configuration applied**: if no configurations are applied, a generic ONOS instance will be installed
+The same chart can be used to deploy different flavors of ONOS, depending on
+the configuration applied. These configurations can be found in the
+`helm-charts/configs` directory.
 
-## onos-fabric
+* **onos**: ONOS configured for the CORD scenarios with Trellis (Fabric), VOLTHA,
+  and VTN
+* **no configuration applied**: if no configurations are applied, a generic
+  ONOS instance will be installed
+
+## ONOS with CORD configuration
 
 ```shell
-helm install -n onos-fabric -f configs/onos-fabric.yaml onos
+helm install -n onos -f configs/onos.yaml onos
 ```
 
 **Nodeports exposed**
 
-* ovsdb: 31640
 * OpenFlow: 31653
-* SSH: 31101
-* REST/UI: 31181
-
-## onos-voltha
-
-> **Note:** This requires [VOLTHA](voltha.md) to be installed
-
-```shell
-helm install -n onos-voltha -f configs/onos-voltha.yaml onos
-```
-
-**Nodeports exposed**
-
 * SSH: 30115
 * REST/UI: 30120
+* Karaf debugger: 30555
 
-## onos-cord (onos-vtn)
+## Use VOLTHA-ONOS
+
+_This is intended for development purposes._
 
 ```shell
-helm install -n onos-cord -f configs/onos-cord.yaml onos
+helm install -n onos -f configs/onos-voltha.yaml onos
 ```
 
 **Nodeports exposed**
 
-* SSH: 32101
-* REST/UI: 32181
+* OpenFlow: 31653
+* SSH: 30115
+* REST/UI: 30120
+* Karaf debugger: 30555
 
 ## Generic ONOS
 
@@ -50,4 +45,39 @@
 helm install -n onos onos
 ```
 
-The configuration doesn't expose any nodeport.
+**Nodeports exposed**: None
+
+## ONOS logging
+
+### `onos-log-agent` Sidecar container
+
+By default, the onos helm chart will run a sidecar container to ship logs using
+[Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html)
+to [Kafka](kafka.md) for aggregation of logs with the rest of the CORD
+platform.
+
+This container is named `onos-log-agent`, and because two containers are
+running in the pod, you may need to use the `-c` option with `kubectl` to
+specify which container you want to interact with. For example, to view the
+ONOS logs via kubectl, you would use:
+
+    kubectl logs onos-7bbc9555bf-2754p -c onos
+
+and to view the filebeat logs:
+
+    kubectl logs onos-7bbc9555bf-2754p -c onos-log-agent
+
+If the sidecar isn't required, it can be disabled when installing the
+chart by passing `--set log_agent.enabled=false` or by using a values file.
+
+### Modifying ONOS logging levels
+
+Logging levels can be set either in the default ONOS *values.yaml* file, or
+overridden through an external configuration file. Here is an example:
+
+```yaml
+application_logs: |
+  log4j.logger.org.opencord.olt = DEBUG
+  log4j.logger.org.opencord.kafka = DEBUG
+  log4j.logger.org.opencord.sadis = DEBUG
+```
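+
+For example, assuming the snippet above is saved in a file named
+`onos-debug.yaml` (a hypothetical name), it could be applied when installing
+the chart by passing an additional values file:
+
+```shell
+helm install -n onos -f configs/onos.yaml -f onos-debug.yaml onos
+```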
diff --git a/charts/storage.md b/charts/storage.md
new file mode 100644
index 0000000..7f8fb11
--- /dev/null
+++ b/charts/storage.md
@@ -0,0 +1,369 @@
+# Persistent Storage charts
+
+These charts implement persistent storage within Kubernetes.
+
+See the Kubernetes documentation for background material on how persistent
+storage works:
+
+- [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+- [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+
+Using persistent storage is optional during development, but should be
+provisioned and configured for production and realistic testing
+scenarios.
+
+## Local Directory
+
+The `local-directory` chart creates
+[local](https://kubernetes.io/docs/concepts/storage/volumes/#local) volumes on
+specific nodes, from directories. As there are no enforced limits for volume
+size and the node names are preconfigured, this chart is intended for use only
+for development and testing.
+
+Multiple directories can be specified in the `volumes` list - an example is
+given in the `values.yaml` file of the chart. You should create another values
+file, specific to your deployment, that overrides these with the
+deployment's node and directory names, and then ensure that these directories
+are created before running this chart.
+
+The `StorageClass` created for all volumes created by this chart is
+`local-directory`.
+
+There is an ansible playbook that automates the creation of directories on all
+the kubernetes nodes given a values file.  Make sure that the inventory name in
+ansible matches the name of the `host` in the `volumes` list, then invoke
+with:
+
+```shell
+ansible-playbook -i <path to ansible inventory> --extra-vars "helm_values_file=<path to values.yaml>" local-directory-playbook.yaml
+```
+
+to create all local directories.
+
+Then load the helm chart:
+
+```shell
+helm install -f <path to values.yaml> -n local-directory local-directory
+```
+
+You should then be able to list the local directory PVs:
+
+```shell
+$ kubectl get pv
+NAME       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM     STORAGECLASS      REASON    AGE
+large-pv   10Gi       RWO            Retain           Available             local-directory             8s
+small-pv   2Gi        RWO            Retain           Available             local-directory             8s
+```
+
+
+## Local Provisioner
+
+The `local-provisioner` chart provides a
+[local](https://kubernetes.io/docs/concepts/storage/volumes/#local),
+non-distributed `PersistentVolume` that is usable on one specific node.  It
+does this by running the k8s [external storage local volume
+provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume/helm/provisioner).
+
+This type of storage is useful for workloads that have their own intrinsic HA
+or redundancy strategies, and only need storage on multiple nodes.
+
+This provisioner is not "dynamic" in the sense that that it can't create a new
+`PersistentVolume` on demand from a storage pool, but the provisioner can
+automatically create volumes as disks/partitions are mounted on the nodes.
+
+To create a new PV, a disk or partition on a node has to be formatted and
+mounted in specific locations, after which the provisioner will automatically
+create a `PersistentVolume` for the mount. As these volumes can't be split or
+resized, care must be taken to ensure that the correct quantity, types, and
+sizes of mounts are created for all the `PersistentVolumeClaim`'s required to
+be bound for a specific workload.
+
+By default, two `StorageClasses` are created to differentiate between hard
+disks and SSDs:
+
+- `local-hdd`, which offers PVs on volumes mounted in `/mnt/local-storage/hdd/*`
+- `local-ssd`, which offers PVs on volumes mounted in `/mnt/local-storage/ssd/*`
+
+### Adding a new local volume on a node
+
+If you wanted to add a new volume to a node, you'd physically install a new
+disk in the system, then determine the device file it uses. Assuming that it's
+a hard disk and the device file is `/dev/sdb`, you might partition, format, and
+mount the disk like this:
+
+```shell
+$ sudo parted -s /dev/sdb \
+    mklabel gpt \
+    mkpart primary ext4 1MiB 100%
+$ sudo mkfs.ext4 /dev/sdb1
+$ echo "/dev/sdb1 /mnt/local-storage/hdd/sdb1 ext4 defaults 0 0" | sudo tee -a /etc/fstab
+$ sudo mount /mnt/local-storage/hdd/sdb1
+```
+
+Then check that the `PersistentVolume` is created by the `local-provisioner`:
+
+```shell
+$ kubectl get pv
+NAME                CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                  STORAGECLASS     REASON    AGE
+local-pv-2bfa2c43   19Gi       RWO            Delete           Available                          local-hdd                  6h
+
+$ kubectl describe pv local-pv-
+Name:              local-pv-2bfa2c43
+Labels:            <none>
+Annotations:       pv.kubernetes.io/provisioned-by=local-volume-provisioner-node1-...
+Finalizers:        [kubernetes.io/pv-protection]
+StorageClass:      local-hdd
+Status:            Available
+Claim:
+Reclaim Policy:    Delete
+Access Modes:      RWO
+Capacity:          19Gi
+Node Affinity:
+  Required Terms:
+    Term 0:        kubernetes.io/hostname in [node1]
+Message:
+Source:
+    Type:  LocalVolume (a persistent volume backed by local storage on a node)
+    Path:  /mnt/local-storage/hdd/sdb1
+Events:    <none>
+```
+
+## Ceph deployed with Rook
+
+[Rook](https://rook.github.io/) provides an abstraction layer for Ceph and
+other distributed persistent data storage systems.
+
+There are 3 Rook charts included with CORD:
+
+- `rook-operator`, which runs the volume provisioning portion of Rook (and is a
+  thin wrapper around the upstream [rook-ceph
+  chart](https://rook.github.io/docs/rook/v0.8/helm-operator.html))
+
+- `rook-cluster`, which defines the Ceph cluster and creates these
+  `StorageClass` objects usable by other charts:
+
+    - `cord-ceph-rbd`, which dynamically creates `PersistentVolumes` when a
+      `PersistentVolumeClaim` is created. These volumes are only usable by a
+      single container at a time.
+
+    - `cord-cephfs`, a single shared filesystem which is mountable
+      `ReadWriteMany` on multiple containers via `PersistentVolumeClaim`. Its
+      size is predetermined.
+
+- `rook-tools`, which provides a toolbox container for troubleshooting problems
+  with Rook/Ceph
+
+To create persistent volumes, you will need to load the first 2 charts, with
+the third only needed for troubleshooting and diagnostics.
+
+### Rook Node Prerequisites
+
+By default, all the nodes running k8s are expected to have a directory named
+`/mnt/ceph` where the Ceph data is stored (the `cephDataDir` variable can be
+used to change this path).
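+
+For example, the data path could be overridden when loading the chart. This is
+a sketch; it assumes the `cephDataDir` value is exposed by the `rook-cluster`
+chart:
+
+```shell
+helm install -n rook-cluster --set cephDataDir=/srv/ceph rook-cluster
+```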
+
+In a production deployment, this would ideally be located on its own block
+storage device.
+
+There should be at least 3 nodes with storage available to provide data
+redundancy.
+
+### Loading Rook Charts
+
+First, add the `rook-beta` repo to helm, then load the `rook-operator` chart
+into the `rook-ceph-system` namespace:
+
+```shell
+cd helm-charts/storage
+helm repo add rook-beta https://charts.rook.io/beta
+helm dep update rook-operator
+helm install --namespace rook-ceph-system -n rook-operator rook-operator
+```
+
+Check that it's running (it will start the `rook-ceph-agent` and
+`rook-discover` DaemonSets):
+
+```shell
+$ kubectl -n rook-ceph-system get pods
+NAME                                  READY     STATUS    RESTARTS   AGE
+rook-ceph-agent-4c66b                 1/1       Running   0          6m
+rook-ceph-agent-dsdsr                 1/1       Running   0          6m
+rook-ceph-agent-gwjlk                 1/1       Running   0          6m
+rook-ceph-operator-687b7bb6ff-vzjsl   1/1       Running   0          7m
+rook-discover-9f87r                   1/1       Running   0          6m
+rook-discover-lmhz9                   1/1       Running   0          6m
+rook-discover-mxsr5                   1/1       Running   0          6m
+```
+
+Next, load the `rook-cluster` chart, which connects the storage on the nodes to
+the Ceph pool, and the CephFS filesystem:
+
+```shell
+helm install -n rook-cluster rook-cluster
+```
+
+Check that the cluster is running - this may take a few minutes; look for the
+`rook-ceph-mds-*` containers to start:
+
+```shell
+$ kubectl -n rook-ceph get pods
+NAME                                                  READY     STATUS      RESTARTS   AGE
+rook-ceph-mds-cord-ceph-filesystem-7564b648cf-4wxzn   1/1       Running     0          1m
+rook-ceph-mds-cord-ceph-filesystem-7564b648cf-rcvnx   1/1       Running     0          1m
+rook-ceph-mgr-a-75654fb698-zqj67                      1/1       Running     0          5m
+rook-ceph-mon0-v9d2t                                  1/1       Running     0          5m
+rook-ceph-mon1-4sxgc                                  1/1       Running     0          5m
+rook-ceph-mon2-6b6pj                                  1/1       Running     0          5m
+rook-ceph-osd-id-0-85d887f76c-44w9d                   1/1       Running     0          4m
+rook-ceph-osd-id-1-866fb5c684-lmxfp                   1/1       Running     0          4m
+rook-ceph-osd-id-2-557dd69c5c-qdnmb                   1/1       Running     0          4m
+rook-ceph-osd-prepare-node1-bfzzm                     0/1       Completed   0          4m
+rook-ceph-osd-prepare-node2-dt4gx                     0/1       Completed   0          4m
+rook-ceph-osd-prepare-node3-t5fnn                     0/1       Completed   0          4m
+
+$ kubectl -n rook-ceph get storageclass
+NAME            PROVISIONER                    AGE
+cord-ceph-rbd   ceph.rook.io/block             6m
+cord-cephfs     kubernetes.io/no-provisioner   6m
+
+$ kubectl -n rook-ceph get filesystems
+NAME                   AGE
+cord-ceph-filesystem   6m
+
+$ kubectl -n rook-ceph get pools
+NAME             AGE
+cord-ceph-pool   6m
+
+$ kubectl -n rook-ceph get persistentvolume
+NAME                CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM     STORAGECLASS   REASON    AGE
+cord-cephfs-pv      20Gi       RWX            Retain           Available             cord-cephfs              7m
+```
+
+At this point you can create a `PersistentVolumeClaim` on `cord-ceph-rbd` and a
+corresponding `PersistentVolume` will be created by the `rook-ceph-operator`
+acting as a volume provisioner and bound to the PVC.
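+
+For example, a minimal claim might look like this (the claim name and size are
+illustrative):
+
+```yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: example-claim
+spec:
+  storageClassName: cord-ceph-rbd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+```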
+
+Creating a `PersistentVolumeClaim` on `cord-cephfs` will mount the same CephFS
+filesystem on every container that requests it. The CephFS PV implementation
+currently isn't as mature as the Ceph RBD volumes, and may not remount properly
+when used with a PVC.
+
+### Troubleshooting Rook
+
+Checking the `rook-ceph-operator` logs can be enlightening:
+
+```shell
+kubectl -n rook-ceph-system logs -f rook-ceph-operator-...
+```
+
+The [Rook toolbox container](https://rook.io/docs/rook/v0.8/toolbox.html) has
+been containerized as the `rook-tools` chart, and provides a variety of tools
+for debugging Rook and Ceph.
+
+Load the `rook-tools` chart:
+
+```shell
+helm install -n rook-tools rook-tools
+```
+
+Once the container is running (check with `kubectl -n rook-ceph get pods`),
+exec into it to run a shell to access all tools:
+
+```shell
+kubectl -n rook-ceph exec -it rook-ceph-tools bash
+```
+
+or run a one-off command:
+
+```shell
+kubectl -n rook-ceph exec rook-ceph-tools -- ceph status
+```
+
+or mount the CephFS volume:
+
+```shell
+kubectl -n rook-ceph exec -it rook-ceph-tools bash
+mkdir /mnt/cephfs
+mon_endpoints=$(grep mon_host /etc/ceph/ceph.conf | awk '{print $3}')
+my_secret=$(grep key /etc/ceph/keyring | awk '{print $3}')
+mount -t ceph -o name=admin,secret=$my_secret $mon_endpoints:/ /mnt/cephfs
+ls /mnt/cephfs
+```
+
+### Cleaning up after Rook
+
+The `rook-operator` chart will leave a few `DaemonSet`s behind after it's
+removed. Clean these up using these commands:
+
+```shell
+kubectl -n rook-ceph-system delete daemonset rook-ceph-agent
+kubectl -n rook-ceph-system delete daemonset rook-discover
+helm delete --purge rook-operator
+```
+
+If you have other charts that create `PersistentVolumeClaims`, you may need to
+clean them up manually (for example, if you've changed the `StorageClass` they
+use), list them with:
+
+```shell
+kubectl get pvc --all-namespaces
+```
+
+Files may be left behind in the Ceph storage directory and/or Rook
+configuration that need to be deleted before starting `rook-*` charts. If
+you've used the `automation-tools/kubespray-installer` scripts to set up an
+environment named `test`, you can delete all these files with the following
+commands:
+
+```shell
+cd cord/automation-tools/kubespray-installer
+ansible -i inventories/test/inventory.cfg -b -m shell -a "rm -rf /var/lib/rook && rm -rf /mnt/ceph/*" all
+```
+
+The current upgrade process for Rook involves manual intervention and
+inspection using the tools container.
+
+## Using Persistent Storage
+
+The general process for using persistent storage is to create a
+[PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+on the appropriate
+[StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+for the workload you're trying to run.
+
+### Example: XOS Database on a local directory
+
+For development and testing, it may be useful to persist the XOS database.
+
+First, configure your nodes to deploy the [local-directory](#local-directory)
+chart, then run:
+
+```shell
+helm install -f examples/xos-db-local-dir.yaml -n xos-core xos-core
+```
+
+### Example: XOS Database on a Ceph RBD volume
+
+The XOS Database (Postgres) wants a volume that persists if a node goes down or
+is taken out of service and that is not shared with other containers running
+Postgres, so a Ceph RBD volume is a reasonable choice for it.
+
+Deploy the [rook-operator and rook-cluster](#ceph-deployed-with-rook) charts,
+then load the XOS core charts with:
+
+```shell
+helm install -f examples/xos-db-ceph-rbd.yaml -n xos-core xos-core
+```
+
+### Example: Docker Registry on CephFS shared filesystem
+
+The Docker Registry wants a filesystem that is shared across all
+containers, so it's a suitable workload for the `cephfs` shared filesystem.
+
+Deploy the [rook-operator and rook-cluster](#ceph-deployed-with-rook) charts,
+then load the registry chart with:
+
+```shell
+helm install -f examples/registry-cephfs.yaml -n docker-registry stable/docker-registry
+```
+
diff --git a/charts/voltha.md b/charts/voltha.md
index e39a45a..c7c5612 100644
--- a/charts/voltha.md
+++ b/charts/voltha.md
@@ -1,48 +1,44 @@
 # Deploy VOLTHA
 
+VOLTHA depends on having a [kafka message bus](kafka.md) deployed with a name
+of `cord-kafka`, so deploy that with helm before deploying the voltha chart.
+
+
 ## First Time Installation
 
-Download the helm charts `incubator` repository
+Download the helm charts `incubator` repository:
 
 ```shell
 helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
 ```
 
-Build dependencies
+Install the etcd-operator helm chart. This chart provides a convenient way of
+creating and managing etcd clusters. When voltha is installed, it will attempt
+to use etcd-operator to create its etcd cluster. Once installed, etcd-operator
+can be left running.
 
 ```shell
-helm dep build voltha
+helm install -n etcd-operator stable/etcd-operator --version 0.8.0
 ```
 
-Install the kafka dependency
+Allow etcd-operator enough time to create the EtcdCluster
+CustomResourceDefinition. This should take only a couple of seconds after the
+etcd-operator pods are running. Check that the CRD is ready by running the
+following:
 
 ```shell
-helm install --name voltha-kafka \
---set replicas=1 \
---set persistence.enabled=false \
---set zookeeper.servers=1 \
---set zookeeper.persistence.enabled=false \
-incubator/kafka
+kubectl get crd | grep etcd
 ```
 
-There is an `etcd-operator` **known bug** that prevents deploying
-Voltha correctly the first time. We suggest the following workaround:
 
-First, install Voltha without an `etcd` custom resource definition:
+
+Update dependencies within the voltha chart:
 
 ```shell
-helm install -n voltha --set etcd-operator.customResources.createEtcdClusterCRD=false voltha
+helm dep up voltha
 ```
 
-Then upgrade Voltha, which defaults to using the `etcd` custom
-resource definition:
+Install the voltha helm chart. This will create the voltha pods and
+additionally create the etcd-cluster pods:
 
 ```shell
-helm upgrade --set etcd-operator.customResources.createEtcdClusterCRD=true voltha ./voltha
+helm install -n voltha voltha
 ```
 
-After this first installation, you can use the standard
-install/uninstall procedure described below.
+Allow enough time for the 3 etcd-cluster pods to start before using the voltha pods.
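+
+One way to verify this (a minimal sketch, assuming the default namespace) is
+to watch for the etcd-cluster pods to reach the `Running` state:
+
+```shell
+kubectl get pods | grep etcd-cluster
+```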
 
 ## Standard Uninstall
 
@@ -73,3 +69,61 @@
 ```shell
 ssh voltha@<pod-ip> -p 30110
 ```
+
+The default VOLTHA password is `admin`.
+
+## Building and using development images
+
+In some cases you want to build custom images to try out development code.
+The suggested way to do that is:
+
+```shell
+cd ~/cord/incubator/voltha
+REPOSITORY=voltha/ TAG=dev VOLTHA_BUILD=docker make build
+cd ~/cord/automation-tools/developer
+bash tag_and_push.sh dev 192.168.99.100:30500
+```
+
+_This set of commands will build the VOLTHA containers and push them to a local
+[docker registry](../prereqs/docker-registry.md) using a tag called `dev`._
+
+> NOTE: Read more about the `tag_and_push` script [here](../prereqs/docker-registry.md)
+
+Once the images are pushed to a docker registry on the POD,
+you can use a values file like the following one:
+
+```yaml
+# voltha-values.yaml
+images:
+  vcore:
+    repository: '192.168.99.100:30500/voltha-voltha'
+    tag: 'dev'
+    pullPolicy: 'Always'
+
+  vcli:
+    repository: '192.168.99.100:30500/voltha-cli'
+    tag: 'dev'
+    pullPolicy: 'Always'
+
+  ofagent:
+    repository: '192.168.99.100:30500/voltha-ofagent'
+    tag: 'dev'
+    pullPolicy: 'Always'
+
+  netconf:
+    repository: '192.168.99.100:30500/voltha-netconf'
+    tag: 'dev'
+    pullPolicy: 'Always'
+
+  envoy_for_etcd:
+    repository: '192.168.99.100:30500/voltha-envoy'
+    tag: 'dev'
+    pullPolicy: 'Always'
+
+```
+
+and you can install VOLTHA using:
+
+```shell
+helm install -n voltha voltha -f voltha-values.yaml
+```
diff --git a/charts/xos-core.md b/charts/xos-core.md
index 7d2f44c..3d31434 100644
--- a/charts/xos-core.md
+++ b/charts/xos-core.md
@@ -6,6 +6,11 @@
 helm dep update xos-core
 helm install -n xos-core xos-core
 ```
+**Nodeports exposed**
+
+* UI: 30001
+* REST: 30006
+* Tosca: 30007
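+
+As a quick check that the core is up, you can probe one of these nodeports
+from outside the cluster (a minimal sketch; `<node-ip>` is the address of any
+kubernetes node):
+
+```shell
+# expect an HTTP response from the XOS GUI nodeport
+curl -I http://<node-ip>:30001/
+```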
 
 ## Customizing security information
 
diff --git a/developer/configuration_rcord.md b/developer/configuration_rcord.md
index d8066af..ceb4200 100644
--- a/developer/configuration_rcord.md
+++ b/developer/configuration_rcord.md
@@ -13,7 +13,7 @@
 ## Prerequisites
 
 - All the components needed for the R-CORD profile are up and running
-   on your POD (xos-core, rcord-lite, voltha, onos-voltha).
+   on your POD (xos-core, rcord-lite, voltha, onos).
 - Configure `OLT/PONPORT/ONU` devices using the sample
    TOSCA config given below:
 
diff --git a/fabric-setup.md b/fabric-setup.md
index d0de4c2..7c480b8 100644
--- a/fabric-setup.md
+++ b/fabric-setup.md
@@ -17,6 +17,23 @@
 
 **Checksum**: *sha256:2db316ea83f5dc761b9b11cc8542f153f092f3b49d82ffc0a36a2c41290f5421*
 
+### Instructions to install ONL on Delta Switches
+
+If the Delta switch you are using has the following in `/etc/machine.conf`:
+```shell
+onie_platform=x86_64_<platform name>-r0
+onie_machine=<platform name>
+```
+Please change it to the following before installing ONL:
+```shell
+onie_platform=x86_64-delta_<platform name>-r0
+onie_machine=delta_<platform name>
+```
+After the installation of ONL, if you don't see `/usr/bin/ofdpa` in your PATH variable, please run the following command:
+```shell
+export PATH=$PATH:/usr/bin/ofdpa
+```
+
 Guidelines on how to install ONL on top of an ONIE compatible device can be found directly on the [ONL website](https://opennetlinux.org/docs/deploy).
 
 This specific version of ONL has been customized to accept an IP address through DHCP on the management interface, *ma0*. If you'd like to use a static IP, first give
@@ -41,12 +58,13 @@
 ```shell
 dpkg -i your-ofdpa.deb
 ```
-
 Three OFDPA drivers are available:
 
 * [EdgeCore 5712-54X / 5812-54X / 6712-32X](https://github.com/onfsdn/atrium-docs/blob/master/16A/ONOS/builds/ofdpa_3.0.5.5%2Baccton1.7-1_amd64.deb?raw=true) - *checksum: sha256:db228b6e79fb15f77497b59689235606b60abc157e72fc3356071bcc8dc4c01f*
+* [EdgeCore 7712-32X](https://github.com/onfsdn/atrium-docs/blob/master/16A/ONOS/builds/ofdpa_3.0.5.5%2Baccton1.7-1_amd64.deb) - *checksum: sha256:4f78e8f43976dc86ab1cdc2f98afa743ce2e0cc5923e429c91f96b0edc3ddf4b*
 * [QuantaMesh T3048-LY8](https://github.com/onfsdn/atrium-docs/blob/master/16A/ONOS/builds/ofdpa-ly8_0.3.0.5.0-EA5-qct-01.01_amd64.deb?raw=true) - *checksum: sha256:f8201530b1452145c1a0956ea1d3c0402c3568d090553d0d7b3c91a79137da9e*
 * [QuantaMesh BMS T7032-IX1/IX1B](https://github.com/onfsdn/atrium-docs/blob/master/16A/ONOS/builds/ofdpa-ix1_0.3.0.5.0-EA5-qct-01.00_amd64.deb?raw=true) *checksum: sha256:278b8ffed8a8fc705a1b60d16f8e70377e78342a27a11568a1d80b1efd706a46*
+* [Delta AG7648](https://github.com/onfsdn/atrium-docs/blob/master/16A/ONOS/builds/ofdpa-ag7648_0.3.0.5.6_amd64.deb?raw=true) *checksum: sha256:ddfc13cb98ca47291dce5e6938b1d65f0b99bbe77f0585e36ac0007017397f23*
 
 ## Connect the Fabric Switches to ONOS
 
@@ -71,3 +89,10 @@
 ```
 
 > NOTE: It may take a few seconds for the switches to initialize and connect to ONOS
+
+### Additional notes for Delta switches
+
+If optical SFP cables are not coming up, please use the following command to launch ofdpa:
+```shell
+./launcher ofagentapp -t <ip address of the controller>
+```
diff --git a/git_refs b/git_refs
index 7faa57f..81b8688 100644
--- a/git_refs
+++ b/git_refs
@@ -11,6 +11,7 @@
 
 _REPO NAME_           _DIR_    _REF_
 
+att-workflow-driver   /docs    master
 cord-tester           /docs    master
 fabric                /docs    master
 hippie-oss            /docs    master
diff --git a/images/att_workflow.png b/images/att_workflow.png
new file mode 100644
index 0000000..66c89ec
--- /dev/null
+++ b/images/att_workflow.png
Binary files differ
diff --git a/logos/cord.svg b/logos/cord.svg
new file mode 100644
index 0000000..b7bed6c
--- /dev/null
+++ b/logos/cord.svg
@@ -0,0 +1,125 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   viewBox="0 0 120 120"
+   height="120"
+   width="120"
+   xml:space="preserve"
+   id="svg4336"
+   version="1.1"><metadata
+     id="metadata4342"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs4340"><radialGradient
+       id="radialGradient4362"
+       spreadMethod="pad"
+       gradientTransform="matrix(27.154079,0,0,-27.154079,139.19807,65.775001)"
+       gradientUnits="userSpaceOnUse"
+       r="1"
+       cy="0"
+       cx="0"
+       fy="0"
+       fx="0"><stop
+         id="stop4358"
+         offset="0"
+         style="stop-opacity:1;stop-color:#ec2227" /><stop
+         id="stop4360"
+         offset="1"
+         style="stop-opacity:1;stop-color:#321415" /></radialGradient><radialGradient
+       id="radialGradient4382"
+       spreadMethod="pad"
+       gradientTransform="matrix(52.007076,0,0,-52.007076,131.21657,119.00029)"
+       gradientUnits="userSpaceOnUse"
+       r="1"
+       cy="0"
+       cx="0"
+       fy="0"
+       fx="0"><stop
+         id="stop4378"
+         offset="0"
+         style="stop-opacity:1;stop-color:#ec2227" /><stop
+         id="stop4380"
+         offset="1"
+         style="stop-opacity:1;stop-color:#321415" /></radialGradient><linearGradient
+       id="linearGradient4402"
+       spreadMethod="pad"
+       gradientTransform="matrix(-30.969683,30.969683,30.969683,30.969683,139.12646,50.672722)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop4398"
+         offset="0"
+         style="stop-opacity:1;stop-color:#5f6060" /><stop
+         id="stop4400"
+         offset="1"
+         style="stop-opacity:1;stop-color:#e5e6e5" /></linearGradient><linearGradient
+       id="linearGradient4424"
+       spreadMethod="pad"
+       gradientTransform="matrix(55.687431,0,0,-55.687431,129.98773,22.378069)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop4418"
+         offset="0"
+         style="stop-opacity:1;stop-color:#027bc1" /><stop
+         id="stop4420"
+         offset="0.81819"
+         style="stop-opacity:1;stop-color:#1fc4f4" /><stop
+         id="stop4422"
+         offset="1"
+         style="stop-opacity:1;stop-color:#1fc4f4" /></linearGradient><linearGradient
+       id="linearGradient4448"
+       spreadMethod="pad"
+       gradientTransform="matrix(58.191917,0,0,-58.191917,5.0481901,22.378069)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop4440"
+         offset="0"
+         style="stop-opacity:1;stop-color:#1c407d" /><stop
+         id="stop4442"
+         offset="0.0996414"
+         style="stop-opacity:1;stop-color:#1c407d" /><stop
+         id="stop4444"
+         offset="0.50264328"
+         style="stop-opacity:1;stop-color:#027bc1" /><stop
+         id="stop4446"
+         offset="1"
+         style="stop-opacity:1;stop-color:#027bc1" /></linearGradient><clipPath
+       id="clipPath4458"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path4456"
+         d="M 0,140.382 H 265 V 0 H 0 Z" /></clipPath></defs><g
+     transform="matrix(1.3333333,0,0,-1.3333333,-111.46248,188.61605)"
+     id="g4344"><g
+       id="g4346"><g
+         id="g4348"><g
+           id="g4354"><g
+             id="g4356"><path
+               id="path4364"
+               style="fill:url(#radialGradient4362);stroke:none"
+               d="m 159.68,92.663 3.751,-3.75 3.75,3.75 -3.75,3.751 z m -27.046,-27.284 1.956,-1.956 c 3.14,-3.14 5.724,-2.819 8.628,0.087 v 0 l 17.501,17.514 -3.464,3.464 3.606,3.605 -2.748,2.749 z m 27.488,20.088 2.547,-2.546 2.546,2.546 -2.546,2.546 z" /></g></g></g></g><g
+       id="g4366"><g
+         id="g4368"><g
+           id="g4374"><g
+             id="g4376"><path
+               id="path4384"
+               style="fill:url(#radialGradient4382);stroke:none"
+               d="m 123.206,132.052 6.211,-6.21 5.721,5.724 -6.207,6.207 z m 20.17,-9.552 6.207,-6.206 6.223,6.227 -6.203,6.201 z m -29.652,0.075 6.215,-6.215 5.861,5.865 -6.21,6.21 z m 9.854,-4.788 6.214,-6.215 5.861,5.866 -6.21,6.21 z m 12.416,-2.667 6.209,-6.21 5.721,5.724 -6.206,6.207 z m -27.561,2.161 -0.004,0.003 -11.02,-11.013 C 88.781,97.642 89.207,90.369 97.41,82.167 v 0 l 1.733,-1.733 c -2.623,3.5 -4.191,10.926 1.848,16.966 v 0 l 13.657,13.664 -0.003,0.004 2.404,2.405 -6.211,6.211 z m 7.318,-7.318 -0.001,0.001 -11.02,-11.013 c -8.63,-8.629 -8.202,-15.903 0,-24.104 v 0 l 1.733,-1.734 c -2.623,3.5 -4.191,10.927 1.848,16.966 v 0 l 13.658,13.666 h -10e-4 l 6.626,6.629 -6.215,6.215 z m 14.401,-0.678 -4.052,-4.05 -2.814,-2.814 -0.001,10e-4 -11.021,-11.013 c -8.628,-8.629 -8.201,-15.903 0.001,-24.104 v 0 l 1.733,-1.734 c -2.623,3.5 -4.192,10.927 1.848,16.966 v 0 l 13.658,13.666 h -0.001 l 2.814,2.815 0.001,-0.002 6.003,6.007 -6.215,6.215 z" /></g></g></g></g><g
+       id="g4386"><g
+         id="g4388"><g
+           id="g4394"><g
+             id="g4396"><path
+               id="path4404"
+               style="fill:url(#linearGradient4402);stroke:none"
+               d="M 114.065,77.791 130.71,61.14 c 2.279,-2.28 6.954,-3.184 9.159,-0.979 v 0 c -2.543,-0.682 -4.539,0.909 -6.085,2.456 v 0 l -17.952,17.946 c -0.753,-0.909 -1.332,-1.839 -1.767,-2.772 m -1.007,-3.589 c -0.054,-0.493 -0.074,-0.978 -0.067,-1.454 v 0 l 0.616,-0.616 0.054,-0.06 0.127,-0.145 12.623,-12.629 c 2.28,-2.279 6.955,-3.185 9.16,-0.98 v 0 c -2.544,-0.681 -4.538,0.91 -6.085,2.456 v 0 L 113.604,76.65 c -0.282,-0.825 -0.459,-1.645 -0.546,-2.448 m 4.64,-12.325 4.415,-4.421 c 2.281,-2.279 6.954,-3.185 9.159,-0.98 v 0 c -2.543,-0.681 -4.537,0.911 -6.084,2.456 v 0 L 113.16,70.953 c 0.531,-3.105 1.807,-6.342 4.538,-9.076" /></g></g></g></g></g></svg>
\ No newline at end of file
diff --git a/logos/onos.svg b/logos/onos.svg
new file mode 100644
index 0000000..52a8e3f
--- /dev/null
+++ b/logos/onos.svg
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   viewBox="0 0 160 160"
+   height="160"
+   width="160"
+   xml:space="preserve"
+   id="svg29"
+   version="1.1"><metadata
+     id="metadata35"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs33"><linearGradient
+       id="linearGradient55"
+       spreadMethod="pad"
+       gradientTransform="matrix(0.3303223,18.925018,-18.925018,0.3303223,293.40207,298.54611)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop51"
+         offset="0"
+         style="stop-opacity:1;stop-color:#007ec5" /><stop
+         id="stop53"
+         offset="1"
+         style="stop-opacity:1;stop-color:#00c0ed" /></linearGradient><linearGradient
+       id="linearGradient75"
+       spreadMethod="pad"
+       gradientTransform="matrix(2.7860043,159.6113,-159.6113,2.7860043,364.22665,396.58569)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop71"
+         offset="0"
+         style="stop-opacity:1;stop-color:#007ec5" /><stop
+         id="stop73"
+         offset="1"
+         style="stop-opacity:1;stop-color:#00c0ed" /></linearGradient><linearGradient
+       id="linearGradient95"
+       spreadMethod="pad"
+       gradientTransform="matrix(0.3261117,18.679504,-18.679504,0.3261117,429.72345,225.29692)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop91"
+         offset="0"
+         style="stop-opacity:1;stop-color:#007ec5" /><stop
+         id="stop93"
+         offset="1"
+         style="stop-opacity:1;stop-color:#00c0ed" /></linearGradient><linearGradient
+       id="linearGradient115"
+       spreadMethod="pad"
+       gradientTransform="matrix(0.3299248,18.903107,-18.903107,0.3299248,499.41495,187.5108)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop111"
+         offset="0"
+         style="stop-opacity:1;stop-color:#007ec5" /><stop
+         id="stop113"
+         offset="1"
+         style="stop-opacity:1;stop-color:#00c0ed" /></linearGradient><clipPath
+       id="clipPath125"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path123"
+         d="M 0,612 H 792 V 0 H 0 Z" /></clipPath><linearGradient
+       id="linearGradient255"
+       spreadMethod="pad"
+       gradientTransform="matrix(-326.67047,225.63072,225.63072,326.67047,605.24286,196.58757)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop243"
+         offset="0"
+         style="stop-opacity:1;stop-color:#c6262f" /><stop
+         id="stop245"
+         offset="0.00128806"
+         style="stop-opacity:1;stop-color:#c6262f" /><stop
+         id="stop247"
+         offset="0.3136"
+         style="stop-opacity:1;stop-color:#d64b43" /><stop
+         id="stop249"
+         offset="0.5951"
+         style="stop-opacity:1;stop-color:#e26255" /><stop
+         id="stop251"
+         offset="0.8334"
+         style="stop-opacity:1;stop-color:#ea6f61" /><stop
+         id="stop253"
+         offset="1"
+         style="stop-opacity:1;stop-color:#ed7466" /></linearGradient><clipPath
+       id="clipPath265"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path263"
+         d="M 0,612 H 792 V 0 H 0 Z" /></clipPath><clipPath
+       id="clipPath281"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path279"
+         d="M 0,612 H 792 V 0 H 0 Z" /></clipPath></defs><g
+     transform="matrix(1.3333333,0,0,-1.3333333,-464.33332,522.99999)"
+     id="g37"><g
+       id="g231"><g
+         id="g233"><g
+           id="g239"><g
+             id="g241"><path
+               id="path257"
+               style="fill:url(#linearGradient255);stroke:none"
+               d="m 428.8,388 c 9.6,-17.1 7.2,-38.3 3.1,-54.2 v 0 c -2,-7.3 -5.2,-15 -4.2,-22.2 v 0 c -6.9,-0.6 -13.1,-5 -19.2,-7.1 v 0 c -18.1,-6.2 -33.9,-9.1 -56.5,-4.7 v 0 c 24.6,-17.2 36.6,-13 63.7,-0.1 v 0 c -0.5,-0.6 -0.7,-1.3 -1.3,-1.9 v 0 c 1.4,0.4 2.4,1.7 3.4,2.2 v 0 c -0.4,-0.7 -0.9,-1.5 -1.4,-1.9 v 0 c 2.2,0.6 3.7,2.3 5.9,3.9 v 0 c -2.4,-2.1 -4.2,-5 -6,-8 v 0 c -1.5,-2.5 -3.1,-4.8 -5.1,-6.9 v 0 l -2.9,-2.9 c -1.4,-1.3 -2.9,-2.5 -5.1,-2.9 v 0 c 1.7,-0.1 3.6,0.3 6.5,1.9 v 0 c -1.6,-2.4 -7.1,-6.2 -9.9,-7.2 v 0 c 10.5,2.6 19.2,15.9 25.7,18 v 0 c 18.3,5.9 13.8,3.4 27,14.2 v 0 c 1.6,1.3 3,1 5.1,0.8 v 0 c 1.1,-0.1 2.1,-0.3 3.2,-0.5 v 0 c 0.8,-0.2 1.4,-0.4 2.2,-0.8 v 0 l 1.8,-0.9 c -1.9,4.5 -2.3,4.1 -5.9,6 v 0 c -2.3,1.3 -3.3,3.8 -6.2,4.9 v 0 c -2.506,0.917 -4.724,-0.269 -6.538,-1.455 v 0 c -3.327,-2.177 -5.292,-4.353 -5.162,6.455 v 0 c 0.1,8 4.2,14.4 6.4,22 v 0 c 1.1,3.8 2.3,7.6 2.4,11.5 v 0 c 0.1,2.3 0,4.7 -0.4,7 v 0 c -2,11.2 -8.4,21.5 -19.7,24.8 v 0 c -0.5,0.15 -0.775,0.225 -0.9,0.225 v 0 c -0.125,0 -0.1,-0.075 0,-0.225" /></g></g></g></g></g></svg>
\ No newline at end of file
diff --git a/logos/xos.svg b/logos/xos.svg
new file mode 100644
index 0000000..9fe79a6
--- /dev/null
+++ b/logos/xos.svg
@@ -0,0 +1,290 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   viewBox="0 0 120 120"
+   height="120"
+   width="120"
+   xml:space="preserve"
+   id="svg3732"
+   version="1.1"><metadata
+     id="metadata3738"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs3736"><linearGradient
+       id="linearGradient3760"
+       spreadMethod="pad"
+       gradientTransform="matrix(96.509964,0,0,-96.509964,5.900569,29.752949)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3754"
+         offset="0"
+         style="stop-opacity:1;stop-color:#1c407d" /><stop
+         id="stop3756"
+         offset="0.65772165"
+         style="stop-opacity:1;stop-color:#027bc1" /><stop
+         id="stop3758"
+         offset="1"
+         style="stop-opacity:1;stop-color:#027bc1" /></linearGradient><clipPath
+       id="clipPath3770"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path3768"
+         d="M 0,155.192 H 323.007 V 0 H 0 Z" /></clipPath><linearGradient
+       id="linearGradient3794"
+       spreadMethod="pad"
+       gradientTransform="matrix(98.190834,0,0,-98.190834,219.32466,29.752949)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3788"
+         offset="0"
+         style="stop-opacity:1;stop-color:#027bc1" /><stop
+         id="stop3790"
+         offset="0.81819"
+         style="stop-opacity:1;stop-color:#1fc4f4" /><stop
+         id="stop3792"
+         offset="1"
+         style="stop-opacity:1;stop-color:#1fc4f4" /></linearGradient><clipPath
+       id="clipPath3804"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path3802"
+         d="M 0,155.192 H 323.007 V 0 H 0 Z" /></clipPath><linearGradient
+       id="linearGradient3832"
+       spreadMethod="pad"
+       gradientTransform="matrix(-22.080078,0,0,22.080078,153.14258,126.72021)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3826"
+         offset="0"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3828"
+         offset="0.26827305"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3830"
+         offset="1"
+         style="stop-opacity:1;stop-color:#c72026" /></linearGradient><linearGradient
+       id="linearGradient3858"
+       spreadMethod="pad"
+       gradientTransform="matrix(28.641113,0,0,-28.641113,156.55273,97.77832)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3848"
+         offset="0"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3850"
+         offset="0.32383672"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3852"
+         offset="0.503806"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3854"
+         offset="0.67696965"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3856"
+         offset="1"
+         style="stop-opacity:1;stop-color:#c72026" /></linearGradient><clipPath
+       id="clipPath3868"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path3866"
+         d="M 0,155.192 H 323.007 V 0 H 0 Z" /></clipPath><linearGradient
+       id="linearGradient3896"
+       spreadMethod="pad"
+       gradientTransform="matrix(-28.084961,0,0,28.084961,166.32178,130.81885)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3890"
+         offset="0"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3892"
+         offset="0.51719277"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3894"
+         offset="1"
+         style="stop-opacity:1;stop-color:#c72026" /></linearGradient><linearGradient
+       id="linearGradient3918"
+       spreadMethod="pad"
+       gradientTransform="matrix(22.179199,0,0,-22.179199,170.36377,101.6731)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3912"
+         offset="0"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3914"
+         offset="0.62137275"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3916"
+         offset="1"
+         style="stop-opacity:1;stop-color:#c72026" /></linearGradient><clipPath
+       id="clipPath3928"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path3926"
+         d="M 0,155.192 H 323.007 V 0 H 0 Z" /></clipPath><linearGradient
+       id="linearGradient3964"
+       spreadMethod="pad"
+       gradientTransform="matrix(-28.264648,0,0,28.264648,191.45166,124.11792)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3958"
+         offset="0"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3960"
+         offset="0.60090196"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3962"
+         offset="1"
+         style="stop-opacity:1;stop-color:#c72026" /></linearGradient><linearGradient
+       id="linearGradient3986"
+       spreadMethod="pad"
+       gradientTransform="matrix(35.230957,0,0,-35.230957,124.53809,101.16992)"
+       gradientUnits="userSpaceOnUse"
+       y2="0"
+       x2="1"
+       y1="0"
+       x1="0"><stop
+         id="stop3980"
+         offset="0"
+         style="stop-opacity:1;stop-color:#7a1214" /><stop
+         id="stop3982"
+         offset="0.97251668"
+         style="stop-opacity:1;stop-color:#c72026" /><stop
+         id="stop3984"
+         offset="1"
+         style="stop-opacity:1;stop-color:#c72026" /></linearGradient><clipPath
+       id="clipPath3996"
+       clipPathUnits="userSpaceOnUse"><path
+         id="path3994"
+         d="M 0,155.192 H 323.007 V 0 H 0 Z" /></clipPath></defs><g
+     transform="matrix(1.3333333,0,0,-1.3333333,-156.6852,222.03906)"
+     id="g3740"><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3798"><g
+         clip-path="url(#clipPath3804)"
+         id="g3800"><g
+           transform="translate(122.6123,141.0522)"
+           id="g3806"><path
+             id="path3808"
+             style="fill:#c72026;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="M 0,0 -5.001,4.953 0.067,10.07 5.068,5.117 Z" /></g><g
+           transform="translate(129.5181,134.2109)"
+           id="g3810"><path
+             id="path3812"
+             style="fill:#c72026;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="m 0,0 -5.349,5.299 5.07,5.116 5.348,-5.299 z" /></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3814"><g
+         id="g3816"><g
+           id="g3822"><g
+             id="g3824"><path
+               id="path3834"
+               style="fill:url(#linearGradient3832);stroke:none"
+               d="m 131.062,132.685 16.982,-17.017 5.098,5.087 -16.981,17.018 z" /></g></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3836"><g
+         id="g3838"><g
+           id="g3844"><g
+             id="g3846"><path
+               id="path3860"
+               style="fill:url(#linearGradient3858);stroke:none"
+               d="m 156.552,107.088 23.532,-23.694 5.11,5.075 -23.531,23.694 z" /></g></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3862"><g
+         clip-path="url(#clipPath3868)"
+         id="g3864"><g
+           transform="translate(186.6953,76.752)"
+           id="g3870"><path
+             id="path3872"
+             style="fill:#c72026;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="M 0,0 -4.939,4.962 0.165,10.043 5.104,5.081 Z" /></g><g
+           transform="translate(136.665,141.5376)"
+           id="g3874"><path
+             id="path3876"
+             style="fill:#c72026;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="m 0,0 -4.919,4.919 5.092,5.093 4.92,-4.919 z" /></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3878"><g
+         id="g3880"><g
+           id="g3886"><g
+             id="g3888"><path
+               id="path3898"
+               style="fill:url(#linearGradient3896);stroke:none"
+               d="m 138.236,139.734 23.001,-22.931 5.085,5.1 -23.001,22.932 z" /></g></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3900"><g
+         id="g3902"><g
+           id="g3908"><g
+             id="g3910"><path
+               id="path3920"
+               style="fill:url(#linearGradient3918);stroke:none"
+               d="m 170.364,107.67 17.086,-17.087 5.093,5.093 -17.086,17.087 z" /></g></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3922"><g
+         clip-path="url(#clipPath3928)"
+         id="g3924"><g
+           transform="translate(194.1509,83.8828)"
+           id="g3930"><path
+             id="path3932"
+             style="fill:#c72026;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="M 0,0 -5.031,5.031 0.062,10.124 5.093,5.093 Z" /></g><g
+           transform="translate(200.7979,77.2358)"
+           id="g3934"><path
+             id="path3936"
+             style="fill:#c72026;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="m 0,0 -5.317,5.316 5.093,5.093 5.317,-5.316 z" /></g><g
+           transform="translate(199.8418,141.5972)"
+           id="g3938"><path
+             id="path3940"
+             style="fill:#7a1214;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="M 0,0 -5.098,5.088 0.079,10.275 5.177,5.187 Z" /></g><g
+           transform="translate(193.0996,134.8418)"
+           id="g3942"><path
+             id="path3944"
+             style="fill:#7a1214;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="m 0,0 -5.098,5.088 5.075,5.084 5.098,-5.088 z" /></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3946"><g
+         id="g3948"><g
+           id="g3954"><g
+             id="g3956"><path
+               id="path3966"
+               style="fill:url(#linearGradient3964);stroke:none"
+               d="m 163.187,115.046 5.099,-5.086 23.166,23.23 -5.1,5.086 z" /></g></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3968"><g
+         id="g3970"><g
+           id="g3976"><g
+             id="g3978"><path
+               id="path3988"
+               style="fill:url(#linearGradient3986);stroke:none"
+               d="m 124.538,88.671 5.089,-5.097 30.142,30.095 -5.089,5.097 z" /></g></g></g></g><g
+       transform="translate(1.0695596,8.7703889)"
+       id="g3990"><g
+         clip-path="url(#clipPath3996)"
+         id="g3992"><g
+           transform="translate(122.6299,76.5293)"
+           id="g3998"><path
+             id="path4000"
+             style="fill:#7a1214;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             d="m 0,0 -5.116,5.069 5.097,5.145 5.116,-5.069 z" /></g></g></g></g></svg>
\ No newline at end of file
diff --git a/operating_cord/general.md b/operating_cord/general.md
index 49a06ce..10a7ec4 100644
--- a/operating_cord/general.md
+++ b/operating_cord/general.md
@@ -1,15 +1,9 @@
 # General Info
 
-CORD's operations and management interface is primarily defined by
-its Northbound API. There is typically more than one variant of this
-interface, and they are auto-generated from the models loaded into
-CORD, as described [elsewhere](../xos/README.md). Most notably:
+This section of the guide describes tools for interacting with CORD. These tools generally interact with
+CORD over a variety of APIs that are described [here](/api/api.md). Most notably:
 
 * A graphical interface is documented [here](gui.md).
 
-* A RESTful version of this API is documented [here](rest_apis.md).
+* A text shell, [XOSSH](/xos/dev/xossh.md), can be used to operate on objects in the CORD data model.
 
-* A TOSCA version is typically used to configure and provision a
-   POD. Later sections of this guide give examples of TOSCA workflows
-   used to provision and configure various [profiles](profiles.md)
-   and [services](services.md).
diff --git a/operating_cord/veth_intf.md b/operating_cord/veth_intf.md
new file mode 100644
index 0000000..4ccf526
--- /dev/null
+++ b/operating_cord/veth_intf.md
@@ -0,0 +1,112 @@
+# Manually connect containers to a network card
+
+Sometimes you may need to attach the NICs of some containers to the network cards of the machines hosting them, for example to run data plane traffic through them.
+
+Although CORD doesn't natively support this, there are some (hackish) ways to do it manually.
+
+## Create a bridge and a veth
+
+The easiest way to do this is to skip Kubernetes and directly link the Docker container to the host network interface, through a Virtual Ethernet Interface Pair (veth pair).
+
+Let's see how.
+
+For completeness, let's assume you're running a three-node Kubernetes deployment, and that you're trying to attach a container *already deployed*, called *vcore-5b4c5478f-lxrpb*, to a physical interface *eth1* that already exists on the host running your container. The virtual interface inside the container will be called *eth2*.
+
+You can get the name of the container by running
+
+```shell
+$ kubectl get pods [-n NAMESPACE]
+NAME                      READY     STATUS    RESTARTS   AGE
+vcore-5b4c5478f-lxrpb     1/1       Running   1          7d
+```
+
+Find out on which of the three nodes the container has been deployed
+
+```shell
+$ kubectl describe pod  vcore-5b4c5478f-lxrpb | grep Node
+Node:           node3/10.90.0.103
+Node-Selectors:  <none>
+```
+
+As you can see from the first line, the container has been deployed by Kubernetes on the Docker daemon running on *node3* (this is just an example), which has IP *10.90.0.103*.
+
+Let's SSH into the node and look for the specific Docker container ID
+
+```shell
+$ container_id=$(sudo docker ps | grep vcore-5b4c5478f-lxrpb | head -n 1 | awk '{print $1}')
+$ echo ${container_id}
+85fed7deea7b
+```
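+
+> **Note:** the commands below refer to the container's network namespace by name. A plain Docker install doesn't register its namespaces under `/var/run/netns`, so `ip` can't resolve them by default. A minimal (hypothetical) way to expose the namespace, reusing the container ID as its name:
+
+```shell
+container_pid=$(sudo docker inspect -f '{{.State.Pid}}' ${container_id})
+sudo mkdir -p /var/run/netns
+sudo ln -sf /proc/${container_pid}/ns/net /var/run/netns/${container_id}
+```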
+
+The interface on the hosting machine should be turned off first
+
+```shell
+sudo ip link set eth1 down
+```
+
+Create a veth pair, with one end called *veth0* and the other end being the new virtual interface *eth2*
+
+```shell
+sudo ip link add veth0 type veth peer name eth2
+```
+
+Add the virtual network interface *eth2* to the container namespace
+
+```shell
+sudo ip link set eth2 netns ${container_id}
+```
+
+Bring up the virtual interface
+
+```shell
+sudo ip netns exec ${container_id} ip link set eth2 up
+```
+
+Bring up *veth0*
+
+```shell
+sudo ip link set veth0 up
+```
+
+Create a bridge named *br1*, then add both *veth0* and the host interface *eth1* to it
+
+```shell
+sudo ip link add br1 type bridge
+sudo ip link set veth0 master br1
+sudo ip link set eth1 master br1
+```
+
+Bring the host interface and the bridge back up
+
+```shell
+sudo ip link set eth1 up
+sudo ip link set br1 up
+```
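+
+On the host, you can double-check that the bridge now enslaves both interfaces (an illustrative verification, not part of the original procedure):
+
+```shell
+ip link show master br1   # should list both veth0 and eth1
+```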
+
+At this point, you should see an additional interface *eth2* inside the container
+
+```shell
+$ kubectl exec -it vcore-5b4c5478f-lxrpb /bin/bash
+$ ip link show
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1
+    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+    link/ether c4:54:44:8f:b7:74 brd ff:ff:ff:ff:ff:ff
+3: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+    link/ether d6:84:33:2f:8c:92 brd ff:ff:ff:ff:ff:ff
+```
+
+## Cleanup (remove veth and bridge)
+
+As a follow-up to the previous example, let's now delete what has been created so far, bringing the system back to its original state.
+
+```shell
+sudo ip netns exec ${container_id} ip link set eth2 down
+sudo ip link set veth0 down
+# deleting one end of the veth pair also removes its peer (eth2) in the container
+sudo ip link delete veth0
+sudo ip link set eth1 down
+sudo ip link set br1 down
+sudo ip link delete br1 type bridge
+sudo ip link set eth1 up
+```
diff --git a/prereqs/hardware.md b/prereqs/hardware.md
index 820ff9a..a790698 100644
--- a/prereqs/hardware.md
+++ b/prereqs/hardware.md
@@ -55,6 +55,7 @@
         * OCP Accepted&trade; EdgeCore AS5712-54X
         * OCP Accepted&trade; EdgeCore AS5812-54X
         * QuantaMesh T3048-LY8
+        * Delta AG7648
     * **25G** models (with 100G uplinks)
         * QuantaMesh BMS T7032-IX1/IX1B (with 25G breakout cable)
     * **40G** models
@@ -88,6 +89,9 @@
             * AlphaNetworks PON-34000B (for more info <ed-y_chen@alphanetworks.com>)
                 * Compatible **ONU optics**
                     * Hisense/Ligent: LTF7225-BC, LTF7225-BH+
+            * Iskratel Innbox G108 (for more info <info@innbox.net>)
+                * Compatible **ONU optics**
+                    * SUNSTAR D22799-STCC, EZconn ETP69966-7TB4-I2
 
 * **M-CORD Specific Requirements**
     * **Servers**: Some components of CORD require at least a Intel XEON CPU with Haswell microarchitecture or better.
diff --git a/prereqs/k8s-multi-node.md b/prereqs/k8s-multi-node.md
index 70752fa..95791eb 100644
--- a/prereqs/k8s-multi-node.md
+++ b/prereqs/k8s-multi-node.md
@@ -17,9 +17,10 @@
 ## Requirements
 
 * **Operator/Developer Machine** (1x, either physical or virtual machine)
-    * Has Git installed
-    * Has Python3 installed (<https://www.python.org/downloads/>)
-    * Has a stable version of Ansible installed (<http://docs.ansible.com/ansible/latest/intro_installation.html>)
+    * Has the following software installed:
+        * Git
+        * Python 2 with `virtualenv` installed (used to set up Ansible and the modules needed to run the kubespray playbooks)
+        * Python 3 (for running the kubespray inventory script)
     * Is able to reach the target servers (ssh into them)
 * **Target/Cluster Machines** (at least 3x, either physical or virtual machines)
     * Run Ubuntu 16.04 server
diff --git a/prereqs/openstack-helm.md b/prereqs/openstack-helm.md
index 7627a45..aefa6f1 100644
--- a/prereqs/openstack-helm.md
+++ b/prereqs/openstack-helm.md
@@ -121,7 +121,7 @@
       ml2_type_vxlan:
         vni_ranges: 1001:2000
       onos:
-        url_path: http://onos-cord-ui.default.svc.cluster.local:8181/onos/cordvtn
+        url_path: http://onos-ui.default.svc.cluster.local:8181/onos/cordvtn
         username: onos
         password: rocks
 EOF
diff --git a/profiles/mcord/install.md b/profiles/mcord/install.md
index d97a874..bf842b4 100644
--- a/profiles/mcord/install.md
+++ b/profiles/mcord/install.md
@@ -1,15 +1,19 @@
 # M-CORD
 
+> **Note**: M-CORD is currently under maintenance.
+> Check the link below for more information:
+> [https://wiki.opencord.org/display/CORD/GPL+Issue+August+2018](https://wiki.opencord.org/display/CORD/GPL+Issue+August+2018)
+
 ## Quick Start
 
 A convenience script is provided that will install M-CORD on a single
 node, suitable for evaluation or testing.  Requirements:
 
 - An _Ubuntu 16.04.4 LTS_ server with at least 64GB of RAM and 32 virtual CPUs
-- Latest versions of released software installed on the server: `sudo apt update; sudo apt -y upgrade`
+- An up-to-date package index on the server: `sudo apt update`
 - User invoking the script has passwordless `sudo` capability
 - Open access to the Internet (not behind a proxy)
-- Google DNS servers (e.g., 8.8.8.8) are accessible
+- Public DNS servers (e.g., 8.8.8.8) are accessible
 
 ### Target server on CloudLab (optional)
 
@@ -18,7 +22,9 @@
 up for an account using your organization's email address and choose "Join
 Existing Project"; for "Project Name" enter `cord-testdrive`.
 
-> NOTE: CloudLab is supporting CORD as a courtesy. It is expected that you will not use CloudLab resources for purposes other than evaluating CORD. If, after a week or two, you wish to continue using CloudLab to experiment with or develop CORD, then you must apply for your own separate CloudLab project.
+> NOTE: CloudLab is supporting CORD as a courtesy.
+It is expected that you will not use CloudLab resources for purposes other than evaluating CORD.
+If, after a week or two, you wish to continue using CloudLab to experiment with or develop CORD, then you must apply for your own separate CloudLab project.
 
 Once your account is approved, start an experiment using the
 `OnePC-Ubuntu16.04-HWE` profile on the Wisconsin cluster. This will provide
@@ -28,8 +34,8 @@
 
 ### Convenience Script
 
-This script takes about an hour to complete.  If you run it, you can skip
-directly to [Validating the Installation](#validating-the-installation) below.
+This script takes about an hour to complete.  If you run it, you can jump
+directly to the [Validating the Installation](#validating-the-installation) section.
 
 ```bash
 mkdir ~/cord
@@ -40,126 +46,31 @@
 
 ## Prerequisites
 
-M-CORD requires OpenStack to run VNFs.  The OpenStack installation
-must be customized with the *onos_ml2* Neutron plugin.
-
-- To install Kubernetes, Helm, and a customized Openstack-Helm on a single node or a multi-node cluster, follow [this guide](../../prereqs/openstack-helm.md)
-- To configure the nodes so that VTN can provide virtual networking for OpenStack, follow [this guide](../../prereqs/vtn-setup.md)
+- Kubernetes: 1.10.0
+- Helm: v2.10.0
 
 ## CORD Components
 
-Bring up the M-CORD controller by installing the following charts in order:
+Bring up M-CORD by installing the following helm charts in order (see the example sequence after the list):
 
+- [base-kubernetes](../../charts/helm.md)
 - [xos-core](../../charts/xos-core.md)
-- [base-openstack](../../charts/base-openstack.md)
-- [onos-vtn](../../charts/onos.md#onos-vtn)
+
+> **Note:** Install xos-core with `--set xos_projectName="M-CORD"` to get the correct
+> name on the XOS web UI.
+
 - [onos-fabric](../../charts/onos.md#onos-fabric)
 - [mcord](../../charts/mcord.md)
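+
+Here is a hypothetical end-to-end install sequence, assuming a local checkout
+of the CORD `helm-charts` repository as the working directory (chart paths and
+release names may differ in your environment):
+
+```shell
+helm install -n base-kubernetes xos-profiles/base-kubernetes
+helm install -n xos-core --set xos_projectName="M-CORD" xos-core
+helm install -n onos-fabric onos-fabric
+helm install -n mcord xos-profiles/mcord
+```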
 
 ## Validating the Installation
 
-Before creating any VMs, check to see that VTN has initialized the nodes
-correctly.  On the OpenStack Helm master node run:
-
-```bash
-# password: rocks
-ssh -p 8101 onos@onos-cord-ssh.default.svc.cluster.local cordvtn-nodes
+Verify that all components have been installed by Helm:
 ```
-
-> NOTE: If the `cordvtn-nodes` command is not present, or if it does not show any nodes,
-> the most common cause is an issue with resolving the server's hostname.
-> See [this section on adding a hostname to kube-dns](../../prereqs/vtn-setup.md#dns-setup)
-> for a fix; the command should be present shortly after the hostname is added.
-
-You should see all nodes in `COMPLETE` state.
-
-> NOTE: If the node is in `INIT` state rather than `COMPLETE`, try running
-> `cordvtn-node-init <node>` and see if that resolves the issue.
-
-Next, check that the VNF images are loaded into OpenStack (they are quite large
-so this may take a while to complete):
-
-```bash
-export OS_CLOUD=openstack_helm
-openstack image list
+cord@mcord:~$ helm list
+NAME                    REVISION    UPDATED                     STATUS      CHART                     APP VERSION    NAMESPACE
+base-kubernetes         1           Mon Sep 10 19:27:45 2018    DEPLOYED    base-kubernetes-0.1.0     1.0            default
+mcord                   1           Mon Sep 10 19:32:21 2018    DEPLOYED    mcord-subscriber-2.0.0                   default
+onos-fabric             1           Mon Sep 10 19:47:05 2018    DEPLOYED    onos-mwc-0.1.0            1.0            default
+vepcservice             1           Mon Sep 10 19:30:29 2018    DEPLOYED    vepcservice-1.0.0                        default
+xos-core                1           Mon Sep 10 19:25:19 2018    DEPLOYED    xos-core-2.1.0-dev                       default
 ```
-
-You should see output like the following:
-
-```text
-+--------------------------------------+-----------------------------+--------+
-| ID                                   | Name                        | Status |
-+--------------------------------------+-----------------------------+--------+
-| b648f563-d9a2-4770-a6d8-b3044e623366 | Cirros 0.3.5 64-bit         | active |
-| 4287e01f-93b5-497f-9099-f526cb2044ac | image_hss_v0.1              | active |
-| e82e459c-27b4-417e-9f95-19ba3cc3fd9d | image_hssdb_v0.1            | active |
-| c62ab4ce-b95b-4e68-a708-65097c7bbe46 | image_internetemulator_v0.1 | active |
-| f2166c56-f772-4614-8bb5-cb848f9d23e3 | image_mme_v0.1              | active |
-| 472b7f9a-f2be-4c61-8085-8b0d37182d32 | image_sdncontroller_v0.1    | active |
-| 7784877f-e45c-4b1a-9eac-478efdb368cc | image_spgwc_v0.1            | active |
-| b9e2ec93-3177-458b-b3b2-c5c917f2fbcd | image_spgwu_v0.1            | active |
-+--------------------------------------+-----------------------------+--------+
-```
-
-To create a virtual EPC, on the master node run:
-
-```bash
-sudo apt install httpie
-http -a admin@opencord.org:letmein POST http://xos-gui.default.svc.cluster.local:4000/xosapi/v1/vepc/vepcserviceinstances blueprint=mcord_5 site_id=1
-```
-
-Check that the networks are created:
-
-```bash
-export OS_CLOUD=openstack_helm
-openstack network list
-```
-
-You should see output like the following:
-
-```text
-+--------------------------------------+--------------------+--------------------------------------+
-| ID                                   | Name               | Subnets                              |
-+--------------------------------------+--------------------+--------------------------------------+
-| 0bc8cb20-b8c7-474c-a14d-22cc4c49cde7 | s11_network        | da782aac-137a-45ae-86ee-09a06c9f3e56 |
-| 5491d2fe-dcab-4276-bc1a-9ab3c9ae5275 | management         | 4037798c-fd95-4c7b-baf2-320237b83cce |
-| 65f16a5c-f1aa-45d9-a73f-9d25fe366ec6 | s6a_network        | f5804cba-7956-40d8-a015-da566604d0db |
-| 6ce9c7e9-19b4-45fd-8e23-8c55ad84a7d7 | spgw_network       | 699829e1-4e67-46a7-af2d-c1fc72ba988e |
-| 87ffaaa3-e2a9-4546-80fa-487a256781a4 | flat_network_s1u   | 288d6a8c-8737-4e0e-9472-c869ba3e7c92 |
-| 8ec59660-4751-48de-b4a3-871f4ff34d81 | db_network         | 6f14b420-0952-4292-a9f2-cfc8b2d6938e |
-| d63d3490-b527-4a99-ad43-d69412b315b9 | sgi_network        | b445d554-1a47-4f3b-a46d-1e15a01731c0 |
-| dac99c3e-3374-4b02-93a8-994d025993eb | flat_network_s1mme | 32dd201c-8f7f-4e11-8c42-4f05734f716a |
-+--------------------------------------+--------------------+--------------------------------------+
-```
-
-Check that the VMs are created (it will take a few minutes for them to come up):
-
-```bash
-export OS_CLOUD=openstack_helm
-openstack server list --all-projects
-```
-
-You should see output like the following:
-
-```text
-+--------------------------------------+-----------------+--------+----------------------------------------------------------------------------------------------------+------------------+-----------+
-| ID                                   | Name            | Status | Networks                                                                                           | Image            | Flavor    |
-+--------------------------------------+-----------------+--------+----------------------------------------------------------------------------------------------------+------------------+-----------+
-| 7e197142-afb1-459d-b421-cad91306d19f | mysite_vmme-2   | ACTIVE | s6a_network=120.0.0.9; flat_network_s1mme=118.0.0.5; management=172.27.0.15; s11_network=112.0.0.2 | image_mme_v0.1   | m1.large  |
-| 9fe385f5-a064-40e0-94d3-17ea87b955fc | mysite_vspgwu-1 | ACTIVE | management=172.27.0.5; sgi_network=115.0.0.3; spgw_network=117.0.0.3; flat_network_s1u=119.0.0.2   | image_spgwu_v0.1 | m1.xlarge |
-| aa6805fe-3d72-4f1e-a2eb-5546d7916073 | mysite_hssdb-5  | ACTIVE | management=172.27.0.13; db_network=121.0.0.12                                                      | image_hssdb_v0.1 | m1.large  |
-| e53138ed-2893-4073-9c9a-6eb4aa1892f1 | mysite_vhss-4   | ACTIVE | s6a_network=120.0.0.2; management=172.27.0.4; db_network=121.0.0.5                                 | image_hss_v0.1   | m1.large  |
-| 4a5960b5-b5e4-4777-8fe4-f257c244f198 | mysite_vspgwc-3 | ACTIVE | management=172.27.0.7; spgw_network=117.0.0.8; s11_network=112.0.0.4                               | image_spgwc_v0.1 | m1.large  |
-+--------------------------------------+-----------------+--------+----------------------------------------------------------------------------------------------------+------------------+-----------+
-```
-
-Log in to the XOS GUI and verify that the service synchronizers have run.  The
-GUI is available at URL `http:<master-node>:30001` with username
-`admin@opencord.org` and password `letmein`.  Verify that the status of all
-ServiceInstance objects is `OK`.
-
-> NOTE: If you see a status message of `SSH Error: data could not be sent to
-> remote host`, the most common cause is the inability of the synchronizers to
-> resolve the server's hostname.  See [this section on adding a hostname to
-> kube-dns](../../prereqs/vtn-setup.md#dns-setup) for a fix; the issue should
-> resolve itself after the hostname is added.
diff --git a/profiles/rcord/celestica-olt-setup.md b/profiles/rcord/celestica-olt-setup.md
new file mode 100644
index 0000000..cab0435
--- /dev/null
+++ b/profiles/rcord/celestica-olt-setup.md
@@ -0,0 +1,68 @@
+# Celestica OLT setup
+
+Celestica provides a GPON-based OLT device that can work with Voltha and CORD.
+The OLT (also known as Microsemi OLT) model is called *CLS Ruby S1010*. For more info on the hardware models and the supported optics, look at the [recommended hardware page](../../prereqs/hardware.md#recommended-hardware).
+
+The following guide explains how to integrate the Celestica OLT with Voltha, and more generally with CORD.
+
+## OLT hardware notes and ports configuration
+
+The OLT has two rows of ports. The upper ports are master ports and should be used first; the lower ports act as backups for the ports above them.
+The OLT has 48 UNI ports (24 masters and 24 backups) and 6 NNI ports (3 masters and 3 backups).
+The UNI ports are divided into three PON groups, and each PON group is associated with an NNI port: the rightmost NNI port serves the leftmost PON group, and so on.
+Each PON group is divided into 4 PONs, each made of four PON ports (the two upper ones are masters, the two lower ones are backups). Each port can support up to 32 connections.
+Each PON (each pair of vertically grouped ports, one master and one backup) will appear as a separate OLT device in Voltha.
+
+## How to manage the OLT (access the CLI)
+
+As far as we know, the out-of-band OLT management port is disabled by default, and the OLT can be managed (including by Voltha) only in-band.
+Also, the OLT is managed as an L2 device by Voltha; as such, no IP addresses can be assigned to the OLT. The OLT doesn't need any specific configuration, but you may still want to access the CLI for debugging purposes. The CLI can be accessed through the console port.
+
+## OLT pre-installed software notes
+
+The Celestica box should come with ONIE and its own OS pre-installed. No additional configuration is required.
+
+## Get the OLT MAC addresses
+
+The MAC addresses of the OLTs are needed to perform a successful Voltha configuration. To get the OLT MAC address, from the OLT CLI type:
+
+```shell
+/voltapp/onie-syseeprom
+```
+
+The command only shows the MAC address of the first OLT (the first pair of ports from the left). To derive the MAC addresses of the other OLTs, add 1 to the first MAC address for each subsequent pair of ports. For example, the MAC address of the second OLT (the second pair of vertical ports from the left) is the MAC address returned by the command above, plus 1. A short sketch of this arithmetic follows.
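+
+For quick reference, here is a small bash sketch of that arithmetic (the base MAC is an example, and the helper is purely illustrative):
+
+```shell
+base_mac="11:22:33:44:55:66"   # MAC of the first OLT, from onie-syseeprom
+n=1                            # offset: 1 selects the second OLT, 2 the third, ...
+printf '%012x\n' $(( 16#$(echo ${base_mac} | tr -d ':') + n )) | sed 's/../&:/g; s/:$//'
+```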
+
+## Discover the OLT in Voltha
+
+Once the MAC address is known, pre-provision the OLT from Voltha
+
+```shell
+preprovision_olt --device-type=microsemi_olt --mac-address=11:22:33:44:55:66
+```
+
+where *11:22:33:44:55:66* is the MAC address of your OLT device.
+
+Then, enable the OLT by typing
+
+```shell
+enable
+```
+
+Voltha will start sending L2 packets to the OLT until the device is discovered.
+
+> **NOTE:** at the moment, the microsemi_olt adapter sends only a few packets to the OLT box after the *enable* command has been issued. Recently, a *reboot* command has been added to this adapter; the command restarts the provisioning process.
+
+## Celestica OLT and R-CORD
+
+As mentioned, the Celestica OLT can be used with Voltha, and so in principle with R-CORD as well. At the moment, this requires some additional configuration to allow in-band management communication between Voltha and the OLT.
+
+Since in-band communication happens via L2 MAC addresses, the NNI port of the OLT needs an L2 connection with Voltha.
+
+More specifically, in a typical CORD deployment Voltha runs as a set of containers managed by Kubernetes, which in turn runs on a (physical or virtual) machine. This machine is usually connected to the management network only. In a deployment using Celestica boxes, instead, the server running Voltha will need an extra connection to the data plane (usually to the CORD fabric switches).
+
+Of course, the OLT NNI port needs to be connected to the same fabric switch as well.
+If both the OLT and the server running Voltha are connected to the same fabric switch, a path needs to be provisioned between the two. This can be achieved, for example, in the CORD fabric using Trellis, through the configuration of a VLAN cross-connect or a pseudo-wire (a hypothetical sketch follows).
+
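+For reference, VLAN cross-connects in Trellis are configured through the ONOS network configuration. A hypothetical sketch using the ONOS REST API (device ID, ports, VLAN, and credentials are examples; the exact schema depends on your ONOS/Trellis version):
+
+```shell
+curl -u onos:rocks -X POST -H "Content-Type: application/json" \
+  http://<onos-ip>:8181/onos/v1/network/configuration/ -d '{
+  "apps": {
+    "org.onosproject.segmentrouting": {
+      "xconnect": {
+        "of:0000000000000001": [{ "vlan": 100, "ports": [1, 2], "name": "olt-to-voltha" }]
+      }
+    }
+  }
+}'
+```
+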
+Furthermore, the Voltha vcore container (called voltha in the Kubernetes-based deployment) should be connected to the data plane port that faces the OLT, which is a trivial but manual operation. The steps to connect containers to server ports can be found in the [veth interface configuration guide](../../operating_cord/veth_intf.md).
+
+> **NOTE:** the Celestica OLT is known to work *only* with the top-down R-CORD configuration workflow.
diff --git a/profiles/rcord/configuration.md b/profiles/rcord/configuration.md
index d2cff9a..67f59ee 100644
--- a/profiles/rcord/configuration.md
+++ b/profiles/rcord/configuration.md
@@ -223,207 +223,3 @@
 
 For instructions on how to push TOSCA into a CORD POD, please
 refer to this [guide](../../xos-tosca/README.md).
-
-## Zero-Touch Subscriber Provisioning
-
-This feature, also referred to as "bottom-up" provisioning,
-enables auto-discovery of subscribers and validates them
-using an external OSS.
-
-The expected workflow is as follows:
-
-- When an ONU is attached to the POD, VOLTHA will discover it and send
-   an event to XOS
-- XOS receives the ONU activation event and through an OSS proxy
-   queries the upstream OSS to validate wether that ONU has a valid serial number
-- Once the OSS has approved the ONU, XOS will create `ServiceInstance`
-  chain for this particular subscriber and configure the POD to enable connectivity
-
-To enable the zero-touch provisioning feature, you will need to deploy
-and configure some extra pieces into the system before attaching
-subscribers:
-
-### Deploy Kafka
-
-To enable this feature XOS needs to receive events from `onos-voltha`,
-so a kafka bus needs to be deployed.
-To deploy Kafka, please follow these [instructions](../../charts/kafka.md)
-
-### Deploy OSS Proxy
-
-This is the piece of code that is responsible to connecting CORD to an
-external OSS Database. As a simple reference, we provide a sample
-implemetation, available here:
-[hippie-oss](https://github.com/opencord/hippie-oss)
-
-> **Note:** This implementation currently validates any subscriber that comes online.
-
-To deploy the `hippie-oss` service you can look [here](../../charts/hippie-oss.md).
-
-Once the chart has come online, you will need to add the Hippie-OSS service
-to your service graph. You can use the following TOSCA to do that:
-
-```yaml
-tosca_definitions_version: tosca_simple_yaml_1_0
-imports:
-  - custom_types/hippieossservice.yaml
-  - custom_types/servicedependency.yaml
-  - custom_types/voltservice.yaml
-description: Create an instance of the OSS Service and connect it to the vOLT Service
-topology_template:
-  node_templates:
-
-    # Reference the VOLTService
-    service#volt:
-      type: tosca.nodes.VOLTService
-      properties:
-        name: volt
-        must-exist: true
-
-    # Reference the HippieOSSService
-    service#oss:
-      type: tosca.nodes.HippieOSSService
-      properties:
-        name: hippie-oss
-        kind: oss
-        # blacklist: BRCM1234, BRCM4321 # this is an optional list of ONUs that you don't want to validate
-
-    # Create a ServiceDependency between the two
-    service_dependency#oss_volt:
-      type: tosca.nodes.ServiceDependency
-      properties:
-        connect_method: None
-      requirements:
-        - subscriber_service:
-            node: service#oss
-            relationship: tosca.relationships.BelongsToOne
-        - provider_service:
-            node: service#volt
-            relationship: tosca.relationships.BelongsToOne
-```
-
-For instructions on how to push TOSCA into a CORD POD, please
-refer to this [guide](../../xos-tosca/README.md).
-
-### Know issues
-
-There is a set of issue that we have seen from time to time.
-They are currently undergoing a deeper investigation, but here is how to identify
-and correct them.
-
-#### OLTDevices are not pushed to VOLTHA
-
-If you have configured OLTDevices in XOS and you are not seeing them in VOLTHA,
-you should check the vOLT-synchronizer logs:
-
-```shell
-kubectl logs -f $(kubectl get pods | grep volt- | awk '{print $1}')
-```
-
-If the logs are not moving, restart the synchronizer:
-
-```shell
-kubectl delete pod $(kubectl get pods | grep volt- | awk '{print $1}')
-```
-
-#### ONU Activate events are not received by XOS
-
-Once OLTs have been activated, XOS should receive events and create
-`HippieOssServiceInstance` models for each event.
-
-To check if XOS has been received from XOS you can use this command:
-
-```shell
-kubectl logs -f $(kubectl get pods | grep volt- | awk '{print $1}') | grep "onu.event"
-```
-
-If the events have been received you should see something like:
-
-```shell
-Processing event               msg=ConsumerRecord(topic=u'onu.events', partition=0, offset=42, timestamp=1530296353275, timestamp_type=0, key=None, value='{"timestamp":1530296353275,"status":"activated","serial_number":"ALPHe3d1cfde","uni_port_id":48,"of_dpid":"of:000000000a5a0097"}', checksum=100437027, serialized_key_size=-1, serialized_value_size=128) step=ONUEventStep
-onu.events: received event     value={u'status': u'activated', u'timestamp': 1530296353275, u'uni_port_id': 48, u'of_dpid': u'of:000000000a5a0097', u'serial_number': u'ALPHe3d1cfde'}
-onu.events: activate onu       value={u'status': u'activated', u'timestamp': 1530296353275, u'uni_port_id': 48, u'of_dpid': u'of:000000000a5a0097', u'serial_number': u'ALPHe3d1cfde'}
-onu.events: Calling OSS for ONUDevice with serial_number ALPHe3d1cfde
-```
-
-If you don't see that, you can force ONOS-VOLTHA to send the events again.
-
-Connect to the ONOS-VOLTHA CLI:
-
-```shell
-ssh karaf@<pod-ip> -p $(kubectl get svc -n voltha | grep -i onos-voltha-ssh |  awk '{print substr($5,6,5)}')
-```
-Remove the device:
-
-```shell
-onos> device-remove <device-ofid>
-```
-
-The device will be automatically discovered again, and events are sent.
-
-##### Check if the events appears in Kafka
-
-If you are still not seeing the events in XOS, you can check if they appear into Kafka.
-
-To do that `exec` into any synchronizer container:
-
-```shell
-kubectl exec -it $(kubectl get pods | grep volt- | awk '{print $1}') bash
-apt-get update
-apt-get install kafkacat -y
-kafkacat -b cord-kafka.default.svc.cluster.local:9092 -t onu.events
-```
-
-If the events have reached `kafka` you should see this message:
-
-```shell
-{"timestamp":1530301582776,"status":"activated","serial_number":"ALPHe3d1cfde","uni_port_id":48,"of_dpid":"of:000000000a5a0097"}
-```
-
-If they have not you are probably missing the correct configuration in ONOS-VOLTHA,
-to verify that, ssh into ONOS-VOLTHA:
-
-```shell
-ssh karaf@<pod-ip> -p $(kubectl get svc -n voltha | grep -i onos-voltha-ssh |  awk '{print substr($5,6,5)}')
-```
-
-and check if there is any configuration for the `org.opencord.olt` app:
-
-```shell
-onos> netcfg apps org.opencord.olt
-{
-  "kafka" : {
-    "bootstrapServers" : "cord-kafka.default.svc.cluster.local:9092"
-  }
-}
-```
-
-If you don't have that configuration, you can resubmit it by going into the XOS-GUI,
-search for `ServiceInstanceAttributes` and save the configuration again.
-
-#### Subscribers are not created as a result of an ONU Activate event
-
-If you see `HippieOssServiceInstances` but you don't see any `Subscriber` in XOS,
-you should check if the model policies for the `HippieOssServiceInstances` have been executed:
-
-```shell
-kubectl logs -f $(kubectl get pods | grep hippie- | awk '{print $1}') | grep -i "model_policy"
-```
-
-I this is the only line you are seeing:
-
-```shell
-Loaded model policies          policies=[<class 'model_policy_hippieossserviceinstance.OSSServiceInstancePolicy'>]
-```
-
-Go to the XOS-GUI, search for `HippieOssServiceInstances` and save them again.
-You should see the above command print more logs and `Subscriber`s beeing created.
-
-#### ONU Ports are down
-
-If everything is correctly configured across the POD, but you can't ping
-the gateway from your client, it's possible that the ports on the ONU are not up.
-
-Assuming that your client is connected to the OLT via the interface `eth1`,
-you can check if the client sees the port as up or not. If the port is not up,
-reboot the ONU.
diff --git a/profiles/rcord/install.md b/profiles/rcord/install.md
index 7f260bd..3b23a4b 100644
--- a/profiles/rcord/install.md
+++ b/profiles/rcord/install.md
@@ -19,23 +19,18 @@
 first step to bringing up R-CORD is to install the
 [VOLTHA helm chart](../../charts/voltha.md).
 
-## Install CORD Platform
+## Install ONOS
 
-The R-CORD profile has dependencies on the following platform
-charts, so they need to be installed next:
+Install [onos](../../charts/onos.md#onos-manages-fabric--voltha).
+It will manage both Voltha and the fabric infrastructure.
 
-- [xos-core](../../charts/xos-core.md)
-- [onos-fabric](../../charts/onos.md#onos-fabric)
-- [onos-voltha](../../charts/onos.md#onos-voltha)
+## Install XOS
 
-## Install R-CORD Profile
+The R-CORD profile requires the orchestrator, [xos-core](../../charts/xos-core.md), to be installed.
 
-You are now ready to install the R-CORD profile:
+## Install the R-CORD Profile
 
-```shell 
-helm dep update xos-profiles/rcord-lite
-helm install -n rcord-lite xos-profiles/rcord-lite
-```
+You are now ready to install the [R-CORD profile](../../charts/rcord.md).
 
 Optionally, if you want to use the "bottom up" subscriber provisioning
 workflow described in the [Operations Guide](configuration.md), you
diff --git a/profiles/rcord/workflows/att.md b/profiles/rcord/workflows/att.md
new file mode 100644
index 0000000..90a2556
--- /dev/null
+++ b/profiles/rcord/workflows/att.md
@@ -0,0 +1,161 @@
+# AT&T Workflow
+
+You can find a complete description of the SEBA workflow for AT&T in [this document](https://docs.google.com/document/d/1nou2c8AsRzhaDJmA_eYvFgd0Y33KiCsioveU77AOVCI/edit#heading=h.x73smxj2xaib). This page focuses exclusively on the internal details of the workflow, such as actions triggered by the environment and decisions taken by NEM.
+
+## Helm charts
+
+To replicate this workflow you'll need to install:
+
+- [xos-core](../../../charts/xos-core.md)
+- [cord-kafka](../../../charts/kafka.md)
+- [voltha](../../../charts/voltha.md)
+- [onos](../../../charts/onos.md#generic-onos)
+- att-workflow
+
+### Install the `att-workflow` chart
+
+```shell
+helm dep update xos-profiles/att-workflow
+helm install -n att-workflow xos-profiles/att-workflow
+```
+
+## Workflow description
+
+1. ONT discovered bottom-up
+2. If the ONT serial number is not allowed or unknown (i.e., it has NOT been provisioned by OSS), disable the ONT; generate an event to the external OSS that an ONU has been discovered but not yet provisioned.
+3. When OSS provisions the ONT, re-enable it & program 802.1x flow - UNI port(s) will be UP
+4. Ensure that DHCP fails here (because subscriber/service-binding has not been provisioned by OSS yet)
+5. 802.1x EAPOL message happens from RG, and the ONOS AAA app adds options and sends it to the RADIUS server. Options are pulled from Sadis/NEM - no subscriber information is required here
+6. If RG authentication fails, allow it to keep trying (in the future consider redirection to captive / self-help portal). DHCP should not succeed since RG authentication has failed
+7. If RG authentication succeeds, ONOS AAA app notifies via an event on the kafka bus that authentication has succeeded
+8. NEM can listen for the event, and then check to see if subscriber/service-binding has happened on that port from OSS - if not, then nothing to be done
+9. Must ensure that DHCP fails here even though RG has been authenticated (because subscriber/service-binding has not been provisioned by OSS yet)
+10. When OSS provisions the subscriber/service-binding on the UNI port and gives the C and S vlan info, then DHCP trap will be programmed on the port, and DHCP process can start
+11. If RG is disconnected from the UNI port, force authentication again (even if subscriber/service-binding has been provisioned by OSS). Upon reconnection to the UNI port, RG must re-authenticate before DHCP/other traffic can flow on the provisioned VLANs.
+12. DHCP L2 relay -> add option 82, learn public IP address, forward via dataplane to external DHCP server
+
+
+This schema summarizes the workflow; please note:
+
+- in `light blue` are environment events (whether they are triggered by hardware or by an operator)
+- in `yellow` are NEM configuration calls to ONOS or VOLTHA
+- in `green` are decisions
+- in `orange` are events published on the kafka bus
+
+![att-workflow](../../../images/att_workflow.png)
+
+> NOTE: when we refer to `service chain` we are talking about the set of
+subscriber-specific service instances that will trigger the `add_subscriber`
+call in ONOS-VOLTHA and provision the crossconnect in ONOS-FABRIC
+
+## Operations
+
+We assume your POD is already configured as per [these instructions](../configuration.md)
+(you need to complete only the first section)
+
+### Whitelist population
+
+To configure the ONU whitelist, you can use this TOSCA:
+
+```yaml
+tosca_definitions_version: tosca_simple_yaml_1_0
+imports:
+  - custom_types/attworkflowdriverwhitelistentry.yaml
+  - custom_types/attworkflowdriverservice.yaml
+description: Create an entry in the whitelist
+topology_template:
+  node_templates:
+
+    service#att:
+      type: tosca.nodes.AttWorkflowDriverService
+      properties:
+        name: att-workflow-driver
+        must-exist: true
+
+    whitelist:
+      type: tosca.nodes.AttWorkflowDriverWhiteListEntry
+      properties:
+        serial_number: BRCM22222222
+        pon_port_id: 536870912
+        device_id: of:000000000a5a0072
+      requirements:
+        - owner:
+            node: service#att
+            relationship: tosca.relationships.BelongsToOne
+```
+
+For instructions on how to push TOSCA into a CORD POD, please
+refer to this [guide](../../../xos-tosca/README.md).
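+
+As a quick example, assuming the `xos-tosca` service is exposed on its default
+NodePort (30007) and the default credentials are unchanged (both are
+deployment-specific), the whitelist above could be pushed with:
+
+```shell
+curl -H "xos-username: admin@opencord.org" \
+     -H "xos-password: letmein" \
+     -X POST --data-binary @whitelist.yaml \
+     http://<pod-ip>:30007/run
+```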
+
+### Pre-provision subscribers
+
+You can `pre-provision` subscribers using this TOSCA:
+
+```yaml
+tosca_definitions_version: tosca_simple_yaml_1_0
+imports:
+  - custom_types/rcordsubscriber.yaml
+
+description: Pre-provision a subscriber
+
+topology_template:
+  node_templates:
+
+    # Pre-provision the subscriber
+    my_house:
+      type: tosca.nodes.RCORDSubscriber
+      properties:
+        name: My House
+        status: pre-provisioned
+        c_tag: 111
+        onu_device: BRCM22222222
+        nas_port_id : "PON 1/1/03/1:1.1.1"
+        circuit_id: foo
+```
+
+For instructions on how to push TOSCA into a CORD POD, please
+refer to this [guide](../../../xos-tosca/README.md).
+
+### OLT Activation
+
+Once the system knows about whitelisted ONUs and subscribers,
+you can activate the OLT:
+
+```yaml
+tosca_definitions_version: tosca_simple_yaml_1_0
+imports:
+  - custom_types/oltdevice.yaml
+  - custom_types/voltservice.yaml
+description: Create a simulated OLT Device in VOLTHA
+topology_template:
+  node_templates:
+
+    service#volt:
+      type: tosca.nodes.VOLTService
+      properties:
+        name: volt
+        must-exist: true
+
+    olt_device:
+      type: tosca.nodes.OLTDevice
+      properties:
+        name: ONF OLT
+        device_type: openolt
+        host: 10.90.0.114
+        port: 9191
+        switch_datapath_id: of:0000000000000001
+        switch_port: "1"
+        outer_tpid: "0x8100"
+        uplink: "128"
+      requirements:
+        - volt_service:
+            node: service#volt
+            relationship: tosca.relationships.BelongsToOne
+```
+
+For instructions on how to push TOSCA into a CORD POD, please
+refer to this [guide](../../../xos-tosca/README.md).
+
+### Device monitoring
+
+Please refer to the [monitoring](../../../charts/logging-monitoring.md) chart.
diff --git a/styles/website.css b/styles/website.css
new file mode 100644
index 0000000..0957a64
--- /dev/null
+++ b/styles/website.css
@@ -0,0 +1,14 @@
+.page-inner {
+    max-width: 85% !important;
+}
+
+.page-toc {
+    max-width: 400px;
+    margin-left: 10px;
+}
+
+.page-toc li {
+    list-style: disc;
+    text-indent: -1.5em;
+    padding-left: 1.5em;
+}
\ No newline at end of file
diff --git a/versioning.md b/versioning.md
index dd1a00c..1c821b2 100644
--- a/versioning.md
+++ b/versioning.md
@@ -12,8 +12,9 @@
 [PEP440](https://www.python.org/dev/peps/pep-0440/) syntax (`.dev#` instead of
 `-dev#`) if using Python code are the recommended formats.
 
-To avoid confusion, all components that existed prior to 6.0 started their
-independent versioning at version `6.0.0`.
+As this is the first time that many of these components received a version
+number, most of them started with version `1.0.0`, except for the `xos` core,
+which started at `2.0.0`.
 
 ## CORD Releases
 
@@ -48,6 +49,58 @@
    version string, and Docker images with the tag will be built and sent to
    Docker Hub.
 
+## How to create a new CORD release
+
+For 6.0 and later releases, please follow these steps to create a new release.
+
+### Pre-release steps
+
+Per the instructions above, create versioned releases of all the individual
+components, primarily the docker images.
+
+In the `helm-charts` repo, check that all charts use only released versions of
+container images (no `master` or `candidate` tags), and that the `Chart.yaml`
+has the correct *new* `version` field set for the release.
+
+Test the release with this version of the charts, making sure to not override
+the image versions.
+
+### Release steps
+
+Update the `VERSION` file on both the `automation-tools` and `helm-charts`
+repos then commit and merge the patch. Jenkins will then create git tags from
+the contents of the `VERSION` file.
+
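+For example (the version number and commit message below are illustrative):
+
+```shell
+echo "6.1.0" > VERSION
+git add VERSION
+git commit -m "Releasing version 6.1.0"
+git review   # submit to Gerrit; tags are created after the patch merges
+```
+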
+Create a branch in Gerrit using the form `cord-#.#` on those repos starting
+with the new tagged commit.
+
+In the `docs` project, on the `master` branch add an entry to the `book.json`
+to add the new branch, and create a directory on the `guide.opencord.org` site
+for the branch docs.  Create a `cord-#.#` branch on the `docs` repo in Gerrit,
+then update the `git_refs` file within that repo with the commits that have
+the most pertinent documentation.  The `make freeze` command can help generate
+the contents of the `git_refs` file by listing the current git `HEAD` of each
+repo.
+
+Create a `cord-#.#` branch on the `manifest` repo, setting the `revision`
+attribute to `cord-#.#` on the `automation-tools`, `docs`, and `helm-charts`
+projects (and any other projects that have that patch branch).
+
+### Updates to a released branch
+
+Patches to individual components are generally done on the `master` branch of
+their respective projects and given component versions.
+
+Once released, these component versions are integrated into charts, the
+`version` field of the `Chart.yaml` for the chart is updated, and a new point
+release of the `helm-charts` repo is created on the `cord-#.#` branch.
+
+All patches on a released branch of the `helm-charts` and `automation-tools`
+repos should be SemVer release versions, not development versions.
+
+Documentation updates are less strict, with changes within the repo or to
+`git_refs` happening on the patch branch as needed.
+
 ## Details of the release process
 
 To create a new version, the version string is updated in the language or
@@ -64,10 +117,10 @@
 version string in the `VERSION` file, Jenkins jobs have been put in place to
 prevent this from happening. The implementation is as follows:
 
-- When a patchset is submitted to Gerrit, the `tag-collision-reject` Jenkins
-  job runs. This checks that the version string in `VERSION` is does not
-  already exist as a git tag, and rejects any patchsets that have duplicate
-  released versions. It ignores development and non-SemVer version strings.
+- When a patchset is submitted to Gerrit, the `tag-collision` Jenkins job runs.
+  This checks that the version string in `VERSION` does not already exist as
+  a git tag, and rejects any patchsets that have duplicate released versions.
+  It ignores development and non-SemVer version strings.
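+
+  Locally, you can approximate that check with something like the following
+  (illustrative only):
+
+  ```shell
+  VERSION=$(cat VERSION)
+  if git rev-parse -q --verify "refs/tags/${VERSION}" >/dev/null; then
+    echo "ERROR: version ${VERSION} already exists as a tag" && exit 1
+  fi
+  ```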
 
   This job also checks that if a released version number is used, any
   Dockerfile parent images are also using a fixed parent version, to better