Merge "Adding swap removal and different user support to Kubespray inst docs"
diff --git a/Makefile b/Makefile
index 4c5d630..b5c7dd9 100644
--- a/Makefile
+++ b/Makefile
@@ -20,7 +20,7 @@
 build: setup
 	gitbook build
 
-setup: automation-tools cord-tester openstack fabric hippie-oss kubernetes-service olt-service onos-service openolt rcord vrouter xos xos-gui xos-tosca swagger $(GENERATED_DOCS)
+setup: automation-tools cord-tester simpleexampleservice openstack fabric hippie-oss kubernetes-service olt-service onos-service openolt rcord vrouter xos xos-gui xos-tosca swagger $(GENERATED_DOCS)
 	gitbook init
 	gitbook install
 
@@ -71,6 +71,9 @@
 openstack:
 	ln -s ../orchestration/xos_services/openstack/docs openstack
 
+simpleexampleservice:
+	ln -s ../orchestration/xos_services/simpleexampleservice/docs simpleexampleservice
+
 xos:
 	ln -s ../orchestration/xos/docs xos
 
@@ -87,4 +90,4 @@
 	rm -rf $(GENERATED_DOCS)
 	rm -rf _book
 	rm -rf node_modules
-	rm -rf openstack automation-tools cord-tester fabric hippie-oss kubernetes-service olt-service onos-service openolt rcord vrouter test xos xos-gui xos-tosca
+	rm -rf openstack automation-tools cord-tester fabric hippie-oss kubernetes-service olt-service onos-service openolt rcord vrouter test xos xos-gui xos-tosca simpleexampleservice
diff --git a/README.md b/README.md
index ffadeed..87b8637 100644
--- a/README.md
+++ b/README.md
@@ -1,35 +1,15 @@
 # Installation Guide
 
-This guide describes how to install CORD.
-
-## Prerequisites
-
-Start by satisfying the following prerequisites:
-
-* [Hardware Requirements](./prereqs/hardware.md)
-* [Connectivity Requirements](./prereqs/networking.md)
-* [Software Requirements](./prereqs/software.md)
-
-## Deploy CORD
-
-The next step is select the configuration (profile) you want to
-install:
+This guide describes how to install CORD. It identifies a set of
+[prerequisites](prereqs/README.md), and then walks through
+the steps involved in bringing up one of two CORD profiles:
 
 * [R-CORD](./profiles/rcord/install.md)
 * [M-CORD](./profiles/mcord/install.md)
 
-## Additional Information
+If you are anxious to jump straight to a [Quick Start](quickstart.md)
+procedure that brings up an emulated version of CORD running
+on your laptop (sorry, no subscriber data plane), then that's an option.
 
-The following are optional steps you may want to take
-
-### Offline Installation
-
-If your environment does not permit connecin your POD to ther public
-Internet, you may want to take advantage of a local Docker registery.
-The following [registry setup](./prereqs/docker-registry.md) will help.
-
-### OpenStack Installation
-
-If you need OpenStack included in your deployment, so you can bring up
-VMs on your POD, you will need to following the following
-[OpenStack deployment](./prereqs/openstack-helm.md) guide.
+Alternatively, if you want to get a broader lay-of-the-land, you
+might step back and start with an [Overview](overview.md).
diff --git a/SUMMARY.md b/SUMMARY.md
index 598619f..2950f39 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -1,17 +1,21 @@
 # Summary
 
 * [Overview](overview.md)
+    * [Navigating the Guide](navigate.md)
+    * [Quick Start](quickstart.md)
 * [Installation Guide](README.md)
-    * [Hardware Requirements](prereqs/hardware.md)
-    * [Connectivity Requirements](prereqs/networking.md)
-    * [Software Requirements](prereqs/software.md)
-        * [Kubernetes](prereqs/kubernetes.md)
-            * [Single Node](prereqs/k8s-single-node.md)
-            * [Multi-Node](prereqs/k8s-multi-node.md)
-        * [Helm](prereqs/helm.md)
-        * [Docker Registry (optional)](prereqs/docker-registry.md)
-        * [OpenStack (optional)](prereqs/openstack-helm.md)
-    * [Fabric Setup](prereqs/fabric-setup.md)
+    * [Prerequisites](prereqs/README.md)
+        * [Hardware Requirements](prereqs/hardware.md)
+        * [Connectivity Requirements](prereqs/networking.md)
+        * [Software Requirements](prereqs/software.md)
+            * [Kubernetes](prereqs/kubernetes.md)
+                * [Single Node](prereqs/k8s-single-node.md)
+                * [Multi-Node](prereqs/k8s-multi-node.md)
+            * [Helm](prereqs/helm.md)
+            * [Optional Packages](prereqs/optional.md)
+                * [Docker Registry](prereqs/docker-registry.md)
+                * [OpenStack](prereqs/openstack-helm.md)
+    * [Fabric Software Setup](fabric-setup.md)
     * [Bringing Up CORD](profiles/intro.md)
         * [R-CORD](profiles/rcord/install.md)
             * [OLT Setup](openolt/README.md)
@@ -43,22 +47,25 @@
         * [RCORD](rcord/README.md)
         * [vOLT](olt-service/README.md)
         * [vRouter](vrouter/README.md)
-* [Modeling Guide](xos/README.md)
-    * [XOS Modeling Framework](xos/dev/xproto.md)
-    * [Core Models](xos/core_models.md)
-    * [Security Policies](xos/security_policies.md)
-    * [Writing Synchronizers](xos/dev/synchronizers.md)
-        * [Design Guidelines](xos/dev/sync_arch.md)
-        * [Implementation Details](xos/dev/sync_impl.md)
-        * [Synchronizer Reference](xos/dev/sync_reference.md)
 * [Development Guide](developer/developer.md)
     * [Getting the Source Code](developer/getting_the_code.md)
+    * [Writing Models and Synchronizers](xos/README.md)
+        * [XOS Modeling Framework](xos/dev/xproto.md)
+            * [XOS Tool Chain (Internals)](xos/dev/xosgenx.md)
+        * [XOS Synchronizer Framework](xos/dev/synchronizers.md)
+            * [Synchronizer Design](xos/dev/sync_arch.md)
+            * [Synchronizer Implementation](xos/dev/sync_impl.md)
+            * [Synchronizer Reference](xos/dev/sync_reference.md)
+        * [Core Models](xos/core_models.md)
+        * [Security Policies](xos/security_policies.md)
     * [Developer Workflows](developer/workflows.md)
     * [Building Docker Images](developer/imagebuilder.md)
     * [Platform Services](developer/platform.md)
         * [Kubernetes](kubernetes-service/kubernetes-service.md)
         * [OpenStack](openstack/openstack-service.md)
         * [VTN and Service Composition](xos/xos_vtn.md)
+    * [Example Services](examples/examples.md)
+        * [SimpleExampleService](simpleexampleservice/simple-example-service.md)
     * [GUI Development](xos-gui/developer/README.md)
         * [Quickstart](xos-gui/developer/quickstart.md)
         * [Service Graph](xos-gui/developer/service_graph.md)
@@ -72,3 +79,4 @@
     * [Test Setup](cord-tester/qa_testsetup.md)
     * [Test Environment](cord-tester/qa_testenv.md)
     * [System Tests](cord-tester/validate_pods.md)
+
diff --git a/charts/helm.md b/charts/helm.md
index a4c68ff..63ce16f 100644
--- a/charts/helm.md
+++ b/charts/helm.md
@@ -1,11 +1,10 @@
 # Helm Reference
 
-For information on how to install `helm` please refer to [Installing helm](../prereqs/helm.md)
-
-## What is Helm?
-
 {% include "/partials/helm/description.md" %}
 
+For information on how to install `helm` please refer to
+[Installing Helm](../prereqs/helm.md).
+
 ## CORD Helm Charts
 
 All helm charts used to install CORD can be found in the `helm-chart`
diff --git a/developer/developer.md b/developer/developer.md
index fef811f..a1fc60c 100644
--- a/developer/developer.md
+++ b/developer/developer.md
@@ -1,9 +1,8 @@
 # Development Guide
 
-This guide describes workflows and best practices for developers. If
-you are a service developer, you will need to consult this guide and
-the companion [Modeling Guide](../xos/README.md) that describes how
-define models and synchronizers for services being onboarded into
+This guide describes workflows and best practices for developers.
+If you are a service developer, this includes information on how to
+write models and synchronizers for services being on-boarded into
 CORD. If you are a platform developer, you will find information about
 the platform services typically integrated into CORD (e.g.,
 Kubernetes, OpenStack, VTN). Service developers may be interested in
diff --git a/developer/workflows.md b/developer/workflows.md
index 3480ee8..aa8cf5f 100644
--- a/developer/workflows.md
+++ b/developer/workflows.md
@@ -27,13 +27,11 @@
 on DockerHub:
 
 ```shell
-cd ~/cord/build/helm-charts
-helm install xos-core -n xos-core
-helm dep update xos-profiles/rcord-lite
-helm install xos-profiles/rcord-lite -n rcord-lite
+cd ~/cord/helm-charts
 ```
 
-> **Note:** You can replace the `rcord-lite` profile with the one you want to work on. 
+In this folder you can choose from the different charts which one to deploy.
+For example, to deploy rcord-lite you can follow [this guide](../profiles/rcord/install.md).
 
 ### Deploy a Single Instance of Kafka
 
@@ -82,11 +80,9 @@
 All that is left is to teardown and re-deploy the containers.
 
 ```shell
-helm del --purge xos-core
-helm del --purge rcord-lite
-helm install xos-core -n xos-core -f examples/image-tag-candidate.yaml -f examples/imagePullPolicy-IfNotPresent.yaml
-helm dep update xos-profiles/rcord-lite
-helm install xos-profiles/rcord-lite -n rcord-lite -f examples/image-tag-candidate.yaml -f examples/imagePullPolicy-IfNotPresent.yaml
+helm del --purge <chart-name>
+helm dep update <chart-name>
+helm install <chart-name> -n <chart-name> -f examples/image-tag-candidate.yaml -f examples/imagePullPolicy-IfNotPresent.yaml
 ```
 
 In some cases it is possible to use the `helm` upgrade command,
diff --git a/fabric-setup.md b/fabric-setup.md
new file mode 100644
index 0000000..dfe98e3
--- /dev/null
+++ b/fabric-setup.md
@@ -0,0 +1,47 @@
+# Fabric Software Setup
+
+CORD uses the Trellis fabric to connect the data plane components together.
+This section describes how to setup the software for these switches.
+
+The latest [Trellis Fabric](https://wiki.opencord.org/display/CORD/Trellis%3A+CORD+Network+Infrastructure) documentation can be found on the CORD wiki.
+
+## Supported Switches
+
+The list of supported hardware can be found in the [hardware requirements page](prereqs/hardware.md).
+
+## Operating System
+
+All CORD-compatible switches use [Open Networking Linux (ONL)](https://opennetlinux.org/) as operating system.
+The [latest compatible ONL image](https://github.com/opencord/OpenNetworkLinux/releases/download/2017-10-19.2200-1211610/ONL-2.0.0_ONL-OS_2017-10-19.2200-1211610_AMD64_INSTALLED_INSTALLER) can be downloaded from [here](https://github.com/opencord/OpenNetworkLinux/releases/download/2017-10-19.2200-1211610/ONL-2.0.0_ONL-OS_2017-10-19.2200-1211610_AMD64_INSTALLED_INSTALLER).
+
+**Checksum**: *sha256:2db316ea83f5dc761b9b11cc8542f153f092f3b49d82ffc0a36a2c41290f5421*
+
+Guidelines on how to install ONL on top of an ONIE compatible device can be found directly on the [ONL website](https://opennetlinux.org/docs/deploy).
+
+This specific version of ONL has been customized to accept an IP address through DHCP on the management interface, *ma0*. If you'd like to use a static IP, first give
+it an IP address through DHCP, then log in and change the configuration in
+*/etc/network/interfaces*.
+
+The default *username* and *password* are *root* / *onl*.
+
+## OFDPA Drivers
+
+Once ONL is installed, OFDPA drivers will need to be installed as well.
+Each switch model requires a specific version of OFDPA. All driver packages are distributed as DEB packages, which makes the installation process straightforward.
+
+First, copy the package to the switch. For example
+
+```shell
+scp your-ofdpa.deb root@fabric-switch-ip:
+```
+
+Then, install the DEB package
+
+```shell
+dpkg -i your-ofdpa.deb
+```
+
+Two OFDPA drivers are available:
+
+* [EdgeCore 5712-54X / 5812-54X / 6712-32X](https://github.com/onfsdn/atrium-docs/blob/master/16A/ONOS/builds/ofdpa_3.0.5.5%2Baccton1.7-1_amd64.deb?raw=true) - *checksum: sha256:db228b6e79fb15f77497b59689235606b60abc157e72fc3356071bcc8dc4c01f*
+* [QuantaMesh T3048-LY8](https://github.com/onfsdn/atrium-docs/blob/master/16A/ONOS/builds/ofdpa-ly8_0.3.0.5.0-EA5-qct-01.01_amd64.deb?raw=true) - *checksum: sha256:f8201530b1452145c1a0956ea1d3c0402c3568d090553d0d7b3c91a79137da9e*
diff --git a/navigate.md b/navigate.md
new file mode 100644
index 0000000..a92bb01
--- /dev/null
+++ b/navigate.md
@@ -0,0 +1,88 @@
+# Navigating the Guide
+
+The guide is organized around the major stages in the lifecycle of CORD:
+
+* [Installation](README.md): Installing (and later upgrading) CORD.
+* [Operations](operating_cord/operating_cord.md): Operating an already
+  installed CORD deployment.
+* [Development](developer/developer.md): Developing new functionality
+  to be included in CORD.
+* [Testing](cord-tester/README.md): Testing functionality to be
+ included in CORD.
+
+## Navigating CORD
+
+These are all fairly obvious. What's less obvious is the relationship among
+the toolset (and corresponding specification files) used for each stage.
+Understanding these relationships is helpful in navigating CORD.
+
+* **Installation (Helm):** Installing CORD means installing a collection
+  of Docker containers in a Kubernetes cluster. We use Helm to carry out
+  the installation, with the valid configurations defined by a set of
+  `helm-charts`. These charts specify the version of each container to be
+  deployed, and so they also play a role in upgrading a running system.
+  More information about `helm-charts` can be found [here](charts/helm.md).
+
+* **Operations (TOSCA):** A running CORD POD supports multiple Northbound
+  Interfaces (e.g., a GUI and REST API), but we typically use `TOSCA` to specify
+  a workflow for configuring and provisioning a running system. A freshly
+  installed CORD POD has a set of control plane and platform level containers
+  running (e.g., XOS, ONOS, OpenStack), but until provisioned using `TOSCA`,
+  there are no services and no service graph. More information about `TOSCA`
+  can be found [here](xos-tosca/README.md).
+
+* **Development (XOS):** The services running in an operational system
+  are typically deployed as Docker containers, paired with a model that
+  specifies how the service is to be on-boarded into CORD. This model is
+  written in the `xproto` modeling language, and processed by the XOS
+  tool-chain. Among other things, this tool-chain generates the
+  TOSCA-engine that is used to process the configuration and provisioning
+  workflows used to operate CORD. More information about `xproto` (and
+  other details about on-boarding a service) can be found
+  [here](xos/dev/xproto.md).
+
+* **Testing (Jenkins):** Full CORD PODS (as well as individual components
+  of CORD) are installed on both physical and virtual environments, and run
+  through a series of tests. Full PODS are tested nightly, and individual
+  components are tested upon every commit to Gerrit. These tests are specified
+  using `Jenkinsfiles` and `JJB`. An overview of how CORD is tested can be found
+  [here](cord-tester/README.md).
+
+These tools and containers are inter-related as follows:
+
+* An initial install brings up a set of XOS-related containers (e.g., `xos-core`,
+  `xos-gui`, `xos-tosca`) that have been configured with a base set of models.
+  Of these, the `xos-tosca` container implements the TOSCA engine, which
+  takes TOSCA workflows as input and configures/provisions CORD accordingly.
+
+* While the install and operate stages are distinct, for convenience,
+  some helm-charts elect to launch a `tosca-loader` container
+  (in Kubernetes parlance, it's a *job* and not a *service*) to load an initial
+  TOSCA workflow into a newly deployed set of services. This is how a
+  service graph is typically instantiated.
+
+* Not all services run as Docker containers. Some services run in VMs
+  managed by OpenStack (this is currently the case for M-CORD) and
+  some services are implemented as ONOS applications that have been
+  packaged using Maven. In such cases, the VM image and the Maven
+  package are still specified in the TOSCA workflow.
+
+* Every service (whether implemented in Docker, OpenStack, or ONOS)
+  has a counter-part *synchronizer* container running as part of the CORD
+  control plane (e.g., `volt-synchronizer` for the vOLT service). Typically,
+  the helm-chart for a service launches this synchronizer container, whereas
+  the TOSCA workflow creates, provisions, and initializes the backend container,
+  VM, or ONOS app.
+
+* Bringing up additional services in a running POD involves executing
+  helm-charts to install the new service's synchronizer container, which
+  in turn loads the corresponding new models into XOS. This load then
+  triggers an upgrade and restart of the TOSCA engine (and other NBIs),
+  which is a pre-requisite for configuring and provisioning that new service.
+
+* Upgrading an existing service is similar to bringing up a new service,
+  where we depend on Kubernetes to incrementally roll out the containers
+  that implement the service (and roll back if necessary), and we depend
+  on XOS to migrate from the old model to the new model (and support
+  both old and new APIs during the transition period). Upgrading existing
+  services has not been thoroughly tested.
diff --git a/partials/helm/description.md b/partials/helm/description.md
index 383710a..1c04574 100644
--- a/partials/helm/description.md
+++ b/partials/helm/description.md
@@ -1,5 +1,3 @@
 Helm is the package manager for Kubernetes. It lets you define, install,
-and upgrade Kubernetes base application.
-
-For more informations about helm,
-please the visit the official website: <https://helm.sh>
\ No newline at end of file
+and upgrade Kubernetes-based applications. For more information about Helm,
+please visit the official website: <https://helm.sh>.
diff --git a/prereqs/README.md b/prereqs/README.md
new file mode 100644
index 0000000..9195069
--- /dev/null
+++ b/prereqs/README.md
@@ -0,0 +1,12 @@
+# Prerequisites
+
+The latest release of CORD decouples setting up the deployment environment from
+installing CORD. This means more prerequisites must be satisfied (as enumerated
+in this section), but doing so provides more latitude in how you prep a POD to best
+match your local environment.
+
+There are three categories of requirements that must be met before installing CORD:
+
+* [Hardware Requirements](hardware.md) 
+* [Connectivity Requirements](networking.md) 
+* [Software Requirements](software.md) 
diff --git a/prereqs/docker-registry.md b/prereqs/docker-registry.md
index 2b7c74d..a94587e 100644
--- a/prereqs/docker-registry.md
+++ b/prereqs/docker-registry.md
@@ -1,4 +1,4 @@
-# Docker Registry (optional)
+# Docker Registry (Optional)
 
 The section describes how to install an **insecure** *docker registry* in Kubernetes, using the standard Kubernetes helm charts.
 
diff --git a/prereqs/fabric-setup.md b/prereqs/fabric-setup.md
deleted file mode 100644
index 471fe2d..0000000
--- a/prereqs/fabric-setup.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# Fabric Setup
-
diff --git a/prereqs/hardware.md b/prereqs/hardware.md
index f29c1fc..20eb576 100644
--- a/prereqs/hardware.md
+++ b/prereqs/hardware.md
@@ -55,6 +55,8 @@
         * OCP Accepted&trade; EdgeCore AS5712-54X
         * OCP Accepted&trade; EdgeCore AS5812-54X
         * QuantaMesh T3048-LY8
+    * **25G** models (with 100G uplinks)
+        * QuantaMesh BMS T7032-IX1/IX1B (with 25G breakout cable)
     * **40G** models
         * OCP Accepted&trade; EdgeCore AS6712-32X
     * **100G** models
@@ -106,8 +108,8 @@
 For a more realistic deployment, you can build a POD with the
 following elements:
 
-* 3x x86 server (maybe 10G/40G/100G interfaces if need to support VNFs)
-* 4x fabric switches (10G/40G/100G)
+* 3x x86 server (maybe 10G/25G/40G/100G interfaces if need to support VNFs)
+* 4x fabric switches (10G/25G/40G/100G)
 * 7 DAC cables + 3 to connect servers (if need to support VNFs)
 * Ethernet copper cables as needed
 * Access equipment as needed
diff --git a/prereqs/kubernetes.md b/prereqs/kubernetes.md
index 7a3e3d0..e840b38 100644
--- a/prereqs/kubernetes.md
+++ b/prereqs/kubernetes.md
@@ -51,8 +51,3 @@
 If you've just installed Kubernetes, likely you won't see any pod, yet.
 That's fine, as long as you don't see errors.
 
-## Install Helm
-
-CORD uses a tool called Helm to deploy containers on Kubernetes.
-As such, Helm needs to be installed before being able to deploy CORD containers.
-More info on Helm and how to install it can be found [here](helm.md).
diff --git a/prereqs/openstack-helm.md b/prereqs/openstack-helm.md
index 1d9f651..d60f9a8 100644
--- a/prereqs/openstack-helm.md
+++ b/prereqs/openstack-helm.md
@@ -1,4 +1,4 @@
-# OpenStack (optional)
+# OpenStack (Optional)
 
 The [openstack-helm](https://github.com/openstack/openstack-helm)
 project can be used to install a set of Kubernetes nodes as OpenStack
@@ -148,6 +148,6 @@
 * Install software like Kubernetes and Helm
 * Build the Helm charts and install them in a local Helm repository
 * Install requried packages
-* Configure DNS on the nodes
+* Configure DNS on the nodes (_NOTE: The `openstack-helm` install overwrites `/etc/resolv.conf` on the compute hosts and points the upstream nameservers to Google DNS.  If a local upstream is required, [see this note](https://docs.openstack.org/openstack-helm/latest/install/developer/kubernetes-and-common-setup.html#clone-the-openstack-helm-repos)_.)
 * Generate `values.yaml` files based on the environment and install Helm charts using these files
 * Run post-install tests on the OpenStack services
diff --git a/prereqs/optional.md b/prereqs/optional.md
new file mode 100644
index 0000000..370041e
--- /dev/null
+++ b/prereqs/optional.md
@@ -0,0 +1,14 @@
+# Optional Packages
+
+Although not required, you may want to install one or both of the following
+packages:
+
+* **Local Registry:** If your environment does not permit connecting your
+  POD to the public Internet, you may want to take advantage of a local Docker
+  registry. The following [registry setup](docker-registry.md) will help.
+  (Having a local registry is also useful when doing local development, as outlined
+  in the [Developer Guide](../developer/workflows.md).)
+
+* **OpenStack:** If you need to include OpenStack in your deployment,
+  so you can bring up VMs on your POD, you will need to follow the
+  [OpenStack deployment](openstack-helm.md) guide.
diff --git a/prereqs/vtn-setup.md b/prereqs/vtn-setup.md
index e5ff99b..1d41e2e 100644
--- a/prereqs/vtn-setup.md
+++ b/prereqs/vtn-setup.md
@@ -2,7 +2,12 @@
 
 The ONOS VTN app provides virtual networking between VMs on an OpenStack cluster.  Prior to installing the [base-openstack](../charts/base-openstack.md) chart that installs and configures VTN, make sure that the following requirements are satisfied.
 
-First, VTN requires the ability to SSH to each compute node _using an account with passwordless `sudo` capability_.  Before installing this chart, first create an SSH keypair and copy it to the `authorized_keys` files of all nodes in the cluster:
+## SSH access to hosts
+
+VTN requires the ability to SSH to each compute node _using an account with
+passwordless `sudo` capability_.  Before installing this chart, first create
+an SSH keypair and copy it to the `authorized_keys` files of all nodes in the
+cluster:
 
 Generate a keypair:
 
@@ -22,7 +27,38 @@
 cp ~/.ssh/id_rsa xos-profiles/base-openstack/files/node_key
 ```
 
-Second, the VTN app requires a fabric interface on the compute nodes.  VTN will not successfully initialize if this interface is not present. By default the name of this interface is expected to be named `fabric`. If there is not an actual fabric interface on the compute node, create a dummy interface as follows:
+## Fabric interface
+
+The VTN app requires a fabric interface on the compute nodes.  VTN will not
+successfully initialize if this interface is not present. By default the name
+of this interface is expected to be `fabric`.
+
+### Interface not named 'fabric'
+
+If you have a fabric interface on the compute node but it is not named
+`fabric`, create a bridge named `fabric` and add the interface to it.
+Assuming the fabric interface is named `eth2`:
+
+```shell
+sudo brctl addbr fabric
+sudo brctl addif fabric eth2
+sudo ifconfig fabric up
+sudo ifconfig eth2 up
+```
+
+To make this configuration persistent, add the following to
+`/etc/network/interfaces`:
+
+```text
+auto fabric
+iface fabric inet manual
+  bridge_ports eth2
+```
+
+### Dummy interface
+
+If there is not an actual fabric
+interface on the compute node, create a dummy interface as follows:
 
 ```shell
 sudo modprobe dummy
@@ -30,7 +66,9 @@
 sudo ifconfig fabric up
 ```
 
-Finally, in order to be added to the VTN configuration, each compute node must
+## DNS setup
+
+In order to be added to the VTN configuration, each compute node must
 be resolvable in DNS.  If a server's hostname is not resolvable, it can be
 added to the local `kube-dns` server (substitute _HOSTNAME_ with the output of
 the `hostname` command, and _HOST-IP-ADDRESS_ with the node's primary IP
diff --git a/profiles/mcord/install.md b/profiles/mcord/install.md
index 3fda193..27159ad 100644
--- a/profiles/mcord/install.md
+++ b/profiles/mcord/install.md
@@ -6,16 +6,36 @@
 node, suitable for evaluation or testing.  Requirements:
 
 - An _Ubuntu 16.04.4 LTS_ server with at least 64GB of RAM and 32 virtual CPUs
+- Latest versions of released software installed on the server: `sudo apt update; sudo apt -y upgrade`
 - User invoking the script has passwordless `sudo` capability
+- Open access to the Internet (not behind a proxy)
+- Google DNS servers (e.g., 8.8.8.8) are accessible
+
+### Target server on CloudLab (optional)
+
+If you do not have a target server available that meets the above
+requirements, you can borrow one on [CloudLab](https://www.cloudlab.us). Sign
+up for an account using your organization's email address and choose "Join
+Existing Project"; for "Project Name" enter `cord-testdrive`.
+
+> NOTE: CloudLab is supporting CORD as a courtesy. It is expected that you will not use CloudLab resources for purposes other than evaluating CORD. If, after a week or two, you wish to continue using CloudLab to experiment with or develop CORD, then you must apply for your own separate CloudLab project.
+
+Once your account is approved, start an experiment using the
+`OnePC-Ubuntu16.04-HWE` profile on the Wisconsin cluster. This will provide
+you with a temporary target server meeting the above requirements.
+
+Refer to the [CloudLab documentation](http://docs.cloudlab.us/) for more information.
+
+### Convenience Script
+
+This script takes about an hour to complete.  If you run it, you can skip
+directly to [Validating the Installation](#validating-the-installation) below.
 
 ```bash
 git clone https://gerrit.opencord.org/automation-tools
 automation-tools/mcord/mcord-in-a-box.sh
 ```
 
-This script takes about an hour to complete.  If you run it, you can skip
-directly to [Validating the Installation](#validating-the-installation) below.
-
 ## Prerequisites
 
 M-CORD requires OpenStack to run VNFs.  The OpenStack installation
diff --git a/profiles/rcord/configuration.md b/profiles/rcord/configuration.md
index d0ff9af..437f6ae 100644
--- a/profiles/rcord/configuration.md
+++ b/profiles/rcord/configuration.md
@@ -37,6 +37,7 @@
       type: tosca.nodes.SwitchPort
       properties:
         portId: 1
+        host_learning: false
       requirements:
         - switch:
             node: switch#my_fabric_switch
diff --git a/quickstart.md b/quickstart.md
new file mode 100644
index 0000000..b42cf11
--- /dev/null
+++ b/quickstart.md
@@ -0,0 +1,8 @@
+# Quick Start
+
+This section walks you through the installation sequence to bring up a
+demonstration configuration of CORD that includes a simple example
+service. If you'd prefer to understand the installation process in more
+depth, you might start with the [Installation Guide](README.md).
+
+More to come...