[CORD-1924]
Documentation follow-up patch

Change-Id: I4387182ca58fa3fbb6ebda12f94f46e1a847e68f
diff --git a/.gitignore b/.gitignore
index 7f48339..94514c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,7 +50,7 @@
 docs/build_glossary.md
 docs/platform-install
 docs/profiles
-docs/test
+docs/cord-tester
 docs/xos
 docs/xos-gui
 docs/xos-tosca
diff --git a/Makefile b/Makefile
index 66597e7..c86d29b 100644
--- a/Makefile
+++ b/Makefile
@@ -121,7 +121,7 @@
 clean-all: virsh-domain-destroy vagrant-destroy clean-profile clean-genconfig
 	rm -f $(ALL_MILESTONES)
 
-clean-local: clean-profile clean-genconfig
+clean-local: clean-profile clean-genconfig clean-images
 	rm -f $(LOCAL_MILESTONES)
 
 clean-onos:
diff --git a/ansible/roles/genconfig/templates/config.yml.j2 b/ansible/roles/genconfig/templates/config.yml.j2
index 646d923..eba0be5 100644
--- a/ansible/roles/genconfig/templates/config.yml.j2
+++ b/ansible/roles/genconfig/templates/config.yml.j2
@@ -1,4 +1,4 @@
-
+---
 {#
 Copyright 2017-present Open Networking Foundation
 
@@ -15,8 +15,6 @@
 limitations under the License.
 #}
 
-
----
 # config.yml - generated from ansible/roles/genconfig/templates/config.yml.j2
 # ** DO NOT EDIT THIS FILE MANUALLY! **
 # Edit the Pod Config (or Scenario) and rerun `make config` to regenerate it
diff --git a/ansible/roles/genconfig/templates/inventory.ini.j2 b/ansible/roles/genconfig/templates/inventory.ini.j2
index 54b71c1..ba4b7f5 100644
--- a/ansible/roles/genconfig/templates/inventory.ini.j2
+++ b/ansible/roles/genconfig/templates/inventory.ini.j2
@@ -1,4 +1,3 @@
-
 {#
 Copyright 2017-present Open Networking Foundation
 
@@ -15,7 +14,6 @@
 limitations under the License.
 #}
 
-
 ; inventory.ini, generated from ansible/roles/genconfig/templates/inventory.ini.j2
 ; ** DO NOT EDIT THIS FILE MANUALLY! **
 ; Edit the Pod Config (or Scenario) and rerun `make config` to regenerate it
diff --git a/docs/Makefile b/docs/Makefile
index df03d5b..892bae1 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,3 +1,10 @@
+# Makefile for building CORD docs site, guide.opencord.org
+# Building docs requires the following tools:
+#  - Gitbook toolchain: https://toolchain.gitbook.com/setup.html
+#  - NPM (for Gitbook and Swagger)
+#  - Python (for build glossary script)
+#  - linkchecker (for test target) http://wummel.github.io/linkchecker/
+
 default: serve
 
 # use bash for pushd/popd, and to fail if commands within a pipe fail
@@ -11,15 +18,20 @@
 build: setup
 	gitbook build
 
-setup: clean platform-install test profiles xos xos-gui xos-tosca swagger $(GENERATED_DOCS)
+setup: clean platform-install cord-tester profiles xos xos-gui xos-tosca swagger $(GENERATED_DOCS)
 	gitbook init
 	gitbook install
 
+test: linkcheck
+
+linkcheck: build
+	linkchecker -a _book/
+
 platform-install:
 	ln -s ../platform-install/docs platform-install
 
-test:
-	ln -s ../../test/cord-tester/docs test
+cord-tester:
+	ln -s ../../test/cord-tester/docs cord-tester
 
 profiles:
 	ln -s ../../orchestration/profiles profiles
diff --git a/docs/README.md b/docs/README.md
index 4dd14b3..92f6c75 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -15,7 +15,7 @@
 by [bringing up a virtual POD on a single physical server](install_virtual.md).
 
 If you want to work on the CORD core or develop a service, please see [Getting
-the Source Code](cord_repo.md) and [Developing for CORD](develop.md).
+the Source Code](getting_the_code.md) and [Developing for CORD](develop.md).
 
 ## Getting Help
 
@@ -28,7 +28,7 @@
 
 ## Making Changes to Documentation
 
-The [http://guide.opencord.org](guide.opencord.org) website is built using the
+The [http://guide.opencord.org](http://guide.opencord.org) website is built using the
 [GitBook Toolchain](https://toolchain.gitbook.com/), with the documentation
 root in [build/docs](https://github.com/opencord/cord/blob/{{ book.branch
 }}/docs) in a checked-out source tree.  It is built with `make`, and requires
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index c96ac6f..a91f04e 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -3,7 +3,8 @@
 * [Guide Overview](README.md)
 * [Terminology](terminology.md)
 * [Building and Installing CORD](install.md)
-    * [Installing a Virtual POD (CORD-in-a-Box)](install_virtual.md)
+    * [POD Quickstarts](quickstarts.md)
+    * [Installing a Virtual POD (CiaB)](install_virtual.md)
     * [Installing a Physical POD](install_physical.md)
         * [Basic Configuration](appendix_basic_config.md)
         * [Network Settings](appendix_network_settings.md)
@@ -12,7 +13,7 @@
         * [vSG Configuration](appendix_vsg.md)
     * [Troubleshooting and Build Internals](troubleshooting.md)
     * [Building Docker Images](build_images.md)
-    * [Build System Variable Glossary](build_glossary.md)
+    * [Build System Config Glossary](build_glossary.md)
 * [Operating and Managing CORD](operate/README.md)
     * [Powering Up a POD](operate/power_up.md)
 # My understanding is that ELK Stack is not working right now
@@ -31,9 +32,8 @@
     * [Migrating Models to 4.0](xos/migrate_4.0.md)
 * [Developing for CORD](develop.md)
     * [Getting the Source Code](getting_the_code.md)
-    * [Workflow: Mock Configuration](xos/dev/workflow_mock.md)
-# this workflow has been added to install_virtual.md
-#    * [Workflow: Cord-in-a-Box](xos/dev/workflow_ciab.md)
+    * [Workflow: Mock Configuration](xos/dev/workflow_mock_single.md)
+    * [Workflow: Virtual POD](xos/dev/workflow_pod.md)
     * [Example Service](xos/example_service.md)
     * [Configuring XOS](xos/modules/xosconfig.md)
     * [GUI Development](xos-gui/developer/README.md)
@@ -43,9 +43,9 @@
         * [GUI Internals](xos-gui/architecture/README.md)
             * [Module Structure](xos-gui/architecture/gui-modules.md)
             * [Data Sources](xos-gui/architecture/data-sources.md)
-* [Testing CORD](test/README.md)
-    * [Running Tests](test/running.md)
-    * [Test Environment](test/qa_testsetup.md)
+* [Testing CORD](cord-tester/README.md)
+    * [Running Tests](cord-tester/running.md)
+    * [Test Environment](cord-tester/qa_testsetup.md)
 * [Service Profiles](service-profiles.md)
     * [R-CORD](profiles/rcord/README.md)
     * [E-CORD](profiles/ecord/README.md)
diff --git a/docs/book.json b/docs/book.json
index 1f2d12c..176595d 100644
--- a/docs/book.json
+++ b/docs/book.json
@@ -24,7 +24,7 @@
         },
         {
           "value": "http://wiki.opencord.org/",
-          "text": "3.0 and previous (old wiki)"
+          "text": "3.0 and previous (wiki)"
         }
       ]
     }
diff --git a/docs/getting_the_code.md b/docs/getting_the_code.md
index f086399..8c421e6 100644
--- a/docs/getting_the_code.md
+++ b/docs/getting_the_code.md
@@ -23,12 +23,12 @@
 The `cord` repositories are usually checked out to `~/cord` in most of our
 examples and deployments:
 
-```sh
+<pre><code>
 mkdir ~/cord && \
 cd ~/cord && \
 repo init -u https://gerrit.opencord.org/manifest -b {{ book.branch }} && \
 repo sync
-```
+</code></pre>
 
 > NOTE: `-b` specifies the branch name. Development work goes on in `master`,
 > and there are also specific stable branches such as `cord-4.0` that can be
@@ -42,7 +42,7 @@
 build		component	incubator	onos-apps	orchestration	test
 ```
 
-## Downloading patchsets
+## Download patchsets
 
 Once you've downloaded a CORD source tree, you can download patchsets from
 Gerrit with the following command:
@@ -56,7 +56,7 @@
 
 Also see [Configuring your Development Environment:cord-bootstrap.sh script
 ](install.md#cord-bootstrapsh-script) for instructions on downloading
-patchsets during a build that uses the `cord-bootstrap.sh` script.
+patchsets during a build using the `cord-bootstrap.sh` script.
 
 ## Contributing code to CORD
 
diff --git a/docs/install.md b/docs/install.md
index d209817..2a381af 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -30,7 +30,7 @@
 
  - [Docker](https://www.docker.com/community-edition), for *local* build
    scenarios
- - [Vagrant](https://www.vagrantup.com/downloads.html), for for all other
+ - [Vagrant](https://www.vagrantup.com/downloads.html), for all other
    scenarios
 
 You can manually install these on your development system - see [Getting the
@@ -41,7 +41,7 @@
 
 If you're working on an Ubuntu 14.04 system (CloudLab or another test
 environment), you can use the `cord-bootstrap.sh` script to install these tools
-and check out the CORD repo tree to `~/cord`. This hasn't been tested on
+and check out the CORD source tree to `~/cord`. This hasn't been tested on
 other versions or distributions.
 
 <pre><code>
@@ -131,6 +131,9 @@
 
 The included POD configs are generally named `<profile>-<scenario>.yml`. 
 
+POD configs are used during a build by passing them to `make` with the
+`PODCONFIG` variable, for example: `make PODCONFIG=rcord-virtual.yml config`.
+
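+As a sketch, a complete config-and-build run using the `rcord-mock.yml` POD
+config might look like this:
+
+```
+cd ~/cord/build && \
+make PODCONFIG=rcord-mock.yml config && \
+make -j4 build |& tee ~/build.out
+```
+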
 ### Profiles
 
 The set of services that XOS on-boards into CORD -- the  _Service Graph_, and
@@ -157,3 +160,4 @@
 - `cord`: Physical or virtual multi-node CORD pod, with MaaS and OpenStack
 - `opencloud`: Physical or virtual multi-node OpenCloud pod, with OpenStack
 
+The scenario is specified in the POD config.
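+
+For example, the first two settings in a minimal POD config select the
+profile and scenario (key names as used in the example POD configs under
+`podconfig/`):
+
+```
+cord_profile: rcord
+cord_scenario: cord
+```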
diff --git a/docs/install_physical.md b/docs/install_physical.md
index f8b6ed6..995b28a 100644
--- a/docs/install_physical.md
+++ b/docs/install_physical.md
@@ -6,30 +6,6 @@
 If you are new to CORD and would like to get familiar with it, you should start
 by [bringing up a virtual POD on a single physical server](install_virtual.md).
 
-## Quickstart
-
-After performing the [physical configuration](#physical-configuration), install
-Ubuntu 14.04 on a [suitable head node](#detailed-requirements). On the target
-head node, add a `cord` user with `sudo` rights:
-
-```
-sudo adduser cord && \
-sudo usermod -a -G sudo cord && \
-echo 'cord ALL=(ALL) NOPASSWD:ALL' | sudo tee --append /etc/sudoers.d/90-cloud-init-users
-```
-
-[Create a POD configuration](install.md#pod-config) file in the
-`~/cord/build/podconfig` directory, then run:
-
-```
-cd ~/cord/build && \
-make PODCONFIG={YOUR_PODCONFIG_FILE.yml} config && \
-make -j4 build |& tee ~/build.out
-```
-
-After a successful build, set the compute nodes and the switches to boot from
-PXE and manually reboot them. They will be automatically deployed.
-
 ## Overview of a CORD POD
 
 The following is a brief description of a full physical POD.
diff --git a/docs/install_virtual.md b/docs/install_virtual.md
index 3dbb79d..974395b 100644
--- a/docs/install_virtual.md
+++ b/docs/install_virtual.md
@@ -7,37 +7,9 @@
 
 The virtual pod is also known as *CORD-in-a-Box* (or just *CiaB*).  The purpose
 of this virtual POD is to enable those interested in understanding how CORD
-works to examine and interact with a running CORD environment. It also serves
-as a common [development environment](develop.md).
-
-## Quickstart
-
-To install a CiaB, on a [suitable](#target-server-requirements) Ubuntu 14.04
-system, run the following commands:
-
-```bash
-cd ~ && \
-wget https://raw.githubusercontent.com/opencord/cord/master/scripts/cord-bootstrap.sh && \
-chmod +x cord-bootstrap.sh && \
-~/cord-bootstrap.sh -v |& tee ~/setup.out
-
-cd ~/cord/build && \
-make PODCONFIG=rcord-virtual.yml config && \
-make -j4 build |& tee ~/build.out && \
-make pod-test |& tee ~/test.out
-```
-
-This will create a virtual R-CORD pod (as specified in the `PODCONFIG`), and go
-through the build and end-to-end test procedure, bringing up vSG and
-ExampleService instances.
-
-If you'll be running these commands frequently, a shortcut is to use the `-t`
-option on the `cord-bootstrap.sh` script to run all the make targets, for a
-more unattended build process, which can be handy when testing:
-
-```
-./cord-bootstrap.sh -v -t "PODCONFIG=rcord-virtual.yml config" -t "build" -t "pod-test"
-```
+works to examine and interact with a running CORD environment. There is also a
+[Development Workflow: Virtual POD](xos/dev/workflow_pod.md) that allows for a
+tighter loop when developing the XOS core or services.
 
 ## What you need (prerequisites)
 
@@ -94,7 +66,7 @@
 ### Bootstrap the server
 
 See [Configuring your Development Environment:cord-bootstrap.sh script
-](install.md#cord-bootstrap.sh-script) for instructions for running the
+](install.md#cord-bootstrapsh-script) for instructions for running the
 bootstrap script to download the CORD source tree and optionally downloading
 patches from Gerrit. You must specify the `-v` option to this script in order
 to install Vagrant, which is required to build a CiaB.
@@ -347,7 +319,7 @@
 for subscribers by selecting the `Service Graph` item in the left navigation.
 
 Here is a sample output:
-![subscriber-service-graph.png](subscriber-service-graph.png)
+![Subscriber Service Graph](images/subscriber-service-graph.png)
 
 > NOTE: the `Service Graph` will need to be detangled and can be organized by
 > dragging the nodes.
@@ -433,52 +405,6 @@
 }
 ```
 
-## Development Loop using CiaB
-
-For service or core development using CiaB, we have a tighter development
-workflow loop which involves tearing down XOS as well as any active OpenStack
-objects (Instances, Networks, etc), rebuilding XOS container images, and then
-redeploying XOS.
-
-We sometimes refer to this as a "mini-End2End" as it does result in a new XOS
-deployment with an E2E test, but does not require a full reinstall.
-
-1. Make changes to your service code and propagate them to your CiaB host.
-   There are a number of ways to propagate changes to the host depending on
-   developer preference, including using [gerrit
-   patchsets](getting_the_code.md#download-patchsets), rsync, scp, etc. 
-
-2. Teardown the existing XOS installation and clean up OpenStack to
-   remove any leftover instances or networks:
-
-```
-cd ~/cord/build
-make xos-teardown
-make clean-openstack
-```
-
-3. Optional: Teardown ONOS. Sometimes we find it helpful to reinstall the
-   onos-cord and onos-fabric containers, to ensure that all state is wiped
-   clean from ONOS.
-
-```
-cd ~/cord/build
-make clean-onos
-```
-
-4. Build the new XOS container images and deploy to the pod.
-
-```
-cd ~/cord/build
-make -j4 build
-make compute-node-refresh
-make pod-test
-```
-
-5. Test and verify your changes.
-
-6. Go back to step #1
-
 ## Troubleshooting
 
 If the CiaB build fails, you may try simply resuming the build at the place
@@ -501,5 +427,3 @@
 multiple physical compute nodes.  The process for doing so is described in
 [Installing a Physical POD](install_physical.md).
 
-
-
diff --git a/docs/operate/rest_apis.md b/docs/operate/rest_apis.md
index f3c93e2..9fe2287 100644
--- a/docs/operate/rest_apis.md
+++ b/docs/operate/rest_apis.md
@@ -2,9 +2,9 @@
 
 ## XOS
 
-A RESTful interface is available for configuring and controlling XOS. It is 
-auto-generated from the set of [models](xos/README.md) configured 
-into the POD manifest, and includes both core and service-specific models.
+A RESTful interface is available for configuring and controlling XOS. It is
+auto-generated from the set of [models](/xos/README.md) configured into the POD
+manifest, and includes both core and service-specific models.
 
 * [Core](https://guide.opencord.org/{{ book.branch }}/api/xos/#/core)
 * [Address Manager](https://guide.opencord.org/{{ book.branch }}/api/xos/#/addressmanager)
diff --git a/docs/quickstarts.md b/docs/quickstarts.md
new file mode 100644
index 0000000..41406a8
--- /dev/null
+++ b/docs/quickstarts.md
@@ -0,0 +1,64 @@
+# POD Quickstarts
+
+This section provides a short list of essential commands that can be used to
+deploy virtual or physical PODs.
+
+Before you start, you must obtain the CORD source tree and install Vagrant.
+Instructions for doing this can be found at [Configuring your Development
+Environment](install.md#configuring-your-development-environment). If you're
+on CloudLab, most likely you will want to use the `cord-bootstrap.sh` script
+with the `-v` option.
+
+## Virtual POD (CORD-in-a-Box)
+
+This is a summary of [Installing a Virtual POD (CORD-in-a-Box)](install_virtual.md).
+
+To install a CiaB, on a
+[suitable](install_virtual.md#target-server-requirements) Ubuntu 14.04
+system, run the following commands:
+
+```
+cd ~/cord/build && \
+make PODCONFIG=rcord-virtual.yml config && \
+make -j4 build |& tee ~/build.out && \
+make pod-test |& tee ~/test.out
+```
+
+This will create a virtual R-CORD pod (as specified in the `PODCONFIG`), and go
+through the build and end-to-end test procedure, bringing up vSG and
+ExampleService instances.
+
+If you'll be running these commands frequently, a shortcut is to use the `-t`
+option on the `cord-bootstrap.sh` script to run all the make targets, for a
+more unattended build process, which can be handy when testing:
+
+```
+./cord-bootstrap.sh -v -t "PODCONFIG=rcord-virtual.yml config" -t "build" -t "pod-test"
+```
+
+## Physical POD
+
+This is a summary of [Installing a Physical POD](install_physical.md).
+
+After performing the [physical
+configuration](install_physical.md#physical-configuration), install Ubuntu
+14.04 on a [suitable head node](install_physical.md#detailed-requirements). On
+the target head node, add a `cord` user with `sudo` rights:
+
+```
+sudo adduser cord && \
+sudo usermod -a -G sudo cord && \
+echo 'cord ALL=(ALL) NOPASSWD:ALL' | sudo tee --append /etc/sudoers.d/90-cloud-init-users
+```
+
+[Create a POD configuration](install.md#pod-config) file in the
+`~/cord/build/podconfig` directory, then run:
+
+```
+cd ~/cord/build && \
+make PODCONFIG={YOUR_PODCONFIG_FILE.yml} config && \
+make -j4 build |& tee ~/build.out
+```
+
+After a successful build, set the compute nodes and the switches to boot from
+PXE and manually reboot them. They will be automatically deployed.
+
diff --git a/docs/release-notes/shared-delusion.md b/docs/release-notes/shared-delusion.md
index fc78170..8d6d8f0 100644
--- a/docs/release-notes/shared-delusion.md
+++ b/docs/release-notes/shared-delusion.md
@@ -32,7 +32,7 @@
 * Removed hand-crafted APIs and eliminated the `xos-gui` container.
 
 > Information on migrating services to Shared-Delusion can be found in the
-> [CORD-4.0 Service Migration Guide](../xos/migrate-4.0.md).
+> [CORD-4.0 Service Migration Guide](/xos/migrate_4.0.md).
 
 ## Build System
 
diff --git a/docs/scripts/defaults.md.j2 b/docs/scripts/defaults.md.j2
index 7d7405d..04738cb 100644
--- a/docs/scripts/defaults.md.j2
+++ b/docs/scripts/defaults.md.j2
@@ -1,4 +1,4 @@
-# Build System Variable Glossary
+# Build System Config Glossary
 
 {{ def_docs['frontmatter']['description'] }}
 
diff --git a/docs/scripts/descriptions.md b/docs/scripts/descriptions.md
index 6a15590..7fdd076 100644
--- a/docs/scripts/descriptions.md
+++ b/docs/scripts/descriptions.md
@@ -1,5 +1,9 @@
-This documents every variable available in the build system.  The repos these
-variables are used in are:
+
+This page documents all the configuration variables that can be set in a [POD
+config](install.md#pod-config), [scenario](install.md#scenarios), or [profile
+manifest](https://github.com/opencord/platform-install/tree/master/profile_manifests).
+
+These variables are used in the following repositories:
 
  - [cord](https://github.com/opencord/cord) (aka "build" when checked out)
  - [maas](https://github.com/opencord/maas)
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 7bca1d5..91b62a4 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -3,7 +3,7 @@
 ## Debugging make target Failures
 
 `make` targets that are built will create a per-target log file in the `logs`
-directory. These are prefixed with a datestamp which is the same for every
+directory. These are prefixed with a timestamp which is the same for every
 target in a single run of make - re-running make will result in additional sets
 of logs, even for the same target.
 
@@ -21,7 +21,7 @@
 All configuration in CORD is driven off of YAML files which contain variables
 used by Ansible, make, and Vagrant to build development and production
 environments. A [glossary of build system variables](build_glossary.md) is
-available which describes these variables and where they are used. 
+available which describes these variables and where they are used.
 
 When a command to generate config such as `make PODCONFIG=rcord-mock.yml
 config` is run, the following steps happen:
@@ -39,7 +39,7 @@
 
 Note that the combination of the POD and Scenario config in step #3 is not a
 merge. If you define an item in the root of the POD Config that has subkeys,
-it will overwrite every subkey defined in the Scenario.  This is most noticable
+it will overwrite every subkey defined in the Scenario.  This is most noticeable
 when setting the `inventory_groups` or `docker_image_whitelist`
 variable. If changing either in a POD Config, you must recreate the
 entire structure or list. This may seem inconvenient, but other list
@@ -80,7 +80,11 @@
 
  - `printconfig`: Prints the configured scenario and profile.
 
- - `xos-teardown`: Stop and remove a running set of XOS docker containers
+ - `xos-teardown`: Stop and remove a running set of XOS docker containers,
+   removing the database.
+
+ - `xos-update-images`: Rebuild the images used by XOS, without tearing down
+   running XOS containers.
 
  - `collect-diag`: Collect detailed diagnostic information on a deployed head
    and compute nodes, into `diag-<datestamp>` directory on the head node.
@@ -143,28 +147,3 @@
 This will teardown the XOS container set, tell the build system to rebuild
 images, then perform a build and reload the profile.
 
-#### Use ElasticStack or ONOS with the `single` scenario
-
-The single scenario is a medium-weight scenario for synchronizer development,
-and has optional ElasticStack or ONOS functionality.
-
-To use these, you would invoke the ONOS or ElasticStack milestone target before
-the `build` target:
-
-```
-make PODCONFIG=rcord-single.yml config
-make -j4 milestones/deploy-elasticstack
-make -j4 build
-```
-
-or
-
-```
-make PODCONFIG=opencloud-single.yml config
-make -j4 milestones/deploy-onos
-make -j4 build
-```
-
-If you want to use both in combination, make sure to run the ElasticStack
-target first, so ONOS can send logs to ElasticStack.
-
diff --git a/docs/vrouter.md b/docs/vrouter.md
index e14ec43..9c2e3b6 100644
--- a/docs/vrouter.md
+++ b/docs/vrouter.md
@@ -1,24 +1,42 @@
 # Connecting to Upstream Networks using vRouter
 
-A CORD POD needs to be connected to an upstream network to provide connectivity from within CORD to the outside world. The vRouter service in CORD is designed to enable the CORD POD to communicate with upstream routers and provide this connectivity. Currently the vRouter supports BGP for communicating routes to and from upstream routers.
+A CORD POD needs to be connected to an upstream network to provide connectivity
+from within CORD to the outside world. The vRouter service in CORD is designed
+to enable the CORD POD to communicate with upstream routers and provide this
+connectivity. Currently the vRouter supports BGP for communicating routes to
+and from upstream routers.
 
-Each deployment is different in terms of protocols and features required, so this guide aims to be a general overview of how to set up CORD to communicate with external routers. The operator will have to customize the configurations to be appropriate for their deployment.
+Each deployment is different in terms of protocols and features required, so
+this guide aims to be a general overview of how to set up CORD to communicate
+with external routers. The operator will have to customize the configurations
+to be appropriate for their deployment.
 
-Deploying the vRouter infrastructure is a relatively manual process right now. The intention is that over time the process will become more automated and easier to perform.
+Deploying the vRouter infrastructure is a relatively manual process right now.
+The intention is that over time the process will become more automated and
+easier to perform.
 
 ## Prerequisites
 
-This guide assumes that you have run through the POD install procedure outlined [here](quickstart_physical.md). You must also have installed and configured the fabric ONOS cluster and connected the fabric switches to that controller.
+This guide assumes that you have run through the [physical POD install
+procedure](install_physical.md). You must also have installed and configured
+the fabric ONOS cluster and connected the fabric switches to that controller.
 
 ## Physical Connectivity
 
-External routers must be physically connected to one of the fabric leaf switches. It is possible to connect to multiple routers, but currently there is a limitation that they must all be physically connected to the same leaf switch, and the Quagga instance must be connected to the same fabric switch as the upstream routers.
+External routers must be physically connected to one of the fabric leaf
+switches. It is possible to connect to multiple routers, but currently there is
+a limitation that they must all be physically connected to the same leaf
+switch, and the Quagga instance must be connected to the same fabric switch as
+the upstream routers.
 
 ![phys-connectivity](images/vrouter-connectivity.png)
 
 ## Dedicating a Fabric Interface for Quagga
 
-The CORD build process determines which NICs on each compute node are connected to the fabric and puts these NICs into a bonded interface. The name of this bond is fabric, so if you run `ifconfig` on the compute node you have selected to deploy Quagga, you should see this bonded interface appear in the output.
+The CORD build process determines which NICs on each compute node are connected
+to the fabric and puts these NICs into a bonded interface. The name of this
+bond is `fabric`, so if you run `ifconfig` on the compute node you have selected
+to deploy Quagga, you should see this bonded interface appear in the output.
 
 ```
 ubuntu@fumbling-reason:~$ ifconfig fabric
@@ -32,7 +50,11 @@
           RX bytes:89101760 (89.1 MB)  TX bytes:0 (0.0 B)
 ```
           
-We need to dedicate one of these fabric interfaces to the Quagga container, so we'll need to remove it from the bond. You should first identify the name of the interface that you want to dedicate. In this example we'll assume it is called mlx1. You can then remove it from the bond by editing the /etc/network/interfaces file:
+We need to dedicate one of these fabric interfaces to the Quagga container, so
+we'll need to remove it from the bond. You should first identify the name of
+the interface that you want to dedicate. In this example we'll assume it is
+called `mlx1`. You can then remove it from the bond by editing the
+`/etc/network/interfaces` file:
 
 ```
 sudo vi /etc/network/interfaces
@@ -46,30 +68,50 @@
     bond-master fabric
 ```
     
-Simply remove the line `bond-master fabric`, save the file then restart the networking service on the compute node.
+Simply remove the line `bond-master fabric`, save the file, and then restart
+the networking service on the compute node.
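+
+For example, on an Ubuntu 14.04 node (the exact command may vary with your
+release and networking setup):
+
+```
+sudo service networking restart
+```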
 
 ## L3 setup
 
-The operator will need to allocate a subnet that exists between the CORD POD and the upstream router that is used for peering between the CORD Quagga instance and the upstream routers. CORD currently has a limitation that it requires 2 IP addresses in the peering subnet, so for 1 upstream router we need to allocate 3 addresses in total. This means the peering subnet cannot be smaller than a /29.
+The operator will need to allocate a subnet that exists between the CORD POD
+and the upstream router that is used for peering between the CORD Quagga
+instance and the upstream routers. CORD currently has a limitation that it
+requires 2 IP addresses in the peering subnet, so for 1 upstream router we need
+to allocate 3 addresses in total. This means the peering subnet cannot be
+smaller than a /29.
 
-The CORD fabric requires 2 IP addresses so that it can separate peering traffic from data traffic. Peering happens using one of the IP addresses, and the routes in the POD are advertised upstream with the next-hop set to the other IP address. This menas that when traffic comes to the fabric leaf switch from outside, the switch is able to distinguish peering traffic from data traffic and treat each appropriately.
+The CORD fabric requires 2 IP addresses so that it can separate peering traffic
+from data traffic. Peering happens using one of the IP addresses, and the
+routes in the POD are advertised upstream with the next-hop set to the other IP
+address. This means that when traffic comes to the fabric leaf switch from
+outside, the switch is able to distinguish peering traffic from data traffic
+and treat each appropriately.
 
 An example of how this is configured is shown in the following figure:
 
 ![phys-connectivity](images/l3-connectivity.png)
 
-In this case the peering subnet is `10.0.1.0/24`. The upstream router is using the `10.0.1.1` address. The CORD Quagga is assigned `10.0.1.3`, which is the address used for peering. The upstream router needs to be configured with `10.0.1.3` as its BGP neighbor, and the BGP peering will be established between `10.0.1.1` and `10.0.1.3`.
+In this case the peering subnet is `10.0.1.0/24`. The upstream router is using
+the `10.0.1.1` address. The CORD Quagga is assigned `10.0.1.3`, which is the
+address used for peering. The upstream router needs to be configured with
+`10.0.1.3` as its BGP neighbor, and the BGP peering will be established between
+`10.0.1.1` and `10.0.1.3`.
 
-The `10.0.1.2` address is used by the fabric switch and for the next-hop for routes advertised by the CORD POD.
+The `10.0.1.2` address is used by the fabric switch and for the next-hop for
+routes advertised by the CORD POD.
 
-Of course you are not obliged to use `10.0.1.0/24`, you should use a subnet that makes sense for your peering environment
+Of course you are not obliged to use `10.0.1.0/24`; you should use a subnet
+that makes sense for your peering environment.
 
-## Install and Configure vRouter on ONOS
-The vRouter will be run on the `onos-fabric` cluster that controls the physical fabric switches. 
+## Install and Configure vRouter on ONOS
+
+The vRouter will be run on the `onos-fabric` cluster that controls the
+physical fabric switches.
 
 ### Interface Configuration
 
-Each Quagga-Router pair needs to have interface configuration for the interfaces where the Quagga and upstream router are attached to the fabric. This is where we configure the second IP address that we allocated from the peering subnet. The following shows a configuration example:
+Each Quagga-Router pair needs to have interface configuration for the
+interfaces where the Quagga and upstream router are attached to the fabric.
+This is where we configure the second IP address that we allocated from the
+peering subnet. The following shows a configuration example:
 
 ```
 {
@@ -97,12 +139,19 @@
 ```
 
 * name - an arbitrary name string for the interface
-* ips - configure the second IP from the peering subnet. This will be the same IP address on both the quagga and upstream interfaces.
-* vlan-untagged - configure the same VLAN ID on both interfaces. It doesn't matter exactly what the VLAN ID is, but it must be the same on both the quagga-facing and upstream-facing interfaces.
+* ips - configure the second IP from the peering subnet. This will be the same
+  IP address on both the quagga and upstream interfaces.
+* vlan-untagged - configure the same VLAN ID on both interfaces. It doesn't
+  matter exactly what the VLAN ID is, but it must be the same on both the
+  quagga-facing and upstream-facing interfaces.
 
-This configuration will set up an L2 link between the two fabric switch ports, over which the Quagga and external router can communicate.
+This configuration will set up an L2 link between the two fabric switch ports,
+over which the Quagga and external router can communicate.
 
-The interface configuration can be added to the `/opt/cord_profile/fabric-network-cfg.json` on the head node which contains the initial fabric configuration. Then you can run the following command to refresh the configuration in ONOS:
+The interface configuration can be added to
+`/opt/cord_profile/fabric-network-cfg.json` on the head node, which contains
+the initial fabric configuration. Then you can run the following command to
+refresh the configuration in ONOS:
 
 ```
 docker-compose -p rcord exec xos_ui python /opt/xos/tosca/run.py xosadmin@opencord.org /opt/cord_profile/fabric-service.yaml
@@ -110,9 +159,11 @@
 
 ### Restart the applications
 
-If the segment routing application is already running, it will need to be restarted so that it will notice the new interface configuration.
+If the segment routing application is already running, it will need to be
+restarted so that it will notice the new interface configuration.
 
-The `onos-fabric` CLI can be accessed with the following command run on the head node:
+The `onos-fabric` CLI can be accessed with the following command run on the
+head node:
 
 ```
 $ ssh karaf@onos-fabric -p 8101
@@ -125,46 +176,58 @@
 onos> app activate org.onosproject.segmentrouting
 ```
 
-The `org.onosproject.fpm` application is also needed, and it should be already running after the initial install. Double-check that it is running, and if not activate it.
+The `org.onosproject.fpm` application is also needed, and it should be already
+running after the initial install. Double-check that it is running, and if not
+activate it.
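+
+For example, from the `onos-fabric` CLI (a sketch; `apps -s -a` lists the
+currently active applications):
+
+```
+onos> apps -s -a | grep fpm
+onos> app activate org.onosproject.fpm
+```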
 
 ## Deploy the Quagga Docker Image
 
 ### Download the image and pipework
 
-CORD uses a slightly modified version of Quagga, so the easiest way to deploy this is to use the provided docker image.
+CORD uses a slightly modified version of Quagga, so the easiest way to deploy
+this is to use the provided docker image.
 
 ```
 docker pull opencord/quagga
 ```
 
-We also need to download the `pipework` tool which will be used to connect the docker image to the physical interface that we set aside earlier.
+We also need to download the `pipework` tool which will be used to connect the
+docker image to the physical interface that we set aside earlier.
 
 ```
 wget https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
 chmod +x pipework
 ```
 
-Create a directory for your Quagga configuration files, and create a `bgpd.conf` and `zebra.conf` in there. More on configuring Quagga later.
+Create a directory for your Quagga configuration files, and create a
+`bgpd.conf` and `zebra.conf` in there. More on configuring Quagga later.
 
 ```
 mkdir configs
 ```
 
-Now run the docker image (make sure the path the config directory matches what is on your system):
+Now run the docker image (make sure the path to the config directory matches
+what is on your system):
 
 ```
 sudo docker run --privileged -d -v configs:/etc/quagga --name quagga opencord/quagga
 ```
 
-Finally, we can use the pipework tool to add the physical interface into the container so that Quagga can talk out over the fabric:
+Finally, we can use the pipework tool to add the physical interface into the
+container so that Quagga can talk out over the fabric:
 
 ```
 sudo ./pipework mlx1 -i eth1 quagga 10.0.1.3/24
 ```
 
-This will add host interface `mlx1` to the container with name `quagga` with interface name `eth1` inside the container. The newly added interface will have the IP `10.0.1.3`. This IP address should be the peering subnet address that you want to assign to Quagga.
+This will add host interface `mlx1` to the container with name `quagga` with
+interface name `eth1` inside the container. The newly added interface will have
+the IP `10.0.1.3`. This IP address should be the peering subnet address that
+you want to assign to Quagga.
 
-If you need to change anything about the container (for example if you change the Quagga configuration) you can remove the original container and run a new one:
+If you need to change anything about the container (for example if you change
+the Quagga configuration) you can remove the original container and run a new
+one:
 
 ```
 docker rm -f quagga
@@ -173,13 +236,25 @@
 
 ## Configure Quagga
 
-At this point Quagga should have IP connectivity to the external routers, and it should be able to ping them on the peering subnet.
+At this point Quagga should have IP connectivity to the external routers, and
+it should be able to ping them on the peering subnet.
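+
+One quick way to verify this (assuming the container is named `quagga` and
+the upstream router is at `10.0.1.1`, as in the example above):
+
+```
+docker exec quagga ping -c 3 10.0.1.1
+```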
 
-Now Quagga and the upstream routers can be configured to peer with one another. This configuration of Quagga is going to be highly dependent on the configuration of the upstream network, so it won't be possible to give comprehensive configuration examples here. It is recommended to consult the [Quagga documentation](http://www.nongnu.org/quagga/docs/docs-info.html) for exhaustive information on Quagga's capabilities and configuration. Here I will attempt to provide a few basic examples of Quagga configuration to get you started. You'll have to enhance these with the features and functions that are needed in your network.
+Now Quagga and the upstream routers can be configured to peer with one another.
+This configuration of Quagga is going to be highly dependent on the
+configuration of the upstream network, so it won't be possible to give
+comprehensive configuration examples here. It is recommended to consult the
+[Quagga documentation](http://www.nongnu.org/quagga/docs/docs-info.html) for
+exhaustive information on Quagga's capabilities and configuration. Here I will
+attempt to provide a few basic examples of Quagga configuration to get you
+started. You'll have to enhance these with the features and functions that are
+needed in your network.
 
 ### Zebra Configuration
 
-Regardless of which routing protocols you are using in your network, it is important to configure Zebra's FPM connection to send routes to the vRouter app running on ONOS. This feature was enabled by the patch that was applied earlier when we installed Quagga.
+Regardless of which routing protocols you are using in your network, it is
+important to configure Zebra's FPM connection to send routes to the vRouter app
+running on ONOS. This feature was enabled by the patch that was applied earlier
+when we installed Quagga.
 
 A minimal Zebra configuration might look like this:
 
@@ -191,14 +266,16 @@
 fpm connection ip 10.6.0.1 port 2620
 !
 ```
-The FPM connection IP address is the IP address of one of the `onos-fabric` cluster instance that is running the vRouter app.
+The FPM connection IP address is the IP address of one of the `onos-fabric`
+cluster instances that is running the vRouter app.
 
-If you have other configuration that needs to go in zebra.conf you should add that here as well.
+If you have other configuration that needs to go in `zebra.conf`, you should
+add that here as well.
 
 ### BGP configuration
 
-An example simple BGP configuration for peering with one BGP peer might look like this:
-
+A simple example BGP configuration for peering with one BGP peer might look
+like this:
 
 ```
 hostname bgp
@@ -221,7 +298,13 @@
   !
 ```
 
-This configuration peers with one upstream router (`10.0.1.1`) and advertises one route (`192.168.0.0/16`).
+This configuration peers with one upstream router (`10.0.1.1`) and advertises
+one route (`192.168.0.0/16`).
 
-Pay attention to the configuration to rewrite the next hop of routes that are advertised to the upstream router. A route-map is used to set the next hop of advertised routes to `10.0.1.2`, which is different from the address that Quagga is using to peer with the external router. As mentioned above, it is important that this rewriting is done correctly so that the fabric switch is able to distinguish data plane and control plane traffic.
+Pay attention to the configuration to rewrite the next hop of routes that are
+advertised to the upstream router. A route-map is used to set the next hop of
+advertised routes to `10.0.1.2`, which is different from the address that
+Quagga is using to peer with the external router. As mentioned above, it is
+important that this rewriting is done correctly so that the fabric switch is
+able to distinguish data plane and control plane traffic.
 
diff --git a/podconfig/rcord-physical-example.yml b/podconfig/rcord-physical-example.yml
index d7d1d6d..2a63216 100644
--- a/podconfig/rcord-physical-example.yml
+++ b/podconfig/rcord-physical-example.yml
@@ -24,6 +24,9 @@
 vagrant_vms:
  - 'corddev'
 
+# NOTE: Many of these items are described in more detail in the Build System
+# Config Glossary: https://guide.opencord.org/build_glossary.html
+
 # Location of the cord_profile directory to be mounted in the `corddev` VM by
 # Vagrant.
 #
@@ -31,22 +34,32 @@
 
 # Set these directory paths if the defaults aren't suitable, or if deploying
 # multiple pods from the same source tree where the credentials, keys, and
-# profile should differ.
+# profile should differ. These are paths on the config node.
 #
 # credentials_dir: '/path_to/credentials'
 # pki_dir: '/path_to/pki'
 # ssh_pki_dir: '/path_to/ssh_pki'
 
-# Variables
-# IP address used for the ONOS Fabric
-fabric_ip: '10.6.1.1/24'
+# Variables. Default values are specified here
 
-# IP and range of the management network, which MaaS serves DHCP
-management_ip: '10.6.0.1/24'
-management_network: '10.6.0.0/24'
+# IP address used for the ONOS fabric interface on the head node
+#
+# fabric_ip: '10.6.1.0/24'
 
-# External gateway IP address and range that the head node is on
-external_ip: '10.80.1.1/24'
+# Low and high addresses to assign on the fabric
+#
+# fabric_range_low: '10.6.1.2'
+# fabric_range_high: '10.6.1.100'
+
+# IP and range of the management network, which MaaS serves DHCP.
+#
+# management_ip: '10.6.0.0/24'
+# management_network: '10.6.0.0/24'
+
+# Low and high addresses to assign on the management network
+#
+# management_range_low: '10.6.0.2'
+# management_range_high: '10.6.0.127'
 
 # name or IP of the POD head node, used to SSH to the head node. You can also
 # specify the head node user here in user@hostname format.  This could also be
@@ -54,27 +67,47 @@
 # external DNS is set up.
 headnode: 'cord@10.80.1.200'
 
-# the external IP on the head node and docker registry (on the head node)
-external_gw: '10.80.1.200'
+# External interface device name on head node
+external_iface: 'eth2'
+
+# External interface ip/range on head node
+external_ip: '10.80.1.200/24'
+
+# Gateway IP address
+external_gw: '10.80.1.1'
+
+# address and port of docker registry (on the head node)
 deploy_docker_registry: "10.80.1.200:5000"
 
-# Makefile targets
-# primary build target (the final MaaS build step in this case)
-build_targets:
-  - 'setup-automation'
+# Additional network config documentation can be found here:
+# https://guide.opencord.org/appendix_network_settings.html
 
-copy_cord_prereqs:
-  - 'config-ssh-key'
+# Additional makefile targets
 
+# key-based SSH auth is required for some portions of the build, specifically
+# the 'synchronize' task. Enable this to generate an SSH key on the config node
+# and have the head node trust it.
+# copy_cord_prereqs:
+#   - 'config-ssh-key'
+
+# skipTags is used in MaaS options to allow various parts of the deployment
+# to be skipped
+#
+# switch_support - does not deploy switch boot images to the PXE server
+#
+# reboot - will not reboot the head node after updating its network configuration.
+#          This may mean the network configuration will not take effect, but will
+#          also prevent you from being locked out of the server if there is a
+#          network configuration error.
+#
+# interface_config - will not modify the network configuration of the head node,
+#                    including the consistent naming of the network interfaces
 skipTags:
-  - 'set_compute_node_password'
-
-# Wait until headnode prepped before building containers, for consistent DNS
-docker_images_prereqs:
-  - 'prep-headnode'
+#  - 'switch_support'
+#  - 'reboot'
+#  - 'interface_config'
 
 # Inventory for ansible, used to generate inventory.ini
-# There must be a config, build, head, and compute sections
+# There must be config, build, head, and compute sections
 inventory_groups:
 
   config:
@@ -86,10 +119,17 @@
 
   head:
     headnode.site1.opencord.org:
-      ansible_host: 10.80.1.200
-      ansible_port: 22
-      ansible_user: cord
-      ansible_ssh_pass: cordpass
+# If you do not have DNS set up that will resolve the headnode address, you can
+# specify the IP address here.
+#
+#      ansible_host: 10.80.1.200
+#
+# Set these if you don't have SSH keys set up between the config node (where
+# make runs) and the head node. Also, make sure to enable the 'config-ssh-key'
+# further up in the file to create these.
+#
+#      ansible_user: cord
+#      ansible_ssh_pass: cordpass
 
   compute:
 
diff --git a/podconfig/rcord-physical.yml b/podconfig/rcord-physical.yml
deleted file mode 100644
index dba8023..0000000
--- a/podconfig/rcord-physical.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# rcord-physical-example Pod Config
-# Example configuration for a physical R-CORD pod
-
-cord_scenario: cord
-cord_profile: rcord
-
-
-vagrant_vms:
-  - corddev
-
-# Variables
-credentials_dir: '/opt/credentials'
-pki_dir: '/opt/pki'
-ssh_pki_dir: '/opt/ssh_pki'
-
-fabric_ip: '10.6.1.1/24'
-management_ip: '10.6.0.1/24'
-external_ip: '10.90.0.252/16'
-external_gw: '10.90.0.1'
-external_iface: 'eth0'
-management_network: 10.6.0.0/24
-
-deploy_docker_registry: "10.90.0.252:5000"
-
-headnode: cord@10.90.0.252
-
-# NOTE: The `host_cord_profile_dir` variable below is the path to the cord_profile dir that
-# gets mounted to corddev VM.This `cord_profile` dir typically lives in a directory
-# one level up from the `cord` directory
-host_cord_profile_dir: "~/dev/cord_profile"
-
-skipTags:
-  - 'set_compute_node_password'
-  - 'switch_support'
-
-# Wait until headnode prepped before building containers, for consistent DNS
-docker_images_prereqs:
-  - prep-headnode
-
-copy_cord_prereqs:
-  - config-ssh-key
-
-# node topology
-physical_node_list:
-  - name: head1
-    aliases:
-      - head
-
-# Inventory for ansible, used to generate inventory.ini
-inventory_groups:
-
-  config:
-    localhost:
-      ansible_connection: local
-
-  build:
-    corddev:
-
-  head:
-    head1:
-      ansible_host: 10.90.0.252
-      ansible_port: 22
-      ansible_user: cord
-      ansible_ssh_pass: cord
-
-  compute:
diff --git a/podconfig/rcord-virtual-buildlocal.yml b/podconfig/rcord-virtual-buildlocal.yml
deleted file mode 100755
index cb0e1bd..0000000
--- a/podconfig/rcord-virtual-buildlocal.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
----
-# rcord-virtual Pod Config
-# Creates a virtual multi-node R-CORD pod, aka "rcord-in-a-box"
-# Uses the local server as the "build host", assumes Ubuntu 14.04 server
-
-cord_scenario: cord
-cord_profile: rcord
-
-buildnode: localhost
-
-prep_buildnode_prereqs:
-  - build-local-bootstrap
-
-vagrant_up_prereqs:
-  - prereqs-check
-  - ciab-ovs
-
-# Override setting in scenarios/cord/config.yml with noop
-docker_image_prereqs:
-  - prep-buildnode
-
-deploy_docker_registry: "10.100.198.201:5000"
-
-external_iface: 'eth0'
-
-build_targets:
- - compute1-up
-
-skipTags:
-  - 'set_compute_node_password'
-  - 'switch_support'
-  - 'reboot'
-  - 'interface_config'
-
-# Other old config carried over
-cord_in_a_box: True # what is this for?
-fabric_include_names: eth2
-fabric_include_module_types: omit
-fabric_exclude_names: eth0,eth1
-management_include_names: eth1
-management_exclude_names: eth0,eth2
-
-inventory_groups:
-
-  config:
-    localhost:
-      ansible_connection: local
-
-  build:
-    localhost:
-      ansible_connection: local
-
-  head:
-    head1:
-
-  compute: