[CORD-1569]
Autogenerate documentation of build system variables

Change-Id: I839f46d681e8f6954316f0ea4e9a79395501459f
diff --git a/docs/.gitignore b/docs/.gitignore
index 3a050c1..6f60b59 100644
--- a/docs/.gitignore
+++ b/docs/.gitignore
@@ -1,4 +1,9 @@
+# NOTE - do not put any .md files or directories that contain .md files that
+# gitbook needs to access in this file, or they won't be shown - upstream bugs:
+# https://github.com/GitbookIO/gitbook/issues/931
+# https://github.com/GitbookIO/gitbook/issues/1845
+
 node_modules/
 venv-xosdocs/
 xos/_book/
-xos/swagger/
+scripts/
diff --git a/docs/Makefile b/docs/Makefile
index 1f646c7..b725a42 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,34 +1,44 @@
-SHELL := /bin/bash
+default: serve
 
-default: book
+GENERATED_DOCS = build_glossary.md
 
-build: swagger
-	ln -s ../platform-install/docs platform-install && \
-	ln -s ../../test/cord-tester/docs test && \
-	ln -s ../../orchestration/xos/docs xos && \
-	ln -s ../../orchestration/xos-gui/docs xos-gui && \
-	ln -s ../../orchestration/xos-tosca/docs xos-tosca && \
-	ln -s ../../orchestration/profiles profiles && \
-	gitbook install && gitbook build
-
-book: clean build
+serve: setup
 	gitbook serve
-clean:
-	rm -rf _book; \
-	rm -rf node_modules; \
-	rm platform-install; \
-	rm test; \
-	rm profiles; \
-	rm xos; \
-	rm xos-gui; \
-	rm xos-tosca; \
-	rm -f ../platform-install/docs/docs; \
-	rm -f ../../test/cord-tester/docs/docs; \
-	rm -f ../../orchestration/xos/docs/docs; \
-	rm -f ../../orchestration/xos-gui/docs/docs; \
-	rm -f ../../orchestration/xos-tosca/docs/docs; \
-	rm -f ../../orchestration/profiles/profiles
 
-swagger:
+build: setup
+	gitbook build
+
+setup: clean platform-install test profiles xos xos-gui xos-tosca swagger $(GENERATED_DOCS)
+	gitbook init
+	gitbook install
+
+platform-install:
+	ln -s ../platform-install/docs platform-install
+
+test:
+	ln -s ../../test/cord-tester/docs test
+
+profiles:
+	ln -s ../../orchestration/profiles profiles
+
+xos:
+	ln -s ../../orchestration/xos/docs xos
+
+xos-gui:
+	ln -s ../../orchestration/xos-gui/docs xos-gui
+
+xos-tosca:
+	ln -s ../../orchestration/xos-tosca/docs xos-tosca
+
+build_glossary.md: scripts/descriptions.md scripts/defaults.md.j2 scripts/defaultsdoc.py scripts/markedyaml.py
+	python scripts/defaultsdoc.py -o build_glossary.md
+
+swagger: xos
 	pushd ../../orchestration/xos/docs/; make swagger_docs; popd;
 
+clean:
+	rm -rf $(GENERATED_DOCS)
+	rm -rf _book
+	rm -rf node_modules
+	rm -f platform-install test profiles xos xos-gui xos-tosca
+
diff --git a/docs/README.md b/docs/README.md
index 64278b0..4dd14b3 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,15 +1,42 @@
-# Building and Installing CORD
+# CORD Guide
 
-This guide describes how to build and install CORD.
+This is a curated set of guides that describe how to install, operate, test,
+and develop [CORD](https://opencord.org).
 
-If this is your first encounter with CORD, we suggest you start by
-bringing up an emulated version called _CORD-in-a-Box_.
-It installs CORD on a set of virtual machines running on a single
-physical server. Just follow our instructions for
-[Installing CORD-in-a-Box](install_ciab.md).
+CORD is a community-based open source project. In addition to this guide, you
+can find information about this community, its projects, and its governance on
+the [CORD wiki](https://wiki.opencord.org). This includes early white papers
+and design notes that have shaped [CORD's
+architecture](https://wiki.opencord.org/display/CORD/Documentation).
 
-You can also install CORD on a physical POD. This involves first assembling
-a set of servers and switches, and then pointing the build system at
-that target hardware. Just follow our instructoins for
-[Installing a Physical POD](install_pod.md).
+## Getting Started
+
+If you are new to CORD and would like to get familiar with it, you should start
+by [bringing up a virtual POD on a single physical server](install_virtual.md).
+
+If you want to work on the CORD core or develop a service, please see [Getting
+the Source Code](cord_repo.md) and [Developing for CORD](develop.md).
+
+## Getting Help
+
+See [Troubleshooting and Build Internals](troubleshooting.md) if you're
+having trouble with installing a CORD POD.
+
+The best way to ask for help is to join the CORD Slack channel or mailing
+lists. Information about both can be found at the [CORD
+wiki](https://wiki.opencord.org/display/CORD).
+
+## Making Changes to Documentation
+
+The [guide.opencord.org](http://guide.opencord.org) website is built using the
+[GitBook Toolchain](https://toolchain.gitbook.com/), with the documentation
+root in [build/docs](https://github.com/opencord/cord/blob/{{ book.branch
+}}/docs) in a checked out source tree.  It is built with `make`, and requires
+that gitbook, python, and a few other tools are installed.
+
+Source for individual guides is available in the [CORD code
+repository](https://gerrit.opencord.org); look in the `docs` directory of each
+project, with the documentation rooted in `build/docs`. Updates and
+improvements to this documentation can be submitted through Gerrit.
+
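+As a quick sketch (assuming `make`, gitbook, python, and the other
+prerequisites above are already installed), the guide can be built and
+previewed locally with:
+
+```sh
+cd ~/cord/build/docs    # documentation root in a checked out source tree
+make build              # symlink component docs, generate files, run 'gitbook build'
+make serve              # or build and serve a live preview (gitbook defaults to port 4000)
+```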
 
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 9ebac1c..c96ac6f 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -1,17 +1,18 @@
 # Summary
 
-* [Guide Overview](overview.md)
+* [Guide Overview](README.md)
 * [Terminology](terminology.md)
-* [Building and Installing CORD](README.md)
-    * [Quickstart](quickstarts.md)
-    * [Installing CORD-in-a-Box](install_ciab.md)
-    * [Installing a Physical POD](install_pod.md)
-        * [Network Settings](appendix_network_settings.md)
+* [Building and Installing CORD](install.md)
+    * [Installing a Virtual POD (CORD-in-a-Box)](install_virtual.md)
+    * [Installing a Physical POD](install_physical.md)
         * [Basic Configuration](appendix_basic_config.md)
+        * [Network Settings](appendix_network_settings.md)
+        * [Connecting to Upstream Networks](vrouter.md)
         * [Container Images](appendix_images.md)
         * [vSG Configuration](appendix_vsg.md)
-    * [Connecting to Upstream Networks](vrouter.md)
-    * [Build System Internals](build_internals.md)
+    * [Troubleshooting and Build Internals](troubleshooting.md)
+    * [Building Docker Images](build_images.md)
+    * [Build System Variable Glossary](build_glossary.md)
 * [Operating and Managing CORD](operate/README.md)
     * [Powering Up a POD](operate/power_up.md)
 # My understanding is that ELK Stack is not working right now
@@ -19,7 +20,7 @@
     * [REST API](operate/rest_apis.md)
     * [TOSCA](xos-tosca/README.md)
 # Not targeted at operators; about internal development
-#        * [Development Environment](xos-tosca/devel.md)
+#    * [Development Environment](xos-tosca/devel.md)
 * [Defining Models in CORD](xos/README.md)
     * [XOS Support for Models](xos/dev/xproto.md)
     * [Core Models](xos/core_models.md)
@@ -29,9 +30,10 @@
         * [Implementation Details](xos/dev/sync_impl.md)
     * [Migrating Models to 4.0](xos/migrate_4.0.md)
 * [Developing for CORD](develop.md)
-    * [Getting the Source Code](cord_repo.md)
+    * [Getting the Source Code](getting_the_code.md)
     * [Workflow: Mock Configuration](xos/dev/workflow_mock.md)
-    * [Workflow: Cord-in-a-Box](xos/dev/workflow_ciab.md)
+# this workflow has been added to install_virtual.md
+#    * [Workflow: Cord-in-a-Box](xos/dev/workflow_ciab.md)
     * [Example Service](xos/example_service.md)
     * [Configuring XOS](xos/modules/xosconfig.md)
     * [GUI Development](xos-gui/developer/README.md)
diff --git a/docs/appendix_vsg.md b/docs/appendix_vsg.md
index 253af3f..e38756c 100644
--- a/docs/appendix_vsg.md
+++ b/docs/appendix_vsg.md
@@ -1,61 +1,75 @@
 # vSG Configuration
 
-First, login to the CORD head node CLI and go to the `/opt/cord_profile` directory. To configure the fabric gateway, you will need to edit the file `cord-services.yaml`. You will see a section that looks like this:
+First, log into the CORD head node (`ssh head1` in *CiaB*) and go to the
+`/opt/cord_profile` directory. To configure the fabric gateway, you will need
+to edit the file `cord-services.yaml`. You will see a section that looks like
+this:
 
-```
+```yaml
 addresses_vsg:
-  type: tosca.nodes.AddressPool 
+  type: tosca.nodes.AddressPool
     properties:
-      addresses: 10.6.1.128/26 
-      gateway_ip: 10.6.1.129 
-      gateway_mac: 02:42:0a:06:01:01 
+      addresses: 10.6.1.128/26
+      gateway_ip: 10.6.1.129
+      gateway_mac: 02:42:0a:06:01:01
 ```
 
-Edit this section so that it reflects the fabric address block assigned to the vSGs, as well as the gateway IP and the MAC address that the vSG should use to reach the Internet. 
+Edit this section so that it reflects the fabric address block assigned to the
+vSGs, as well as the gateway IP and the MAC address that the vSG should use to
+reach the Internet.
 
-Once the `cord-services.yaml` TOSCA file has been edited as described above, push it to XOS by running the following:
+Once the `cord-services.yaml` TOSCA file has been edited as described above,
+push it to XOS by running the following:
 
 ```
-cd /opt/cord_profile &&
-docker-compose -p rcord exec xos_ui python /opt/xos/tosca/run.py xosadmin@opencord.org &&
-/opt/cord_profile/cord-services.yaml 
+cd /opt/cord_profile
+docker-compose -p rcord exec xos_ui python /opt/xos/tosca/run.py xosadmin@opencord.org \
+/opt/cord_profile/cord-services.yaml
 ```
 
-This step is complete once you see the correct information in the VTN app configuration in XOS and ONOS. 
+This step is complete once you see the correct information in the VTN app
+configuration in XOS and ONOS.
 
 To check the VTN configuration maintained by XOS:
 
-* Go to the "ONOS apps" page in the CORD GUI:
+ 1. Go to the "ONOS apps" page in the CORD GUI:
    * URL: `http://<head-node>/xos#/onos/onosapp/`
    * Username: `xosadmin@opencord.org`
-   * Password: <content of /opt/cord/build/platform-install/credentials/xosadmin@opencord.org>
-   
-* Select VTN_ONOS_app in the table 
+   * Password: the content of
+     `/opt/cord/build/platform-install/credentials/xosadmin@opencord.org`
 
-*Verify that the Backend status is 1 
+ 2. Select VTN_ONOS_app in the table
 
-To check that the network configuration has been successfully pushed to the ONOS VTN app and processed by it:
+ 3. Verify that the `Backend status` is `1`
 
-* Log into ONOS from the head node 
+To check that the network configuration has been successfully pushed to the
+ONOS VTN app and processed by it:
+
+ 1.  Log into ONOS from the head node
+
     * Command: `ssh -p 8102 onos@onos-cord`
-    * Password: "rocks"
+    * Password: `rocks`
 
-* Run the `cordvtn-nodes` command 
+ 2. Run the `cordvtn-nodes` command
 
-* Verify that the information for all nodes is correct 
+ 3. Verify that the information for all nodes is correct
 
-* Verify that the initialization status of all nodes is COMPLETE This will look like the following:
+ 4.  Verify that the initialization status of all nodes is `COMPLETE`.
+
+This will look like the following:
 
 ```
-onos> cordvtn-nodes 
-	Hostname                      Management IP       Data IP             Data Iface     Br-int                  State 
-	sturdy-baseball               10.1.0.14/24        10.6.1.2/24         fabric         of:0000525400d7cf3c     COMPLETE 
-	Total 1 nodes 
+onos> cordvtn-nodes
+	Hostname                      Management IP       Data IP             Data Iface     Br-int                  State
+	sturdy-baseball               10.1.0.14/24        10.6.1.2/24         fabric         of:0000525400d7cf3c     COMPLETE
+	Total 1 nodes
 ```
 
-* Run the netcfg command. Verify that the updated gateway information is present under publicGateways:
+Run the `netcfg` command. Verify that the updated gateway information is
+present under `publicGateways`:
 
-```
+```json
+onos> netcfg
 "publicGateways" : [
   {
     "gatewayIp" : "10.6.1.193",
@@ -66,3 +80,4 @@
   }
 ],
 ```
+
diff --git a/docs/book.json b/docs/book.json
index 7c92693..1f2d12c 100644
--- a/docs/book.json
+++ b/docs/book.json
@@ -15,16 +15,16 @@
     "versions": {
       "options": [
         {
-          "value": "http://guide.opencord.org",
-          "text": "Master"
+          "value": "/",
+          "text": "Master (Devel)"
         },
         {
-          "value": "http://guide.opencord.org/cord-4.0",
-          "text": "4.0"
+          "value": "/cord-4.0",
+          "text": "4.0 (Stable)"
         },
         {
-          "value": "http://wiki.opencord.org",
-          "text": "3.0 and older"
+          "value": "http://wiki.opencord.org/",
+          "text": "3.0 and previous (old wiki)"
         }
       ]
     }
diff --git a/docs/build_images.md b/docs/build_images.md
new file mode 100644
index 0000000..1fef4df
--- /dev/null
+++ b/docs/build_images.md
@@ -0,0 +1,220 @@
+# Building Docker Images
+
+The current CORD implementation consists of many interrelated Docker images.
+Making sure that the images used in a deployment are consistent with the source
+tree on disk is a challenge and required a tool,
+[imagebuilder](https://github.com/opencord/cord/blob/{{ book.branch
+}}/scripts/imagebuilder.py), to be developed to perform image rebuilds in a
+consistent and efficient manner.
+
+Imagebuilder is currently used for XOS and ONOS images, but not MaaS images.
+
+While imagebuilder will pull down required images from DockerHub and build/tag
+images, it does not push those images or delete obsolete ones.  These tasks are
+left to other software (Ansible, Jenkins) which should take in imagebuilder's
+YAML output and take the appropriate actions.
+
+## Obtaining and rebuilding images
+
+For the normal build process, you won't need to manually download images as the
+`docker-images` make target that runs imagebuilder will automatically be run as
+a part of the build process.
+
+If you do need to rebuild images, there is a `make clean-images` target that
+will force imagebuilder to be run again and images to be moved into place.
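+
+For example, a minimal sketch of forcing the images to be rebuilt on the next
+build (paths assume a source tree checked out to `~/cord` with a configuration
+already generated):
+
+```sh
+cd ~/cord/build
+make clean-images    # causes imagebuilder to run again; no images are deleted
+make -j4 build       # next build re-runs imagebuilder and moves images into place
+```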
+
+## Debugging imagebuilder
+
+If you get an error or think that imagebuilder isn't working correctly, please
+rerun it with the `-vv` ("very verbose") option, read through
+the output carefully, and then post about the issue on the mailing list or
+Slack.
+
+If an image is not found on Dockerhub, you may see a 404 error like the
+following in the logs. If this happens, imagebuilder will attempt to build the
+image from scratch rather than pulling it:
+
+```
+NotFound: 404 Client Error: Not Found ("{"message":"manifest for xosproject/xos-gui-extension-builder:<hash> not found"}")
+```
+
+Run `imagebuilder.py -h` for a list of other supported arguments.
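+
+As a sketch (assuming a build tree checked out to `~/cord`; check `-h` for the
+arguments the build system normally passes), a verbose run can be captured to a
+file for a mailing list or Slack post:
+
+```sh
+cd ~/cord/build
+python scripts/imagebuilder.py -vv |& tee imagebuilder-debug.log
+```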
+
+## How Imagebuilder works
+
+The imagebuilder program performs the following steps when run:
+
+ 1. Reads the [repo manifest file](https://gerrit.opencord.org/gitweb?p=manifest.git;a=blob;f=default.xml)
+    (checked out as `.repo/manifest`) to get a list of the CORD git repositories.
+
+ 2. Reads the [build/docker_images.yml](https://github.com/opencord/cord/blob/{{ book.branch }}/docker_images.yml)
+    file and the generated `cord/build/genconfig/config.yml` file (which
+    contains a `docker_image_whitelist` list from the scenario), to determine
+    which containers are needed for this POD configuration.
+
+ 3. For every container that is needed, reads the Dockerfile and determines if
+    any parent images are needed, and creates a tree to order image building.
+
+ 4. Determines which images need to be rebuilt based on:
+
+   - Whether the image exists and has current tags added to it.
+   - Whether the Docker build context is *dirty* or differs (is on a different
+     branch) from the git tag specified in the repo manifest.
+   - Whether the image's parent (or grandparent, etc.) needs to be rebuilt.
+
+ 5. Using this information downloads (pulls) or builds images as needed in a
+    way that is consistent with the CORD source that is on disk.  If an image
+    build is needed, the Docker output of that build is saved to
+    `build/image_logs` on the system where Imagebuilder executes (the
+    `buildhost` in inventory).
+
+ 6. Tags the image with the `candidate` and (if clean) git hash tags.
+
+ 7. Creates a YAML output file that describes the work it performed, for later
+    use (pushing images, retagging, etc.), and optionally a graphviz `.dot` graph
+    file showing the relationships between images.
+
+## Image Tagging
+
+CORD container images frequently have multiple tags. The two most common ones
+are:
+
+ * The string `candidate`, which says that the container is ready to be
+   deployed on a CORD POD
+ * The git commit hash, which is either pulled from DockerHub, or applied when
+   a container is built from an untouched (according to git) source tree.
+   Images built from a modified source tree will not be tagged in this way.
+
+Imagebuilder uses this git hash tag, as well as labels on the image that record
+the git repos of parent images, to determine whether an image is correctly
+built from the checked out source tree.
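+
+A quick way to see these tags on a local system (the image name below is purely
+illustrative):
+
+```sh
+docker images | grep candidate                    # images tagged as ready to deploy
+docker images xosproject/xos-synchronizer-base    # shows candidate and git hash tags, if any
+```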
+
+## Image labels
+
+Imagebuilder uses a Docker label scheme, added to the image when it is built,
+to determine whether an image needs to be rebuilt.  Docker images used
+in CORD must apply labels in their Dockerfiles which are specified by
+[label-schema.org](http://label-schema.org) - see there for examples, and below
+for a few notes that clear up the ambiguity within that spec.
+
+Required labels for every CORD image:
+
+ - `org.label-schema.version`
+ - `org.label-schema.name`
+ - `org.label-schema.vcs-url`
+ - `org.label-schema.build-date`
+
+Required for clean builds:
+
+ - `org.label-schema.version` : *git branch name, ex: `opencord/master`,
+   `opencord/cord-4.0`, etc.*
+ - `org.label-schema.vcs-ref` : *the full 40 character SHA-1 git commit hash,
+   not shortened*
+
+Required for dirty builds:
+
+ - `org.label-schema.version` : *set to the string `dirty` if there are any
+   differences from the master commit to the build context (either on a
+   different branch, or untracked/changed files in context)*
+ - `org.label-schema.vcs-ref` : *set to a commit hash if the build context is
+   clean (i.e., on another unnamed branch/patchset), or the empty string if the
+   build context contains untracked/changed files.*
+
+For images that use components from another repo (like chameleon being
+integrated with the XOS containers, or the maven repo, which contains artifacts
+from multiple onos-apps repos), the following labels should be set for every
+sub-component, with the repo name (same as `org.label-schema.name`) replacing
+`<reponame>`, and each value being the same as the corresponding
+`org.label-schema` value would be:
+
+ - `org.opencord.component.<reponame>.version`
+ - `org.opencord.component.<reponame>.vcs-ref`
+ - `org.opencord.component.<reponame>.vcs-url`
+
+These labels are applied by using the `ARG` and `LABEL` option in the
+Dockerfile. The following is an example set of labels for an image that uses
+files from the chameleon and XOS repositories as components:
+
+```
+# Label image
+ARG org_label_schema_schema_version=1.0
+ARG org_label_schema_name=openstack-synchronizer
+ARG org_label_schema_version=unknown
+ARG org_label_schema_vcs_url=unknown
+ARG org_label_schema_vcs_ref=unknown
+ARG org_label_schema_build_date=unknown
+ARG org_opencord_vcs_commit_date=unknown
+ARG org_opencord_component_chameleon_version=unknown
+ARG org_opencord_component_chameleon_vcs_url=unknown
+ARG org_opencord_component_chameleon_vcs_ref=unknown
+ARG org_opencord_component_xos_version=unknown
+ARG org_opencord_component_xos_vcs_url=unknown
+ARG org_opencord_component_xos_vcs_ref=unknown
+
+LABEL org.label-schema.schema-version=$org_label_schema_schema_version \
+      org.label-schema.name=$org_label_schema_name \
+      org.label-schema.version=$org_label_schema_version \
+      org.label-schema.vcs-url=$org_label_schema_vcs_url \
+      org.label-schema.vcs-ref=$org_label_schema_vcs_ref \
+      org.label-schema.build-date=$org_label_schema_build_date \
+      org.opencord.vcs-commit-date=$org_opencord_vcs_commit_date \
+      org.opencord.component.chameleon.version=$org_opencord_component_chameleon_version \
+      org.opencord.component.chameleon.vcs-url=$org_opencord_component_chameleon_vcs_url \
+      org.opencord.component.chameleon.vcs-ref=$org_opencord_component_chameleon_vcs_ref \
+      org.opencord.component.xos.version=$org_opencord_component_xos_version \
+      org.opencord.component.xos.vcs-url=$org_opencord_component_xos_vcs_url \
+      org.opencord.component.xos.vcs-ref=$org_opencord_component_xos_vcs_ref
+```
+
+Labels on a built image can be seen by running `docker inspect <image name or id>`.
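+
+For example (the image name below is illustrative), the `Labels` section of the
+output shows the `org.label-schema.*` and `org.opencord.component.*` values
+described above:
+
+```sh
+docker inspect xosproject/openstack-synchronizer:candidate | grep -A 20 '"Labels"'
+```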
+
+## Adding a new Docker image to CORD
+
+There are a few cases where an image needs to be added to CORD during the
+development process.
+
+### Adding an image developed outside of CORD
+
+There are cases where a 3rd party image developed outside of CORD may be
+needed. This is the case with ONOS, Redis, and a few other pieces of software
+that are already containerized, and we deploy as-is (or with minor
+modifications).
+
+To do this, add the full name of the image, including a version tag, to the
+`https://github.com/opencord/cord/blob/{{ book.branch }}/docker_images.yml`
+file, and to `docker_image_whitelist` list in the
+`scenarios/<scenario name>/config.yml` file.
+
+These images will be retagged with a `candidate` tag after being pulled.
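+
+As an illustrative check (using redis as the example third-party image; exact
+file contents vary between releases and scenarios):
+
+```sh
+cd ~/cord/build
+grep -n redis docker_images.yml scenarios/single/config.yml   # listed in both places?
+make clean-images                                             # pick up the change on the next build
+```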
+
+### Adding a synchronizer image
+
+Adding a synchronizer image is usually as simple as adding it to the
+`buildable_images` list in the `docker_images.yml` file (see that file for the
+specific format), then making sure the image name is listed in the
+`docker_image_whitelist` list in the `scenarios/<scenario name>/config.yml` file.
+
+If you are adding a new service that is not in the repo manifest yet, you may
+have to add your service's directory to the `.repo/manifest.xml` file and then
+list it in `build/docker_images.yml`, so the synchronizer image will then be
+built locally.
+
+### Adding other CORD images
+
+If you want imagebuilder to build an image from a Dockerfile somewhere in the
+CORD source tree, you need to add it to the `buildable_images` list in the
+`docker_images.yml` file (see that file for the specific format), then make
+sure the image name is listed in the `docker_image_whitelist` list in the
+`scenarios/<scenario name>/config.yml` file.
+
+Note that you don't need to add external parent images to `pull_only_images`
+in this manner - those are determined by the `FROM` line in the `Dockerfile`.
+
+## Automating image builds
+
+There is a [Jenkinsfile.imagebuilder](https://github.com/opencord/cord/blob/{{
+book.branch }}/Jenkinsfile.imagebuilder) that can be run in a Jenkins
+instance and will build and push images to DockerHub. This is how the CORD
+team pre-builds and publishes images for public use.
+
diff --git a/docs/build_internals.md b/docs/build_internals.md
deleted file mode 100644
index 7e02727..0000000
--- a/docs/build_internals.md
+++ /dev/null
@@ -1,356 +0,0 @@
-# Build System Internals
-
-The following describes the internals of the build system, which (in
-brief) walks through the following four steps:
-
-* Setup your build environment. On a bare Ubuntu 14.04 system, this can be
-done by running `scripts/cord-bootstrap.sh` or manually.
-
-* Pick a POD Config, such as `rcord-mock.yml` from the `podconfig` directory.
-
-* Run `make PODCONFIG=podconfig.yml config` (see `podconfig` directory for
-   filenames) to generate a configuration into the `genconfig/` directory.
-
-* Run `make build` to build CORD.
-
-## Setup the Build Environment
-
-### Bootstrap Script
-
-The script `scripts/cord-bootstrap.sh` bootstraps an Ubuntu
-14.04 system (such as a CloudLab node) by installing the proper tools and
-checkout the codebase with repo.
-
-It can be downloaded via:
-
-<pre><code>curl -o ~/cord-bootstrap.sh https://raw.githubusercontent.com/opencord/cord/{{ book.branch }}/scripts/cord-bootstrap.sh
-chmod +x cord-bootstrap.sh</code></pre>
-
-The bootstrap script has the following useful options:
-
-```
-Usage for ./cord-bootstrap.sh:
-  -d                           Install Docker for local scenario.
-  -h                           Display this help message.
-  -p <project:change/revision> Download a patch from gerrit. Can be repeated.
-  -t <target>                  Run 'make -j4 <target>' in cord/build/. Can be repeated.
-  -v                           Install Vagrant for mock/virtual/physical scenarios.
-```
-
-The `-p` option downloads a patch from gerrit, and the syntax for this is
-`<project path>:<changeset>/<revision>`.  It can be used multiple
-time. For example:
-
-```
-./cord-bootstrap.sh -p build/platform-install:1233/4 -p orchestration/xos:1234/2
-```
-
-checks out the `platform-install` repo with changeset 1233, patchset 4, and
-`xos` repo changeset 1234, revision 2.
-
-You can find the project path in the `repo` manifest file: [manifest/default.xml](https://gerrit.opencord.org/gitweb?p=manifest.git;a=blob;f=default.xml).
-
-In some cases, you may see a message like this if you install software that
-adds you to a group and you aren't already a member:
-
-```
-You are not in the group: libvirtd, please logout/login.
-You are not in the group: docker, please logout/login.
-```
-
-In such cases, please logout and login to the system to gain the proper group
-membership. Note that any patches specified will be downloaded, but no make
-targets will be run if you're not in the right groups.
-
-#### Examples: cord-boostrap.sh
-
-Download source code and prep for a local build by installing Docker
-
-```
-./cord-bootstrap.sh -d
-```
-
-An `rcord-local` config is built from the {{ book.branch }} branch. Note that the make targets may not run if you aren't already in the `docker` group, so you'd need to logout/login and
-rerun them.
-
-```
-./cord-bootstrap.sh -d -t "PODCONFIG=rcord-local.yml config" -t "build"
-```
-
-A prep for a mock/virtual/physical build, with a gerrit patchset applied:
-
-```
-./cord-bootstrap.sh -v -p orchestration/xos:1000/1
-```
-
-A virtual rcord pod, with tests runs afterward. Assumes that you're already in
-the `libvirtd` group:
-
-```
-./cord-bootstrap.sh -v -t "PODCONFIG=rcord-virtual.yml config" -t "build" -t "pod-test"
-```
-
-### Manual Setup
-
-The following tools are required to get started up CORD:
-
- - [Ansible](https://docs.ansible.com/ansible/intro_installation.html)
- - [Vagrant](https://www.vagrantup.com/downloads.html)
- - [Repo](https://source.android.com/source/downloading#installing-repo)
- - [Docker](https://www.docker.com/community-edition)
-
-Downloading the source tree can be done by running:
-
-<pre><code>mkdir cord && \
-cd cord && \
-repo init -u https://gerrit.opencord.org/manifest -b {{ book.branch }} && \
-repo sync</code></pre>
-
-The build system can be found in the `cord/build/` directory.
-
-## Configuring a Build
-
-### POD Config
-
-Configuration for a specific build, specified in a YAML file that is used to
-generate other configuration files.  These also specify the scenario and
-profile to be used, allow for override the configuration in various ways, such
-as hostnames, passwords, and other ansible inventory specific items. These
-are specified in the `podconfigs` directory.
-
-A minimal POD Config file must define:
-
-`cord_scenario` - the name of the scenario to use, which is defined in a
-directory under `scenarios`.
-
-`cord_profile` - the name of a profile to use, defined as a YAML file in
-`platform-install/profile_manifests`.
-
-### Scenarios
-
-Defines the physical or virtual environment that CORD will be installed
-into, a default mapping of ansible groups to nodes, the set of Docker images
-that can be built, and software and platform features are installed onto those
-nodes. Scenarios are subdirectories of the `scenarios` directory, and consist
-of a `config.yaml` file and possibly VM's specified in a `Vagrantfile`.
-
-#### Included Scenarios
-
-- `local`: Minimal set of containers running locally on the development host
-- `mock`: Creates a single Vagrant VM with containers and DNS set up, without
-  synchronizers
-- `single`: Creates a single Vagrant VM with containers and DNS set up, with
-  synchronizers and optional ElasticStack/ONOS
-- `cord`: Physical or virtual multi-node CORD pod, with MaaS and OpenStack
-- `opencloud`: Physical or virtual multi-node OpenCloud pod, with OpenStack
-
-### Profile
-
-The set of services that XOS on-boards into CORD -- the  _Service
-Graph_, and other per-profile configuration for a CORD deployment.
-These are located in `platform-install/profile_manifests`.
-
-## Config Generation Overview
-
-When a command to generate config such as `make PODCONFIG=rcord-mock.yml
-config` is run, the following steps happen:
-
-1. The POD Config file is read, in this case `genconfig/rcord-mock.yml`, which
-   specifies the scenario and profile.
-2. The Scenario config file is read, in this case `scenario/mock/config.yml`.
-3. The contents of these files are combined into a master config variable, with
-   the POD Config overwriting any config set in the Scenario.
-4. The entire master config is written to `genconfig/config.yml`.
-5. The `inventory_groups` variable is used to generate an ansible inventory
-   file and put in `genconfig/inventory.ini`.
-6. Various variables are used to generate the makefile config file
-   `genconfig/config.mk`. This sets the targets invoked by `make build`
-
-Note that the combination of the POD and Scenaro config in step #3 is not a
-merge. If you define an item in the root of the POD Config that has subkeys,
-it will overwrite every subkey defined in the Scenario.  This is most noticable
-when setting the `inventory_groups` or `docker_image_whitelist`
-variable. If changing either in a POD Config, you must recreate the
-entire structure or list. This may seem inconvenient, but other list
-or tree merging strategies lack a way to remove items from a tree
-structure.
-
-## Build Process Overview
-
-The build process is driven by running `make`. The two most common makefile
-targets are `config` and `build`, but there are also utility targets that are
-handy to use during development.
-
-### `config` make target
-
-`config` requires a `PODCONFIG` argument, which is a name of a file in the
-`podconfig` directory.  `PODCONFIG` defaults to `invalid`, so if you get errors
-claiming an invalid config, you probably didn't set it, or set it to a filename
-that doesn't exist.
-
-#### Examples: `make config`
-
-`make PODCONFIG=rcord-local.yml config`
-
-`make PODCONFIG=opencloud-mock.yml config`
-
-### `build` make target
-
-`make build` performs the build process, and takes no arguments.  It may run
-different targets specified by the scenario.
-
-Most of the build targets in the Makefile don't leave artifacts behind, so we
-write a placeholder file (aka "sentinels" or "empty targets") in the
-`milestones` directory.
-
-### Utility make targets
-
-There are various utility targets:
-
- - `printconfig`: Prints the configured scenario and profile.
-
- - `xos-teardown`: Stop and remove a running set of XOS docker containers
-
- - `collect-diag`: Collect detailed diagnostic information on a deployed head
-   and compute nodes, into `diag-<datestamp>` directory on the head node.
-
- - `compute-node-refresh`: Reload compute nodes brought up by MaaS into XOS,
-   useful in the cord virtual and physical scenarios
-
- - `pod-test`: Run the `platform-install/pod-test-playbook.yml`, testing the
-   virtual/physical cord scenario.
-
- - `vagrant-destroy`: Destroy Vagrant containers (for mock/virtual/physical
-   installs)
-
- - `clean-images`: Have containers rebuild during the next build cycle. Does
-   not actually delete any images, just causes imagebuilder to be run again.
-
- - `clean-genconfig`: Deletes the `make config` generated config files in
-   `genconfig`, useful when switching between podconfigs
-
- - `clean-profile`: Deletes the `cord_profile` directory
-
- - `clean-all`: Runs `vagrant-destroy`, `clean-genconfig`, and `clean-profile`
-   targets, removes all milestones. Good for resetting a dev environment back
-   to an unconfigured state.
-
- - `clean-local`:  `clean-all` but for the `local` scenario - Runs
-   `clean-genconfig` and `clean-profile` targets, removes local milestones.
-
-The `clean-*` utility targets should modify the contents of the milestones
-directory appropriately to cause the steps they clean up after to be rerun on
-the next `make build` cycle.
-
-### Target Logging
-
-`make` targets that are built will create a per-target log file in the `logs`
-directory. These are prefixed with a datestamp which is the same for every
-target in a single run of make - re-running make will result in additional sets
-of logs, even for the same target.
-
-### Tips and Tricks
-
-#### Debugging Make Failures
-
-If you have a build failure and want to know which targets completed, running:
-
-```
-ls -ltr milestones ; ls -ltr logs
-```
-
-And looking for logfiles without a corresponding milestone will point you to
-the make target(s) that failed.
-
-#### Update XOS Container Images
-
-To rebuild and update XOS container images, run:
-
-```
-make xos-update-images
-make -j4 build
-```
-
-This will build new copies of all the images, then when build is run the newly
-built containers will be restarted.
-
-If you additionally want to stop all the XOS containers, clear the database,
-and reload the profile, use `xos-teardown`:
-
-```
-make xos-teardown
-make -j4 build
-```
-
-This will teardown the XOS container set, tell the build system to rebuild
-images, then perform a build and reload the profile.
-
-#### Use ElasticStack or ONOS with the `single` scenario
-
-The single scenario is a medium-weight scenario for synchronizer development,
-and has optional ElasticStack or ONOS functionality.
-
-To use these, you would invoke the ONOS or ElasticStack milestone target before
-the `build` target:
-
-```
-make PODCONFIG=rcord-single.yml config
-make -j4 milestones/deploy-elasticstack
-make -j4 build
-```
-
-or
-
-```
-make PODCONFIG=opencloud-single.yml config
-make -j4 milestones/deploy-onos
-make -j4 build
-```
-
-If you want to use both in combination, make sure to run the ElasticStack
-target first, so ONOS can send logs to ElasticStack.
-
-### Building Docker Images with imagebuilder.py
-
-For docker images for XOS (and possibly others in the future) the build system
-uses the imagebuilder script.  Run `imagebuilder.py -h` for a list of arguments
-it supports.
-
-For Docker images built by imagebuilder, the docker build logs are located in
-the `image_logs` directory on the build host, which may differ between
-scenarios.
-
-The full list of all buildable images is in `docker_images.yml`, and the set of
-images pulled in a particular build is controlled by the
-`docker_image_whitelist` variable that is set on a per-scenario basis.
-
-This script is in charge of guaranteeing that the code that has been checked
-out and containers used by the system have the same code in them.  This is a
-somewhat difficult task as we have parent/child relationships between
-containers as well as components which are in multiple git repos in the source
-tree and all of which could change independently, be on different branches, or
-be manually modified during development. imagebuilder does this through a
-combination of tagging and labeling which allows images to be prebuilt and
-downloaded from dockerhub while still maintaining these guarantees.
-
-imagebuilder takes as input a YAML file listing the images to be built, where
-the Dockerfiles for those containers are located, and then goes about building
-and tagging images.   The result of an imagebuilder run is:
-
- - Docker images in the local context
- - Optionally:
-   - A YAML file which describes what actions imagebuilder performed (the `-a`
-     option, default is `ib_actions.yml` )
-   - A DOT file for graphviz that shows container relationships
-
-While imagebuilder will pull down required images from dockerhub and build/tag
-images, it does not push those images or delete obsolete ones.  These tasks are
-left to other software (Ansible, Jenkins) which should take in imagebuilder's
-YAML output and take the appropriate actions.
-
-Additionally, there may be several operational tasks that take this as input.
-Updating a running pod might involve stopping containers that have updated
-images, starting containers with the new image, handling any errors if new
-containers don't come up, then removing the obsolete images. These tasks go
-beyond image building and are left to the deployment system.
-
diff --git a/docs/cord_repo.md b/docs/cord_repo.md
deleted file mode 100644
index a087fdb..0000000
--- a/docs/cord_repo.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Getting the Source Code
-
-## Install repo
-Repo is a tool from Google that help us managing the code base.
-
-```
-curl https://storage.googleapis.com/git-repo-downloads/repo > ~/repo && \
-sudo chmod a+x repo && \
-sudo cp repo /usr/bin
-```
-
-## Download CORD Repositories
-
-<pre><code>mkdir ~/cord && \
-cd ~/cord && \
-repo init -u https://gerrit.opencord.org/manifest -b {{ book.branch }} && \
-repo sync</code></pre>
-
->NOTE: `master` is used as an example. You can substitute your favorite
->branch for `master`, for example, `cord-4.0` or `cord-3.0`. You can
->also use  flavor-specific manifests such as `mcord` or `ecord`. The
->flavor you use here is not correlated to the profile you will choose
->to run later, but it is suggested that you use the corresponding
->manifest for the deployment you want. For example, if you use the
->`ecord` manifest then it would be typical to deploy the
->`ecord.yml` service profile. 
-
-When this is complete, a listing (`ls`) inside this directory should yield output similar to:
-
-```
-ls -F
-build/         incubator/     onos-apps/     orchestration/ test/
-```
-
-##  Contribute Code to CORD
-
-We use [Gerrit](https://gerrit.opencord.org) to manage the code base.
-For more information about how to commit patches to Gerrit, click
-[here](https://wiki.opencord.org/display/CORD/Getting+the+Source+Code).
-For a general introduction to ways you can participate and contribute
-to the project, check out the
-[CORD wiki](https://wiki.opencord.org/display/CORD/Contributing+to+CORD).
diff --git a/docs/develop.md b/docs/develop.md
index 4d10299..779a71b 100644
--- a/docs/develop.md
+++ b/docs/develop.md
@@ -1,8 +1,9 @@
 # Developing for CORD
 
-This guide describes how to develop for CORD. It includes two
-example workflows, one based on a lightweight development
-environment that runs on a laptop and a more realistic environment
-that runs an emulated version of a CORD POD. It also documents
-the key elements involved in developing services that can be on-boarded
-into CORD.
+This guide describes how to develop for CORD. It includes two example
+workflows, one based on a lightweight development environment that runs on a
+laptop and a more realistic environment that runs an emulated version of a CORD
+POD. It also documents the key elements involved in developing services that
+can be on-boarded into CORD.
+
+
diff --git a/docs/getting_the_code.md b/docs/getting_the_code.md
new file mode 100644
index 0000000..f086399
--- /dev/null
+++ b/docs/getting_the_code.md
@@ -0,0 +1,70 @@
+# Getting the Source Code
+
+## Install repo
+
+[repo](https://code.google.com/archive/p/git-repo/) is a tool from Google that
+works with Gerrit and allows us to manage the multiple git repos that make up
+the CORD code base.
+
+If you don't already have `repo` installed, this may be possible with your
+system package manager, or using the [instructions on the android source
+site](https://source.android.com/source/downloading#installing-repo), or by
+using the following commands which download/verify/install it:
+
+```sh
+curl -o /tmp/repo https://storage.googleapis.com/git-repo-downloads/repo
+echo 'e147f0392686c40cfd7d5e6f332c6ee74c4eab4d24e2694b3b0a0c037bf51dc5  /tmp/repo' | sha256sum -c -
+sudo mv /tmp/repo /usr/local/bin/repo
+sudo chmod a+x /usr/local/bin/repo
+```
+
+## Download CORD repositories
+
+The `cord` repositories are usually checked out to `~/cord` in most of our
+examples and deployments:
+
+```sh
+mkdir ~/cord && \
+cd ~/cord && \
+repo init -u https://gerrit.opencord.org/manifest -b {{ book.branch }} && \
+repo sync
+```
+
+> NOTE: `-b` specifies the branch name. Development work goes on in `master`,
+> and there are also specific stable branches such as `cord-4.0` that can be
+> used.
+
+When this is complete, a listing (`ls`) inside this directory should yield
+output similar to:
+
+```sh
+$ ls
+build		component	incubator	onos-apps	orchestration	test
+```
+
+## Downloading patchsets
+
+Once you've downloaded a CORD source tree, you can download patchsets from
+Gerrit with the following command:
+
+```
+repo download orchestration/xos 1234/3
+```
+
+Which downloads a patch for the `xos` git repo, patchset number `1234` and
+version `3`.
+
+Also see [Configuring your Development Environment: cord-bootstrap.sh
+script](install.md#cord-bootstrap.sh-script) for instructions on downloading
+patchsets during a build that uses the `cord-bootstrap.sh` script.
+
+## Contributing code to CORD
+
+We use [Gerrit](https://gerrit.opencord.org) to manage the CORD code base. For
+more information see [Working with
+Gerrit](https://wiki.opencord.org/display/CORD/Working+with+Gerrit).
+
+For a general introduction to ways you can participate and contribute to the
+project, see [Contributing to
+CORD](https://wiki.opencord.org/display/CORD/Contributing+to+CORD).
+
diff --git a/docs/install.md b/docs/install.md
new file mode 100644
index 0000000..d209817
--- /dev/null
+++ b/docs/install.md
@@ -0,0 +1,159 @@
+# Building and Installing CORD
+
+## Starting points
+
+If this is your first encounter with CORD, we suggest you start by bringing up
+a Virtual Pod, which installs CORD on a set of virtual machines running on a
+single physical server: [Installing a Virtual Pod
+(CORD-in-a-Box)](install_virtual.md).
+
+You can also install CORD on a physical POD, as would be done in a production
+environment, which involves first assembling a set of servers and switches, and
+then pointing the build system at that target hardware: [Installing a Physical
+POD](install_physical.md).
+
+If you are interested in developing a new CORD service, working on the XOS GUI,
+or performing other development tasks, see [Developing for CORD](develop.md).
+
+If you've run into trouble or want to know more about the CORD build process,
+please see [Troubleshooting and Build Internals](troubleshooting.md).
+
+## Configuring your Development Environment
+
+CORD has a unified development and deployment environment which uses the
+following tools:
+
+ - [Ansible](https://docs.ansible.com/ansible/intro_installation.html)
+ - [Repo](https://source.android.com/source/downloading#installing-repo)
+
+And either:
+
+ - [Docker](https://www.docker.com/community-edition), for *local* build
+   scenarios
+ - [Vagrant](https://www.vagrantup.com/downloads.html), for all other
+   scenarios
+
+You can manually install these on your development system - see [Getting the
+Source Code](getting_the_code.md) for more detailed instructions for checking
+out the CORD source tree.
+
+### cord-bootstrap.sh script
+
+If you're working on an Ubuntu 14.04 system (CloudLab or another test
+environment), you can use the `cord-bootstrap.sh` script to install these tools
+and check out the CORD repo tree to `~/cord`. This hasn't been tested on
+other versions or distributions.
+
+<pre><code>
+curl -o ~/cord-bootstrap.sh https://raw.githubusercontent.com/opencord/cord/{{ book.branch }}/scripts/cord-bootstrap.sh
+chmod +x cord-bootstrap.sh
+</code></pre>
+
+The bootstrap script has the following options:
+
+```
+Usage for ./cord-bootstrap.sh:
+  -d                           Install Docker for local scenario.
+  -h                           Display this help message.
+  -p <project:change/revision> Download a patch from gerrit. Can be repeated.
+  -t <target>                  Run 'make -j4 <target>' in cord/build/. Can be repeated.
+  -v                           Install Vagrant for mock/virtual/physical scenarios.
+```
+
+Using the `-v` option is required to install Vagrant for running a [Virtual Pod
+(CiaB)](install_virtual.md), whereas `-d` is required to install Docker for a
+[Local Workflow](xos/dev/workflow_local.md).
+
+The `-p` option downloads a patch from gerrit, and the syntax for this is
+`<project path>:<changeset>/<revision>`.  It can be used multiple
+times. For example:
+
+```
+./cord-bootstrap.sh -p build/platform-install:1233/4 -p orchestration/xos:1234/2
+```
+
+checks out the `platform-install` repo with changeset 1233, patchset 4, and
+`xos` repo changeset 1234, revision 2.
+
+You can find the project path in the `repo` manifest file:
+[manifest/default.xml](https://gerrit.opencord.org/gitweb?p=manifest.git;a=blob;f=default.xml).
+
+You can also run make targets with the `-t` option; `-t build` is the same as
+running `cd ~/cord/build ; make -j4 build` after the rest of the installations
+and downloads have completed.  
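+
+For example, to install Vagrant and then configure and build a virtual R-CORD
+pod in a single step:
+
+```sh
+./cord-bootstrap.sh -v -t "PODCONFIG=rcord-virtual.yml config" -t "build"
+```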
+
+In some cases, you may see a message like this if you install software that
+adds you to a group and you aren't already a member:
+
+```
+You are not in the group: libvirtd, please logout/login.
+You are not in the group: docker, please logout/login.
+```
+
+In such cases, please logout and login to the system to gain the proper group
+membership.  Another way to tell if you're in the right groups:
+
+```
+~$ groups
+xos-PG0 root
+~$ vagrant status
+Call to virConnectOpen failed: Failed to connect socket to '/var/run/libvirt/libvirt-sock': Permission denied
+~$ logout
+~$ ssh node_name.cloudlab.us
+~$ groups
+xos-PG0 root libvirtd
+```
+
+Note that if you aren't in the right group, any patches specified by `-p` will
+be downloaded, but no make targets specified by `-t` will be run - you will
+need to `cd ~/cord/build` and run those targets manually.
+
+## Configuring a Build
+
+The CORD build process is designed to be modular and configurable with only a
+handful of YAML files.
+
+### POD Config
+
+The top level configuration for a build is the *POD config* file, which is a
+YAML file stored in
+[build/podconfig](https://github.com/opencord/cord/tree/master/podconfig) that
+contains a list of variables that control how the build proceeds, and can
+override the configuration of the rest of the build. 
+
+A minimal POD Config file must define two variables:
+
+`cord_scenario` - the name of the *scenario* to use, which is defined in a
+directory under [build/scenarios](https://github.com/opencord/cord/tree/master/scenarios).
+
+`cord_profile` - the name of a *profile* to use, defined as a YAML file in
+[build/platform-install/profile_manifests](https://github.com/opencord/platform-install/tree/master/profile_manifests).
+
+The included POD configs are generally named `<profile>-<scenario>.yml`. 
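+
+As a minimal sketch (the file name and values below are illustrative; see the
+existing files in `build/podconfig` for real examples):
+
+```sh
+cat > ~/cord/build/podconfig/my-rcord-mock.yml <<EOF
+# my-rcord-mock.yml - minimal example POD config
+cord_scenario: mock
+cord_profile: rcord
+EOF
+```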
+
+### Profiles
+
+A profile is the set of services that XOS on-boards into CORD -- the _Service
+Graph_ -- plus other per-profile configuration for a CORD deployment.  These
+are located in
+[build/platform-install/profile_manifests](https://github.com/opencord/platform-install/tree/master/profile_manifests).
+
+### Scenarios
+
+Scenarios define the physical or virtual environment that CORD will be
+installed into, a default mapping of ansible groups to nodes, the set of Docker
+images that can be built, and which software and platform features are
+installed onto those nodes. Scenarios are subdirectories of the
+[build/scenarios](https://github.com/opencord/cord/tree/master/scenarios)
+directory, and consist of a `config.yaml` file and possibly VM's specified in a
+`Vagrantfile`.
+
+The current set of scenarios: 
+
+- `local`: Minimal set of containers running locally on the development host
+- `mock`: Creates a single Vagrant VM with containers and DNS set up, without
+  synchronizers
+- `single`: Creates a single Vagrant VM with containers and DNS set up, with
+  synchronizers and optional ElasticStack/ONOS
+- `cord`: Physical or virtual multi-node CORD pod, with MaaS and OpenStack
+- `opencloud`: Physical or virtual multi-node OpenCloud pod, with OpenStack
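+
+For example, to generate a configuration for the `mock` scenario with the
+`rcord` profile and then build it (using the included `rcord-mock.yml` POD
+config):
+
+```sh
+cd ~/cord/build
+make PODCONFIG=rcord-mock.yml config   # writes the generated config to genconfig/
+make -j4 build                         # runs the build targets defined by the scenario
+```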
+
diff --git a/docs/install_physical.md b/docs/install_physical.md
new file mode 100644
index 0000000..9120567
--- /dev/null
+++ b/docs/install_physical.md
@@ -0,0 +1,623 @@
+# Installing a Physical POD
+
+The following is a detailed, step-by-step recipe for installing a physical POD
+with multiple physical servers and switches.
+
+If you are new to CORD and would like to get familiar with it, you should start
+by [bringing up a virtual POD on a single physical server](install_virtual.md).
+
+## Quickstart
+
+After performing the [physical configuration](#physical-configuration), install
+Ubuntu 14.04 on a [suitable head node](#detailed-requirements). On the target
+head node, add a `cord` user with `sudo` rights:
+
+```
+sudo adduser cord && \
+sudo usermod -a -G sudo cord && \
+echo 'cord ALL=(ALL) NOPASSWD:ALL' | sudo tee --append /etc/sudoers.d/90-cloud-init-users
+```
+
+[Create a POD configuration](install.md#pod-config) file in the
+`~/cord/build/podconfig` directory, then run:
+
+```
+cd ~/cord/build && \
+make PODCONFIG={YOUR_PODCONFIG_FILE.yml} config && \
+make -j4 build |& tee ~/build.out
+```
+
+After a successful build, set the compute nodes and the switches to boot from
+PXE and manually reboot them. They will be automatically deployed.
+
+## Overview of a CORD POD
+
+The following is a brief description of a full physical POD.
+
+### Physical Configuration
+
+A full POD includes a Top-of-Rack (ToR) management switch, four fabric
+switches, and three standard x86 servers.  The following figure does not show
+access devices or any upstream connectivity to the metro network; those details
+are included later in this section.
+
+<img src="images/physical-overview.png" alt="Drawing" style="width: 400px;"/>
+
+### Logical Configuration: Data Plane Network
+
+The following diagram is a high level logical representation of a typical CORD POD.
+
+<img src="images/dataplane.png" alt="Drawing" style="width: 700px;"/>
+
+The figure shows 40G data plane connections (red), where end-user traffic
+goes from the access devices to the metro network (green). User traffic
+goes through different different leafs, spines and compute nodes,
+depending on the services needed, and where they are located. The
+switches form a leaf and spine fabric. The compute nodes and the head
+node are connected to a port of one of the leaf switches.
+
+### Logical Configuration: Control Plane / Management Network
+
+The following diagram shows in blue how the components of the system are
+connected through the management network.
+
+<img src="images/controlplane.png" alt="Drawing" style="width: 500px;"/>
+
+As shown in this figure, the head node is the only server in the POD connected
+both to Internet and to the other components of the system. The compute nodes
+and the switches are only connected to the head node, which provides them with
+all the software needed.
+
+## Sample Workflow
+
+It is important to have a general picture of installation workflow before
+getting into the details. The following is a list of high-level tasks involved
+in bringing up a CORD POD:
+
+* CORD software is downloaded and built on the dev machine.
+* A POD configuration is created by the operator on the dev machine.
+* The software is pushed from the dev machine to the head node.
+* Compute nodes and fabric switches need to be manually rebooted. The CORD
+  build procedure automatically installs the OS, other software needed and
+  performs the related configurations.
+* The software gets automatically deployed from the head node to the compute
+  nodes.
+
+## Requirements
+
+While the CORD project values openness and does not have any interest in
+endorsing specific vendors, it provides a reference implementation for both
+hardware and software to help users in building their PODs. What is reported
+below is a list of hardware that, in the community's experience, has worked well.
+
+Also note that the CORD community will be better able to help you debug
+issues if your hardware and software configuration is as similar as possible
+to the reference implementation reported below.
+
+## Bill Of Materials (BOM) / Hardware Requirements
+
+This section provides a list of hardware required to build a full CORD POD.
+
+### BOM Summary
+
+| Quantity | Category                            | Brand         | Model              | Part Number    |
+|----------|-------------------------------------|---------------|--------------------|----------------|
+| 3        | Compute                             | Quanta (QCT)  | QuantaGrid D51B-1U | QCT-D51B-1U    |
+| 4        | Fabric Switch                       | EdgeCore      | AS6712-32X         | AS6712-32X     |
+| 1        | Management Switch (L2 VLAN support) | *             | *                  | *              |
+| 7        | Cabling (data plane, fiber)         | Robofiber     | QSFP-40G-03C       | QSFP-40G-03C   |
+| 12       | Cabling (management, copper)        | CAT6, 3M      | *                  | *              |
+
+### Detailed Requirements
+
+* 1x Development Machine. It can be either a physical machine or a virtual
+  machine, as long as the VM supports nested virtualization. It doesn’t
+  necessarily have to be Linux (as used in the rest of this guide); in
+  principle anything able to satisfy the hardware and software requirements
+  will work. Generic hardware requirements are 2 cores, 4GB of memory, and
+  60GB of disk.
+
+* 3x Physical Servers: one to be used as head node, two to be used as compute
+  nodes.
+
+   * Suggested Model: OCP-qualified QuantaGrid D51B-1U server. Each server is
+     configured with 2x Intel E5-2630 v4 10C 2.2GHz 85W, 64GB of RAM 2133MHz
+     DDR4, 2x 500GB HDD, and a 40 Gig adapter.
+
+   * Strongly Suggested NIC:
+       * Intel Ethernet Converged Network Adapters XL710 10/40 GbE PCIe 3.0, x8
+         Dual port.
+       * ConnectX®-3 EN Single/Dual-Port 10/40/56GbE Adapters w/ PCI Express
+         3.0.
+
+> NOTE: while the machines mentioned above are generic standard x86 servers,
+> and can potentially be substituted with any other machine, it’s quite
+> important to stick with one of the suggested network cards.  CORD
+> scripts will look for either an `i40e` or a `mlx4_en` driver, used by the two
+> suggested cards. Using other cards requires additional operations; please
+> see [Network Settings](appendix_network_settings.md) for more information.
+
+* 4x Fabric Switches
+     * Suggested Model: OCP-qualified Accton 6712 switch. Each switch is
+       configured with 32x40GE ports; produced by EdgeCore and HP.
+
+* 7x Fiber Cables with QSFP+ (Intel compatible) or 7 DAC QSFP+ (Intel
+  compatible) cables
+
+     * Suggested Model: Robofiber QSFP-40G-03C QSFP+ 40G direct attach passive
+       copper cable, 3m length - S/N: QSFP-40G-03C.
+
+* 1x 1G L2 copper management switch supporting VLANs or 2x 1G L2 copper
+  management switches
+
+## Connectivity Requirements
+
+The dev machine and the head node have to download software from different
+Internet sources, so they currently need unfettered Internet access.  (In the
+future, only the dev machine, and not the head node, will require Internet
+connectivity.) Firewalls, proxies, and software that prevents access to local
+DNS servers can sometimes cause issues and should be avoided.
+
+## Cabling a POD
+
+This section describes how the hardware components should be interconnected to
+form a fully functional CORD POD.
+
+### Management / Control Plane Network
+
+The management network is divided into two broadcast domains: one connecting the
+POD to the Internet and giving access to the deployer (called "external" and
+shown in green in the figure below), and one connecting the servers and
+switches inside the POD (called "internal" or "management" and shown in blue).
+The figure also shows data plane connections in red (as described in the next
+paragraph).
+
+<img src="images/physical-cabling-diagram.png" alt="Drawing" style="width: 800px;"/>
+
+The external and the management networks can be separated either using two
+different switches, or the same physical switch and by using VLANs.
+
+> NOTE: Head node IPMI connectivity is optional.
+
+> NOTE: IPMI ports do not necessarily have to be connected to the external
+> network. The requirement is that compute node IPMI interfaces must be
+> reachable from the head node, which is also possible through the internal /
+> management network.
+
+> NOTE: Vendors often allow a shared management port to provide IPMI
+> functionality. One of the NICs used for system management (e.g., eth0) can be
+> shared, so that it also serves as the IPMI port.
+
+#### External Network
+
+The external network allows POD servers to be reached from the Internet. This
+would likely not be supported in a production system, but is useful in
+development and evaluation settings, for example, making it easy to directly
+start/stop/reboot the head and the compute nodes.  Moreover, using the CORD
+automated scripts and tools for the Jenkins pipeline requires Jenkins to have
+direct access to these interfaces. This is why the IPMI/BMC interfaces of the
+nodes are also connected to the external network. In summary, the following
+equipment/interfaces are usually connected to the external network:
+
+* Internet
+* Dev machine
+* Head node - 1x 1G interface (referred to below as the external interface)
+* Head node - 1x IPMI/BMC interface (optional)
+* Compute node 1 - 1x IPMI/BMC interface (optional, but recommended)
+* Compute node 2 - 1x IPMI/BMC interface (optional, but recommended)
+
+#### Internal Network
+
+The internal/management network is separate from the external one. Its goal is
+to connect the head node to the rest of the system components (compute nodes
+and fabric switches). For a typical POD, the internal network includes:
+
+* Head node - 1x 1G interface (referred to below as the management interface)
+* Compute node 1 - 1x 1G interface
+* Compute node 2 - 1x 1G interface
+* Fabric 1 - management interface
+* Fabric 2 - management interface
+* Fabric 3 - management interface
+* Fabric 4 - management interface
+
+### User / Data Plane Network
+
+The data plane network (represented in red in the figure) carries user traffic
+(in green), from the access devices to the point where the POD connects to the
+metro network.
+
+<img src="images/dataplane.png" alt="Drawing" style="width: 700px;"/>
+
+The fabric switches are assembled to form a leaf and spine topology. A typical
+full POD has two leafs and two spines. Currently, this is a pure 40G network.
+While spines are not connected together, each leaf is connected to both spines.
+In summary, the following are the devices connecting to the leaf switches:
+
+* Head node  - 1x 40G interface
+* Compute node 1 - 1x 40G interface
+* Compute node 2 - 1x 40G interface
+* Access devices - 1 or more 40G interfaces
+* Metro devices - 1 or more 40G interfaces
+
+### Best Practices
+
+The community follows a set of best practices to make it easier to remotely
+debug issues, for example via mailing lists. The following is not mandatory,
+but is strongly suggested:
+
+* Leaf nodes are connected to the spine nodes starting at the highest port
+  number on the leaf.
+
+* For a given leaf node, its connections to the spine nodes terminate on the
+  same port number on each spine.
+
+* Leaf _n_ connections to spine nodes terminate at port _n_ on each spine node.
+
+* Leaf-spine switches are connected into the management TOR starting from the
+  highest port number.
+
+* Compute node _n_ connects to the internal (management) network switch on port
+  _n_.
+
+* Compute node _n_ connects to its leaf at port _n_.
+
+* The head node connects to the internal (management) network using the lowest
+  1G management interface.
+
+* The head node connects to the external network using its highest 1G
+  management interface.
+
+* All servers connect to the leafs using the lowest fabric (40G NIC) interface.
+
+## Software Environment Requirements
+
+Only the dev machine and the head node need to be prepped for installation.
+The other machines will be fully provisioned by CORD itself.
+
+### Development Machine
+
+It should run either Ubuntu 16.04 LTS (recommended) or Ubuntu 14.04 LTS.  See
+[Configuring your Development
+Environment](install.md#configuring-your-development-environment) for more
+details.
+
+This documentation assumes that you checked out the CORD source tree into
+`~/cord`.
+
+### Head Node
+
+It should run Ubuntu 14.04 LTS.  Then, configure the following.
+
+#### Create a User with "sudoer" permissions (no password)
+
+```
+sudo adduser cord && \
+sudo adduser cord sudo && \
+echo 'cord ALL=(ALL) NOPASSWD:ALL' | sudo tee --append /etc/sudoers.d/90-cloud-init-users
+```
+
+### Compute Nodes
+
+The CORD build process installs the compute nodes. The only thing that needs to
+be configured is the BIOS, so that the nodes can PXE boot from the head node
+through the internal (management) network. In doing this, make sure that:
+
+* The network card connected to the internal / management network is configured
+  with DHCP (no static IPs).
+
+* The IPMI (sometimes called BMC) interface is configured with a statically
+  assigned IP address that is reachable from the head node. It’s strongly
+  suggested to assign these addresses deterministically, so you can control
+  each node as you like (a minimal `ipmitool` sketch follows the notes below).
+
+* Their boot sequence has (a) the network card connected to the internal /
+  management network as the first boot device; and (b) the primary hard drive
+  as the second boot device.
+
+> NOTE: Some users prefer to also connect the IPMI interfaces of the compute
+> nodes to the external network, so they can control them from outside the POD
+> as well. Either way, the head node will still be able to control them.
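+
+If you need a starting point, a minimal sketch for setting a static IPMI
+address from the node's operating system with `ipmitool` is shown below. This
+is not part of the official CORD procedure; the channel number, addresses, and
+exact options vary by vendor and are placeholders here:
+
+```
+# Hypothetical example: channel 1 and the addresses below are placeholders
+sudo ipmitool lan set 1 ipsrc static
+sudo ipmitool lan set 1 ipaddr 10.6.0.101
+sudo ipmitool lan set 1 netmask 255.255.255.0
+sudo ipmitool lan print 1
+```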
+
+### Fabric Switches: ONIE
+
+The ONIE installer should already be installed on the switch and set to boot in
+installation mode. This is usually the default for new switches sold without an
+operating system. It might not be the case if the switches already have an
+operating system installed; in that case, how to reboot the switch into ONIE
+installation mode depends on several factors, such as the version of the
+installed OS and the specific model of the switch.
+
+## Prepare POD Configuration
+
+Each CORD POD deployment requires a POD configuration file that describes how
+the system should be configured, including what IP addresses should be used for
+the external and the internal networks, what users the system should run during
+the automated installation, and much more.
+
+POD configuration files are YAML files with the `.yml` extension, contained in
+the `/cord/build/podconfig` directory in the dev VM. You can either create a
+new file with your favorite editor or copy-and-edit an existing file. The
+[rcord-physical-example.yml](https://github.com/opencord/cord/blob/master/podconfig/rcord-physical-example.yml)
+configuration file is there for this purpose. All parameters have a
+description. Optional lines have been commented out, but can be used as needed.
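+
+For example, assuming the source tree is checked out in `~/cord` as described
+above, a new POD configuration can be started by copying the example file (the
+target file name below is just a placeholder):
+
+```
+cd ~/cord/build/podconfig && \
+cp rcord-physical-example.yml my-pod.yml
+```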
+
+More information about how the network configuration for the POD can be
+customized can be found in [Network Settings](appendix_network_settings.md).
+
+A full set of all possible build system variables can be found in the [Build
+System Variable Glossary](build_glossary.md).
+
+Once the POD config YAML file has been created, the composite configuration
+file should be generated with the following command.
+
+```
+cd ~/cord/build && \
+make PODCONFIG={YOUR_PODCONFIG_FILE.yml} config
+```
+
+The process generates a set of files in `~/cord/build/genconfig`.
+
+> NOTE: Before the configuration process, the `~/cord/build/genconfig`
+> directory contains only a README.md file.
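+
+To quickly confirm that the composite configuration was generated, you can list
+the directory (the exact set of files depends on the chosen scenario and
+profile):
+
+```
+ls ~/cord/build/genconfig
+```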
+
+## Head Node Deployment
+
+Head node deployment works as follows:
+
+* Makes the head node a MAAS server from which the other POD elements (fabric
+  switches and compute nodes) can PXE boot (both to load their OS and to be
+  configured).
+* Installs and configures the containers needed to configure other nodes of the
+  network.
+* Installs and configures OpenStack.
+* Provisions XOS, which provides service provisioning and orchestration for the
+  CORD POD.
+
+This step is started with the following command:
+
+```
+cd ~/cord/build && \
+make build
+```
+
+> NOTE: Be patient: this step can take an hour to complete.
+
+> WARNING: This command sometimes fails for various reasons.  Simply re-running
+> the command often solves the problem. If the command keeps failing, it’s
+> better to start over from a clean head node.
+
+This step is complete when the command successfully runs.
+
+### MAAS
+
+As previously mentioned, once the deployment is complete the head node becomes
+a MAAS region and rack controller, basically acting as a PXE server and serving
+images through the management network to compute nodes and fabric switches
+connected to it.
+
+The Web UI for MaaS can be viewed by browsing to the head node, using a URL of
+the form `http://head-node-ip-address/MAAS`.
+
+To login to the web page, use `cord` as the username. If you have set a
+password in the deployment configuration, use that; otherwise, the password can
+be found in your build directory under
+`<base>/build/maas/passwords/maas_user.txt`.
+
+After the deployment process finishes, MAAS initiates the download of an Ubuntu
+14.04 boot image that will be used to boot the other POD devices. This download
+can take some time and the process cannot continue until the download is
+complete. The status of the download can be verified through the UI by visiting
+the URL `http://head-node-ip-address/MAAS/images/`, or via the command line
+from the head node using the following command:
+
+```
+APIKEY=$(sudo maas-region-admin apikey --user=cord) && \
+maas login cord http://localhost/MAAS/api/1.0 "$APIKEY" && \
+maas cord boot-resources read | jq 'map(select(.type != "Synced"))'
+```
+
+If the output of the above command is not an empty list (`[]`), then the images
+have not yet been completely downloaded. Depending on your network speed, this
+could take several minutes. Please wait and then attempt the last command
+again, until the returned list is empty.
+
+When the list is empty you can proceed.
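+
+If you prefer not to repeat the check by hand, a simple polling loop (a sketch,
+assuming the `maas login` above has already been performed) is:
+
+```
+until [ "$(maas cord boot-resources read | jq 'map(select(.type != "Synced")) | length')" -eq 0 ]; do
+  echo "Images still downloading; waiting 60 seconds..."
+  sleep 60
+done
+```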
+
+### Compute Node and Fabric Switch Deployment
+
+This section describes how to provision and configure software on POD compute
+nodes and fabric switches.
+
+#### General Workflow
+
+Once it has been verified that the Ubuntu boot image has been downloaded, the
+compute nodes and the fabric switches may be PXE booted.
+
+Compute nodes and switches should simply be rebooted. The head node (through
+MaaS) will act as a DHCP and PXE server. It will install the OSes and make
+sure they are correctly configured.
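+
+If the compute node IPMI interfaces are reachable from the head node, one way
+to trigger the reboot remotely is with `ipmitool` (a sketch only; the address
+and credentials below are placeholders for your own IPMI settings):
+
+```
+ipmitool -I lanplus -H <compute-node-ipmi-ip> -U <ipmi-user> -P <ipmi-password> chassis power cycle
+```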
+
+At the end of the process, the compute and switch elements should be visible
+through the CORD CLI utilities and MAAS.
+
+> WARNING: Make sure your compute nodes and fabric switches are configured as
+> prescribed in the _Software Environment Requirements_ section.
+
+#### Important Commands: cord harvest and cord prov
+
+Two important commands are available to debug and check the status of the
+provisioning. They can be used from the head node CLI.
+
+* `cord harvest`: Tracks the node harvesting process. Nodes and switches should
+  appear here as soon as they get an IP address and are recognized by MaaS. To
+  see if your devices have been recognized, use the following command:
+
+```
+cord harvest list
+```
+
+* `cord prov`: Tracks the provisioning process, meaning the configuration
+  process that happens soon after the OS has been installed on your devices. To
+  see the provisioning status of your devices, use the following command:
+
+```
+cord prov list
+```
+
+The following status values are defined for the provisioning status:
+
+* **Pending:** The request has been accepted by the provisioner but not yet
+  started.
+* **Processing:** The request is being processed and the node is being
+  provisioned.
+* **Complete:** The provisioning has been completed successfully.
+* **Error:** The provisioning has failed and the message will be populated with
+  the exit message from provisioning.
+
+Logs of the post deployment provisioning can be found in
+`/etc/maas/ansible/logs` on the head node.
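+
+For example, to inspect the most recent provisioning activity on the head node
+(the exact log file names are deployment-specific, so the name below is a
+placeholder):
+
+```
+sudo ls -lt /etc/maas/ansible/logs
+sudo tail -f /etc/maas/ansible/logs/<logfile>
+```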
+
+For a given node, the provisioning restarts automatically if the related entry
+is manually removed. This can be done with the following command:
+
+```
+cord prov delete node_name
+```
+
+#### Static IP Assignment
+
+If you want to assign a specific IP to either a compute node or a fabric
+switch, it should be done before booting the device. This is achieved through a
+configuration file: `/etc/dhcp/dhcpd.reservations`.
+
+To help you, a sample file is available: `/etc/dhcp/dhcpd.reservations.sample`.
+For each host to which you want to statically assign an IP, use this syntax:
+
+```
+host <name-of-your-choice> {
+  hardware ethernet <host-mac-address>;
+  fixed-address <desired-ip>;
+}
+```
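+
+For instance, a reservation for a hypothetical compute node (the MAC address
+and IP below are only examples) would look like:
+
+```
+host compute-node-1 {
+  hardware ethernet 2c:60:0c:cb:00:3c;
+  fixed-address 10.6.0.107;
+}
+```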
+
+#### Compute Nodes
+
+The compute node provisioning process installs the servers as
+OpenStack compute nodes.
+
+The compute node will boot, register with MaaS, and then restart
+(possibly multiple times).
+
+Compute nodes are given a random hostname, in the "Canonical way", of
+an adjective and a noun (e.g., `popular-feast.cord.lab`).
+The name will be different for every deployment.
+
+After this is complete, an entry for each node will be visible:
+
+* From the MaaS UI, at `http://head-node-ip-address/MAAS/#/nodes`
+
+* From the OpenStack CLI on the head node, using the command
+
+```
+source ~/admin-openrc.sh &&
+nova hypervisor-list
+```
+
+* From CORD head node CLI, using the `cord harvest` command
+
+In MaaS, the new node will be initially in a _New_ state. As the machines boot,
+they should automatically transition from _New_ through the states
+_Commissioned_, _Acquired_ and _Deployed_.
+
+Once the node is in the _Deployed_ state, it will be provisioned for use in a
+CORD POD by the automated execution of an Ansible playbook.
+
+The post deployment provisioning of the compute nodes can be queried using the
+`cord prov` command.
+
+After a correct provisioning you should see something similar to:
+
+```
+cord prov list
+ID                                         NAME                   MAC                IP          STATUS      MESSAGE
+node-c22534a2-bd0f-11e6-a36d-2c600ce3c239  steel-ghost.cord.lab   2c:60:0c:cb:00:3c  10.6.0.107  Complete
+node-c238ea9c-bd0f-11e6-8206-2c600ce3c239  feline-shirt.cord.lab  2c:60:0c:e3:c4:2e  10.6.0.108  Complete
+```
+
+Once the post deployment provisioning on the compute node is complete, this
+task is complete.
+
+#### Fabric Switches
+
+Similar to the compute nodes, the fabric switches will boot, register with
+MaaS, and then restart (possibly multiple times).
+
+If a name hasn’t been assigned to a switch (see the static IP assignment
+section above), it usually appears with a name in the form `UKN-XXXXXX`.
+
+When the fabric switches get an IP and go through the harvesting process, they
+should be visible in MaaS, under the devices tab
+(`http://head-node-ip-address/MAAS/#/devices`).
+
+As with the compute nodes, provisioning happens after the harvest process.
+After a correct provisioning you should see something similar to:
+
+```
+cord prov list
+ID                                         NAME                    MAC                IP          STATUS      MESSAGE
+cc:37:ab:7c:b7:4c                          UKN-ABCD                cc:37:ab:7c:b7:4c  10.6.0.23   Complete
+cc:37:ab:7c:ba:58                          UKN-EFGH                cc:37:ab:7c:ba:58  10.6.0.20   Complete
+cc:37:ab:7c:bd:e6                          UKN-ILMN                cc:37:ab:7c:bd:e6  10.6.0.52   Complete
+cc:37:ab:7c:bf:6c                          UKN-OPQR                cc:37:ab:7c:bf:6c  10.6.0.22   Complete
+```
+
+> NOTE: `cord prov list` output for compute nodes is not shown here for
+> simplicity.
+
+Once the post deployment provisioning on the fabric switches is complete, the
+task is complete.
+
+## Access to CORD Services
+
+Your POD is now installed. You can now try to access the basic services as
+described below.
+
+### ONOS (Underlay)
+
+A dedicated ONOS instance is installed on the head node to control the underlay
+infrastructure (the fabric). You can access it with password `rocks`:
+
+* From the head node CLI: `ssh -p 8101 onos@onos-fabric`
+
+* Using the ONOS UI, at: `http://<head-node-ip>/fabric`
+
+### ONOS (Overlay)
+
+A dedicated ONOS instance is installed on the head node to control the overlay
+infrastructure (tenant networks). You can access it with password `rocks`:
+
+* From the head node CLI: `ssh -p 8102 onos@onos-cord`
+
+* Using the ONOS UI, at: `http://<head-node-ip>/vtn`
+
+### OpenStack
+
+From the head node CLI, the command
+
+```
+$ sudo lxc list
+```
+
+lists the set of LXC containers running the various OpenStack-related services.
+These containers can be entered as follows:
+
+```
+$ ssh ubuntu@<container-name>
+```
+
+### XOS UI
+
+XOS is the cloud orchestrator that controls the entire POD. It allows you to
+define new services and service dependencies. You can access XOS:
+
+* Using the XOS GUI at `http://<head-node-ip>/xos`
+
diff --git a/docs/install_pod.md b/docs/install_pod.md
deleted file mode 100644
index 7294982..0000000
--- a/docs/install_pod.md
+++ /dev/null
@@ -1,520 +0,0 @@
-# Installing a Physical POD
-
-The following is a detailed, step-by-step recipe for installing a physical POD.
-
->NOTE: Looking for a quick list of essential build commands? You can find it [here](quickstarts.md)
-
->NOTE: If you are new to CORD and would like to get familiar with it, you should start by bringing up a development POD on a single physical server, called [CORD-in-a-Box](install_ciab.md).
-
-## Overview of a CORD POD
-
-The following is a brief description of a generic full POD.
-
-### Physical Configuration
-
-A full POD includes a Top-of-Rack (ToR) management switch,
-four fabric switches, and three standard x86 servers.
-The following figure does not show access devices
-or any upstream connectivity to the metro network; those details are included
-later in this section.
-
-<img src="images/physical-overview.png" alt="Drawing" style="width: 400px;"/>
-
-### Logical Configuration: Data Plane Network
-
-The following diagram is a high level logical representation of a typical CORD POD.
-
-<img src="images/dataplane.png" alt="Drawing" style="width: 700px;"/>
-
-The figure shows 40G data plane connections (red), where end-user traffic
-goes from the access devices to the metro network (green). User traffic
-goes through different different leafs, spines and compute nodes,
-depending on the services needed, and where they are located. The
-switches form a leaf and spine fabric. The compute nodes and the head
-node are connected to a port of one of the leaf switches. 
-
-### Logical Configuration: Control Plane / Management Network
-
-The following diagram shows in blue how the components of the system are
-connected through the management network.
-
-<img src="images/controlplane.png" alt="Drawing" style="width: 500px;"/>
-
-As shown in this figure, the head node is the only server in the POD connected both
-to Internet and to the other components of the system. The compute nodes and the switches are only connected to the head node, which provides them with all the software needed.
-
-## Sample Workflow
-
-It is important to have a general picture of installation workflow before
-getting into the details. The following is a list of high-level tasks involved
-in bringing up a CORD POD:
-
-* CORD software is downloaded and built on the dev machine.
-* A POD configuration is created by the operator on the dev machine.
-* The software is pushed from the dev machine to the head node.
-* Compute nodes and fabric switches need to be manually rebooted. The CORD
-build procedure automatically installs the OS, other software needed and performs
-the related configurations.
-* The software gets automatically deployed from the head node to the compute nodes. 
-
-## Requirements
-
-While the CORD project is for openness and does not have any interest in sponsoring specific vendors, it provides a reference implementation for both hardware and software to help users in building their PODs. What is reported below is a list of hardware that, in the community experience, has worked well.
-
-Also note that the CORD community will be better able to help you debugging issues if your hardware and software configuration look as much as possible similar to the ones reported in the reference implementation, below.
-
-## Bill Of Materials (BOM) / Hardware Requirements
-
-The section provides a list of hardware required to build a full CORD POD.
-
-### BOM Summary
-
-| Quantity | Category | Brand              | Model                            | Part Num          |
-|--------|--------|------------|-------------------|-------------|
-| 3             | Compute | Quanta (QCT) | QuantaGrid D51B-1U     | QCT-D51B-1U |
-| 4             | Fabric Switch | EdgeCore | AS6712-32X                  | AS6712-32X    |
-| 1             | Management Switch (L2 VLAN support) | * | * | *                        |
-| 7             | Cabling (data plane) | Robofiber | QSFP-40G-03C | QSFP-40G-03C |
-| 12           | Cabling (Mgmt) | CAT6 copper cables 3M) | * | * |
-
-### Detailed Requirements
-
-* 1x Development Machine. It can be either a physical machine or a virtual machine, as long as the VM supports nested virtualization. It doesn’t have to be necessarily Linux (used in the rest of the guide, below); in principle anything able to satisfy the hardware and the software requirements. Generic hardware requirements are 2 cores, 4G of memory, 60G of hdd.
-
-* 3x Physical Servers: one to be used as head node, two to be used as compute nodes.
-
-   * Suggested Model: OCP-qualified QuantaGrid D51B-1U server. Each server is configured with 2x Intel E5-2630 v4 10C 2.2GHz 85W, 64GB of RAM 2133MHz DDR4, 2x hdd500GB and a 40 Gig adapter.
-
-   * Strongly Suggested NIC:
-       * Intel Ethernet Converged Network Adapters XL710 10/40 GbE PCIe 3.0, x8 Dual port.
-       * ConnectX®-3 EN Single/Dual-Port 10/40/56GbE Adapters w/ PCI Express 3.0.
-	   >NOTE: while the machines mentioned above are generic standard x86 servers, and can be potentially substituted with any other machine, it’s quite important to stick with either one of the network card suggested. CORD scripts will look for either an i40e or a mlx4_en driver, used by the two cards cards. To use other cards additional operations will need to be done. Please, see [Network Settings](appendix_network_settings.md) for more information.
-	   
-* 4x Fabric Switches
-     * Suggested Model: OCP-qualified Accton 6712 switch. Each switch
-       is configured with 32x40GE ports; produced by EdgeCore and HP.
-
-* 7x Fiber Cables with QSFP+ (Intel compatible) or 7 DAC QSFP+ (Intel compatible) cables
-
-     * Suggested Model: Robofiber QSFP-40G-03C QSFP+ 40G direct attach passive copper cable, 3m length - S/N: QSFP-40G-03C.
-
-* 1x 1G L2 copper management switch supporting VLANs or 2x 1G L2 copper management switches
-
-## Connectivity Requirements
-
-The dev machine and the head node have to download software from
-different Internet sources, so they currently need unfettered Internet access.
-(In the future, only the dev
-machine, and not the head node, will require Internet connectivity.)
-Sometimes firewalls, proxies, and software that prevents to access
-local DNSs generate issues and should be avoided.
-
-## Cabling a POD
-
-This section describes how the hardware components should be
-interconnected to form a fully functional CORD POD.
-
-### Management / Control Plane Network
-
-The management network is divided in two broadcast domains: one
-connecting the POD to the Internet and giving access to the deployer
-(called “external” and shown in green in the figure below), and one
-connecting the servers and switches inside the POD (called “internal”
-or “management” and shown in blue).
-The figure also shows data plane connections in red
-(as described in the next paragraph).
-
-<img src="images/physical-cabling-diagram.png" alt="Drawing" style="width: 800px;"/>
-
-The external and the management networks can be separated either using two different switches, or the same physical switch and by using VLANs.
-
-> NOTE: Head node IPMI connectivity is optional.
-
->NOTE: IPMI ports do not have to be necessarily connected to the external network. The requirement is that compute node IPMI interfaces need to be reachable from the head node. This is possible also through the internal / management network.
-
->NOTE: Vendors often allow a shared management port to provide IPMI functionalities. One of the NICs used for system management (e.g., eth0) can be shared, to be used at the same time also as IPMI port.
-
-#### External Network
-
-The external network allows POD servers to be reached from the
-Internet. This would likely not be supported in a production system,
-but is useful in development and evaluation settings, for example,
-making it easy to directly start/stop/reboot the head and the compute nodes.
-Moreover, using CORD automated scripts and tools for Jenkins pipeline
-requires Jenkins direct access to these interfaces. This is why
-IPMI/BMC interfaces of the nodes are also connected to the external
-network. In summary, following is the list of equipment/interfaces
-usually connected to the external network:
-
-* Internet
-* Dev machine
-* Head node - 1x 1G interface (following defined as external)
-* Head node - 1x IPMI/BMC interface (optional)
-* Compute node 1 - 1x IPMI/BMC interface (optional, but recommended)
-* Compute node 2 - 1x IPMI/BMC interface (optional, but recommended)
-
-#### Internal Network
-
-The internal/management network is separate from the external one. It has the goal to connect the head node to the rest of the system components (compute nodes and fabric switches). For a typical POD, the internal network includes:
-
-* Head node - 1x 1G interface (following defined as management)
-* Compute node 1 - 1x 1G interface
-* Compute node 2 - 1x 1G interface
-* Fabric 1 - management interface
-* Fabric 2 - management interface
-* Fabric 3 - management interface
-* Fabric 4 - management interface
-
-### User / Data Plane Network
-
-The data plane network (represented in red in the figure) carries user traffic (in green), from the access devices to the point the POD connects to the metro network.
-
-<img src="images/dataplane.png" alt="Drawing" style="width: 700px;"/>
-
-The fabric switches are assembled to form a leaf and spine topology. A typical full
-POD has two leafs and two spines. Currently, this is a pure 40G network.
-While spines are not connected together, each leaf is connected to both spines.
-In summary, the following are the devices connecting to the leaf switches:
-
-* Head node  - 1x 40G interface
-* Compute node 1 - 1x 40G interface
-* Compute node 2 - 1x 40G interface
-* Access devices - 1 or more 40G interfaces
-*Metro devices - 1 or more 40G interfaces
-
-### Best Practices
-
-The community follows a set of best practices to better be able to remotely debug issues, for example via mailing-lists. The following is not mandatory, but is strongly suggested:
-
-* Leaf nodes are connected to the spines nodes starting at the highest port number on the leaf.
-
-* For a given leaf node, its connections to the spine nodes terminate on the same port number on each spine.
-
-* Leaf _n_ connections to spine nodes terminate at port _n_ on each spine node.
-
-* Leaf-spine switches are connected into the management TOR starting from the highest port number.
-
-* Compute node _n_ connects to the internal (management) network switch on port _n_.
-
-* Compute node _n_ connects to its leaf at port _n_.
-
-* The head node connects to the internal (management) network using the lowest 1G management interface.
-
-* The head node connects to the external network using its highest 1G management interface.
-
-* All servers connect to the leafs using the lowest fabric (40G NIC) interface.
-
-## Software Environment Requirements
-
-Only the dev machine and the head node need to be prepped for installation.
-The other machines will be fully provisioned by CORD itself.
-
-### Development Machine
-
-It should run either Ubuntu 16.04 LTS (recommended) or Ubuntu 14.04 LTS.
-
-A script is provided to help you bootstrapping your dev machine and download the CORD repositories.
-
-<pre><code>cd ~ && \
-curl -o ~/cord-bootstrap.sh https://raw.githubusercontent.com/opencord/cord/{{ book.branch }}/scripts/cord-bootstrap.sh && \
-chmod +x cord-bootstrap.sh && \
-./cord-bootstrap.sh -v</code></pre>
-
-After the script successfully runs, logout and login again to make the user becomes part of the libvirtd group.
-
-At this stage a cord directory should be in the cord user home directory.
-
-### Head Node
-
-It should run Ubuntu 14.04 LTS.
-Then, configure the following.
-
-#### Create a User with "sudoer" permissions (no password)
-
-```
-sudo adduser cord && \
-sudo adduser cord sudo && \
-echo 'cord ALL=(ALL) NOPASSWD:ALL' | sudo tee --append /etc/sudoers.d/90-cloud-init-users
-```
-
-### Compute Nodes
-
-The CORD build process installs the compute nodes. The only thing to be
-configured are the BIOS settings, so that they can PXE boot from the head node
-through the internal (management) network. In doing this, make sure that:
-
-* The network card connected to the internal / management network is configured with DHCP (no static IPs).
-
-* The IPMI (sometime called BMC) interface is configured with a statically assigned IP, reachable from the head node. It’s strongly suggested to have them deterministically assigned, so you will be able to control your node as you like.
-
-* Their boot sequence has (a) the network card connected to the internal / management network as the first boot device; and (b) the primary hard drive as second boot device.
-
->NOTE: Some users prefer to connect as well the IPMI interfaces of the compute nodes to the external network, so they can have control on them also from outside the POD. This way the head node will be able to control them anyway.
-
-### Fabric Switches: ONIE
-
-The ONIE installer should be already installed on the switch and set to boot in installation mode. This is usually the default for new switches sold without an Operating System. It might not be the case instead if switches have already an Operating System installed. In this case rebooting the switch in ONIE installation mode depends by different factors, such the version of the OS installed and the specific model of the switch.
-
-## Prepare POD Configuration
-
-Each CORD POD deployment requires a POD configuration file that
-describes how the system should be configured, including what IP
-addresses should be used for the external and the internal networks,
-what users the system should run during the automated installation,
-and much more.
-
-POD configuration files are YAML files with extension .yml, contained
-in the `/cord/build/podconfig` directory in the dev VM. You can either
-create a new file with your favorite editor or copy-and-edit an
-existing file. The `sample.yml` configuration file is there for this
-purpose. All parameters have a description. Optional lines have been
-commented out, but can be used as needed.
-
-More information about how the network configuration for the POD can
-be customized can be found
-in [Network Settings](appendix_network_settings.md).
-
-Once the POD config yaml file has been created, the composite configuration file should be generated with the following command.
-
-```
-cd ~/cord/build && \
-make PODCONFIG={YOUR_PODCONFIG_FILE.yml} config
-```
-
-The process generates a set of files in `~/cord/build/genconfig`
-
->NOTE: Before the configuration process the `~/cord/build/genconfig` directory contains a README.md file only.
-
-## Head Node Deployment
-
-Head node deployment works as follows:
-
-* Makes the head node a MAAS server from which the other POD elements
-  (fabric switches and compute nodes) can PXE boot (both to load their OS
-  and to be configured).
-* Installs and configures the containers needed to configure other nodes of the network.
-* Installs and configures OpenStack.
-* Provisions XOS, which provides service provisioning and orchestration for the CORD POD.
-
-This step is started with the following command:
-
-```
-cd ~/cord/build && \
-make build
-```
-
->NOTE: Be patient: this step can take an hour to complete.
-
->WARNING: This command sometimes fails for various reasons.
->Simply re-running the command often solves the problem. If the command
->fails it’s better to start from a clean head node.
-
-This step is complete when the command successfully runs.
-
-### MAAS
-
-As previously mentioned, once the deployment is complete the head node becomes a MAAS region and rack controller, basically acting as a PXE server and serving images through the management network to compute nodes and fabric switches connected to it.
-
-The Web UI for MaaS can be viewed by browsing to the head node, using a URL of the from `http://head-node-ip-address/MAAS`.
-
-To login to the web page, use `cord` as the username. If you have set a password in the deployment configuration password use that, otherwise the password used can be found in your build directory under `<base>/build/maas/passwords/maas_user.txt`.
-
-After the deployment process finishes, MAAS initiates the download of an Ubuntu 14.04 boot image that will be used to boot the other POD devices. This download can take some time and the process cannot continue until the download is complete. The status of the download can be verified through the UI by visiting the URL `http://head-node-ip-address/MAAS/images/`, or via the command line from head node via the following command:
-
-```
-APIKEY=$(sudo maas-region-admin apikey --user=cord) && \
-maas login cord http://localhost/MAAS/api/1.0 "$APIKEY" && \
-maas cord boot-resources read | jq 'map(select(.type != "Synced"))'
-```
-
-If the output of of the above commands is not an empty list ([]) then the images have not yet been completely downloaded. Depending on your network speed, this could take several minutes. Please wait and then attempt the last command again, until the returned list is empty. 
-
-When the list is empty you can proceed.
-
-### Compute Node and Fabric Switch Deployment
-
-The section describes how to provision and configure software on POD compute nodes and fabric switches.
-
-#### General Workflow
-
-Once it has been verified that the Ubuntu boot image has been
-downloaded, the compute nodes and the fabric switches may be PXE booted.
-
-Compute nodes and switches should be simply rebooted. The head node (through MaaS) will act as DHCP and PXE server. It will install the OSs and will make sure they are correctly configured.
-
-At the end of the process, the compute and switch elemlents should be visible through the CORD CLI utilities and MAAS.
-
->WARNING: make sure your computes nodes and fabric switches are
->configured as
->prescribed in the _Software Environment Requirements_ section.
-
-#### Important Commands: cord harvest and cord prov
-
-Two important commands are available to debug and check the status of
-the provisioning. They can be used from the head node CLI.
-
-* `cord harvest`: Tracks the nodes harvesting process. Nodes and switches should appear here, as soon as they get an IP and are recognized by MaaS. To see if your devices have been recognized, use the following command:
-
-```
-cord harvest list
-```
-
-* `cord prov`: Tracks the provisioning process, meaning the configuration process that happen soon after the OS has been installed on your devices. To see the provisioning status of your devices, use the following command:
-
-```
-cord prov list
-```
-
-The following status values are defined for the provisioning status:
-
-* **Pending:** The request has been accepted by the provisioner but not yet started
-* **Processing:** The request is being processed and the node is being provisioned
-* **Complete:** The provisioning has been completed successfully
-* **Error:** The provisioning has failed and the message will be populated with the exit message from provisioning.
-
-Logs of the post deployment provisioning can be found in `/etc/maas/ansible/logs` on the head node.
-
-For a given node, the provisioning re-starts automatically if the
-related entry gets manually removed. This can be done with the following command:
-
-```
-cord prov delete node_name
-```
-
-Please refer to [Re-provision Compute Nodes and Switches](quickstart_physical.md)
-for more details.
-
-#### Static IP Assignment
-
-If you want to assign a specific IP to either a compute node or a
-fabric switch, it should be done before booting the device. This
-is achieved through a configuration file: `/etc/dhcp/dhcpd.reservations`.
-
-To help you, a sample file is available:
-`/etc/dhcp/dhcpd.reservations.sample`.
-For each host you want to statically
-assign an IP, use this syntax:
-
-```
-host <name-of-your choice> {
-	hardware ethernet <host-mac-address>;
-	fixed-address  <desired-ip>;
-	}
-```
-	
-#### Compute Nodes
-	
-The compute node provisioning process installs the servers as
-OpenStack compute nodes.
-
-The compute node will boot, register with MaaS, and then restart
-(eventually multiple times).
-
-Compute nodes are given a random hostname, in the “Canonical way”, of
-an adjective and a noun (e.g., `popular-feast.cord.lab`).
-The name will be different for every deployment.
-
-After this is complete, an entry for each node will be visible:
-
-* From the MaaS UI, at `http://head-node-ip-address/MAAS/#/nodes`
-
-* From the OpenStack CLI on the head node, using the command
-
-```
-source ~/admin-openrc.sh &&
-nova hypervisor-list
-```
-
-* From CORD head node CLI, using the `cord harvest` command
-
-In MaaS, the new node will be initially in a _New_ state. As the machines boot, they should automatically transition from _New_ through the states _Commissioned_, _Acquired_ and _Deployed_.
-
-Once the node is in the _Deployed_ state, it will be provisioned for use in a CORD POD by the automated execution of an Ansible playbook.
-
-The post deployment provisioning of the compute nodes can be queried using the `cord prov` command.
-
-After a correct provisioning you should see something similar to:
-
-```
-cord prov list
-ID                                         NAME                   MAC                IP          STATUS      MESSAGE
-node-c22534a2-bd0f-11e6-a36d-2c600ce3c239  steel-ghost.cord.lab   2c:60:0c:cb:00:3c  10.6.0.107  Complete
-node-c238ea9c-bd0f-11e6-8206-2c600ce3c239  feline-shirt.cord.lab  2c:60:0c:e3:c4:2e  10.6.0.108  Complete
-```
-
-Once the post deployment provisioning on the compute node is complete, this task is complete.
-
-#### Fabric Switches
-
-Similar to the compute nodes, the fabric switches will boot, register with MaaS, and then restart (eventually multiple times).
-
-If a name hasn’t been assigned to the switches (see the static IP assignment section above), usually switches have a name in the form `UKN-XXXXXX`.
-
-When the fabric switches get an IP and go through the harvesting process, they should be visible in MaaS, under the devices tab (`http://head-node-ip-address/MAAS/#/devices`).
-
-As with the compute nodes, following the harvest process, the provisioning will happen.
-After a correct provisioning you should see something similar to:
-
-```
-cord prov list
-ID                                         NAME                   MAC                IP          STATUS      MESSAGE
-cc:37:ab:7c:b7:4c                          UKN-ABCD                cc:37:ab:7c:b7:4c  10.6.0.23   Complete
-cc:37:ab:7c:ba:58                          UKN-EFGH                cc:37:ab:7c:ba:58  10.6.0.20   Complete
-cc:37:ab:7c:bd:e6                          UKN-ILMN                cc:37:ab:7c:bd:e6  10.6.0.52   Complete
-cc:37:ab:7c:bf:6c                           UKN-OPQR                cc:37:ab:7c:bf:6c  10.6.0.22   Complete
-```
-
->NOTE: `cord prov list` output for compute nodes is not shown here for simplicity.
-
-Once the post deployment provisioning on the fabric switches is complete, the task is complete.
-
-##Access to CORD Services
-
-Your POD is now installed. You can now try to access the basic
-services as described below.
-
-### ONOS (Underlay)
-
-A dedicated ONOS instance is installed on the head node to control the underlay infrastructure (the fabric). You can access it with password “rocks”
-
-* From the head node CLI: `ssh -p 8101 onos@onos-fabric`
-
-* Using the ONOS UI, at: `http://<head-node-ip>/fabric`
-
-### ONOS (Overlay)
-
-A dedicated ONOS instance is installed on the head node to control the overlay infrastructure (tenant networks). You can access it with password “rocks”
-
-* From the head node CLI: `ssh -p 8102 onos@onos-cord`
-
-* Using the ONOS UI, at: `http://<head-node-ip>/vtn`
-
-### OpenStack
-
-From the head node CLI
-
-```
-$ sudo lxc list
-```
-
-lists the set of LXC containers running the various OpenStack-related services.
-These containers can be entered as follows:
-
-```
-$ ssh ubuntu@<container-name>
-```
-
-### XOS UI
-
-XOS is the cloud orchestrator that controls the entire POD. It allows
-you to define new service and service dependencies.. You can access XOS:
-
-* Using the XOS GUI at `http://<head-node-ip>/xos`
-
-## Getting Help
-
-If it seems that something has gone wrong with your setup,
-the best way to ask for help is to join the CORD Slack channel
-or mailing lists. Information about both can be found at the
-[CORD wiki](https://wiki.opencord.org/display/CORD).
-
diff --git a/docs/install_ciab.md b/docs/install_virtual.md
similarity index 68%
rename from docs/install_ciab.md
rename to docs/install_virtual.md
index 6df2098..3dbb79d 100644
--- a/docs/install_ciab.md
+++ b/docs/install_virtual.md
@@ -1,34 +1,57 @@
-# Installing CORD-in-a-Box
+# Installing a Virtual Pod (CORD-in-a-Box)
 
-This guide walks through the steps to bring up a virtual CORD
-POD, running in virtual machines on a single physical server. This
-version is also known as *CORD-in-a-Box* (or just *CiaB*).
-The purpose of this virtual POD is to enable those interested in
-understanding how CORD works to examine and interact with
-a running CORD environment. It also serves as a common
-[development environment](develop.md).
+This guide walks through the steps to bring up a simplified virtual CORD POD,
+running in multiple virtual machines on a single physical server.  For
+instructions on setting up a full physical pod with multiple servers, please
+see [Install a Physical Pod](install_physical.md).
 
->NOTE: Looking for a quick list of essential build commands?
->You can find it [here](quickstarts.md)
+The virtual pod is also known as *CORD-in-a-Box* (or just *CiaB*).  The purpose
+of this virtual POD is to enable those interested in understanding how CORD
+works to examine and interact with a running CORD environment. It also serves
+as a common [development environment](develop.md).
 
->NOTE: This guide describes how to install a simplified version
->of a CORD POD on a single server using virtual machines.
->If you are looking for instructions on how to install a physical
->POD, you can find it [here](install_pod.md).
+## Quickstart
+
+To install a CiaB, on a [suitable](#target-server-requirements) Ubuntu 14.04
+system, run the following commands:
+
+```bash
+cd ~ && \
+wget https://raw.githubusercontent.com/opencord/cord/master/scripts/cord-bootstrap.sh && \
+chmod +x cord-bootstrap.sh && \
+~/cord-bootstrap.sh -v |& tee ~/setup.out
+
+cd ~/cord/build && \
+make PODCONFIG=rcord-virtual.yml config && \
+make -j4 build |& tee ~/build.out && \
+make pod-test |& tee ~/test.out
+```
+
+This will create a virtual R-CORD pod (as specified in the `PODCONFIG`), and go
+through the build and end-to-end test procedure, bringing up vSG and
+ExampleService instances.
+
+If you'll be running these commands frequently, a shortcut is to use the `-t`
+option on the `cord-bootstrap.sh` script to run all the make targets, for a
+more unattended build process, which can be handy when testing:
+
+```
+./cord-bootstrap.sh -v -t "PODCONFIG=rcord-virtual.yml config" -t "build" -t "pod-test"
+```
 
 ## What you need (prerequisites)
 
 You will need a *target server*, which will run both a build environment
 in a Vagrant VM (used to deploy CORD) as well as CiaB itself.
 
-### Target server requirements:
+### Target server requirements
 
-* 64-bit server, with
+* 64-bit AMD64/x86-64 server, with:
   * 48GB+ RAM
   * 12+ CPU cores
   * 200GB+ disk
 * Access to the Internet (no enterprise proxies)
-* Ubuntu 14.04 LTS freshly installed
+* Ubuntu 14.04.5 LTS freshly installed with updates
 * User account used to install CORD-in-a-Box has password-less *sudo*
   capability (e.g., like the `ubuntu` user)
 
@@ -39,11 +62,10 @@
 account using your organization's email address and choose "Join Existing
 Project"; for "Project Name" enter `cord-testdrive`.
 
->NOTE: CloudLab is supporting CORD as a courtesy.  It is expected that
->you will not use CloudLab resources for purposes other than
->evaluating CORD.  If, after a week or two, you wish to continue using
->CloudLab to experiment with or develop CORD, then you must apply for
->your own separate CloudLab project.
+> NOTE: CloudLab is supporting CORD as a courtesy.  It is expected that you
+> will not use CloudLab resources for purposes other than evaluating CORD.  If,
+> after a week or two, you wish to continue using CloudLab to experiment with
+> or develop CORD, then you must apply for your own separate CloudLab project.
 
 Once your account is approved, start an experiment using the
 `OnePC-Ubuntu14.04.5` profile on the Wisconsin, Clemson, or Utah clusters.
@@ -55,57 +77,38 @@
 
 ## Building CiaB
 
-There are three main steps to building CiaB:
+There are a few steps to building CiaB:
 
-* Bootstrap the server by installing software dependencies and checking out the CORD code
-* Customize the source and configuration if desired
-* Run `make` commands to build and deploy the CORD software, and run tests
+* Bootstrap the server by installing software dependencies and checking out the
+  CORD code
+* (Optional) Customize the source and configuration if desired
+* Run `make` targets to build and deploy the CORD software
+* (Optional) Run end-to-end tests to verify CiaB functionality
 
-### Download and run the bootstrap script
+> NOTE: If you are connecting to a remote target server, it is highly
+> recommended that you run the commands in a `tmux` session on the target
+> server, or use `mosh` to connect to the target rather than `ssh`.  Without
+> one of these, interrupted connectivity between your local machine and the
+> remote server during a build may cause the CiaB installation to hang.
 
-On the target server, download the script that bootstraps the build process and run it:
+### Bootstrap the server
 
-<pre><code>cd ~ && \
-wget https://raw.githubusercontent.com/opencord/cord/{{ book.branch }}/scripts/cord-bootstrap.sh && \
-chmod +x cord-bootstrap.sh && \
-~/cord-bootstrap.sh -v</code></pre>
+See [Configuring your Development Environment: cord-bootstrap.sh
+script](install.md#cord-bootstrap.sh-script) for instructions on running the
+bootstrap script to download the CORD source tree and optionally download
+patches from Gerrit. You must specify the `-v` option to this script in order
+to install Vagrant, which is required to build a CiaB.
 
-This script installs software dependencies (e.g., Ansible, Vagrant) as well as the CORD source code (in `~/cord`).
-One of the dependencies installed is `libvirt`.  As access to the libvirt socket depends on being in the `libvirtd` group, you
-may need to to logout and back in to have your shell session gain this group
-membership:
+### (Optional) Customize your build
 
-```
-~$ groups
-xos-PG0 root
-~$ vagrant status
-Call to virConnectOpen failed: Failed to connect socket to '/var/run/libvirt/libvirt-sock': Permission denied
-~$ logout
-~$ ssh node_name.cloudlab.us
-~$ groups
-xos-PG0 root libvirtd
-```
-
-### Customize the source and configuration
-
-CiaB can be used as an integration testing platform for patchsets in Gerrit.  There is a `-p` option to `cord-bootstrap.sh` that will checkout a specific
-changeset from a gerrit repo during the run.  The syntax for this is `<project
-path>:<changeset>/<revision>`.  It can be used multiple times - for example:
-
-```
-~/cord-bootstrap.sh -v -p build/platform-install:1233/4 -p orchestration/xos:1234/2
-```
-
-will check out the `platform-install` repo with changeset 1233, revision 4, and
-`xos` repo changeset 1234, revision 2. Note that the `-p` option
-will only have an effect the first time the `cord-bootstrap.sh` script is run.
-You can also just run the `repo` command directly to download patch sets.
-
-You can find the project path used by the `repo` tool in the [manifest/default.xml](https://gerrit.opencord.org/gitweb?p=manifest.git;a=blob;f=default.xml) file.
+You can now modify the CORD source tree that has been checked out in `~/cord`,
+if required, or [download patches manually from Gerrit using
+repo](getting_the_code.md#download-patchsets).
 
 ### Build and deploy the software
 
-Once the system has been bootstrapped, run the following `make` commands to launch the build:
+Once the system has been bootstrapped, run the following `make` commands to
+launch the build:
 
 ```
 cd ~/cord/build
@@ -116,31 +119,28 @@
 The output of the build will be displayed, as well as saved in `~/build.out`.
 Also logs for individual steps of the build are stored in `~/cord/build/logs`.
 
->NOTE: If you are connecting to a remote target server, it is highly
-recommended that you run the above commands in a `tmux` session, or
-use `mosh` to connect to the target rather than `ssh`.  Without one of these,
-interrupted connectivity between your local machine and the remote server
-may cause the CiaB install to hang.
+The `make -j4 build` step takes a *long time* (at least 1 hour) to run.  Be
+patient - if it hasn't completely failed yet, then assume all is well!
 
-The `make -j4 build` step takes a *long time* (at least 1 hour) to run.  Be patient!  If it
-hasn't completely failed yet, then assume all is well!
+### (Optional) Run End-to-End (E2E) tests
 
-### Run basic E2E tests
-
-If the build completed without errors, you can use the following command to run basic end-to-end tests:
+If the build completed without errors, you can use the following command to run
+basic end-to-end tests:
 
 ```
 cd ~/cord/build
 make pod-test
 ```
 
-The output of the tests will be displayed, as well as stored in `~/cord/build/logs`.
+The output of the tests will be displayed, as well as stored in
+`~/cord/build/logs/<iso8601_datetime>_pod-test`.
 
 ## Inspecting CiaB
 
-CiaB creates a virtual CORD POD running inside Vagrant VMs, using
-libvirt as a backend.  You can inspect the status of the VM's by setting the
-`VAGRANT_CWD` environmental variable to `~/cord/build/scenarios/cord` and running `vagrant status`:
+CiaB creates a virtual CORD POD running inside Vagrant VMs, using libvirt as a
+backend. You can inspect the status of the VMs by setting the `VAGRANT_CWD`
+environment variable to `~/cord/build/scenarios/cord` and running `vagrant
+status`:
 
 ```
 ~$ cd cord/build
@@ -161,11 +161,11 @@
 
 ### corddev VM
 
-The `corddev` VM is a build machine used
-to drive the installation.  It downloads and builds Docker containers and
-publishes them to the virtual head node (see below). It then installs MAAS on
-the virtual head node (for bare-metal provisioning) and the ONOS, XOS, and
-OpenStack services in containers.  This VM can be entered as follows:
+The `corddev` VM is a build machine used to drive the installation.  It
+downloads and builds Docker containers and publishes them to the virtual head
+node (see below). It then installs MAAS on the virtual head node (for
+bare-metal provisioning) and the ONOS, XOS, and OpenStack services in
+containers.  This VM can be entered as follows:
 
 ```
 $ ssh corddev
@@ -221,8 +221,8 @@
 21289d8b63ff        registry-mirror                       registry:2.4.0
 ```
 
-The above shows Docker containers launched by XOS (container names starting with
-`rcord`).  Containers starting with `onos` are running ONOS.  There is
+The above shows Docker containers launched by XOS (container names starting
+with `rcord`).  Containers starting with `onos` are running ONOS.  There is
 also a Docker image registry, a Maven repository containing the CORD ONOS apps,
 and a number of microservices used in bare-metal provisioning.
 
@@ -299,8 +299,9 @@
 +--------------------------------------+-------------------------+--------+------------+-------------+---------------------------------------------------+
 ```
 
-The VM hosting the vSG is called `mysite_vsg-1` and we see it has a management IP of 172.27.0.2.
-Then run `ssh-agent` and add the default key (used to access the OpenStack VMs):
+The VM hosting the vSG is called `mysite_vsg-1` and we see it has a management
+IP of 172.27.0.2.  Then run `ssh-agent` and add the default key (used to access
+the OpenStack VMs):
 
 ```
 vagrant@head1:~$ ssh-agent bash
@@ -308,8 +309,8 @@
 ```
 
 SSH to the compute node with the `-A` option and then to the VM using the
-management IP obtained above.  So if the compute node name is `bony-alley.cord.lab` and
-the management IP is 172.27.0.2:
+management IP obtained above.  So if the compute node name is
+`bony-alley.cord.lab` and the management IP is 172.27.0.2:
 
 ```
 vagrant@head1:~$ ssh -A ubuntu@bony-alley.cord.lab
@@ -323,22 +324,24 @@
 ### MAAS GUI
 
 You can access the MAAS (Metal-as-a-Service) GUI by pointing your browser to
-the URL `http://<target-server>:8080/MAAS/`.  E.g., if you are running on CloudLab,
-your `<target-server>` is the hostname of your CloudLab node.
-The username is `cord` and the auto-generated password is found in `~/cord/build/maas/passwords/maas_user.txt` on the CiaB server.
-For more information on MAAS, see [the MAAS documentation](http://maas.io/docs).
+the URL `http://<target-server>:8080/MAAS/`.  E.g., if you are running on
+CloudLab, your `<target-server>` is the hostname of your CloudLab node.  The
+username is `cord` and the auto-generated password is found in
+`~/cord/build/maas/passwords/maas_user.txt` on the CiaB server.  For more
+information on MAAS, see [the MAAS documentation](http://maas.io/docs).
 
 ### XOS GUI
 
 You can access the XOS GUI by pointing your browser to URL
-`http://<target-server>:8080/xos/`.  The username is
-`xosadmin@opencord.org` and the auto-generated password is found in
+`http://<target-server>:8080/xos/`.  The username is `xosadmin@opencord.org`
+and the auto-generated password is found in
 `/opt/credentials/xosadmin@opencord.org` on the head node.
 
 The state of the system is that all CORD services have been onboarded to XOS.
-You can see them in the `Service Graph` represented in the `Home` page.
-If you want to see more details about the services you navigate to `Core > Services`,
-or searching for `Service` in the top bar (you start searching just pressing `f`)
+You can see them in the `Service Graph` shown on the `Home` page.  If you want
+to see more details about the services, navigate to `Core > Services`, or
+search for `Service` in the top bar (you can start searching by just pressing
+`f`).
 
 A sample CORD subscriber has also been created. You can see the `Service Graph`
 for subscribers by selecting the `Service Graph` item in the left navigation.
@@ -346,7 +349,8 @@
 Here is a sample output:
 ![subscriber-service-graph.png](subscriber-service-graph.png)
 
->NOTE: the `Service Graph` will need to be detangled. You can organize the nodes by dragging them around.
+> NOTE: the `Service Graph` will need to be detangled and can be organized by
+> dragging the nodes.
 
 ### Kibana Logging GUI
 
@@ -399,11 +403,9 @@
 ### test-exampleservice
 
 This test builds on `test-vsg` by loading the *exampleservice* described in the
-[Tutorial on Assembling and On-Boarding
-Services](https://wiki.opencord.org/display/CORD/Assembling+and+On-Boarding+Services%3A+A+Tutorial).
-The purpose of the *exampleservice* is to demonstrate how new subscriber-facing
-services can be easily deployed to a CORD POD. This test performs the following
-steps:
+[Developing for Cord:ExampleService](xos/example_service.md).  The purpose of
+the *exampleservice* is to demonstrate how new subscriber-facing services can
+be easily deployed to a CORD POD. This test performs the following steps:
 
  * On-boards *exampleservice* into the CORD POD
  * Creates an *exampleservice* tenant, which causes a VM to be created and
@@ -413,7 +415,8 @@
 
 Success means that the Apache server launched by the *exampleservice* tenant is
 fully configured and is reachable from the subscriber client via the vSG.  If
-it succeeded, you should see some lines like these in the output:
+it succeeded, you should see the following lines near the end of the `make
+pod-test` output:
 
 ```
 TASK [test-exampleservice : Output from curl test] *****************************
@@ -430,24 +433,73 @@
 }
 ```
 
+## Development Loop using CiaB
+
+For service or core development using CiaB, we have a tighter development
+workflow loop which involves tearing down XOS as well as any active OpenStack
+objects (Instances, Networks, etc), rebuilding XOS container images, and then
+redeploying XOS.
+
+We sometimes refer to this as a "mini-End2End" as it does result in a new XOS
+deployment with an E2E test, but does not require a full reinstall.
+
+1. Make changes to your service code and propagate them to your CiaB host.
+   There are a number of ways to propagate changes to the host depending on
+   developer preference, including using [gerrit
+   patchsets](getting_the_code.md#download-patchsets), rsync, scp, etc. 
+
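+   For example, changes to a service could be copied to the CiaB host with
+   rsync (the paths and hostname below are placeholders; adjust them to your
+   service and host):
+
+```
+rsync -av ~/cord/orchestration/xos_services/exampleservice/ \
+    <ciab-host>:cord/orchestration/xos_services/exampleservice/
+```
+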
+2. Teardown the existing XOS installation and clean up OpenStack to
+   remove any leftover instances or networks:
+
+```
+cd ~/cord/build
+make xos-teardown
+make clean-openstack
+```
+
+3. Optional: Teardown ONOS. Sometimes we find it helpful to reinstall the
+   onos-cord and onos-fabric containers, to ensure that all state is wiped
+   clean from ONOS.
+
+```
+cd ~/cord/build
+make clean-onos
+```
+
+4. Build the new XOS container images and deploy to the pod.
+
+```
+cd ~/cord/build
+make -j4 build
+make compute-node-refresh
+make pod-test
+```
+
+5. Test and verify your changes.
+
+6. Go back to step #1.
 
 ## Troubleshooting
 
-If the CiaB build fails, you may try simply resuming the build at the
-place that failed.  The easiest way is to do is to re-run the
-`make build` command. It will skip over the steps that have already completed.
+If the CiaB build fails, you may try simply resuming the build at the place
+that failed.  The easiest way to do this is to re-run the `make build`
+command.  It will skip over the steps that have already completed.
 
 If you need to force `make build` to re-run steps that have already completed,
 remove the appropriate file in the `milestones` directory prior to re-running.
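+
+For example (a minimal sketch; the milestone name is a placeholder for
+whichever step you want to force to re-run):
+
+```
+cd ~/cord/build
+rm milestones/<name-of-step-to-rerun>
+make -j4 build
+```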
 
-For more information about how the build works, see [the build internals guide](./build_internals.md).
+For more information about how the build works, see [Troubleshooting and Build
+Internals](troubleshooting.md).
 
-## Congratulations
+
+## Congratulations!
 
 If you got this far, you successfully built, deployed, and tested your first
 (virtual) CORD POD.
 
 You are now ready to bring up a multi-node POD with a real switching fabric and
-multiple physical compute nodes.  The process for doing so is
-described in the [Physical POD Guide](./quickstart_physical.md).
+multiple physical compute nodes.  The process for doing so is described in
+[Installing a Physical POD](install_physical.md).
+
+
 
diff --git a/docs/overview.md b/docs/overview.md
deleted file mode 100644
index f2eeda4..0000000
--- a/docs/overview.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Guide Overview
-
-This GitBook is a curated set of guides that describe how to install, operate, test, and develop CORD.
-
-Source for individual guides is available in the CORD code repository (https://gerrit.opencord.org); look in the `/docs` directory of each project, with the GitBook rooted in `cord/docs`. Updates and improvements to this documentation can be submitted through Gerrit.
-
-CORD is a community-based open source project. In addition to this guide,
-you can find information about this community, its projects, and its governance
-on the [CORD wiki](https://wiki.opencord.org). This includes early white papers
-and design notes that have shaped [CORD's architecture](https://wiki.opencord.org/display/CORD/Documentation).
diff --git a/docs/quickstarts.md b/docs/quickstarts.md
deleted file mode 100644
index 5baa58b..0000000
--- a/docs/quickstarts.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# Quickstart
-
-This section provides a short list of essential commands that can be
-used to deploy CORD-in-a-Box and a physical POD.
-
->NOTE: Looking for the full Cord-in-a-Box (CiaB) installation guide? You can find it [here](install_ciab.md).
-
->NOTE: Looking for the full physical pod installation guide? You can find it [here](install_pod.md).
-
-## Common Step (for both CiaB and a Physical POD)
-<pre><code>cd ~ && \
-wget https://raw.githubusercontent.com/opencord/cord/{{ book.branch }}/scripts/cord-bootstrap.sh && \
-chmod +x cord-bootstrap.sh && \
-~/cord-bootstrap.sh -v</code></pre>
-
-Logout and log back in.
-
-## CORD-in-a-Box (CiaB)
-To install CiaB, type the following commands:
-
-```
-cd ~/cord/build && \
-make PODCONFIG=rcord-virtual.yml config && \
-make -j4 build |& tee ~/build.out
-```
-
-## Physical POD
-The following steps install a physical POD.
-
-### Prepare the head node
-
-```
-sudo adduser cord && \
-sudo adduser cord sudo && \
-echo 'cord ALL=(ALL) NOPASSWD:ALL' | sudo tee --append /etc/sudoers.d/90-cloud-init-users
-```
-
-### On the development machine
-Create your POD configuration `.yml` file in `~/cord/build/podconfig`.
-
-```
-cd ~/cord/build && \
-make PODCONFIG={YOUR_PODCONFIG_FILE.yml} config && \
-make -j4 build |& tee ~/build.out
-```
-
-### Compute nodes and fabric switches
-After a successful build, set the compute nodes and the switches to boot from PXE and manually reboot them. They will be automatically deployed.
diff --git a/docs/release-notes/shared-delusion.md b/docs/release-notes/shared-delusion.md
index 3e012d7..fc78170 100644
--- a/docs/release-notes/shared-delusion.md
+++ b/docs/release-notes/shared-delusion.md
@@ -5,12 +5,12 @@
 Epics and Stories completed for Shared-Delusion (pulled from Jira) is
 available [here](sd-jira.md), with the highlights summarized below.
 
-#### XOS Modeling Framework
+## XOS Modeling Framework
 
 Completed the XOS modeling framework for on-boarding
 services. Specific features include:
 
-* A new modeling langugae (*xproto*) and generative toolchain
+* A new modeling language (*xproto*) and generative toolchain
   (*xosgen*).
 
 * Support for specifying and enforcing security policies.
@@ -31,20 +31,24 @@
 
 * Removed hand-crafted APIs and eliminated the `xos-gui` container.
 
-> Information on migrating services to Shared-Delusion can be found 
-> [here](../xos/migrate-4.0.md).
+> Information on migrating services to Shared-Delusion can be found in the
+> [CORD-4.0 Service Migration Guide](../xos/migrate-4.0.md).
 
-#### Build System
+## Build System
 
 Redesigned build system to streamline and unify the developer
 workflow. Specific features include:
 
-* Uses `make` and transitions away from `gradle`, providing better
-support for incremental builds.
+* Uses `make` and transitions away from `gradle`, providing better support for
+  incremental builds and unifying the development process.
+
+* Added a *scenario* system that encompasses the *mock* functionality
+  previously available but doesn't require multiple profiles to be maintained.
 
 * Implemented an XOS configuration module.
 
-* Supports building and pushing tagged images to Docker Hub.
+* Supports building and pushing tagged images to Docker Hub, and downloading
+  images that match the code that is checked out.
 
 * Added configure targets that generate credentials for each build layer.
 
@@ -52,23 +56,19 @@
 
 * Added make targets to install versioned POD or CiaB
 
-* Added *mock* targets to master Makefile in support of
-   simpler build environments.
+* Updated `ansible` and `docker-compose` versions.
 
-* Updated `Ansible` and `docker-compose` versions.
+> The new (Make-based) and old (Gradle-based) build systems will co-exist for a
+> while, but the latter is being deprecated. Users are strongly encouraged to
+> [use the new system](../install.md).
 
-> The new (Make-based) and old (Gradle-based) build systems will
-> co-exist for a while, but the latter is being deprecated. Users are
-> strongly encouraged to start using the system, as documented
-> [here](../build_internals.md).
-
-#### Physical POD Deployment
+## Physical POD Deployment
 
 Automated the physical POD deployment system. Specific items
 include:
 
-* Supports fabric configuration in ONOS and load POD
-    configuration files in `pod-configs` repo.
+* Supports fabric configuration in ONOS and loading POD configuration files
+  from the `pod-configs` repo.
 
 * Automated switch software installation.
 
@@ -76,16 +76,15 @@
   file instead of Jenkins variables, and to parameterize methods
   for commonly used functions.
 
-#### Logging Support
+## Logging Support
 
-Added a comprehensive logging facility that integrates logs across
-multiple components. The logging facility uses ELK Stack and
-can be accessed using Kibana.
+Added a comprehensive logging facility that integrates logs across multiple
+components. The logging facility uses ELK Stack and can be accessed using
+Kibana.
 
-> Most services have not been upgraded to use the new logging
-> system.
+> Most services have not been upgraded to use the new logging system.
 
-#### QA and Testing
+## QA and Testing
 
 Improved test coverage, including:
 
@@ -99,7 +98,7 @@
 
 * Developed scaling tests for subscribers, vRouter, IGMP, vSG and vCPE.
 
-#### Fabric Enhancements
+## Fabric Enhancements
 
 Continued to add features to the fabric, including:
 
@@ -117,22 +116,17 @@
 
 * Refactored DHCP relay
 
-#### Performance Optimizations
+## Performance Optimizations
 
 Optimized DPDK and OvS performance, including:
 
-* Bound fabric interfaces to DPDK 
+* Bound fabric interfaces to DPDK
 
-* Added portbindings to `networking_onos` Neutron plugin 
+* Added portbindings to `networking_onos` Neutron plugin
 
 * Modified JuJu charms to configure optimizations into OpenStack.
 
-* Changed kernel boot options for nodes 
+* Changed kernel boot options for nodes
 
 > This is a beta feature, and is not automatically included in a build.
 
-
-
-
-
-
diff --git a/docs/scripts/defaults.md.j2 b/docs/scripts/defaults.md.j2
new file mode 100644
index 0000000..7d7405d
--- /dev/null
+++ b/docs/scripts/defaults.md.j2
@@ -0,0 +1,22 @@
+# Build System Variable Glossary
+
+{{ def_docs['frontmatter']['description'] }}
+
+{% for key, val in def_docs|dictsort %}
+### {{ key }}
+
+{{ val['description'] }}
+
+Default value:
+```
+{{ val['defval_pp'] }}
+```
+
+Used in:
+
+{% for file in val['reflist']|sort(attribute='path') -%}
+ - [{{ file.path }}]({{ file.link }})
+{% endfor -%}
+
+{% endfor %}
+
diff --git a/docs/scripts/defaultsdoc.py b/docs/scripts/defaultsdoc.py
new file mode 100644
index 0000000..f75fa85
--- /dev/null
+++ b/docs/scripts/defaultsdoc.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+# defaultsdoc.py - documentation for Ansible default values
+
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import fnmatch
+import jinja2
+import logging
+import os
+import pprint
+import re
+import sys
+import xml.etree.ElementTree as ET
+import yaml
+import markedyaml
+
+# logging setup
+sh = logging.StreamHandler(sys.stderr)
+sh.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
+
+LOG = logging.getLogger("defaultsdoc.py")
+LOG.addHandler(sh)
+
+# parse args
+parser = argparse.ArgumentParser()
+
+parser.add_argument('-p', '--playbook_dir', default='../platform-install/',
+                    action='append', required=False,
+                    help="path to base playbook directory")
+
+parser.add_argument('-d', '--descriptions', default='scripts/descriptions.md',
+                    action='store', required=False,
+                    help="markdown file with descriptions")
+
+parser.add_argument('-t', '--template', default='scripts/defaults.md.j2',
+                    action='store', required=False,
+                    help="jinja2 template to fill with defaults")
+
+parser.add_argument('-o', '--output', default='defaults.md',
+                    action='store', required=False,
+                    help="output file")
+
+args = parser.parse_args()
+
+# find the branch we're on via the repo manifest
+manifest_path =  os.path.abspath("../../.repo/manifest.xml")
+try:
+    tree = ET.parse(manifest_path)
+    manifest_xml = tree.getroot()
+    repo_default = manifest_xml.find('default')
+    repo_branch = repo_default.attrib['revision']
+except Exception:
+    LOG.exception("Error loading repo manifest")
+    sys.exit(1)
+
+role_defs = []
+profile_defs = []
+group_defs = []
+
+# frontmatter section is any text at the top of the descriptions.md file, and
+# comes before all other sections
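+# def_docs maps each variable name to a dict with the keys: 'defval' (the
+# parsed default value), 'defval_pp' (pretty-printed YAML of that value),
+# 'description' (text pulled from descriptions.md), and 'reflist' (a list of
+# {'path', 'link'} references to the files that set the default)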
+def_docs = {'frontmatter':{'description':''}}
+
+# find all the files to be processed
+for dirpath, dirnames, filenames in os.walk(args.playbook_dir):
+    basepath = re.sub(args.playbook_dir, '', dirpath)
+    for filename in filenames :
+        filepath = os.path.join(basepath, filename)
+
+        if fnmatch.fnmatch(filepath, "roles/*/defaults/*.yml"):
+            role_defs.append(filepath)
+
+        if fnmatch.fnmatch(filepath, "profile_manifests/*.yml"):
+            profile_defs.append(filepath)
+
+        if fnmatch.fnmatch(filepath, "group_vars/*.yml"):
+            group_defs.append(filepath)
+
+
+
+for rd in role_defs:
+    rd_vars = {}
+    # trim slash so basename grabs the final directory name
+    rd_basedir = os.path.basename(args.playbook_dir[:-1])
+    try:
+        rd_fullpath = os.path.abspath(os.path.join(args.playbook_dir, rd))
+        rd_partialpath = os.path.join(rd_basedir, rd)
+
+        # partial URL, without line nums
+        rd_url = "https://github.com/opencord/platform-install/tree/%s/%s" % (repo_branch, rd)
+
+
+        rd_fh = open(rd_fullpath, 'r')
+
+        # markedloader is for line #'s
+        loader = markedyaml.MarkedLoader(rd_fh.read())
+        marked_vars = loader.get_data()
+
+        rd_fh.seek(0)  # go to front of file
+
+        # yaml.safe_load is for vars in a better format
+        rd_vars = yaml.safe_load(rd_fh)
+
+        rd_fh.close()
+
+    except yaml.YAMLError:
+        LOG.exception("Problem loading file: %s" % rd)
+        sys.exit(1)
+
+    if rd_vars:
+
+        for key, val in rd_vars.iteritems():
+
+           # build full URL to lines. Lines numbered from zero, so +1 on them to match github
+           if marked_vars[key].start_mark.line == marked_vars[key].end_mark.line:
+               full_url = "%s#L%d" % (rd_url, marked_vars[key].start_mark.line+1)
+           else:
+               full_url = "%s#L%d-L%d" % (rd_url, marked_vars[key].start_mark.line, marked_vars[key].end_mark.line)
+
+           if key in def_docs:
+                if def_docs[key]['defval'] == val:
+                    def_docs[key]['reflist'].append({'path':rd_partialpath, 'link':full_url})
+                else:
+                    LOG.error(" %s has different default > %s : %s" % (rd, key, val))
+           else:
+                to_print = { str(key): val }
+                pp = yaml.dump(to_print, indent=4, allow_unicode=False, default_flow_style=False)
+
+                def_docs[key] = {
+                        'defval': val,
+                        'defval_pp': pp,
+                        'description': "",
+                        'reflist': [{'path':rd_partialpath, 'link':full_url}],
+                        }
+
+# read in descriptions file
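+# format: optional frontmatter text at the top, then one "## <variable_name>"
+# header per variable, each followed by its markdown description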
+descriptions = {}
+with open(args.descriptions, 'r') as descfile:
+    desc_name = 'frontmatter'
+    desc_lines = ''
+
+    for d_l in descfile:
+        # see if this is a header line at beginning of docs
+        desc_header = re.match(r"##\s+([\w_]+)", d_l)
+
+        if desc_header:
+            # add previous description to dict
+            descriptions[desc_name] = desc_lines
+
+            # set this as the next name, wipe out lines
+            desc_name = desc_header.group(1)
+            desc_lines = ''
+        else:
+            desc_lines += d_l
+
+    descriptions[desc_name] = desc_lines
+
+# add descriptions to def_docs
+for d_name, d_text in descriptions.iteritems():
+    if d_name in def_docs:
+        def_docs[d_name]['description'] = d_text
+    else:
+        LOG.error("Description exists for '%s' but doesn't exist in defaults" % d_name)
+
+# check for missing descriptions
+for key in sorted(def_docs):
+    if not def_docs[key]['description']:
+        LOG.error("No description found for '%s'" % key)
+
+# Add to template and write to output file
+j2env = jinja2.Environment(
+    loader = jinja2.FileSystemLoader('.')
+)
+
+template = j2env.get_template(args.template)
+
+with open(args.output, 'w') as f:
+    f.write(template.render(def_docs=def_docs))
diff --git a/docs/scripts/descriptions.md b/docs/scripts/descriptions.md
new file mode 100644
index 0000000..6a15590
--- /dev/null
+++ b/docs/scripts/descriptions.md
@@ -0,0 +1,660 @@
+This documents every variable available in the build system.  The repos these
+variables are used in are:
+
+ - [cord](https://github.com/opencord/cord) (aka "build" when checked out)
+ - [maas](https://github.com/opencord/maas)
+ - [platform-install](https://github.com/opencord/platform-install)
+
+## apt_cacher_name
+
+DNS name for the apt-cacher-ng server used by compute nodes and LXC containers
+on head node.
+
+## apt_cacher_port
+
+Port number used for apt-cacher-ng.
+
+## apt_ssl_sites
+
+APT package sources that are served over HTTPS.  These bypass apt-cacher-ng,
+which can't proxy SSL connections.
+
+## build_cord_dir
+
+Directory on the `build` node that the `cord` directory (root directory checked
+out by `repo`) is copied to.
+
+## build_docker_tag
+
+Tag applied to all built (and downloaded standalone) Docker images.
+
+## ca_digest
+
+Name of the digest (aka "hash") algorithm used when creating CA SSL
+certificates.
+
+## ca_im_days
+
+Length of time in days that an Intermediate Certificate Authority cert will be
+valid for.
+
+## ca_im_phrase
+
+The passphrase (password) used to encrypt the Intermediate Certificate
+Authority's private key.
+
+## ca_im_subj
+
+The subject (name in the certificate) of the Intermediate Certificate
+Authority.
+
+## ca_root_days
+
+Length of time in days that the Root Certificate Authority cert will be valid
+for.
+
+## ca_root_phrase
+
+The passphrase (password) used to encrypt the Root Certificate Authority's
+private key. Default is for this to be autogenerated by the password lookup in
+ansible and stored in [credentials_dir](#credentials_dir).
+
+## ca_root_subj
+
+The subject (name in the certificate) of the Root Certificate Authority.
+
+## ca_size
+
+Size of the keys used in generating the CA certificates, in bits.
+
+## cert_days
+
+Length of time in days that a standard server/client certificate will be
+valid for.
+
+## cert_digest
+
+Name of the digest (aka "hash") algorithm used when creating SSL certificates.
+
+## cert_size
+
+Size of the keys used in generating the server/client certificates, in bits.
+
+## charm_versions
+
+List of Juju charms and the versions used.
+
+## client_certs
+
+List of client SSL certificates to generate
+
+## cloudlab_extrafs
+
+Filesystem device to use for extra space when on CloudLab
+
+## cloudlab_links
+
+Symbolic links to create to use the extra space that is mounted when using
+CloudLab
+
+## compute_external_interfaces
+
+List of possible VTN external interfaces on the compute node, for setting up
+OpenStack with the VTN ML2 plugin.
+
+## config_cord_dir
+
+Location of the `cord` directory on the config node. See also
+[build_cord_dir](#build_cord_dir).
+
+## config_cord_profile_dir
+
+Location of the `cord_profile` directory on the config node.
+
+## cord_config_app_version
+
+Version of the CORD config ONOS app to use
+
+## cord_in_a_box
+
+Used to determine if this is a Cord-in-a-Box virtual pod installation
+
+## cord_vtn_app_version
+
+Version of the CORD VTN ONOS app to use
+
+## credentials_dir
+
+The location of the `credentials_dir` directory on the head node
+
+## delete_cord_profile_dir
+
+Boolean value, whether or not to delete the `cord_profile` directory when
+tearing down XOS on a pod
+
+## deploy_docker_registry
+
+DNS name or IP address of the Docker Registry
+
+## deploy_docker_tag
+
+Tag used to identify which docker images to use when performing a deployment.
+
+## deployment_flavors
+
+Names of OpenStack "flavors" of VM's that can be deployed.
+
+## deployment_type
+
+Deployment type, used in XOS to identify the type of deployment.
+
+## dhcpd_subnets
+
+Used to configure the DHCP server used in OpenCloud and other non-MaaS
+deployments.
+
+## dns_check_domain
+
+Domain to check when performing the prerequisite check.
+
+## dns_check_ipv4
+
+IP address of [dns_check_domain](#dns_check_domain) for DNS resolution
+prerequisite check.
+
+## dns_search
+
+Which domain suffixes to search for hosts in (non-MaaS)
+
+## dns_servers
+
+IP addresses of DNS servers
+
+## dns_ttl
+
+Time-to-live for DNS entries when using NSD (non-MaaS)
+
+## docker_apt_repo
+
+Name of the Docker APT repo to install Docker from
+
+## docker_opts
+
+Options to provide to Docker to configure the Docker daemon.
+
+## dpdk_lcore_mask
+
+DPDK option to set which CPU cores to use. More documentation at:
+[http://docs.openvswitch.org/en/latest/intro/install/dpdk/#setup-ovs](http://docs.openvswitch.org/en/latest/intro/install/dpdk/#setup-ovs)
+
+## dpdk_socket_memory
+
+DPDK option concerning memory allocation.
+
+## enabled_gui_extensions
+
+List of GUI extensions enabled and loaded into the Web UI
+
+## fabric_interfaces
+
+External VTN interface connected to the fabric switches.
+
+## fabric_network_cfg_json
+
+Filename of the JSON file used to configure the Fabric ONOS.
+
+## frontend_only
+
+`frontend_only` suppresses starting synchronizer containers as a part of the
+XOS container set. It is used in testing scenarios where synchronizers aren't
+needed.
+
+## gerrit_changesets
+
+List of Gerrit changesets to download and apply.
+
+## gui_api_endpoint
+
+Partial URI to the API endpoint used by the GUI to contact the XOS API
+
+## gui_background
+
+Background image used behind the login screen when logging into XOS.
+
+## gui_favicon
+
+Favicon used in URL bar for XOS web UI.
+
+## gui_logo
+
+Logo used in XOS web UI.
+
+## gui_payoff
+
+Text below the Logo in the XOS web UI.
+
+## gui_project_name
+
+Name of the type of POD being deployed, shown in XOS UI.
+
+## gui_routes
+
+Links given in the top-level of the XOS web UI to specific objects, to feature
+them in the sidebar.
+
+## gui_websocket
+
+URI path used by XOS web UI for the websocket connection.
+
+## gw_port
+
+Port on the XOS ws container for XOS UI connections
+
+## head_cord_dir
+
+Location on the headnode where the `cord` directory is copied.
+
+## head_cord_profile_dir
+
+Location on the headnode where the `cord_profile` directory is copied.
+
+## head_credentials_dir
+
+Location on the headnode where the `credentials` directory is copied.
+
+## head_lxd_list
+
+List of LXD containers (for Juju/OpenStack) to create.
+
+## head_mavenrepo_dir
+
+Location on the headnode to create the `mavenrepo` directory, which contains
+the docker-compose.yml file for the Maven repo docker container that serves up
+ONOS Apps to the ONOS instances on the headnode.
+
+## head_onos_cord_dir
+
+Location on the headnode to create the `onos_cord` directory, which contains
+configuration and the docker-compose.yml file for starting the ONOS instance
+that runs the VTN app.
+
+## head_onos_fabric_dir
+
+Location on the headnode to create the `onos_fabric` directory, which contains
+configuration and the docker-compose.yml file for starting the ONOS instance
+that runs the Fabric app.
+
+## headnode
+
+Name of the headnode on the system, used to configure NSD DNS aliases.
+
+## hugepages
+
+DPDK setting to control memory allocation.
+
+## hwaddr_prefix
+
+MAC address prefix used when creating LXD containers, to assign them DHCP addresses.
+
+## image_dir
+
+Directory to download OpenStack glance images into on the head node.
+
+## juju_config_path
+
+Path to Juju configuration file.
+
+## keystone_admin_password
+
+Password for OpenStack Keystone `admin` user.
+
+## kvm_check
+
+Whether or not to perform a check for processor virtualization features
+required for the KVM hypervisor to work.
+
+## log4j_port
+
+Port used by ONOS containers for sending log4j logging messages to ElasticStack.
+
+## logging_host
+
+Hostname (or IP) for the ElasticStack logging host machine.
+
+## maas_xos_admin_pass
+
+Contains the XOS admin password, used for loading TOSCA when bringing up
+MaaS.  Can't use the standard [xos_admin_pass](#xos_admin_pass) as these
+playbooks are run from the MaaS provisioner container.
+
+## management_hosts_net_cidr
+
+CIDR for the management_hosts VTN network.
+
+## management_hosts_net_range_xos_high
+
+Last IP address to assign as a part of the management_hosts VTN network.
+
+## management_hosts_net_range_xos_low
+
+First IP address to assign as a part of the management_hosts VTN network.
+
+## management_network_cidr
+
+CIDR of the head node management network that connects between the OpenStack
+LXC containers and compute nodes.
+
+## mgmt_interface
+
+Physical management network interface on head node.
+
+## mgmt_ipv4_first_octets
+
+First 3 octets of the IP address of the management network.
+
+## mgmt_name_reverse_unbound
+
+The same value as [mgmt_ipv4_first_octets](#mgmt_ipv4_first_octets) but
+formatted for Unbound for use as a reverse DNS lookup zone.
+
+## mgmtbr_ext_interface
+
+Network interface on head node to add to the `mgmtbr` bridge.
+
+## mgmtbr_nat_interface
+
+Network interface connected to the internet that NAT is performed on for
+nodes that use the `mgmtbr` bridge.
+
+## min_memtotal_mb
+
+Minimum amount of memory to allow for a full virtual POD to be built with.
+
+## min_processor_vcpus
+
+Minimum number of CPU's to allow for a full virtual POD to be built with.
+
+## nsd_conf
+
+Path to the `nsd.conf` file for configuring the NSD authoritative nameserver.
+
+## nsd_group
+
+Group used by the NSD nameserver.
+
+## nsd_ip
+
+IP address of the NSD nameserver. Usually this is set to the loopback address,
+as Unbound runs on the external interfaces.
+
+## nsd_zones
+
+Configuration of DNS Zones that NSD provides authoritative DNS lookups for.
+
+## nsd_zonesdir
+
+Directory where DNS Zone files are kept for NSD.
+
+## onos_cord_port
+
+Port used for SSH connections to the `ONOS CORD` instance.
+
+## onos_debug_appnames
+
+Names of the ONOS apps whose logging level is changed for debugging purposes.
+
+## onos_debug_level
+
+The logging level (`INFO`, `DEBUG`, `TRACE`, etc.) to set for the ONOS apps
+listed in [onos_debug_appnames](#onos_debug_appnames).
+
+## onos_docker_image
+
+Name of the docker image used to bring up ONOS containers.
+
+## onos_log_level
+
+Default logging level ONOS should log at.
+
+## physical_node_list
+
+List of physical nodes to set up in DNS.
+
+## pki_dir
+
+Location where SSL certificates are generated on the `config` node. Contains
+subdirectories for root and intermediate CA certificates.
+
+## pmd_cpu_mask
+
+DPDK setting for CPU pinning.
+
+## pod_sshkey_name
+
+Name of the SSH key generated to be used by the pod, specifically for logging
+into instance VM's that are brought up.
+
+## profile_library
+
+The name of the profile-specific onboarding TOSCA file.
+
+## pull_docker_registry
+
+DNS Name or IP of the Docker Registry to pull images from.
+
+## pull_docker_tag
+
+Tag for pulling Docker images.
+
+## repo_checksum
+
+Checksum of the [repo](https://code.google.com/archive/p/git-repo/) download.
+
+## repo_dl_url
+
+URL of `repo` to download.
+
+## repo_manifest_url
+
+URL of the Gerrit manifest repository that `repo` fetches its list of git
+repositories from.
+
+## requests_ca_bundle
+
+When using python's requests module, name of the CA certificate bundle file to
+use to validate SSL certificates.
+
+## run_dist_upgrade
+
+Whether or not to run `apt-get dist-upgrade` on a system in the course of
+setting it up.
+
+## server_certs
+
+List of SSL certificates to generate for server use.
+
+## site_humanname
+
+Human readable name to use for the CORD site.
+
+## site_name
+
+Machine readable name to use for the CORD site. This should be one word, without spaces.
+
+## site_suffix
+
+The DNS suffix applied to all machines created for this site. Must be a valid DNS name.
+
+## ssh_ca_phrase
+
+The passphrase used to encrypt the Root CA key when creating a SSL hierarchy.
+
+## ssh_client_genkeys
+
+Names of SSH Client keys to generate and sign by the SSH CA.
+
+## ssh_host_genkeys
+
+Names of SSH Host keys to generate and sign by the SSH CA.
+
+## ssh_keysize
+
+Size in bits of SSH keys to generate
+
+## ssh_keytype
+
+The key type of the SSH keys.  `rsa` is currently used; this may change as
+support for newer key algorithms is added to the underlying platform.
+
+## ssh_pki_dir
+
+Directory where SSH keys are generated.
+
+## ssl_cert_subj_prefix
+
+SSL certificate prefix substring to use when generating certificates.
+
+## trust_store_pw
+
+Java KeyStore password used for encrypting SSL certificates.  This currently
+doesn't contain any secure certificates, just the generated CA
+root/intermediate certificates for validation of SSL connections.
+
+## unbound_conf
+
+Path for the Unbound recursive DNS resolver configuration file.
+
+## unbound_group
+
+Group name used by Unbound server.
+
+## unbound_interfaces
+
+List of network interfaces that Unbound should listen on.
+
+## unbound_listen_all
+
+Whether Unbound should listen on all available network interfaces.
+
+## unbound_listen_on_default
+
+Whether Unbound should listen on the default gateway interface (as known to Ansible).
+
+## use_apt_cache
+
+Enables the use of `apt-cacher-ng` to cache APT packages on Head/LXC/Compute nodes.
+
+## use_dpdk
+
+Enable DPDK in OpenStack Nova and Neutron
+
+## use_fabric
+
+Start and use ONOS in a container to manage fabric switches
+
+## use_maas
+
+Use MaaS to manage compute nodes and switches.
+
+## use_management_hosts
+
+Whether the management_hosts network type in VTN should be enabled.
+
+## use_openstack
+
+Bring up and use OpenStack to manage VM's.
+
+## use_redis
+
+Use redis as a message bus inside XOS.
+
+## use_vtn
+
+Use the ONOS VTN app to manage networks for virtual instances.
+
+## vcpu_pin_set
+
+DPDK setting to specify CPU pinning.
+
+## vtn_management_host_net_interface
+
+Network interface to use on the head/compute nodes for the management_host network.
+
+## xos_admin_first
+
+First name of the XOS Admin user
+
+## xos_admin_last
+
+Last name of the XOS Admin user
+
+## xos_admin_pass
+
+Password of the XOS Admin user (autogenerated by default)
+
+## xos_admin_user
+
+Username (email) of the XOS Admin user
+
+## xos_bootstrap_ui_port
+
+Port to connect to when bootstrapping the XOS interface.
+
+## xos_chameleon_port
+
+Port used by Chameleon in XOS.
+
+## xos_db_name
+
+XOS Postgres database name
+
+## xos_db_password
+
+XOS Postgres database password.
+
+## xos_db_username
+
+XOS Postgres database username.
+
+## xos_dir
+
+Path of XOS directory within Docker containers.
+
+## xos_docker_networks
+
+Name of networks created in Docker for XOS containers.
+
+## xos_grpc_insecure_port
+
+Insecure (non-SSL) port used for GRPC connections to the XOS API.
+
+## xos_grpc_secure_port
+
+Secure (SSL) port used for GRPC connections to the XOS API.
+
+## xos_images
+
+List of OpenStack Glance images in QCOW2 format that are downloaded.
+
+## xos_other_templates
+
+List of templates to generate when creating the `cord_profile` directory.
+
+## xos_services
+
+List of XOS services to load, including the name, path and whether SSH keypairs
+should be included for the services.
+
+## xos_tosca_config_templates
+
+List of XOS tosca templates to load that make up the service graph of a
+profile.
+
+## xos_ui_port
+
+XOS Web UI port to use for API access.
+
+## xos_users
+
+List of additional users to create in XOS, in addition to
+[xos_admin_user](#xos_admin_user).
+
diff --git a/docs/scripts/markedyaml.py b/docs/scripts/markedyaml.py
new file mode 100644
index 0000000..f7c1484
--- /dev/null
+++ b/docs/scripts/markedyaml.py
@@ -0,0 +1,118 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# markedyaml.py
+# generates nodes with start/end line and column values
+# start line seems off with single-line items, correct with multiline
+#
+# Original code from here: https://gist.github.com/dagss/5008118
+# Request for licensing clarification made on 2017-09-19
+# Contains improvements to support more types (bool/int/etc.)
+
+import yaml
+from yaml.composer import Composer
+from yaml.reader import Reader
+from yaml.scanner import Scanner
+from yaml.composer import Composer
+from yaml.resolver import Resolver
+from yaml.parser import Parser
+from yaml.constructor import Constructor, BaseConstructor, SafeConstructor
+
+def create_node_class(cls):
+    class node_class(cls):
+        def __init__(self, x, start_mark, end_mark):
+            cls.__init__(self, x)
+            self.start_mark = start_mark
+            self.end_mark = end_mark
+
+        def __new__(self, x, start_mark, end_mark):
+            return cls.__new__(self, x)
+    node_class.__name__ = '%s_node' % cls.__name__
+    return node_class
+
+dict_node = create_node_class(dict)
+list_node = create_node_class(list)
+unicode_node = create_node_class(unicode)
+int_node = create_node_class(int)
+float_node = create_node_class(float)
+
+class NodeConstructor(SafeConstructor):
+    # To support lazy loading, the original constructors first yield
+    # an empty object, then fill them in when iterated. Due to
+    # laziness we omit this behaviour (and will only do "deep
+    # construction") by first exhausting iterators, then yielding
+    # copies.
+    def construct_yaml_map(self, node):
+        obj, = SafeConstructor.construct_yaml_map(self, node)
+        return dict_node(obj, node.start_mark, node.end_mark)
+
+    def construct_yaml_seq(self, node):
+        obj, = SafeConstructor.construct_yaml_seq(self, node)
+        return list_node(obj, node.start_mark, node.end_mark)
+
+    def construct_yaml_str(self, node):
+        obj = SafeConstructor.construct_scalar(self, node)
+        assert isinstance(obj, unicode)
+        return unicode_node(obj, node.start_mark, node.end_mark)
+
+    def construct_yaml_bool(self, node):
+        obj = SafeConstructor.construct_yaml_bool(self, node)
+        return int_node(obj, node.start_mark, node.end_mark)
+
+    def construct_yaml_int(self, node):
+        obj = SafeConstructor.construct_scalar(self, node)
+        return int_node(obj, node.start_mark, node.end_mark)
+
+    def construct_yaml_float(self, node):
+        obj = SafeConstructor.construct_scalar(self, node)
+        return float_node(obj, node.start_mark, node.end_mark)
+
+
+NodeConstructor.add_constructor(
+        u'tag:yaml.org,2002:map',
+        NodeConstructor.construct_yaml_map)
+
+NodeConstructor.add_constructor(
+        u'tag:yaml.org,2002:seq',
+        NodeConstructor.construct_yaml_seq)
+
+NodeConstructor.add_constructor(
+        u'tag:yaml.org,2002:str',
+        NodeConstructor.construct_yaml_str)
+
+NodeConstructor.add_constructor(
+        u'tag:yaml.org,2002:bool',
+        NodeConstructor.construct_yaml_bool)
+
+NodeConstructor.add_constructor(
+        u'tag:yaml.org,2002:int',
+        NodeConstructor.construct_yaml_int)
+
+NodeConstructor.add_constructor(
+        u'tag:yaml.org,2002:float',
+        NodeConstructor.construct_yaml_float)
+
+
+class MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        NodeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+def get_data(stream):
+    return MarkedLoader(stream).get_data()
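+
+# Example usage (a minimal sketch; the filename and key are placeholders):
+#
+#   with open('vars.yml') as fh:
+#       data = get_data(fh.read())
+#       # each value is a dict/list/scalar subclass carrying start_mark and
+#       # end_mark, which record where the item appeared in the source file
+#       print(data['some_key'].start_mark.line)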
+
diff --git a/docs/terminology.md b/docs/terminology.md
index 81fd726..56edeb8 100644
--- a/docs/terminology.md
+++ b/docs/terminology.md
@@ -1,40 +1,62 @@
-#Terminology
+# Terminology
 
 This guide uses the following terminology. (Additional terminology and
 definitions can be found in an overview of the
 [CORD Ecosystem](https://wiki.opencord.org/display/CORD/Defining+CORD).)
 
-* **POD**: A single physical deployment of CORD.
+**POD**
+:  A single physical deployment of CORD.
 
-* **Full POD**: A typical configuration, used as example in this Guide.
-A full CORD POD consists of three servers and four fabric switches.
-This makes it possibile to experiment with all the core features of CORD, and it
-is what the community typically uses for tests.
+**Full POD**
+:  A typical configuration, used as an example in this Guide.  A full CORD POD
+   consists of three servers and four fabric switches.  This makes it possible to
+   experiment with all the core features of CORD, and it is what the community
+   typically uses for tests.
 
-* **Half POD**: A minimum-sized configuration. It is similar to a full POD, but with less hardware. It consists of two servers (one head node and one compute node), and one fabric switch. It does not allow experimentation with all of the core features that
-CORD offers (e.g., a switching fabric), but it is still good for basic experimentation and testing.
+**Half POD**
+:  A minimum-sized configuration. It is similar to a full POD, but with less
+   hardware. It consists of two servers (one head node and one compute node), and
+   one fabric switch. It does not allow experimentation with all of the core
+   features that CORD offers (e.g., a switching fabric), but it is still good for
+   basic experimentation and testing.
 
-* **Development (Dev) Machine**: This is the machine used
-to download, build and deploy CORD onto a POD.
-Sometimes it is a dedicated server, and sometimes the developer's laptop.
-In principle, it can be any machine that satisfies the hardware and software
-requirements.
+**CORD-in-a-Box (CiaB)**
+:  Colloquial name for a Virtual POD.
 
-* **Development (Dev) VM**: Bootstrapping the CORD installation requires a lot of
-software to be installed and some non-trivial configurations to be applied.
-All this should happen on the dev machine.
-To help users with the process, CORD provides an easy way to create a
-VM on the dev machine with all the required software and configurations in place.
+**Development (Dev) Machine**
+:  This is the machine used to download, build and deploy CORD onto a POD.
+   Sometimes it is a dedicated server, and sometimes the developer's laptop.  In
+   principle, it can be any machine that satisfies the hardware and software
+   requirements.
 
-* **Compute Node(s)**: A server in a POD that run VMs or containers associated with
-one or more tenant services. This terminology is borrowed from OpenStack.
+**Development (Dev) VM**
+:  Bootstrapping the CORD installation requires a lot of software to be
+   installed and some non-trivial configurations to be applied.  All this should
+   happen on the dev machine.  To help users with the process, CORD provides an
+   easy way to create a VM on the dev machine with all the required software and
+   configurations in place.
 
-* **Head Node**: A compute node of the POD that also runs management services. This includes for example XOS (the orchestrator), two instances of ONOS
-(the SDN controller, one to control the underlay fabric and one to control the overlay), MAAS and all the services needed to automatically install and configure the rest of
-the POD devices.
+**Compute Node(s)**
+:  A server in a POD that runs VMs or containers associated with one or more
+   tenant services. This terminology is borrowed from OpenStack.
 
-* **Fabric Switch**: A switch in a POD that interconnects other switches and servers
-inside the POD.
+**Head Node**
+:  A compute node of the POD that also runs management services. This includes
+   for example XOS (the orchestrator), two instances of ONOS (the SDN controller,
+   one to control the underlay fabric and one to control the overlay), MAAS and
+   all the services needed to automatically install and configure the rest of
+   the POD devices.
 
-* **vSG**: The virtual Subscriber Gateway (vSG) is the CORD counterpart for existing
-CPEs. It implements a bundle of subscriber-selected functions, such as Restricted Access, Parental Control, Bandwidth Metering, Access Diagnostics and Firewall. These functions run on commodity hardware located in the Central Office rather than on the customer’s premises. There is still a device in the home (which we still refer to as the CPE), but it has been reduced to a bare-metal switch.
+**Fabric Switch**
+:  A switch in a POD that interconnects other switches and servers inside the
+   POD.
+
+**vSG**
+:  The virtual Subscriber Gateway (vSG) is the CORD counterpart for existing
+   CPEs. It implements a bundle of subscriber-selected functions, such as
+   Restricted Access, Parental Control, Bandwidth Metering, Access Diagnostics and
+   Firewall. These functions run on commodity hardware located in the Central
+   Office rather than on the customer’s premises. There is still a device in the
+   home (which we still refer to as the CPE), but it has been reduced to a
+   bare-metal switch.
+
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
new file mode 100644
index 0000000..7bca1d5
--- /dev/null
+++ b/docs/troubleshooting.md
@@ -0,0 +1,170 @@
+# Troubleshooting and Build System Internals
+
+## Debugging make target Failures
+
+`make` targets that are built will create a per-target log file in the `logs`
+directory. These are prefixed with a datestamp which is the same for every
+target in a single run of make - re-running make will result in additional sets
+of logs, even for the same target.
+
+If you have a build failure and want to know which targets completed, run:
+
+```
+ls -ltr milestones ; ls -ltr logs
+```
+
+Logfiles without a corresponding milestone point to the make target(s) that
+failed.
+
+## Config Generation Overview
+
+All configuration in CORD is driven off of YAML files which contain variables
+used by Ansible, make, and Vagrant to build development and production
+environments. A [glossary of build system variables](build_glossary.md) is
+available which describes these variables and where they are used. 
+
+When a command to generate config such as `make PODCONFIG=rcord-mock.yml
+config` is run, the following steps happen:
+
+1. The POD Config file is read, in this case `genconfig/rcord-mock.yml`, which
+   specifies the scenario and profile.
+2. The Scenario config file is read, in this case `scenario/mock/config.yml`.
+3. The contents of these files are combined into a master config variable, with
+   the POD Config overwriting any config set in the Scenario.
+4. The entire master config is written to `genconfig/config.yml`.
+5. The `inventory_groups` variable is used to generate an ansible inventory
+   file and put in `genconfig/inventory.ini`.
+6. Various variables are used to generate the makefile config file
+   `genconfig/config.mk`. This sets the targets invoked by `make build`.
+
+Note that the combination of the POD and Scenario config in step #3 is not a
+merge. If you define an item in the root of the POD Config that has subkeys,
+it will overwrite every subkey defined in the Scenario.  This is most
+noticeable when setting the `inventory_groups` or `docker_image_whitelist`
+variable. If changing either in a POD Config, you must recreate the entire
+structure or list. This may seem inconvenient, but other list or tree merging
+strategies lack a way to remove items from a tree structure.
+
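+For example (a hypothetical POD Config fragment; the image names are purely
+illustrative), overriding `docker_image_whitelist` means re-listing every
+image you still want, not just the ones you are adding:
+
+```
+# in a POD Config under podconfig/
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "redis"
+```
+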
+## Build Process Overview
+
+The build process is driven by running `make`. The two most common makefile
+targets are `config` and `build`, but there are also utility targets that are
+handy to use during development.
+
+### `config` make target
+
+`config` requires a `PODCONFIG` argument, which is a name of a file in the
+`podconfig` directory.  `PODCONFIG` defaults to `invalid`, so if you get errors
+claiming an invalid config, you probably didn't set it, or set it to a filename
+that doesn't exist.
+
+#### Examples: `make config`
+
+`make PODCONFIG=rcord-local.yml config`
+
+`make PODCONFIG=opencloud-mock.yml config`
+
+### `build` make target
+
+`make build` performs the build process, and takes no arguments.  It may run
+different targets specified by the scenario.
+
+Most of the build targets in the Makefile don't leave artifacts behind, so we
+write placeholder files (aka "sentinels" or "empty targets") in the
+`milestones` directory.
+
+### Utility make targets
+
+There are various utility targets:
+
+ - `printconfig`: Prints the configured scenario and profile.
+
+ - `xos-teardown`: Stop and remove a running set of XOS docker containers
+
+ - `collect-diag`: Collect detailed diagnostic information on a deployed head
+   and compute nodes, into `diag-<datestamp>` directory on the head node.
+
+ - `compute-node-refresh`: Reload compute nodes brought up by MaaS into XOS,
+   useful in the cord virtual and physical scenarios
+
+ - `pod-test`: Run the `platform-install/pod-test-playbook.yml`, testing the
+   virtual/physical cord scenario.
+
+ - `vagrant-destroy`: Destroy Vagrant containers (for mock/virtual/physical
+   installs)
+
+ - `clean-images`: Have containers rebuild during the next build cycle. Does
+   not actually delete any images, just causes imagebuilder to be run again.
+
+ - `clean-genconfig`: Deletes the `make config` generated config files in
+   `genconfig`, useful when switching between podconfigs
+
+ - `clean-onos`: Stops the ONOS containers on the head node
+
+ - `clean-openstack`: Cleans up and deletes all instances and networks created
+   in OpenStack.
+
+ - `clean-profile`: Deletes the `cord_profile` directory
+
+ - `clean-all`: Runs `vagrant-destroy`, `clean-genconfig`, and `clean-profile`
+   targets, removes all milestones. Good for resetting a dev environment back
+   to an unconfigured state.
+
+ - `clean-local`:  `clean-all` but for the `local` scenario - Runs
+   `clean-genconfig` and `clean-profile` targets, removes local milestones.
+
+The `clean-*` utility targets should modify the contents of the milestones
+directory appropriately to cause the steps they clean up after to be rerun on
+the next `make build` cycle.
+
+### Development workflow
+
+#### Updating XOS Container Images on a running pod
+
+To rebuild and update XOS container images, run:
+
+```
+make xos-update-images
+make -j4 build
+```
+
+This will build new copies of all the images; then, when the build is run, the
+newly built containers will be restarted.
+
+If you additionally want to stop all the XOS containers, clear the database,
+and reload the profile, use `xos-teardown`:
+
+```
+make xos-teardown
+make -j4 build
+```
+
+This will teardown the XOS container set, tell the build system to rebuild
+images, then perform a build and reload the profile.
+
+#### Use ElasticStack or ONOS with the `single` scenario
+
+The single scenario is a medium-weight scenario for synchronizer development,
+and has optional ElasticStack or ONOS functionality.
+
+To use these, you would invoke the ONOS or ElasticStack milestone target before
+the `build` target:
+
+```
+make PODCONFIG=rcord-single.yml config
+make -j4 milestones/deploy-elasticstack
+make -j4 build
+```
+
+or
+
+```
+make PODCONFIG=opencloud-single.yml config
+make -j4 milestones/deploy-onos
+make -j4 build
+```
+
+If you want to use both in combination, make sure to run the ElasticStack
+target first, so ONOS can send logs to ElasticStack.
+