[CORD-795]
Docker image building/tagging/labeling, Makefile based build

Change-Id: I0d70ab01353999c0e0585547582af9c62d247987
diff --git a/.gitignore b/.gitignore
index 95b37f0..cc8785b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,3 +46,8 @@
 
 # GitBook
 _book
+
+# imagebuilder generated files
+ib_actions.yml
+ib_graph.dot
+
diff --git a/Jenkinsfile.imagebuilder b/Jenkinsfile.imagebuilder
new file mode 100644
index 0000000..ca45649
--- /dev/null
+++ b/Jenkinsfile.imagebuilder
@@ -0,0 +1,33 @@
+
+stage('checkout') {
+    node('master'){
+      checkout(changelog: false, poll: false, scm: [$class: 'RepoScm', \
+        manifestRepositoryUrl: 'https://gerrit.opencord.org/manifest.git', \
+        manifestBranch: 'master', currentBranch: true, \
+        manifestFile: 'default.xml', \
+        destinationDir: 'cord', \
+        forceSync: true, resetFirst: true, \
+        quiet: false, jobs: 4, showAllChanges: true])
+   }
+}
+
+stage('imagebuilder'){
+  node('master') {
+    sh '$WORKSPACE/imagebuilder.py -v -b -r cord/ -c cord/docker_images.yml -l image_logs -a ib_actions.yml -g ib_graph.dot'
+  }
+}
+
+stage('push'){
+  node('master'){
+    def ib_actions = readYaml( file:"$WORKSPACE/ib_actions.yml" )
+
+    withDockerRegistry([credentialsId: 'docker-artifact-push-credentials']) {
+      for(image in ib_actions.ib_built){
+        echo "Pushing image: " + image.push_name
+        docker.image(image.push_name).push()
+      }
+    }
+  }
+}
+
+echo "All done"
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..da4f887
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,241 @@
+# CORD Master Makefile
+
+# Timestamp for log files
+TS               := $(shell date +'%Y%m%dT%H%M%SZ')
+
+# Podconfig must be specified, so an invalid default
+PODCONFIG        ?= invalid
+
+# Source path
+BUILD            ?= .
+CORD             ?= ..
+PI               ?= $(BUILD)/platform-install
+MAAS             ?= $(BUILD)/maas
+
+# Configuration paths
+PODCONFIG_D      ?= $(BUILD)/podconfig
+PODCONFIG_PATH   ?= $(PODCONFIG_D)/$(PODCONFIG)
+
+SCENARIOS_D      ?= $(BUILD)/scenarios
+GENCONFIG_D      ?= $(BUILD)/genconfig
+
+# Milestones/logs paths
+M                ?= $(BUILD)/milestones
+LOGS             ?= $(BUILD)/logs
+
+ALL_MILESTONES   ?= $(M)/prereqs-check $(M)/vagrant-up $(M)/copy-cord $(M)/cord-config $(M)/copy-config $(M)/prep-buildnode $(M)/prep-headnode $(M)/elasticstack $(M)/prep-computenode $(M)/glance-images $(M)/deploy-openstack $(M)/deploy-maas $(M)/deploy-computenode $(M)/docker-images $(M)/core-image $(M)/start-xos $(M)/onboard-profile $(M)/deploy-onos $(M)/onboard-openstack
+
+LOCAL_MILESTONES ?= $(M)/local-cord-config $(M)/local-docker-images $(M)/local-core-image $(M)/local-start-xos $(M)/local-onboard-profile
+
+# Configuration files
+MASTER_CONFIG    ?= $(GENCONFIG_D)/config.yml
+MAKEFILE_CONFIG  ?= $(GENCONFIG_D)/config.mk
+INVENTORY        ?= $(GENCONFIG_D)/inventory.ini
+PROFILE_NAME_F   ?= $(GENCONFIG_D)/cord_profile
+SCENARIO_NAME_F  ?= $(GENCONFIG_D)/cord_scenario
+
+CONFIG_FILES     = $(MASTER_CONFIG) $(MAKEFILE_CONFIG) $(INVENTORY) $(PROFILE_NAME_F) $(SCENARIO_NAME_F)
+
+include $(MAKEFILE_CONFIG)
+
+# Set using files from genconfig
+SCENARIO          = $(shell cat $(SCENARIO_NAME_F))
+PROFILE           = $(shell cat $(PROFILE_NAME_F))
+
+# Host names for SSH commands
+BUILDNODE        ?= head1
+HEADNODE         ?= ${BUILDNODE}
+
+# Vagrant config
+VAGRANT_PROVIDER ?= libvirt
+VAGRANT_VMS      ?= $(HEADNODE)
+VAGRANT_CWD      ?= $(SCENARIOS_D)/$(SCENARIO)/
+SSH_CONFIG       ?= ~/.ssh/config  # Vagrant modifies this, should it always?
+
+# Ansible args, for verbosity and other runtime parameters
+ANSIBLE_ARGS     ?=
+
+# Commands
+SHELL            = bash -o pipefail
+VAGRANT          ?= VAGRANT_CWD=$(VAGRANT_CWD) vagrant
+ANSIBLE          ?= ansible -i $(INVENTORY)
+ANSIBLE_PB       ?= ansible-playbook $(ANSIBLE_ARGS) -i $(INVENTORY) --extra-vars @$(MASTER_CONFIG)
+ANSIBLE_PB_LOCAL ?= ansible-playbook $(ANSIBLE_ARGS) -i $(PI)/inventory/head-localhost --extra-vars "@/opt/cord_profile/genconfig/config.yml"
+ANSIBLE_PB_MAAS  ?= ansible-playbook $(ANSIBLE_ARGS) -i /etc/maas/ansible/pod-inventory --extra-vars "@/opt/cord_profile/genconfig/config.yml"
+IMAGEBUILDER     ?= python $(BUILD)/scripts/imagebuilder.py
+LOGCMD           ?= 2>&1 | tee -a $(LOGS)/$(TS)_$(@F)
+SSH_HEAD         ?= ssh $(HEADNODE)
+SSH_BUILD        ?= ssh $(BUILDNODE)
+
+# default target, prints help
+.DEFAULT: help
+
+help:
+	@echo "Please specify a target (config, build, teardown, ...)"
+
+# Config file generation
+config: $(CONFIG_FILES)
+
+
+$(CONFIG_FILES):
+	ansible-playbook -i 'localhost,' --extra-vars="cord_podconfig='$(PODCONFIG_PATH)' genconfig_dir='$(GENCONFIG_D)' scenarios_dir='$(SCENARIOS_D)'" $(BUILD)/ansible/genconfig.yml $(LOGCMD)
+
+printconfig: config
+	@echo "Scenario: $(SCENARIO)"
+	@echo "Profile: $(PROFILE)"
+
+# Primary Targets
+# Many of these targets use target-specific variables
+# https://www.gnu.org/software/make/manual/html_node/Target_002dspecific.html
+
+build: $(BUILD_TARGETS)
+
+# Utility targets
+
+xos-teardown: xos-update-images
+	$(ANSIBLE_PB) $(PI)/teardown-playbook.yml $(LOGCMD)
+	rm -f $(M)/onboard-profile $(M)/local-onboard-profile
+
+xos-update-images: clean-images
+	rm -f $(M)/core-image $(M)/start-xos $(M)/local-core-image $(M)/local-start-xos
+
+compute-node-refresh:
+	$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_MAAS) $(PI)/compute-node-refresh-playbook.yml" $(LOGCMD)
+
+vagrant-destroy:
+	$(VAGRANT) destroy $(LOGCMD)
+	rm -f $(M)/vagrant-up
+
+clean-images:
+	rm -f $(M)/docker-images $(M)/local-docker-images
+
+clean-genconfig:
+	rm -f $(CONFIG_FILES)
+
+clean-profile:
+	rm -rf $(CONFIG_CORD_PROFILE_DIR)
+	rm -f $(M)/cord-config $(M)/copy-config
+
+clean-all: vagrant-destroy clean-profile clean-genconfig
+	rm -f $(ALL_MILESTONES)
+
+clean-local: clean-profile clean-genconfig
+	rm -f $(LOCAL_MILESTONES)
+
+# == PREREQS == #
+VAGRANT_UP_PREREQS     ?=
+CORD_CONFIG_PREREQS    ?=
+COPY_CONFIG_PREREQS    ?=
+PREP_BUILDNODE_PREREQS ?=
+PREP_HEADNODE_PREREQS  ?=
+
+# == MILESTONES == #
+# empty target files are touched in the milestones dir to indicate completion
+
+$(M)/prereqs-check:
+	$(ANSIBLE_PB) $(PI)/prereqs-check-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/vagrant-up: | $(VAGRANT_UP_PREREQS)
+	$(VAGRANT) up $(VAGRANT_VMS) --provider $(VAGRANT_PROVIDER) $(LOGCMD)
+	@echo "Configuring SSH for VM's..."
+	$(VAGRANT) ssh-config $(VAGRANT_VMS) > $(SSH_CONFIG)
+	touch $@
+
+$(M)/copy-cord: | $(M)/vagrant-up
+	$(ANSIBLE_PB) $(PI)/copy-cord-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/cord-config: | $(M)/vagrant-up $(CORD_CONFIG_PREREQS)
+	$(ANSIBLE_PB) $(PI)/cord-config-playbook.yml $(LOGCMD)
+	cp -r $(GENCONFIG_D) $(CONFIG_CORD_PROFILE_DIR)/genconfig
+	touch $@
+
+$(M)/copy-config: | $(COPY_CONFIG_PREREQS)
+	$(ANSIBLE_PB) $(PI)/copy-profile-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/prep-buildnode: | $(M)/vagrant-up $(M)/cord-config $(PREP_BUILDNODE_PREREQS)
+	$(ANSIBLE_PB) $(PI)/prep-buildnode-playbook.yml $(LOGCMD)
+	@echo Waiting 10 seconds to timeout SSH ControlPersist, and so future ansible commands gain docker group membership
+	sleep 10
+	touch $@
+
+$(M)/prep-headnode: | $(M)/vagrant-up $(M)/cord-config $(PREP_HEADNODE_PREREQS)
+	$(ANSIBLE_PB) $(PI)/prep-headnode-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/elasticstack: | $(M)/prep-headnode
+	$(ANSIBLE_PB) $(PI)/deploy-elasticstack-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/prep-computenode: | $(M)/prep-headnode
+	$(ANSIBLE_PB) $(PI)/prep-computenode-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/glance-images: | $(M)/prep-headnode
+	$(ANSIBLE_PB) $(PI)/glance-images-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/deploy-openstack: | $(M)/elasticstack $(M)/prep-headnode $(M)/prep-computenode
+	$(ANSIBLE_PB) $(PI)/deploy-openstack-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/deploy-maas: | $(M)/deploy-openstack
+	$(ANSIBLE_PB) $(MAAS)/head.yml $(LOGCMD)
+	touch $@
+
+$(M)/deploy-computenode: | $(M)/deploy-openstack
+	$(ANSIBLE_PB) $(PI)/deploy-computenode-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/docker-images: | $(M)/prep-buildnode
+	$(SSH_BUILD) "cd /opt/cord/build; $(IMAGEBUILDER) -f $(MASTER_CONFIG) -l $(BUILD)/image_logs -g $(BUILD)/ib_graph.dot -a $(BUILD)/ib_actions.yml " $(LOGCMD)
+	touch $@
+
+$(M)/core-image: | $(M)/docker-images $(M)/prep-headnode
+	$(ANSIBLE_PB) $(PI)/build-core-image-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/start-xos: | $(M)/prep-headnode $(M)/core-image
+	$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/start-xos-playbook.yml" $(LOGCMD)
+	touch $@
+
+$(M)/onboard-profile: | $(M)/start-xos
+	$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/onboard-profile-playbook.yml" $(LOGCMD)
+	touch $@
+
+$(M)/deploy-onos: | $(M)/start-xos $(M)/docker-images
+	$(ANSIBLE_PB) $(PI)/deploy-onos-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/onboard-openstack: | $(M)/deploy-computenode $(M)/glance-images $(M)/deploy-onos $(M)/onboard-profile
+	$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/onboard-openstack-playbook.yml" $(LOGCMD)
+	touch $@
+
+# Testing targets
+pod-test: $(M)/onboard-openstack
+	$(ANSIBLE_PB) $(PI)/pod-test-playbook.yml $(LOGCMD)
+
+# Local Targets, bring up XOS containers without a VM
+
+$(M)/local-cord-config:
+	$(ANSIBLE_PB) $(PI)/cord-config-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/local-docker-images: | $(M)/local-cord-config
+	$(IMAGEBUILDER) -f $(MASTER_CONFIG) -l $(BUILD)/image_logs -g $(BUILD)/ib_graph.dot -a $(BUILD)/ib_actions.yml $(LOGCMD)
+	touch $@
+
+$(M)/local-core-image: | $(M)/local-docker-images
+	$(ANSIBLE_PB) $(PI)/build-core-image-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/local-start-xos: | $(M)/local-core-image
+	$(ANSIBLE_PB) $(PI)/start-xos-playbook.yml $(LOGCMD)
+	touch $@
+
+$(M)/local-onboard-profile: | $(M)/local-start-xos
+	$(ANSIBLE_PB) $(PI)/onboard-profile-playbook.yml $(LOGCMD)
+	touch $@
+
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 0000000..1b10379
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,9 @@
+[defaults]
+callback_whitelist = profile_tasks
+host_key_checking = false
+forks=20
+timeout=30
+
+[ssh_connection]
+ssh_args = -o ControlMaster=auto -o ControlPersist=3s
+
diff --git a/ansible/genconfig.yml b/ansible/genconfig.yml
new file mode 100644
index 0000000..33038b4
--- /dev/null
+++ b/ansible/genconfig.yml
@@ -0,0 +1,47 @@
+---
+# ansible/genconfig.yml
+# Generate configuration files
+
+- name: Load pod and scenario config files
+  hosts: localhost
+  connection: local
+  gather_facts: False
+  tasks:
+
+    - name: Set pod_config_path fact
+      set_fact:
+        pod_config_path:  "{{ cord_podconfig | realpath }}"
+
+    - name: Load pod config
+      include_vars: "{{ pod_config_path }}"
+
+    - name: Set scenario_config_path fact
+      set_fact:
+        scenario_config_path:  "{{ (scenarios_dir ~ '/' ~ cord_scenario ~ '/config.yml') | realpath }}"
+
+    - name: Load scenario config
+      include_vars: "{{ scenario_config_path }}"
+
+    - name: Reload pod config to take precedence
+      include_vars: "{{ pod_config_path }}"
+
+    - name: Load pod and scenario configs into facts
+      set_fact:
+        pod_config: "{{ lookup('file', pod_config_path) | from_yaml }}"
+        scenario_config: "{{ lookup('file', scenario_config_path) | from_yaml }}"
+
+    - name: Combine pod and scenario config into master config
+      set_fact:
+        master_config: "{{ scenario_config | combine(pod_config) }}"
+
+    - name: Print vars
+      debug:
+        var: master_config
+
+- name: Generate config files
+  hosts: localhost
+  connection: local
+  gather_facts: False
+  roles:
+    - genconfig
+
diff --git a/ansible/roles/genconfig/tasks/main.yml b/ansible/roles/genconfig/tasks/main.yml
new file mode 100644
index 0000000..5b34654
--- /dev/null
+++ b/ansible/roles/genconfig/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+# genconfig/tasks/main.yml
+
+- name: Create cord_profile file
+  copy:
+    dest: "{{ ( genconfig_dir ~ '/cord_profile' ) | realpath }}"
+    content: "{{ cord_profile }}"
+
+- name: Create cord_scenario file
+  copy:
+    dest: "{{ ( genconfig_dir ~ '/cord_scenario' ) | realpath }}"
+    content: "{{ cord_scenario }}"
+
+- name: Generate config files
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ ( genconfig_dir ~ '/' ~ item ) | realpath }}"
+  with_items:
+    - inventory.ini
+    - config.mk
+    - config.yml
+
diff --git a/ansible/roles/genconfig/templates/config.mk.j2 b/ansible/roles/genconfig/templates/config.mk.j2
new file mode 100644
index 0000000..dd0fc06
--- /dev/null
+++ b/ansible/roles/genconfig/templates/config.mk.j2
@@ -0,0 +1,41 @@
+# config.mk - generated from ansible/roles/genconfig/templates/config.mk.j2
+# ** DO NOT EDIT THIS FILE MANUALLY! **
+# Edit the Pod Config (or Scenario) and rerun `make config` to regenerate it
+
+# Scenario specific config
+{% if vagrant_vms is defined %}
+VAGRANT_VMS             = {{ vagrant_vms | join(' ') }}
+{% endif %}
+{% if headnode is defined %}
+HEADNODE                = {{ headnode }}
+{% endif %}
+{% if buildnode is defined %}
+BUILDNODE               = {{ buildnode }}
+{% endif %}
+{% if deploy_docker_tag is defined %}
+DEPLOY_DOCKER_TAG       = {{ deploy_docker_tag }}
+{% endif %}
+{% if config_cord_profile_dir is defined %}
+CONFIG_CORD_PROFILE_DIR = {{ config_cord_profile_dir }}
+{% endif %}
+
+# Targets and prerequisties
+{% if build_targets is defined %}
+BUILD_TARGETS           = $(M)/{{ build_targets | join(" $(M)/") }}
+{% endif %}
+{% if vagrant_up_prereqs is defined %}
+VAGRANT_UP_PREREQS      = $(M)/{{ vagrant_up_prereqs | join(" $(M)/") }}
+{% endif %}
+{% if cord_config_prereqs is defined %}
+CORD_CONFIG_PREREQS     = $(M)/{{ cord_config_prereqs | join(" $(M)/") }}
+{% endif %}
+{% if copy_config_prereqs is defined %}
+COPY_CONFIG_PREREQS     = $(M)/{{ copy_config_prereqs | join(" $(M)/") }}
+{% endif %}
+{% if prep_buildnode_prereqs is defined %}
+PREP_BUILDNODE_PREREQS  = $(M)/{{ prep_buildnode_prereqs | join(" $(M)/") }}
+{% endif %}
+{% if prep_headnode_prereqs is defined %}
+PREP_HEADNODE_PREREQS   = $(M)/{{ prep_headnode_prereqs | join(" $(M)/") }}
+{% endif %}
+
diff --git a/ansible/roles/genconfig/templates/config.yml.j2 b/ansible/roles/genconfig/templates/config.yml.j2
new file mode 100644
index 0000000..09dffe4
--- /dev/null
+++ b/ansible/roles/genconfig/templates/config.yml.j2
@@ -0,0 +1,7 @@
+---
+# config.yml - generated from ansible/roles/genconfig/templates/config.yml.j2
+# ** DO NOT EDIT THIS FILE MANUALLY! **
+# Edit the Pod Config (or Scenario) and rerun `make config` to regenerate it
+
+{{ master_config | to_nice_yaml }}
+
diff --git a/ansible/roles/genconfig/templates/inventory.ini.j2 b/ansible/roles/genconfig/templates/inventory.ini.j2
new file mode 100644
index 0000000..f23f475
--- /dev/null
+++ b/ansible/roles/genconfig/templates/inventory.ini.j2
@@ -0,0 +1,24 @@
+; inventory.ini, generated from ansible/roles/genconfig/templates/inventory.ini.j2
+; ** DO NOT EDIT THIS FILE MANUALLY! **
+; Edit the Pod Config (or Scenario) and rerun `make config` to regenerate it
+
+{% macro group_template(groupname) %}
+{% if inventory_groups[groupname] %}
+{% for g_key, g_val in inventory_groups[groupname].iteritems() %}
+{{ g_key }}{% if g_val %} {% for key, val in g_val.iteritems() %}{{ key ~ "=" ~ val ~ " " }}{% endfor %}{% endif %}
+{% endfor %}
+{% endif %}
+{% endmacro %}
+
+[config]
+{{ group_template('config') }}
+
+[build]
+{{ group_template('build') }}
+
+[head]
+{{ group_template('head') }}
+
+[compute]
+{{ group_template('compute') }}
+
diff --git a/config/opencloud_in_a_box.yaml b/config/opencloud_in_a_box.yaml
deleted file mode 100644
index 3f99ffc..0000000
--- a/config/opencloud_in_a_box.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# Opencloud-in-a-Box configuration
-
-public_network: cordpub
-public_network_cidr: "10.230.100.0/24"
-
-mgmt_network: cordmgmt
-
-virt_nets:
-  - name: cordpub
-    ipv4_prefix: 10.230.100
-    nodes:
-      - name: head1
-        ipv4_last_octet: 100
-      - name: compute1
-        ipv4_last_octet: 101
-      - name: compute2
-        ipv4_last_octet: 102
-  - name: cordmgmt
-    ipv4_prefix: 192.168.200
-    nodes:
-      - name: head1
-        ipv4_last_octet: 10
-      - name: compute1
-        ipv4_last_octet: 20
-      - name: compute2
-        ipv4_last_octet: 21
-
diff --git a/docker_images.yml b/docker_images.yml
new file mode 100644
index 0000000..14fe1ba
--- /dev/null
+++ b/docker_images.yml
@@ -0,0 +1,146 @@
+---
+# docker_images.yml
+# master list of docker images used by imagebuilder.py
+
+# tag to set on every container built/downloaded
+
+docker_build_tag: "candidate"
+
+# pull_only_images: images pulled and used directly without changes
+# you must include a tag (':1.0') on these images to pull the correct version
+#
+# do not include images in this list that are parents of images
+# in buildable_images - those are automatically pulled
+
+pull_only_images:
+  - "gliderlabs/consul-server:0.6"
+  - "gliderlabs/registrator:v7"
+  - "nginx:1.13"
+  - "onosproject/onos:1.8.9"
+  - "redis:3.2"
+
+# buildable_images - list of images that we build from Dockerfiles
+#
+# required:
+#  name: Name of docker image
+#  repo: git repo name (not the path! not what it's renamed to be in the repo manifest!)
+# optional:
+#  dockerfile: Name of Dockerfile. Default: "Dockerfile"
+#  path: Path to the directory containing the Dockerfile, relative to the repo root. Default: "."
+#  context: Path to docker build context, relative to path item. Default: "."
+#  components: list of sub-components to copy into the build context, each a dict with:
+#    repo: Name of component repo to copy from (required)
+#    path: path to copy from, relative to the component repo. Default: "."
+#    dest: destination path to copy components to, relative to context
+
+buildable_images:
+  - name: xosproject/xos-base
+    repo: xos
+    path: "containers/xos"
+    dockerfile: "Dockerfile.base"
+
+  - name: xosproject/xos-postgres
+    repo: xos
+    path: "containers/postgresql"
+
+  - name: xosproject/xos
+    repo: xos
+    path: "containers/xos"
+    dockerfile: "Dockerfile.xos"
+    context: "../.."
+
+  - name: xosproject/xos-corebuilder
+    repo: xos
+    path: "containers/xos"
+    dockerfile: "Dockerfile.corebuilder"
+    context: "../.."
+
+  - name: xosproject/chameleon
+    repo: xos
+    path: "containers/chameleon"
+    dockerfile: "Dockerfile.chameleon"
+    components:
+      - repo: chameleon
+        dest: tmp.chameleon
+
+  - name: xosproject/xos-client
+    repo: xos
+    path: "containers/xos"
+    dockerfile: "Dockerfile.client"
+    context: "../.."
+    components:
+      - repo: chameleon
+        dest: containers/xos/tmp.chameleon
+
+  - name: xosproject/xos-synchronizer-base
+    repo: xos
+    path: "containers/xos"
+    dockerfile: "Dockerfile.synchronizer-base"
+    context: "../.."
+
+  - name: xosproject/xos-ws
+    repo: xos-rest-gw
+
+  - name: xosproject/xos-gui
+    repo: xos-gui
+
+  - name: xosproject/xos-gui-extension-builder
+    repo: xos-gui
+    dockerfile: "Dockerfile.xos-gui-extension-builder"
+
+  - name: xosproject/gui-extension-rcord
+    repo: rcord
+    path: "xos/gui"
+
+  - name: xosproject/gui-extension-vtr
+    repo: vtr
+    path: "xos/gui"
+
+  - name: xosproject/gui-extension-sample
+    repo: xos-sample-gui-extension
+
+  - name: xosproject/openstack-synchronizer
+    repo: openstack
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/onos-synchronizer
+    repo: onos-service
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/volt-synchronizer
+    repo: olt
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/vrouter-synchronizer
+    repo: vrouter
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/vtn-synchronizer
+    repo: vtn
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/vtr-synchronizer
+    repo: vtr
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/vsg-synchronizer
+    repo: vsg
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/fabric-synchronizer
+    repo: fabric
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
+  - name: xosproject/exampleservice-synchronizer
+    repo: exampleservice
+    path: "xos/synchronizer"
+    dockerfile: "Dockerfile.synchronizer"
+
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 8c6b304..b40bab6 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -4,6 +4,7 @@
     * [CORD-in-a-Box](quickstart.md)
     * [Physical POD](quickstart_physical.md)
     * [Internals: platform-install](platform-install/internals.md)
+    * [Make-based build](quickstart_make.md)
 * [Operating CORD](operate/README.md)
     * [Powering Up a POD](operate/power_up.md)
     * [ELK Stack Logs](operate/elk_stack.md)
diff --git a/docs/quickstart_make.md b/docs/quickstart_make.md
new file mode 100644
index 0000000..26ca031
--- /dev/null
+++ b/docs/quickstart_make.md
@@ -0,0 +1,323 @@
+# CORD Quickstart (Makefile edition)
+
+## TL;DR Version
+
+ - Setup your build environment. On a bare Ubuntu 14.04 system, this can be
+   done by running `scripts/cord-bootstrap.sh` or manually.
+ - Pick a Pod Config, such as `rcord-mock.yml` from the podconfig directory.
+ - Run `make PODCONFIG=podconfig.yml config` (see 'podconfig' directory for
+   filenames) to generate a configuration into the `genconfig/` directory.
+ - Run `make build` to build CORD.
+
+## Setup the build environment
+
+### Bootstrap script
+
+There's a script `scripts/cord-bootstrap.sh` that will bootstrap an Ubuntu
+14.04 system (such as a CloudLab node) by installing the proper tools and
+checkout the codebase with repo.
+
+It can be downloaded via:
+
+```
+curl -o ~/cord-bootstrap.sh https://raw.githubusercontent.com/opencord/cord/master/scripts/cord-bootstrap.sh
+chmod +x cord-bootstrap.sh
+```
+
+The bootstrap script has the following useful options:
+
+```
+Usage for ./cord-bootstrap.sh:
+  -d                           Install Docker for local scenario.
+  -h                           Display this help message.
+  -p <project:change/revision> Download a patch from gerrit. Can be repeated.
+  -t <target>                  Run 'make -j4 <target>' in cord/build/. Can be repeated.
+  -v                           Install Vagrant for mock/virtual/physical scenarios.
+```
+
+The `-p` option will download a patch from gerrit, and the syntax for this is
+`<project path>:<changeset>/<revision>`.  It can be used multiple times - for
+example:
+
+```
+./cord-bootstrap.sh -p build/platform-install:1233/4 -p orchestration/xos:1234/2
+```
+
+will check out the `platform-install` repo with changeset 1233, patchset 4, and
+`xos` repo changeset 1234, revision 2.
+
+You can find the project path in the `repo` manifest file: [manifest/default.xml](https://gerrit.opencord.org/gitweb?p=manifest.git;a=blob;f=default.xml).
+
+In some cases, you may see a message like this if you install software that
+adds you to a group and you aren't already a member:
+
+```
+You are not in the group: libvirtd, please logout/login.
+You are not in the group: docker, please logout/login.
+```
+
+In those cases, please logout and login to the system to gain the proper group
+membership. Note that any patches specified will be downloaded, but no make
+targets will be run if you're not in the right groups.
+
+#### cord-bootstrap.sh Examples
+
+Download the source code and prep for a local build by installing Docker
+```
+./cord-bootstrap.sh -d
+```
+
+A `rcord-local` build from master. Note that the make targets may not run if
+you aren't already in the `docker` group, so you'd need to logout/login and
+rerun them.
+
+```
+./cord-bootstrap.sh -d -t "PODCONFIG=rcord-local.yml config" -t "build"
+```
+
+A prep for a mock/virtual/physical build, with a gerrit patchset applied:
+
+```
+./cord-bootstrap.sh -v -p orchestration/xos:1000/1
+```
+
+A virtual rcord pod, with tests run afterward. Assumes that you're already in
+the `libvirtd` group:
+
+```
+./cord-bootstrap.sh -v -t "PODCONFIG=rcord-virtual.yml config" -t "build" -t "pod-test"
+```
+
+### Manual setup
+
+The following tools are required to get started with CORD:
+
+ - [Ansible](https://docs.ansible.com/ansible/intro_installation.html)
+ - [Vagrant](https://www.vagrantup.com/downloads.html)
+ - [Repo](https://source.android.com/source/downloading#installing-repo)
+ - [Docker](https://www.docker.com/community-edition)
+
+Downloading the source tree can be done by running:
+
+```
+mkdir cord && cd cord
+repo init -u https://gerrit.opencord.org/manifest -b master
+repo sync
+```
+
+The build system can be found in the `cord/build/` directory.
+
+## Configuring a build
+
+### Pod Config
+
+Configuration for a specific pod, specified in a YAML file that is used to
+generate other configuration files.  These also specify the scenario and
+profile to be used, and allow overriding the configuration in various ways, such
+as hostnames, passwords, and other ansible inventory specific items. These
+are specified in the `podconfig` directory.
+
+A minimal Pod Config file must define:
+
+`cord_scenario` - the name of the scenario to use, which is defined in a
+directory under `scenarios`.
+
+`cord_profile` - the name of a profile to use, defined as a YAML file in
+`platform-install/profile_manifests`.
+
+### Scenarios
+
+Defines the physical or virtual environment that CORD will be installed
+into, a default mapping of ansible groups to nodes, the set of Docker images
+that can be built, and software and platform features are installed onto those
+nodes. Scenarios are subdirectories of the `scenarios` directory, and consist
+of a `config.yml` file and possibly VMs specified in a `Vagrantfile`.
+
+#### Included Scenarios
+
+- `local`: Minimal set of containers running locally on the development host
+- `mock`: Creates a single Vagrant VM with containers and DNS set up
+- `cord`: Physical or virtual multi-node CORD pod, with MaaS and OpenStack
+- `opencloud`: Physical or virtual multi-node OpenCloud pod, with OpenStack
+
+### Profile
+
+The set of CORD services brought into XOS, the service graph, and other
+per-profile configuration for a CORD deployment. These are located in
+`platform-install/profile_manifests`.
+
+## Config generation overview
+
+When a command to generate config such as `make PODCONFIG=rcord-mock.yml
+config` is run, the following steps happen:
+
+1. The Pod Config file is read, in this case `podconfig/rcord-mock.yml`, which
+   specifies the scenario and profile.
+2. The Scenario config file is read, in this case `scenarios/mock/config.yml`.
+3. The contents of these files are combined into a master config variable, with
+   the Pod Config overwriting any config set in the Scenario.
+4. The entire master config is written to `genconfig/config.yml`.
+5. The `inventory_groups` variable is used to generate an ansible inventory
+   file and put in `genconfig/inventory.ini`.
+6. Various variables are used to generate the makefile config file
+   `genconfig/config.mk`. This sets the targets invoked by `make build`
+
+Note that the combination of the Pod and Scenario config in step #3 is not a
+merge - if you define an item in the root of the Pod Config that has subkeys,
+it will overwrite every subkey defined in the Scenario.  This is most noticeable
+when setting the `inventory_groups` or `docker_image_whitelist` variable - if
+changing either in a Pod Config, you must recreate the entire structure or
+list. This may seem inconvenient, but other list or tree merging strategies
+lack a way to remove items from a tree structure.
+
+## Build process overview
+
+The build process is driven by running `make`. The two most common makefile
+targets are `config` and `build`, but there are also utility targets that are
+handy to use during development.
+
+### `config` make target
+
+`config` requires a `PODCONFIG` argument, which is a name of a file in the
+`podconfig` directory.  `PODCONFIG` defaults to `invalid`, so if you get errors
+claiming an invalid config, you probably didn't set it, or set it to a filename
+that doesn't exist.
+
+#### `make config` Examples
+
+`make PODCONFIG=rcord-local.yml config`
+
+`make PODCONFIG=opencloud-mock.yml config`
+
+### `build` make target
+
+`make build` performs the build process, and takes no arguments.  It may run
+different targets specified by the scenario.
+
+Most of the build targets in the Makefile don't leave artifacts behind, so we
+write a placeholder file (aka "sentinels" or "empty targets") in the
+`milestones` directory.
+
+### Utility make targets
+
+There are various utility targets:
+
+ - `printconfig`: Prints the configured scenario and profile.
+
+ - `xos-teardown`: Stop and remove a running set of XOS docker containers
+
+ - `compute-node-refresh`: Reload compute nodes brought up by MaaS into XOS,
+   useful in the cord virtual and physical scenarios
+
+ - `pod-test`: Run the `platform-install/pod-test-playbook.yml`, testing the
+   virtual/physical cord scenario.
+
+ - `vagrant-destroy`: Destroy Vagrant containers (for mock/virtual/physical
+   installs)
+
+ - `clean-images`: Have containers rebuild during the next build cycle. Does
+   not actually delete any images, just causes imagebuilder to be run again.
+
+ - `clean-genconfig`: Deletes the `make config` generated config files in
+   `genconfig`, useful when switching between podconfigs
+
+ - `clean-profile`: Deletes the `cord_profile` directory
+
+ - `clean-all`: Runs `vagrant-destroy`, `clean-genconfig`, and `clean-profile`
+   targets, removes all milestones. Good for resetting a dev environment back
+   to an unconfigured state.
+
+ - `clean-local`:  `clean-all` but for the `local` scenario - Runs
+   `clean-genconfig` and `clean-profile` targets, removes local milestones.
+
+The `clean-*` utility targets should modify the contents of the milestones
+directory appropriately to cause the steps they clean up after to be rerun on
+the next `make build` cycle.
+
+### Target logging
+
+`make` targets that are built will create a per-target log file in the `logs`
+directory. These are prefixed with a datestamp which is the same for every
+target in a single run of make - re-running make will result in additional sets
+of logs, even for the same target.
+
+### Tips and tricks
+
+#### Debugging make failures
+
+If you have a build failure and want to know which targets completed, running:
+
+```
+ls -ltr milestones ; ls -ltr logs
+```
+
+And looking for logfiles without a corresponding milestone will point you to
+the make target(s) that failed.
+
+#### Update XOS container images
+
+To rebuild and update XOS container images, run:
+
+```
+make xos-update-images
+make -j4 build
+```
+
+This will build new copies of all the images, then when build is run the newly
+built containers will be restarted.
+
+If you additionally want to stop all the XOS containers, clear the database,
+and reload the profile, use `xos-teardown`:
+
+```
+make xos-teardown
+make -j4 build
+```
+
+This will teardown the XOS container set, tell the build system to rebuild
+images, then perform a build and reload the profile.
+
+### Building docker images with imagebuilder.py
+
+For docker images for XOS (and possibly others in the future) the build system
+uses the imagebuilder script.  Run `imagebuilder.py -h` for a list of arguments
+it supports.
+
+For Docker images built by imagebuilder, the docker build logs are located in
+the `image_logs` directory on the build host, which may differ between
+scenarios.
+
+The full list of all buildable images is in `docker_images.yml`, and the set of
+images pulled in a particular build is controlled by the
+`docker_image_whitelist` variable that is set on a per-scenario basis.
+
+This script is in charge of guaranteeing that the code that has been checked
+out and containers used by the system have the same code in them.  This is a
+somewhat difficult task as we have parent/child relationships between
+containers as well as components which are in multiple git repos in the source
+tree and all of which could change independently, be on different branches, or
+be manually modified during development. imagebuilder does this through a
+combination of tagging and labeling which allows images to be prebuilt and
+downloaded from dockerhub while still maintaining these guarantees.
+
+imagebuilder takes as input a YAML file listing the images to be built, where
+the Dockerfiles for those containers are located, and then goes about building
+and tagging images.   The result of an imagebuilder run is:
+
+ - Docker images in the local context
+ - Optionally:
+   - A YAML file which describes what actions imagebuilder performed (the `-a`
+     option, default is `ib_actions.yml` )
+   - A DOT file for graphviz that shows container relationships
+
+While imagebuilder will pull down required images from dockerhub and build/tag
+images, it does not push those images or delete obsolete ones.  These tasks are
+left to other software (Ansible, Jenkins) which should take in imagebuilder's
+YAML output and take the appropriate actions.
+
+Additionally, there may be several operational tasks that take this as input.
+Updating a running pod might involve stopping containers that have updated
+images, starting containers with the new image, handling any errors if new
+containers don't come up, then removing the obsolete images. These tasks go
+beyond image building and are left to the deployment system.
+
diff --git a/image_logs/.gitignore b/image_logs/.gitignore
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/image_logs/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/image_logs/README.md b/image_logs/README.md
new file mode 100644
index 0000000..712cbdc
--- /dev/null
+++ b/image_logs/README.md
@@ -0,0 +1,4 @@
+# image_logs
+
+This directory contains logfiles of docker image creation, created by imagebuilder.py
+
diff --git a/logs/.gitignore b/logs/.gitignore
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/logs/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/logs/README.md b/logs/README.md
new file mode 100644
index 0000000..4b10d0f
--- /dev/null
+++ b/logs/README.md
@@ -0,0 +1,4 @@
+# logs
+
+Datestamped log files created during the make process will appear here.
+
diff --git a/milestones/.gitignore b/milestones/.gitignore
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/milestones/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/milestones/README.md b/milestones/README.md
new file mode 100644
index 0000000..1cddeea
--- /dev/null
+++ b/milestones/README.md
@@ -0,0 +1,5 @@
+# Milestones
+
+This directory is used for Makefile milestone files, which are created on
+completion of a target and used for dependency management. See ../Makefile for
+more details.
diff --git a/podconfig/README.md b/podconfig/README.md
new file mode 100644
index 0000000..1b71922
--- /dev/null
+++ b/podconfig/README.md
@@ -0,0 +1,3 @@
+# Pod Config
+
+
diff --git a/podconfig/ecord-local.yml b/podconfig/ecord-local.yml
new file mode 100644
index 0000000..1937285
--- /dev/null
+++ b/podconfig/ecord-local.yml
@@ -0,0 +1,7 @@
+---
+# local-ecord Pod Config
+# Starts an E-CORD container set in the local environment
+
+cord_scenario: local
+cord_profile: ecord
+
diff --git a/podconfig/ecord-mock.yml b/podconfig/ecord-mock.yml
new file mode 100644
index 0000000..85ceb9e
--- /dev/null
+++ b/podconfig/ecord-mock.yml
@@ -0,0 +1,7 @@
+---
+# mock-ecord Pod Config
+# Creates a single-node mock E-CORD pod
+
+cord_scenario: mock
+cord_profile: ecord
+
diff --git a/podconfig/frontend-local.yml b/podconfig/frontend-local.yml
new file mode 100644
index 0000000..e910d9b
--- /dev/null
+++ b/podconfig/frontend-local.yml
@@ -0,0 +1,7 @@
+---
+# local-frontend Pod Config
+# Starts a frontend container set in the local environment
+
+cord_scenario: local
+cord_profile: frontend
+
diff --git a/podconfig/mcord-local.yml b/podconfig/mcord-local.yml
new file mode 100644
index 0000000..2ac6994
--- /dev/null
+++ b/podconfig/mcord-local.yml
@@ -0,0 +1,7 @@
+---
+# local-mcord Pod Config
+# Starts an M-CORD container set in the local environment
+
+cord_scenario: local
+cord_profile: mcord
+
diff --git a/podconfig/mcord-mock.yml b/podconfig/mcord-mock.yml
new file mode 100644
index 0000000..68b3bf8
--- /dev/null
+++ b/podconfig/mcord-mock.yml
@@ -0,0 +1,7 @@
+---
+# mock-mcord Pod Config
+# Creates a single-node mock M-CORD pod
+
+cord_scenario: mock
+cord_profile: mcord
+
diff --git a/podconfig/opencloud-local.yml b/podconfig/opencloud-local.yml
new file mode 100644
index 0000000..a820105
--- /dev/null
+++ b/podconfig/opencloud-local.yml
@@ -0,0 +1,7 @@
+---
+# local-opencloud Pod Config
+# Starts an OpenCloud container set in the local environment
+
+cord_scenario: local
+cord_profile: opencloud
+
diff --git a/podconfig/opencloud-mock.yml b/podconfig/opencloud-mock.yml
new file mode 100644
index 0000000..c30b91e
--- /dev/null
+++ b/podconfig/opencloud-mock.yml
@@ -0,0 +1,7 @@
+---
+# mock-opencloud Pod Config
+# Creates a single-node mock OpenCloud pod
+
+cord_scenario: mock
+cord_profile: opencloud
+
diff --git a/podconfig/opencloud-physical-example.yml b/podconfig/opencloud-physical-example.yml
new file mode 100644
index 0000000..04ae14f
--- /dev/null
+++ b/podconfig/opencloud-physical-example.yml
@@ -0,0 +1,53 @@
+---
+# opencloud-physical Example Pod Config
+# Example Pod Config for a physical multi-node OpenCloud pod
+
+cord_scenario: opencloud
+cord_profile: opencloud
+
+config_cord_dir: /opt/cord
+config_cord_profile_dir: /opt/sites/az_site
+
+# No VM's, or prereqs
+vagrant_vms:
+vagrant_up_prereqs:
+
+buildnode: node01.opencloud.cs.arizona.edu
+headnode: "{{ buildnode }}"
+
+site_name: az
+site_suffix: "opencloud.cs.arizona.edu"
+site_humanname: "Arizona OpenCloud Site"
+
+mgmt_ipv4_first_octets: "10.10.40"
+
+physical_node_list:
+  - name: node01
+    ipv4_last_octet: 1
+    aliases:
+      - head
+      - head1
+  - name: node02
+    ipv4_last_octet: 2
+  - name: node03
+    ipv4_last_octet: 3
+  - name: node04
+    ipv4_last_octet: 4
+
+# Inventory for ansible, used to generate inventory.ini
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    node01.opencloud.cs.arizona.edu:
+
+  head:
+    node01.opencloud.cs.arizona.edu:
+
+  compute:
+    node02.opencloud.cs.arizona.edu:
+    node03.opencloud.cs.arizona.edu:
+    node04.opencloud.cs.arizona.edu:
diff --git a/podconfig/opencloud-virtual.yml b/podconfig/opencloud-virtual.yml
new file mode 100644
index 0000000..1979c42
--- /dev/null
+++ b/podconfig/opencloud-virtual.yml
@@ -0,0 +1,7 @@
+---
+# opencloud-virtual Pod Config
+# Creates a virtual multi-node OpenCloud pod
+
+cord_scenario: opencloud
+cord_profile: opencloud
+
diff --git a/podconfig/rcord-local.yml b/podconfig/rcord-local.yml
new file mode 100644
index 0000000..1b312df
--- /dev/null
+++ b/podconfig/rcord-local.yml
@@ -0,0 +1,7 @@
+---
+# local-rcord Pod Config
+# Starts an R-CORD container set in the local environment
+
+cord_scenario: local
+cord_profile: rcord
+
diff --git a/podconfig/rcord-mock.yml b/podconfig/rcord-mock.yml
new file mode 100644
index 0000000..d375a1d
--- /dev/null
+++ b/podconfig/rcord-mock.yml
@@ -0,0 +1,7 @@
+---
+# mock-rcord Pod Config
+# Creates a single-node mock R-CORD pod
+
+cord_scenario: mock
+cord_profile: rcord
+
diff --git a/podconfig/rcord-physical-example.yml b/podconfig/rcord-physical-example.yml
new file mode 100644
index 0000000..34843ab
--- /dev/null
+++ b/podconfig/rcord-physical-example.yml
@@ -0,0 +1,39 @@
+---
+# rcord-physical-example Pod Config
+# Example configuration for a physical R-CORD pod
+
+cord_scenario: cord
+cord_profile: rcord
+
+# Variables
+credentials_dir: '/opt/credentials'
+pki_dir: '/opt/pki'
+ssh_pki_dir: '/opt/ssh_pki'
+
+fabric_ip: '10.6.1.1/24'
+management_ip: '10.6.0.1/24'
+external_ip: '47.135.132.21/24'
+management_network: 10.6.0.0/24
+
+headnode: headnode.site1.opencord.org
+
+# Inventory for ansible, used to generate inventory.ini
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    localhost:
+      ansible_connection: local
+
+  head:
+    headnode.site1.opencord.org:
+      ansible_host: 10.90.0.2
+      ansible_port: 22
+      ansible_user: ubuntu
+      ansible_ssh_pass: ubuntu
+
+  compute:
+
diff --git a/podconfig/rcord-virtual.yml b/podconfig/rcord-virtual.yml
new file mode 100644
index 0000000..1897b10
--- /dev/null
+++ b/podconfig/rcord-virtual.yml
@@ -0,0 +1,7 @@
+---
+# rcord-virtual Pod Config
+# Creates a virtual multi-node R-CORD pod, aka "rcord-in-a-box"
+
+cord_scenario: cord
+cord_profile: rcord
+
diff --git a/scenarios/cord/Vagrantfile b/scenarios/cord/Vagrantfile
new file mode 100644
index 0000000..a120989
--- /dev/null
+++ b/scenarios/cord/Vagrantfile
@@ -0,0 +1,388 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+$cordpath = ".."
+
+Vagrant.configure(2) do |config|
+
+  config.vm.define "corddev" do |d|
+    d.ssh.forward_agent = true
+    d.vm.box = "ubuntu/trusty64"
+    d.vm.hostname = "corddev"
+    d.vm.network "private_network", ip: "10.100.198.200"
+    d.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/corddev.yml -c local"
+    d.vm.provider "virtualbox" do |v|
+      v.memory = 2048
+    end
+    d.vm.provider :libvirt do |v, override|
+      v.memory = 2048
+      override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+  end
+
+  config.vm.define "prod" do |d|
+    d.vm.box = "ubuntu/trusty64"
+    d.vm.hostname = "prod"
+    d.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
+    d.vm.network "private_network", ip: "10.100.198.201"
+    d.vm.network "private_network",
+        ip: "0.0.0.0",
+        auto_config: false,
+        virtualbox__intnet: "cord-mgmt-network",
+        libvirt__network_name: "cord-mgmt-network",
+        libvirt__forward_mode: "none",
+        libvirt__dhcp_enabled: false
+    d.vm.network "private_network",
+        ip: "0.1.0.0",
+        mac: "02420a060101",
+        auto_config: false,
+        virtualbox__intnet: "head-node-leaf-1",
+        libvirt__network_name: "head-node-leaf-1",
+        libvirt__forward_mode: "none",
+        libvirt__dhcp_enabled: false
+    d.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/prod.yml -c local"
+    d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 cd /cord/build/platform-install; ansible-playbook -i inventory/head-localhost deploy-elasticstack-playbook.yml"
+    d.vm.provider "virtualbox" do |v|
+      v.memory = 2048
+    end
+    d.vm.provider :libvirt do |v, override|
+      v.memory = 24576
+      v.cpus = 8
+      v.storage :file, :size => '100G', :type => 'qcow2'
+      override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+      override.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/add-extra-drive.yml -c local"
+    end
+  end
+
+  config.vm.define "switch" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "fakeswitch"
+    s.vm.network "private_network", ip: "10.100.198.253"
+    s.vm.network "private_network",
+        type: "dhcp",
+        virtualbox__intnet: "cord-fabric-network",
+        libvirt__network_name: "cord-fabric-network",
+        mac: "cc37ab000001"
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/fakeswitch.yml -c local"
+    s.vm.provider "virtualbox" do |v|
+      v.memory = 1048
+      v.name = "fakeswitch"
+    end
+  end
+
+  config.vm.define "leaf-1" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "leaf-1"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000011"
+    s.vm.network "private_network",
+      ip: "0.1.0.0",
+      auto_config: false,
+      virtualbox__intnet: "head-node-leaf-1",
+      libvirt__network_name: "head-node-leaf-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.2.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-1-leaf-1",
+      libvirt__network_name: "compute-node-1-leaf-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.5.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-1",
+      libvirt__network_name: "leaf-1-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.6.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-2",
+      libvirt__network_name: "leaf-1-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+        v.memory = 512
+        v.cpus = 1
+        override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+        v.memory = 512
+        v.cpus = 1
+    end
+  end
+
+  config.vm.define "leaf-2" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "leaf-2"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000012"
+    s.vm.network "private_network",
+      ip: "0.3.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-2-leaf-2",
+      libvirt__network_name: "compute-node-2-leaf-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.4.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-3-leaf-2",
+      libvirt__network_name: "compute-node-3-leaf-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.7.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-1",
+      libvirt__network_name: "leaf-2-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.8.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-2",
+      libvirt__network_name: "leaf-2-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+        v.memory = 512
+        v.cpus = 1
+        override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+        v.memory = 512
+        v.cpus = 1
+    end
+  end
+
+  config.vm.define "spine-1" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "spine-1"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000021"
+    s.vm.network "private_network",
+      ip: "0.5.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-1",
+      libvirt__network_name: "leaf-1-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.7.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-1",
+      libvirt__network_name: "leaf-2-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+        v.memory = 512
+        v.cpus = 1
+        override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+        v.memory = 512
+        v.cpus = 1
+    end
+  end
+
+  config.vm.define "spine-2" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "spine-2"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000022"
+    s.vm.network "private_network",
+      ip: "0.6.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-2",
+      libvirt__network_name: "leaf-1-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.8.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-2",
+      libvirt__network_name: "leaf-2-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+        v.memory = 512
+        v.cpus = 1
+        override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+        v.memory = 512
+        v.cpus = 1
+    end
+  end
+
+  config.vm.define "testbox" do |d|
+    d.vm.box = "fgrehm/trusty64-lxc"
+    d.ssh.forward_agent = true
+    d.vm.hostname = "testbox"
+    d.vm.network "private_network", ip: "10.0.3.100", lxc__bridge_name: 'lxcbr0'
+    d.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/corddev.yml -c local"
+    config.vm.provider :lxc do |lxc|
+        # Same effect as 'customize ["modifyvm", :id, "--memory", "1024"]' for VirtualBox
+        lxc.customize 'cgroup.memory.limit_in_bytes', '2048M'
+        lxc.customize 'aa_profile', 'unconfined'
+        lxc.customize 'cgroup.devices.allow', 'b 7:* rwm'
+        lxc.customize 'cgroup.devices.allow', 'c 10:237 rwm'
+    end
+  end
+
+  config.vm.define "compute-node-1" do |c|
+    c.vm.communicator = "none"
+    c.vm.hostname = "compute-node-1"
+    c.vm.network "private_network",
+      adapter: 1,
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network"
+    c.vm.network "private_network",
+      adapter: 2,         # The fabric interface for each node
+      ip: "0.2.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-1-leaf-1",
+      libvirt__network_name: "compute-node-1-leaf-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    c.vm.provider :libvirt do |v|
+      v.memory = 8192
+      v.cpus = 4
+      v.machine_virtual_size = 100
+      v.storage :file, :size => '100G', :type => 'qcow2'
+      v.boot 'network'
+      v.boot 'hd'
+      v.nested = true
+    end
+    c.vm.provider "virtualbox" do |v, override|
+      override.vm.box = "clink15/pxe"
+      v.memory = 1048
+      v.gui = "true"
+    end
+  end
+
+  config.vm.define "compute-node-2" do |c|
+    c.vm.communicator = "none"
+    c.vm.hostname = "compute-node-2"
+    c.vm.network "private_network",
+      adapter: 1,
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network"
+    c.vm.network "private_network",
+      adapter: 2,         # The fabric interface for each node
+      ip: "0.3.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-2-leaf-2",
+      libvirt__network_name: "compute-node-2-leaf-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    c.vm.provider :libvirt do |v|
+      v.memory = 8192
+      v.cpus = 4
+      v.machine_virtual_size = 100
+      v.storage :file, :size => '100G', :type => 'qcow2'
+      v.boot 'network'
+      v.boot 'hd'
+      v.nested = true
+    end
+    c.vm.provider "virtualbox" do |v, override|
+      override.vm.box = "clink15/pxe"
+      v.memory = 1048
+      v.gui = "true"
+    end
+  end
+
+  config.vm.define "compute-node-3" do |c|
+    c.vm.communicator = "none"
+    c.vm.hostname = "compute-node-3"
+    c.vm.network "private_network",
+      adapter: 1,
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network"
+    c.vm.network "private_network",
+      adapter: 2,         # The fabric interface for each node
+      ip: "0.4.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-3-leaf-2",
+      libvirt__network_name: "compute-node-3-leaf-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    c.vm.provider :libvirt do |v|
+      v.memory = 8192
+      v.cpus = 4
+      v.machine_virtual_size = 100
+      v.storage :file, :size => '100G', :type => 'qcow2'
+      v.boot 'network'
+      v.boot 'hd'
+      v.nested = true
+    end
+    c.vm.provider "virtualbox" do |v, override|
+      override.vm.box = "clink15/pxe"
+      v.memory = 1048
+      v.gui = "true"
+    end
+  end
+
+end
+
diff --git a/scenarios/cord/config.yml b/scenarios/cord/config.yml
new file mode 100644
index 0000000..c9522c6
--- /dev/null
+++ b/scenarios/cord/config.yml
@@ -0,0 +1,81 @@
+---
+# cord Scenario
+# for both Physical (using a corddev VM) and Virtual (using multiple VMs)
+
+# make build config
+build_targets:
+ - deploy-maas
+ - onboard-openstack
+
+vagrant_vms:
+  - corddev
+  - prod
+
+vagrant_up_prereqs:
+  - prereqs-check
+
+# have to copy cord and config to physical/virtual nodes
+cord_config_prereqs:
+  - copy-cord
+
+copy_config_prereqs:
+  - cord-config
+
+# can prep build and head node simultaneously
+prep_buildnode_prereqs:
+  - copy-cord
+  - copy-config
+
+prep_headnode_prereqs:
+  - copy-cord
+  - copy-config
+
+buildnode: corddev
+headnode: prod
+
+# cord profile config
+frontend_only: False
+
+# docker config
+deploy_docker_tag: "candidate"
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-synchronizer-base"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "xosproject/onos-synchronizer"
+  - "xosproject/openstack-synchronizer"
+  - "xosproject/vrouter-synchronizer"
+  - "xosproject/vtn-synchronizer"
+  - "xosproject/exampleservice-synchronizer"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "onosproject/onos"
+  - "redis"
+
+# Inventory for ansible, used to generate inventory.ini
+inventory_groups:
+
+  config:
+    corddev:
+
+  build:
+    corddev:
+
+  head:
+    prod:
+
+  compute:
+
diff --git a/scenarios/local/config.yml b/scenarios/local/config.yml
new file mode 100644
index 0000000..1032e31
--- /dev/null
+++ b/scenarios/local/config.yml
@@ -0,0 +1,58 @@
+---
+# local Scenario
+# Brings up a minimal set of containers on the host currently being run on
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+# head = config in local scenario
+head_cord_dir: "{{ config_cord_dir }}"
+head_cord_profile_dir: "{{ config_cord_profile_dir }}"
+
+# make targets
+build_targets:
+  - local-onboard-profile
+
+# local scenario configuration
+frontend_only: True
+create_configdirs_become: False
+use_openstack: False
+xos_images: []
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "redis"
+  - "nginx"
+
+# Ansible Inventory
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    localhost:
+      ansible_connection: local
+
+  head:
+    localhost:
+      ansible_connection: local
+
+  compute:
+
diff --git a/scenarios/mock/Vagrantfile b/scenarios/mock/Vagrantfile
new file mode 100644
index 0000000..bedc938
--- /dev/null
+++ b/scenarios/mock/Vagrantfile
@@ -0,0 +1,34 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# mock Scenario Vagrantfile
+
+require 'yaml'
+settings = YAML.load_file('genconfig/config.yml')
+
+Vagrant.configure("2") do |config|
+
+  config.vm.box = "ubuntu/trusty64"
+
+  # sync these folders with VM
+  config.vm.synced_folder "../../../", "/opt/cord/", create: true
+  config.vm.synced_folder "../../../../cord_profile/", "/opt/cord_profile/", create: true
+
+  # set the headnode VM
+  config.vm.define "headnode" do |d|
+    d.vm.hostname = "headnode"
+    d.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
+    d.vm.network "private_network",
+      ip: settings['head_vm_ip']
+    d.vm.provider :virtualbox do |vb|
+      vb.memory = settings['head_vm_mem']
+      vb.cpus = settings['head_vm_cpu']
+    end
+    d.vm.provider :libvirt do |v|
+      v.memory = settings['head_vm_mem']
+      v.cpus = settings['head_vm_cpu']
+    end
+  end
+
+end
+
diff --git a/scenarios/mock/config.yml b/scenarios/mock/config.yml
new file mode 100644
index 0000000..9c182ac
--- /dev/null
+++ b/scenarios/mock/config.yml
@@ -0,0 +1,75 @@
+---
+# mock ("mockup") scenario
+# Brings up just core containers without synchronizers, for API tests and GUI dev
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..') | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+build_cord_dir: /opt/cord
+
+# Vagrant VM configuration
+head_vm_mem: 2048
+head_vm_cpu: 4
+head_vm_ip: "192.168.46.100"
+
+vagrant_vms:
+  - headnode
+
+buildnode: headnode
+headnode: headnode
+
+physical_node_list:
+  - name: headnode
+    ipv4_last_octet: 1
+    aliases:
+      - head
+
+# make targets
+build_targets:
+  - onboard-profile
+
+prep_headnode_prereqs:
+  - prep-buildnode
+
+# mock profile configuration
+frontend_only: True
+use_maas: False
+use_apt_cache: False
+use_openstack: False
+xos_images: []
+
+# whitelist of images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "redis"
+
+# Ansible Inventory
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    headnode:
+
+  head:
+    headnode:
+
+  compute:
+
diff --git a/targets/opencloud-in-a-box/Vagrantfile b/scenarios/opencloud/Vagrantfile
similarity index 76%
rename from targets/opencloud-in-a-box/Vagrantfile
rename to scenarios/opencloud/Vagrantfile
index 83c9ce3..91ad1bd 100644
--- a/targets/opencloud-in-a-box/Vagrantfile
+++ b/scenarios/opencloud/Vagrantfile
@@ -1,9 +1,10 @@
 # -*- mode: ruby -*-
 # vi: set ft=ruby :
 #
-# OpenCloud-in-a-Box Vagrantfile
+# opencloud Vagrantfile
+
 require 'yaml'
-settings = YAML.load_file('config/opencloud_in_a_box.yaml')
+settings = YAML.load_file('genconfig/config.yml')
 
 Vagrant.configure(2) do |config|
 
@@ -13,8 +14,8 @@
     h.vm.hostname = "head1"
     h.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
     h.vm.provider :libvirt do |v|
-      v.memory = 16384
-      v.cpus = 8
+      v.memory = settings['head_vm_mem']
+      v.cpus = settings['head_vm_cpu']
       v.machine_virtual_size = 100
       v.management_network_name = settings['public_network_name'] # public network
       v.management_network_address = settings['public_network_cidr']
@@ -22,7 +23,7 @@
     h.vm.network "private_network", # management network, eth1
       ip: "0.1.1.0", # not used, ignore
       auto_config: false,
-      libvirt__network_name: settings['mgmt_network'],
+      libvirt__network_name: settings['mgmt_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
   end
@@ -30,8 +31,8 @@
   config.vm.define "compute1" do |c|
     c.vm.hostname = "compute1"
     c.vm.provider :libvirt do |v|
-      v.memory = 8192
-      v.cpus = 4
+      v.memory = settings['compute_vm_mem']
+      v.cpus = settings['compute_vm_cpu'] 
       v.machine_virtual_size = 50
       v.nested = true
       v.management_network_name = settings['public_network_name'] # public network
@@ -40,7 +41,7 @@
     c.vm.network "private_network", # management network, eth1
       ip: "0.1.1.0",
       auto_config: false,
-      libvirt__network_name: settings['mgmt_network'],
+      libvirt__network_name: settings['mgmt_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
   end
@@ -48,8 +49,8 @@
   config.vm.define "compute2" do |c|
     c.vm.hostname = "compute2"
     c.vm.provider :libvirt do |v|
-      v.memory = 8192
-      v.cpus = 4
+      v.memory = settings['compute_vm_mem']
+      v.cpus = settings['compute_vm_cpu'] 
       v.machine_virtual_size = 50
       v.nested = true
       v.management_network_name = settings['public_network_name'] # public network
@@ -58,9 +59,10 @@
     c.vm.network "private_network", # management network, eth1
       ip: "0.1.1.0",
       auto_config: false,
-      libvirt__network_name: settings['mgmt_network'],
+      libvirt__network_name: settings['mgmt_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
   end
 
 end
+
diff --git a/scenarios/opencloud/config.yml b/scenarios/opencloud/config.yml
new file mode 100644
index 0000000..cfc9eb2
--- /dev/null
+++ b/scenarios/opencloud/config.yml
@@ -0,0 +1,95 @@
+---
+# opencloud Scenario
+# For a virtual install, physical does not use VMs
+
+# opencloud profile config
+frontend_only: False
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+build_cord_dir: /opt/cord
+
+buildnode: head1
+headnode: head1
+
+# make build config
+build_targets:
+  - onboard-openstack
+
+vagrant_up_prereqs:
+  - prereqs-check
+
+# have to copy cord and config to physical/virtual nodes
+cord_config_prereqs:
+  - copy-cord
+
+copy_config_prereqs:
+  - cord-config
+
+prep_buildnode_prereqs:
+  - copy-cord
+  - copy-config
+
+# have to run build prep before head prep, when build == head
+prep_headnode_prereqs:
+  - prep-buildnode
+
+# Vagrant VM configuration
+vagrant_vms:
+  - head1
+  - compute1
+
+head_vm_mem: 10240
+head_vm_cpu: 8
+
+compute_vm_mem: 16384
+compute_vm_cpu: 8
+
+mgmt_network_name: cordmgmt
+public_network_name: cordpub
+public_network_cidr: "10.230.100.0/24"
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-synchronizer-base"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "xosproject/onos-synchronizer"
+  - "xosproject/openstack-synchronizer"
+  - "xosproject/vrouter-synchronizer"
+  - "xosproject/vtn-synchronizer"
+  - "xosproject/exampleservice-synchronizer"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "onosproject/onos"
+  - "redis"
+
+# Ansible Inventory
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    head1:
+
+  head:
+    head1:
+
+  compute:
+    compute1:
+
diff --git a/scripts/cord-bootstrap.sh b/scripts/cord-bootstrap.sh
new file mode 100755
index 0000000..4f848ad
--- /dev/null
+++ b/scripts/cord-bootstrap.sh
@@ -0,0 +1,256 @@
+#!/usr/bin/env bash
+# cord-bootstrap.sh
+# Bootstraps a dev system for CORD, downloads source
+
+set -e -u
+
+# start time, used to name logfiles
+START_T=$(date -u "+%Y%m%d%H%M%SZ")
+
+# Location of 'cord' directory checked out on the local system
+CORDDIR="${CORDDIR:-${HOME}/cord}"
+
+# Commands
+MAKECMD="${MAKECMD:-make -j4}"
+
+# CORD versioning
+REPO_BRANCH="${REPO_BRANCH:-master}"
+
+# Functions
+function run_stage {
+    echo "==> "$1": Starting"
+    $1
+    echo "==> "$1": Complete"
+}
+
+function bootstrap_ansible() {
+
+  if [ ! -x "/usr/bin/ansible" ]
+  then
+    echo "Installing Ansible..."
+    sudo apt-get update
+    sudo apt-get -y install apt-transport-https build-essential curl git python-dev \
+                            python-netaddr python-pip software-properties-common sshpass
+    sudo apt-add-repository -y ppa:ansible/ansible  # latest supported version
+    sudo apt-get update
+    sudo apt-get install -y ansible
+    sudo pip install gitpython graphviz
+  fi
+}
+
+function bootstrap_repo() {
+
+  if [ ! -x "/usr/local/bin/repo" ]
+  then
+    echo "Installing repo..."
+    # v1.23, per https://source.android.com/source/downloading
+    REPO_SHA256SUM="e147f0392686c40cfd7d5e6f332c6ee74c4eab4d24e2694b3b0a0c037bf51dc5"
+    curl -o /tmp/repo https://storage.googleapis.com/git-repo-downloads/repo
+    echo "$REPO_SHA256SUM  /tmp/repo" | sha256sum -c -
+    sudo mv /tmp/repo /usr/local/bin/repo
+    sudo chmod a+x /usr/local/bin/repo
+  fi
+
+  if [ ! -d "$CORDDIR" ]
+  then
+    # make sure we can find gerrit.opencord.org as DNS failures will fail the build
+    dig +short gerrit.opencord.org || (echo "ERROR: gerrit.opencord.org can't be looked up in DNS" && exit 1)
+
+    echo "Downloading CORD/XOS, branch:'${REPO_BRANCH}'..."
+
+    if [ ! -e "${HOME}/.gitconfig" ]
+    then
+      echo "No ${HOME}/.gitconfig, setting testing defaults"
+      git config --global user.name 'Test User'
+      git config --global user.email 'test@null.com'
+      git config --global color.ui false
+    fi
+
+    mkdir $CORDDIR && cd $CORDDIR
+    repo init -u https://gerrit.opencord.org/manifest -b $REPO_BRANCH
+    repo sync
+
+    # download gerrit patches using repo
+    if [[ ! -z ${GERRIT_PATCHES[@]-} ]]
+    then
+      for gerrit_patch in "${GERRIT_PATCHES[@]-}"
+      do
+        echo "Checking out gerrit changeset: '$gerrit_patch'"
+        repo download ${gerrit_patch/:/ }
+      done
+    fi
+  fi
+}
+
+function bootstrap_vagrant() {
+
+  if [ ! -x "/usr/bin/vagrant" ]
+  then
+    echo "Installing vagrant and associated tools..."
+    VAGRANT_SHA256SUM="faff6befacc7eed3978b4b71f0dbb9c135c01d8a4d13236bda2f9ed53482d2c4"  # version 1.9.3
+    curl -o /tmp/vagrant.deb https://releases.hashicorp.com/vagrant/1.9.3/vagrant_1.9.3_x86_64.deb
+    echo "$VAGRANT_SHA256SUM  /tmp/vagrant.deb" | sha256sum -c -
+    sudo dpkg -i /tmp/vagrant.deb
+    sudo apt-get -y install qemu-kvm libvirt-bin libvirt-dev nfs-kernel-server ruby2.0
+    sudo adduser $USER libvirtd
+
+    run_stage cloudlab_setup
+
+    echo "Installing vagrant plugins..."
+    # FIXME: fix for vagrant-libvirt dependency issue that arose on 2017-04-28 - zdw
+    # vagrant plugin list | grep vagrant-libvirt || vagrant plugin install vagrant-libvirt --plugin-version 0.0.35
+    if ! vagrant plugin list | grep vagrant-libvirt
+    then
+      git clone -b remove_xmlrpc_dep https://github.com/zdw/vagrant-libvirt.git ${HOME}/vagrant-libvirt
+      cd ~/vagrant-libvirt
+      gem2.0 build vagrant-libvirt.gemspec
+      vagrant plugin install vagrant-libvirt-0.0.35.gem
+      cd ~
+    fi
+    vagrant plugin list | grep vagrant-mutate || vagrant plugin install vagrant-mutate
+
+    add_box ubuntu/trusty64
+  fi
+}
+
+function add_box() {
+  vagrant box list | grep $1 | grep virtualbox || vagrant box add $1
+  vagrant box list | grep $1 | grep libvirt || vagrant mutate $1 libvirt --input-provider virtualbox
+}
+
+function cloudlab_setup() {
+
+  # Don't do anything if not a CloudLab node
+  [ ! -d /usr/local/etc/emulab ] && return
+
+  # The watchdog will sometimes reset groups, turn it off
+  if [ -e /usr/local/etc/emulab/watchdog ]
+  then
+    sudo /usr/bin/perl -w /usr/local/etc/emulab/watchdog stop
+    sudo mv /usr/local/etc/emulab/watchdog /usr/local/etc/emulab/watchdog-disabled
+  fi
+
+  # Mount extra space, if haven't already
+  if [ ! -d /mnt/extra ]
+  then
+    sudo mkdir -p /mnt/extra
+
+    # for NVME SSD on Utah Cloudlab, not supported by mkextrafs
+    if df | grep -q nvme0n1p1 && [ -e /usr/testbed/bin/mkextrafs ]
+    then
+      # set partition type of 4th partition to Linux, ignore errors
+      echo -e "t\n4\n82\np\nw\nq" | sudo fdisk /dev/nvme0n1 || true
+
+      sudo mkfs.ext4 /dev/nvme0n1p4
+      echo "/dev/nvme0n1p4 /mnt/extra/ ext4 defaults 0 0" | sudo tee -a /etc/fstab
+      sudo mount /mnt/extra
+      mount | grep nvme0n1p4 || (echo "ERROR: NVME mkfs/mount failed, exiting!" && exit 1)
+
+    elif [ -e /usr/testbed/bin/mkextrafs ]  # if on Clemson/Wisconsin Cloudlab
+    then
+      # Sometimes this command fails on the first try
+      sudo /usr/testbed/bin/mkextrafs -r /dev/sdb -qf "/mnt/extra/" || sudo /usr/testbed/bin/mkextrafs -r /dev/sdb -qf "/mnt/extra/"
+
+      # Check that the mount succeeded (sometimes mkextrafs succeeds but device not mounted)
+      mount | grep sdb || (echo "ERROR: mkextrafs failed, exiting!" && exit 1)
+    fi
+  fi
+
+  # replace /var/lib/libvirt/images with a symlink
+  [ -d /var/lib/libvirt/images/ ] && [ ! -h /var/lib/libvirt/images ] && sudo rmdir /var/lib/libvirt/images
+  sudo mkdir -p /mnt/extra/libvirt_images
+
+  if [ ! -e /var/lib/libvirt/images ]
+  then
+    sudo ln -s /mnt/extra/libvirt_images /var/lib/libvirt/images
+  fi
+}
+
+function bootstrap_docker() {
+
+  if [ ! -x "/usr/bin/docker" ]
+  then
+    echo "Installing Devel Tools (docker)..."
+    cd ${CORDDIR}/build/platform-install
+    ansible-playbook -i inventory/localhost devel-tools-playbook.yml
+  fi
+}
+
+# Parse options
+GERRIT_PATCHES=()
+MAKE_TARGETS=()
+GROUP_LIST=()
+DOCKER=0
+VAGRANT=0
+
+while getopts "dhp:t:v" opt; do
+  case ${opt} in
+    d ) DOCKER=1
+        GROUP_LIST+=("docker")
+      ;;
+    h ) echo "Usage for $0:"
+      echo "  -d                           Install Docker for local scenario."
+      echo "  -h                           Display this help message."
+      echo "  -p <project:change/revision> Download a patch from gerrit. Can be repeated."
+      echo "  -t <target>                  Run '$MAKECMD <target>' in cord/build/. Can be repeated."
+      echo "  -v                           Install Vagrant for mock/virtual/physical scenarios."
+      exit 0
+      ;;
+    p ) GERRIT_PATCHES+=("$OPTARG")
+      ;;
+    t ) MAKE_TARGETS+=("$OPTARG")
+      ;;
+    v ) VAGRANT=1
+        GROUP_LIST+=("libvirtd")
+      ;;
+    \? ) echo "Invalid option: -$OPTARG"
+      exit 1
+      ;;
+  esac
+done
+
+# Start process
+run_stage bootstrap_ansible
+run_stage bootstrap_repo
+
+if [[ $VAGRANT -ne 0 ]]
+then
+  run_stage bootstrap_vagrant
+fi
+
+if [[ $DOCKER -ne 0 ]]
+then
+  run_stage bootstrap_docker
+fi
+
+# Check for group membership when needed
+if [[ ! -z ${GROUP_LIST[@]-} ]]
+then
+  HAS_NEEDED_GROUPS=0
+  for group_item in "${GROUP_LIST[@]-}"; do
+    if ! groups | grep -q "$group_item"
+    then
+      echo "You are not in the group: "$group_item", please logout/login."
+      HAS_NEEDED_GROUPS=1
+    fi
+  done
+
+  if [[ $HAS_NEEDED_GROUPS -ne 0 ]];
+  then
+    exit 1
+  fi
+fi
+
+# run make targets, if specified
+if [[ ! -z ${MAKE_TARGETS[@]-} ]]
+then
+  for make_target in "${MAKE_TARGETS[@]-}"; do
+    makelog=${HOME}/${START_T}_make_`echo $make_target | sed 's/[^[:alnum:]-]/_/g'`
+    echo "Logging to: $makelog"
+    echo "Running '$MAKECMD $make_target'" | tee -a $makelog
+    cd ${CORDDIR}/build/
+    $MAKECMD $make_target 2>&1 | tee -a $makelog
+  done
+fi
+
+exit 0
diff --git a/scripts/imagebuilder.py b/scripts/imagebuilder.py
new file mode 100755
index 0000000..fc54c91
--- /dev/null
+++ b/scripts/imagebuilder.py
@@ -0,0 +1,1256 @@
+#!/usr/bin/env python
+
+# imagebuilder.py
+# rebuilds/fetches docker container images per their git status in repo
+# in addition to docker, needs `sudo apt-get install python-git`
+
+import argparse
+import datetime
+import git
+import json
+import logging
+import os
+import re
+import string
+import sys
+import tarfile
+import tempfile
+import time
+import xml.etree.ElementTree as ET
+import yaml
+
+global args
+global conf
+global build_tag
+global buildable_images
+global pull_only_images
+
+
+def setup_logging(name=None, logfile=False):
+    global args
+
+    if name:
+        log = logging.getLogger("-".join([__name__, name]))
+    else:
+        log = logging.getLogger(__name__)
+
+    slh = logging.StreamHandler(sys.stdout)
+    slh.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
+    slh.setLevel(logging.DEBUG)
+
+    log.addHandler(slh)
+
+    # secondary logging to a file, always DEBUG level
+    if logfile:
+        fn = os.path.join(conf.logdir, "%s.log" % name)
+        flh = logging.FileHandler(fn)
+        flh.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
+        flh.setLevel(logging.DEBUG)
+        log.addHandler(flh)
+
+    return log
+
+LOG = setup_logging()
+
+
+def parse_args():
+    global args
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('-c', '--container_list', default='docker_images.yml',
+                        type=argparse.FileType('r'),
+                        help="YAML Config and master container list")
+
+    # -f is optional, so using type=argparse.FileType is problematic
+    parser.add_argument('-f', '--filter_images', default=None, action='store',
+                        help="YAML file restricting images to build/fetch")
+
+    parser.add_argument('-a', '--actions_taken', default=None,
+                        help="Save a YAML file with actions taken during run")
+
+    # FIXME - the -b and -p options are currently unimplemented
+    group = parser.add_mutually_exclusive_group()
+
+    group.add_argument('-b', '--build_force', action="store_true",
+                       help="Build (don't fetch) all internal containers")
+
+    group.add_argument('-p', '--pull_force', action="store_true",
+                       help="Only pull containers, fail if build required")
+
+    parser.add_argument('-d', '--dry_run', action="store_true",
+                        help="Don't build/fetch anything")
+
+    parser.add_argument('-g', '--graph', default=None,
+                        help="Filename for DOT graph file of image dependency")
+
+    parser.add_argument('-l', '--build_log_dir', action="store",
+                        help="Log build output to this dir if set")
+
+    parser.add_argument('-r', '--repo_root', default="..", action="store",
+                        help="Repo root directory")
+
+    parser.add_argument('-t', '--build_tag', default=None, action="store",
+                        help="tag all images built/pulled using this tag")
+
+    parser.add_argument('-v', '--verbosity', action='count', default=1,
+                        help="Repeat to increase log level")
+
+    args = parser.parse_args()
+
+    if args.verbosity > 1:
+        LOG.setLevel(logging.DEBUG)
+    else:
+        LOG.setLevel(logging.INFO)
+
+
+def load_config():
+    global args
+    global conf
+    global buildable_images
+    global pull_only_images
+    global build_tag
+
+    try:
+        cl_abs = os.path.abspath(args.container_list.name)
+        LOG.info("Master container list file: %s" % cl_abs)
+
+        conf = yaml.safe_load(args.container_list)
+    except yaml.YAMLError:
+        LOG.exception("Problem loading container list file")
+        sys.exit(1)
+
+    if args.build_tag:
+        build_tag = args.build_tag
+    else:
+        build_tag = conf['docker_build_tag']
+
+    if args.filter_images is None:
+        buildable_images = conf['buildable_images']
+        pull_only_images = conf['pull_only_images']
+    else:
+        fi_abs = os.path.abspath(args.filter_images)
+
+        LOG.info("Filtering image list per 'docker_image_whitelist' in: %s" %
+                 fi_abs)
+        try:
+            fi_fh = open(fi_abs, 'r')
+            filter_list = yaml.safe_load(fi_fh)
+            fi_fh.close()
+
+            if 'docker_image_whitelist' not in filter_list:
+                LOG.error("No 'docker_image_whitelist' defined in: %s" %
+                          fi_abs)
+                sys.exit(1)
+
+            # fail if filter list specifies tags
+            for f_i in filter_list['docker_image_whitelist']:
+                (name, tag) = split_name(f_i)
+                if tag:
+                    LOG.error("filter list may not be tagged")
+                    sys.exit(1)
+
+            buildable_images = [img for img in conf['buildable_images']
+                                if split_name(img['name'])[0]
+                                in filter_list['docker_image_whitelist']]
+
+            pull_only_images = [img for img in conf['pull_only_images']
+                                if split_name(img)[0]
+                                in filter_list['docker_image_whitelist']]
+
+        except Exception:
+            LOG.exception("Problem with filter list file")
+            sys.exit(1)
+
+
+def split_name(input_name):
+    """ split a docker image name in the 'name:tag' format into components """
+
+    name = input_name
+    tag = None
+
+    # split name:tag if given in combined format
+    name_tag_split = string.split(input_name, ":")
+
+    if len(name_tag_split) > 1:  # has tag, return separated version
+        name = name_tag_split[0]
+        tag = name_tag_split[1]
+
+    return (name, tag)
+
+
+class RepoRepo():
+    """ git repo managed by repo tool"""
+
+    manifest_branch = ""
+
+    def __init__(self, name, path, remote):
+
+        self.name = name
+        self.path = path
+        self.remote = remote
+        self.git_url = "%s%s" % (remote, name)
+
+        try:
+            self.git_repo_o = git.Repo(self.abspath())
+            LOG.debug("Repo - %s, path: %s" % (name, path))
+
+            self.head_commit = self.git_repo_o.head.commit.hexsha
+            LOG.debug(" head commit: %s" % self.head_commit)
+
+            commit_t = time.gmtime(self.git_repo_o.head.commit.committed_date)
+            self.head_commit_t = time.strftime("%Y-%m-%dT%H:%M:%SZ", commit_t)
+            LOG.debug(" commit date: %s" % self.head_commit_t)
+
+            self.clean = not self.git_repo_o.is_dirty(untracked_files=True)
+            LOG.debug(" clean: %s" % self.clean)
+
+            # list of untracked files (expensive operation)
+            self.untracked_files = self.git_repo_o.untracked_files
+            for u_file in self.untracked_files:
+                LOG.debug("  Untracked: %s" % u_file)
+
+        except Exception:
+            LOG.exception("Error with git repo: %s" % name)
+            sys.exit(1)
+
+    def abspath(self):
+        global args
+        return os.path.abspath(os.path.join(args.repo_root, self.path))
+
+    def path_clean(self, test_path, branch=""):
+        """ Is working tree on branch and no untracked files in path? """
+        global conf
+
+        if not branch:
+            branch = self.manifest_branch
+
+        LOG.debug("  Looking for changes in path: %s" % test_path)
+
+        p_clean = True
+
+        # diff between branch head and working tree (None)
+        branch_head = self.git_repo_o.commit(branch)
+        diff = branch_head.diff(None, paths=test_path)
+
+        if diff:
+            p_clean = False
+
+        for diff_obj in diff:
+            LOG.debug("  file not on branch: %s" % diff_obj)
+
+        # remove . to compare paths using .startswith()
+        if test_path == ".":
+            test_path = ""
+
+        for u_file in self.untracked_files:
+            if u_file.startswith(test_path):
+                LOG.debug("  untracked file in path: %s" % u_file)
+                p_clean = False
+
+        return p_clean
+
+
+class RepoManifest():
+    """ parses manifest XML file used by repo tool"""
+
+    def __init__(self):
+        global args
+        global conf
+
+        self.manifest_xml = {}
+        self.repos = {}
+        self.branch = ""
+
+        self.manifest_file = os.path.abspath(
+                                os.path.join(args.repo_root,
+                                             ".repo/manifest.xml"))
+
+        LOG.info("Loading manifest file: %s" % self.manifest_file)
+
+        try:
+            tree = ET.parse(self.manifest_file)
+            self.manifest_xml = tree.getroot()
+        except Exception:
+            LOG.exception("Error loading repo manifest")
+            sys.exit(1)
+
+        # Find the default branch
+        default = self.manifest_xml.find('default')
+        self.branch = "%s/%s" % (default.attrib['remote'],
+                                 default.attrib['revision'])
+
+        # Find the remote URL for these repos
+        remote = self.manifest_xml.find('remote')
+        self.remote = remote.attrib['review']
+
+        LOG.info("Manifest is on branch '%s' with remote '%s'" %
+                 (self.branch, self.remote))
+
+        project_repos = {}
+
+        for project in self.manifest_xml.iter('project'):
+            repo_name = project.attrib['name']
+            rel_path = project.attrib['path']
+            abs_path = os.path.abspath(os.path.join(args.repo_root,
+                                       project.attrib['path']))
+
+            if os.path.isdir(abs_path):
+                project_repos[repo_name] = rel_path
+            else:
+                LOG.debug("Repo in manifest but not checked out: %s" %
+                          repo_name)
+
+        for repo_name, repo_path in project_repos.iteritems():
+            self.repos[repo_name] = RepoRepo(repo_name, repo_path, self.remote)
+            self.repos[repo_name].manifest_branch = self.branch
+
+    def get_repo(self, repo_name):
+        return self.repos[repo_name]
+
+# DockerImage Status Constants
+
+DI_UNKNOWN = 'unknown'  # unknown status
+DI_EXISTS = 'exists'  # already exists in docker, has an image_id
+
+DI_BUILD = 'build'  # needs to be built
+DI_FETCH = 'fetch'  # needs to be fetched (pulled)
+DI_ERROR = 'error'  # build or other fatal failure
+
+
+class DockerImage():
+
+    def __init__(self, name, repo_name=None, repo_d=None, path=".",
+                 context=".", dockerfile='Dockerfile', labels=None,
+                 tags=None, image_id=None, components=None, status=DI_UNKNOWN):
+
+        LOG.debug("New DockerImage object from name: %s" % name)
+
+        # name to pull as, usually what is provided on creation.
+        # May be changed by create_tags
+        self.raw_name = name
+
+        # Python's mutable defaults is a landmine
+        if labels is None:
+            self.labels = {}
+        else:
+            self.labels = labels
+
+        self.repo_name = repo_name
+        self.repo_d = repo_d
+        self.path = path
+        self.context = context
+        self.dockerfile = dockerfile
+        self.tags = []  # tags are added to this later in __init__
+        self.image_id = image_id
+        self.components = components
+        self.status = status
+
+        self.parent_name = None  # set by _find_parent_name()
+        self.parent = None  # pointer to parent DockerImage object
+        self.children = []   # list of child DockerImage objects
+
+        # split name:tag if given in combined format
+        (image_name, image_tag) = split_name(name)
+        if image_tag:  # has tag
+            self.name = image_name
+            self.tags.append(image_tag)
+        else:  # no tag
+            self.name = image_name
+
+        # Add the build tag if exists
+        if build_tag not in self.tags:
+            self.tags.append(build_tag)
+
+        # split names from tag list
+        if tags is not None:
+            for tag in tags:
+                thistag = ""
+                (tag_name, tag_tag) = split_name(tag)
+                if tag_tag:  # has name also, use just tag
+                    thistag = tag_tag
+                else:  # just a bare tag
+                    thistag = tag_name
+
+                if thistag not in self.tags:  # don't duplicate tags
+                    self.tags.append(thistag)
+
+        # self.clean only applies to this container
+        self.clean = self._context_clean()
+        self._find_parent_name()
+
+    def __str__(self):
+        return self.name
+
+    def buildable(self):
+        """ Can this image be built from a Dockerfile? """
+        if self.repo_name:  # has a git repo to be built from
+            return True
+        return False
+
+    def _context_clean(self):
+        """ Determine if this is repo and context is clean """
+
+        if self.buildable():
+
+            # check if on master branch
+            repo_clean = self.repo_d.clean
+
+            # only check the Docker context for cleanliness
+            context_path = os.path.normpath(
+                                os.path.join(self.path, self.context))
+            context_clean = self.repo_d.path_clean(context_path)
+
+            # check of subcomponents are clean
+            components_clean = self.components_clean()
+
+            LOG.debug(" Build Context Cleanliness -")
+            LOG.debug("  repo: %s, context: %s, components: %s" %
+                      (repo_clean, context_clean, components_clean))
+
+            if context_clean and repo_clean and components_clean:
+                return True
+            else:
+                return False
+
+        return True  # unbuildable images are clean
+
+    def parent_clean(self):
+        """ if all parents are clean """
+
+        if self.buildable():
+            if self.clean and self.parent.parent_clean():
+                return True
+            else:
+                return False
+
+        return True  # unbuildable images are clean
+
+    def compare_labels(self, other_labels):
+        """ Returns True if image label-schema.org labels match dict """
+
+        comparable_labels_re = [
+                r".*name$",
+                r".*vcs-url$",
+                r".*vcs-ref$",
+                ]
+
+        for clr in comparable_labels_re:  # loop on all comparable labels
+            for label in self.labels:  # loop on all labels
+                if re.match(clr, label) is not None:   # if label matches re
+                    # and label exists in other, and values are same
+                    if label in other_labels and \
+                            self.labels[label] == other_labels[label]:
+                        pass  # continue through loop
+                    else:
+                        LOG.info("Non-matching label: %s" % label)
+                        return False  # False when first difference found
+
+        return True  # only when every label matches
+
+    def same_name(self, other_name):
+        """ compare image name (possibly with tag) against image name/tag """
+
+        (o_name, o_tag) = split_name(other_name)
+
+        if o_tag is None and self.name == o_name:
+            return True
+        elif self.name == o_name and o_tag in self.tags:
+            return True
+
+        return False
+
+    def components_clean(self):
+
+        if self.buildable() and self.components is not None:
+            for component in self.components:
+                if not component['repo_d'].clean or \
+                        not component['repo_d'].path_clean(component['path']):
+                    return False
+
+        return True
+
+    def component_labels(self):
+        """ returns a dict of labels for subcomponents """
+
+        if self.buildable() and self.components is not None:
+
+            comp_l = {}
+
+            for component in self.components:
+
+                LOG.debug(" component %s generating child labels" %
+                          component['repo_name'])
+
+                prefix = "org.opencord.component.%s." % component['repo_name']
+
+                comp_l[prefix + "vcs-url"] = component['repo_d'].git_url
+
+                if component['repo_d'].clean and \
+                        component['repo_d'].path_clean(component['path']):
+                    clean = True
+                else:
+                    clean = False
+
+                if clean:
+                    comp_l[prefix + "version"] = self.repo_d.manifest_branch
+                    comp_l[prefix + "vcs-ref"] = \
+                        component['repo_d'].head_commit
+                else:
+                    comp_l[prefix + "version"] = "dirty"
+                    comp_l[prefix + "vcs-ref"] = ""
+
+            return comp_l
+
+        return None
+
+    def child_labels(self, repo_list=None):
+        """ return a dict of labels to apply to child images """
+
+        LOG.debug(" Parent image %s generating child labels" % self.name)
+
+        # only create labels when they haven't already been created
+        if repo_list is None:
+            repo_list = []
+
+        LOG.debug(" Parents already labeled with: %s" % ", ".join(repo_list))
+
+        cl = {}
+
+        if self.buildable() and self.repo_name not in repo_list:
+
+            LOG.debug("  Adding parent labels from repo: %s" % self.repo_name)
+
+            prefix = "org.opencord.component.%s." % self.repo_name
+
+            cl[prefix + "vcs-url"] = self.repo_d.git_url
+
+            if self.clean:
+                cl[prefix + "version"] = self.repo_d.manifest_branch
+                cl[prefix + "vcs-ref"] = self.repo_d.head_commit
+            else:
+                cl[prefix + "version"] = "dirty"
+                cl[prefix + "vcs-ref"] = ""
+
+            repo_list.append(self.repo_name)
+
+        # include component labels if present
+        if self.components is not None:
+            cl.update(self.component_labels())
+
+        # recursively find labels up the parent chain
+        if self.parent is not None:
+            cl.update(self.parent.child_labels(repo_list))
+
+        return cl
+
+    def create_labels(self):
+        """ Create label-schema.org labels for image """
+
+        if self.buildable():
+
+            LOG.debug("Creating labels for: %s" % self.name)
+
+            self.labels['org.label-schema.name'] = self.name
+            self.labels['org.label-schema.schema-version'] = "1.0"
+
+            # org.label-schema.build-date
+            time_now = datetime.datetime.utcnow()
+            build_date = time_now.strftime("%Y-%m-%dT%H:%M:%SZ")
+            self.labels['org.label-schema.build-date'] = build_date
+
+            # git version related labels
+            self.labels['org.label-schema.vcs-url'] = self.repo_d.git_url
+
+            if self.clean:
+                self.labels['org.label-schema.version'] = \
+                    self.repo_d.manifest_branch
+                self.labels['org.label-schema.vcs-ref'] = \
+                    self.repo_d.head_commit
+                self.labels['org.opencord.vcs-commit-date'] = \
+                    self.repo_d.head_commit_t
+            else:
+                self.labels['org.label-schema.version'] = "dirty"
+                self.labels['org.label-schema.vcs-ref'] = ""
+
+            # include component labels if present
+            if self.components is not None:
+                self.labels.update(self.component_labels())
+
+    def create_tags(self):
+        """ Create docker tags as needed """
+
+        if self.buildable():
+            LOG.debug("Creating tags for image: %s" % self.name)
+
+            # if clean and parents clean, add tags for branch/commit
+            if self.parent_clean():
+                if build_tag not in self.tags:
+                    self.tags.append(build_tag)
+
+                commit_tag = self.repo_d.head_commit
+                if commit_tag not in self.tags:
+                    self.tags.append(commit_tag)
+
+                    # pulling is done via raw_name, set tag to commit
+                    self.raw_name = "%s:%s" % (self.name, commit_tag)
+
+            LOG.debug("All tags: %s" %
+                      ", ".join(self.tags))
+
+    def _find_parent_name(self):
+        """ set self.parent_name using Dockerfile FROM line """
+
+        if self.buildable():
+            # read contents of Dockerfile into df
+            with open(self.dockerfile_abspath()) as dfh:
+                df = dfh.read()
+
+            # find FROM line to determine image parent
+            frompatt = re.compile(r'^FROM\s+(.*)$', re.MULTILINE)
+            fromline = re.search(frompatt, df)
+
+            self.parent_name = fromline.group(1)  # may have tag
+
+    def dockerfile_abspath(self):
+        """ returns absolute path to Dockerfile for this image """
+
+        if self.buildable():
+            return os.path.join(self.repo_d.abspath(),
+                                self.path, self.dockerfile)
+        else:
+            return None
+
+    def dockerfile_rel_path(self):
+        """ returns the path relative to the context of the Dockerfile """
+
+        if self.buildable():
+            if self.context == ".":
+                return self.dockerfile
+            else:
+                return os.path.normpath(os.path.join(self.path,
+                                                     self.dockerfile))
+        else:
+            return None
+
+    def context_tarball(self):
+        """ returns a filehandle to a tarball (tempfile) for the image
+
+        The tarball is the Docker build context: the image's context
+        directory plus any configured sub-components, with '.git'
+        excluded. Returns None for non-buildable images. The returned
+        tempfile is seeked to 0, ready to be passed to docker build.
+        """
+
+        if self.buildable():
+
+            # context directory, resolved relative to the repo checkout
+            context_path = os.path.normpath(
+                               os.path.join(self.repo_d.abspath(),
+                                            self.path, self.context))
+
+            LOG.info("Creating context tarball of path: %s" % context_path)
+
+            # dereference=True so symlinked files are stored as content
+            t_fh = tempfile.NamedTemporaryFile()
+            t = tarfile.open(mode='w', fileobj=t_fh, dereference=True)
+
+            # exclude files in this list
+            exclusion_list = ['.git']
+
+            # see docker-py source for context
+            for path in sorted(
+                    DockerUtils.exclude_paths(context_path, exclusion_list)):
+                t.add(os.path.join(context_path, path),
+                      arcname=path,
+                      recursive=False)
+
+            # add sub-components to tarball if required
+            if self.components is not None:
+                for component in self.components:
+                    c_ctx_p = os.path.normpath(
+                                os.path.join(component['repo_d'].abspath(),
+                                             component['path']))
+
+                    LOG.info("Adding component %s at context %s" %
+                             (component['repo_name'], c_ctx_p))
+
+                    # walk component source path
+                    for path in sorted(
+                          DockerUtils.exclude_paths(c_ctx_p, exclusion_list)):
+
+                        # path to where to put files in the archive
+                        cf_dest = os.path.normpath(
+                                    os.path.join(component['dest'], path))
+
+                        t.add(os.path.join(c_ctx_p, path),
+                              arcname=cf_dest,
+                              recursive=False)
+
+                # t.list()  # prints all files in tarball
+            t.close()
+            # rewind so the caller can stream the tarball from the start
+            t_fh.seek(0)
+            return t_fh
+
+        else:
+            return None
+
+    def buildargs(self):
+        """ returns array of labels in docker buildargs compliant format """
+        ba_a = {}
+
+        for label_k in self.labels:
+            ba_re = re.compile(r'\W')  # non alpha/num/_ chars
+            ba_label = ba_re.sub('_', label_k)
+            ba_a[ba_label] = self.labels[label_k]
+
+        return ba_a
+
+
+class DockerBuilder():
+
+    def __init__(self, repo_manifest):
+        """ Build/fetch all configured images against a repo manifest.
+
+        Constructing this object runs the whole pipeline: it creates
+        DockerImage objects from the global config, connects to Docker
+        (unless --dry-run), links parent/child images, finds preexisting
+        images, optionally writes a dependency graph, processes (fetches
+        or builds) images, and optionally writes an actions YAML file.
+        """
+
+        global buildable_images
+        global pull_only_images
+
+        self.rm = repo_manifest
+        self.dc = None  # Docker Client object
+
+        self.images = []
+
+        # arrays of images, used for write_actions
+        self.all = []
+        self.preexisting = []
+        self.obsolete = []
+        self.pulled = []
+        self.failed_pull = []
+        self.obsolete_pull = []
+        self.built = []
+        self.failed_build = []
+
+        # create dict of images, setting defaults
+        for image in buildable_images:
+
+            repo_d = self.rm.get_repo(image['repo'])
+
+            if "components" in image:
+                components = []
+
+                # normalize component config entries, defaulting path to '.'
+                for component in image['components']:
+                    comp = {}
+                    comp['repo_name'] = component['repo']
+                    comp['repo_d'] = self.rm.get_repo(component['repo'])
+                    comp['dest'] = component['dest']
+                    comp['path'] = component.get('path', '.')
+                    components.append(comp)
+            else:
+                components = None
+
+            # set the full name in case this is pulled
+            full_name = "%s:%s" % (image['name'], build_tag)
+
+            img_o = DockerImage(full_name, image['repo'], repo_d,
+                                image.get('path', '.'),
+                                image.get('context', '.'),
+                                image.get('dockerfile', 'Dockerfile'),
+                                components=components)
+
+            self.images.append(img_o)
+
+        # add misc images
+        for misc_image in pull_only_images:
+            img_o = DockerImage(misc_image)
+            self.images.append(img_o)
+
+        # dry runs never talk to a docker daemon
+        if not args.dry_run:
+            self._docker_connect()
+
+        self.create_dependency()
+        self.find_preexisting()
+
+        if args.graph is not None:
+            self.dependency_graph(args.graph)
+
+        self.process_images()
+
+        if args.actions_taken is not None:
+            self.write_actions_file(args.actions_taken)
+
+    def _docker_connect(self):
+        """ Connect to docker daemon
+
+        Exits the program with status 1 if the daemon doesn't respond
+        to a ping.
+        """
+
+        self.dc = DockerClient()
+
+        if self.dc.ping():
+            LOG.debug("Docker server is responding")
+        else:
+            LOG.error("Unable to ping docker server")
+            sys.exit(1)
+
+    def find_preexisting(self):
+        """ find images that already exist in Docker and mark
+
+        Compares labels of images reported by the daemon against the
+        expected labels; matching images are marked DI_EXISTS, stale
+        ones are recorded in self.obsolete. No-op when there is no
+        docker connection (dry run).
+        """
+
+        if self.dc:
+            LOG.debug("Evaluating already built/fetched Docker images")
+
+            # get list of images from docker
+            pe_images = self.dc.images()
+
+            for pe_image in pe_images:
+                raw_tags = pe_image['RepoTags']
+
+                # record every image seen, for the actions report
+                self.all.append({
+                        'id': pe_image['Id'],
+                        'tags': raw_tags,
+                    })
+
+                # ignoring all <none>:<none> images, reasonable?
+                if raw_tags and "<none>:<none>" not in raw_tags:
+                    LOG.debug(" Preexisting Image - ID: %s, tags: %s" %
+                              (pe_image['Id'], ",".join(raw_tags)))
+
+                    # NOTE(review): only the first tag is used to match;
+                    # assumes all tags of an image resolve to the same
+                    # DockerImage -- confirm for multi-tagged images
+                    image = self.find_image(raw_tags[0])
+
+                    if image is not None:
+                        if image.compare_labels(pe_image['Labels']):
+                            LOG.debug(" Image %s has up-to-date labels" %
+                                      pe_image['Id'])
+
+                            self.preexisting.append({
+                                    'id': pe_image['Id'],
+                                    'tags': raw_tags,
+                                })
+
+                            image.image_id = pe_image['Id']
+                            image.status = DI_EXISTS
+
+                        else:
+                            LOG.debug(" Image %s has obsolete labels" %
+                                      pe_image['Id'])
+
+                            self.obsolete.append({
+                                    'id': pe_image['Id'],
+                                    'tags': raw_tags,
+                                })
+
+    def find_image(self, image_name):
+        """ return image object matching name, or None if not found """
+        LOG.debug("attempting to find image for: %s" % image_name)
+
+        # linear scan; image list is small enough that this is fine
+        for image in self.images:
+            if image.same_name(image_name):
+                return image
+        return None
+
+    def create_dependency(self):
+        """ set parent/child links for images
+
+        Determines which parent images are built internally vs pulled
+        from an external registry, creates DockerImage objects for the
+        external ones, links every image to its parent object, and then
+        creates labels/tags (child labels inherit from parents).
+        """
+
+        # list of all parent image names, with dupes
+        parents_with_dupes = [img.parent_name for img in self.images
+                              if img.parent_name is not None]
+
+        # remove duplicates
+        parents = list(set(parents_with_dupes))
+
+        LOG.info("All parent images: %s" % ", ".join(parents))
+
+        # list of "external parents", ones not built internally
+        external_parents = []
+
+        for parent_name in parents:
+            LOG.debug("Evaluating parent image: %s" % parent_name)
+            internal_parent = False
+
+            # match on p_name, without tag
+            (p_name, p_tag) = split_name(parent_name)
+
+            for image in self.images:
+                if image.same_name(p_name):  # internal image is a parent
+                    internal_parent = True
+                    LOG.debug(" Internal parent: %s" % image.name)
+                    break
+
+            if not internal_parent:  # parent is external
+                LOG.debug(" External parent: %s" % parent_name)
+                external_parents.append(parent_name)
+
+        # add unique external parents to image list
+        for e_p_name in set(external_parents):
+            LOG.debug(" Creating external parent image object: %s" % e_p_name)
+            img_o = DockerImage(e_p_name)
+            self.images.append(img_o)
+
+        # now that all images (including parents) are in list, associate them
+        for image in filter(lambda img: img.parent_name is not None,
+                            self.images):
+
+            LOG.debug("Associating image: %s" % image.name)
+
+            parent = self.find_image(image.parent_name)
+            image.parent = parent
+
+            if parent is not None:
+                LOG.debug(" internal image '%s' is parent of '%s'" %
+                          (parent.name, image.name))
+                parent.children.append(image)
+
+            else:
+                LOG.debug(" external image '%s' is parent of '%s'" %
+                          (image.parent_name, image.name))
+
+        # loop again now that parents are linked to create labels
+        for image in self.images:
+            image.create_labels()
+            image.create_tags()
+
+            # if image has parent, get labels from parent(s)
+            if image.parent is not None:
+                LOG.debug("Adding parent labels from %s to child %s" %
+                          (image.parent.name, image.name))
+
+                # don't create component labels for same repo as image
+                repo_list = [image.repo_name]
+                image.labels.update(image.parent.child_labels(repo_list))
+
+    def dependency_graph(self, graph_fn):
+        """ save a DOT dependency graph to a file
+
+        graph_fn: output filename (relative or absolute). Raises
+        ImportError if the graphviz pip module is not installed.
+        """
+
+        graph_fn_abs = os.path.abspath(graph_fn)
+
+        LOG.info("Saving DOT dependency graph to: %s" % graph_fn_abs)
+
+        # imported lazily so the module is only required when graphing
+        try:
+            import graphviz
+        except ImportError:
+            LOG.error('graphviz pip module not found')
+            raise
+
+        dg = graphviz.Digraph(comment='Image Dependency Graph',
+                              graph_attr={'rankdir': 'LR'})
+
+        component_nodes = []
+
+        # Use raw names, so they match with what's in Dockerfiles
+        # delete colons as python graphviz module breaks with them
+        for image in self.images:
+            name_g = image.raw_name.replace(':', '\n')
+            dg.node(name_g)
+
+            # solid edge: parent image -> child image
+            if image.parent is not None:
+                name_p = image.parent.raw_name.replace(':', '\n')
+                dg.edge(name_p, name_g)
+
+            # dashed edge: component repo -> image that embeds it
+            if image.components is not None:
+                for component in image.components:
+                    name_c = "component - %s" % component['repo_name']
+                    if name_c not in component_nodes:
+                        dg.node(name_c)
+                        component_nodes.append(name_c)
+                    dg.edge(name_c, name_g, "", {'style': 'dashed'})
+
+        with open(graph_fn_abs, 'w') as g_fh:
+            g_fh.write(dg.source)
+
+    def write_actions_file(self, actions_fn):
+        """ Write a YAML summary of all actions taken to a file.
+
+        actions_fn: output filename. The YAML maps ib_* keys to the
+        lists accumulated during processing (pulled, built, preexisting,
+        obsolete, and the failure lists); consumed by downstream CI
+        (e.g. to know which images to push).
+        """
+
+        actions_fn_abs = os.path.abspath(actions_fn)
+
+        LOG.info("Saving actions as YAML to: %s" % actions_fn_abs)
+
+        actions = {
+                "ib_pulled": self.pulled,
+                "ib_built": self.built,
+                "ib_preexisting_images": self.preexisting,
+                "ib_obsolete_images": self.obsolete,
+                "ib_failed_pull": self.failed_pull,
+                "ib_obsolete_pull": self.obsolete_pull,
+                "ib_failed_build": self.failed_build,
+                }
+
+        with open(actions_fn_abs, 'w') as a_fh:
+            yaml.safe_dump(actions, a_fh)
+            LOG.debug(yaml.safe_dump(actions))
+
+    def process_images(self):
+        """ determine whether to build/fetch images
+
+        Strategy: images without parents (upstream) are always fetched;
+        images with parents are fetched when their context/parents are
+        clean, otherwise built. Fetch failures fall back to building.
+        Finally, builds run in dependency order until no buildable
+        images remain.
+
+        NOTE(review): status comparisons use 'is'/'is not' against the
+        DI_* constants; this relies on the constants being compared by
+        identity (e.g. module-level singletons) -- confirm DI_* are not
+        plain strings constructed elsewhere.
+        """
+
+        # upstream images (have no parents), must be fetched
+        must_fetch_a = filter(lambda img: img.parent is None, self.images)
+
+        for image in must_fetch_a:
+            if image.status is not DI_EXISTS:
+                image.status = DI_FETCH
+
+        # images that can be built or fetched (have parents)
+        b_or_f_a = filter(lambda img: img.parent is not None, self.images)
+
+        for image in b_or_f_a:
+            if not image.parent_clean():
+                # must be built if not clean
+                image.status = DI_BUILD
+            elif image.status is not DI_EXISTS:
+                # try to fetch if clean and doesn't exist
+                image.status = DI_FETCH
+            # otherwise, image is clean and exists (image.status == DI_EXISTS)
+
+        c_and_e_a = filter(lambda img: img.status is DI_EXISTS, self.images)
+        LOG.info("Preexisting and clean images: %s" %
+                 ", ".join(c.name for c in c_and_e_a))
+
+        upstream_a = filter(lambda img: (img.status is DI_FETCH and
+                                         img.parent is None), self.images)
+        LOG.info("Upstream images that must be fetched: %s" %
+                 ", ".join(u.raw_name for u in upstream_a))
+
+        fetch_a = filter(lambda img: (img.status is DI_FETCH and
+                                      img.parent is not None), self.images)
+        LOG.info("Clean, buildable images to attempt to fetch: %s" %
+                 ", ".join(f.raw_name for f in fetch_a))
+
+        build_a = filter(lambda img: img.status is DI_BUILD, self.images)
+        LOG.info("Buildable images, due to unclean context or parents: %s" %
+                 ", ".join(b.raw_name for b in build_a))
+
+        # OK to fetch upstream in any case as they should reduce number of
+        # layers pulled/built later
+
+        for image in upstream_a:
+            if not self._fetch_image(image):
+                LOG.info("Unable to fetch upstream image: %s" % image.raw_name)
+                # FIXME: fail if the upstream image can't be fetched ?
+
+        # fetch the most-depended-upon images first
+        fetch_sort = sorted(fetch_a, key=(lambda img: len(img.children)),
+                            reverse=True)
+
+        for image in fetch_sort:
+            if not self._fetch_image(image):
+                # if didn't fetch, build
+                image.status = DI_BUILD
+
+        # build in waves: each pass builds images whose parents now exist
+        while True:
+            buildable_images = self.get_buildable()
+            if buildable_images:
+                for image in buildable_images:
+                    self._build_image(image)
+            else:
+                LOG.debug("No more images to build, ending build loop")
+                break
+
+    def get_buildable(self):
+        """ Returns list of images that can be built
+
+        An image is buildable when it is marked DI_BUILD and its parent
+        image already exists locally (DI_EXISTS).
+        """
+
+        buildable = []
+
+        for image in filter(lambda img: img.status is DI_BUILD, self.images):
+            if image.parent.status is DI_EXISTS:
+                buildable.append(image)
+
+        LOG.debug("Buildable images: %s" %
+                  ', '.join(image.name for image in buildable))
+
+        return buildable
+
+    def tag_image(self, image):
+        """ Applies tags to an image
+
+        Tags image.image_id with every tag in image.tags. No-op on the
+        docker side when there is no client connection (dry run).
+        """
+
+        for tag in image.tags:
+
+            LOG.info("Tagging id: '%s', repo: '%s', tag: '%s'" %
+                     (image.image_id, image.name, tag))
+
+            if self.dc is not None:
+                self.dc.tag(image.image_id, image.name, tag=tag)
+
+    def _fetch_image(self, image):
+        """ Pull an image from a registry and verify its labels.
+
+        Returns True when the image was pulled and its labels match the
+        expected ones (image marked DI_EXISTS and re-tagged); False on
+        pull failure or label mismatch, which signals the caller to
+        build instead. Exits the program if the pull stream reports an
+        'error' entry.
+        """
+
+        LOG.info("Attempting to fetch docker image: %s" % image.raw_name)
+
+        if self.dc is not None:
+            try:
+                for stat_json in self.dc.pull(image.raw_name,
+                                              stream=True):
+
+                    # sometimes Docker's JSON is dirty, per:
+                    # https://github.com/docker/docker-py/pull/1081/
+                    stat_s = stat_json.strip()
+                    stat_list = stat_s.split("\r\n")
+
+                    for s_j in stat_list:
+                        stat_d = json.loads(s_j)
+
+                        if 'stream' in stat_d:
+                            for stat_l in stat_d['stream'].split('\n'):
+                                LOG.debug(stat_l)
+
+                        if 'status' in stat_d:
+                            for stat_l in stat_d['status'].split('\n'):
+                                # demote routine layer-progress chatter
+                                noisy = ["Extracting", "Downloading",
+                                         "Waiting", "Download complete",
+                                         "Pulling fs layer", "Pull complete",
+                                         "Verifying Checksum",
+                                         "Already exists"]
+                                if stat_l in noisy:
+                                    LOG.debug(stat_l)
+                                else:
+                                    LOG.info(stat_l)
+
+                        if 'error' in stat_d:
+                            LOG.error(stat_d['error'])
+                            sys.exit(1)
+
+            # NOTE(review): bare except also catches SystemExit and
+            # KeyboardInterrupt; consider 'except Exception'
+            except:
+                LOG.exception("Error pulling docker image")
+
+                self.failed_pull.append({
+                        "tags": [image.raw_name, ],
+                    })
+
+                return False
+
+            # obtain the image_id by inspecting the pulled image. Seems unusual
+            # that the Docker API `pull` method doesn't provide it when the
+            # `build` method does
+            pulled_image = self.dc.inspect_image(image.raw_name)
+
+            # check to make sure that image that was downloaded has the labels
+            # that we expect it to have, otherwise return false, trigger build
+            if not image.compare_labels(
+                        pulled_image['ContainerConfig']['Labels']):
+                LOG.info("Tried fetching image %s, but labels didn't match" %
+                         image.raw_name)
+
+                self.obsolete_pull.append({
+                        "id": pulled_image['Id'],
+                        "tags": pulled_image['RepoTags'],
+                    })
+                return False
+
+            image.image_id = pulled_image['Id']
+            LOG.info("Fetched image %s, id: %s" %
+                     (image.raw_name, image.image_id))
+
+            self.pulled.append({
+                    "id": pulled_image['Id'],
+                    "tags": pulled_image['RepoTags'],
+                })
+
+            self.tag_image(image)
+            image.status = DI_EXISTS
+            return True
+
+    def _build_image(self, image):
+        """ Build a docker image from its context tarball.
+
+        Streams build output (optionally to a per-image log file under
+        args.build_log_dir), records the result in self.built or
+        self.failed_build, tags the image and marks it DI_EXISTS on
+        success. Exits the program if the build stream reports an
+        'error' entry.
+        """
+
+        LOG.info("Building docker image for %s" % image.raw_name)
+
+        if self.dc is not None:
+
+            # first tag doubles as the name the image is built under
+            build_tag = "%s:%s" % (image.name, image.tags[0])
+
+            buildargs = image.buildargs()
+            context_tar = image.context_tarball()
+            dockerfile = image.dockerfile_rel_path()
+
+            for key, val in buildargs.iteritems():
+                LOG.debug("Buildarg - %s : %s" % (key, val))
+
+            bl_path = ""
+            start_time = datetime.datetime.utcnow()
+
+            if(args.build_log_dir):
+                bl_name = "%s_%s" % (start_time.strftime("%Y%m%dT%H%M%SZ"),
+                                     re.sub(r'\W', '_', image.name))
+                bl_path = os.path.abspath(
+                            os.path.join(args.build_log_dir, bl_name))
+
+                LOG.info("Build log: %s" % bl_path)
+                bl_fh = open(bl_path, 'w+', 0)  # 0 = unbuffered writes
+            else:
+                bl_fh = None
+
+            try:
+                LOG.info("Building image: %s" % image)
+
+                for stat_d in self.dc.build(tag=build_tag,
+                                            buildargs=buildargs,
+                                            custom_context=True,
+                                            fileobj=context_tar,
+                                            dockerfile=dockerfile,
+                                            rm=True,
+                                            forcerm=True,
+                                            pull=False,
+                                            stream=True,
+                                            decode=True):
+
+                    if 'stream' in stat_d:
+
+                        if bl_fh:
+                            bl_fh.write(stat_d['stream'].encode('utf-8'))
+
+                        for stat_l in stat_d['stream'].split('\n'):
+                            if(stat_l):
+                                LOG.debug(stat_l)
+                        # capture the short image id from the success line
+                        if stat_d['stream'].startswith("Successfully built "):
+                            siid = stat_d['stream'].split(' ')[2]
+                            short_image_id = siid.strip()
+                            LOG.debug("Short Image ID: %s" % short_image_id)
+
+                    if 'status' in stat_d:
+                        for stat_l in stat_d['status'].split('\n'):
+                            if(stat_l):
+                                LOG.info(stat_l)
+
+                    if 'error' in stat_d:
+                        LOG.error(stat_d['error'])
+                        image.status = DI_ERROR
+                        sys.exit(1)
+
+            # NOTE(review): bare except also catches SystemExit and
+            # KeyboardInterrupt; consider 'except Exception'
+            except:
+                LOG.exception("Error building docker image")
+
+                self.failed_build.append({
+                        "tags": [build_tag, ],
+                    })
+
+                return
+
+            finally:
+                if(bl_fh):
+                    bl_fh.close()
+
+            # the image ID given by output isn't the full SHA256 id, so find
+            # and set it to the full one
+            # NOTE(review): short_image_id is unbound (NameError) if the
+            # stream never contained a "Successfully built" line -- confirm
+            # the daemon always emits it on success
+            built_image = self.dc.inspect_image(short_image_id)
+            image.image_id = built_image['Id']
+
+            end_time = datetime.datetime.utcnow()
+            duration = end_time - start_time  # duration is a timedelta
+
+            LOG.info("Built Image: %s, duration: %s, id: %s" %
+                     (image.name, duration, image.image_id))
+
+            self.built.append({
+                    "id": image.image_id,
+                    "tags": [build_tag, ],
+                    "push_name": image.raw_name,
+                    "build_log": bl_path,
+                    "duration": duration.total_seconds(),
+                })
+
+            self.tag_image(image)
+            image.status = DI_EXISTS
+
+
+if __name__ == "__main__":
+    # entry point: parse CLI args, load YAML config, then run the builder
+    parse_args()
+    load_config()
+
+    # only include docker module if not a dry run
+    if not args.dry_run:
+        try:
+            from distutils.version import LooseVersion
+            from docker import __version__ as docker_version
+            # docker-py renamed Client to APIClient in 2.0.0
+            if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+                from docker import APIClient as DockerClient
+                from docker import utils as DockerUtils
+            else:
+                from docker import Client as DockerClient
+                from docker import utils as DockerUtils
+        except ImportError:
+            LOG.error("Unable to load python docker module (dry run?)")
+            sys.exit(1)
+
+    # constructing DockerBuilder runs the entire fetch/build pipeline
+    rm = RepoManifest()
+    db = DockerBuilder(rm)
diff --git a/scripts/opencloud-in-a-box.sh b/scripts/opencloud-in-a-box.sh
deleted file mode 100755
index ab57075..0000000
--- a/scripts/opencloud-in-a-box.sh
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/usr/bin/env bash
-# opencloud-in-a-box.sh
-
-set -e -x
-
-# start time, used to name logfiles
-START_T=$(date -u "+%Y%m%d_%H%M%SZ")
-
-# Paths
-CORDDIR=~/cord
-CONFIG=${CORDDIR}/config/opencloud_in_a_box.yml
-SSHCONFIG=~/.ssh/config
-VAGRANT_CWD=${CORDDIR}/build/targets/opencloud-in-a-box
-
-# CORD versioning
-REPO_BRANCH="master"
-VERSION_STRING="OpenCloud Devel"
-
-function add_box() {
-  echo "Downloading image: $1"
-  vagrant box list | grep $1 | grep virtualbox || vagrant box add $1
-  vagrant box list | grep $1 | grep libvirt || vagrant mutate $1 libvirt --input-provider virtualbox
-}
-
-function run_stage {
-    echo "==> "$1": Starting"
-    $1
-    echo "==> "$1": Complete"
-}
-
-function cleanup_from_previous_test() {
-  echo "## Cleanup ##"
-
-  if [ -d $CORDDIR/build ]
-  then
-    echo "Destroying all Vagrant VMs"
-    cd $CORDDIR/build
-    sudo VAGRANT_CWD=$VAGRANT_CWD vagrant destroy
-  fi
-
-  echo "Removing $CORDDIR"
-  cd ~
-  rm -rf $CORDDIR
-}
-
-function bootstrap() {
-
-  if [ ! -x "/usr/bin/ansible" ]
-  then
-    echo "Installing Ansible..."
-    sudo apt-get update
-    sudo apt-get install -y software-properties-common apt-transport-https
-    #sudo apt-add-repository -y ppa:ansible/ansible  # latest supported version
-    sudo apt-get -y install python-dev libffi-dev python-pip libssl-dev sshpass
-    sudo pip install ansible==2.2.2.0
-    sudo apt-get update
-    sudo apt-get install -y python-netaddr
-  fi
-
-  if [ ! -x "/usr/local/bin/repo" ]
-  then
-    echo "Installing repo..."
-    REPO_SHA256SUM="e147f0392686c40cfd7d5e6f332c6ee74c4eab4d24e2694b3b0a0c037bf51dc5" # not versioned...
-    curl -o /tmp/repo https://storage.googleapis.com/git-repo-downloads/repo
-    echo "$REPO_SHA256SUM  /tmp/repo" | sha256sum -c -
-    sudo mv /tmp/repo /usr/local/bin/repo
-    sudo chmod a+x /usr/local/bin/repo
-  fi
-
-  if [ ! -x "/usr/bin/vagrant" ]
-  then
-    echo "Installing vagrant and associated tools..."
-    VAGRANT_SHA256SUM="52ebd6aa798582681d0f1e008982c709136eeccd668a8e8be4d48a1f632de7b8"  # version 1.9.2
-    curl -o /tmp/vagrant.deb https://releases.hashicorp.com/vagrant/1.9.2/vagrant_1.9.2_x86_64.deb
-    echo "$VAGRANT_SHA256SUM  /tmp/vagrant.deb" | sha256sum -c -
-    sudo dpkg -i /tmp/vagrant.deb
-    sudo apt-get -y install qemu-kvm libvirt-bin libvirt-dev curl nfs-kernel-server git build-essential python-pip ruby2.0
-    sudo adduser $USER libvirtd
-    sudo pip install pyparsing python-logstash
-
-    run_stage cloudlab_setup
-
-    echo "Installing vagrant plugins..."
-    # FIXME: fix for vagrant-libvirt dependency issue that arose on 2017-04-28 - zdw
-    # vagrant plugin list | grep vagrant-libvirt || vagrant plugin install vagrant-libvirt --plugin-version 0.0.35
-    if ! vagrant plugin list | grep vagrant-libvirt
-    then
-      git clone -b remove_xmlrpc_dep https://github.com/zdw/vagrant-libvirt.git ~/vagrant-libvirt
-      cd ~/vagrant-libvirt
-      gem2.0 build vagrant-libvirt.gemspec
-      vagrant plugin install vagrant-libvirt-0.0.35.gem
-      cd ~
-    fi
-    vagrant plugin list | grep vagrant-mutate || vagrant plugin install vagrant-mutate
-
-    add_box ubuntu/trusty64
-  fi
-
-  if [ ! -d "$CORDDIR" ]
-  then
-    echo "Downloading CORD/XOS..."
-
-    if [ ! -e "~/.gitconfig" ]
-    then
-      echo "No ~/.gitconfig, setting testing defaults"
-      git config --global user.name 'Test User'
-      git config --global user.email 'test@null.com'
-      git config --global color.ui false
-    fi
-
-    # make sure we can find gerrit.opencord.org as DNS failures will fail the build
-    dig +short gerrit.opencord.org || (echo "ERROR: gerrit.opencord.org can't be looked up in DNS" && exit 1)
-
-    mkdir $CORDDIR && cd $CORDDIR
-    repo init -u https://gerrit.opencord.org/manifest -b $REPO_BRANCH -g build,onos,orchestration,voltha
-    repo sync
-
-    # check out gerrit branches using repo
-    for gerrit_branch in ${GERRIT_BRANCHES[@]}; do
-      echo "Checking out opencord gerrit branch: $gerrit_branch"
-      repo download ${gerrit_branch/:/ }
-    done
-  fi
-}
-
-function cloudlab_setup() {
-
-  # Don't do anything if not a CloudLab node
-  [ ! -d /usr/local/etc/emulab ] && return
-
-  # The watchdog will sometimes reset groups, turn it off
-  if [ -e /usr/local/etc/emulab/watchdog ]
-  then
-    sudo /usr/bin/perl -w /usr/local/etc/emulab/watchdog stop
-    sudo mv /usr/local/etc/emulab/watchdog /usr/local/etc/emulab/watchdog-disabled
-  fi
-
-  # Mount extra space, if haven't already
-  if [ ! -d /mnt/extra ]
-  then
-    sudo mkdir -p /mnt/extra
-
-    # for NVME SSD on Utah Cloudlab, not supported by mkextrafs
-    if $(df | grep -q nvme0n1p1) && [ -e /usr/testbed/bin/mkextrafs ]
-    then
-      # set partition type of 4th partition to Linux, ignore errors
-      echo -e "t\n4\n82\np\nw\nq" | sudo fdisk /dev/nvme0n1 || true
-
-      sudo mkfs.ext4 /dev/nvme0n1p4
-      echo "/dev/nvme0n1p4 /mnt/extra/ ext4 defaults 0 0" | sudo tee -a /etc/fstab
-      sudo mount /mnt/extra
-      mount | grep nvme0n1p4 || (echo "ERROR: NVME mkfs/mount failed, exiting!" && exit 1)
-
-    elif [ -e /usr/testbed/bin/mkextrafs ]  # if on Clemson/Wisconsin Cloudlab
-    then
-      # Sometimes this command fails on the first try
-      sudo /usr/testbed/bin/mkextrafs -r /dev/sdb -qf "/mnt/extra/" || sudo /usr/testbed/bin/mkextrafs -r /dev/sdb -qf "/mnt/extra/"
-
-      # Check that the mount succeeded (sometimes mkextrafs succeeds but device not mounted)
-      mount | grep sdb || (echo "ERROR: mkextrafs failed, exiting!" && exit 1)
-    fi
-  fi
-
-  # replace /var/lib/libvirt/images with a symlink
-  [ -d /var/lib/libvirt/images/ ] && [ ! -h /var/lib/libvirt/images ] && sudo rmdir /var/lib/libvirt/images
-  sudo mkdir -p /mnt/extra/libvirt_images
-
-  if [ ! -e /var/lib/libvirt/images ]
-  then
-    sudo ln -s /mnt/extra/libvirt_images /var/lib/libvirt/images
-  fi
-}
-
-function vagrant_vms_up() {
-
-  # vagrant-libvirt 0.0.35 fails with libvirt networks created by others
-  # echo "Configuring libvirt networking..."
-  # cd ${CORDDIR}/build/ansible
-  # ansible-playbook opencloud-in-a-box-playbook.yml
-
-  echo "Bringing up OpenCloud-in-a-Box Vagrant VM's..."
-  cd $CORDDIR/build
-  sudo VAGRANT_CWD=$VAGRANT_CWD vagrant up head1 compute1 compute2 --provider libvirt
-
-  # This is a workaround for a weird issue with ARP cache timeout breaking 'vagrant ssh'
-  # It allows SSH'ing to the machine via 'ssh <machinename>'
-  echo "Configuring SSH for VM's..."
-  sudo VAGRANT_CWD=$VAGRANT_CWD vagrant ssh-config head1 compute1 compute2 > $SSHCONFIG
-
-  sudo chown -R ${USER} ${VAGRANT_CWD}/.vagrant
-}
-
-function opencloud_buildhost_prep() {
-
-  echo "Preparing build tools..."
-  cd ${CORDDIR}/build/platform-install
-  ansible-playbook -i inventory/opencloud opencloud-prep-playbook.yml
-}
-
-# Parse options
-GERRIT_BRANCHES=
-RUN_TEST=0
-SETUP_ONLY=0
-CLEANUP=0
-
-while getopts "b:chsv" opt; do
-  case ${opt} in
-    b ) GERRIT_BRANCHES+=("$OPTARG")
-      ;;
-    c ) CLEANUP=1
-      ;;
-    h ) echo "Usage:"
-      echo "    $0                install OpenCloud-in-a-Box [default]"
-      echo "    $0 -b <project:changeset/revision>  checkout a changesets from gerrit. Can"
-      echo "                      be used multiple times."
-      echo "    $0 -c             cleanup from previous test"
-      echo "    $0 -h             display this help message"
-      echo "    $0 -s             run initial setup phase only (don't start building CORD)"
-      echo "    $0 -v             print CiaB version and exit"
-      exit 0
-      ;;
-    s ) SETUP_ONLY=1
-      ;;
-    v ) echo "$VERSION_STRING ($REPO_BRANCH branch)"
-      exit 0
-      ;;
-    \? ) echo "Invalid option: -$OPTARG"
-      exit 1
-      ;;
-  esac
-done
-
-# What to do
-if [[ $CLEANUP -eq 1 ]]
-then
-  run_stage cleanup_from_previous_test
-fi
-
-echo ""
-echo "Preparing to install $VERSION_STRING ($REPO_BRANCH branch)"
-echo ""
-
-run_stage bootstrap
-
-
-if [[ $SETUP_ONLY -ne 0 ]]
-then
-  echo "Finished build environment setup, exiting..."
-  exit 0
-fi
-
-run_stage vagrant_vms_up
-run_stage opencloud_buildhost_prep
-
-exit 0