[CORD-1590] Add `single` scenario: the mock scenario with synchronizers
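
Adds a `single` scenario: a single headnode VM running the XOS
container set plus synchronizers, without MaaS or OpenStack. Also
renames the `elasticstack` milestone to `deploy-elasticstack`, adds
START_XOS_PREREQS and DEPLOY_ONOS_PREREQS so a scenario can make
ElasticStack a prerequisite of XOS and ONOS, moves the core-image
cleanup from `xos-update-images` to `clean-images`, renames the
Vagrant network settings (mgmt_network_name ->
vm_management_network_name, etc.), and adds rcord-single and
opencloud-single podconfigs.

Example usage, as documented in docs/quickstart_make.md (the
deploy-elasticstack milestone is optional):

  make PODCONFIG=rcord-single.yml config
  make -j4 milestones/deploy-elasticstack
  make -j4 build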

Change-Id: Ia2e40c2a105464aad93230f689cc295db835496f
diff --git a/Makefile b/Makefile
index 9d8610d..bb1c0c3 100644
--- a/Makefile
+++ b/Makefile
@@ -24,7 +24,7 @@
 M                ?= $(BUILD)/milestones
 LOGS             ?= $(BUILD)/logs
 
-ALL_MILESTONES   ?= $(M)/prereqs-check $(M)/vagrant-up $(M)/copy-cord $(M)/cord-config $(M)/copy-config $(M)/prep-buildnode $(M)/prep-headnode $(M)/elasticstack $(M)/prep-computenode $(M)/glance-images $(M)/deploy-openstack $(M)/deploy-maas $(M)/deploy-computenode $(M)/docker-images $(M)/core-image $(M)/start-xos $(M)/onboard-profile $(M)/deploy-onos $(M)/onboard-openstack
+ALL_MILESTONES   ?= $(M)/prereqs-check $(M)/vagrant-up $(M)/copy-cord $(M)/cord-config $(M)/copy-config $(M)/prep-buildnode $(M)/prep-headnode $(M)/deploy-elasticstack $(M)/prep-computenode $(M)/glance-images $(M)/deploy-openstack $(M)/deploy-maas $(M)/deploy-computenode $(M)/docker-images $(M)/core-image $(M)/start-xos $(M)/onboard-profile $(M)/deploy-onos $(M)/onboard-openstack
 
 LOCAL_MILESTONES ?= $(M)/local-cord-config $(M)/local-docker-images $(M)/local-core-image $(M)/local-start-xos $(M)/local-onboard-profile
 
@@ -98,7 +98,7 @@
 	rm -f $(M)/onboard-profile $(M)/local-onboard-profile
 
 xos-update-images: clean-images
-	rm -f $(M)/core-image $(M)/start-xos $(M)/local-core-image $(M)local-start-xos
+	rm -f $(M)/start-xos $(M)/local-start-xos
 
 compute-node-refresh:
 	$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_MAAS) $(PI)/compute-node-refresh-playbook.yml" $(LOGCMD)
@@ -108,7 +108,7 @@
 	rm -f $(M)/vagrant-up
 
 clean-images:
-	rm -f $(M)/docker-images $(M)/local-docker-images
+	rm -f $(M)/docker-images $(M)/local-docker-images $(M)/core-image $(M)/local-core-image
 
 clean-genconfig:
 	rm -f $(CONFIG_FILES)
@@ -132,6 +132,8 @@
 COPY_CONFIG_PREREQS    ?=
 PREP_BUILDNODE_PREREQS ?=
 PREP_HEADNODE_PREREQS  ?=
+START_XOS_PREREQS      ?=
+DEPLOY_ONOS_PREREQS    ?=
 
 # == MILESTONES == #
 # empty target files are touched in the milestones dir to indicate completion
@@ -169,7 +171,7 @@
 	$(ANSIBLE_PB) $(PI)/prep-headnode-playbook.yml $(LOGCMD)
 	touch $@
 
-$(M)/elasticstack: | $(M)/prep-headnode
+$(M)/deploy-elasticstack: | $(M)/prep-headnode
 	$(ANSIBLE_PB) $(PI)/deploy-elasticstack-playbook.yml $(LOGCMD)
 	touch $@
 
@@ -181,7 +183,7 @@
 	$(ANSIBLE_PB) $(PI)/glance-images-playbook.yml $(LOGCMD)
 	touch $@
 
-$(M)/deploy-openstack: | $(M)/elasticstack $(M)/prep-headnode $(M)/prep-computenode
+$(M)/deploy-openstack: | $(M)/deploy-elasticstack $(M)/prep-headnode $(M)/prep-computenode
 	$(ANSIBLE_PB) $(PI)/deploy-openstack-playbook.yml $(LOGCMD)
 	touch $@
 
@@ -201,7 +203,7 @@
 	$(ANSIBLE_PB) $(PI)/build-core-image-playbook.yml $(LOGCMD)
 	touch $@
 
-$(M)/start-xos: | $(M)/prep-headnode $(M)/core-image
+$(M)/start-xos: | $(M)/prep-headnode $(M)/core-image $(START_XOS_PREREQS)
 	$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/start-xos-playbook.yml" $(LOGCMD)
 	touch $@
 
@@ -213,7 +215,7 @@
 	$(SSH_BUILD) "cd /opt/cord/onos-apps; make images" $(LOGCMD)
 	touch $@
 
-$(M)/deploy-onos: | $(M)/start-xos $(M)/docker-images $(M)/build-onos-apps
+$(M)/deploy-onos: | $(M)/prep-headnode $(M)/docker-images $(M)/build-onos-apps $(DEPLOY_ONOS_PREREQS)
 	$(ANSIBLE_PB) $(PI)/deploy-mavenrepo-playbook.yml $(LOGCMD)
 	$(ANSIBLE_PB) $(PI)/deploy-onos-playbook.yml $(LOGCMD)
 	touch $@
diff --git a/ansible/roles/genconfig/templates/config.mk.j2 b/ansible/roles/genconfig/templates/config.mk.j2
index dd0fc06..b778176 100644
--- a/ansible/roles/genconfig/templates/config.mk.j2
+++ b/ansible/roles/genconfig/templates/config.mk.j2
@@ -38,4 +38,10 @@
 {% if prep_headnode_prereqs is defined %}
 PREP_HEADNODE_PREREQS   = $(M)/{{ prep_headnode_prereqs | join(" $(M)/") }}
 {% endif %}
+{% if start_xos_prereqs is defined %}
+START_XOS_PREREQS       = $(M)/{{ start_xos_prereqs | join(" $(M)/") }}
+{% endif %}
+{% if deploy_onos_prereqs is defined %}
+DEPLOY_ONOS_PREREQS     = $(M)/{{ deploy_onos_prereqs | join(" $(M)/") }}
+{% endif %}
 
diff --git a/docs/quickstart_make.md b/docs/quickstart_make.md
index 26ca031..1d6420a 100644
--- a/docs/quickstart_make.md
+++ b/docs/quickstart_make.md
@@ -136,7 +136,10 @@
 #### Included Scenarios
 
 - `local`: Minimal set of containers running locally on the development host
-- `mock`: Creates a single Vagrant VM with containers and DNS set up
+- `mock`: Creates a single Vagrant VM with containers and DNS set up, without
+  synchronizers
+- `single`: Creates a single Vagrant VM with containers and DNS set up, with
+  synchronizers and optional ElasticStack/ONOS
 - `cord`: Physical or virtual multi-node CORD pod, with MaaS and OpenStack
 - `opencloud`: Physical or virtual multi-node OpenCloud pod, with OpenStack
 
@@ -277,6 +280,31 @@
 This will teardown the XOS container set, tell the build system to rebuild
 images, then perform a build and reload the profile.
 
+#### Use ElasticStack or ONOS with the `single` scenario
+
+The `single` scenario is a medium-weight environment for synchronizer
+development, with optional ElasticStack or ONOS support.
+
+To use these, invoke the ElasticStack or ONOS milestone target before the
+`build` target:
+
+```
+make PODCONFIG=rcord-single.yml config
+make -j4 milestones/deploy-elasticstack
+make -j4 build
+```
+
+or
+
+```
+make PODCONFIG=opencloud-single.yml config
+make -j4 milestones/deploy-onos
+make -j4 build
+```
+
+To use both in combination, run the ElasticStack target first so that ONOS
+can send its logs to ElasticStack.
+
 ### Building docker images with imagebuilder.py
 
 For docker images for XOS (and possibly others in the future) the build system
diff --git a/podconfig/opencloud-single.yml b/podconfig/opencloud-single.yml
new file mode 100644
index 0000000..81458f2
--- /dev/null
+++ b/podconfig/opencloud-single.yml
@@ -0,0 +1,7 @@
+---
+# opencloud-single Pod Config
+# Creates a single-node OpenCloud pod w/synchronizers
+
+cord_scenario: single
+cord_profile: opencloud
+
diff --git a/podconfig/rcord-single.yml b/podconfig/rcord-single.yml
new file mode 100644
index 0000000..e913418
--- /dev/null
+++ b/podconfig/rcord-single.yml
@@ -0,0 +1,7 @@
+---
+# rcord-single Pod Config
+# Creates a single-node mock R-CORD pod w/synchronizers
+
+cord_scenario: single
+cord_profile: rcord
+
diff --git a/scenarios/cord/Vagrantfile b/scenarios/cord/Vagrantfile
index bfcf8cf..f3285a9 100644
--- a/scenarios/cord/Vagrantfile
+++ b/scenarios/cord/Vagrantfile
@@ -37,8 +37,8 @@
     d.vm.network "private_network",
         ip: "0.0.0.0", # not used, ignore
         auto_config: false,
-        virtualbox__intnet: settings['mgmt_network_name'],
-        libvirt__network_name: settings['mgmt_network_name'],
+        virtualbox__intnet: settings['vm_management_network_name'],
+        libvirt__network_name: settings['vm_management_network_name'],
         libvirt__forward_mode: "none",
         libvirt__dhcp_enabled: false
     d.vm.network "private_network",
@@ -71,8 +71,8 @@
       #type: "dhcp",
       ip: "0.0.0.0",
       auto_config: false,
-      virtualbox__intnet: settings['mgmt_network_name'],
-      libvirt__network_name: settings['mgmt_network_name'],
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
       mac: "cc37ab000011"
     s.vm.network "private_network",
       ip: "0.1.0.0",
@@ -131,8 +131,8 @@
       #type: "dhcp",
       ip: "0.0.0.0",
       auto_config: false,
-      virtualbox__intnet: settings['mgmt_network_name'],
-      libvirt__network_name: settings['mgmt_network_name'],
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
       mac: "cc37ab000012"
     s.vm.network "private_network",
       ip: "0.3.0.0",
@@ -191,8 +191,8 @@
       #type: "dhcp",
       ip: "0.0.0.0",
       auto_config: false,
-      virtualbox__intnet: settings['mgmt_network_name'],
-      libvirt__network_name: settings['mgmt_network_name'],
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
       mac: "cc37ab000021"
     s.vm.network "private_network",
       ip: "0.5.0.0",
@@ -237,8 +237,8 @@
       #type: "dhcp",
       ip: "0.0.0.0",
       auto_config: false,
-      virtualbox__intnet: settings['mgmt_network_name'],
-      libvirt__network_name: settings['mgmt_network_name'],
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
       mac: "cc37ab000022"
     s.vm.network "private_network",
       ip: "0.6.0.0",
@@ -282,8 +282,8 @@
       adapter: 1,
       ip: "0.0.0.0",
       auto_config: false,
-      virtualbox__intnet: settings['mgmt_network_name'],
-      libvirt__network_name: settings['mgmt_network_name']
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name']
     c.vm.network "private_network",
       adapter: 2,         # The fabric interface for each node
       ip: "0.2.0.0",
@@ -316,8 +316,8 @@
       adapter: 1,
       ip: "0.0.0.0",
       auto_config: false,
-      virtualbox__intnet: settings['mgmt_network_name'],
-      libvirt__network_name: settings['mgmt_network_name']
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name']
     c.vm.network "private_network",
       adapter: 2,         # The fabric interface for each node
       ip: "0.3.0.0",
@@ -350,8 +350,8 @@
       adapter: 1,
       ip: "0.0.0.0",
       auto_config: false,
-      virtualbox__intnet: settings['mgmt_network_name'],
-      libvirt__network_name: settings['mgmt_network_name']
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name']
     c.vm.network "private_network",
       adapter: 2,         # The fabric interface for each node
       ip: "0.4.0.0",
diff --git a/scenarios/cord/config.yml b/scenarios/cord/config.yml
index b2883c1..729b96b 100644
--- a/scenarios/cord/config.yml
+++ b/scenarios/cord/config.yml
@@ -2,26 +2,33 @@
 # cord Scenario
 # for both Physical (using a corddev VM) and Virtual (using multiple VMs)
 
+# cord profile config
+frontend_only: False
+
+build_cord_dir: "/home/vagrant/cord"
+
+buildnode: corddev
+headnode: prod
+
+# docker config
+deploy_docker_tag: "candidate"
+
 # make build config
 build_targets:
  - deploy-maas
  - onboard-openstack
 
-vagrant_vms:
-  - corddev
-  - prod
-
 vagrant_up_prereqs:
   - prereqs-check
 
-# have to copy cord and config to physical/virtual nodes
+# Copy cord and config to physical/virtual nodes
 cord_config_prereqs:
   - copy-cord
 
 copy_config_prereqs:
   - cord-config
 
-# can prep build and head node simultaneously
+# Can prep build and head node simultaneously
 prep_buildnode_prereqs:
   - copy-cord
   - copy-config
@@ -30,10 +37,17 @@
   - copy-cord
   - copy-config
 
-build_cord_dir: "/home/vagrant/cord"
+# Start elasticstack before XOS/ONOS, which log to it
+start_xos_prereqs:
+  - deploy-elasticstack
 
-buildnode: corddev
-headnode: prod
+deploy_onos_prereqs:
+  - deploy-elasticstack
+
+# Vagrant VM configuration
+vagrant_vms:
+  - corddev
+  - prod
 
 dev_vm_mem: 2048
 dev_vm_cpu: 1
@@ -47,17 +61,11 @@
 switch_vm_mem: 512
 switch_vm_cpu: 1
 
-mgmt_network_name: cordmgmt
+vm_management_network_name: cordmgmt
 
 # Enable experimental fabric
 enable_fabric: False
 
-# cord profile config
-frontend_only: False
-
-# docker config
-deploy_docker_tag: "candidate"
-
 # images for imagebuilder to build/pull (tagged elsewhere)
 docker_image_whitelist:
   - "xosproject/xos-base"
@@ -69,23 +77,34 @@
   - "xosproject/xos-libraries"
   - "xosproject/xos-postgres"
   - "xosproject/xos-tosca"
-  - "xosproject/xos-synchronizer-base"
   - "xosproject/xos-ws"
   - "xosproject/chameleon"
   - "xosproject/gui-extension-rcord"
   - "xosproject/gui-extension-sample"
   - "xosproject/gui-extension-vtr"
+  - "xosproject/xos-synchronizer-base"
+  - "xosproject/exampleservice-synchronizer"
+  - "xosproject/fabric-synchronizer"
   - "xosproject/onos-synchronizer"
   - "xosproject/openstack-synchronizer"
+  - "xosproject/volt-synchronizer"
   - "xosproject/vrouter-synchronizer"
+  - "xosproject/vsg-synchronizer"
   - "xosproject/vtn-synchronizer"
-  - "xosproject/exampleservice-synchronizer"
+  - "xosproject/vtr-synchronizer"
   - "gliderlabs/consul-server"
   - "gliderlabs/registrator"
   - "nginx"
   - "onosproject/onos"
   - "redis"
 
+# node topology
+physical_node_list:
+  - name: prod
+    ipv4_last_octet: 1
+    aliases:
+      - head
+
 # Inventory for ansible, used to generate inventory.ini
 inventory_groups:
 
diff --git a/scenarios/opencloud/Vagrantfile b/scenarios/opencloud/Vagrantfile
index 91ad1bd..626d792 100644
--- a/scenarios/opencloud/Vagrantfile
+++ b/scenarios/opencloud/Vagrantfile
@@ -17,13 +17,13 @@
       v.memory = settings['head_vm_mem']
       v.cpus = settings['head_vm_cpu']
       v.machine_virtual_size = 100
-      v.management_network_name = settings['public_network_name'] # public network
-      v.management_network_address = settings['public_network_cidr']
+      v.management_network_name = settings['vm_public_network_name'] # public network
+      v.management_network_address = settings['vm_public_network_cidr']
     end
     h.vm.network "private_network", # management network, eth1
       ip: "0.1.1.0", # not used, ignore
       auto_config: false,
-      libvirt__network_name: settings['mgmt_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
   end
@@ -32,16 +32,16 @@
     c.vm.hostname = "compute1"
     c.vm.provider :libvirt do |v|
       v.memory = settings['compute_vm_mem']
-      v.cpus = settings['compute_vm_cpu'] 
+      v.cpus = settings['compute_vm_cpu']
       v.machine_virtual_size = 50
       v.nested = true
-      v.management_network_name = settings['public_network_name'] # public network
-      v.management_network_address = settings['public_network_cidr']
+      v.management_network_name = settings['vm_public_network_name'] # public network
+      v.management_network_address = settings['vm_public_network_cidr']
     end
     c.vm.network "private_network", # management network, eth1
       ip: "0.1.1.0",
       auto_config: false,
-      libvirt__network_name: settings['mgmt_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
   end
@@ -50,16 +50,16 @@
     c.vm.hostname = "compute2"
     c.vm.provider :libvirt do |v|
       v.memory = settings['compute_vm_mem']
-      v.cpus = settings['compute_vm_cpu'] 
+      v.cpus = settings['compute_vm_cpu']
       v.machine_virtual_size = 50
       v.nested = true
-      v.management_network_name = settings['public_network_name'] # public network
-      v.management_network_address = settings['public_network_cidr']
+      v.management_network_name = settings['vm_public_network_name'] # public network
+      v.management_network_address = settings['vm_public_network_cidr']
     end
     c.vm.network "private_network", # management network, eth1
       ip: "0.1.1.0",
       auto_config: false,
-      libvirt__network_name: settings['mgmt_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
   end
diff --git a/scenarios/opencloud/config.yml b/scenarios/opencloud/config.yml
index 7945ca7..f705289 100644
--- a/scenarios/opencloud/config.yml
+++ b/scenarios/opencloud/config.yml
@@ -14,14 +14,14 @@
 buildnode: head1
 headnode: head1
 
-# make build config
+# Make build config
 build_targets:
  - onboard-openstack
 
 vagrant_up_prereqs:
   - prereqs-check
 
-# have to copy cord and config to physical/virtual nodes
+# Copy cord and config to physical/virtual nodes
 cord_config_prereqs:
   - copy-cord
 
@@ -32,10 +32,28 @@
   - copy-cord
   - copy-config
 
-# have to run build prep before head prep, when build == head
+# Run build prep before head prep, when build == head
 prep_headnode_prereqs:
   - prep-buildnode
 
+# Start elasticstack before XOS/ONOS, which log to it
+start_xos_prereqs:
+  - deploy-elasticstack
+
+deploy_onos_prereqs:
+  - deploy-elasticstack
+
+# node topology, used to bring up management interfaces
+physical_node_list:
+  - name: head1
+    ipv4_last_octet: 1
+    aliases:
+      - head
+  - name: compute1
+    ipv4_last_octet: 20
+  - name: compute2
+    ipv4_last_octet: 21
+
 # Vagrant VM configuration
 vagrant_vms:
   - head1
@@ -47,9 +65,9 @@
 compute_vm_mem: 16384
 compute_vm_cpu: 8
 
-mgmt_network_name: cordmgmt
-public_network_name: cordpub
-public_network_cidr: "10.230.100.0/24"
+vm_management_network_name: cordmgmt
+vm_public_network_name: cordpub
+vm_public_network_cidr: "10.230.100.0/24"
 
 # images for imagebuilder to build/pull (tagged elsewhere)
 docker_image_whitelist:
@@ -62,23 +80,25 @@
   - "xosproject/xos-libraries"
   - "xosproject/xos-postgres"
   - "xosproject/xos-tosca"
-  - "xosproject/xos-synchronizer-base"
   - "xosproject/xos-ws"
   - "xosproject/chameleon"
   - "xosproject/gui-extension-rcord"
   - "xosproject/gui-extension-sample"
   - "xosproject/gui-extension-vtr"
+  - "xosproject/xos-synchronizer-base"
+  - "xosproject/exampleservice-synchronizer"
   - "xosproject/onos-synchronizer"
   - "xosproject/openstack-synchronizer"
   - "xosproject/vrouter-synchronizer"
   - "xosproject/vtn-synchronizer"
-  - "xosproject/exampleservice-synchronizer"
   - "gliderlabs/consul-server"
   - "gliderlabs/registrator"
   - "nginx"
   - "onosproject/onos"
   - "redis"
 
+
+
 # Ansible Inventory
 inventory_groups:
 
diff --git a/scenarios/single/Vagrantfile b/scenarios/single/Vagrantfile
new file mode 100644
index 0000000..78a7d21
--- /dev/null
+++ b/scenarios/single/Vagrantfile
@@ -0,0 +1,35 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# single Scenario Vagrantfile
+
+require 'yaml'
+settings = YAML.load_file('genconfig/config.yml')
+
+Vagrant.configure("2") do |config|
+
+  config.vm.box = "ubuntu/trusty64"
+
+  # sync these folders with VM
+  config.vm.synced_folder "../../../", "/opt/cord/", create: true
+  config.vm.synced_folder "../../../../cord_profile/", "/opt/cord_profile/", create: true
+  config.vm.synced_folder "../../platform-install/credentials/", "/opt/credentials/", create: true
+
+  # set the headnode VM
+  config.vm.define "headnode" do |d|
+    d.vm.hostname = "headnode"
+    d.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
+    d.vm.network "private_network",
+      ip: settings['head_vm_ip']
+    d.vm.provider :virtualbox do |vb|
+      vb.memory = settings['head_vm_mem']
+      vb.cpus = settings['head_vm_cpu']
+    end
+    d.vm.provider :libvirt do |v|
+      v.memory = settings['head_vm_mem']
+      v.cpus = settings['head_vm_cpu']
+    end
+  end
+
+end
+
diff --git a/scenarios/single/config.yml b/scenarios/single/config.yml
new file mode 100644
index 0000000..4ceb2cb
--- /dev/null
+++ b/scenarios/single/config.yml
@@ -0,0 +1,88 @@
+---
+# single head-node scenario
+# Brings up a VM with synchronizers, for more complicated testing without OpenStack
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..') | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+build_cord_dir: /opt/cord
+
+# Vagrant VM configuration
+head_vm_mem: 4096
+head_vm_cpu: 8
+head_vm_ip: "192.168.46.100"
+
+vagrant_vms:
+  - headnode
+
+buildnode: headnode
+headnode: headnode
+
+physical_node_list:
+  - name: headnode
+    ipv4_last_octet: 1
+    aliases:
+      - head
+
+# make targets
+build_targets:
+  - onboard-profile
+
+prep_headnode_prereqs:
+  - prep-buildnode
+
+# single scenario configuration
+frontend_only: False
+use_maas: False
+use_apt_cache: False
+use_openstack: False
+xos_images: []
+
+# whitelist of images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-libraries"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-tosca"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "xosproject/xos-synchronizer-base"
+  - "xosproject/exampleservice-synchronizer"
+  - "xosproject/fabric-synchronizer"
+  - "xosproject/onos-synchronizer"
+  - "xosproject/openstack-synchronizer"
+  - "xosproject/volt-synchronizer"
+  - "xosproject/vrouter-synchronizer"
+  - "xosproject/vsg-synchronizer"
+  - "xosproject/vtn-synchronizer"
+  - "xosproject/vtr-synchronizer"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "onosproject/onos"
+  - "redis"
+
+# Ansible Inventory
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    headnode:
+
+  head:
+    headnode:
+
+  compute:
+