[CORD-795]
Docker image building/tagging/labeling, Makefile based build
Change-Id: I0d70ab01353999c0e0585547582af9c62d247987
diff --git a/scenarios/cord/Vagrantfile b/scenarios/cord/Vagrantfile
new file mode 100644
index 0000000..a120989
--- /dev/null
+++ b/scenarios/cord/Vagrantfile
@@ -0,0 +1,388 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+$cordpath = ".."
+
+Vagrant.configure(2) do |config|
+
+  # corddev: the development/build VM that drives the rest of the deployment.
+  config.vm.define "corddev" do |d|
+    d.ssh.forward_agent = true  # reuse the host's SSH agent inside the VM
+    d.vm.box = "ubuntu/trusty64"
+    d.vm.hostname = "corddev"
+    d.vm.network "private_network", ip: "10.100.198.200"
+    d.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/corddev.yml -c local"
+    d.vm.provider "virtualbox" do |v|
+      v.memory = 2048
+    end
+    d.vm.provider :libvirt do |v, override|
+      v.memory = 2048
+      # libvirt guests get the cord tree via NFS instead of VirtualBox shared folders
+      override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+  end
+
+ config.vm.define "prod" do |d|
+ d.vm.box = "ubuntu/trusty64"
+ d.vm.hostname = "prod"
+ d.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
+ d.vm.network "private_network", ip: "10.100.198.201"
+ d.vm.network "private_network",
+ ip: "0.0.0.0",
+ auto_config: false,
+ virtualbox__intnet: "cord-mgmt-network",
+ libvirt__network_name: "cord-mgmt-network",
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ d.vm.network "private_network",
+ ip: "0.1.0.0",
+ mac: "02420a060101",
+ auto_config: false,
+ virtualbox__intnet: "head-node-leaf-1",
+ libvirt__network_name: "head-node-leaf-1",
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ d.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+ d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/prod.yml -c local"
+ d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 cd /cord/build/platform-install; ansible-playbook -i inventory/head-localhost deploy-elasticstack-playbook.yml"
+ d.vm.provider "virtualbox" do |v|
+ v.memory = 2048
+ end
+ d.vm.provider :libvirt do |v, override|
+ v.memory = 24576
+ v.cpus = 8
+ v.storage :file, :size => '100G', :type => 'qcow2'
+ override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+ override.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/add-extra-drive.yml -c local"
+ end
+ end
+
+ config.vm.define "switch" do |s|
+ s.vm.box = "ubuntu/trusty64"
+ s.vm.hostname = "fakeswitch"
+ s.vm.network "private_network", ip: "10.100.198.253"
+ s.vm.network "private_network",
+ type: "dhcp",
+ virtualbox__intnet: "cord-fabric-network",
+ libvirt__network_name: "cord-fabric-network",
+ mac: "cc37ab000001"
+ s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+ s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/fakeswitch.yml -c local"
+ s.vm.provider "virtualbox" do |v|
+ v.memory = 1048
+ v.name = "fakeswitch"
+ end
+ end
+
+  # leaf-1: virtual leaf (ToR) switch; one interface per attached node/spine link.
+  config.vm.define "leaf-1" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "leaf-1"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000011"
+    s.vm.network "private_network",
+      ip: "0.1.0.0",
+      auto_config: false,
+      virtualbox__intnet: "head-node-leaf-1",
+      libvirt__network_name: "head-node-leaf-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.2.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-1-leaf-1",
+      libvirt__network_name: "compute-node-1-leaf-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.5.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-1",
+      libvirt__network_name: "leaf-1-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.6.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-2",
+      libvirt__network_name: "leaf-1-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    # FABRIC=1 additionally runs install.sh -3f and passes fabric=true to the playbook
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+      v.memory = 512
+      v.cpus = 1
+      override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+      v.memory = 512
+      v.cpus = 1
+    end
+  end
+
+  # leaf-2: second virtual leaf switch, serving compute-node-2 and -3.
+  # NOTE(review): net_prefix=10.6.1 below matches leaf-1 exactly -- confirm
+  # leaf-2 shouldn't use a distinct prefix (e.g. 10.6.2).
+  config.vm.define "leaf-2" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "leaf-2"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000012"
+    s.vm.network "private_network",
+      ip: "0.3.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-2-leaf-2",
+      libvirt__network_name: "compute-node-2-leaf-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.4.0.0",
+      auto_config: false,
+      virtualbox__intnet: "compute-node-3-leaf-2",
+      libvirt__network_name: "compute-node-3-leaf-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.7.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-1",
+      libvirt__network_name: "leaf-2-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.8.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-2",
+      libvirt__network_name: "leaf-2-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/leafswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+      v.memory = 512
+      v.cpus = 1
+      override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+      v.memory = 512
+      v.cpus = 1
+    end
+  end
+
+  # spine-1: virtual spine switch, uplinked to both leaves.
+  config.vm.define "spine-1" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "spine-1"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000021"
+    s.vm.network "private_network",
+      ip: "0.5.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-1",
+      libvirt__network_name: "leaf-1-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.7.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-1",
+      libvirt__network_name: "leaf-2-spine-1",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    # FABRIC=1 additionally runs install.sh -3f and passes fabric=true to the playbook
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+      v.memory = 512
+      v.cpus = 1
+      override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+      v.memory = 512
+      v.cpus = 1
+    end
+  end
+
+  # spine-2: second virtual spine switch, uplinked to both leaves.
+  config.vm.define "spine-2" do |s|
+    s.vm.box = "ubuntu/trusty64"
+    s.vm.hostname = "spine-2"
+    s.vm.network "private_network",
+      #type: "dhcp",
+      ip: "0.0.0.0",
+      auto_config: false,
+      virtualbox__intnet: "cord-mgmt-network",
+      libvirt__network_name: "cord-mgmt-network",
+      mac: "cc37ab000022"
+    s.vm.network "private_network",
+      ip: "0.6.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-1-spine-2",
+      libvirt__network_name: "leaf-1-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.network "private_network",
+      ip: "0.8.0.0",
+      auto_config: false,
+      virtualbox__intnet: "leaf-2-spine-2",
+      libvirt__network_name: "leaf-2-spine-2",
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    s.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+    if (ENV['FABRIC'] == "1")
+      s.vm.provision :shell, path: $cordpath + "/build/scripts/install.sh", args: "-3f"
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'fabric=true net_prefix=10.6.1'"
+    else
+      s.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/spineswitch.yml -c local -e 'net_prefix=10.6.1'"
+    end
+    s.vm.provider :libvirt do |v, override|
+      v.memory = 512
+      v.cpus = 1
+      override.vm.synced_folder $cordpath, "/cord", type: "nfs"
+    end
+    s.vm.provider "virtualbox" do |v, override|
+      v.memory = 512
+      v.cpus = 1
+    end
+  end
+
+ config.vm.define "testbox" do |d|
+ d.vm.box = "fgrehm/trusty64-lxc"
+ d.ssh.forward_agent = true
+ d.vm.hostname = "testbox"
+ d.vm.network "private_network", ip: "10.0.3.100", lxc__bridge_name: 'lxcbr0'
+ d.vm.provision :shell, path: $cordpath + "/build/scripts/bootstrap_ansible.sh"
+ d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/build/ansible/corddev.yml -c local"
+ config.vm.provider :lxc do |lxc|
+ # Same effect as 'customize ["modifyvm", :id, "--memory", "1024"]' for VirtualBox
+ lxc.customize 'cgroup.memory.limit_in_bytes', '2048M'
+ lxc.customize 'aa_profile', 'unconfined'
+ lxc.customize 'cgroup.devices.allow', 'b 7:* rwm'
+ lxc.customize 'cgroup.devices.allow', 'c 10:237 rwm'
+ end
+ end
+
+ config.vm.define "compute-node-1" do |c|
+ c.vm.communicator = "none"
+ c.vm.hostname = "compute-node-1"
+ c.vm.network "private_network",
+ adapter: 1,
+ ip: "0.0.0.0",
+ auto_config: false,
+ virtualbox__intnet: "cord-mgmt-network",
+ libvirt__network_name: "cord-mgmt-network"
+ c.vm.network "private_network",
+ adapter: 2, # The fabric interface for each node
+ ip: "0.2.0.0",
+ auto_config: false,
+ virtualbox__intnet: "compute-node-1-leaf-1",
+ libvirt__network_name: "compute-node-1-leaf-1",
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ c.vm.provider :libvirt do |v|
+ v.memory = 8192
+ v.cpus = 4
+ v.machine_virtual_size = 100
+ v.storage :file, :size => '100G', :type => 'qcow2'
+ v.boot 'network'
+ v.boot 'hd'
+ v.nested = true
+ end
+ c.vm.provider "virtualbox" do |v, override|
+ override.vm.box = "clink15/pxe"
+ v.memory = 1048
+ v.gui = "true"
+ end
+ end
+
+ config.vm.define "compute-node-2" do |c|
+ c.vm.communicator = "none"
+ c.vm.hostname = "compute-node-2"
+ c.vm.network "private_network",
+ adapter: 1,
+ ip: "0.0.0.0",
+ auto_config: false,
+ virtualbox__intnet: "cord-mgmt-network",
+ libvirt__network_name: "cord-mgmt-network"
+ c.vm.network "private_network",
+ adapter: 2, # The fabric interface for each node
+ ip: "0.3.0.0",
+ auto_config: false,
+ virtualbox__intnet: "compute-node-2-leaf-2",
+ libvirt__network_name: "compute-node-2-leaf-2",
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ c.vm.provider :libvirt do |v|
+ v.memory = 8192
+ v.cpus = 4
+ v.machine_virtual_size = 100
+ v.storage :file, :size => '100G', :type => 'qcow2'
+ v.boot 'network'
+ v.boot 'hd'
+ v.nested = true
+ end
+ c.vm.provider "virtualbox" do |v, override|
+ override.vm.box = "clink15/pxe"
+ v.memory = 1048
+ v.gui = "true"
+ end
+ end
+
+ config.vm.define "compute-node-3" do |c|
+ c.vm.communicator = "none"
+ c.vm.hostname = "compute-node-3"
+ c.vm.network "private_network",
+ adapter: 1,
+ ip: "0.0.0.0",
+ auto_config: false,
+ virtualbox__intnet: "cord-mgmt-network",
+ libvirt__network_name: "cord-mgmt-network"
+ c.vm.network "private_network",
+ adapter: 2, # The fabric interface for each node
+ ip: "0.4.0.0",
+ auto_config: false,
+ virtualbox__intnet: "compute-node-3-leaf-2",
+ libvirt__network_name: "compute-node-3-leaf-2",
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ c.vm.provider :libvirt do |v|
+ v.memory = 8192
+ v.cpus = 4
+ v.machine_virtual_size = 100
+ v.storage :file, :size => '100G', :type => 'qcow2'
+ v.boot 'network'
+ v.boot 'hd'
+ v.nested = true
+ end
+ c.vm.provider "virtualbox" do |v, override|
+ override.vm.box = "clink15/pxe"
+ v.memory = 1048
+ v.gui = "true"
+ end
+ end
+
+end
+
diff --git a/scenarios/cord/config.yml b/scenarios/cord/config.yml
new file mode 100644
index 0000000..c9522c6
--- /dev/null
+++ b/scenarios/cord/config.yml
@@ -0,0 +1,81 @@
+---
+# cord Scenario
+# for both Physical (using a corddev VM) and Virtual (using multiple VMs)
+
+# make build config
+build_targets:
+  - deploy-maas
+  - onboard-openstack
+
+vagrant_vms:
+  - corddev
+  - prod
+
+vagrant_up_prereqs:
+  - prereqs-check
+
+# have to copy cord and config to physical/virtual nodes
+cord_config_prereqs:
+  - copy-cord
+
+copy_config_prereqs:
+  - cord-config
+
+# can prep build and head node simultaneously
+prep_buildnode_prereqs:
+  - copy-cord
+  - copy-config
+
+prep_headnode_prereqs:
+  - copy-cord
+  - copy-config
+
+# which VMs act as the build node and the head node
+buildnode: corddev
+headnode: prod
+
+# cord profile config
+frontend_only: False
+
+# docker config
+deploy_docker_tag: "candidate"
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-synchronizer-base"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "xosproject/onos-synchronizer"
+  - "xosproject/openstack-synchronizer"
+  - "xosproject/vrouter-synchronizer"
+  - "xosproject/vtn-synchronizer"
+  - "xosproject/exampleservice-synchronizer"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "onosproject/onos"
+  - "redis"
+
+# Inventory for ansible, used to generate inventory.ini
+# (a host key with no value means: member of the group, no per-host vars;
+#  the empty `compute:` group is presumably populated at deploy time -- confirm)
+inventory_groups:
+
+  config:
+    corddev:
+
+  build:
+    corddev:
+
+  head:
+    prod:
+
+  compute:
+
diff --git a/scenarios/local/config.yml b/scenarios/local/config.yml
new file mode 100644
index 0000000..1032e31
--- /dev/null
+++ b/scenarios/local/config.yml
@@ -0,0 +1,58 @@
+---
+# local Scenario
+# Brings up a minimal set of containers on the host currently being run on
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+# head = config in local scenario
+head_cord_dir: "{{ config_cord_dir }}"
+head_cord_profile_dir: "{{ config_cord_profile_dir }}"
+
+# make targets
+build_targets:
+  - local-onboard-profile
+
+# local scenario configuration
+frontend_only: True
+create_configdirs_become: False
+use_openstack: False
+xos_images: []
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "redis"
+  - "nginx"
+
+# Ansible Inventory
+# (all groups run on localhost with a local connection -- no VMs in this scenario)
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    localhost:
+      ansible_connection: local
+
+  head:
+    localhost:
+      ansible_connection: local
+
+  compute:
+
diff --git a/scenarios/mock/Vagrantfile b/scenarios/mock/Vagrantfile
new file mode 100644
index 0000000..bedc938
--- /dev/null
+++ b/scenarios/mock/Vagrantfile
@@ -0,0 +1,34 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# mock Scenario Vagrantfile
+
+require 'yaml'
+# VM sizing and IP come from the generated scenario config
+settings = YAML.load_file('genconfig/config.yml')
+
+# NOTE(review): other scenario Vagrantfiles call Vagrant.configure(2); the
+# string "2" is equivalent, but consider making the call style consistent.
+Vagrant.configure("2") do |config|
+
+  config.vm.box = "ubuntu/trusty64"
+
+  # sync these folders with VM
+  config.vm.synced_folder "../../../", "/opt/cord/", create: true
+  config.vm.synced_folder "../../../../cord_profile/", "/opt/cord_profile/", create: true
+
+  # set the headnode VM
+  config.vm.define "headnode" do |d|
+    d.vm.hostname = "headnode"
+    d.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
+    d.vm.network "private_network",
+      ip: settings['head_vm_ip']
+    d.vm.provider :virtualbox do |vb|
+      vb.memory = settings['head_vm_mem']
+      vb.cpus = settings['head_vm_cpu']
+    end
+    d.vm.provider :libvirt do |v|
+      v.memory = settings['head_vm_mem']
+      v.cpus = settings['head_vm_cpu']
+    end
+  end
+
+end
+
diff --git a/scenarios/mock/config.yml b/scenarios/mock/config.yml
new file mode 100644
index 0000000..9c182ac
--- /dev/null
+++ b/scenarios/mock/config.yml
@@ -0,0 +1,75 @@
+---
+# mock ("mockup") scenario
+# Brings up just core containers without synchronizers, for API tests and GUI dev
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..') | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+build_cord_dir: /opt/cord
+
+# Vagrant VM configuration (consumed by the scenario Vagrantfile via genconfig)
+head_vm_mem: 2048
+head_vm_cpu: 4
+head_vm_ip: "192.168.46.100"
+
+vagrant_vms:
+  - headnode
+
+# build and head are the same VM in this scenario
+buildnode: headnode
+headnode: headnode
+
+physical_node_list:
+  - name: headnode
+    ipv4_last_octet: 1
+    aliases:
+      - head
+
+# make targets
+build_targets:
+  - onboard-profile
+
+# build == head here, so prep the build node before the head node
+prep_headnode_prereqs:
+  - prep-buildnode
+
+# mock profile configuration
+frontend_only: True
+use_maas: False
+use_apt_cache: False
+use_openstack: False
+xos_images: []
+
+# whitelist of images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "redis"
+
+# Ansible Inventory
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    headnode:
+
+  head:
+    headnode:
+
+  compute:
+
diff --git a/scenarios/opencloud/Vagrantfile b/scenarios/opencloud/Vagrantfile
new file mode 100644
index 0000000..91ad1bd
--- /dev/null
+++ b/scenarios/opencloud/Vagrantfile
@@ -0,0 +1,68 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# opencloud Vagrantfile
+
+require 'yaml'
+# VM sizing and network names come from the generated scenario config
+settings = YAML.load_file('genconfig/config.yml')
+
+Vagrant.configure(2) do |config|
+
+  config.vm.box = "ubuntu/trusty64"
+
+  # head1: head node VM (libvirt only; web UI forwarded to host port 8080)
+  config.vm.define "head1" do |h|
+    h.vm.hostname = "head1"
+    h.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
+    h.vm.provider :libvirt do |v|
+      v.memory = settings['head_vm_mem']
+      v.cpus = settings['head_vm_cpu']
+      v.machine_virtual_size = 100
+      v.management_network_name = settings['public_network_name'] # public network
+      v.management_network_address = settings['public_network_cidr']
+    end
+    h.vm.network "private_network", # management network, eth1
+      ip: "0.1.1.0", # not used, ignore
+      auto_config: false,
+      libvirt__network_name: settings['mgmt_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+  end
+
+  # NOTE(review): compute1 and compute2 below are identical except for the
+  # hostname -- consider generating them in a loop to avoid drift.
+  config.vm.define "compute1" do |c|
+    c.vm.hostname = "compute1"
+    c.vm.provider :libvirt do |v|
+      v.memory = settings['compute_vm_mem']
+      v.cpus = settings['compute_vm_cpu']
+      v.machine_virtual_size = 50
+      v.nested = true
+      v.management_network_name = settings['public_network_name'] # public network
+      v.management_network_address = settings['public_network_cidr']
+    end
+    c.vm.network "private_network", # management network, eth1
+      ip: "0.1.1.0",
+      auto_config: false,
+      libvirt__network_name: settings['mgmt_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+  end
+
+  config.vm.define "compute2" do |c|
+    c.vm.hostname = "compute2"
+    c.vm.provider :libvirt do |v|
+      v.memory = settings['compute_vm_mem']
+      v.cpus = settings['compute_vm_cpu']
+      v.machine_virtual_size = 50
+      v.nested = true
+      v.management_network_name = settings['public_network_name'] # public network
+      v.management_network_address = settings['public_network_cidr']
+    end
+    c.vm.network "private_network", # management network, eth1
+      ip: "0.1.1.0",
+      auto_config: false,
+      libvirt__network_name: settings['mgmt_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+  end
+
+end
+
diff --git a/scenarios/opencloud/config.yml b/scenarios/opencloud/config.yml
new file mode 100644
index 0000000..cfc9eb2
--- /dev/null
+++ b/scenarios/opencloud/config.yml
@@ -0,0 +1,95 @@
+---
+# opencloud Scenario
+# For a virtual install, physical does not use VMs
+
+# opencloud profile config
+frontend_only: False
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+build_cord_dir: /opt/cord
+
+# build and head are the same VM in this scenario
+buildnode: head1
+headnode: head1
+
+# make build config
+build_targets:
+  - onboard-openstack
+
+vagrant_up_prereqs:
+  - prereqs-check
+
+# have to copy cord and config to physical/virtual nodes
+cord_config_prereqs:
+  - copy-cord
+
+copy_config_prereqs:
+  - cord-config
+
+prep_buildnode_prereqs:
+  - copy-cord
+  - copy-config
+
+# have to run build prep before head prep, when build == head
+prep_headnode_prereqs:
+  - prep-buildnode
+
+# Vagrant VM configuration
+# NOTE(review): compute2 is defined in the Vagrantfile but not listed here --
+# confirm it is intentionally brought up manually/later.
+vagrant_vms:
+  - head1
+  - compute1
+
+head_vm_mem: 10240
+head_vm_cpu: 8
+
+compute_vm_mem: 16384
+compute_vm_cpu: 8
+
+mgmt_network_name: cordmgmt
+public_network_name: cordpub
+public_network_cidr: "10.230.100.0/24"
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-extension-builder"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-synchronizer-base"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/gui-extension-rcord"
+  - "xosproject/gui-extension-sample"
+  - "xosproject/gui-extension-vtr"
+  - "xosproject/onos-synchronizer"
+  - "xosproject/openstack-synchronizer"
+  - "xosproject/vrouter-synchronizer"
+  - "xosproject/vtn-synchronizer"
+  - "xosproject/exampleservice-synchronizer"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "onosproject/onos"
+  - "redis"
+
+# Ansible Inventory
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    head1:
+
+  head:
+    head1:
+
+  compute:
+    compute1: