CORD-1401 Make targets for CiaB
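
Split ALL_MILESTONES into per-component groups (prep, MaaS, OpenStack,
XOS, ONOS, post-install), add build/publish targets for MaaS images and
ONOS apps, rename the CiaB VMs (prod -> head1, leaf-N -> leafN,
compute-node-N -> computeN), and add post-install targets that bring up
the switch and compute VMs.
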
Change-Id: I2a59ed684b37854a98cf6e2f30ac90200703585c
diff --git a/Makefile b/Makefile
index c408ccd..2695f39 100644
--- a/Makefile
+++ b/Makefile
@@ -24,7 +24,13 @@
M ?= $(BUILD)/milestones
LOGS ?= $(BUILD)/logs
-ALL_MILESTONES ?= $(M)/prereqs-check $(M)/vagrant-up $(M)/copy-cord $(M)/cord-config $(M)/copy-config $(M)/prep-buildnode $(M)/prep-headnode $(M)/deploy-elasticstack $(M)/prep-computenode $(M)/glance-images $(M)/deploy-openstack $(M)/deploy-maas $(M)/deploy-computenode $(M)/docker-images $(M)/core-image $(M)/start-xos $(M)/onboard-profile $(M)/deploy-onos $(M)/onboard-openstack
+PREP_MS ?= $(M)/prereqs-check $(M)/vagrant-up $(M)/copy-cord $(M)/cord-config $(M)/copy-config $(M)/prep-buildnode $(M)/prep-headnode $(M)/deploy-elasticstack $(M)/prep-computenode
+MAAS_MS ?= $(M)/build-maas-images $(M)/maas-prime $(M)/publish-maas-images $(M)/deploy-maas
+OPENSTACK_MS ?= $(M)/glance-images $(M)/deploy-openstack $(M)/deploy-computenode $(M)/onboard-openstack
+XOS_MS ?= $(M)/docker-images $(M)/core-image $(M)/publish-docker-images $(M)/start-xos $(M)/onboard-profile
+ONOS_MS ?= $(M)/build-onos-apps $(M)/publish-onos-apps $(M)/deploy-onos $(M)/deploy-mavenrepo
+POST_INSTALL_MS ?= $(M)/setup-automation $(M)/setup-ciab-pcu $(M)/vagrant-up-switches $(M)/compute1-up $(M)/compute2-up $(M)/compute3-up
+ALL_MILESTONES ?= $(PREP_MS) $(MAAS_MS) $(OPENSTACK_MS) $(XOS_MS) $(ONOS_MS) $(POST_INSTALL_MS)
LOCAL_MILESTONES ?= $(M)/local-cord-config $(M)/local-docker-images $(M)/local-core-image $(M)/local-start-xos $(M)/local-onboard-profile
@@ -50,6 +56,7 @@
# Vagrant config
VAGRANT_PROVIDER ?= libvirt
VAGRANT_VMS ?= $(HEADNODE)
+VAGRANT_SWITCHES ?= leaf1
VAGRANT_CWD ?= $(SCENARIOS_D)/$(SCENARIO)/
SSH_CONFIG ?= ~/.ssh/config # Vagrant modifies this, should it always?
@@ -116,7 +123,7 @@
rm -f $(M)/vagrant-up
clean-images:
- rm -f $(M)/docker-images $(M)/local-docker-images $(M)/core-image $(M)/local-core-image
+ rm -f $(M)/docker-images $(M)/local-docker-images $(M)/core-image $(M)/local-core-image $(M)/build-maas-images $(M)/build-onos-apps
clean-genconfig:
rm -f $(CONFIG_FILES)
@@ -134,6 +141,7 @@
local-ubuntu-dev-env:
$(ANSIBLE_PB) $(PI)/bootstrap-dev-env.yml $(LOGCMD)
+
# == PREREQS == #
VAGRANT_UP_PREREQS ?=
CORD_CONFIG_PREREQS ?=
@@ -143,10 +151,13 @@
DOCKER_IMAGES_PREREQS ?=
START_XOS_PREREQS ?=
DEPLOY_ONOS_PREREQS ?=
+DEPLOY_OPENSTACK_PREREQS ?=
+SETUP_AUTOMATION_PREREQS ?=
# == MILESTONES == #
# empty target files are touched in the milestones dir to indicate completion
+# Prep targets
$(M)/prereqs-check:
$(ANSIBLE_PB) $(PI)/prereqs-check-playbook.yml $(LOGCMD)
touch $@
@@ -188,41 +199,77 @@
$(ANSIBLE_PB) $(PI)/prep-computenode-playbook.yml $(LOGCMD)
touch $@
-$(M)/glance-images: | $(M)/prep-headnode
- $(ANSIBLE_PB) $(PI)/glance-images-playbook.yml $(LOGCMD)
+
+# MaaS targets
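+# Images are built on the build node, then published once maas-prime has completed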
+$(M)/build-maas-images: | $(M)/prep-buildnode $(BUILD_MAAS_IMAGES_PREREQS)
+ $(SSH_BUILD) "cd $(BUILD_CORD_DIR)/build/maas; make MAKE_CONFIG=../$(MAKEFILE_CONFIG) build" $(LOGCMD)
touch $@
-$(M)/deploy-openstack: | $(M)/deploy-elasticstack $(M)/prep-headnode $(M)/prep-computenode
- $(ANSIBLE_PB) $(PI)/deploy-openstack-playbook.yml $(LOGCMD)
+$(M)/maas-prime: | $(M)/deploy-elasticstack
+ $(ANSIBLE_PB) $(MAAS)/prime-node.yml $(LOGCMD)
touch $@
-$(M)/deploy-maas: | $(M)/deploy-openstack
- $(ANSIBLE_PB) $(MAAS)/head.yml $(LOGCMD)
+$(M)/publish-maas-images: | $(M)/maas-prime $(M)/build-maas-images
+ $(SSH_BUILD) "cd $(BUILD_CORD_DIR)/build/maas; make MAKE_CONFIG=../$(MAKEFILE_CONFIG) publish" $(LOGCMD)
touch $@
-$(M)/deploy-computenode: | $(M)/deploy-openstack
- $(ANSIBLE_PB) $(PI)/deploy-computenode-playbook.yml $(LOGCMD)
+$(M)/deploy-maas: | $(M)/publish-maas-images $(M)/cord-config $(M)/copy-config
+ $(ANSIBLE_PB) $(MAAS)/head-node.yml $(LOGCMD)
touch $@
+
+# ONOS targets
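+# Apps are built on the build node and published once maas-prime completes; deploy-mavenrepo depends on the published apps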
+$(M)/build-onos-apps: | $(M)/prep-buildnode $(BUILD_ONOS_APPS_PREREQS)
+ $(SSH_BUILD) "cd $(BUILD_CORD_DIR)/onos-apps; make MAKE_CONFIG=../$(MAKEFILE_CONFIG) build" $(LOGCMD)
+ touch $@
+
+$(M)/publish-onos-apps: | $(M)/maas-prime $(M)/build-onos-apps
+ $(SSH_BUILD) "cd $(BUILD_CORD_DIR)/onos-apps; make MAKE_CONFIG=../$(MAKEFILE_CONFIG) publish" $(LOGCMD)
+ touch $@
+
+$(M)/deploy-mavenrepo: | $(M)/publish-onos-apps
+ $(ANSIBLE_PB) $(PI)/deploy-mavenrepo-playbook.yml $(LOGCMD)
+ touch $@
+
+$(M)/deploy-onos: | $(M)/docker-images $(DEPLOY_ONOS_PREREQS)
+ $(ANSIBLE_PB) $(PI)/deploy-onos-playbook.yml $(LOGCMD)
+ touch $@
+
+
+# XOS targets
$(M)/docker-images: | $(M)/prep-buildnode $(DOCKER_IMAGES_PREREQS)
- $(SSH_BUILD) "cd /opt/cord/build; $(IMAGEBUILDER) -f $(MASTER_CONFIG) -l $(BUILD)/image_logs -g $(BUILD)/ib_graph.dot -a $(BUILD)/ib_actions.yml " $(LOGCMD)
+ $(SSH_BUILD) "cd $(BUILD_CORD_DIR)/build; $(IMAGEBUILDER) -f $(MASTER_CONFIG) -l $(BUILD)/image_logs -g $(BUILD)/ib_graph.dot -a $(BUILD)/ib_actions.yml " $(LOGCMD)
touch $@
-$(M)/core-image: | $(M)/docker-images $(M)/prep-headnode
+$(M)/core-image: | $(M)/docker-images
$(ANSIBLE_PB) $(PI)/build-core-image-playbook.yml $(LOGCMD)
touch $@
+# Requires the ib_actions.yml file, which lives on the build host
+$(M)/publish-docker-images: | $(M)/maas-prime $(M)/docker-images $(M)/core-image
+ $(SSH_BUILD) "cd $(BUILD_CORD_DIR)/build; $(ANSIBLE_PB_LOCAL) $(PI)/publish-images-playbook.yml" $(LOGCMD)
+ touch $@
+
$(M)/start-xos: | $(M)/prep-headnode $(M)/core-image $(START_XOS_PREREQS)
$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/start-xos-playbook.yml" $(LOGCMD)
touch $@
-$(M)/build-onos-apps: | $(M)/prep-buildnode
- $(SSH_BUILD) "cd /opt/cord/onos-apps; make images" $(LOGCMD)
+$(M)/onboard-profile: | $(M)/start-xos
+ $(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/onboard-profile-playbook.yml" $(LOGCMD)
touch $@
-$(M)/deploy-onos: | $(M)/prep-headnode $(M)/docker-images $(M)/build-onos-apps $(DEPLOY_ONOS_PREREQS)
- $(ANSIBLE_PB) $(PI)/deploy-mavenrepo-playbook.yml $(LOGCMD)
- $(ANSIBLE_PB) $(PI)/deploy-onos-playbook.yml $(LOGCMD)
+
+# OpenStack targets
+$(M)/glance-images: | $(M)/prep-headnode
+ $(ANSIBLE_PB) $(PI)/glance-images-playbook.yml $(LOGCMD)
+ touch $@
+
+$(M)/deploy-openstack: | $(M)/deploy-elasticstack $(M)/prep-headnode $(M)/prep-computenode $(DEPLOY_OPENSTACK_PREREQS)
+ $(ANSIBLE_PB) $(PI)/deploy-openstack-playbook.yml $(LOGCMD)
+ touch $@
+
+$(M)/deploy-computenode: | $(M)/deploy-openstack
+ $(ANSIBLE_PB) $(PI)/deploy-computenode-playbook.yml $(LOGCMD)
touch $@
-$(M)/onboard-profile: | $(M)/start-xos $(M)/deploy-onos
@@ -237,12 +284,34 @@
$(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/onboard-openstack-playbook.yml" $(LOGCMD)
touch $@
+
+# Post-onboarding targets
+$(M)/setup-automation: | $(M)/onboard-profile $(M)/deploy-onos $(SETUP_AUTOMATION_PREREQS)
+ $(ANSIBLE_PB) $(PI)/cord-automation-playbook.yml $(LOGCMD)
+ touch $@
+
+
+# Additional CiaB targets
+$(M)/vagrant-up-switches: | $(M)/setup-automation
+ $(VAGRANT) up $(VAGRANT_SWITCHES) --provider $(VAGRANT_PROVIDER) $(LOGCMD)
+ touch $@
+
+$(M)/setup-ciab-pcu: | $(M)/setup-automation
+ $(ANSIBLE_PB) $(MAAS)/setup-ciab-pcu.yml
+ touch $@
+
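+# Pattern rule covering compute1-up, compute2-up, and compute3-up; $* expands to the matched compute node number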
+$(M)/compute%-up: | $(M)/setup-ciab-pcu $(M)/vagrant-up-switches
+ $(VAGRANT) up compute$* --provider $(VAGRANT_PROVIDER) $(LOGCMD)
+ $(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) ansible/maas-provision.yml --extra-vars='maas_user=maas vagrant_name=cord_compute$*'" $(LOGCMD)
+ touch $@
+
+
# Testing targets
-pod-test: $(M)/onboard-openstack collect-diag
- $(ANSIBLE_PB) $(PI)/pod-test-playbook.yml $(LOGCMD)
+pod-test: $(M)/setup-automation collect-diag
+ $(SSH_HEAD) "cd /opt/cord/build; $(ANSIBLE_PB_LOCAL) $(PI)/pod-test-playbook.yml" $(LOGCMD)
+
# Local Targets, bring up XOS containers without a VM
-
$(M)/local-cord-config:
$(ANSIBLE_PB) $(PI)/cord-config-playbook.yml $(LOGCMD)
touch $@
diff --git a/ansible/head-net.yml b/ansible/head-net.yml
new file mode 100644
index 0000000..8a3c86f
--- /dev/null
+++ b/ansible/head-net.yml
@@ -0,0 +1,7 @@
+---
+
+- hosts: all
+ become: yes
+ serial: 1
+ roles:
+ - head-net
diff --git a/ansible/prod.yml b/ansible/prod.yml
index cf7a678..86b8e1b 100644
--- a/ansible/prod.yml
+++ b/ansible/prod.yml
@@ -5,4 +5,4 @@
serial: 1
roles:
- common
- - prod
+ - head-net
diff --git a/ansible/roles/genconfig/templates/config.mk.j2 b/ansible/roles/genconfig/templates/config.mk.j2
index fa6bcbf..28329aa 100644
--- a/ansible/roles/genconfig/templates/config.mk.j2
+++ b/ansible/roles/genconfig/templates/config.mk.j2
@@ -2,6 +2,9 @@
# ** DO NOT EDIT THIS FILE MANUALLY! **
# Edit the Pod Config (or Scenario) and rerun `make config` to regenerate it
+# Needed for MaaS, not settable
+DOCKER_REGISTRY = docker-registry:5000
+
# Scenario specific config
{% if vagrant_vms is defined %}
VAGRANT_VMS = {{ vagrant_vms | join(' ') }}
@@ -12,12 +15,24 @@
{% if buildnode is defined %}
BUILDNODE = {{ buildnode }}
{% endif %}
+{% if deploy_docker_registry is defined %}
+DEPLOY_DOCKER_REGISTRY = {{ deploy_docker_registry }}
+{% endif %}
{% if deploy_docker_tag is defined %}
DEPLOY_DOCKER_TAG = {{ deploy_docker_tag }}
{% endif %}
{% if config_cord_profile_dir is defined %}
CONFIG_CORD_PROFILE_DIR = {{ config_cord_profile_dir }}
{% endif %}
+{% if build_cord_dir is defined %}
+BUILD_CORD_DIR = {{ build_cord_dir }}
+{% endif %}
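+# skipTags entries from the scenario/pod config are passed to ansible-playbook via --skip-tags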
+{% if skipTags is defined %}
+ANSIBLE_ARGS += --skip-tags "{{ skipTags | join(",") }}"
+{% endif %}
# Targets and prerequisties
{% if build_targets is defined %}
@@ -47,4 +62,10 @@
{% if deploy_onos_prereqs is defined %}
DEPLOY_ONOS_PREREQS = $(M)/{{ deploy_onos_prereqs | join(" $(M)/") }}
{% endif %}
+{% if deploy_openstack_prereqs is defined %}
+DEPLOY_OPENSTACK_PREREQS = $(M)/{{ deploy_openstack_prereqs | join(" $(M)/") }}
+{% endif %}
+{% if setup_automation_prereqs is defined %}
+SETUP_AUTOMATION_PREREQS = $(M)/{{ setup_automation_prereqs | join(" $(M)/") }}
+{% endif %}
diff --git a/ansible/roles/prod/files/fabric.cfg b/ansible/roles/head-net/files/fabric.cfg
similarity index 100%
rename from ansible/roles/prod/files/fabric.cfg
rename to ansible/roles/head-net/files/fabric.cfg
diff --git a/ansible/roles/prod/files/mgmtbr.cfg b/ansible/roles/head-net/files/mgmtbr.cfg
similarity index 100%
rename from ansible/roles/prod/files/mgmtbr.cfg
rename to ansible/roles/head-net/files/mgmtbr.cfg
diff --git a/ansible/roles/prod/tasks/main.yml b/ansible/roles/head-net/tasks/main.yml
similarity index 100%
rename from ansible/roles/prod/tasks/main.yml
rename to ansible/roles/head-net/tasks/main.yml
diff --git a/build.gradle b/build.gradle
index 5cf2f92..76f25a0 100644
--- a/build.gradle
+++ b/build.gradle
@@ -117,11 +117,12 @@
'name': 'redis',
'digest': 'sha256:0fe5a7afa2c2154f37c8ab56a9a6c5023cb0405cc0e85b34d8dcc1de6c3f143e'
],
+ // Node 7.9.0
'node': [
'type': 'image',
'upstream': upstreamReg,
'name': 'node',
- 'digest': 'sha256:5757581a8ff7e08041512a54aa3f573d33fecdce81d603e48a759956cd99bdd3'
+ 'digest': 'sha256:e64b0d7eeb44034d3f2c301d3050b72c1639e683d5a0fd3b8aae3d6ac4789c7a'
],
'gliderlabs/consul-server': [
'type': 'image',
diff --git a/scenarios/cord/Vagrantfile b/scenarios/cord/Vagrantfile
index f3285a9..e93fc68 100644
--- a/scenarios/cord/Vagrantfile
+++ b/scenarios/cord/Vagrantfile
@@ -24,13 +24,13 @@
d.vm.provider :libvirt do |v, override|
v.memory = settings['dev_vm_mem']
v.cpus = settings['dev_vm_cpu']
- override.vm.synced_folder $cordpath, $build_cord_dir, type: "nfs"
+ override.vm.synced_folder $cordpath, settings['build_cord_dir'], type: "nfs"
end
end
- config.vm.define "prod" do |d|
+ config.vm.define "head1" do |d|
d.vm.box = "ubuntu/trusty64"
- d.vm.hostname = "prod"
+ d.vm.hostname = "head1"
d.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '*'
d.vm.network "private_network", ip: "10.100.198.201"
d.vm.synced_folder $cordpath, $ciab_dir
@@ -45,17 +45,20 @@
ip: "0.1.0.0", # not used, ignore
mac: "02420a060101",
auto_config: false,
- virtualbox__intnet: "head-node-leaf-1",
- libvirt__network_name: "head-node-leaf-1",
+ virtualbox__intnet: "head1-leaf1",
+ libvirt__network_name: "head1-leaf1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
d.vm.provider :virtualbox do |v|
- v.memory = settings['dev_vm_mem']
- v.cpus = settings['dev_vm_cpu']
+ v.memory = settings['head_vm_mem']
+ v.cpus = settings['head_vm_cpu']
+ end
+ d.vm.provision :ansible do |ansible|
+ ansible.playbook = $cordpath + "/build/ansible/head-net.yml"
end
d.vm.provider :libvirt do |v, override|
- v.memory = settings['dev_vm_mem']
- v.cpus = settings['dev_vm_cpu']
+ v.memory = settings['head_vm_mem']
+ v.cpus = settings['head_vm_cpu']
v.storage :file, :size => '100G', :type => 'qcow2'
override.vm.provision :ansible do |ansible|
ansible.playbook = $cordpath + "/build/ansible/add-extra-drive.yml" # Needs testing
@@ -63,9 +66,9 @@
end
end
- config.vm.define "leaf-1" do |s|
+ config.vm.define "leaf1" do |s|
s.vm.box = "ubuntu/trusty64"
- s.vm.hostname = "leaf-1"
+ s.vm.hostname = "leaf1"
s.vm.synced_folder $cordpath, $ciab_dir
s.vm.network "private_network",
#type: "dhcp",
@@ -77,29 +80,29 @@
s.vm.network "private_network",
ip: "0.1.0.0",
auto_config: false,
- virtualbox__intnet: "head-node-leaf-1",
- libvirt__network_name: "head-node-leaf-1",
+ virtualbox__intnet: "head1-leaf1",
+ libvirt__network_name: "head1-leaf1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.2.0.0",
auto_config: false,
- virtualbox__intnet: "compute-node-1-leaf-1",
- libvirt__network_name: "compute-node-1-leaf-1",
+ virtualbox__intnet: "compute1-leaf1",
+ libvirt__network_name: "compute1-leaf1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.5.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-1-spine-1",
- libvirt__network_name: "leaf-1-spine-1",
+ virtualbox__intnet: "leaf1-spine1",
+ libvirt__network_name: "leaf1-spine1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.6.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-1-spine-2",
- libvirt__network_name: "leaf-1-spine-2",
+ virtualbox__intnet: "leaf1-spine2",
+ libvirt__network_name: "leaf1-spine2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
if (settings['enable_fabric'])
@@ -123,9 +126,9 @@
end
end
- config.vm.define "leaf-2" do |s|
+ config.vm.define "leaf2" do |s|
s.vm.box = "ubuntu/trusty64"
- s.vm.hostname = "leaf-2"
+ s.vm.hostname = "leaf2"
s.vm.synced_folder $cordpath, $ciab_dir
s.vm.network "private_network",
#type: "dhcp",
@@ -137,29 +140,29 @@
s.vm.network "private_network",
ip: "0.3.0.0",
auto_config: false,
- virtualbox__intnet: "compute-node-2-leaf-2",
- libvirt__network_name: "compute-node-2-leaf-2",
+ virtualbox__intnet: "compute2-leaf2",
+ libvirt__network_name: "compute2-leaf2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.4.0.0",
auto_config: false,
- virtualbox__intnet: "compute-node-3-leaf-2",
- libvirt__network_name: "compute-node-3-leaf-2",
+ virtualbox__intnet: "compute3-leaf2",
+ libvirt__network_name: "compute3-leaf2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.7.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-2-spine-1",
- libvirt__network_name: "leaf-2-spine-1",
+ virtualbox__intnet: "leaf2-spine1",
+ libvirt__network_name: "leaf2-spine1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.8.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-2-spine-2",
- libvirt__network_name: "leaf-2-spine-2",
+ virtualbox__intnet: "leaf2-spine2",
+ libvirt__network_name: "leaf2-spine2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
if (settings['enable_fabric'])
@@ -183,9 +186,9 @@
end
end
- config.vm.define "spine-1" do |s|
+ config.vm.define "spine1" do |s|
s.vm.box = "ubuntu/trusty64"
- s.vm.hostname = "spine-1"
+ s.vm.hostname = "spine1"
s.vm.synced_folder $cordpath, $ciab_dir
s.vm.network "private_network",
#type: "dhcp",
@@ -197,15 +200,15 @@
s.vm.network "private_network",
ip: "0.5.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-1-spine-1",
- libvirt__network_name: "leaf-1-spine-1",
+ virtualbox__intnet: "leaf1-spine1",
+ libvirt__network_name: "leaf1-spine1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.7.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-2-spine-1",
- libvirt__network_name: "leaf-2-spine-1",
+ virtualbox__intnet: "leaf2-spine1",
+ libvirt__network_name: "leaf2-spine1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
if (settings['enable_fabric'])
@@ -229,9 +232,9 @@
end
end
- config.vm.define "spine-2" do |s|
+ config.vm.define "spine2" do |s|
s.vm.box = "ubuntu/trusty64"
- s.vm.hostname = "spine-2"
+ s.vm.hostname = "spine2"
s.vm.synced_folder $cordpath, $ciab_dir
s.vm.network "private_network",
#type: "dhcp",
@@ -243,15 +246,15 @@
s.vm.network "private_network",
ip: "0.6.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-1-spine-2",
- libvirt__network_name: "leaf-1-spine-2",
+ virtualbox__intnet: "leaf1-spine2",
+ libvirt__network_name: "leaf1-spine2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
s.vm.network "private_network",
ip: "0.8.0.0",
auto_config: false,
- virtualbox__intnet: "leaf-2-spine-2",
- libvirt__network_name: "leaf-2-spine-2",
+ virtualbox__intnet: "leaf2-spine2",
+ libvirt__network_name: "leaf2-spine2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
if (settings['enable_fabric'])
@@ -275,9 +278,9 @@
end
end
- config.vm.define "compute-node-1" do |c|
+ config.vm.define "compute1" do |c|
c.vm.communicator = "none"
- c.vm.hostname = "compute-node-1"
+ c.vm.hostname = "compute1"
c.vm.network "private_network",
adapter: 1,
ip: "0.0.0.0",
@@ -288,8 +291,8 @@
adapter: 2, # The fabric interface for each node
ip: "0.2.0.0",
auto_config: false,
- virtualbox__intnet: "compute-node-1-leaf-1",
- libvirt__network_name: "compute-node-1-leaf-1",
+ virtualbox__intnet: "compute1-leaf1",
+ libvirt__network_name: "compute1-leaf1",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
c.vm.provider :libvirt do |v|
@@ -309,9 +312,9 @@
end
end
- config.vm.define "compute-node-2" do |c|
+ config.vm.define "compute2" do |c|
c.vm.communicator = "none"
- c.vm.hostname = "compute-node-2"
+ c.vm.hostname = "compute2"
c.vm.network "private_network",
adapter: 1,
ip: "0.0.0.0",
@@ -322,8 +325,8 @@
adapter: 2, # The fabric interface for each node
ip: "0.3.0.0",
auto_config: false,
- virtualbox__intnet: "compute-node-2-leaf-2",
- libvirt__network_name: "compute-node-2-leaf-2",
+ virtualbox__intnet: "compute2-leaf2",
+ libvirt__network_name: "compute2-leaf2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
c.vm.provider :libvirt do |v|
@@ -343,9 +346,9 @@
end
end
- config.vm.define "compute-node-3" do |c|
+ config.vm.define "compute3" do |c|
c.vm.communicator = "none"
- c.vm.hostname = "compute-node-3"
+ c.vm.hostname = "compute3"
c.vm.network "private_network",
adapter: 1,
ip: "0.0.0.0",
@@ -356,8 +359,8 @@
adapter: 2, # The fabric interface for each node
ip: "0.4.0.0",
auto_config: false,
- virtualbox__intnet: "compute-node-3-leaf-2",
- libvirt__network_name: "compute-node-3-leaf-2",
+ virtualbox__intnet: "compute3-leaf2",
+ libvirt__network_name: "compute3-leaf2",
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
c.vm.provider :libvirt do |v|
diff --git a/scenarios/cord/config.yml b/scenarios/cord/config.yml
index 6a3e835..742af2f 100644
--- a/scenarios/cord/config.yml
+++ b/scenarios/cord/config.yml
@@ -5,31 +5,59 @@
# cord profile config
frontend_only: False
-build_cord_dir: "/home/vagrant/cord"
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
-buildnode: corddev
-headnode: prod
+build_cord_dir: "/opt/cord"
+
+buildnode: head1
+headnode: head1
# docker config
+deploy_docker_registry: "docker-registry:5000" # using the head node's IP address here gives errors, so use the registry hostname
deploy_docker_tag: "candidate"
+pull_docker_registry: "docker-registry:5000/"
+pull_docker_tag: "candidate"
+
+# Variables required by MAAS
+fabric_ip: '10.6.1.1/24'
+management_ip: '10.1.0.1/24'
+external_iface: 'eth0'
+management_network: '10.1.0.0/24'
+#ansible_ssh_user: "vagrant" # purpose unclear; left disabled
+
+# Other old config carried over
+skipTags:
+ - 'set_compute_node_password'
+ - 'switch_support'
+ - 'reboot'
+ - 'interface_config'
+cord_in_a_box: True # what is this for?
+fabric_include_names: eth2
+fabric_include_module_types: omit
+fabric_exclude_names: eth0,eth1
+management_include_names: eth1
+management_exclude_names: eth0,eth2
# make build config
build_targets:
- - deploy-maas
- - onboard-openstack
+ - prereqs-check
+ - prep-headnode
+ - setup-automation
+ - compute1-up
vagrant_up_prereqs:
- prereqs-check
-# Copy cord and config to physical/virtual nodes
cord_config_prereqs:
- copy-cord
copy_config_prereqs:
- cord-config
-# Can prep build and head node simultaneously
prep_buildnode_prereqs:
+ - prep-headnode
- copy-cord
- copy-config
@@ -44,14 +72,22 @@
# Start elasticstack before XOS/ONOS, which log to it
start_xos_prereqs:
- deploy-elasticstack
+ - deploy-maas
+ - publish-docker-images
+
+deploy_openstack_prereqs:
+ - deploy-maas
deploy_onos_prereqs:
- - deploy-elasticstack
+ - deploy-maas
+ - deploy-mavenrepo
+
+setup_automation_prereqs:
+ - deploy-openstack
# Vagrant VM configuration
vagrant_vms:
- - corddev
- - prod
+ - head1
dev_vm_mem: 2048
dev_vm_cpu: 1
@@ -70,6 +106,10 @@
# Enable experimental fabric
enable_fabric: False
+# Apt cache depends on DNS provided by MAAS
+# MAAS isn't installed when prep-headnode runs
+use_apt_cache: False
+
# images for imagebuilder to build/pull (tagged elsewhere)
docker_image_whitelist:
- "xosproject/xos-base"
@@ -105,7 +145,7 @@
# node topology
physical_node_list:
- - name: prod
+ - name: head1
ipv4_last_octet: 1
aliases:
- head
@@ -114,13 +154,14 @@
inventory_groups:
config:
- corddev:
+ localhost:
+ ansible_connection: local
build:
- corddev:
+ head1:
head:
- prod:
+ head1:
compute: