[CORD-2608]
Create scenarios suitable for kubespray and helm
Change-Id: I07b19cfc00165046f8fbe6807f2d8add28398cbd
diff --git a/Makefile b/Makefile
index db27358..4975ff7 100644
--- a/Makefile
+++ b/Makefile
@@ -33,7 +33,7 @@
LOGS ?= $(BUILD)/logs
PREP_MS ?= $(M)/prereqs-check $(M)/build-local-bootstrap $(M)/ciab-ovs $(M)/vagrant-up $(M)/vagrant-ssh-install $(M)/copy-cord $(M)/cord-config $(M)/copy-config $(M)/prep-buildnode $(M)/prep-headnode $(M)/deploy-elasticstack $(M)/prep-computenode
-KS_MS ?= $(M)/prep-kubespray $(M)/deploy-kubespray
+KS_MS ?= $(M)/prep-kubespray $(M)/deploy-kubespray $(M)/finish-kubespray $(M)/install-kubernetes-tools $(M)/start-xos-helm
MAAS_MS ?= $(M)/build-maas-images $(M)/maas-prime $(M)/publish-maas-images $(M)/deploy-maas
OPENSTACK_MS ?= $(M)/glance-images $(M)/deploy-openstack $(M)/deploy-computenode $(M)/onboard-openstack
XOS_MS ?= $(M)/docker-images $(M)/core-image $(M)/publish-docker-images $(M)/start-xos $(M)/onboard-profile
@@ -199,22 +199,23 @@
cd docs; make
# == PREREQS == #
-VAGRANT_UP_PREREQS ?=
-COPY_CORD_PREREQS ?=
-CORD_CONFIG_PREREQS ?=
-CONFIG_SSH_KEY_PREREQS ?=
-PREP_BUILDNODE_PREREQS ?=
-PREP_HEADNODE_PREREQS ?=
-PREP_KUBESPRAY_PREREQS ?=
-DOCKER_IMAGES_PREREQS ?=
-START_XOS_PREREQS ?=
-BUILD_ONOS_APPS_PREREQS ?=
-DEPLOY_ONOS_PREREQS ?=
-DEPLOY_MAVENREPO_PREREQS ?=
-DEPLOY_OPENSTACK_PREREQS ?=
-ONBOARD_OPENSTACK_PREREQS ?=
-SETUP_AUTOMATION_PREREQS ?=
-TESTING_PREREQS ?=
+VAGRANT_UP_PREREQS ?=
+COPY_CORD_PREREQS ?=
+CORD_CONFIG_PREREQS ?=
+CONFIG_SSH_KEY_PREREQS ?=
+PREP_BUILDNODE_PREREQS ?=
+PREP_HEADNODE_PREREQS ?=
+PREP_KUBESPRAY_PREREQS ?=
+DOCKER_IMAGES_PREREQS ?=
+PUBLISH_DOCKER_IMAGES_PREREQS ?=
+START_XOS_PREREQS ?=
+BUILD_ONOS_APPS_PREREQS ?=
+DEPLOY_ONOS_PREREQS ?=
+DEPLOY_MAVENREPO_PREREQS ?=
+DEPLOY_OPENSTACK_PREREQS ?=
+ONBOARD_OPENSTACK_PREREQS ?=
+SETUP_AUTOMATION_PREREQS ?=
+TESTING_PREREQS ?=
# == MILESTONES == #
# empty target files are touched in the milestones dir to indicate completion
@@ -278,8 +279,8 @@
touch $@
-# kubespray targets
-$(M)/prep-kubespray: | $(M)/vagrant-ssh-install $(PREP_KUBESPRAY_PREREQS)
+# kubernetes targets
+$(M)/prep-kubespray: | $(M)/prep-headnode $(M)/prep-computenode $(PREP_KUBESPRAY_PREREQS)
$(ANSIBLE_PB) $(BUILD)/ansible/prep-kubespray.yml $(LOGCMD)
touch $@
@@ -287,6 +288,18 @@
cd $(KUBESPRAY); $(ANSIBLE_PB_KS) cluster.yml $(LOGCMD)
touch $@
+$(M)/finish-kubespray: | $(M)/deploy-kubespray
+ cd $(BUILD); $(ANSIBLE_PB) $(BUILD)/ansible/finish-kubespray.yml $(LOGCMD)
+ touch $@
+
+$(M)/install-kubernetes-tools: | $(M)/deploy-kubespray
+ $(ANSIBLE_PB) $(PI)/install-kubernetes-tools-playbook.yml $(LOGCMD)
+ touch $@
+
+$(M)/start-xos-helm: | $(M)/install-kubernetes-tools $(M)/finish-kubespray $(M)/publish-docker-images
+ $(ANSIBLE_PB) $(PI)/start-xos-helm-playbook.yml $(LOGCMD)
+ touch $@
+
# MaaS targets
$(M)/build-maas-images: | $(M)/prep-buildnode $(BUILD_MAAS_IMAGES_PREREQS)
@@ -330,7 +343,7 @@
touch $@
# Requires ib_actions.yml file which is on the build host
-$(M)/publish-docker-images: | $(M)/deploy-maas $(M)/docker-images $(M)/core-image
+$(M)/publish-docker-images: | $(M)/docker-images $(M)/core-image $(PUBLISH_DOCKER_IMAGES_PREREQS)
$(SSH_BUILD) "cd $(BUILD_CORD_DIR)/build; $(ANSIBLE_PB_LOCAL) $(PI)/publish-images-playbook.yml" $(LOGCMD)
touch $@
diff --git a/ansible/finish-kubespray.yml b/ansible/finish-kubespray.yml
new file mode 100644
index 0000000..0fb3525
--- /dev/null
+++ b/ansible/finish-kubespray.yml
@@ -0,0 +1,24 @@
+---
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ansible/finish-kubespray.yml
+
+- name: Finish off kubespray install
+ hosts: localhost
+ connection: local
+ gather_facts: False
+ roles:
+ - finish-kubespray
+
diff --git a/ansible/roles/finish-kubespray/defaults/main.yml b/ansible/roles/finish-kubespray/defaults/main.yml
new file mode 100644
index 0000000..437d960
--- /dev/null
+++ b/ansible/roles/finish-kubespray/defaults/main.yml
@@ -0,0 +1,23 @@
+---
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# finish-kubespray/defaults/main.yml
+
+kubespray_dir: "../kubespray"
+
+management_net_cidr: "10.1.0.0/24"
+
+head_vm_ip: "192.168.46.100"
+
diff --git a/ansible/roles/finish-kubespray/tasks/main.yml b/ansible/roles/finish-kubespray/tasks/main.yml
new file mode 100644
index 0000000..258c0ac
--- /dev/null
+++ b/ansible/roles/finish-kubespray/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# finish-kubespray/tasks/main.yml
+
+- name: Create ~/.kube directory
+ file:
+ state: directory
+ dest: "~/.kube"
+
+- name: Move kubespray admin.conf to ~/.kube/config
+ copy:
+ remote_src: true
+ src: "{{ kubespray_dir }}/artifacts/admin.conf"
+ dest: "~/.kube/config"
+
+- name: Find Vagrant head VM IP address
+ shell: "vagrant ssh-config | grep -m 1 HostName | awk '{print $2}'"
+ args:
+ chdir: "{{ config_cord_dir }}/build"
+ environment:
+ VAGRANT_CWD: "{{ config_cord_dir }}/build/scenarios/{{ cord_scenario }}"
+ register: vagrant_head_vm_ip
+ tags:
+ - skip_ansible_lint # there isn't a module for this, info retrieval
+
+- name: Fix config URL to match Vagrant head VM IP
+ lineinfile:
+ path: "~/.kube/config"
+ regexp: "^ server: https://{{ management_net_cidr | ipaddr(1) | ipaddr('address') }}:6443"
+ line: " server: https://{{ vagrant_head_vm_ip.stdout }}:6443"
+
diff --git a/ansible/roles/genconfig/templates/config.mk.j2 b/ansible/roles/genconfig/templates/config.mk.j2
index ecde390..1d3f916 100644
--- a/ansible/roles/genconfig/templates/config.mk.j2
+++ b/ansible/roles/genconfig/templates/config.mk.j2
@@ -73,6 +73,9 @@
{% if docker_images_prereqs is defined %}
DOCKER_IMAGES_PREREQS = $(M)/{{ docker_images_prereqs | join(" $(M)/") }}
{% endif %}
+{% if publish_docker_images_prereqs is defined %}
+PUBLISH_DOCKER_IMAGES_PREREQS = $(M)/{{ publish_docker_images_prereqs | join(" $(M)/") }}
+{% endif %}
{% if start_xos_prereqs is defined %}
START_XOS_PREREQS = $(M)/{{ start_xos_prereqs | join(" $(M)/") }}
{% endif %}
diff --git a/ansible/roles/prep-kubespray/defaults/main.yml b/ansible/roles/prep-kubespray/defaults/main.yml
index 6c8266d..7e3df07 100644
--- a/ansible/roles/prep-kubespray/defaults/main.yml
+++ b/ansible/roles/prep-kubespray/defaults/main.yml
@@ -18,3 +18,5 @@
kubespray_version: "master"
kubespray_dir: "../kubespray"
+management_net_cidr: "10.1.0.0/24"
+
diff --git a/ansible/roles/prep-kubespray/tasks/main.yml b/ansible/roles/prep-kubespray/tasks/main.yml
index 43a5128..6616ac4 100644
--- a/ansible/roles/prep-kubespray/tasks/main.yml
+++ b/ansible/roles/prep-kubespray/tasks/main.yml
@@ -26,5 +26,3 @@
src: kubespray_inventory.j2
dest: "{{ kubespray_dir }}/inventory/inventory.cord"
mode: 0644
-
-
diff --git a/ansible/roles/prep-kubespray/templates/kubespray_inventory.j2 b/ansible/roles/prep-kubespray/templates/kubespray_inventory.j2
index bb70db4..6782bf2 100644
--- a/ansible/roles/prep-kubespray/templates/kubespray_inventory.j2
+++ b/ansible/roles/prep-kubespray/templates/kubespray_inventory.j2
@@ -1,28 +1,28 @@
-# created by prep-kubespray/templates/kubespray_inventory.j2
+# created by CORD ansible role: prep-kubespray/templates/kubespray_inventory.j2
-[kube-master]
+[cord-headnodes]
{% for node in groups['head'] %}
-{{ node }}
-{% endfor %}
-{% for node in groups['compute'] %}
-{{ node }}
+{% set npi = ( physical_node_list | selectattr("name", "equalto", node) | first ) %}
+{{ node }} ip='{{ management_net_cidr | ipaddr(npi.ipv4_last_octet) | ipaddr("address") }}'
{% endfor %}
-[etcd]
-{% for node in groups['head'] %}
-{{ node }}
-{% endfor %}
+[cord-computenodes]
{% for node in groups['compute'] %}
-{{ node }}
+{% set npi = ( physical_node_list | selectattr("name", "equalto", node) | first ) %}
+{{ node }} ip='{{ management_net_cidr | ipaddr(npi.ipv4_last_octet) | ipaddr("address") }}'
{% endfor %}
-[kube-node]
-{% for node in groups['head'] %}
-{{ node }}
-{% endfor %}
-{% for node in groups['compute'] %}
-{{ node }}
-{% endfor %}
+[kube-master:children]
+cord-headnodes
+cord-computenodes
+
+[etcd:children]
+cord-headnodes
+cord-computenodes
+
+[kube-node:children]
+cord-headnodes
+cord-computenodes
[k8s-cluster:children]
kube-node
diff --git a/docs/kubernetes.md b/docs/kubernetes.md
new file mode 100644
index 0000000..4d88322
--- /dev/null
+++ b/docs/kubernetes.md
@@ -0,0 +1,11 @@
+# Using CORD on Kubernetes (Experimental)
+
+
+## Podconfigs & Scenarios
+
+There are Kubernetes-specific scenarios, `controlkube` and `preppedkube`.
+
+## Commands
+
+Remove all kubespray-related targets: `make clean-kubespray`
+
diff --git a/scenarios/controlkube/Vagrantfile b/scenarios/controlkube/Vagrantfile
index 3686e76..a52e989 100644
--- a/scenarios/controlkube/Vagrantfile
+++ b/scenarios/controlkube/Vagrantfile
@@ -26,12 +26,19 @@
v.cpus = settings['head_vm_cpu']
end
h.vm.network "private_network", # management network, eth1
- ip: "0.1.1.0", # unused IP address (setting required)
+ ip: "0.1.0.0", # not used, ignore
auto_config: false,
virtualbox__intnet: settings['vm_management_network_name'],
libvirt__network_name: settings['vm_management_network_name'],
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
+ h.vm.network "private_network", # public network, eth2
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
end
config.vm.define "compute1" do |c|
@@ -46,12 +53,19 @@
v.cpus = settings['compute_vm_cpu']
end
c.vm.network "private_network", # management network, eth1
- ip: "0.1.1.0", # unused IP address (setting required)
+ ip: "0.1.0.0",
auto_config: false,
virtualbox__intnet: settings['vm_management_network_name'],
libvirt__network_name: settings['vm_management_network_name'],
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
+ c.vm.network "private_network", # public network, eth2
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
end
config.vm.define "compute2" do |c|
@@ -66,12 +80,19 @@
v.cpus = settings['compute_vm_cpu']
end
c.vm.network "private_network", # management network, eth1
- ip: "0.1.1.0", # unused IP address (setting required)
+ ip: "0.1.0.0",
auto_config: false,
virtualbox__intnet: settings['vm_management_network_name'],
libvirt__network_name: settings['vm_management_network_name'],
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
+ c.vm.network "private_network", # public network, eth2
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
end
end
diff --git a/scenarios/controlkube/config.yml b/scenarios/controlkube/config.yml
index a277318..b6f6efe 100644
--- a/scenarios/controlkube/config.yml
+++ b/scenarios/controlkube/config.yml
@@ -16,23 +16,38 @@
# controlkube Scenario - installs XOS/ONOS on kubernetes, similar to controlpod
frontend_only: False
+use_k8s: True
use_maas: False
+use_openstack: False
# create a cord_profile dir next to the cord checkout
config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+create_configdirs_become: False
+
build_cord_dir: /opt/cord
+# Vagrant VM configuration
+vagrant_box: "bento/ubuntu-16.04"
+
+head_vm_mem: 2048
+head_vm_cpu: 4
+
+compute_vm_mem: 2048
+compute_vm_cpu: 4
+
buildnode: head1
headnode: head1
+vagrant_vms:
+ - head1
+ - compute1
+ - compute2
+
# Make build config
build_targets:
- - deploy-kubespray
-# - prep-computenode
-# - prep-headnode
-# - core-image
+ - start-xos-helm
config_ssh_key_prereqs:
- vagrant-ssh-install
@@ -56,6 +71,10 @@
docker_images_prereqs:
- prep-headnode
+# have kubespray skip docker installation
+skipTags:
+ - docker
+
# node topology, used to bring up management interfaces
physical_node_list:
- name: head1
@@ -67,25 +86,19 @@
- name: compute2
ipv4_last_octet: 18
-# Vagrant VM configuration
-vagrant_vms:
- - head1
- - compute1
- - compute2
-
-# Vagrant VM configuration
-vagrant_box: "bento/ubuntu-16.04"
-
-head_vm_mem: 2048
-head_vm_cpu: 4
-
-compute_vm_mem: 2048
-compute_vm_cpu: 4
+management_net_bridge: "mgmtbridge"
vm_management_network_name: cordmgmt
vm_public_network_name: cordpub
vm_public_network_cidr: "10.230.100.0/24"
+# which network interfaces belong to which bond on nodes
+management_net_interfaces:
+ - eth1
+
+fabric_net_interfaces:
+ - eth2
+
# images for imagebuilder to build/pull (tagged elsewhere)
docker_image_whitelist:
- "xosproject/xos-base"
@@ -104,6 +117,7 @@
- "gliderlabs/registrator"
- "nginx"
- "onosproject/onos"
+ - "opencord/mavenrepo"
- "redis"
- "node"
- "sebp/elk"
diff --git a/scenarios/cord/config.yml b/scenarios/cord/config.yml
index d52e3d4..efc9289 100644
--- a/scenarios/cord/config.yml
+++ b/scenarios/cord/config.yml
@@ -60,6 +60,9 @@
- copy-cord
- copy-config
+publish_docker_images_prereqs:
+ - deploy-maas
+
# Start elasticstack before XOS/ONOS, which log to it
start_xos_prereqs:
- deploy-maas
@@ -112,6 +115,9 @@
# Used in platform-install / roles/cord-profile/templates/docker-compose.yml.j2
use_elasticstack: True
+# Use the insecure MaaS-installed docker registry
+use_secure_docker_registry: False
+
# images for imagebuilder to build/pull (tagged elsewhere)
docker_image_whitelist:
- "xosproject/xos-base"
diff --git a/scenarios/preppedkube/Vagrantfile b/scenarios/preppedkube/Vagrantfile
new file mode 100644
index 0000000..eaeeddb
--- /dev/null
+++ b/scenarios/preppedkube/Vagrantfile
@@ -0,0 +1,92 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# preppedkube Vagrantfile
+
+require 'yaml'
+settings = YAML.load_file('genconfig/config.yml')
+
+Vagrant.configure(2) do |config|
+
+ config.vm.box = settings["vagrant_box"]
+
+ config.vm.define "head1" do |h|
+ h.vm.hostname = "head1"
+ h.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '0.0.0.0'
+ h.vm.provider :libvirt do |v|
+ v.memory = settings['head_vm_mem']
+ v.cpus = settings['head_vm_cpu']
+ v.machine_virtual_size = 100
+ end
+ h.vm.network "private_network", # management network, eth1
+ adapter: 1,
+ ip: "0.1.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_management_network_name'],
+ libvirt__network_name: settings['vm_management_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ h.vm.network "private_network", # public network, eth2
+ adapter: 2,
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ end
+
+ config.vm.define "compute1" do |c|
+ c.vm.hostname = "compute1"
+ c.vm.provider :libvirt do |v|
+ v.memory = settings['compute_vm_mem']
+ v.cpus = settings['compute_vm_cpu']
+ v.machine_virtual_size = 50
+ v.nested = true
+ end
+ c.vm.network "private_network", # management network, eth1
+ adapter: 1,
+ ip: "0.1.0.0",
+ auto_config: false,
+ virtualbox__intnet: settings['vm_management_network_name'],
+ libvirt__network_name: settings['vm_management_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ c.vm.network "private_network", # public network, eth2
+ adapter: 2,
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ end
+
+ config.vm.define "compute2" do |c|
+ c.vm.hostname = "compute2"
+ c.vm.provider :libvirt do |v|
+ v.memory = settings['compute_vm_mem']
+ v.cpus = settings['compute_vm_cpu']
+ v.machine_virtual_size = 50
+ v.nested = true
+ end
+ c.vm.network "private_network", # management network, eth1
+ adapter: 1,
+ ip: "0.1.0.0",
+ auto_config: false,
+ virtualbox__intnet: settings['vm_management_network_name'],
+ libvirt__network_name: settings['vm_management_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ c.vm.network "private_network", # public network, eth2
+ adapter: 2,
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
+ end
+
+end
+
diff --git a/scenarios/preppedkube/config.yml b/scenarios/preppedkube/config.yml
new file mode 100644
index 0000000..d865e0d
--- /dev/null
+++ b/scenarios/preppedkube/config.yml
@@ -0,0 +1,160 @@
+---
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# preppedkube Scenario - install CORD on kubernetes, on pre-prepared systems with OS installed
+
+frontend_only: False
+use_k8s: True
+use_maas: False
+use_openstack: False
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+create_configdirs_become: False
+
+build_cord_dir: /opt/cord
+
+# Vagrant VM configuration
+vagrant_box: "bento/ubuntu-16.04"
+
+head_vm_mem: 16384
+head_vm_cpu: 8
+
+compute_vm_mem: 16384
+compute_vm_cpu: 8
+
+buildnode: head1
+headnode: head1
+
+vagrant_vms:
+ - head1
+ - compute1
+ - compute2
+
+# Make build config
+build_targets:
+ - start-xos-helm
+
+vagrant_up_prereqs:
+ - prereqs-check
+
+config_ssh_key_prereqs:
+ - vagrant-ssh-install
+
+copy_cord_prereqs:
+ - vagrant-ssh-install
+
+cord_config_prereqs:
+ - vagrant-ssh-install
+ - copy-cord
+
+prep_buildnode_prereqs:
+ - copy-cord
+ - copy-config
+
+# Run build prep before head prep, when build == head
+prep_headnode_prereqs:
+ - prep-buildnode
+
+# Wait until headnode prepped before building containers, for consistent DNS
+docker_images_prereqs:
+ - prep-headnode
+
+# have kubespray skip docker installation
+skipTags:
+ - docker
+
+# node topology, used to bring up management interfaces
+physical_node_list:
+ - name: head1
+ ipv4_last_octet: 1
+ aliases:
+ - head
+ - name: compute1
+ ipv4_last_octet: 17
+ - name: compute2
+ ipv4_last_octet: 18
+
+management_net_bridge: "mgmtbridge"
+
+vm_management_network_name: cordmgmt
+vm_public_network_name: cordpub
+vm_public_network_cidr: "10.230.100.0/24"
+
+# which network interfaces belong to which bond on nodes
+management_net_interfaces:
+ - eth1
+
+fabric_net_interfaces:
+ - eth2
+
+# veth pair connected between a linux and integration bridge are described here
+# https://wiki.opencord.org/display/CORD/VTN+Manual+Tests#VTNManualTests-Testenvironmentsetup
+# but may not work?
+# vtn_integration_bridge_interface: vethfabric1
+
+vtn_integration_bridge_interface: fabricbond
+
+headnode_fabric_bridge: fabricbridge
+
+use_vtn_net_management_host: False
+
+vtn_net_management_host_interface: vethmgmt1
+
+use_addresspool_vsg: True
+use_addresspool_public: True
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+ - "xosproject/xos-base"
+ - "xosproject/xos"
+ - "xosproject/xos-client"
+ - "xosproject/xos-corebuilder"
+ - "xosproject/xos-gui"
+ - "xosproject/xos-gui-builder"
+ - "xosproject/xos-libraries"
+ - "xosproject/xos-postgres"
+ - "xosproject/xos-tosca"
+ - "xosproject/xos-ws"
+ - "xosproject/chameleon"
+ - "xosproject/xos-synchronizer-base"
+ - "gliderlabs/consul-server"
+ - "gliderlabs/registrator"
+ - "nginx"
+ - "onosproject/onos"
+ - "opencord/mavenrepo"
+ - "redis"
+ - "node"
+ - "sebp/elk"
+
+# Ansible Inventory
+inventory_groups:
+
+ config:
+ localhost:
+ ansible_connection: local
+
+ build:
+ head1:
+
+ head:
+ head1:
+
+ compute:
+ compute1:
+ compute2:
+
diff --git a/scenarios/preppedpod/Vagrantfile b/scenarios/preppedpod/Vagrantfile
index 6cfefe0..b74d574 100644
--- a/scenarios/preppedpod/Vagrantfile
+++ b/scenarios/preppedpod/Vagrantfile
@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
-# opencloud Vagrantfile
+# preppedpod Vagrantfile
require 'yaml'
settings = YAML.load_file('genconfig/config.yml')
@@ -17,15 +17,23 @@
v.memory = settings['head_vm_mem']
v.cpus = settings['head_vm_cpu']
v.machine_virtual_size = 100
- v.management_network_name = settings['vm_public_network_name'] # public network
- v.management_network_address = settings['vm_public_network_cidr']
end
h.vm.network "private_network", # management network, eth1
- ip: "0.1.1.0", # not used, ignore
+ adapter: 1,
+ ip: "0.1.0.0", # not used, ignore
auto_config: false,
+ virtualbox__intnet: settings['vm_management_network_name'],
libvirt__network_name: settings['vm_management_network_name'],
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
+ h.vm.network "private_network", # public network, eth2
+ adapter: 2,
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
end
config.vm.define "compute1" do |c|
@@ -35,15 +43,23 @@
v.cpus = settings['compute_vm_cpu']
v.machine_virtual_size = 50
v.nested = true
- v.management_network_name = settings['vm_public_network_name'] # public network
- v.management_network_address = settings['vm_public_network_cidr']
end
c.vm.network "private_network", # management network, eth1
- ip: "0.1.1.0",
+ adapter: 1,
+ ip: "0.1.0.0",
auto_config: false,
+ virtualbox__intnet: settings['vm_management_network_name'],
libvirt__network_name: settings['vm_management_network_name'],
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
+ c.vm.network "private_network", # public network, eth2
+ adapter: 2,
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
end
config.vm.define "compute2" do |c|
@@ -53,15 +69,23 @@
v.cpus = settings['compute_vm_cpu']
v.machine_virtual_size = 50
v.nested = true
- v.management_network_name = settings['vm_public_network_name'] # public network
- v.management_network_address = settings['vm_public_network_cidr']
end
c.vm.network "private_network", # management network, eth1
- ip: "0.1.1.0",
+ adapter: 1,
+ ip: "0.1.0.0",
auto_config: false,
+ virtualbox__intnet: settings['vm_management_network_name'],
libvirt__network_name: settings['vm_management_network_name'],
libvirt__forward_mode: "none",
libvirt__dhcp_enabled: false
+ c.vm.network "private_network", # public network, eth2
+ adapter: 2,
+ ip: "0.2.0.0", # not used, ignore
+ auto_config: false,
+ virtualbox__intnet: settings['vm_public_network_name'],
+ libvirt__network_name: settings['vm_public_network_name'],
+ libvirt__forward_mode: "none",
+ libvirt__dhcp_enabled: false
end
end
diff --git a/scenarios/preppedpod/config.yml b/scenarios/preppedpod/config.yml
index 5ccc1d4..2c38e31 100644
--- a/scenarios/preppedpod/config.yml
+++ b/scenarios/preppedpod/config.yml
@@ -71,9 +71,9 @@
aliases:
- head
- name: compute1
- ipv4_last_octet: 250
+ ipv4_last_octet: 17
- name: compute2
- ipv4_last_octet: 251
+ ipv4_last_octet: 18
# Vagrant VM configuration
vagrant_vms:
@@ -95,6 +95,30 @@
vm_public_network_name: cordpub
vm_public_network_cidr: "10.230.100.0/24"
+# which network interfaces belong to which bond on nodes
+management_net_interfaces:
+ - eth1
+
+fabric_net_interfaces:
+ - eth2
+
+# veth pair connected between a linux and integration bridge are described here
+# https://wiki.opencord.org/display/CORD/VTN+Manual+Tests#VTNManualTests-Testenvironmentsetup
+# but may not work?
+# vtn_integration_bridge_interface: vethfabric1
+
+vtn_integration_bridge_interface: fabricbond
+
+headnode_fabric_bridge: fabricbridge
+
+use_vtn_net_management_host: False
+
+vtn_net_management_host_interface: vethmgmt1
+
+use_addresspool_vsg: True
+use_addresspool_public: True
+
+
# images for imagebuilder to build/pull (tagged elsewhere)
docker_image_whitelist:
- "xosproject/xos-base"