[CORD-2608]
Create scenarios suitable for kubespray and helm

Add a public network (eth2) to the controlkube and preppedpod scenario
VMs, switch the controlkube build target to start-xos-helm (with
kubespray skipping its docker installation), and add a new preppedkube
scenario that installs CORD on kubernetes on pre-prepared systems.

Change-Id: I07b19cfc00165046f8fbe6807f2d8add28398cbd
diff --git a/scenarios/controlkube/Vagrantfile b/scenarios/controlkube/Vagrantfile
index 3686e76..a52e989 100644
--- a/scenarios/controlkube/Vagrantfile
+++ b/scenarios/controlkube/Vagrantfile
@@ -26,12 +26,19 @@
       v.cpus = settings['head_vm_cpu']
     end
     h.vm.network "private_network", # management network, eth1
-      ip: "0.1.1.0", # unused IP address (setting required)
+      ip: "0.1.0.0", # not used, ignore
       auto_config: false,
       virtualbox__intnet: settings['vm_management_network_name'],
       libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
+    h.vm.network "private_network", # public network, eth2
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
   end
 
   config.vm.define "compute1" do |c|
@@ -46,12 +53,19 @@
       v.cpus = settings['compute_vm_cpu']
     end
     c.vm.network "private_network", # management network, eth1
-      ip: "0.1.1.0", # unused IP address (setting required)
+      ip: "0.1.0.0",
       auto_config: false,
       virtualbox__intnet: settings['vm_management_network_name'],
       libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
+    c.vm.network "private_network", # public network, eth2
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
   end
 
   config.vm.define "compute2" do |c|
@@ -66,12 +80,19 @@
       v.cpus = settings['compute_vm_cpu']
     end
     c.vm.network "private_network", # management network, eth1
-      ip: "0.1.1.0", # unused IP address (setting required)
+      ip: "0.1.0.0",
       auto_config: false,
       virtualbox__intnet: settings['vm_management_network_name'],
       libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
+    c.vm.network "private_network", # public network, eth2
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
   end
 end
 
diff --git a/scenarios/controlkube/config.yml b/scenarios/controlkube/config.yml
index a277318..b6f6efe 100644
--- a/scenarios/controlkube/config.yml
+++ b/scenarios/controlkube/config.yml
@@ -16,23 +16,38 @@
 # controlkube Scenario - installs XOS/ONOS on kubernetes, similar to controlpod
 
 frontend_only: False
+use_k8s: True
 use_maas: False
+use_openstack: False
 
 # create a cord_profile dir next to the cord checkout
 config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
 config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
 
+create_configdirs_become: False
+
 build_cord_dir: /opt/cord
 
+# Vagrant VM configuration
+vagrant_box: "bento/ubuntu-16.04"
+
+head_vm_mem: 2048
+head_vm_cpu: 4
+
+compute_vm_mem: 2048
+compute_vm_cpu: 4
+
 buildnode: head1
 headnode: head1
 
+vagrant_vms:
+  - head1
+  - compute1
+  - compute2
+
 # Make build config
 build_targets:
-  - deploy-kubespray
-# - prep-computenode
-# - prep-headnode
-# - core-image
+  - start-xos-helm
 
 config_ssh_key_prereqs:
   - vagrant-ssh-install
@@ -56,6 +71,10 @@
 docker_images_prereqs:
   - prep-headnode
 
+# have kubespray skip docker installation
+skipTags:
+  - docker
+
 # node topology, used to bring up management interfaces
 physical_node_list:
   - name: head1
@@ -67,25 +86,19 @@
   - name: compute2
     ipv4_last_octet: 18
 
-# Vagrant VM configuration
-vagrant_vms:
-  - head1
-  - compute1
-  - compute2
-
-# Vagrant VM configuration
-vagrant_box: "bento/ubuntu-16.04"
-
-head_vm_mem: 2048
-head_vm_cpu: 4
-
-compute_vm_mem: 2048
-compute_vm_cpu: 4
+management_net_bridge: "mgmtbridge"
 
 vm_management_network_name: cordmgmt
 vm_public_network_name: cordpub
 vm_public_network_cidr: "10.230.100.0/24"
 
+# which network interfaces belong to which bond on nodes
+management_net_interfaces:
+  - eth1
+
+fabric_net_interfaces:
+  - eth2
+
 # images for imagebuilder to build/pull (tagged elsewhere)
 docker_image_whitelist:
   - "xosproject/xos-base"
@@ -104,6 +117,7 @@
   - "gliderlabs/registrator"
   - "nginx"
   - "onosproject/onos"
+  - "opencord/mavenrepo"
   - "redis"
   - "node"
   - "sebp/elk"
diff --git a/scenarios/cord/config.yml b/scenarios/cord/config.yml
index d52e3d4..efc9289 100644
--- a/scenarios/cord/config.yml
+++ b/scenarios/cord/config.yml
@@ -60,6 +60,9 @@
   - copy-cord
   - copy-config
 
+publish_docker_images_prereqs:
+  - deploy-maas
+
 # Start elasticstack before XOS/ONOS, which log to it
 start_xos_prereqs:
   - deploy-maas
@@ -112,6 +115,9 @@
 # Used in platform-install / roles/cord-profile/templates/docker-compose.yml.j2
 use_elasticstack: True
 
+# Use the insecure MaaS-installed docker registry
+use_secure_docker_registry: False
+
 # images for imagebuilder to build/pull (tagged elsewhere)
 docker_image_whitelist:
   - "xosproject/xos-base"
diff --git a/scenarios/preppedkube/Vagrantfile b/scenarios/preppedkube/Vagrantfile
new file mode 100644
index 0000000..eaeeddb
--- /dev/null
+++ b/scenarios/preppedkube/Vagrantfile
@@ -0,0 +1,92 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# preppedkube Vagrantfile
+
+require 'yaml'
+settings = YAML.load_file('genconfig/config.yml')
+
+Vagrant.configure(2) do |config|
+
+  config.vm.box = settings["vagrant_box"]
+
+  config.vm.define "head1" do |h|
+    h.vm.hostname = "head1"
+    h.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: '0.0.0.0'
+    h.vm.provider :libvirt do |v|
+      v.memory = settings['head_vm_mem']
+      v.cpus = settings['head_vm_cpu']
+      v.machine_virtual_size = 100
+    end
+    h.vm.network "private_network", # management network, eth1
+      adapter: 1,
+      ip: "0.1.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    h.vm.network "private_network", # public network, eth2
+      adapter: 2,
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+  end
+
+  config.vm.define "compute1" do |c|
+    c.vm.hostname = "compute1"
+    c.vm.provider :libvirt do |v|
+      v.memory = settings['compute_vm_mem']
+      v.cpus = settings['compute_vm_cpu']
+      v.machine_virtual_size = 50
+      v.nested = true
+    end
+    c.vm.network "private_network", # management network, eth1
+      adapter: 1,
+      ip: "0.1.0.0",
+      auto_config: false,
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    c.vm.network "private_network", # public network, eth2
+      adapter: 2,
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+  end
+
+  config.vm.define "compute2" do |c|
+    c.vm.hostname = "compute2"
+    c.vm.provider :libvirt do |v|
+      v.memory = settings['compute_vm_mem']
+      v.cpus = settings['compute_vm_cpu']
+      v.machine_virtual_size = 50
+      v.nested = true
+    end
+    c.vm.network "private_network", # management network, eth1
+      adapter: 1,
+      ip: "0.1.0.0",
+      auto_config: false,
+      virtualbox__intnet: settings['vm_management_network_name'],
+      libvirt__network_name: settings['vm_management_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+    c.vm.network "private_network", # public network, eth2
+      adapter: 2,
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
+  end
+
+end
+
diff --git a/scenarios/preppedkube/config.yml b/scenarios/preppedkube/config.yml
new file mode 100644
index 0000000..d865e0d
--- /dev/null
+++ b/scenarios/preppedkube/config.yml
@@ -0,0 +1,160 @@
+---
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# preppedkube Scenario - installs CORD on kubernetes on pre-prepared systems with the OS already installed
+
+frontend_only: False
+use_k8s: True
+use_maas: False
+use_openstack: False
+
+# create a cord_profile dir next to the cord checkout
+config_cord_dir: "{{ ( playbook_dir ~ '/../..' ) | realpath }}"
+config_cord_profile_dir: "{{ ( playbook_dir ~ '/../../../cord_profile' ) | realpath }}"
+
+create_configdirs_become: False
+
+build_cord_dir: /opt/cord
+
+# Vagrant VM configuration
+vagrant_box: "bento/ubuntu-16.04"
+
+head_vm_mem: 16384
+head_vm_cpu: 8
+
+compute_vm_mem: 16384
+compute_vm_cpu: 8
+
+buildnode: head1
+headnode: head1
+
+vagrant_vms:
+  - head1
+  - compute1
+  - compute2
+
+# Make build config
+build_targets:
+  - start-xos-helm
+
+vagrant_up_prereqs:
+  - prereqs-check
+
+config_ssh_key_prereqs:
+  - vagrant-ssh-install
+
+copy_cord_prereqs:
+  - vagrant-ssh-install
+
+cord_config_prereqs:
+  - vagrant-ssh-install
+  - copy-cord
+
+prep_buildnode_prereqs:
+  - copy-cord
+  - copy-config
+
+# Run build prep before head prep, when build == head
+prep_headnode_prereqs:
+  - prep-buildnode
+
+# Wait until headnode prepped before building containers, for consistent DNS
+docker_images_prereqs:
+  - prep-headnode
+
+# have kubespray skip docker installation
+skipTags:
+  - docker
+
+# node topology, used to bring up management interfaces
+physical_node_list:
+  - name: head1
+    ipv4_last_octet: 1
+    aliases:
+      - head
+  - name: compute1
+    ipv4_last_octet: 17
+  - name: compute2
+    ipv4_last_octet: 18
+
+management_net_bridge: "mgmtbridge"
+
+vm_management_network_name: cordmgmt
+vm_public_network_name: cordpub
+vm_public_network_cidr: "10.230.100.0/24"
+
+# which network interfaces belong to which bond on nodes
+management_net_interfaces:
+  - eth1
+
+fabric_net_interfaces:
+  - eth2
+
+# A veth pair connecting the linux and integration bridges is described here:
+# https://wiki.opencord.org/display/CORD/VTN+Manual+Tests#VTNManualTests-Testenvironmentsetup
+# but it may not work here, so it is left commented out:
+# vtn_integration_bridge_interface: vethfabric1
+
+vtn_integration_bridge_interface: fabricbond
+
+headnode_fabric_bridge: fabricbridge
+
+use_vtn_net_management_host: False
+
+vtn_net_management_host_interface: vethmgmt1
+
+use_addresspool_vsg: True
+use_addresspool_public: True
+
+# images for imagebuilder to build/pull (tagged elsewhere)
+docker_image_whitelist:
+  - "xosproject/xos-base"
+  - "xosproject/xos"
+  - "xosproject/xos-client"
+  - "xosproject/xos-corebuilder"
+  - "xosproject/xos-gui"
+  - "xosproject/xos-gui-builder"
+  - "xosproject/xos-libraries"
+  - "xosproject/xos-postgres"
+  - "xosproject/xos-tosca"
+  - "xosproject/xos-ws"
+  - "xosproject/chameleon"
+  - "xosproject/xos-synchronizer-base"
+  - "gliderlabs/consul-server"
+  - "gliderlabs/registrator"
+  - "nginx"
+  - "onosproject/onos"
+  - "opencord/mavenrepo"
+  - "redis"
+  - "node"
+  - "sebp/elk"
+
+# Ansible Inventory
+inventory_groups:
+
+  config:
+    localhost:
+      ansible_connection: local
+
+  build:
+    head1:
+
+  head:
+    head1:
+
+  compute:
+    compute1:
+    compute2:
+
diff --git a/scenarios/preppedpod/Vagrantfile b/scenarios/preppedpod/Vagrantfile
index 6cfefe0..b74d574 100644
--- a/scenarios/preppedpod/Vagrantfile
+++ b/scenarios/preppedpod/Vagrantfile
@@ -1,7 +1,7 @@
 # -*- mode: ruby -*-
 # vi: set ft=ruby :
 #
-# opencloud Vagrantfile
+# preppedpod Vagrantfile
 
 require 'yaml'
 settings = YAML.load_file('genconfig/config.yml')
@@ -17,15 +17,23 @@
       v.memory = settings['head_vm_mem']
       v.cpus = settings['head_vm_cpu']
       v.machine_virtual_size = 100
-      v.management_network_name = settings['vm_public_network_name'] # public network
-      v.management_network_address = settings['vm_public_network_cidr']
     end
     h.vm.network "private_network", # management network, eth1
-      ip: "0.1.1.0", # not used, ignore
+      adapter: 1,
+      ip: "0.1.0.0", # not used, ignore
       auto_config: false,
+      virtualbox__intnet: settings['vm_management_network_name'],
       libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
+    h.vm.network "private_network", # public network, eth2
+      adapter: 2,
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
   end
 
   config.vm.define "compute1" do |c|
@@ -35,15 +43,23 @@
       v.cpus = settings['compute_vm_cpu']
       v.machine_virtual_size = 50
       v.nested = true
-      v.management_network_name = settings['vm_public_network_name'] # public network
-      v.management_network_address = settings['vm_public_network_cidr']
     end
     c.vm.network "private_network", # management network, eth1
-      ip: "0.1.1.0",
+      adapter: 1,
+      ip: "0.1.0.0",
       auto_config: false,
+      virtualbox__intnet: settings['vm_management_network_name'],
       libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
+    c.vm.network "private_network", # public network, eth2
+      adapter: 2,
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
   end
 
   config.vm.define "compute2" do |c|
@@ -53,15 +69,23 @@
       v.cpus = settings['compute_vm_cpu']
       v.machine_virtual_size = 50
       v.nested = true
-      v.management_network_name = settings['vm_public_network_name'] # public network
-      v.management_network_address = settings['vm_public_network_cidr']
     end
     c.vm.network "private_network", # management network, eth1
-      ip: "0.1.1.0",
+      adapter: 1,
+      ip: "0.1.0.0",
       auto_config: false,
+      virtualbox__intnet: settings['vm_management_network_name'],
       libvirt__network_name: settings['vm_management_network_name'],
       libvirt__forward_mode: "none",
       libvirt__dhcp_enabled: false
+    c.vm.network "private_network", # public network, eth2
+      adapter: 2,
+      ip: "0.2.0.0", # not used, ignore
+      auto_config: false,
+      virtualbox__intnet: settings['vm_public_network_name'],
+      libvirt__network_name: settings['vm_public_network_name'],
+      libvirt__forward_mode: "none",
+      libvirt__dhcp_enabled: false
   end
 
 end
diff --git a/scenarios/preppedpod/config.yml b/scenarios/preppedpod/config.yml
index 5ccc1d4..2c38e31 100644
--- a/scenarios/preppedpod/config.yml
+++ b/scenarios/preppedpod/config.yml
@@ -71,9 +71,9 @@
     aliases:
       - head
   - name: compute1
-    ipv4_last_octet: 250
+    ipv4_last_octet: 17
   - name: compute2
-    ipv4_last_octet: 251
+    ipv4_last_octet: 18
 
 # Vagrant VM configuration
 vagrant_vms:
@@ -95,6 +95,30 @@
 vm_public_network_name: cordpub
 vm_public_network_cidr: "10.230.100.0/24"
 
+# which network interfaces belong to which bond on nodes
+management_net_interfaces:
+  - eth1
+
+fabric_net_interfaces:
+  - eth2
+
+# A veth pair connecting the linux and integration bridges is described here:
+# https://wiki.opencord.org/display/CORD/VTN+Manual+Tests#VTNManualTests-Testenvironmentsetup
+# but it may not work here, so it is left commented out:
+# vtn_integration_bridge_interface: vethfabric1
+
+vtn_integration_bridge_interface: fabricbond
+
+headnode_fabric_bridge: fabricbridge
+
+use_vtn_net_management_host: False
+
+vtn_net_management_host_interface: vethmgmt1
+
+use_addresspool_vsg: True
+use_addresspool_public: True
+
+
 # images for imagebuilder to build/pull (tagged elsewhere)
 docker_image_whitelist:
   - "xosproject/xos-base"