Update the CORD repo to support the new network design

Change-Id: I06d89481206cc03a74281af8c0240baa3efe4186
(cherry picked from commit a858b05bb629fde142a21920d9a5d58384be19c9)
diff --git a/Vagrantfile b/Vagrantfile
index c33d952..5da74ce 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -26,7 +26,9 @@
     d.vm.synced_folder '.', '/vagrant', disabled: true
     d.vm.hostname = "prod"
     d.vm.network "private_network", ip: "10.100.198.201"
-    d.vm.network "private_network", ip: "10.1.0.1", virtualbox__intnet: "cord-test-network"
+    d.vm.network "private_network", ip: "0.0.0.0", virtualbox__intnet: "cord-test-network"
+    d.vm.provision :shell, path: "scripts/bootstrap_ansible.sh"
+    d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /cord/ansible/prod.yml -c local"
     d.vm.provider "virtualbox" do |v|
       v.memory = 2048
     end
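
For reference, the two new provisioners boil down to the following, run as root inside the prod VM on first boot (a minimal sketch; it assumes scripts/bootstrap_ansible.sh installs Ansible and that the repository is available at /cord, neither of which is shown in this diff):

    # Roughly what `vagrant up prod` now runs after the VM boots:
    bash bootstrap_ansible.sh   # uploaded into the VM by Vagrant's shell provisioner
    PYTHONUNBUFFERED=1 ansible-playbook /cord/ansible/prod.yml -c local
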
diff --git a/ansible/prod.yml b/ansible/prod.yml
new file mode 100644
index 0000000..73f08bc
--- /dev/null
+++ b/ansible/prod.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+  remote_user: vagrant
+  serial: 1
+  roles:
+    - prod
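
Because the play targets localhost over a local connection (remote_user is effectively unused with -c local), it can also be re-run by hand after the VM is up, e.g.:

    vagrant ssh prod
    sudo PYTHONUNBUFFERED=1 ansible-playbook /cord/ansible/prod.yml -c local
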
diff --git a/ansible/roles/prod/files/mgmtbr.cfg b/ansible/roles/prod/files/mgmtbr.cfg
new file mode 100644
index 0000000..626ab3c
--- /dev/null
+++ b/ansible/roles/prod/files/mgmtbr.cfg
@@ -0,0 +1,8 @@
+auto mgmtbr
+iface mgmtbr inet static
+    address 10.1.0.1
+    network 10.1.0.0
+    netmask 255.255.255.0
+    broadcast 10.1.0.255
+    gateway 10.1.0.1
+    bridge_ports eth2
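
This moves 10.1.0.1 off the VM's raw interface (the Vagrantfile above now leaves that interface at 0.0.0.0) and onto a bridge with eth2 as its port. A quick way to verify the bridge once the config lands (illustrative; brctl comes from the bridge-utils package the role installs):

    sudo ifup mgmtbr        # bring the bridge up from /etc/network/interfaces.d/mgmtbr.cfg
    brctl show mgmtbr       # eth2 should be listed as a bridge port
    ip addr show mgmtbr     # expect inet 10.1.0.1/24
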
diff --git a/ansible/roles/prod/tasks/main.yml b/ansible/roles/prod/tasks/main.yml
new file mode 100644
index 0000000..2599096
--- /dev/null
+++ b/ansible/roles/prod/tasks/main.yml
@@ -0,0 +1,17 @@
+- name: Bridge Support is Present
+  apt:
+    name: bridge-utils
+    force: yes
+  tags: [prod]
+
+- name: Ensure Management Bridge
+  copy:
+    src: mgmtbr.cfg
+    dest: /etc/network/interfaces.d/mgmtbr.cfg
+    owner: root
+    group: root
+    mode: 0644
+
+- name: Ensure Management Bridge Up
+  command: ifup mgmtbr
+
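
Note that the bare `command: ifup mgmtbr` is not idempotent: it runs on every play and may fail if the bridge is already configured. A safe way to preview what the role would change (sketch; in check mode the apt and copy tasks report changes while the command task is skipped):

    sudo ansible-playbook /cord/ansible/prod.yml -c local --check --diff
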
diff --git a/config/default.yml b/config/default.yml
index 0be591e..b69f602 100644
--- a/config/default.yml
+++ b/config/default.yml
@@ -3,6 +3,8 @@
 # This deployment configuration can be utilized with the head node created
 # via `vagrant up headnode` from the gerrit.opencord.org/maas repository.
 ---
+debug: false
+
 seedServer:
   ip: '10.100.198.201'
 
@@ -19,10 +21,11 @@
   #                    as it is configured by vagrant to the proper settings
   skipTags:
     - 'switch_support'
+    - 'reboot'
     - 'interface_config'
 
+  fabric_ip: '10.1.1.1/24'
   management_ip: '10.1.0.1/24'
-  management_iface: 'eth2'
   external_iface: 'eth0'
   management_network: '10.1.0.0/24'
 
@@ -38,9 +41,6 @@
 
 otherServers:
   # Specifies the configuration for dynamically added compute nodes
-  location: 'http://gerrit.opencord.org/maas'
-  rolesPath: 'roles'
-  role: 'compute-node'
   fabric:
     network: '10.1.1.1/24'
     range_low: '10.1.1.2'
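
With management_iface removed, the head node is now described purely by addresses (fabric_ip, management_ip, management_network). A minimal sanity check that an edited config still parses (illustrative; assumes PyYAML is installed):

    python -c "import yaml; yaml.safe_load(open('config/default.yml'))" && echo 'config OK'
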
diff --git a/config/onlab_develop_pod.yml b/config/onlab_develop_pod.yml
index 33ecd86..570ef6a 100644
--- a/config/onlab_develop_pod.yml
+++ b/config/onlab_develop_pod.yml
@@ -23,7 +23,6 @@
   fabric_ip: '10.6.1.1/24'
   management_ip: '10.6.0.1/24'
   external_ip: '10.90.0.2/24'
-  management_iface: 'em2'
   external_iface: 'em1'
   skipTags:
     - 'interface_config'
diff --git a/config/sample.yml b/config/sample.yml
index 7ae0372..07cb222 100644
--- a/config/sample.yml
+++ b/config/sample.yml
@@ -27,8 +27,6 @@
   #                 interface connecting the head node (and the POD) to the
   #                 Internet. All traffic in the POD to external hosts will be
   #                 NAT-ed through this interface
-  # management_iface - the name of the interface that connects the head node to the POD
-  #                    internal network.
   # external_iface   - the name of the interface that connects the head node to the
   #                    Internet
  # management_network - the network and mask bits to be used for hosts on the management
@@ -36,7 +34,6 @@
   fabric_ip: '10.6.1.1/24'
   management_ip: '10.6.0.1/24'
   external_ip: '47.135.132.21/24'
-  #management_iface: 'eth3'
   #external_iface: 'eth2'
   management_network: 10.1.0.0/24
 
diff --git a/docs/quickstart_physical.md b/docs/quickstart_physical.md
index e09ad5a..aa92ed0 100644
--- a/docs/quickstart_physical.md
+++ b/docs/quickstart_physical.md
@@ -28,8 +28,8 @@
 node.
 - Leaf spine switches are connected into the management TOR starting from the
 highest port number.
-- Compute nodes 40G interfaces are named *eth0* and *eth1*.
-- Compute nodes 10G interfaces are named *eth2* and *eth3*.
+- Compute nodes' fabric interfaces (typically 40G or 10G) are named *eth0* and *eth1*.
+- Compute nodes' POD management interfaces (typically 1G) are named *eth2* and *eth3*.
 - Compute node *n* is connected to the management TOR switch on port *n*,
 egressing from the compute node at *eth2*.
 - Compute node *n* is connected to its primary leaf, egressing at *eth0* and terminating on the leaf at port *n*.
@@ -350,11 +350,9 @@
 ### Network configuration
 The proposed configuration for a CORD POD has the following network configuration on the head node:
 
-   - eth0 / eth1 - 40G interfaces, not relevant for the test environment.
-   - eth2 - the interface on which the head node supports PXE boots and is an internally interface to which all
-   the compute nodes connected
+   - eth0 / eth1 - fabric interfaces (40G or 10G), not relevant for the test environment.
+   - mgmtbr - the bridge on which the head node supports PXE booting; it is an internal interface to which all
+   the compute and VM nodes are connected. This bridge uses eth2 as its bridge port
   - eth3 - WAN link. The head node will NAT from eth2 to eth3
-   - mgmtbr - Not associated with a physical network and used to connect in the VM created by the openstack
-   install that is part of XOS
 
 The Ansible scripts configure MAAS to support DHCP/DNS/PXE on the eth2 and mgmtbr interfaces.
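
Once provisioning completes, a quick spot-check that DHCP/DNS are in fact bound on the management side (illustrative; the exact daemons depend on how MAAS was installed, so this only confirms that listeners exist):

    ip addr show mgmtbr                    # bridge up with 10.1.0.1/24
    sudo ss -lunp | grep -E ':(53|67)\s'   # DNS (53) and DHCP (67) UDP listeners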