Merge branch 'master' of github.com:open-cloud/xos into AddVPNService
diff --git a/containers/synchronizer/Dockerfile b/containers/synchronizer/Dockerfile
index 7170e1c..8557a4d 100644
--- a/containers/synchronizer/Dockerfile
+++ b/containers/synchronizer/Dockerfile
@@ -1,6 +1,5 @@
 FROM       xosproject/xos
 
-# Install custom Ansible
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \
     openssh-client \
     python-crypto \
@@ -8,11 +7,13 @@
     python-paramiko \
     python-yaml \
     python-httplib2 \
+    rsync \
     supervisor
 
 RUN pip install -U \
     jinja2
 
+# Install custom Ansible
 RUN \
     git clone -b release1.8.2 git://github.com/ansible/ansible.git /opt/ansible && \
     git clone -b release1.8.2 git://github.com/ansible/ansible-modules-extras.git /opt/ansible/lib/ansible/modules/extras && \
diff --git a/containers/xos/Dockerfile b/containers/xos/Dockerfile
index 19126fb..dc9a638 100644
--- a/containers/xos/Dockerfile
+++ b/containers/xos/Dockerfile
@@ -72,13 +72,6 @@
 
 ADD http://code.jquery.com/jquery-1.9.1.min.js /usr/local/lib/python2.7/site-packages/suit/static/suit/js/
 
-RUN mkdir -p /usr/local/share /bin
-ADD http://phantomjs.googlecode.com/files/phantomjs-1.7.0-linux-x86_64.tar.bz2 /usr/local/share/
-RUN tar jxvf /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2 -C /usr/local/share/
-RUN rm -f /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2
-RUN ln -s /usr/local/share/phantomjs-1.7.0-linux-x86_64 /usr/local/share/phantomjs
-RUN ln -s /usr/local/share/phantomjs/bin/phantomjs /bin/phantomjs
-
 # Install XOS
 RUN git clone git://github.com/open-cloud/xos.git /tmp/xos && \
     mv /tmp/xos/xos /opt/ && \
@@ -93,6 +86,7 @@
 
 # Set environment variables.
 ENV HOME /root
+ENV PYTHONPATH /usr/local/lib/python2.7/site-packages:/usr/lib/python2.7/dist-packages
 
 # Define working directory.
 WORKDIR /opt/xos
diff --git a/containers/xos/Dockerfile.templ b/containers/xos/Dockerfile.templ
index f29305b..216ceb5 100644
--- a/containers/xos/Dockerfile.templ
+++ b/containers/xos/Dockerfile.templ
@@ -80,6 +80,7 @@
 
 # Set environment variables.
 ENV HOME /root
+ENV PYTHONPATH /usr/local/lib/python2.7/site-packages:/usr/lib/python2.7/dist-packages
 
 # Define working directory.
 WORKDIR /root
diff --git a/xos/configurations/common/Makefile.cloudlab b/xos/configurations/common/Makefile.cloudlab
index af2ec58..4646fd6 100644
--- a/xos/configurations/common/Makefile.cloudlab
+++ b/xos/configurations/common/Makefile.cloudlab
@@ -1,4 +1,7 @@
 MYFLATLANIF:=$(shell netstat -i |grep "flat"|awk '{print $$1}' )
+ifndef MYFLATLANIF
+$(error MYFLATLANIF is empty)
+endif
 MYFLATLANIP:=$(shell ifconfig $(MYFLATLANIF) | grep "inet addr" | awk -F: '{print $$2}' | awk '{print $$1}' )
 
 all: prereqs admin-openrc flat_name nodes_yaml public_key private_key ceilometer_url node_key
diff --git a/xos/configurations/cord/Makefile b/xos/configurations/cord/Makefile
index 755f704..0dc7a73 100644
--- a/xos/configurations/cord/Makefile
+++ b/xos/configurations/cord/Makefile
@@ -3,7 +3,7 @@
 LAST_CONTAINER=$(shell sudo docker ps -l -q)
 
 cord: common_cloudlab ceilometer_dashboard virtualbng_json vtn_network_cfg_json
-	rm ../../xos_configuration/*
+	if [ -a ../../xos_configuration/* ]; then rm ../../xos_configuration/*; fi
 	echo "# Autogenerated -- do not edit" > Dockerfile
 	cat ../common/Dockerfile.common Dockerfile.cord >> Dockerfile
 	cp ../common/xos_common_config ../../xos_configuration/
diff --git a/xos/configurations/cord/README-VTN.md b/xos/configurations/cord/README-VTN.md
index 2b7d5e1..9827b0e 100644
--- a/xos/configurations/cord/README-VTN.md
+++ b/xos/configurations/cord/README-VTN.md
@@ -1,8 +1,13 @@
 vtn notes:
 
+see also: https://github.com/hyunsun/documentations/wiki/Neutron-ONOS-Integration-for-CORD-VTN#onos-setup
+
 inside the xos container:
 
     python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/samples/vtn.yaml
+    emacs /opt/xos/xos_configuration/xos_common_config
+        [networking]
+        use_vtn=True
 
 ctl node:
 
@@ -10,6 +15,7 @@
     ONOS_VTN_HOSTNAME="cp-2.smbaker-xos5.xos-pg0.clemson.cloudlab.us"
     apt-get -y install python-pip
     pip install -U setuptools pip
+    pip install testrepository
     git clone https://github.com/openstack/networking-onos.git
     cd networking-onos
     python setup.py install
@@ -29,13 +35,20 @@
     # files. Maybe it can be restarted using systemctl instead...
     /usr/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /usr/local/etc/neutron/plugins/ml2/conf_onos.ini
 
+Neutron driver arg-parsing issue
+
+    # For some reason, the VTN Neutron plugin isn't getting its arguments from neutron
+    emacs /usr/local/lib/python2.7/dist-packages/networking_onos/plugins/ml2/driver.py
+        hard-code self.onos_path and self.onos_auth
+
 Compute node that has the ONOS Container
 
     # we need NAT rule so the neutron vtn plugin can talk to onos
     # change 172.17.0.2 to the IP address for the ONOS container (use "docker inspect")
     iptables -t nat -A PREROUTING -i br-ex -p tcp --dport 8101 -j DNAT --to-destination 172.17.0.2
     iptables -t nat -A PREROUTING -i br-ex -p tcp --dport 8181 -j DNAT --to-destination 172.17.0.2
-
+    iptables -t nat -A PREROUTING -i br-ex -p tcp --dport 6653 -j DNAT --to-destination 172.17.0.2
+
 Compute nodes (all of them):
 
     systemctl stop neutron-plugin-openvswitch-agent
@@ -44,6 +57,11 @@
     service openvswitch-switch restart
     ovs-vsctl del-br br-int
 
+nm node:
+
+    # neutron-dhcp-agent causes VTN app to throw port errors, because XOS uses --no-gateway
+    systemctl stop neutron-dhcp-agent.service 
+
 VTN doesn't seem to like cloudlab's networks (flat-net-1, ext-net, etc). You might have to delete them all. I've placed a script in xos/scripts/ called destroy-all-networks.sh that will automate tearing down all of cloudlab's neutron networks.
 
 For development, I suggest using the bash configuration (remember to start the ONOS observer manually) so that 
@@ -51,3 +69,7 @@
 
 Problems:
 * If you have more than one compute node, then the node that isn't running ONOS VTN will report as incomplete in VTN. This is because the openvswitch is trying to contact VTN on 172.17.0.2:6653. 
+
+Notes:
+* Adding use_vtn=True to the [networking] section in the XOS config file has two effects: 1) it sets the gateway in sync_controller_networks, and 2) it disables automatic creation of nat-net for new slices. This is because VTN will fail if there is no gateway on a network, and because we don't have nat-net under the VTN configuration.
+* When using ovs-ofctl to look at flow rules, if you get a protocol error, try "ovs-ofctl show -O OpenFlow13 br-int".
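
The use_vtn flag added to xos_common_config above is read through xos.config.Config as "networking_use_vtn" (section plus key). A minimal sketch of how the rest of this change consumes it, mirroring the getattr() calls added in model_policy_Slice.py and sync_controller_networks.py further down:

    from xos.config import Config

    # section [networking], key use_vtn -> attribute networking_use_vtn;
    # default to False when the option is absent
    use_vtn = getattr(Config(), "networking_use_vtn", False)
    if use_vtn:
        print "VTN networking enabled"
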
diff --git a/xos/configurations/cord/dataplane/change_controller.sh b/xos/configurations/cord/dataplane/change_controller.sh
new file mode 100755
index 0000000..2b961ee
--- /dev/null
+++ b/xos/configurations/cord/dataplane/change_controller.sh
@@ -0,0 +1,13 @@
+#! /bin/bash
+
+# put IP address of node running ONOS VTN App here
+DESIRED_CONTROLLER="tcp:130.127.133.24:6653"
+
+while [[ 1 ]]; do
+    CONTROLLER=`ovs-vsctl get-controller br-int`
+    if [[ "$CONTROLLER" == "tcp:172.17.0.2:6653" ]]; then
+       ovs-vsctl set-controller br-int $DESIRED_CONTROLLER
+       echo "changed controller from $CONTROLLER to $DESIRED_CONTROLLER"
+    fi
+    sleep 10s
+done
diff --git a/xos/configurations/devel/Makefile b/xos/configurations/devel/Makefile
index 30960fc..60a4cdd 100644
--- a/xos/configurations/devel/Makefile
+++ b/xos/configurations/devel/Makefile
@@ -9,7 +9,7 @@
 opencloud: common_opencloud xos
 
 xos:
-	rm ../../xos_configuration/*
+	if [ -a ../../xos_configuration/* ]; then rm ../../xos_configuration/*; fi
 	cp ../common/xos_common_config ../../xos_configuration/
 	echo "# Autogenerated -- do not edit" > Dockerfile
 	cat ../common/Dockerfile.common Dockerfile.devel >> Dockerfile
diff --git a/xos/configurations/frontend/Makefile b/xos/configurations/frontend/Makefile
index e30877a..96b0779 100644
--- a/xos/configurations/frontend/Makefile
+++ b/xos/configurations/frontend/Makefile
@@ -6,7 +6,7 @@
 all: frontend
 
 frontend:
-	rm ../../xos_configuration/*
+	if [ -a ../../xos_configuration/* ]; then rm ../../xos_configuration/*; fi
 	sudo apt-get -y install httpie
 	cat ../common/Dockerfile.common Dockerfile.frontend > Dockerfile
 	cp ../common/xos_common_config ../../xos_configuration/
@@ -16,7 +16,7 @@
 	echo $(RUNNING_CONTAINER)
 
 interactive:
-	rm ../../xos_configuration/*
+	if [ -a ../../xos_configuration/* ]; then rm ../../xos_configuration/*; fi
 	cat ../common/Dockerfile.common Dockerfile.frontend > Dockerfile
 	cp ../common/xos_common_config ../../xos_configuration/
 	#cp ../cord/xos_cord_config ../../xos_configuration/
diff --git a/xos/configurations/test/Makefile b/xos/configurations/test/Makefile
index b1cca93..75143fa 100644
--- a/xos/configurations/test/Makefile
+++ b/xos/configurations/test/Makefile
@@ -1,6 +1,7 @@
 MYIP:=$(shell hostname -i)
 
 test: common_cloudlab
+	if [ -a ../../xos_configuration/* ]; then rm ../../xos_configuration/*; fi
 	cat ../common/Dockerfile.common Dockerfile.test > Dockerfile
 	cd ../../..; sudo docker build -t xos -f xos/configurations/test/Dockerfile .
 	# sudo docker run -d --add-host="ctl:$(MYIP)" -p 9999:8000 xos
diff --git a/xos/model_policies/model_policy_Slice.py b/xos/model_policies/model_policy_Slice.py
index 308d8cb..dfdcb4f 100644
--- a/xos/model_policies/model_policy_Slice.py
+++ b/xos/model_policies/model_policy_Slice.py
@@ -1,3 +1,5 @@
+from xos.config import Config
+
 def handle_delete(slice):
     from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
     from collections import defaultdict
@@ -15,6 +17,9 @@
     from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
     from collections import defaultdict
 
+    # only create nat_net if not using VTN
+    support_nat_net = not getattr(Config(), "networking_use_vtn", False)
+
     print "MODEL POLICY: slice", slice
 
     # slice = Slice.get(slice_id)
@@ -49,8 +54,8 @@
                 public_nets.append(network)
             elif network.template.name == 'Private':
                 private_nets.append(network)
-        if not public_nets:
-                    # ensure there is at least one public network, and default it to dedicated
+        if support_nat_net and (not public_nets):
+            # ensure there is at least one public network, and default it to dedicated
             nat_net = Network(
                     name = slice.name+'-nat',
                         template = NetworkTemplate.objects.get(name='Public shared IPv4'),
@@ -78,7 +83,7 @@
                 public_net_slice = net_slice
             elif net_slice.network in private_nets:
                 private_net_slice = net_slice
-        if not public_net_slice:
+        if support_nat_net and (not public_net_slice):
             public_net_slice = NetworkSlice(slice=slice, network=public_nets[0])
             public_net_slice.save()
             print "MODEL POLICY: slice", slice, "made public_net_slice"
diff --git a/xos/observers/base/SyncInstanceUsingAnsible.py b/xos/observers/base/SyncInstanceUsingAnsible.py
index 5bb8250..81f6632 100644
--- a/xos/observers/base/SyncInstanceUsingAnsible.py
+++ b/xos/observers/base/SyncInstanceUsingAnsible.py
@@ -113,7 +113,7 @@
             key_name = instance.parent.slice.service.private_key_fn
 
         if not os.path.exists(key_name):
-            raise Exception("Node key %s does not exist" % node_key_name)
+            raise Exception("Node key %s does not exist" % key_name)
 
         key = file(key_name).read()
 
@@ -179,6 +179,43 @@
 
         o.save()
 
-    def delete_record(self, m):
-        pass
+    def delete_record(self, o):
+        try:
+            controller = o.get_controller()
+            controller_register = json.loads(o.node.site_deployment.controller.backend_register)
 
+            if (controller_register.get('disabled',False)):
+                raise InnocuousException('Controller %s is disabled'%o.node.site_deployment.controller.name)
+        except AttributeError:
+            pass
+
+        instance = self.get_instance(o)
+        if isinstance(instance, basestring):
+            # sync to some external host
+
+            # XXX - this probably needs more work...
+
+            fields = { "hostname": instance,
+                       "instance_id": "ubuntu",     # this is the username to log into
+                       "private_key": service.key,
+                     }
+        else:
+            # sync to an XOS instance
+            fields = self.get_ansible_fields(instance)
+
+            fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.map_delete_inputs(o))
+
+        fields['delete']=True
+        res = self.run_playbook(o,fields)
+        try:
+            self.map_delete_outputs(o,res)
+        except AttributeError:
+            pass
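
The new delete_record drives deletion through the same Ansible path as sync_record: an optional map_delete_inputs(o) hook on the subclass is merged into the playbook fields, 'delete' is forced to True, and map_delete_outputs(o, res) is called afterwards only if the subclass defines it (the AttributeError guard makes both hooks optional). A minimal sketch of a step using that contract -- SyncExampleTenant is hypothetical and the import path is assumed from the repo layout; the real example is sync_monitoringchannel.py below:

    from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible

    class SyncExampleTenant(SyncInstanceUsingAnsible):
        # hypothetical observer step; see sync_monitoringchannel.py for a real one

        def map_delete_inputs(self, o):
            # extra playbook vars for the delete run, merged on top of the
            # fields built by the base class
            return {"unique_id": o.id, "delete": True}

        def map_delete_outputs(self, o, res):
            # optional: inspect the playbook result after deletion
            pass
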
diff --git a/xos/observers/monitoring_channel/monitoring_channel_observer_config b/xos/observers/monitoring_channel/monitoring_channel_observer_config
index 922a019..5657e1d 100644
--- a/xos/observers/monitoring_channel/monitoring_channel_observer_config
+++ b/xos/observers/monitoring_channel/monitoring_channel_observer_config
@@ -32,7 +32,8 @@
 pretend=False
 backoff_disabled=True
 save_ansible_output=True
-proxy_ssh=True
+# set proxy_ssh to false on cloudlab
+proxy_ssh=False
 full_setup=True
 
 [feefie]
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
index ad6564e..78ad45e 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
@@ -70,5 +70,7 @@
 
         #o.last_ansible_hash = ansible_hash
 
-    def delete_record(self, m):
-        pass
+    def map_delete_inputs(self, o):
+        fields = {"unique_id": o.id,
+                  "delete": True}
+        return fields
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
index 3fbd569..c2705ba 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
@@ -22,6 +22,14 @@
         {% endfor %}
 
   tasks:
+{% if delete %}
+  - name: Remove tenant
+# FIXME: Adding dummy template action to avoid "action attribute missing in task" error
+    template: src=/opt/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+    notify:
+     - stop monitoring-channel
+     - remove container
+{% else %}
 {% if full_setup %}
   - name: Docker repository
     copy: src=/opt/xos/observers/monitoring_channel/files/docker.list
@@ -75,16 +83,17 @@
 
   - name: Make sure Monitoring channel service is running
     service: name=monitoring-channel-{{ unique_id }} state=started
+{% endif %}
 
   handlers:
   - name: restart monitoring-channel
-    shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service vcpe-{{ unique_id }} start
+    shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service monitoring-channel-{{ unique_id }} start
 
   - name: stop monitoring-channel
     service: name=monitoring-channel-{{ unique_id }} state=stopped
 
   - name: remove container
-    docker: name=monitoring-channel-{{ unique_id }} state=absent image=docker-vcpe
+    docker: name=monitoring-channel-{{ unique_id }} state=absent image=monitoring-channel
 
   - name: start monitoring-channel
     service: name=monitoring-channel-{{ unique_id }} state=started
diff --git a/xos/observers/vcpe/vcpe_observer_config b/xos/observers/vcpe/vcpe_observer_config
index afd1501..d2c9239 100644
--- a/xos/observers/vcpe/vcpe_observer_config
+++ b/xos/observers/vcpe/vcpe_observer_config
@@ -33,7 +33,8 @@
 pretend=False
 backoff_disabled=True
 save_ansible_output=True
-proxy_ssh=True
+# set proxy_ssh to false on cloudlab
+proxy_ssh=False
 full_setup=True
 
 [feefie]
diff --git a/xos/openstack_observer/steps/sync_controller_networks.py b/xos/openstack_observer/steps/sync_controller_networks.py
index b646936..ad1604b 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.py
+++ b/xos/openstack_observer/steps/sync_controller_networks.py
@@ -12,6 +12,7 @@
 from util.logger import observer_logger as logger
 from observer.ansible import *
 from openstack.driver import OpenStackDriver
+from xos.config import Config
 import json
 
 import pdb
@@ -33,6 +34,17 @@
         cidr = '%d.%d.%d.%d/24'%(a,b,c,d)
         return cidr
 
+    def alloc_gateway(self, uuid):
+        # 16 bits only
+        uuid_masked = uuid & 0xffff
+        a = 10
+        b = uuid_masked >> 8
+        c = uuid_masked & 0xff
+        d = 1
+
+        gateway = '%d.%d.%d.%d'%(a,b,c,d)
+        return gateway
+
 
     def save_controller_network(self, controller_network):
         network_name = controller_network.network.name
@@ -51,6 +63,8 @@
                     'subnet_name':subnet_name,
                     'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
                     'cidr':cidr,
+                    'gateway':self.alloc_gateway(controller_network.pk),
+                    'use_vtn':getattr(Config(), "networking_use_vtn", False),
                     'delete':False	
                     }
         return network_fields
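
alloc_gateway masks the controller network's primary key to 16 bits and maps the two bytes into the second and third octets of a 10.x.y.1 address, so the gateway presumably lands on .1 of the /24 that alloc_subnet generates for the same network. A small standalone check of the arithmetic, using an arbitrary pk of 4660 (0x1234):

    # standalone reproduction of alloc_gateway from the hunk above
    def alloc_gateway(pk):
        uuid_masked = pk & 0xffff      # keep only the low 16 bits of the pk
        b = uuid_masked >> 8           # high byte -> second octet
        c = uuid_masked & 0xff         # low byte  -> third octet
        return '10.%d.%d.1' % (b, c)

    assert alloc_gateway(4660) == '10.18.52.1'
    # pk values 65536 apart collide, since only 16 bits are used
    assert alloc_gateway(4660 + 0x10000) == alloc_gateway(4660)
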
diff --git a/xos/openstack_observer/steps/sync_controller_networks.yaml b/xos/openstack_observer/steps/sync_controller_networks.yaml
index bbf8ec2..b885516 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.yaml
+++ b/xos/openstack_observer/steps/sync_controller_networks.yaml
@@ -28,7 +28,11 @@
         state=absent
         {% else %}
         state=present
+        {% if use_vtn %}
+        gateway_ip={{ gateway }}
+        {% else %}
         no_gateway=true
+        {% endif %}
         dns_nameservers=8.8.8.8
         cidr={{ cidr }}
         {% endif %}
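
Together with the new 'gateway' and 'use_vtn' fields from sync_controller_networks.py, the subnet stanza now emits gateway_ip only when VTN is in use and keeps no_gateway=true otherwise. A quick sketch of that branch in isolation, assuming plain jinja2 (the observer's own template handling may differ):

    from jinja2 import Template

    snippet = Template(
        "{% if use_vtn %}gateway_ip={{ gateway }}{% else %}no_gateway=true{% endif %}")

    print snippet.render(use_vtn=True, gateway="10.18.52.1")   # gateway_ip=10.18.52.1
    print snippet.render(use_vtn=False)                        # no_gateway=true
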