Merge branch 'master' of github.com:open-cloud/xos
diff --git a/containers/xos/Dockerfile.devel b/containers/xos/Dockerfile.devel
index fca77f4..f819ef3 100644
--- a/containers/xos/Dockerfile.devel
+++ b/containers/xos/Dockerfile.devel
@@ -42,7 +42,7 @@
     django-crispy-forms \
     django-encrypted-fields \
     django-extensions \
-    django-filter \
+    django-filter==0.11.0 \
     django-geoposition \
     django-ipware \
     django_rest_swagger \
diff --git a/xos/configurations/common/Makefile.devstack b/xos/configurations/common/Makefile.devstack
index 2764b28..e60434e 100644
--- a/xos/configurations/common/Makefile.devstack
+++ b/xos/configurations/common/Makefile.devstack
@@ -2,7 +2,7 @@
 DEVSTACK_ROOT:=~/devstack
 SETUPDIR:=../setup
 
-all: prereqs admin-openrc flat_name nodes_yaml public_key private_key ceilometer_url node_key net_fix
+all: prereqs admin-openrc flat_name nodes_yaml public_key private_key ceilometer_url other_keys net_fix
 
 prereqs:
 	make -f Makefile.prereqs
@@ -15,10 +15,10 @@
 
 flat_name:
 	echo private|tr -d '\n' > $(SETUPDIR)/flat_net_name
-	bash -c "source admin-openrc.sh; openstack network set --share private"
+	bash -c "source $(SETUPDIR)/admin-openrc.sh; openstack network set --share private"
 
 nodes_yaml:
-	bash ./make-nodes-yaml.sh > $(SETUPDIR)/nodes.yaml
+	export SETUPDIR=$(SETUPDIR); bash ./make-nodes-yaml.sh
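+# (make-nodes-yaml.sh now picks SETUPDIR up from the environment and is expected to write
+# $(SETUPDIR)/nodes.yaml itself, instead of having its stdout redirected into that file)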
 
 ceilometer_url:
 	echo http://`hostname -i`/xosmetering/ > $(SETUPDIR)/ceilometer_url
@@ -32,10 +32,11 @@
 ~/.ssh/id_rsa.pub:
 	cat /dev/zero | ssh-keygen -q -N ""
 
-node_key:
-	sudo cat ~/.ssh/id_rsa > $(SETUPDIR)/node_key
-	sudo cat ~/.ssh/id_rsa.pub > $(SETUPDIR)/node_key.pub
+other_keys: public_key private_key
+	cp $(SETUPDIR)/id_rsa $(SETUPDIR)/node_key
+	cp $(SETUPDIR)/id_rsa.pub $(SETUPDIR)/node_key.pub
+	cp $(SETUPDIR)/id_rsa.pub $(SETUPDIR)/padmin_public_key
 
 net_fix:
 	sudo devstack/net-fix.sh
-	bash -c "source admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
+	bash -c "source $(SETUPDIR)/admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
diff --git a/xos/configurations/cord/README-VTN.md b/xos/configurations/cord/README-VTN.md
index 9827b0e..38fc2b4 100644
--- a/xos/configurations/cord/README-VTN.md
+++ b/xos/configurations/cord/README-VTN.md
@@ -41,35 +41,26 @@
     emacs /usr/local/lib/python2.7/dist-packages/networking_onos/plugins/ml2/driver.py
         hard-code self.onos_path and self.onos_auth
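+        For example, the hard-coded values might end up looking like the lines below
+        (illustrative only; the exact URL path and credentials depend on your ONOS/VTN install):
+            self.onos_path = "http://<onos-host>:8181/<vtn REST path>"    # placeholder URL
+            self.onos_auth = ("karaf", "karaf")    # assumption: (user, password); karaf/karaf is the ONOS default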
     
-Compute node that has the ONOS Container
+Compute nodes and nm nodes:
 
-    # we need NAT rule so the neutron vtn plugin can talk to onos
-    # change 172.17.0.2 to the IP address for the ONOS container (use "docker inspect")
-    iptables -t nat -A PREROUTING -i br-ex -p tcp --dport 8101 -j DNAT --to-destination 172.17.0.2
-    iptables -t nat -A PREROUTING -i br-ex -p tcp --dport 8181 -j DNAT --to-destination 172.17.0.2
-    iptables -t nat -A PREROUTING -i br-ex -p tcp --dport 6653 -j DNAT --to-destination 172.17.0.2
-    
-Compute nodes (all of them):
-
-    systemctl stop neutron-plugin-openvswitch-agent
-    emacs /usr/share/openvswitch/scripts/ovs-ctl
-        update settings as per vtn docs to make port 6640 visible
-    service openvswitch-switch restart
-    ovs-vsctl del-br br-int
-
-nm node:
-
-    # neutron-dhcp-agent causes VTN app to throw port errors, because XOS uses --no-gateway
-    systemctl stop neutron-dhcp-agent.service 
+    cd xos/configurations/cord/dataplane
+    ./generate-bm.sh > hosts-bm
+    ansible-playbook -i hosts-bm dataplane-vtn.yaml
+    # the playbook will:
+    #  1) turn off neutron openvswitch-agent
+    #  2) set openvswitch to listen on port 6641
+    #  3) restart openvswitch
+    #  4) delete any existing br-int bridge
+    #  5) [nm only] turn off neutron-dhcp-agent
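+
+hosts-bm is a plain Ansible inventory consumed by the ansible-playbook command above; a
+minimal sketch of what the generated file might contain (hostnames are placeholders -- the
+actual list comes from generate-bm.sh):
+
+    node1.cord.example.org
+    node2.cord.example.org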
 
 VTN doesn't seem to like cloudlab's networks (flat-net-1, ext-net, etc.). You might have to delete them all. I've placed a script in xos/scripts/ called destroy-all-networks.sh that automates tearing down all of cloudlab's Neutron networks.
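+
+A simplified sketch of the kind of cleanup that script automates (the real script likely
+also has to remove ports, subnets and router interfaces before the networks will delete):
+
+    source admin-openrc.sh   # wherever your admin credentials file lives
+    for net in $(neutron net-list -f value -c id); do
+        neutron net-delete $net
+    done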
 
 For development, I suggest using the bash configuration (remember to start the ONOS observer manually) so that 
 there aren't a bunch of preexisting Neutron networks and nova instances to get in the way. 
 
-Problems:
-* If you have more than one compute node, then the node that isn't running ONOS VTN will report as incomplete in VTN. This is because the openvswitch is trying to contact VTN on 172.17.0.2:6653. 
-
 Notes:
+* I've configured the Open vSwitch switches to listen on port 6641 instead of port 6640. The VTN app itself
+listens on 6640, and since we now run it in docker 'host' networking mode, it would conflict with an
+Open vSwitch instance that was also listening on 6640. (See the ovs-vsctl example after this list.)
 * Adding use_vtn=True to the [networking] section in the XOS config file has two effects: 1) it sets the gateway in sync_controller_networks, and 2) it disables automatic creation of nat-net for new slices. This is because VTN will fail if there is no gateway on a network, and because we don't have nat-net under the VTN configuration. (See the config snippet after this list.)
 * When using ovs-ofctl to look at flow rules, if you get a protocol error, try "ovs-ofctl show -O OpenFlow13 br-int".
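+
+For the port note above, one standard way to point the OVSDB manager at 6641 by hand (the
+playbook handles this automatically and may do it differently, e.g. via ovs-ctl options;
+shown here only for manual checking):
+
+    ovs-vsctl set-manager ptcp:6641
+    ovs-vsctl get-manager      # show the manager target(s) ovsdb-server is configured with
+
+For the use_vtn note above, the change amounts to a one-line flag in the XOS config file:
+
+    [networking]
+    use_vtn=True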
diff --git a/xos/configurations/devel/Makefile b/xos/configurations/devel/Makefile
index 5112311..19e9abd 100644
--- a/xos/configurations/devel/Makefile
+++ b/xos/configurations/devel/Makefile
@@ -2,7 +2,7 @@
 
 cloudlab: common_cloudlab xos
 
-devstack: upgrade_pkgs common_devstack devstack_net_fix xos
+devstack: upgrade_pkgs common_devstack xos
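+# (assumption: the devstack network fix now runs from the net_fix target in
+# ../common/Makefile.devstack, so a local devstack_net_fix target is no longer needed)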
 
 xos:
 	sudo MYIP=$(MYIP) docker-compose up -d
@@ -34,9 +34,5 @@
 enter-synchronizer:
 	sudo docker exec -it devel_xos_synchronizer_openstack_1 bash
 
-devstack_net_fix:
-	sudo ../common/devstack/net-fix.sh
-	sudo bash -c "source ../setup/admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
-
 upgrade_pkgs:
 	sudo pip install httpie --upgrade
diff --git a/xos/configurations/frontend/Makefile b/xos/configurations/frontend/Makefile
index c061bc1..4c95f90 100644
--- a/xos/configurations/frontend/Makefile
+++ b/xos/configurations/frontend/Makefile
@@ -1,7 +1,7 @@
 MYIP:=$(shell hostname -i)
 
 frontend:
-	sudo make -f ../common/Makefile.prereq
+	sudo make -f ../common/Makefile.prereqs
 	sudo docker-compose up -d
 	bash ../common/wait_for_xos.sh
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/frontend/sample.yaml
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
index c2705ba..6c5fc8c 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
@@ -67,6 +67,11 @@
 
   - name: ceilometer proxy config
     template: src=/opt/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+    notify:
+#    - restart monitoring-channel
+     - stop monitoring-channel
+     - remove container
+     - start monitoring-channel
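+     # (the stop/remove/start sequence recreates the container so it picks up the
+     # regenerated proxy config; assumption -- a plain restart would keep the old container)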
 
   - name: Monitoring channel upstart
     template: src=/opt/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2 dest=/etc/init/monitoring-channel-{{ unique_id }}.conf
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
index eb647b6..6c7166f 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
@@ -57,6 +57,32 @@
         {% endfor %}
 
   tasks:
+  - name: Check whether the vcpe_stats_notifier job is already running (the [v] in the pgrep pattern keeps pgrep from matching this shell command itself)
+    shell: pgrep -f [v]cpe_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+#  - name: DEBUG
+#    debug: var=cron_job_pids_count.stdout
+
+  - name: make sure ~/bin exists
+    file: path=~/bin state=directory owner=root group=root
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/observers/vcpe/vcpe_stats_notifier.py
+      dest=~/bin/vcpe_stats_notifier.py
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Initiate vcpe_stats_notifier cron job
+    command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
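+    # fire-and-forget: with poll: 0 Ansible starts the command in the background and does
+    # not wait for it to finish; async sets the maximum allowed runtime in seconds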
+    async: 9999999999999999
+    poll: 0
+    when: cron_job_pids_count.stdout == "0"
+
   - name: vCPE basic dnsmasq config
     copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
     notify: