Merge branch 'xos_auto_scaling_app_changes' of github.com:open-cloud/xos into xos_auto_scaling_app_changes
diff --git a/README.md b/README.md
index 7d279c5..7a72658 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,12 @@
 For a general introduction to XOS and how it is used in CORD, see
 http://guide.xosproject.org. The "Developer Guide" at that URL is
 especially helpful, although it is sync'ed with the previous
-release (currently Burwell), which likely lags what's in master.
+release (Burwell), which lags what's in master.
 Additional design notes, presentations, and other collateral are 
-also available at http://xosproject.org.
+also available at http://xosproject.org and http://cord.onosproject.org.
 
 The quickest way to get started is to look at the collection of
-"stock" configurations in *xos/configurations*. The *cord* 
+canned configurations in *xos/configurations*. The *cord* 
 configuration in that directory corresponds to our current 
 CORD development environment, and the README you'll find there
 will help you get started.
diff --git a/xos-apps/auto-scale/README.md b/xos-apps/auto-scale/README.md
index fa66af6..3985719 100644
--- a/xos-apps/auto-scale/README.md
+++ b/xos-apps/auto-scale/README.md
@@ -9,7 +9,7 @@
 ## To verify the autoscaling application:
 1) Login to cloudlab compute nodes <br/>
 2) On each compute node, open /etc/ceilometer/pipeline.yaml file<br/>
-3) Change the polling interval for "cpu_source" meters from 600 to 60 as shown below<br/>
+3) Change the polling interval for "cpu_source" meters from 600 to the interval you wish (e.g. 60) as shown below.<br/>
 From:
 ```
     - name: cpu_source
@@ -29,6 +29,36 @@
       sinks:
           - cpu_sink
 ```
+3b) Also ensure the publishers list in "cpu_sink" contains the URL udp://<IP of Ceilometer PUB-SUB>:5004 as shown below.<br/>
+```
+    - name: cpu_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                target:
+                    name: "cpu_util"
+                    unit: "%"
+                    type: "gauge"
+                    scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
+      publishers:
+          - notifier://
+```
+
+To:
+```
+    - name: cpu_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                target:
+                    name: "cpu_util"
+                    unit: "%"
+                    type: "gauge"
+                    scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
+      publishers:
+          - notifier://
+          - udp://10.11.10.1:5004
+```
 4) sudo service ceilometer-agent-compute restart<br/>
 5) With this change, the autoscaling application should start receiving the CPU utilization samples every 60 seconds<br/>
 6) The REST API to retrieve the cpu utilization samples from autoscaling application: http://<app_ip>:9991/autoscaledata 
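+
+For example, once the app is running (with <app_ip> replaced by the host
+running the autoscaling app):
+```
+curl http://<app_ip>:9991/autoscaledata
+```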
@@ -44,4 +74,4 @@
   host: 'http://<your.cord.installation.ip>:9991'
 }
 ```
-- From `xos-apps/auto-scale/gui` run `npm start`
\ No newline at end of file
+- From `xos-apps/auto-scale/gui` run `npm start`
diff --git a/xos-apps/auto-scale/xos_auto_scaling_app.py b/xos-apps/auto-scale/xos_auto_scaling_app.py
index f53d417..ee51bb3 100644
--- a/xos-apps/auto-scale/xos_auto_scaling_app.py
+++ b/xos-apps/auto-scale/xos_auto_scaling_app.py
@@ -48,7 +48,7 @@
             print 'SRIKANTH: HTTP error %s' % e.reason
             break
         except urllib2.URLError, e:
-            print 'SRIKANTH: URL error %(reason)s' % e.reason
+            print 'SRIKANTH: URL error %s' % e.reason
             pass
     return monitoring_channel
 
@@ -65,7 +65,7 @@
 def periodic_print():
      print_samples()
-     #Print every 1minute
+     #Print every 20 seconds
-     threading.Timer(60, periodic_print).start()
+     threading.Timer(20, periodic_print).start()
 
 
 CPU_UPPER_THRESHOLD = 80 #80%
@@ -196,7 +196,7 @@
               else:
                   projects_map[project]['lthreadshold_count'] = 0
                   projects_map[project]['alarm'] = INITIAL_STATE
-     threading.Timer(60, periodic_cpu_threshold_evaluator).start()
+     threading.Timer(20, periodic_cpu_threshold_evaluator).start()
 
 def read_notification_from_ceilometer(host,port):
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
@@ -206,7 +206,18 @@
       data, source = udp.recvfrom(64000)
       try:
          sample = msgpack.loads(data, encoding='utf-8')
-         if sample['counter_name'] != 'cpu_util':
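+         # 'instance' samples carry lifecycle events; on a delete event, drop
+         # the instance from the per-project resource map so it no longer
+         # figures in the scaling evaluation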
+         if sample['counter_name'] == 'instance':
+             if 'delete' in sample['resource_metadata']['event_type']:
+                  xosTenantInfo = getXosTenantInfo(sample['project_id'])
+                  xosResourceInfo = getXosInstanceInfo(sample['resource_id'])
+                  print "SRIKANTH: Project %s Instance %s is getting deleted" % (xosTenantInfo['slice'] if xosTenantInfo['slice'] else sample['project_id'], xosResourceInfo)
+                  if sample['project_id'] not in projects_map.keys():
+                       continue
+                  if sample['resource_id'] not in projects_map[sample['project_id']]['resources'].keys():
+                       continue
+                  projects_map[sample['project_id']]['resources'].pop(sample['resource_id'], None)
+             continue
+         elif sample['counter_name'] != 'cpu_util':
               continue
          if sample['project_id'] not in projects_map.keys():
               projects_map[sample['project_id']] = {}
@@ -273,10 +284,18 @@
    subscribe_data = {"sub_info":"cpu_util","app_id":"xos_auto_scale","target":"udp://10.11.10.1:12346"}
    subscribe_url = ceilometer_url + 'v2/subscribe'
    response = requests.post(subscribe_url, data=json.dumps(subscribe_data))
-   print 'SRIKANTH: Ceilometer Subscription status:%s' % response.text
+   print 'SRIKANTH: Ceilometer meter "cpu_util" Subscription status:%s' % response.text
    #TODO: Fix the typo in 'sucess'
    if (not 'sucess' in response.text) and (not 'already exists' in response.text):
-       print 'SRIKANTH: Ceilometer Subscription unsuccessful...Exiting'
+       print 'SRIKANTH: Ceilometer meter "cpu_util" Subscription unsuccessful...Exiting'
+       return
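+   # also subscribe to 'instance' lifecycle notifications so deleted instances
+   # can be removed from projects_map (see read_notification_from_ceilometer)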
+   subscribe_data = {"sub_info":"instance","app_id":"xos_auto_scale2","target":"udp://10.11.10.1:12346"}
+   subscribe_url = ceilometer_url + 'v2/subscribe'
+   response = requests.post(subscribe_url, data=json.dumps(subscribe_data))
+   print 'SRIKANTH: Ceilometer meter "instance" Subscription status:%s' % response.text
+   #TODO: Fix the typo in 'sucess'
+   if (not 'sucess' in response.text) and (not 'already exists' in response.text):
+       print 'SRIKANTH: Ceilometer meter "instance"Subscription unsuccessful...Exiting'
        return
    periodic_cpu_threshold_evaluator()
    periodic_print()
diff --git a/xos/configurations/cord/Dockerfile.cord b/xos/configurations/cord/Dockerfile.cord
index 8734eef..a436871 100644
--- a/xos/configurations/cord/Dockerfile.cord
+++ b/xos/configurations/cord/Dockerfile.cord
@@ -20,6 +20,7 @@
 RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/vcpe/vcpe_observer_config
 RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/monitoring_channel/monitoring_channel_observer_config
 ADD xos/configurations/cord/virtualbng.json /root/setup/
+ADD xos/configurations/cord/vtn-network-cfg.json /root/setup/
 
 CMD /usr/bin/make -C /opt/xos/configurations/cord -f Makefile.inside; /bin/bash
 
diff --git a/xos/configurations/cord/README-VTN.md b/xos/configurations/cord/README-VTN.md
new file mode 100644
index 0000000..d585d5c
--- /dev/null
+++ b/xos/configurations/cord/README-VTN.md
@@ -0,0 +1,45 @@
+VTN notes:
+
+inside the xos container:
+
+    python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/samples/vtn.yaml
+
+ctl node:
+
+    # set ONOS_VTN_HOSTNAME to the host where the VTN container was installed
+    ONOS_VTN_HOSTNAME="cp-2.smbaker-xos5.xos-pg0.clemson.cloudlab.us"
+    apt-get -y install python-pip
+    pip install -U setuptools pip
+    git clone https://github.com/openstack/networking-onos.git
+    cd networking-onos
+    python setup.py install
+    # the above fails the first time with an error about pbr.json;
+    # running it a second time succeeded, but there may still be an
+    # issue lurking...
+    cat > /usr/local/etc/neutron/plugins/ml2/conf_onos.ini <<EOF
+    [ml2_onos]
+    url_path = http://$ONOS_VTN_HOSTNAME:8181/onos/vtn
+    username = karaf
+    password = karaf
+    EOF
+    emacs /etc/neutron/plugins/ml2/ml2_conf.ini
+        update settings as per vtn docs ([ml2] and [ml2_type_vxlan] sections)
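+        # for example (a sketch; confirm exact values against the vtn docs):
+        #   [ml2]
+        #   tenant_network_types = vxlan
+        #   [ml2_type_vxlan]
+        #   vni_ranges = 1001:2000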
+    systemctl restart neutron-server
+
+Compute node that hosts the ONOS container:
+
+    # we need NAT rule so the neutron vtn plugin can talk to onos
+    # change 172.17.0.2 to the IP address for the ONOS container (use "docker inspect")
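+    # e.g. (assuming the container is named "onos"):
+    #   docker inspect --format '{{ .NetworkSettings.IPAddress }}' onos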
+    iptables -t nat -A PREROUTING -i br-ex -p tcp --dport 8101 -j DNAT --to-destination 172.17.0.2
+
+Compute nodes (all of them):
+
+    systemctl stop neutron-plugin-openvswitch-agent
+    /usr/share/openvswitch/scripts/ovs-ctl
+        update settings as per vtn docs to make port 6640 visible
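+        # e.g., one way to make ovsdb listen on 6640 (check the vtn docs):
+        #   ovs-vsctl set-manager ptcp:6640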
+    service openvswitch-switch restart
+
+VTN doesn't seem to like cloudlab's networks (flat-net-1, ext-net, etc). You might have to delete them all.
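+
+For example (a sketch; exact names vary per site, and subnets/ports may need
+deleting first):
+
+    source /root/setup/admin-openrc.sh
+    neutron net-list
+    neutron net-delete flat-net-1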
+
+For development, I suggest using the bash configuration (remember to start the ONOS observer manually) so that 
+there aren't a bunch of preexisting Neutron networks and nova instances to get in the way. 
diff --git a/xos/configurations/cord/make-vtn-networkconfig-json.sh b/xos/configurations/cord/make-vtn-networkconfig-json.sh
old mode 100644
new mode 100755
index df94597..918674a
--- a/xos/configurations/cord/make-vtn-networkconfig-json.sh
+++ b/xos/configurations/cord/make-vtn-networkconfig-json.sh
@@ -37,8 +37,12 @@
     fi
 done
 
-NEUTRONIP="127.0.0.1"
-KEYSTONEIP="127.0.0.1"
+# get the openstack admin password and username
+source /root/setup/admin-openrc.sh
+
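+# neutron and keystone endpoints live on this node, so use its resolved address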
+HOSTNAME=`hostname`
+NEUTRONIP=`getent hosts $HOSTNAME | awk '{ print $1 }'`
+KEYSTONEIP=`getent hosts $HOSTNAME | awk '{ print $1 }'`
 
 cat >> $FN <<EOF
                 ]
@@ -49,8 +53,8 @@
                  "do_not_push_flows" : "true",
                  "neutron_server" : "http://$NEUTRONIP:9696/v2.0/",
                  "keystone_server" : "http://$KEYSTONEIP:5000/v2.0/",
-                 "user_name" : "admin",
-                 "password" : "passwd"
+                 "user_name" : "$OS_USERNAME",
+                 "password" : "$OS_PASSWORD"
              }
         }
     }
diff --git a/xos/configurations/devel/Makefile b/xos/configurations/devel/Makefile
index 216673d..eb5802a 100644
--- a/xos/configurations/devel/Makefile
+++ b/xos/configurations/devel/Makefile
@@ -4,7 +4,7 @@
 
 cloudlab: common_cloudlab xos
 
-devstack: common_devstack xos
+devstack: upgrade_pkgs common_devstack devstack_net_fix xos
 
 xos:
 	rm ../../xos_configuration/*
@@ -29,3 +29,10 @@
 
 enter:
 	sudo docker exec -t -i $(RUNNING_CONTAINER) bash
+
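+# give devstack VMs outbound connectivity by NAT'ing the private subnet
+# (see ../devstack/net-fix.sh) and pointing the subnet at public DNS servers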
+devstack_net_fix:
+	sudo ../devstack/net-fix.sh
+	sudo bash -c "source ../common/admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
+
+upgrade_pkgs:
+	sudo pip install httpie --upgrade
diff --git a/xos/configurations/devel/README.md b/xos/configurations/devel/README.md
index 47cc6e7..54a51a8 100644
--- a/xos/configurations/devel/README.md
+++ b/xos/configurations/devel/README.md
@@ -48,11 +48,11 @@
 ~/xos/xos/configurations/devel$ make devstack
 ```
 
-Note that there are some issues with the networking setup in this configuration;
-you will be able to create VMs but they are not accessible on the network.  However it is
-possible to log into a VM by first entering the appropriate network namespace.
-
 ## What you get
 
 XOS will be set up with a single Deployment and Site.  It should be in a state where
 you can create slices and associate instances with them.
+
+Note that there are some issues with the networking setup in this configuration:
+VMs do not have a working DNS configuration in `/etc/resolv.conf`.  If you fix this
+manually then everything should work.
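+
+For example, inside the VM (a minimal manual fix using Google's public DNS):
+
+```
+echo "nameserver 8.8.8.8" | sudo tee /etc/resolv.conf
+```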
diff --git a/xos/configurations/devstack/local.conf b/xos/configurations/devstack/local.conf
index 946d16e..dfcf07b 100644
--- a/xos/configurations/devstack/local.conf
+++ b/xos/configurations/devstack/local.conf
@@ -3,6 +3,7 @@
 
 DOWNLOAD_DEFAULT_IMAGES=false
 IMAGE_URLS="http://www.planet-lab.org/cord/trusty-server-multi-nic.img"
+LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
 
 disable_service n-net
 enable_service q-svc
diff --git a/xos/configurations/devstack/net-fix.sh b/xos/configurations/devstack/net-fix.sh
new file mode 100755
index 0000000..5b486bd
--- /dev/null
+++ b/xos/configurations/devstack/net-fix.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
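+# masquerade traffic from devstack's 172.24.4.0/24 network out the default
+# interface so that VMs can reach the external network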
+PRIMARY=$( route | grep default | awk '{print $NF}' )
+RULE="POSTROUTING -t nat -o $PRIMARY -s 172.24.4.0/24 -j MASQUERADE"
+
+iptables -C $RULE || iptables -A $RULE
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
index fb4b73d..3fbd569 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
@@ -31,7 +31,7 @@
     apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
 
   - name: install Docker
-    apt: name=lxc-docker-1.5.0 state=present update_cache=yes
+    apt: name=lxc-docker state=present update_cache=yes
 
   - name: install python-setuptools
     apt: name=python-setuptools state=present
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
index db6ecf5..fdc21da 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -65,7 +65,7 @@
     apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
 
   - name: install Docker
-    apt: name=lxc-docker-1.5.0 state=present update_cache=yes
+    apt: name=lxc-docker state=present update_cache=yes
 
   - name: install python-setuptools
     apt: name=python-setuptools state=present