Merge branch 'master' of github.com:open-cloud/xos
diff --git a/Dockerfile.cord b/Dockerfile.cord
index 5a2074d..ee0879d 100644
--- a/Dockerfile.cord
+++ b/Dockerfile.cord
@@ -3,3 +3,5 @@
 
 ADD xos/observers/vcpe/supervisor/vcpe-observer.conf /etc/supervisor/conf.d/
 RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/vcpe/vcpe_observer_config
+ADD xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf /etc/supervisor/conf.d/
+RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/monitoring_channel/monitoring_channel_observer_config
diff --git a/cloudlab-init.sh b/cloudlab-init.sh
index f275517..9a2c94a 100755
--- a/cloudlab-init.sh
+++ b/cloudlab-init.sh
@@ -22,6 +22,8 @@
 then
     cp ~/.ssh/id_rsa.pub xos/observers/vcpe/vcpe_public_key
     cp ~/.ssh/id_rsa     xos/observers/vcpe/vcpe_private_key
+    cp ~/.ssh/id_rsa.pub xos/observers/monitoring_channel/monitoring_channel_public_key
+    cp ~/.ssh/id_rsa     xos/observers/monitoring_channel/monitoring_channel_private_key
 fi
 
 sudo docker build -t xos .
@@ -34,6 +36,8 @@
 
 # OpenStack is using port 8000...
 MYIP=$( hostname -i )
+MYFLATLANIF=$( sudo bash -c "netstat -i" |grep flat|awk '{print $1}' )
+MYFLATLANIP=$( ifconfig $MYFLATLANIF | grep "inet addr" | awk -F: '{print $2}' | awk '{print $1}' )
 sudo docker run -d --add-host="ctl:$MYIP" -p 9999:8000 $IMAGE
 
 echo "Waiting for XOS to come up"
@@ -52,7 +56,17 @@
 sudo chmod a+r /tmp/admin-openrc.sh
 #sudo sed -i 's/:5000/:35357/' /tmp/admin-openrc.sh
 source /tmp/admin-openrc.sh
-http --auth $AUTH POST $XOS/xos/controllers/ name=CloudLab deployment=$XOS/xos/deployments/1/ backend_type=OpenStack version=Juno auth_url=$OS_AUTH_URL admin_user=$OS_USERNAME admin_tenant=$OS_TENANT_NAME admin_password=$OS_PASSWORD domain=Default
+
+if [ "$CORD" -ne 1 ]
+then
+     http --auth $AUTH POST $XOS/xos/controllers/ name=CloudLab deployment=$XOS/xos/deployments/1/ backend_type=OpenStack version=Kilo auth_url=$OS_AUTH_URL admin_user=$OS_USERNAME admin_tenant=$OS_TENANT_NAME admin_password=$OS_PASSWORD domain=Default
+else
+     sudo cp /root/setup/settings /tmp
+     sudo chmod a+r /tmp/settings
+     source /tmp/settings
+     source /tmp/admin-openrc.sh
+     http --auth $AUTH POST $XOS/xos/controllers/ name=CloudLab deployment=$XOS/xos/deployments/1/ backend_type=OpenStack version=Kilo auth_url=$OS_AUTH_URL admin_user=$OS_USERNAME admin_tenant=$OS_TENANT_NAME admin_password=$OS_PASSWORD domain=Default rabbit_host=$MYFLATLANIP rabbit_user=$RABBIT_USER rabbit_password=$RABBIT_PASS
+fi
 
 # Add controller to site
 http --auth $AUTH PATCH $XOS/xos/sitedeployments/1/ controller=$XOS/xos/controllers/1/
@@ -78,5 +92,5 @@
 if [ "$CORD" -ne 0 ]
 then
     DOCKER=$( sudo docker ps|grep $IMAGE|awk '{print $NF}' )
-    sudo docker exec $DOCKER bash -c "cd /opt/xos/tosca; python run.py padmin@vicci.org samples/cord-cloudlab.yaml"
+    sudo docker exec $DOCKER bash -c "cd /opt/xos/tosca; python run.py padmin@vicci.org samples/cord-cloudlab.yaml; python run.py padmin@vicci.org samples/ceilometer.yaml"
 fi
diff --git a/xos/ceilometer/models.py b/xos/ceilometer/models.py
index 50f921b..fbfecd3 100644
--- a/xos/ceilometer/models.py
+++ b/xos/ceilometer/models.py
@@ -27,6 +27,10 @@
 
     KIND = CEILOMETER_KIND
 
+    sync_attributes = ("private_ip", "private_mac",
+                       "ceilometer_ip", "ceilometer_mac",
+                       "nat_ip", "nat_mac",)
+
     default_attributes = {}
     def __init__(self, *args, **kwargs):
         ceilometer_services = CeilometerService.get_service_objects().all()
@@ -66,14 +70,30 @@
         return addresses
 
     @property
+    def nat_ip(self):
+        return self.addresses.get("nat", (None, None))[0]
+
+    @property
+    def nat_mac(self):
+        return self.addresses.get("nat", (None, None))[1]
+
+    @property
     def private_ip(self):
         return self.addresses.get("nat", (None, None))[0]
 
     @property
+    def private_mac(self):
+        return self.addresses.get("nat", (None, None))[1]
+
+    @property
     def ceilometer_ip(self):
         return self.addresses.get("ceilometer", (None, None))[0]
 
     @property
+    def ceilometer_mac(self):
+        return self.addresses.get("ceilometer", (None, None))[1]
+
+    @property
     def site_tenant_list(self):
         tenant_ids = Set()
         for sp in SitePrivilege.objects.filter(user=self.creator):
@@ -109,7 +129,7 @@
     def ceilometer_url(self):
         if not self.ceilometer_ip:
             return None
-        return "http://" + self.ceilometer_ip + "/uri/to/ceilometer/api/"
+        return "http://" + self.private_ip + ":8888/"
 
 def model_policy_monitoring_channel(pk):
     # TODO: this should be made in to a real model_policy
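For orientation, a minimal standalone sketch (not part of the commit) of the address bookkeeping the new properties rely on. It assumes addresses maps a network name to an (ip, mac) pair, which is what the .get(name, (None, None)) defaults above imply; all values are illustrative.

    # Sketch of the address-to-property mapping used by MonitoringChannel above.
    addresses = {
        "nat": ("10.0.0.5", "02:42:0a:00:00:05"),            # (ip, mac) per network
        "ceilometer": ("10.0.1.7", "02:42:0a:00:01:07"),
    }

    def pick(addresses, name, idx):
        # Same lookup as the model properties: ip is index 0, mac is index 1,
        # and None is returned while the port has not been assigned yet.
        return addresses.get(name, (None, None))[idx]

    private_ip = pick(addresses, "nat", 0)
    ceilometer_ip = pick(addresses, "ceilometer", 0)
    # Mirrors ceilometer_url above: guarded on ceilometer_ip, built from private_ip:8888.
    ceilometer_url = ("http://" + private_ip + ":8888/") if ceilometer_ip else None
    print private_ip, ceilometer_url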
diff --git a/xos/core/admin.py b/xos/core/admin.py
index 5d2996a..a5b89be 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -757,7 +757,7 @@
 
 class ControllerAdmin(XOSBaseAdmin):
     model = Controller
-    fieldList = ['deployment', 'name', 'backend_type', 'version', 'auth_url', 'admin_user', 'admin_tenant','admin_password', 'domain']
+    fieldList = ['deployment', 'name', 'backend_type', 'version', 'auth_url', 'admin_user', 'admin_tenant','admin_password', 'domain', 'rabbit_host', 'rabbit_user', 'rabbit_password']
     fieldsets = [(None, {'fields': fieldList, 'classes':['suit-tab suit-tab-general']})]
     inlines = [ControllerSiteInline] # ,ControllerImagesInline]
     list_display = ['backend_status_icon', 'name', 'version', 'backend_type']
diff --git a/xos/core/models/site.py b/xos/core/models/site.py
index 26ff191..1bdef36 100644
--- a/xos/core/models/site.py
+++ b/xos/core/models/site.py
@@ -263,6 +263,9 @@
     admin_password = StrippedCharField(max_length=200, null=True, blank=True, help_text="Password of the admin user at this controller")
     admin_tenant = StrippedCharField(max_length=200, null=True, blank=True, help_text="Name of the tenant the admin user belongs to")
     domain = StrippedCharField(max_length=200, null=True, blank=True, help_text="Name of the domain this controller belongs to")
+    rabbit_host = StrippedCharField(max_length=200, null=True, blank=True, help_text="IP address of the RabbitMQ server at this controller")
+    rabbit_user = StrippedCharField(max_length=200, null=True, blank=True, help_text="Username for the RabbitMQ server at this controller")
+    rabbit_password = StrippedCharField(max_length=200, null=True, blank=True, help_text="Password for the RabbitMQ server at this controller")
     deployment = models.ForeignKey(Deployment,related_name='controllerdeployments')
 
     def __init__(self, *args, **kwargs):
diff --git a/xos/core/xoslib/methods/monitoringchannel.py b/xos/core/xoslib/methods/monitoringchannel.py
new file mode 100644
index 0000000..baab346
--- /dev/null
+++ b/xos/core/xoslib/methods/monitoringchannel.py
@@ -0,0 +1,89 @@
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework import status
+from core.models import *
+from django.forms import widgets
+from ceilometer.models import MonitoringChannel, CeilometerService
+from plus import PlusSerializerMixin
+from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
+
+if hasattr(serializers, "ReadOnlyField"):
+    # rest_framework 3.x
+    ReadOnlyField = serializers.ReadOnlyField
+else:
+    # rest_framework 2.x
+    ReadOnlyField = serializers.Field
+
+def get_default_ceilometer_service():
+    ceilometer_services = CeilometerService.get_service_objects().all()
+    if ceilometer_services:
+        return ceilometer_services[0].id
+    return None
+
+class MonitoringChannelSerializer(serializers.ModelSerializer, PlusSerializerMixin):
+        id = ReadOnlyField()
+        service_specific_attribute = ReadOnlyField()
+        ceilometer_url = ReadOnlyField()
+        tenant_list_str = ReadOnlyField()
+        creator = ReadOnlyField()
+        instance = ReadOnlyField()
+        provider_service = serializers.PrimaryKeyRelatedField(queryset=CeilometerService.get_service_objects().all(), default=get_default_ceilometer_service)
+
+        humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
+
+        computeNodeName = serializers.SerializerMethodField("getComputeNodeName")
+
+        class Meta:
+            model = MonitoringChannel
+            fields = ('humanReadableName', 'id', 'provider_service', 'service_specific_attribute', 'ceilometer_url', 'tenant_list_str', 'creator', 'instance', 'computeNodeName' )
+
+        def getHumanReadableName(self, obj):
+            return obj.__unicode__()
+
+        def getComputeNodeName(self, obj):
+            instance = obj.instance
+            if not instance:
+                return None
+            return instance.node.name
+
+class MonitoringChannelList(XOSListCreateAPIView):
+    serializer_class = MonitoringChannelSerializer
+
+    method_kind = "list"
+    method_name = "monitoringchannel"
+
+    def get_queryset(self):
+        queryset = MonitoringChannel.get_tenant_objects().select_related().all()
+
+        current_user = self.request.user.username
+        if current_user is not None:
+            ids = [x.id for x in queryset if x.creator.username==current_user]
+            queryset = queryset.filter(id__in=ids)
+
+        return queryset
+
+    def post(self, request, format=None):
+        current_user = request.user.username
+        existing_obj = None
+        for obj in MonitoringChannel.get_tenant_objects().all():
+            if (obj.creator.username == current_user):
+               existing_obj = obj
+               break
+
+        if existing_obj:
+            serializer = MonitoringChannelSerializer(existing_obj)
+            headers = self.get_success_headers(serializer.data)
+            return Response( serializer.data, status=status.HTTP_200_OK )
+
+        return super(MonitoringChannelList, self).post(request, format)
+
+class MonitoringChannelDetail(XOSRetrieveUpdateDestroyAPIView):
+    serializer_class = MonitoringChannelSerializer
+    queryset = MonitoringChannel.get_tenant_objects().select_related().all()
+
+    method_kind = "detail"
+    method_name = "monitoringchannel"
+
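A hedged usage sketch (not shipped in this commit) of the REST method above. It assumes the method is exposed at /xoslib/monitoringchannel/ (the usual mount point for files under core/xoslib/methods), that the requests library is available, and that the host and credentials below are placeholders for a real deployment.

    import requests

    XOS = "http://localhost:9999"
    AUTH = ("padmin@vicci.org", "letmein")     # illustrative credentials

    # POST returns the caller's existing channel with HTTP 200 if one exists,
    # otherwise it falls through to the normal create path.
    r = requests.post(XOS + "/xoslib/monitoringchannel/", auth=AUTH, data={})
    print r.status_code

    # The list view only returns channels created by the requesting user,
    # mirroring the get_queryset() filter above.
    for chan in requests.get(XOS + "/xoslib/monitoringchannel/", auth=AUTH).json():
        print chan["id"], chan["ceilometer_url"]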
diff --git a/xos/observers/base/SyncInstanceUsingAnsible.py b/xos/observers/base/SyncInstanceUsingAnsible.py
index 9455780..e22566a 100644
--- a/xos/observers/base/SyncInstanceUsingAnsible.py
+++ b/xos/observers/base/SyncInstanceUsingAnsible.py
@@ -8,7 +8,7 @@
 from xos.config import Config
 from observer.syncstep import SyncStep
 from observer.ansible import run_template_ssh
-from core.models import Service, Slice
+from core.models import Service, Slice, ControllerSlice, ControllerUser
 from util.logger import Logger, logging
 
 logger = Logger(level=logging.INFO)
@@ -71,10 +71,23 @@
 
         service_key = file(self.service_key_name).read()
 
+        cslice = ControllerSlice.objects.get(slice=instance.slice)
+        if not cslice:
+            raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
+
+        cuser = ControllerUser.objects.get(user=instance.creator)
+        if not cuser:
+            raise Exception("Controller user object for %s does not exist" % instance.creator)
+
         fields = { "instance_name": instance.name,
                    "hostname": instance.node.name,
                    "instance_id": instance.instance_id,
                    "private_key": service_key,
+                   "keystone_tenant_id": cslice.tenant_id,
+                   "keystone_user_id": cuser.kuser_id,
+                   "rabbit_user": instance.controller.rabbit_user,
+                   "rabbit_password": instance.controller.rabbit_password,
+                   "rabbit_host": instance.controller.rabbit_host,
                    "ansible_tag": "vcpe_tenant_" + str(o.id)
                  }
 
diff --git a/xos/observers/monitoring_channel/files/docker.list b/xos/observers/monitoring_channel/files/docker.list
new file mode 100644
index 0000000..0ee9ae0
--- /dev/null
+++ b/xos/observers/monitoring_channel/files/docker.list
@@ -0,0 +1 @@
+deb https://get.docker.com/ubuntu docker main
diff --git a/xos/observers/monitoring_channel/files/vm-resolv.conf b/xos/observers/monitoring_channel/files/vm-resolv.conf
new file mode 100644
index 0000000..cae093a
--- /dev/null
+++ b/xos/observers/monitoring_channel/files/vm-resolv.conf
@@ -0,0 +1 @@
+nameserver 8.8.8.8
diff --git a/xos/observers/monitoring_channel/model-deps b/xos/observers/monitoring_channel/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/observers/monitoring_channel/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/observers/monitoring_channel/monitoring_channel_observer.py b/xos/observers/monitoring_channel/monitoring_channel_observer.py
new file mode 100755
index 0000000..d6a71ff
--- /dev/null
+++ b/xos/observers/monitoring_channel/monitoring_channel_observer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs ../../xos-observer.py
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-observer")
+mod.main()
diff --git a/xos/observers/monitoring_channel/monitoring_channel_observer_config b/xos/observers/monitoring_channel/monitoring_channel_observer_config
new file mode 100644
index 0000000..922a019
--- /dev/null
+++ b/xos/observers/monitoring_channel/monitoring_channel_observer_config
@@ -0,0 +1,40 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=monitoring_channel
+dependency_graph=/opt/xos/observers/monitoring_channel/model-deps
+steps_dir=/opt/xos/observers/monitoring_channel/steps
+sys_dir=/opt/xos/observers/monitoring_channel/sys
+deleters_dir=/opt/xos/observers/monitoring_channel/deleters
+log_file=console
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+proxy_ssh=True
+full_setup=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
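Keys in the [observer] section above surface through the XOS Config wrapper as observer_* attributes; that is how the sync step later in this commit reads observer_full_setup. A minimal hedged sketch of that lookup pattern (the guarded default mirrors the sync step; nothing beyond the attribute-name convention is assumed):

    from xos.config import Config

    # [observer] keys are exposed as Config().observer_<key>; a missing key raises,
    # hence the fallback, exactly as in sync_monitoringchannel.py.
    try:
        full_setup = Config().observer_full_setup
    except Exception:
        full_setup = True
    print full_setup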
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
new file mode 100644
index 0000000..919b4ea
--- /dev/null
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
@@ -0,0 +1,74 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from observer.syncstep import SyncStep
+from observer.ansible import run_template_ssh
+from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from ceilometer.models import MonitoringChannel
+from util.logger import Logger, logging
+
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncMonitoringChannel(SyncInstanceUsingAnsible):
+    provides=[MonitoringChannel]
+    observes=MonitoringChannel
+    requested_interval=0
+    template_name = "sync_monitoringchannel.yaml"
+    service_key_name = "/opt/xos/observers/monitoring_channel/monitoring_channel_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncMonitoringChannel, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = MonitoringChannel.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = MonitoringChannel.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_extra_attributes(self, o):
+        # This is a place to include extra attributes. In the case of Monitoring Channel, we need to know
+        #   1) Allowed tenant ids
+        #   2) Ceilometer API service endpoint URL if running externally
+        #   3) Credentials to access Ceilometer API service
+
+        instance = self.get_instance(o)
+
+        try:
+            full_setup = Config().observer_full_setup
+        except:
+            full_setup = True
+
+        fields = {"unique_id": o.id,
+                  "allowed_tenant_ids": o.tenant_list,
+                  "auth_url":instance.controller.auth_url,
+                  "admin_user":instance.controller.admin_user,
+                  "admin_password":instance.controller.admin_password,
+                  "admin_tenant":instance.controller.admin_tenant,
+                  "full_setup": full_setup}
+
+        return fields
+
+    def run_playbook(self, o, fields):
+        #ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
+        #quick_update = (o.last_ansible_hash == ansible_hash)
+
+        #if quick_update:
+        #    logger.info("quick_update triggered; skipping ansible recipe")
+        #else:
+        super(SyncMonitoringChannel, self).run_playbook(o, fields)
+
+        #o.last_ansible_hash = ansible_hash
+
+    def delete_record(self, m):
+        pass
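As a rough guide to how this step feeds the playbook, here is a simplified sketch (assumptions noted inline) of the field dictionary that ends up rendered into sync_monitoringchannel.yaml. It assumes the SyncInstanceUsingAnsible base class merges get_extra_attributes(o) into the per-instance fields it builds, the pattern extended earlier in this commit; all values are illustrative.

    # Per-instance fields assembled by the base class (subset; values illustrative).
    base_fields = {
        "instance_name": "mysite_ceilometer-3",
        "hostname": "node1.cloudlab.us",
        "rabbit_host": "10.11.10.1",
        "rabbit_user": "openstack",
        "rabbit_password": "secret",
    }

    # Fields added by SyncMonitoringChannel.get_extra_attributes().
    extra_fields = {
        "unique_id": 3,
        "allowed_tenant_ids": ["3a397e70f64e4e40b69b6266c634d9d0"],
        "auth_url": "http://ctl:5000/v2.0/",
        "admin_user": "admin",
        "admin_password": "secret",
        "admin_tenant": "admin",
        "full_setup": True,
    }

    fields = dict(base_fields)
    fields.update(extra_fields)
    # The merged dict supplies the Jinja2 variables used by the playbook,
    # e.g. {{ unique_id }}, {{ rabbit_host }} and the allowed_tenant_ids loop.
    print sorted(fields.keys())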
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
new file mode 100644
index 0000000..bbe284f
--- /dev/null
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
@@ -0,0 +1,89 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      unique_id: {{ unique_id }}
+      auth_url: {{ auth_url }}
+      admin_user: {{ admin_user }}
+      admin_password: {{ admin_password }}
+      admin_tenant: {{ admin_tenant }}
+      shared_lan_ip: {{ private_ip }}
+      shared_lan_mac: {{ private_mac }}
+      headnode_flat_lan_ip: {{ rabbit_host }}
+      ceilometer_client_acess_ip: {{ ceilometer_ip }}
+      ceilometer_client_acess_mac: {{ ceilometer_mac }}
+      allowed_tenant_ids:
+        {% for allowed_tenant_id in allowed_tenant_ids %}
+        - {{ allowed_tenant_id }}
+        {% endfor %}
+
+  tasks:
+{% if full_setup %}
+  - name: Docker repository
+    copy: src=/opt/xos/observers/monitoring_channel/files/docker.list
+      dest=/etc/apt/sources.list.d/docker.list
+
+  - name: Import the repository key
+    apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
+
+  - name: install Docker
+    apt: name=lxc-docker-1.5.0 state=present update_cache=yes
+
+  - name: install python-setuptools
+    apt: name=python-setuptools state=present
+
+  - name: install pip
+    easy_install: name=pip
+
+  - name: install docker-py
+    pip: name=docker-py version=0.5.3
+
+  - name: install Pipework
+    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+       dest=/usr/local/bin/pipework
+       mode=0755
+
+  - name: Disable resolvconf service
+    shell: service resolvconf stop;
+           echo manual > /etc/init/resolvconf.override;
+           rm -f /etc/resolv.conf
+
+  - name: Install resolv.conf
+    copy: src=/opt/xos/observers/monitoring_channel/files/vm-resolv.conf
+      dest=/etc/resolv.conf
+{% endif %}
+
+  - name: ceilometer proxy config
+    template: src=/opt/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+
+  - name: Monitoring channel upstart
+    template: src=/opt/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2 dest=/etc/init/monitoring-channel-{{ unique_id }}.conf
+
+  - name: Monitoring channel startup script
+    template: src=/opt/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2 dest=/usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh mode=0755
+    notify:
+#    - restart monitoring-channel
+     - stop monitoring-channel
+     - remove container
+     - start monitoring-channel
+
+# These are samples, not necessary for correct function of demo
+
+  - name: Make sure Monitoring channel service is running
+    service: name=monitoring-channel-{{ unique_id }} state=started
+
+  handlers:
+  - name: restart monitoring-channel
+    shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service monitoring-channel-{{ unique_id }} start
+
+  - name: stop monitoring-channel
+    service: name=monitoring-channel-{{ unique_id }} state=stopped
+
+  - name: remove container
+    docker: name=monitoring-channel-{{ unique_id }} state=absent image=srikanthvavila/monitoring-channel
+
+  - name: start monitoring-channel
+    service: name=monitoring-channel-{{ unique_id }} state=started
diff --git a/xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf b/xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf
new file mode 100644
index 0000000..1b78703
--- /dev/null
+++ b/xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf
@@ -0,0 +1,2 @@
+[program:monitoring_channel_observer]
+command=python /opt/xos/observers/monitoring_channel/monitoring_channel_observer.py -C /opt/xos/observers/monitoring_channel/monitoring_channel_observer_config
diff --git a/xos/observers/monitoring_channel/templates/Dockerfile.monitoring_channel b/xos/observers/monitoring_channel/templates/Dockerfile.monitoring_channel
new file mode 100644
index 0000000..45defb8
--- /dev/null
+++ b/xos/observers/monitoring_channel/templates/Dockerfile.monitoring_channel
@@ -0,0 +1,26 @@
+FROM       ubuntu:14.04.2
+MAINTAINER Andy Bavier <acb@cs.princeton.edu>
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker; use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN apt-get update && apt-get install -y \
+    python-pip \
+    python-dev
+
+RUN pip install web.py
+RUN pip install wsgilog
+RUN pip install python-ceilometerclient
+RUN mkdir -p /usr/local/share
+ADD ceilometer_proxy_server.py /usr/local/share/
+RUN chmod +x /usr/local/share/ceilometer_proxy_server.py
+ADD start_ceilometer_proxy /usr/local/sbin/
+RUN chmod +x /usr/local/sbin/start_ceilometer_proxy
+EXPOSE 8000
+WORKDIR /usr/local/share
+CMD /usr/local/sbin/start_ceilometer_proxy
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
new file mode 100644
index 0000000..cba6f2a
--- /dev/null
+++ b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
@@ -0,0 +1,14 @@
+# This file is autogenerated by the monitoring-channel observer.
+# It contains a list of attributes used by the ceilometer proxy web server.
+# syntax: key=value
+
+[default]
+auth_url={{ auth_url }}
+admin_user={{ admin_user }}
+admin_tenant={{ admin_tenant }}
+admin_password={{ admin_password }}
+
+[allowed_tenants]
+{% for tenant_id in allowed_tenant_ids %}
+{{ tenant_id }}
+{% endfor %}
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py b/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py
new file mode 100644
index 0000000..711e996
--- /dev/null
+++ b/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+import web
+import ConfigParser
+import io
+import json
+from ceilometerclient import client
+import logging.handlers
+from wsgilog import WsgiLog
+
+web.config.debug=False
+
+logfile = "ceilometer_proxy_server.log"
+level=logging.INFO
+logger=logging.getLogger('ceilometer_proxy_server')
+logger.setLevel(level)
+handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
+logger.addHandler(handler)
+
+class FileLog(WsgiLog):
+    def __init__(self, application):
+        WsgiLog.__init__(
+            self,
+            application,
+            logformat = '%(message)s',
+            tofile = True,
+            toprint = True,
+            prnlevel = level,
+            file = logfile,
+            backups =1
+            )
+    def __call__(self, environ, start_response):
+        def hstart_response(status, response_headers, *args):
+             out = start_response(status, response_headers, *args)
+             try:
+                 logline=environ["SERVER_PROTOCOL"]+" "+environ["REQUEST_METHOD"]+" "+environ["REQUEST_URI"]+" - "+status
+             except Exception as err:
+                 logline="Could not log <%s> due to err <%s>" % (str(environ), err)
+             logger.info(logline)
+
+             return out
+
+        return super(FileLog, self).__call__(environ, hstart_response)
+
+#TODOs:
+#-See if we can avoid using python-ceilometerclient and instead use the REST calls directly with AuthToken
+#
+urls = (
+    r'^/v2/meters$', 'meter_list',
+    r'^/v2/meters/(?P<meter_name>[A-Za-z0-9_:.\-]+)/statistics$', 'statistics_list',
+    r'^/v2/samples$', 'sample_list',
+    r'^/v2/resources$', 'resource_list',
+)
+
+app = web.application(urls, globals())
+
+config = None
+ceilometer_client = None
+
+
+def parse_ceilometer_proxy_config():
+    global config
+    config = ConfigParser.RawConfigParser(allow_no_value=True)
+    config.read('ceilometer_proxy_config')
+ 
+def ceilometerclient():
+    global config, ceilometer_client
+    if ceilometer_client:
+         return ceilometer_client
+
+    if not config:
+         parse_ceilometer_proxy_config()
+
+    keystone = {}
+    keystone['os_username']=config.get('default','admin_user')
+    keystone['os_password']=config.get('default','admin_password')
+    keystone['os_auth_url']=config.get('default','auth_url')
+    keystone['os_tenant_name']=config.get('default','admin_tenant')
+    ceilometer_client = client.get_client(2,**keystone)
+    logger.info('ceilometer get_client is successful')
+    return ceilometer_client
+
+def make_query(user_id=None, tenant_id=None, resource_id=None,
+               user_ids=None, tenant_ids=None, resource_ids=None):
+    """Returns query built from given parameters.
+
+    This query can be then used for querying resources, meters and
+    statistics.
+
+    :Parameters:
+      - `user_id`: user_id, has a priority over list of ids
+      - `tenant_id`: tenant_id, has a priority over list of ids
+      - `resource_id`: resource_id, has a priority over list of ids
+      - `user_ids`: list of user_ids
+      - `tenant_ids`: list of tenant_ids
+      - `resource_ids`: list of resource_ids
+    """
+    user_ids = user_ids or []
+    tenant_ids = tenant_ids or []
+    resource_ids = resource_ids or []
+
+    query = []
+    if user_id:
+        user_ids = [user_id]
+    for u_id in user_ids:
+        query.append({"field": "user_id", "op": "eq", "value": u_id})
+
+    if tenant_id:
+        tenant_ids = [tenant_id]
+    for t_id in tenant_ids:
+        query.append({"field": "project_id", "op": "eq", "value": t_id})
+
+    if resource_id:
+        resource_ids = [resource_id]
+    for r_id in resource_ids:
+        query.append({"field": "resource_id", "op": "eq", "value": r_id})
+
+    return query
+
+def filter_query_params(query_params):
+    new_query=[]
+    i=0
+    user_specified_tenants=[]
+    for field in query_params['q.field']:
+        if field != 'project_id':
+            query = {}
+            query['field']=field
+            if query_params['q.op'][i] != '':
+                 query['op']=query_params['q.op'][i]
+            query['value']=query_params['q.value'][i]
+            new_query.append(query)
+        else:
+            user_specified_tenants.append(query_params['q.value'][i])
+        i=i+1
+    return new_query,user_specified_tenants
+
+class meter_list:
+    def GET(self):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        meters=[]
+        for (k,v) in config.items('allowed_tenants'):
+             if user_specified_tenants and (k not in user_specified_tenants):
+                 continue
+             final_query=[]
+             final_query.extend(new_query)
+             query = make_query(tenant_id=k)
+             final_query.extend(query)
+             logger.debug('final query=%s',final_query)
+             results = client.meters.list(q=final_query)
+             meters.extend(results)
+        return json.dumps([ob._info for ob in meters])
+
+class statistics_list:
+    def GET(self, meter_name):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+             "period": None
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        period = query_params.period
+        statistics = []
+        for (k,v) in config.items('allowed_tenants'):
+              if user_specified_tenants and (k not in user_specified_tenants):
+                  continue
+              final_query=[]
+              final_query.extend(new_query)
+              query = make_query(tenant_id=k)
+              final_query.extend(query)
+              logger.debug('final query=%s',final_query)
+              results = client.statistics.list(meter_name=meter_name, q=final_query, period=period)
+              statistics.extend(results)
+        return json.dumps([ob._info for ob in statistics])
+
+class sample_list:
+    def GET(self):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        samples=[]
+        for (k,v) in config.items('allowed_tenants'):
+              if user_specified_tenants and (k not in user_specified_tenants):
+                  continue
+              final_query=[]
+              final_query.extend(new_query)
+              query = make_query(tenant_id=k)
+              final_query.extend(query)
+              logger.debug('final query=%s',final_query)
+              results = client.samples.list(q=final_query)
+              samples.extend(results)
+        return json.dumps([ob._info for ob in samples])
+
+class resource_list:
+    def GET(self):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        resources=[]
+        for (k,v) in config.items('allowed_tenants'):
+              if user_specified_tenants and (k not in user_specified_tenants):
+                  continue
+              final_query=[]
+              final_query.extend(new_query)
+              query = make_query(tenant_id=k)
+              final_query.extend(query)
+              logger.debug('final query=%s',final_query)
+              results = client.resources.list(q=final_query)
+              resources.extend(results)
+        return json.dumps([ob._info for ob in resources])
+
+if __name__ == "__main__":
+    app.run(FileLog)
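A hedged client-side sketch (not part of this commit) showing how the proxy above is meant to be queried. It assumes the proxy is reachable on the monitoring channel's private IP at port 8888, which is how MonitoringChannel.ceilometer_url is built and how the start script maps the container port; the IP and resource id below are placeholders.

    import json
    import urllib
    import urllib2

    CEILOMETER_URL = "http://10.0.0.5:8888/"   # use the channel's ceilometer_url

    # The proxy accepts the same q.field/q.op/q.value triples as the Ceilometer v2 API;
    # filter_query_params() drops any project_id terms and the per-tenant loop re-adds
    # only the tenants listed in [allowed_tenants].
    params = [("q.field", "resource_id"), ("q.op", "eq"), ("q.value", "vcpe-1234")]
    url = CEILOMETER_URL + "v2/meters?" + urllib.urlencode(params)

    meters = json.loads(urllib2.urlopen(url).read())
    for m in meters:
        print m["name"], m["resource_id"]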
diff --git a/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2 b/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2
new file mode 100644
index 0000000..eb937ac
--- /dev/null
+++ b/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2
@@ -0,0 +1,10 @@
+# Upstart script for Monitoring channel
+description "Upstart script for Monitoring channel container"
+author "andy@onlab.us"
+start on filesystem and started docker
+stop on runlevel [!2345]
+respawn
+
+script
+  /usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh
+end script
diff --git a/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2 b/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
new file mode 100755
index 0000000..10d9ef5
--- /dev/null
+++ b/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+function mac_to_iface {
+    MAC=$1
+    ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
+}
+
+function generate_mac_from_ip {
+    IP=$1
+    printf "02:42:%02x:%02x:%02x:%02x\n" `echo $IP|awk -F '.' '{print $1, $2, $3, $4}'`
+}
+
+iptables -L > /dev/null
+ip6tables -L > /dev/null
+
+MONITORING_CHANNEL=monitoring-channel-{{ unique_id }}
+HEADNODEFLATLANIP={{ headnode_flat_lan_ip }}
+
+docker inspect $MONITORING_CHANNEL > /dev/null 2>&1
+if [ "$?" == 1 ]
+then
+    #sudo docker build -t monitoring-channel -f Dockerfile.monitoring_channel .
+    sudo docker pull srikanthvavila/monitoring-channel
+    docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p 8888:8000 srikanthvavila/monitoring-channel
+else
+    docker start $MONITORING_CHANNEL
+fi
+
+# Set up networking via pipework
+#SHARED_LAN_IFACE=$( mac_to_iface {{ shared_lan_mac }} )
+#docker exec $MONITORING_CHANNEL ifconfig eth0 >> /dev/null || pipework $SHARED_LAN_IFACE -i eth0 $MONITORING_CHANNEL 192.168.0.1/24
+
+# Make sure VM's eth0 (hpc_client) has no IP address
+#ifconfig $HPC_IFACE 0.0.0.0
+
+# Now copy ceilometer proxy configuration to container
+cat /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config | docker exec -i $MONITORING_CHANNEL bash -c 'cat > /usr/local/share/ceilometer_proxy_config'
+
+# Attach to container
+docker start -a $MONITORING_CHANNEL
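For reference, a small Python sketch (illustrative only, not shipped here) of what generate_mac_from_ip above computes: a locally administered 02:42-prefixed MAC derived from the four octets of the container IP, so a pipework-attached interface gets a deterministic address.

    def generate_mac_from_ip(ip):
        # Same arithmetic as the shell printf: 02:42 prefix plus the four octets in hex.
        return "02:42:" + ":".join("%02x" % int(octet) for octet in ip.split("."))

    print generate_mac_from_ip("10.0.0.5")   # 02:42:0a:00:00:05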
diff --git a/xos/observers/monitoring_channel/templates/start_ceilometer_proxy b/xos/observers/monitoring_channel/templates/start_ceilometer_proxy
new file mode 100644
index 0000000..ddaa9c8
--- /dev/null
+++ b/xos/observers/monitoring_channel/templates/start_ceilometer_proxy
@@ -0,0 +1 @@
+/usr/local/share/ceilometer_proxy_server.py 8000
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
index 1a30656..fac78d5 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -35,6 +35,11 @@
       private_mac: {{ private_mac }}
       hpc_client_ip: {{ hpc_client_ip }}
       hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
 
   tasks:
 {% if full_setup %}
@@ -73,6 +78,25 @@
   - name: Install resolv.conf
     copy: src=/opt/xos/observers/vcpe/files/vm-resolv.conf
       dest=/etc/resolv.conf
+
+  - name: make sure ~/bin exists
+    file: path=~/bin state=directory owner=root group=root
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/observers/vcpe/vcpe_stats_notifier.py
+      dest=~/bin/vcpe_stats_notifier.py
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+
+  - name: Clean any running vcpe_stats_notifier cron jobs
+    command: pkill vcpe_stats_notifier.py
+    ignore_errors: yes
+
+  - name: Initiate vcpe_stats_notifier cron job
+    command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
+    async: 999999999999999999
+    poll: 0
 {% endif %}
 
   - name: vCPE upstart
diff --git a/xos/observers/vcpe/vcpe_stats_notifier.py b/xos/observers/vcpe/vcpe_stats_notifier.py
new file mode 100644
index 0000000..f4bb923
--- /dev/null
+++ b/xos/observers/vcpe/vcpe_stats_notifier.py
@@ -0,0 +1,250 @@
+import six
+import uuid
+import datetime
+from kombu.connection import BrokerConnection
+from kombu.messaging import Exchange, Queue, Consumer, Producer
+import subprocess
+import re
+import time, threading
+import sys, getopt
+import logging.handlers
+
+
+logfile = "vcpe_stats_notifier.log"
+level=logging.INFO
+logger=logging.getLogger('vcpe_stats_notifier')
+logger.setLevel(level)
+handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
+logger.addHandler(handler)
+
+def extract_dns_stats_from_all_vcpes():
+    p = subprocess.Popen('docker ps', shell=True, stdout=subprocess.PIPE) 
+    firstline = True
+    dockercontainers = {}
+    while True:
+        out = p.stdout.readline()
+        if out == '' and p.poll() != None:
+            break
+        if out != '':
+            if firstline is True:
+                firstline = False
+            else:
+                fields = out.split()
+                container_fields = {}
+                container_fields['id'] = fields[0]
+                dockercontainers[fields[-1]] = container_fields
+    for k,v in dockercontainers.iteritems():
+         cmd = 'docker exec ' + v['id'] + ' killall -10 dnsmasq'
+         p = subprocess.Popen (cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+         (output, error) = p.communicate()
+         if error:
+             logger.error("killall dnsmasq command failed with error = %s",error)
+             continue
+         cmd = 'docker exec ' + v['id'] + ' tail -7 /var/log/syslog'
+         p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+         (output, error) = p.communicate()
+         if error:
+             logger.error("tail on dnsmasq log command failed with error = %s",error)
+             continue
+         log_list = output.splitlines()
+         i = 0
+         while i < len(log_list):
+             m = re.search('(?<=:\scache size\s)(\S*)(?=,\s),\s(\S*)(?=/)/(\S*)(?=\scache insertions re-used unexpired cache entries)', log_list[i])
+             if m == None:
+                 i = i+1
+                 continue;
+             v['cache_size'] = m.group(1)
+             v['replaced_unexpired_entries'] = m.group(2)
+             v['total_inserted_entries'] = m.group(3)
+             i = i+1
+             m = re.search('(?<=:\squeries forwarded\s)(\S*)(?=,),\squeries answered locally\s(\S*)(?=$)', log_list[i])
+             v['queries_forwarded'] = m.group(1)
+             v['queries_answered_locally'] = m.group(2)
+             break;
+         i = i+2
+         v['server_stats'] = []
+         while i < len(log_list):
+             m = re.search('(?<=:\sserver\s)(\S*)(?=#)#\d*:\squeries sent\s(\S*)(?=,),\sretried or failed\s(\S*)(?=$)', log_list[i])
+             if m == None:
+                 i = i+1
+                 continue
+             dns_server = {}
+             dns_server['id'] = m.group(1)
+             dns_server['queries_sent'] = m.group(2)
+             dns_server['queries_failed'] = m.group(3)
+             v['server_stats'].append(dns_server)
+             i = i+1
+    return dockercontainers
+
+
+keystone_tenant_id='3a397e70f64e4e40b69b6266c634d9d0'
+keystone_user_id='1e3ce043029547f1a61c1996d1a531a2'
+rabbit_user='openstack'
+rabbit_password='80608318c273f348a7c3'
+rabbit_host='10.11.10.1'
+vcpeservice_rabbit_exchange='vcpeservice'
+cpe_publisher_id='vcpe_publisher'
+
+producer = None
+
+def setup_rabbit_mq_channel():
+     global producer
+     global rabbit_user, rabbit_password, rabbit_host, vcpeservice_rabbit_exchange,cpe_publisher_id
+     vcpeservice_exchange = Exchange(vcpeservice_rabbit_exchange, "topic", durable=False)
+     # connections/channels
+     connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
+     logger.info('Connection to RabbitMQ server successful')
+     channel = connection.channel()
+     # produce
+     producer = Producer(channel, exchange=vcpeservice_exchange, routing_key='notifications.info')
+     p = subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE)
+     (hostname, error) = p.communicate()
+     cpe_publisher_id = cpe_publisher_id + '_on_' + hostname.strip()
+     logger.info('cpe_publisher_id=%s',cpe_publisher_id)
+
+def publish_cpe_stats():
+     global producer
+     global keystone_tenant_id, keystone_user_id, cpe_publisher_id
+     cpe_container_stats = extract_dns_stats_from_all_vcpes()
+
+     for k,v in cpe_container_stats.iteritems():
+          msg = {'event_type': 'vcpe', 
+                 'message_id':six.text_type(uuid.uuid4()),
+                 'publisher_id': cpe_publisher_id,
+                 'timestamp':datetime.datetime.now().isoformat(),
+                 'priority':'INFO',
+                 'payload': {'vcpe_id':k, 
+                             'user_id':keystone_user_id, 
+                             'tenant_id':keystone_tenant_id 
+                            }
+                }
+          producer.publish(msg)
+          if 'cache_size' in v:
+               msg = {'event_type': 'vcpe.dns.cache.size', 
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'vcpe_id':k, 
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'cache_size':v['cache_size'] 
+                                 }
+                     }
+               producer.publish(msg)
+          if 'total_inserted_entries' in v:
+               msg = {'event_type': 'vcpe.dns.total_inserted_entries', 
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'vcpe_id':k, 
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'total_inserted_entries':v['total_inserted_entries'] 
+                                 }
+                     }
+               producer.publish(msg)
+          if 'replaced_unexpired_entries' in v:
+               msg = {'event_type': 'vcpe.dns.replaced_unexpired_entries', 
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'vcpe_id':k, 
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'replaced_unexpired_entries':v['replaced_unexpired_entries'] 
+                                 }
+                     }
+               producer.publish(msg)
+
+          if 'queries_forwarded' in v:
+               msg = {'event_type': 'vcpe.dns.queries_forwarded', 
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'vcpe_id':k, 
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'queries_forwarded':v['queries_forwarded'] 
+                                 }
+                     }
+               producer.publish(msg)
+
+          if 'queries_answered_locally' in v:
+               msg = {'event_type': 'vcpe.dns.queries_answered_locally', 
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'vcpe_id':k, 
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'queries_answered_locally':v['queries_answered_locally'] 
+                                 }
+                     }
+               producer.publish(msg)
+
+          if 'server_stats' in v:
+               for server in v['server_stats']:
+                   msg = {'event_type': 'vcpe.dns.server.queries_sent', 
+                          'message_id':six.text_type(uuid.uuid4()),
+                          'publisher_id': cpe_publisher_id,
+                          'timestamp':datetime.datetime.now().isoformat(),
+                          'priority':'INFO',
+                          'payload': {'vcpe_id':k, 
+                                      'user_id':keystone_user_id,
+                                      'tenant_id':keystone_tenant_id, 
+                                      'upstream_server':server['id'],
+                                      'queries_sent':server['queries_sent'] 
+                                     }
+                         }
+                   producer.publish(msg)
+
+                   msg = {'event_type': 'vcpe.dns.server.queries_failed', 
+                          'message_id':six.text_type(uuid.uuid4()),
+                          'publisher_id': cpe_publisher_id,
+                          'timestamp':datetime.datetime.now().isoformat(),
+                          'priority':'INFO',
+                          'payload': {'vcpe_id':k, 
+                                      'user_id':keystone_user_id,
+                                      'tenant_id':keystone_tenant_id, 
+                                      'upstream_server':server['id'],
+                                      'queries_failed':server['queries_failed'] 
+                                     }
+                         }
+                   producer.publish(msg)
+
+def periodic_publish():
+     publish_cpe_stats()
+     # Publish every 5 minutes
+     threading.Timer(300, periodic_publish).start()
+
+def main(argv):
+   global keystone_tenant_id, keystone_user_id, rabbit_user, rabbit_password, rabbit_host, vcpeservice_rabbit_exchange
+   try:
+      opts, args = getopt.getopt(argv,"",["keystone_tenant_id=","keystone_user_id=","rabbit_host=","rabbit_user=","rabbit_password=","vcpeservice_rabbit_exchange="])
+   except getopt.GetoptError:
+      print 'vcpe_stats_notifier.py --keystone_tenant_id=<keystone_tenant_id> --keystone_user_id=<keystone_user_id> --rabbit_host=<IP addr> --rabbit_user=<user> --rabbit_password=<password> --vcpeservice_rabbit_exchange=<exchange name>'
+      sys.exit(2)
+   for opt, arg in opts:
+      if opt in ("--keystone_tenant_id"):
+         keystone_tenant_id = arg
+      elif opt in ("--keystone_user_id"):
+         keystone_user_id = arg
+      elif opt in ("--rabbit_user"):
+         rabbit_user = arg
+      elif opt in ("--rabbit_password"):
+         rabbit_password = arg
+      elif opt in ("--rabbit_host"):
+         rabbit_host = arg
+      elif opt in ("--vcpeservice_rabbit_exchange"):
+         vcpeservice_rabbit_exchange = arg
+   logger.info("vcpe_stats_notifier args:keystone_tenant_id=%s keystone_user_id=%s rabbit_user=%s rabbit_host=%s vcpeservice_rabbit_exchange=%s",keystone_tenant_id,keystone_user_id,rabbit_user,rabbit_host,vcpeservice_rabbit_exchange)
+   setup_rabbit_mq_channel()
+   periodic_publish()
+
+if __name__ == "__main__":
+   main(sys.argv[1:])
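For completeness, a hedged consumer sketch (not part of this commit) for the notifications published above. The exchange name, routing key, and broker settings mirror the notifier's defaults; the queue name is made up, and real deployments should substitute their own credentials.

    from kombu.connection import BrokerConnection
    from kombu.messaging import Exchange, Queue, Consumer

    def on_message(body, message):
        # body is the dict published by publish_cpe_stats(): event_type, payload, ...
        print body.get("event_type"), body.get("payload")
        message.ack()

    exchange = Exchange("vcpeservice", "topic", durable=False)
    queue = Queue("vcpe_stats_demo", exchange=exchange,
                  routing_key="notifications.info", durable=False)

    connection = BrokerConnection("10.11.10.1", "openstack", "80608318c273f348a7c3")
    channel = connection.channel()
    consumer = Consumer(channel, queues=[queue], callbacks=[on_message])
    consumer.consume()

    while True:
        connection.drain_events()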
diff --git a/xos/xos/xosapi.py b/xos/xos/xosapi.py
index bec3ea2..6532c72 100644
--- a/xos/xos/xosapi.py
+++ b/xos/xos/xosapi.py
@@ -1806,7 +1806,7 @@
             return None
     class Meta:
         model = Controller
-        fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','backend_type','version','auth_url','admin_user','admin_password','admin_tenant','domain','deployment','dashboardviews',)
+        fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','backend_type','version','auth_url','admin_user','admin_password','admin_tenant','domain','rabbit_host','rabbit_user','rabbit_password','deployment','dashboardviews',)
 
 class ControllerIdSerializer(XOSModelSerializer):
     id = IdField()
@@ -4644,7 +4644,7 @@
     serializer_class = ControllerSerializer
     id_serializer_class = ControllerIdSerializer
     filter_backends = (filters.DjangoFilterBackend,)
-    filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','backend_type','version','auth_url','admin_user','admin_password','admin_tenant','domain','deployment','dashboardviews',)
+    filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','backend_type','version','auth_url','admin_user','admin_password','admin_tenant','domain','rabbit_host','rabbit_user','rabbit_password','deployment','dashboardviews',)
 
     def get_serializer_class(self):
         no_hyperlinks=False