move ceilometer service over from xos repo

Change-Id: I9402876545cf552675cd0a836e02e2c72fea5f6b
diff --git a/xos/synchronizer/files/docker.list b/xos/synchronizer/files/docker.list
new file mode 100644
index 0000000..0ee9ae0
--- /dev/null
+++ b/xos/synchronizer/files/docker.list
@@ -0,0 +1 @@
+deb https://get.docker.com/ubuntu docker main
diff --git a/xos/synchronizer/files/vm-resolv.conf b/xos/synchronizer/files/vm-resolv.conf
new file mode 100644
index 0000000..cae093a
--- /dev/null
+++ b/xos/synchronizer/files/vm-resolv.conf
@@ -0,0 +1 @@
+nameserver 8.8.8.8
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
new file mode 100644
index 0000000..c679225
--- /dev/null
+++ b/xos/synchronizer/manifest
@@ -0,0 +1,26 @@
+templates/Dockerfile.monitoring_channel
+templates/ceilometer_proxy_config.j2
+templates/Dockerfile.sflowpubsub
+templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
+templates/sflow_pub_sub/README
+templates/sflow_pub_sub/sflow_sub_records.py
+templates/sflow_pub_sub/start_sflow_pub_sub
+templates/sflow_pub_sub/sflow_pub_sub_main.py
+templates/sflow_pub_sub/sflow_pub_sub_config.j2
+templates/start-monitoring-channel.sh.j2
+templates/monitoring-channel.conf.j2
+templates/ceilometer_proxy_server.py
+templates/start_ceilometer_proxy
+manifest
+monitoring_channel_synchronizer_config
+steps/sync_sflowtenant.yaml
+steps/sync_sflowtenant.py
+steps/sync_monitoringchannel.yaml
+steps/sync_monitoringchannel.py
+steps/sync_sflowservice.yaml
+steps/sync_sflowservice.py
+files/vm-resolv.conf
+files/docker.list
+model-deps
+supervisor/monitoring_channel_observer.conf
+monitoring_channel_synchronizer.py
diff --git a/xos/synchronizer/model-deps b/xos/synchronizer/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizer/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizer/monitoring_channel_synchronizer.py b/xos/synchronizer/monitoring_channel_synchronizer.py
new file mode 100755
index 0000000..84bec4f
--- /dev/null
+++ b/xos/synchronizer/monitoring_channel_synchronizer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs ../../synchronizers/base/xos-synchronizer.py
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
diff --git a/xos/synchronizer/monitoring_channel_synchronizer_config b/xos/synchronizer/monitoring_channel_synchronizer_config
new file mode 100644
index 0000000..8c6578f
--- /dev/null
+++ b/xos/synchronizer/monitoring_channel_synchronizer_config
@@ -0,0 +1,41 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=monitoring_channel
+dependency_graph=/opt/xos/synchronizers/monitoring_channel/model-deps
+steps_dir=/opt/xos/synchronizers/monitoring_channel/steps
+sys_dir=/opt/xos/synchronizers/monitoring_channel/sys
+deleters_dir=/opt/xos/synchronizers/monitoring_channel/deleters
+log_file=console
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+proxy_ssh=False
+full_setup=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/xos/synchronizer/steps/sync_monitoringchannel.py b/xos/synchronizer/steps/sync_monitoringchannel.py
new file mode 100644
index 0000000..2c0ba10
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoringchannel.py
@@ -0,0 +1,84 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from services.ceilometer.models import CeilometerService, MonitoringChannel
+from xos.logger import Logger, logging
+
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncMonitoringChannel(SyncInstanceUsingAnsible):
+    provides=[MonitoringChannel]
+    observes=MonitoringChannel
+    requested_interval=0
+    template_name = "sync_monitoringchannel.yaml"
+    service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncMonitoringChannel, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = MonitoringChannel.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = MonitoringChannel.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_extra_attributes(self, o):
+        # This is a place to include extra attributes. In the case of Monitoring Channel, we need to know
+        #   1) Allowed tenant ids
+        #   2) Ceilometer API service endpoint URL if running externally
+        #   3) Credentials to access Ceilometer API service
+
+        ceilometer_services = CeilometerService.get_service_objects().filter(id=o.provider_service.id)
+        if not ceilometer_services:
+            raise Exception("No associated Ceilometer service")
+        ceilometer_service = ceilometer_services[0]
+        ceilometer_pub_sub_url = ceilometer_service.ceilometer_pub_sub_url
+        if not ceilometer_pub_sub_url:
+            ceilometer_pub_sub_url = ''
+        instance = self.get_instance(o)
+
+        try:
+            full_setup = Config().observer_full_setup
+        except:
+            full_setup = True
+
+        fields = {"unique_id": o.id,
+                  "allowed_tenant_ids": o.tenant_list,
+                  "auth_url":instance.controller.auth_url,
+                  "admin_user":instance.controller.admin_user,
+                  "admin_password":instance.controller.admin_password,
+                  "admin_tenant":instance.controller.admin_tenant,
+                  "ceilometer_pub_sub_url": ceilometer_pub_sub_url,
+                  "full_setup": full_setup}
+
+        return fields
+
+    def run_playbook(self, o, fields):
+        #ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
+        #quick_update = (o.last_ansible_hash == ansible_hash)
+
+        #if quick_update:
+        #    logger.info("quick_update triggered; skipping ansible recipe")
+        #else:
+        super(SyncMonitoringChannel, self).run_playbook(o, fields)
+
+        #o.last_ansible_hash = ansible_hash
+
+    def map_delete_inputs(self, o):
+        fields = {"unique_id": o.id,
+                  "delete": True}
+        return fields
diff --git a/xos/synchronizer/steps/sync_monitoringchannel.yaml b/xos/synchronizer/steps/sync_monitoringchannel.yaml
new file mode 100644
index 0000000..ca72c5f
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoringchannel.yaml
@@ -0,0 +1,145 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      unique_id: {{ unique_id }}
+      auth_url: {{ auth_url }}
+      admin_user: {{ admin_user }}
+      admin_password: {{ admin_password }}
+      admin_tenant: {{ admin_tenant }}
+      shared_lan_ip: {{ private_ip }}
+      shared_lan_mac: {{ private_mac }}
+      headnode_flat_lan_ip: {{ rabbit_host }}
+      ceilometer_client_access_ip: {{ ceilometer_ip }}
+      ceilometer_client_access_mac: {{ ceilometer_mac }}
+      ceilometer_host_port: {{ ceilometer_port }}
+      ceilometer_pub_sub_url: {{ ceilometer_pub_sub_url }}
+      allowed_tenant_ids:
+        {% for allowed_tenant_id in allowed_tenant_ids %}
+        - {{ allowed_tenant_id }}
+        {% endfor %}
+
+  tasks:
+{% if delete %}
+  - name: Remove tenant
+# FIXME: Adding dummy template action to avoid "action attribute missing in task" error
+    template: src=/opt/xos/synchronizers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+    notify:
+     - stop monitoring-channel
+     - remove container
+{% else %}
+{% if full_setup %}
+#  - name: Docker repository
+#    copy: src=/opt/xos/synchronizers/monitoring_channel/files/docker.list
+#      dest=/etc/apt/sources.list.d/docker.list
+#
+#  - name: Import the repository key
+#    apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
+#
+#  - name: install Docker
+#    apt: name=lxc-docker state=present update_cache=yes
+#
+#  - name: install python-setuptools
+#    apt: name=python-setuptools state=present
+#
+#  - name: install pip
+#    easy_install: name=pip
+#
+#  - name: install docker-py
+#    pip: name=docker-py version=0.5.3
+#
+#  - name: install Pipework
+#    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+#       dest=/usr/local/bin/pipework
+#       mode=0755
+#
+#  - name: Disable resolvconf service
+#    shell: service resolvconf stop
+#    shell: echo manual > /etc/init/resolvconf.override
+#    shell: rm -f /etc/resolv.conf
+#
+#  - name: Install resolv.conf
+#    copy: src=/opt/xos/synchronizers/monitoring_channel/files/vm-resolv.conf
+#      dest=/etc/resolv.conf
+{% endif %}
+
+# FIXME: Temporary workaround: always delete the monitoring-channel ceilometer_proxy_config
+# file so that the ansible notify handlers in the following task are triggered.
+# For some reason ansible's "changed" flag stays false even when the ceilometer
+# configuration file changes, so the configuration change never reaches the
+# ceilometer containers.
+#  - file: path=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config state=absent
+
+  - name: ceilometer proxy config
+    template: src=/opt/xos/synchronizers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+    notify:
+     - copy ceilo-config-file
+     - restart monitoring-channel container
+#     - stop monitoring-channel
+#     - remove container
+#     - start monitoring-channel
+
+  - name: Monitoring channel upstart
+    template: src=/opt/xos/synchronizers/monitoring_channel/templates/monitoring-channel.conf.j2 dest=/etc/init/monitoring-channel-{{ unique_id }}.conf
+
+  - name: Monitoring channel startup script
+    template: src=/opt/xos/synchronizers/monitoring_channel/templates/start-monitoring-channel.sh.j2 dest=/usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh mode=0755
+    notify:
+#    - restart monitoring-channel
+     - stop monitoring-channel
+     - remove container
+     - start monitoring-channel
+
+#  - name: Start monitoring-channel container
+#    docker:
+#      docker_api_version: "1.18"
+#      name: monitoring-channel-{{ unique_id }}
+#      # was: reloaded
+#      state: running
+#      image: srikanthvavila/monitoring-channel
+#      expose:
+#      - 8000
+#      ports:
+#      - "{{ ceilometer_port }}:8000"
+#      volumes:
+#      - /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config
+#
+#  - name: Get Docker IP
+#    #TODO: copy dockerip.sh to monitoring service synchronizer
+#    script: /opt/xos/synchronizers/onos/scripts/dockerip.sh monitoring-channel-{{ unique_id }}
+#    register: dockerip
+#
+#  - name: Wait for Monitoring channel to come up
+#    wait_for:
+#      host={{ '{{' }} dockerip.stdout {{ '}}' }}
+#      port={{ '{{' }} item {{ '}}' }}
+#      state=present
+#    with_items:
+#    - {{ ceilometer_port }}
+# These are samples, not necessary for correct function of demo
+
+  - name: Make sure Monitoring channel service is running
+    service: name=monitoring-channel-{{ unique_id }} state=started
+{% endif %}
+
+  handlers:
+  - name: copy ceilo-config-file
+    shell: docker cp /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config monitoring-channel-{{ unique_id }}:/usr/local/share/ceilometer_proxy_config
+
+  - name: restart monitoring-channel container
+    shell: docker restart monitoring-channel-{{ unique_id }}
+
+  - name: restart monitoring-channel
+    shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service monitoring-channel-{{ unique_id }} start
+
+  - name: stop monitoring-channel
+    service: name=monitoring-channel-{{ unique_id }} state=stopped
+
+  - name: remove container
+    docker: name=monitoring-channel-{{ unique_id }} state=absent image=monitoring-channel
+
+  - name: start monitoring-channel
+    service: name=monitoring-channel-{{ unique_id }} state=started
diff --git a/xos/synchronizer/steps/sync_sflowservice.py b/xos/synchronizer/steps/sync_sflowservice.py
new file mode 100644
index 0000000..154c5ab
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowservice.py
@@ -0,0 +1,74 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from services.ceilometer.models import SFlowService
+from xos.logger import Logger, logging
+
+# Allow importing modules from the synchronizer directory (steps/..)
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncSFlowService(SyncInstanceUsingAnsible):
+    provides=[SFlowService]
+    observes=SFlowService
+    requested_interval=0
+    template_name = "sync_sflowservice.yaml"
+    service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncSFlowService, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = SFlowService.get_service_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = SFlowService.get_deleted_service_objects()
+
+        return objs
+
+    def get_instance(self, o):
+        # We assume the SFlow service owns a slice, so pick one of the instances
+        # inside that slice to sync to.
+
+        serv = o
+
+        if serv.slices.exists():
+            slice = serv.slices.all()[0]
+            if slice.instances.exists():
+                return slice.instances.all()[0]
+
+        return None
+
+    def get_extra_attributes(self, o):
+        fields={}
+        fields["instance_hostname"] = self.get_instance(o).instance_name.replace("_","-")
+        fields["sflow_port"] = o.sflow_port
+        fields["sflow_api_port"] = o.sflow_api_port
+        fields["sflow_container"] = "sflowpubsub"
+        return fields
+
+    def sync_fields(self, o, fields):
+        # the super causes the playbook to be run
+        super(SyncSFlowService, self).sync_fields(o, fields)
+
+    def run_playbook(self, o, fields):
+        instance = self.get_instance(o)
+        if (instance.isolation=="container"):
+            # If the instance is itself a container, the Docker-based setup in
+            # this playbook does not apply, so skip it.
+            return
+        super(SyncSFlowService, self).run_playbook(o, fields)
+
+    def delete_record(self, m):
+        pass
diff --git a/xos/synchronizer/steps/sync_sflowservice.yaml b/xos/synchronizer/steps/sync_sflowservice.yaml
new file mode 100644
index 0000000..8d853a2
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowservice.yaml
@@ -0,0 +1,74 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+     sflow_port: {{ sflow_port }}
+     sflow_api_port: {{ sflow_api_port }}
+
+  tasks:
+
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ instance_hostname }}"
+
+  - name: Add repo key
+    apt_key:
+      keyserver=hkp://pgp.mit.edu:80
+      id=58118E89F3A912897C070ADBF76221572C52609D
+
+  - name: Install Docker repo
+    apt_repository:
+      repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
+      state=present
+
+  - name: Install Docker
+    apt:
+      name={{ '{{' }} item {{ '}}' }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - docker-engine
+    - python-pip
+    - python-httplib2
+
+  - name: Install docker-py
+    pip:
+      name=docker-py
+      state=latest
+
+  - name: sflow pub-sub config
+    template: src=/opt/xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_pub_sub_config.j2 dest=/usr/local/share/sflow_pub_sub.conf mode=0777
+
+  - name: Start SFLOW pub-sub container
+    docker:
+      docker_api_version: "1.18"
+      name: {{ sflow_container }}
+      # was: reloaded
+      state: running
+      image: srikanthvavila/sflowpubsub
+      expose:
+      - {{ sflow_api_port }}
+      - {{ sflow_port }}/udp
+      ports:
+      - "{{ sflow_port }}:{{ sflow_port }}/udp"
+      - "{{ sflow_api_port }}:{{ sflow_api_port }}"
+      volumes:
+      - /usr/local/share/sflow_pub_sub.conf:/usr/local/share/sflow_pub_sub/sflow_pub_sub.conf
+
+  - name: Get Docker IP
+    #TODO: copy dockerip.sh to monitoring service synchronizer
+    script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ sflow_container }}
+    register: dockerip
+
+  - name: Wait for SFlow service to come up
+    wait_for:
+      host={{ '{{' }} dockerip.stdout {{ '}}' }}
+      port={{ '{{' }} item {{ '}}' }}
+      state=present
+    with_items:
+    - {{ sflow_api_port }}
diff --git a/xos/synchronizer/steps/sync_sflowtenant.py b/xos/synchronizer/steps/sync_sflowtenant.py
new file mode 100644
index 0000000..a15fa54
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowtenant.py
@@ -0,0 +1,82 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+import re
+import json
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice, ControllerSlice, ControllerUser
+from services.ceilometer.models import SFlowService, SFlowTenant
+from xos.logger import Logger, logging
+
+# Allow importing modules from the synchronizer directory (steps/..)
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncSFlowTenant(SyncInstanceUsingAnsible):
+    provides=[SFlowTenant]
+    observes=SFlowTenant
+    requested_interval=0
+    template_name = "sync_sflowtenant.yaml"
+    service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncSFlowTenant, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = SFlowTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = SFlowTenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_sflow_service(self, o):
+        sflows = SFlowService.get_service_objects().filter(id=o.provider_service.id)
+        if not sflows:
+           raise Exception("No associated SFlow service")
+
+        return sflows[0]
+
+    def get_instance(self, o):
+        # We assume the SFlow service owns a slice, so pick one of the instances
+        # inside that slice to sync to.
+
+        serv = self.get_sflow_service(o)
+
+        if serv.slices.exists():
+            slice = serv.slices.all()[0]
+            if slice.instances.exists():
+                return slice.instances.all()[0]
+
+        return None
+
+    def get_extra_attributes(self, o):
+        instance = self.get_instance(o)
+
+        fields={}
+        fields["sflow_api_base_url"] = self.get_sflow_service(o).sflow_api_url
+        fields["sflow_api_port"] = self.get_sflow_service(o).sflow_api_port
+        fields["listening_endpoint"] = o.listening_endpoint
+        fields["sflow_container"] = "sflowpubsub"
+
+        return fields
+
+    def sync_fields(self, o, fields):
+        # the super causes the playbook to be run
+        super(SyncSFlowTenant, self).sync_fields(o, fields)
+
+    def run_playbook(self, o, fields):
+        super(SyncSFlowTenant, self).run_playbook(o, fields)
+
+    def delete_record(self, m):
+        pass
diff --git a/xos/synchronizer/steps/sync_sflowtenant.yaml b/xos/synchronizer/steps/sync_sflowtenant.yaml
new file mode 100644
index 0000000..701ce5c
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowtenant.yaml
@@ -0,0 +1,28 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  tasks:
+
+  - name: Get Docker IP
+    #TODO: copy dockerip.sh to monitoring service synchronizer
+    script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ sflow_container }}
+    register: sflowserviceaddr
+
+  - name: Wait for SFlow service to come up
+    wait_for:
+      host={{ '{{' }} sflowserviceaddr.stdout {{ '}}' }}
+      port={{ '{{' }} item {{ '}}' }}
+      state=present
+    with_items:
+    - {{ sflow_api_port }}
+
+  - name: Invoke SFlow service REST API to subscribe
+    uri:
+      url: http://{{ '{{' }} sflowserviceaddr.stdout {{ '}}' }}:{{ sflow_api_port }}/subscribe
+      body: "{{ listening_endpoint }}"
+      body_format: raw
+      method: POST
diff --git a/xos/synchronizer/supervisor/monitoring_channel_observer.conf b/xos/synchronizer/supervisor/monitoring_channel_observer.conf
new file mode 100644
index 0000000..1c2dd42
--- /dev/null
+++ b/xos/synchronizer/supervisor/monitoring_channel_observer.conf
@@ -0,0 +1,2 @@
+[program:monitoring_channel_observer]
+command=python /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer.py -C /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config
diff --git a/xos/synchronizer/templates/Dockerfile.monitoring_channel b/xos/synchronizer/templates/Dockerfile.monitoring_channel
new file mode 100644
index 0000000..45defb8
--- /dev/null
+++ b/xos/synchronizer/templates/Dockerfile.monitoring_channel
@@ -0,0 +1,26 @@
+FROM       ubuntu:14.04.2
+MAINTAINER Andy Bavier <acb@cs.princeton.edu>
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN apt-get update && apt-get install -y \
+    python-pip \
+    python-dev
+
+RUN pip install web.py
+RUN pip install wsgilog
+RUN pip install python-ceilometerclient
+RUN mkdir -p /usr/local/share
+ADD ceilometer_proxy_server.py /usr/local/share/
+RUN chmod +x /usr/local/share/ceilometer_proxy_server.py
+ADD start_ceilometer_proxy /usr/local/sbin/
+RUN chmod +x /usr/local/sbin/start_ceilometer_proxy
+EXPOSE 8000
+WORKDIR /usr/local/share
+CMD /usr/local/sbin/start_ceilometer_proxy
diff --git a/xos/synchronizer/templates/Dockerfile.sflowpubsub b/xos/synchronizer/templates/Dockerfile.sflowpubsub
new file mode 100644
index 0000000..c9025ee
--- /dev/null
+++ b/xos/synchronizer/templates/Dockerfile.sflowpubsub
@@ -0,0 +1,22 @@
+FROM       ubuntu:14.04.2
+MAINTAINER Andy Bavier <acb@cs.princeton.edu>
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN apt-get update && apt-get install -y \
+    python-pip \
+    python-dev
+
+RUN pip install Flask
+RUN mkdir -p /usr/local/share/
+ADD sflow_pub_sub /usr/local/share/sflow_pub_sub
+RUN chmod +x /usr/local/share/sflow_pub_sub/sflow_pub_sub_main.py
+RUN chmod +x /usr/local/share/sflow_pub_sub/start_sflow_pub_sub
+WORKDIR /usr/local/share/sflow_pub_sub/
+CMD /usr/local/share/sflow_pub_sub/start_sflow_pub_sub
diff --git a/xos/synchronizer/templates/ceilometer_proxy_config.j2 b/xos/synchronizer/templates/ceilometer_proxy_config.j2
new file mode 100644
index 0000000..bd6c521
--- /dev/null
+++ b/xos/synchronizer/templates/ceilometer_proxy_config.j2
@@ -0,0 +1,17 @@
+# This file is autogenerated by the monitoring-channel observer.
+# It contains a list of attributes used by the ceilometer proxy web server.
+# syntax: key=value
+
+[default]
+auth_url={{ auth_url }}
+admin_user={{ admin_user }}
+admin_tenant={{ admin_tenant }}
+admin_password={{ admin_password }}
+ceilometer_pub_sub_url={{ ceilometer_pub_sub_url }}
+
+[allowed_tenants]
+{% if allowed_tenant_ids %}
+{% for tenant_id in allowed_tenant_ids %}
+{{ tenant_id }}
+{% endfor %}
+{% endif %}
diff --git a/xos/synchronizer/templates/ceilometer_proxy_server.py b/xos/synchronizer/templates/ceilometer_proxy_server.py
new file mode 100644
index 0000000..c81b941
--- /dev/null
+++ b/xos/synchronizer/templates/ceilometer_proxy_server.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python
+import web
+import ConfigParser
+import io
+import json
+from ceilometerclient import client
+import logging
+import logging.handlers
+import urllib
+import urllib2
+from urlparse import urlparse
+from wsgilog import WsgiLog
+
+web.config.debug=False
+
+logfile = "ceilometer_proxy_server.log"
+level=logging.INFO
+logger=logging.getLogger('ceilometer_proxy_server')
+logger.setLevel(level)
+handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
+logger.addHandler(handler)
+
+class FileLog(WsgiLog):
+    def __init__(self, application):
+        WsgiLog.__init__(
+            self,
+            application,
+            logformat = '%(message)s',
+            tofile = True,
+            toprint = True,
+            prnlevel = level,
+            file = logfile,
+            backups =1
+            )
+    def __call__(self, environ, start_response):
+        def hstart_response(status, response_headers, *args):
+             out = start_response(status, response_headers, *args)
+             try:
+                 logline=environ["SERVER_PROTOCOL"]+" "+environ["REQUEST_METHOD"]+" "+environ["REQUEST_URI"]+" - "+status
+             except Exception as err:
+                 logline="Could not log <%s> due to err <%s>" % (str(environ), err)
+             logger.info(logline)
+
+             return out
+
+        return super(FileLog, self).__call__(environ, hstart_response)
+
+#TODOs:
+#-See if we can avoid using python-ceilometerclient and instead use the REST calls directly with AuthToken
+#
+urls = (
+    r'^/v2/meters$', 'meter_list',
+    r'^/v2/meters/(?P<meter_name>[A-Za-z0-9_:.\-]+)/statistics$', 'statistics_list',
+    r'^/v2/samples$', 'sample_list',
+    r'^/v2/resources$', 'resource_list',
+    r'^/v2/subscribe$', 'pubsub_handler',
+)
+
+app = web.application(urls, globals())
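+
+# Illustrative usage (not part of the original file): with the proxy started via
+# start_ceilometer_proxy (which runs this server on port 8000), a client could
+# list the meters visible to the allowed tenants with, e.g.:
+#   curl http://<proxy-host>:8000/v2/meters
+# Each handler below re-scopes the query to the tenant ids listed in the
+# [allowed_tenants] section of ceilometer_proxy_config before calling ceilometer.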
+
+config = None
+ceilometer_client = None
+
+
+def parse_ceilometer_proxy_config():
+    global config
+    config = ConfigParser.RawConfigParser(allow_no_value=True)
+    config.read('ceilometer_proxy_config')
+ 
+def ceilometerclient():
+    global config, ceilometer_client
+    if ceilometer_client:
+         return ceilometer_client
+
+    if not config:
+         parse_ceilometer_proxy_config()
+
+    keystone = {}
+    keystone['os_username']=config.get('default','admin_user')
+    keystone['os_password']=config.get('default','admin_password')
+    keystone['os_auth_url']=config.get('default','auth_url')
+    keystone['os_tenant_name']=config.get('default','admin_tenant')
+    ceilometer_client = client.get_client(2,**keystone)
+    logger.info('ceilometer get_client is successful')
+    return ceilometer_client
+
+def make_query(user_id=None, tenant_id=None, resource_id=None,
+               user_ids=None, tenant_ids=None, resource_ids=None):
+    """Returns query built from given parameters.
+
+    This query can be then used for querying resources, meters and
+    statistics.
+
+    :Parameters:
+      - `user_id`: user_id, has a priority over list of ids
+      - `tenant_id`: tenant_id, has a priority over list of ids
+      - `resource_id`: resource_id, has a priority over list of ids
+      - `user_ids`: list of user_ids
+      - `tenant_ids`: list of tenant_ids
+      - `resource_ids`: list of resource_ids
+    """
+    user_ids = user_ids or []
+    tenant_ids = tenant_ids or []
+    resource_ids = resource_ids or []
+
+    query = []
+    if user_id:
+        user_ids = [user_id]
+    for u_id in user_ids:
+        query.append({"field": "user_id", "op": "eq", "value": u_id})
+
+    if tenant_id:
+        tenant_ids = [tenant_id]
+    for t_id in tenant_ids:
+        query.append({"field": "project_id", "op": "eq", "value": t_id})
+
+    if resource_id:
+        resource_ids = [resource_id]
+    for r_id in resource_ids:
+        query.append({"field": "resource_id", "op": "eq", "value": r_id})
+
+    return query
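+
+# Illustrative example: make_query(tenant_id='abc123') returns
+# [{"field": "project_id", "op": "eq", "value": "abc123"}], which the handlers
+# below pass as the q= argument to the ceilometer client list calls.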
+
+def filter_query_params(query_params):
+    new_query=[]
+    i=0
+    user_specified_tenants=[]
+    for field in query_params['q.field']:
+        if (field != 'project_id') and (field != 'project'):
+            query = {}
+            query['field']=field
+            if query_params['q.op'][i] != '':
+                 query['op']=query_params['q.op'][i]
+            query['value']=query_params['q.value'][i]
+            new_query.append(query)
+        else:
+            user_specified_tenants.append(query_params['q.value'][i])
+        i=i+1
+    return new_query,user_specified_tenants
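+
+# Illustrative example: with q.field=['resource_id', 'project_id'],
+# q.op=['eq', 'eq'] and q.value=['r-1', 't-1'], filter_query_params() returns
+# ([{'field': 'resource_id', 'op': 'eq', 'value': 'r-1'}], ['t-1']); any
+# project_id constraints supplied by the caller are stripped here and replaced
+# by the proxy's own allowed-tenant filters in the handlers below.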
+
+class meter_list:
+    def GET(self):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        meters=[]
+        for (k,v) in config.items('allowed_tenants'):
+             if user_specified_tenants and (k not in user_specified_tenants):
+                 continue
+             final_query=[]
+             final_query.extend(new_query)
+             query = make_query(tenant_id=k)
+             final_query.extend(query)
+             logger.debug('final query=%s',final_query)
+             results = client.meters.list(q=final_query)
+             meters.extend(results)
+        return json.dumps([ob._info for ob in meters])
+
+class statistics_list:
+    def GET(self, meter_name):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+             "period": None
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        period = query_params.period
+        statistics = []
+        for (k,v) in config.items('allowed_tenants'):
+              if user_specified_tenants and (k not in user_specified_tenants):
+                  continue
+              final_query=[]
+              final_query.extend(new_query)
+              query = make_query(tenant_id=k)
+              final_query.extend(query)
+              logger.debug('final query=%s',final_query)
+              results = client.statistics.list(meter_name=meter_name, q=final_query, period=period)
+              statistics.extend(results)
+        return json.dumps([ob._info for ob in statistics])
+
+class sample_list:
+    def GET(self):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+             "limit": None,
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        limit=query_params.limit
+        samples=[]
+        for (k,v) in config.items('allowed_tenants'):
+              if user_specified_tenants and (k not in user_specified_tenants):
+                  continue
+              final_query=[]
+              final_query.extend(new_query)
+              query = make_query(tenant_id=k)
+              final_query.extend(query)
+              logger.debug('final query=%s',final_query)
+              results = client.new_samples.list(q=final_query,limit=limit)
+              samples.extend(results)
+        return json.dumps([ob._info for ob in samples])
+
+class resource_list:
+    def GET(self):
+        global config
+        keyword_args = {
+             "q.field": [],
+             "q.op": [],
+             "q.type": [],
+             "q.value": [],
+             "limit": None,
+             "links": None,
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        limit=query_params.limit
+        links=query_params.links
+        resources=[]
+        for (k,v) in config.items('allowed_tenants'):
+              if user_specified_tenants and (k not in user_specified_tenants):
+                  continue
+              final_query=[]
+              final_query.extend(new_query)
+              query = make_query(tenant_id=k)
+              final_query.extend(query)
+              logger.debug('final query=%s',final_query)
+              results = client.resources.list(q=final_query, limit=limit, links=links)
+              resources.extend(results)
+        return json.dumps([ob._info for ob in resources])
+
+class pubsub_handler:
+    def POST(self):
+        global config
+        parse_ceilometer_proxy_config()
+        ceilometer_pub_sub_url = config.get('default', 'ceilometer_pub_sub_url')
+        url = urlparse(ceilometer_pub_sub_url)
+        if (not url.scheme) or (not url.netloc): 
+             raise Exception("Ceilometer PUB/SUB URL not set")
+        ceilometer_pub_sub_url = url.scheme + "://" + url.netloc + "/subscribe"
+        data_str = unicode(web.data(),'iso-8859-1')
+        post_data = json.loads(data_str)
+        final_query=[]
+        for (k,v) in config.items('allowed_tenants'):
+             query = make_query(tenant_id=k)
+             final_query.extend(query)
+        if not final_query:
+             raise Exception("Not allowed to subscribe to any meters")
+        post_data["query"] = final_query
+        #TODO: The PUB/SUB url needs to be read from config
+        put_request = urllib2.Request(ceilometer_pub_sub_url, json.dumps(post_data))
+        put_request.get_method = lambda: 'SUB'
+        put_request.add_header('Content-Type', 'application/json')
+        response = urllib2.urlopen(put_request)
+        response_text = response.read()
+        return json.dumps(response_text)
+
+    def DELETE(self):
+        ceilometer_pub_sub_url = config.get('default', 'ceilometer_pub_sub_url')
+        url = urlparse(ceilometer_pub_sub_url)
+        if (not url.scheme) or (not url.netloc): 
+             raise Exception("Ceilometer PUB/SUB URL not set")
+        ceilometer_pub_sub_url = url.scheme + "://" + url.netloc + "/unsubscribe"
+        data_str = web.data()
+        #TODO: The PUB/SUB url needs to be read from config
+        put_request = urllib2.Request(ceilometer_pub_sub_url, data_str)
+        put_request.get_method = lambda: 'UNSUB'
+        put_request.add_header('Content-Type', 'application/json')
+        response = urllib2.urlopen(put_request)
+        response_text = response.read()
+        return json.dumps(response_text)
+
+if __name__ == "__main__":
+    app.run(FileLog)
diff --git a/xos/synchronizer/templates/monitoring-channel.conf.j2 b/xos/synchronizer/templates/monitoring-channel.conf.j2
new file mode 100644
index 0000000..eb937ac
--- /dev/null
+++ b/xos/synchronizer/templates/monitoring-channel.conf.j2
@@ -0,0 +1,10 @@
+# Upstart script for Monitoring channel
+description "Upstart script for Monitoring channel container"
+author "andy@onlab.us"
+start on filesystem and started docker
+stop on runlevel [!2345]
+respawn
+
+script
+  /usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh
+end script
diff --git a/xos/synchronizer/templates/sflow_pub_sub/README b/xos/synchronizer/templates/sflow_pub_sub/README
new file mode 100644
index 0000000..ee8ad9b
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/README
@@ -0,0 +1,37 @@
+
+Subscribe-Publish Framework:
+1. Command to install the Flask web server framework:
+   sudo pip install Flask
+
+   Along with Flask, the following packages are needed:
+    msgpack
+    fnmatch
+    operator
+    logging
+    oslo_utils
+    ConfigParser
+
+2. Files: i.   sub_main.py
+          ii.  pubrecords.py
+          iii. pub_sub.conf
+
+3. Command to start the server:
+    # python sub_main.py
+4. Command for subscription:
+      i.  app_id: application ID; should be unique.
+      ii. target:
+           Presently only udp is supported.
+           a. udp:<ip:portno>
+           b. kafka:<kafkaip:kafkaport>
+      iii. sub_info: subscription notifications, e.g. cpu_util, cpu_*
+      iv.  query:
+         The following information needs to be provided as part of the query:
+         a. field: fields such as user id, project id, etc.
+         b. op: "eq", "gt", "lt", etc.
+         c. value: value of the field.
+     Example:
+         curl -i -H "Content-Type: application/json" -X SUB -d '{"app_id":"10","target":"udp://10.11.10.1:5006","sub_info":"cpu_util","query":[{"field":"user_id","op":"eq","value":"e1271a86bd4e413c87248baf2e5f01e0"},{"field":"project_id","op":"eq","value":"b1a3bf16d2014b47be9aefea88087318"},{"field":"resource_id","op":"eq","value":"658cd03f-d0f0-4f55-9f48-39e7222a8646"}]}' -L http://10.11.10.1:4455/subscribe
+
+5. Command for unsubscription:
+    For unsubscription only the app_id is needed.
+    curl -i -H "Content-Type: application/json" -X UNSUB -d '{"app_id":"10"}' http://10.11.10.1:4455/unsubscribe
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample b/xos/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
new file mode 100644
index 0000000..40b5bf5
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
@@ -0,0 +1,11 @@
+[LOGGING]
+level = DEBUG
+filename = sflow_pub_sub.log
+
+[WEB_SERVER]
+webserver_host = 0.0.0.0
+webserver_port = 33333
+
+[SFLOW]
+listening_ip_addr = 0.0.0.0
+listening_port = 6343
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2 b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2
new file mode 100644
index 0000000..1c5c88c
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2
@@ -0,0 +1,15 @@
+# This file is autogenerated by the sflow service synchronizer.
+# It contains a list of attributes used by the sflow service.
+# syntax: key=value
+
+[LOGGING]
+level = DEBUG
+filename = sflow_pub_sub.log
+
+[WEB_SERVER]
+webserver_host = 0.0.0.0
+webserver_port = {{ sflow_api_port }}
+
+[SFLOW]
+listening_ip_addr = 0.0.0.0
+listening_port = {{ sflow_port }}
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py
new file mode 100644
index 0000000..1276721
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+import socket,thread
+import sys
+import fnmatch
+import operator
+import logging
+import ConfigParser
+from urlparse import urlparse
+from sflow_sub_records import *
+
+from flask import request, Request, jsonify
+from flask import Flask
+from flask import make_response
+app = Flask(__name__)
+
+COMPARATORS = {
+    'gt': operator.gt,
+    'lt': operator.lt,
+    'ge': operator.ge,
+    'le': operator.le,
+    'eq': operator.eq,
+    'ne': operator.ne,
+}
+
+LEVELS = {'DEBUG': logging.DEBUG,
+          'INFO': logging.INFO,
+          'WARNING': logging.WARNING,
+          'ERROR': logging.ERROR,
+          'CRITICAL': logging.CRITICAL}
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+@app.route('/subscribe',methods=['POST'])
+def subscribe():
+    logging.debug(" SUB data:%s",request.data)
+    target = request.data
+    parse_target=urlparse(target)
+    if not parse_target.netloc:
+        err_str = "Error:Invalid target format"
+        logging.error("* Invalid target format")
+        return err_str 
+
+    status = "" 
+    if parse_target.scheme == "udp" :
+         host=parse_target.hostname
+         port=parse_target.port
+         scheme = parse_target.scheme
+         app_ip = host 
+         app_port = port
+ 
+         if host == None or port == None :
+             err_str = "* Error: Invalid IP Address format"
+             logging.error("* Invalid IP Address format")
+             return err_str
+  
+         subscrip_obj=sflow_sub_record(scheme,None,app_ip,app_port,None,None)
+         status = add_sflow_sub_record(subscrip_obj)
+         print_sflow_sub_records()
+
+    if parse_target.scheme == "kafka" :
+         pass
+    if parse_target.scheme == "file" :
+         pass
+    return status 
+
+@app.route('/unsubscribe',methods=['POST'])
+def unsubscribe():
+    try :  
+        target = request.data
+        parse_target=urlparse(target)
+        if not parse_target.netloc:
+            err_str = "Error:Invalid target format"
+            logging.error("* Invalid target format")
+            return err_str 
+
+        status = "" 
+        if parse_target.scheme == "udp" :
+            host=parse_target.hostname
+            port=parse_target.port
+            scheme = parse_target.scheme
+            app_ip = host 
+            app_port = port
+ 
+            delete_sflow_sub_record(app_ip, app_port)
+    except Exception as e:
+         logging.error("* %s",e.__str__())
+         return e.__str__()
+    return "UnSubscrition is sucessful! \n"
+
+@app.errorhandler(404)
+def not_found(error):
+    return make_response(jsonify({'error': 'Not found'}), 404)
+
+def sflow_recv(host,port):
+   udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
+   udp.bind((host, port))
+   logging.info("Started sflow receive thread on %s:%s",host, str(port))
+
+   while True:
+      data, source = udp.recvfrom(64000)
+      for obj in sflow_sub_database:
+         target_host = obj.ipaddress
+         target_port = int(obj.portno)
+         try:  
+             logging.debug("Replicating the sFlow data to:%s:%s",target_host, str(target_port))
+             udp.sendto(data,(target_host,target_port))
+         except Exception:
+             logging.error ("Unable to send sFlow data to target %s:%s ",target_host,str(target_port))
+   logging.warn("Exiting sflow receive thread")
+
+     
+def initialize(host,port):
+     thread.start_new(sflow_recv,(host,port,))
+        
+if __name__ == "__main__":
+
+    try:
+        config = ConfigParser.ConfigParser()
+        config.read('sflow_pub_sub.conf')
+        webserver_host = config.get('WEB_SERVER','webserver_host')
+        webserver_port = int (config.get('WEB_SERVER','webserver_port'))
+        sflow_listening_ip_addr  = config.get('SFLOW','listening_ip_addr')
+        sflow_listening_port  = int (config.get('SFLOW','listening_port'))
+
+        log_level    = config.get('LOGGING','level')
+        log_file       = config.get('LOGGING','filename')
+   
+        level = LEVELS.get(log_level, logging.NOTSET) 
+        logging.basicConfig(filename=log_file,format='%(asctime)s %(levelname)s %(message)s',\
+                    datefmt=_DEFAULT_LOG_DATE_FORMAT,level=level) 
+    except Exception as e:
+        print("* Error in config file:",e.__str__())
+        logging.error("* Error in confing file:%s",e.__str__())
+    else: 
+        initialize(sflow_listening_ip_addr,sflow_listening_port)
+        app.run(host=webserver_host,port=webserver_port,debug=False)
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py b/xos/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py
new file mode 100644
index 0000000..f8b0038
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+import fnmatch
+import logging
+
+class sflow_sub_record:
+    def __init__(self,scheme,app_id,app_ip,app_port,subscription_info,sub_info_filter):
+        logging.debug("* Updating subscription_info ") 
+        self.scheme = scheme
+        self.app_id = app_id
+        self.ipaddress = app_ip 
+        self.portno = app_port 
+        self.subscription_info = subscription_info
+        self.sub_info_filter = sub_info_filter 
+
+sflow_sub_database=[] 
+def add_sflow_sub_record(record):
+    logging.info("* inside %s",add_sflow_sub_record.__name__)
+    if not sflow_sub_database:
+        logging.debug("* -----------List is EMpty -------------") 
+        sflow_sub_database.append(record)
+        logging.debug("* Subscription is sucessful") 
+        return "Subscription is sucessful \n" 
+    for x in sflow_sub_database:
+        if (record.ipaddress == x.ipaddress) and (record.portno == x.portno) :
+            logging.warning("* entry already exists\n")
+            return "entry already exists \n" 
+    sflow_sub_database.append(record)
+    return "Subscription is sucessful \n"
+ 
+def delete_sflow_sub_record(ip,port):
+    logging.info("* inside %s",delete_sflow_sub_record.__name__)
+    Flag = False 
+    for x in sflow_sub_database:
+        if (ip == x.ipaddress) and (port == x.portno) :
+            sflow_sub_database.remove(x)
+            Flag = True
+            logging.debug("* Un-Subscription is sucessful") 
+            return "Un-Subscription is sucessful \n"
+    if not Flag :
+       err_str = "No subscription exists with target: udp://" + ip + ":" + str(port) + "\n"
+       logging.error(err_str)
+       raise Exception (err_str)
+       
+def print_sflow_sub_records():
+    logging.info("* inside %s",print_sflow_sub_records.__name__)
+    for obj in sflow_sub_database:
+        logging.debug("* ------------------------------------------------") 
+        logging.debug("* scheme:%s",obj.scheme)  
+        logging.debug("* app_id:%s",obj.app_id)
+        logging.debug("* portno:%s",obj.portno ) 
+        logging.debug("* ipaddress:%s",obj.ipaddress)  
+        logging.debug("* portno:%s",obj.portno)  
+        logging.debug("* subscription_info:%s",obj.subscription_info)
+        logging.debug("* sub_info_filter:%s",obj.sub_info_filter)
+        logging.debug("* ------------------------------------------------")
+ 
+def get_sflow_sub_records(notif_subscription_info):
+    logging.info("* inside %s",get_sflow_sub_records.__name__)
+    sub_list=[]  
+    for obj in sflow_sub_database:
+        if obj.subscription_info == notif_subscription_info:
+            sub_list.append(obj)
+    return sub_list
diff --git a/xos/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub b/xos/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub
new file mode 100644
index 0000000..e2edda2
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub
@@ -0,0 +1 @@
+/usr/local/share/sflow_pub_sub/sflow_pub_sub_main.py
diff --git a/xos/synchronizer/templates/start-monitoring-channel.sh.j2 b/xos/synchronizer/templates/start-monitoring-channel.sh.j2
new file mode 100755
index 0000000..4486985
--- /dev/null
+++ b/xos/synchronizer/templates/start-monitoring-channel.sh.j2
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+function mac_to_iface {
+    MAC=$1
+    ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
+}
+
+function generate_mac_from_ip {
+    IP=$1
+    printf "02:42:%02x:%02x:%02x:%02x\n" `echo $IP|awk -F '.' '{print $1, $2, $3, $4}'`
+}
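+
+# Illustrative example: generate_mac_from_ip 10.0.0.5 prints 02:42:0a:00:00:05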
+
+iptables -L > /dev/null
+ip6tables -L > /dev/null
+
+MONITORING_CHANNEL=monitoring-channel-{{ unique_id }}
+HEADNODEFLATLANIP={{ headnode_flat_lan_ip }}
+HOST_FORWARDING_PORT_FOR_CEILOMETER={{ ceilometer_host_port }}
+
+docker inspect $MONITORING_CHANNEL > /dev/null 2>&1
+if [ "$?" == 1 ]
+then
+    #sudo docker build -t monitoring-channel -f Dockerfile.monitoring_channel .
+    #sudo docker pull srikanthvavila/monitoring-channel
+if [ -z "$HEADNODEFLATLANIP" ] || [ "$HEADNODEFLATLANIP" == "None" ]
+then
+#    docker run -d --name=$MONITORING_CHANNEL --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 -v /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config srikanthvavila/monitoring-channel
+    docker run -d --name=$MONITORING_CHANNEL --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
+else
+#    docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 -v /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config srikanthvavila/monitoring-channel
+    docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
+fi
+else
+    docker start $MONITORING_CHANNEL
+fi
+
+# Set up networking via pipework
+#SHARED_LAN_IFACE=$( mac_to_iface {{ shared_lan_mac }} )
+#docker exec $MONITORING_CHANNEL ifconfig eth0 >> /dev/null || pipework $SHARED_LAN_IFACE -i eth0 $MONITORING_CHANNEL 192.168.0.1/24
+
+# Make sure VM's eth0 (hpc_client) has no IP address
+#ifconfig $HPC_IFACE 0.0.0.0
+
+# Now copy ceilometer proxy configuration to container
+#cat /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config | sudo docker exec -i $MONITORING_CHANNEL bash -c 'cat > /usr/local/share/ceilometer_proxy_config'
+docker cp /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config $MONITORING_CHANNEL:/usr/local/share/ceilometer_proxy_config
+
+# Attach to container
+docker start -a $MONITORING_CHANNEL
diff --git a/xos/synchronizer/templates/start_ceilometer_proxy b/xos/synchronizer/templates/start_ceilometer_proxy
new file mode 100644
index 0000000..ddaa9c8
--- /dev/null
+++ b/xos/synchronizer/templates/start_ceilometer_proxy
@@ -0,0 +1 @@
+/usr/local/share/ceilometer_proxy_server.py 8000