Monitoring publisher tenant changes
Code changes include:
disable_monitoring_service.yaml for disabling monitoring of the service.
enable_monitoring_service.yaml for enabling monitoring of the service.
install_monitoring_ceilometer.sh for installing monitoring agent in ceilometer and compute nodes.

Change-Id: I7f8d845cc59786eb25382b51573932fc6c9e5fac
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/README b/xos/synchronizer/ceilometer/monitoring_agent/README
new file mode 100644
index 0000000..c367075
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/README
@@ -0,0 +1,9 @@
+Sample curl commands to test Monitoring agent:
+---------------------------------------------
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}}' -L http://nova-compute-1:5004/monitoring/agent/onos/start
+
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455"}' -L http://nova-compute-1:5004/monitoring/agent/openstack/start
+
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455"}' -L http://nova-compute-1:5004/monitoring/agent/openstack/stop
+
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}}' -L http://nova-compute-1:5004/monitoring/agent/onos/stop
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml b/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml
new file mode 100644
index 0000000..9d36bcb
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml
@@ -0,0 +1,79 @@
+---
+- hosts: '{{ instance_name }}'
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+
+  tasks:
+
+  - name: Installing python-dev
+    apt: name=python-dev state=present update_cache=yes
+
+  - name: Installing Flask
+    pip: name=Flask
+
+  - name: Verify if  ([monitoring_agent] is to avoid capturing the shell process) is already running
+    shell: pgrep -f [m]onitoring_agent | wc -l
+    register: monitoringagent_job_pids_count
+
+  - name: DEBUG
+    debug: var=monitoringagent_job_pids_count.stdout
+
+  - name: stop /usr/local/share/monitoring_agent if already running
+    shell: pkill -f /usr/local/share/monitoring_agent/monitoring_agent.py
+    ignore_errors: True
+    when:  monitoringagent_job_pids_count.stdout !=  "0"
+
+  - name: Deleting monitoring agent folder(if already exists)
+    file: path=/usr/local/share/monitoring_agent state=absent owner=root group=root
+
+  - name: make sure /usr/local/share/monitoring_agent exists
+    file: path=/usr/local/share/monitoring_agent state=directory owner=root group=root
+
+  - name: Copying monitoring agent conf file
+    when : "'ceilometer' in instance_name"
+    set_fact: ceilometer_services="ceilometer-agent-central,ceilometer-agent-notification,ceilometer-collector,ceilometer-api"
+
+  - name: Copying monitoring agent conf file
+    when : "'ceilometer' not in instance_name"
+    set_fact: ceilometer_services="ceilometer-agent-compute"
+
+  - name : DEBUG
+    debug: var=ceilometer_services
+
+  - name: Copying monitoring agent conf file  
+    template: src=monitoring_agent.conf.j2 dest=/usr/local/share/monitoring_agent/monitoring_agent.conf  mode=0777 
+
+  - name: Copying file to /usr/local/share
+    copy: src=monitoring_agent.py dest=/usr/local/share/monitoring_agent/monitoring_agent.py mode=0777
+
+  - name: Copying file to /usr/local/share
+    copy: src=generate_pipeline.py dest=/usr/local/share/monitoring_agent/generate_pipeline.py mode=0777
+
+  - name: Copying file to /usr/local/share
+    copy: src=pipeline.yaml.j2 dest=/usr/local/share/monitoring_agent/pipeline.yaml.j2 mode=0777
+ 
+  - name: Copying file to /usr/local/share
+    copy: src=start_monitoring_agent.sh dest=/usr/local/share/monitoring_agent/start_monitoring_agent.sh mode=0777
+
+  - name: Starting monitoring agent
+    command: nohup python /usr/local/share/monitoring_agent/monitoring_agent.py &
+    args:
+       chdir: /usr/local/share/monitoring_agent/
+    async: 9999999999999999
+    poll: 0
+ 
+  - name: Configuring monitoring agent
+    shell: /usr/local/share/monitoring_agent/start_monitoring_agent.sh
+
+#TODO: 
+#Copy ONOS notification handlers 
+#from ~/xos_services/monitoring/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos
+#to /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/onos in the headnode ceilometer node
+#Copy a file from ~/xos_services/monitoring/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/__init__.py
+#to /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/ folder 
+#Also, update the entry_points.txt with the following changes:
+#[network.statistics.drivers]
+#....
+#onos = ceilometer.network.statistics.onos.driver:ONOSDriver
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/disable_monitoring_service.yaml b/xos/synchronizer/ceilometer/monitoring_agent/disable_monitoring_service.yaml
new file mode 100644
index 0000000..4f4bc7f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/disable_monitoring_service.yaml
@@ -0,0 +1,23 @@
+---
+- hosts: '{{ instance_name }}'
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  tasks:
+  - name : stopping onos service on {{ instance_name }}
+    uri:
+      url: http://{{ instance_name }}:5004/monitoring/agent/onos/stop
+      method: POST
+      body: '{"target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}}'
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
+  - name:  stopping openstack service on {{ instance_name }}
+    uri:
+      url: http://{{ instance_name }}:5004/monitoring/agent/openstack/stop
+      method: POST
+      body: '{"target":"udp://9.9.9.9:4455"}'
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/enable_monitoring_service.yaml b/xos/synchronizer/ceilometer/monitoring_agent/enable_monitoring_service.yaml
new file mode 100644
index 0000000..f4d4de5
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/enable_monitoring_service.yaml
@@ -0,0 +1,24 @@
+---
+- hosts: '{{ instance_name }}'
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  tasks:
+  - name : starting onos service on {{ instance_name }}
+    uri:
+      url: http://{{ instance_name }}:5004/monitoring/agent/onos/start
+      method: POST
+      body: '{ "target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}} '
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json    
+  - name:  starting openstack service on {{ instance_name }}
+    uri: 
+      url: http://{{ instance_name }}:5004/monitoring/agent/openstack/start
+      method: POST
+      body: '{"target":"udp://9.9.9.9:4455"}'
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
+
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/generate_pipeline.py b/xos/synchronizer/ceilometer/monitoring_agent/generate_pipeline.py
new file mode 100644
index 0000000..beab6d6
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/generate_pipeline.py
@@ -0,0 +1,125 @@
+from jinja2 import Environment, FileSystemLoader
+from urlparse import urlparse
+import os
+
+# Capture our current directory
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+openstack_service_info=[]
+onos_service_info=[]
+class Openstack_Service():
+   def __init__(self,service_name,target):
+      self.service_name=service_name
+      self.target=target
+      self.service_enable=True
+   def update_openstack_service_info(self):
+       if not openstack_service_info:
+           openstack_service_info.append(self)
+       else:
+           for obj in openstack_service_info:
+               openstack_service_info.remove(obj)
+           openstack_service_info.append(self)
+           #openstack_service_info[0].target.append(target)
+         
+class Onos_Service():
+   def __init__(self,service_name,target,resources):
+      self.service_name=service_name
+      self.target=target
+      self.resources=resources
+      self.service_enable=True
+   def update_onos_service_info(self):
+       if not onos_service_info:
+          onos_service_info.append(self)
+       else:
+           for obj in onos_service_info:
+               onos_service_info.remove(obj)
+           onos_service_info.append(self)
+          #onos_service_info[0].target.append(target)
+
+def generate_pipeline_yaml_for_openstack(target,Flag):
+    # Create the jinja2 environment.
+    # Notice the use of trim_blocks, which greatly helps control whitespace.
+    op_service=Openstack_Service("OPENSTACK",target)
+    op_service.update_openstack_service_info() 
+    parse_target=urlparse(target)
+    host = parse_target.hostname
+    port =  parse_target.port
+    with open("pipeline.yaml", 'w') as f:
+        j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+                         trim_blocks=True)
+        context = {
+             'openstack' : Flag, 
+             'listen_ip_addr': host,
+             'port_number' : port
+        }
+        fp = j2_env.get_template('pipeline.yaml.j2').render (
+            context)
+        f.write(fp)
+
+def generate_pipeline_yaml_for_onos(target,resources,Flag):
+     
+    onos_service=Onos_Service("ONOS",target,resources)
+    onos_service.update_onos_service_info() 
+    with open("pipeline.yaml", 'w') as f:
+        j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+                         trim_blocks=True)
+        context = {
+             'onos' : Flag,
+             'onos_endpoints' : resources,
+             'onos_target' : target,
+             'new_line': '\n',
+             'new_tab': '      '    
+        }
+        fp = j2_env.get_template('pipeline.yaml.j2').render (
+            context)
+        f.write(fp)
+
+def generate_pipeline_yaml_for_openstack_onos(target,Flag):
+
+    op_service=Openstack_Service("OPENSTACK",target)
+    op_service.update_openstack_service_info() 
+    parse_target=urlparse(target)
+    host = parse_target.hostname
+    port =  parse_target.port
+    with open("pipeline.yaml", 'w') as f:
+        j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+                         trim_blocks=True)
+        context = {
+             'openstack' : Flag, 
+             'listen_ip_addr': host,
+             'port_number' : port,
+             'onos' : Flag,
+             'onos_endpoints' : onos_service_info[0].resources,
+             'onos_target' : onos_service_info[0].target,
+             'new_line': '\n',
+             'new_tab': '      '
+        }
+        fp = j2_env.get_template('pipeline.yaml.j2').render (
+            context)
+        f.write(fp)
+
+def generate_pipeline_yaml_for_onos_openstack(target,resources,Flag):
+
+    onos_service=Onos_Service("ONOS",target,resources)
+    onos_service.update_onos_service_info() 
+ 
+    parse_target=urlparse(openstack_service_info[0].target)
+    host = parse_target.hostname
+    port =  parse_target.port
+   
+    with open("pipeline.yaml", 'w') as f:
+        j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+                         trim_blocks=True)
+        context = {
+             'onos' : Flag,
+             'onos_endpoints' : resources,
+             'onos_target' : target,
+             'new_line': '\n',
+             'new_tab': '      ',
+             'openstack' : Flag,
+             'listen_ip_addr': host,
+             'port_number' : port
+        }
+        fp = j2_env.get_template('pipeline.yaml.j2').render (
+            context)
+        f.write(fp)
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/install_monitoring_ceilometer.sh b/xos/synchronizer/ceilometer/monitoring_agent/install_monitoring_ceilometer.sh
new file mode 100755
index 0000000..8cec15f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/install_monitoring_ceilometer.sh
@@ -0,0 +1,12 @@
+#! /bin/bash
+#set -x 
+COMPUTENODES=$( bash -c "source ~/service-profile/cord-pod/admin-openrc.sh ; nova hypervisor-list" |grep "cord.lab"|awk '{print $4}')
+
+echo $COMPUTENODES
+
+for NODE in $COMPUTENODES; do
+    ansible-playbook -i /etc/maas/ansible/pod-inventory ~/xos_services/monitoring/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml -e instance_name=$NODE
+done
+
+CEILOMETERNODE="ceilometer-1"
+ansible-playbook ~/xos_services/monitoring/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml -e instance_name=$CEILOMETERNODE
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/monitoring_agent.py b/xos/synchronizer/ceilometer/monitoring_agent/monitoring_agent.py
new file mode 100644
index 0000000..5128d50
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/monitoring_agent.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+from flask import request, Request, jsonify
+from flask import Flask
+from flask import make_response
+import logging
+import logging.handlers
+import logging.config
+import subprocess
+import ConfigParser
+import generate_pipeline
+app = Flask(__name__)
+
+
+@app.route('/monitoring/agent/openstack/start',methods=['POST'])
+def openstack_start():
+    try:
+        # To do validation of user inputs for all the functions
+        target = request.json['target']
+        logging.debug("target:%s",target)
+        if not generate_pipeline.onos_service_info:
+            logging.debug (" ONOS Service is not enalble,Only openstack need to be enabled ")
+            generate_pipeline.generate_pipeline_yaml_for_openstack(target,True)
+        else:
+            logging.debug(" ONOS Service is also enabled ,please generate yaml file for both onos and openstack")
+            generate_pipeline.generate_pipeline_yaml_for_openstack_onos(target,True)
+        restart_ceilometer_services() 
+        return "Openstack start service called \n"
+    except Exception as e:
+            return e.__str__()
+
+@app.route('/monitoring/agent/onos/start',methods=['POST'])
+def onos_start():
+    try:
+        target = request.json['target']
+        logging.debug("target:%s",target)
+        metadata = request.json['meta_data'] 
+        logging.debug("metadata:%s",metadata)
+        logging.debug(type(target))
+        resources = metadata['resources']
+        logging.debug("resources:%s",resources)
+        if not generate_pipeline.openstack_service_info:
+            logging.debug("Openstak Service is not enabled,Only ONOS need to be enabled")
+            generate_pipeline.generate_pipeline_yaml_for_onos(target,resources,True)
+        else:
+            logging.debug(" Openstack Service is also enabled ,please generate yaml file for both onos and openstack")
+            generate_pipeline.generate_pipeline_yaml_for_onos_openstack(target,resources,True)
+
+        restart_ceilometer_services() 
+        return "ONOS start service called \n"
+    except Exception as e:
+            return e.__str__()
+
+@app.route('/monitoring/agent/vsg/start',methods=['POST'])
+def vsg_start():
+    try:
+        target = request.json['target']
+        logging.debug("target:%s",target)
+        return "vsg start service called \n"
+    except Exception as e:
+            return e.__str__()
+
+
+@app.route('/monitoring/agent/openstack/stop',methods=['POST'])
+def openstack_stop():
+    try:
+        target = request.json['target']
+        logging.debug("target:%s",target)
+        if not generate_pipeline.onos_service_info:
+             generate_pipeline.generate_pipeline_yaml_for_openstack(target,False)
+        else:
+             generate_pipeline.generate_pipeline_yaml_for_onos(generate_pipeline.onos_service_info[0].target,generate_pipeline.onos_service_info[0].resources,True)
+        logging.debug("Delete Openstack object")
+        for obj in generate_pipeline.openstack_service_info:
+               generate_pipeline.openstack_service_info.remove(obj)
+   
+        restart_ceilometer_services() 
+        return "Openstack stop service called \n"
+      
+    except Exception as e:
+            return e.__str__()
+
+@app.route('/monitoring/agent/onos/stop',methods=['POST'])
+def onos_stop():
+    try:
+        target = request.json['target']
+        logging.debug("target:%s",target)
+        metadata = request.json['meta_data'] 
+        logging.debug("metadata:%s",metadata)
+        resources = metadata['resources']
+        logging.debug("resources:%s",resources)
+         
+        if not generate_pipeline.openstack_service_info:
+             generate_pipeline.generate_pipeline_yaml_for_onos(target,resources,False)
+        else:
+            generate_pipeline.generate_pipeline_yaml_for_openstack(generate_pipeline.openstack_service_info[0].target,True)
+
+        logging.debug("Delete ONOS Object")
+        for obj in generate_pipeline.onos_service_info:
+               generate_pipeline.onos_service_info.remove(obj)
+
+        restart_ceilometer_services() 
+        return "ONOS stop service called \n"
+    except Exception as e:
+            return e.__str__()
+
+@app.route('/monitoring/agent/vsg/stop',methods=['POST'])
+def vsg_stop():
+    try:
+        target = request.json['target']
+        logging.debug("target:%s",target)
+        return "vsg stop service called \n"
+    except Exception as e:
+            return e.__str__()
+
+
+def restart_ceilometer_services():
+    try :
+       config = ConfigParser.ConfigParser()
+       config.read('monitoring_agent.conf')
+       services = config.get('SERVICE','Ceilometer_service')
+       service = services.split(",")
+       subprocess.call("sudo cp pipeline.yaml /etc/ceilometer/pipeline.yaml",shell=True)
+    except Exception as e:
+        logging.error("* Error in confing file:%s",e.__str__())
+        return False
+    else :
+        for service_name in service:
+            command = ['service',service_name, 'restart'];
+            logging.debug("Executing: %s command",command)
+            #shell=FALSE for sudo to work.
+            try :
+                subprocess.call(command, shell=False)
+            except Exception as e:
+                logging.error("* %s command execution failed with error %s",command,e.__str__())
+                return False
+    return True
+
+if __name__ == "__main__":
+    logging.config.fileConfig('monitoring_agent.conf', disable_existing_loggers=False)
+    app.run(host="0.0.0.0",port=5004,debug=False)
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/pipeline.yaml.j2 b/xos/synchronizer/ceilometer/monitoring_agent/pipeline.yaml.j2
new file mode 100644
index 0000000..977847b
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/pipeline.yaml.j2
@@ -0,0 +1,110 @@
+---
+sources:
+    - name: meter_source
+      interval: 600
+      meters:
+          - "*"
+      sinks:
+          - meter_sink
+    - name: cpu_source
+      interval: 600
+      meters:
+          - "cpu"
+      sinks:
+          - cpu_sink
+    - name: disk_source
+      interval: 600
+      meters:
+          - "disk.read.bytes"
+          - "disk.read.requests"
+          - "disk.write.bytes"
+          - "disk.write.requests"
+          - "disk.device.read.bytes"
+          - "disk.device.read.requests"
+          - "disk.device.write.bytes"
+          - "disk.device.write.requests"
+      sinks:
+          - disk_sink
+    - name: network_source
+      interval: 600
+      meters:
+          - "network.incoming.bytes"
+          - "network.incoming.packets"
+          - "network.outgoing.bytes"
+          - "network.outgoing.packets"
+      sinks:
+          - network_sink
+{% if onos %}
+    - name: sdn_source1
+      interval: 600
+      meters:
+          - "switch"
+          - "switch.*"
+      resources: {{ new_line }}
+{%- for urls in onos_endpoints %}
+          - {{ urls }} {{ new_line}}
+{%- if loop.last -%}
+{{ new_tab }}sinks:
+         - sdn_sink
+{%- endif -%}
+{%- endfor -%}
+{% endif %} 
+sinks:
+    - name: meter_sink
+      transformers:
+      publishers:
+          - notifier://
+{% if openstack %}
+          - udp://{{ listen_ip_addr }}:{{ port_number }}
+{% endif %}
+    - name: cpu_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                target:
+                    name: "cpu_util"
+                    unit: "%"
+                    type: "gauge"
+                    scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
+      publishers:
+          - notifier://
+{% if openstack %}
+          - udp://{{ listen_ip_addr }}:4455
+{% endif %}
+    - name: disk_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                source:
+                    map_from:
+                        name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
+                        unit: "(B|request)"
+                target:
+                    map_to:
+                        name: "\\1.\\2.\\3.rate"
+                        unit: "\\1/s"
+                    type: "gauge"
+      publishers:
+          - notifier://
+    - name: network_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                source:
+                   map_from:
+                       name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
+                       unit: "(B|packet)"
+                target:
+                    map_to:
+                        name: "network.\\1.\\2.rate"
+                        unit: "\\1/s"
+                    type: "gauge"
+      publishers:
+          - notifier://
+{% if onos %}
+    - name: sdn_sink
+      transformers:
+      publishers:
+          - notifier://
+          - {{ onos_target }}
+{% endif %}
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/start_monitoring_agent.sh b/xos/synchronizer/ceilometer/monitoring_agent/start_monitoring_agent.sh
new file mode 100755
index 0000000..f7dedc2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/start_monitoring_agent.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+sudo apt-get update
+sudo apt-get install -y python-dev
+sudo pip install Flask
+cd /home/ubuntu/monitoring_agent
+chmod +x monitoring_agent.py
+nohup python monitoring_agent.py &
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/templates/monitoring_agent.conf.j2 b/xos/synchronizer/ceilometer/monitoring_agent/templates/monitoring_agent.conf.j2
new file mode 100755
index 0000000..dc7da38
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/templates/monitoring_agent.conf.j2
@@ -0,0 +1,25 @@
+[SERVICE]
+Ceilometer_service = {{ ceilometer_services }}
+
+
+[loggers]
+keys=root
+
+[handlers]
+keys=logfile
+
+[formatters]
+keys=logfileformatter
+
+[logger_root]
+level=DEBUG
+handlers=logfile
+
+[formatter_logfileformatter]
+format='%(asctime)s %(filename)s %(levelname)s %(message)s'
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+level=NOTSET
+args=('monitoring_agent.log','a',10000000,5)
+formatter=logfileformatter
diff --git a/xos/synchronizer/ceilometer/udp_proxy/udpagent.py b/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
index 81826ad..1cac784 100644
--- a/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
+++ b/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
@@ -39,6 +39,10 @@
         event_data = {'event_type': 'infra','message_id':six.text_type(uuid.uuid4()),'publisher_id': 'cpe_publisher_id','timestamp':datetime.datetime.now().isoformat(),'priority':'INFO','payload':msg}
         return event_data
    
+    def errback(self, exc, interval):
+        logging.error('Error: %r', exc, exc_info=1)
+        logging.info('Retry in %s seconds.', interval)
+
     def setup_rabbit_mq_channel(self):
         service_exchange = Exchange(self.acord_control_exchange, "topic", durable=False)
         # connections/channels
@@ -47,6 +51,8 @@
         channel = connection.channel()
         # produce
         self.producer = Producer(channel, exchange=service_exchange, routing_key='notifications.info')
+        self.publish = connection.ensure(self.producer, self.producer.publish, errback=self.errback, max_retries=3)
+
 
     def start_udp(self):
         address_family = socket.AF_INET
@@ -70,12 +76,16 @@
             else:
                 try:
                     if sample.has_key("event_type"):
-                         logging.debug("recevied event  :%s",sample)
-                         self.producer.publish(sample)
+                         #logging.debug("recevied event  :%s",sample)
+                         logging.debug("recevied event  :%s",sample['event_type'])
+                         #self.producer.publish(sample)
+                         self.publish(sample)
                     else:
-                         logging.debug("recevied Sample  :%s",sample)
+                         #logging.debug("recevied Sample  :%s",sample)
+                         logging.debug("recevied Sample :%s",sample['counter_name'])
                          msg = self.convert_sample_to_event_data(sample)
-                         self.producer.publish(msg)
+                         #self.producer.publish(msg)
+                         self.publish(msg)
                 except Exception:
                     logging.exception("UDP: Unable to publish msg")
        
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
index 9b9e144..f002ef4 100644
--- a/xos/synchronizer/manifest
+++ b/xos/synchronizer/manifest
@@ -12,6 +12,12 @@
 templates/ceilometer_proxy_server.py
 templates/start_ceilometer_proxy
 templates/update-keystone-endpoints.py.j2
+ceilometer/udp_proxy/udpagent.py
+templates/udpagent.conf.j2
+ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/notifications.py
+ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/__init__.py
+ceilometer/ceilometer-plugins/network/ext_services/vcpe/notifications.py
+ceilometer/ceilometer-plugins/network/ext_services/vcpe/__init__.py
 manifest
 run.sh
 monitoring_synchronizer_config
@@ -23,6 +29,9 @@
 steps/sync_sflowservice.py
 steps/sync_ceilometerservice.yaml
 steps/sync_ceilometerservice.py
+steps/sync_openstackmonitoringpublisher.py
+steps/sync_openstackmonitoringpublisher.yaml
+steps/enable_monitoring_service.yaml
 files/vm-resolv.conf
 files/docker.list
 model-deps
diff --git a/xos/synchronizer/steps/enable_monitoring_service.yaml b/xos/synchronizer/steps/enable_monitoring_service.yaml
new file mode 100644
index 0000000..de03646
--- /dev/null
+++ b/xos/synchronizer/steps/enable_monitoring_service.yaml
@@ -0,0 +1,27 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  gather_facts: False
+  user: ubuntu
+  vars:
+    monitoringagents: 
+        {% for agent in agents %}
+        - url: {{ agent.url }}
+          body: {{ agent.body }}
+        {% endfor %}
+
+  tasks:
+  - name: debug
+    debug: msg=" {{ '{{' }} item {{ '}}' }} "
+    with_items: " {{ '{{' }} monitoringagents {{ '}}' }} "
+    
+  - name:  starting openstack service monitoring agent
+    uri: 
+      url: "{{ '{{' }} item.url {{ '}}' }}"
+      method: POST
+      body: "{{ '{{' }} item.body {{ '}}' }}"
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
+    with_items: " {{ '{{' }} monitoringagents {{ '}}' }} "
+
diff --git a/xos/synchronizer/steps/sync_monitoringchannel.py b/xos/synchronizer/steps/sync_monitoringchannel.py
index 3b97741..fcb6f17 100644
--- a/xos/synchronizer/steps/sync_monitoringchannel.py
+++ b/xos/synchronizer/steps/sync_monitoringchannel.py
@@ -23,6 +23,10 @@
 
 logger = Logger(level=logging.INFO)
 
+#FIXME: Is this right approach?
+#Maintaining a global SSH tunnel database in order to handle tunnel deletions during the object delete
+ssh_tunnel_db = {}
+
 class SSHTunnel:
 
     def __init__(self, localip, localport, key, remoteip, remote_port, jumpuser, jumphost):
@@ -147,7 +151,7 @@
            #Check if ssh tunnel is needed
            proxy_ssh = getattr(Config(), "observer_proxy_ssh", False)
 
-           if proxy_ssh:
+           if proxy_ssh and (not o.ssh_proxy_tunnel):
                proxy_ssh_key = getattr(Config(), "observer_proxy_ssh_key", None)
                proxy_ssh_user = getattr(Config(), "observer_proxy_ssh_user", "root")
                jump_hostname = fields["hostname"]
@@ -159,16 +163,12 @@
                local_port = remote_port
                local_ip = socket.gethostbyname(socket.gethostname())
 
-#               tunnel = SSHTunnelForwarder(jump_hostname,
-#                                      ssh_username=proxy_ssh_user,
-#                                      ssh_pkey=proxy_ssh_key,
-#                                      ssh_private_key_password="",
-#                                      remote_bind_address=(remote_host,remote_port),
-#                                      local_bind_address=(local_ip,local_port),
-#                                      set_keepalive=300)
-#               tunnel.start()
                tunnel = SSHTunnel(local_ip, local_port, proxy_ssh_key, remote_host, remote_port, proxy_ssh_user, jump_hostname)
                tunnel.start()
+               logger.info("SSH Tunnel created for Monitoring channel-%s at local port:%s"%(o.id,local_port))
+
+               #FIXME:Store the tunnel handle in global tunnel database
+               ssh_tunnel_db[o.id] = tunnel
 
                #Update the model with ssh tunnel info
                o.ssh_proxy_tunnel = True
@@ -185,6 +185,14 @@
         #if quick_update:
         #    logger.info("quick_update triggered; skipping ansible recipe")
         #else:
+        if ('delete' in fields) and (fields['delete']):
+            logger.info("Delete for Monitoring channel-%s is getting synchronized"%(o.id))
+            if o.id in ssh_tunnel_db:
+                tunnel = ssh_tunnel_db[o.id]
+                tunnel.stop()
+                logger.info("Deleted SSH Tunnel for Monitoring channel-%s at local port:%s"%(o.id,o.ssh_tunnel_port))
+                o.ssh_proxy_tunnel = False
+                del ssh_tunnel_db[o.id]
         super(SyncMonitoringChannel, self).run_playbook(o, fields)
 
         #o.last_ansible_hash = ansible_hash
diff --git a/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py
new file mode 100644
index 0000000..2c3d74d
--- /dev/null
+++ b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py
@@ -0,0 +1,123 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+import json
+#import threading
+import subprocess
+import random
+import tempfile
+#from sshtunnel import SSHTunnelForwarder
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from services.monitoring.models import CeilometerService, OpenStackServiceMonitoringPublisher
+from xos.logger import Logger, logging
+
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncOpenStackMonitoringPublisher(SyncInstanceUsingAnsible):
+    provides=[OpenStackServiceMonitoringPublisher]
+    observes=OpenStackServiceMonitoringPublisher
+    requested_interval=0
+    template_name = "sync_openstackmonitoringpublisher.yaml"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncOpenStackMonitoringPublisher, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = OpenStackServiceMonitoringPublisher.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = OpenStackServiceMonitoringPublisher.get_deleted_tenant_objects()
+
+        return objs
+
+    def sync_record(self, o):
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
+
+        self.prepare_record(o)
+
+        ceilometer_services = CeilometerService.get_service_objects().filter(id=o.provider_service.id)
+        if not ceilometer_services:
+            raise Exception("No associated Ceilometer service")
+        ceilometer_service = ceilometer_services[0]
+        service_instance = ceilometer_service.get_instance()
+        # sync only when the corresponding service instance is fully synced
+        if not service_instance:
+            self.defer_sync(o, "waiting on associated service instance")
+            return
+        if not service_instance.instance_name:
+            self.defer_sync(o, "waiting on associated service instance.instance_name")
+            return
+
+        # Step1: Orchestrate UDP proxy agent on the compute node where monitoring service VM is spawned
+
+        fields = { "hostname": ceilometer_service.ceilometer_rabbit_compute_node,
+                   "baremetal_ssh": True,
+                   "instance_name": "rootcontext",
+                   "username": "root",
+                   "container_name": None,
+                   "rabbit_host": ceilometer_service.ceilometer_rabbit_host,
+                   "rabbit_user": ceilometer_service.ceilometer_rabbit_user,
+                   "rabbit_password": ceilometer_service.ceilometer_rabbit_password,
+                   "listen_ip_addr": socket.gethostbyname(ceilometer_service.ceilometer_rabbit_compute_node)
+        }
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        key_name = self.get_node_key(service_instance.node)
+        if not os.path.exists(key_name):
+            raise Exception("Node key %s does not exist" % key_name)
+        key = file(key_name).read()
+        fields["private_key"] = key
+
+        template_name = "sync_openstackmonitoringpublisher.yaml"
+        fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id) + "_step1"
+
+        self.run_playbook(o, fields, template_name)
+
+        # Step2: Orchestrate OpenStack publish agent
+        target_uri = "udp://" + ceilometer_service.ceilometer_rabbit_compute_node + ":4455"
+        fields = {}
+        agent_info = []
+        if o.monitoring_agents:
+           for agent in o.monitoring_agents.all():
+              body = {'target': target_uri}
+              if agent.start_url_json_data:
+                 start_url_dict = json.loads(agent.start_url_json_data)
+                 body.update(start_url_dict)
+              a = {'url': agent.start_url, 'body': json.dumps(body)}
+              agent_info.append(a)
+
+        fields["agents"] = agent_info 
+        #fields["private_key"] = ""
+
+        template_name = "enable_monitoring_service.yaml"
+        fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id) + "_step2"
+
+        run_template(template_name, fields)
+
+        o.save()
+
+    def map_delete_inputs(self, o):
+        fields = {"unique_id": o.id,
+                  "delete": True}
+        return fields
+
+    def delete_record(self, o):
+        pass
+
diff --git a/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml
new file mode 100644
index 0000000..0549b25
--- /dev/null
+++ b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml
@@ -0,0 +1,45 @@
+---
+- hosts: '{{ instance_name }}'
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+  vars:
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      listen_ip_addr: {{ listen_ip_addr }}
+
+  tasks:
+
+  - name: Verify if udpagent ([] is to avoid capturing the shell process) is already running
+    shell: pgrep -f [u]dpagent | wc -l
+    register: udpagent_job_pids_count
+
+  - name: DEBUG
+    debug: var=udpagent_job_pids_count.stdout
+
+  - name: stop /usr/local/share/udp_proxy if already running
+    shell: pkill -f /usr/local/share/udp_proxy/udpagent.py
+    ignore_errors: True
+    when: udpagent_job_pids_count.stdout !=  "0"
+
+  - name: make sure /usr/local/share/udp_proxy exists
+    file: path=/usr/local/share/udp_proxy state=directory owner=root group=root
+
+  - name: Copy udp_proxy component files to destination
+    copy: src=/opt/xos/synchronizers/monitoring/ceilometer/udp_proxy/udpagent.py
+      dest=/usr/local/share/udp_proxy/udpagent.py
+
+  - name: udp_proxy config
+    template: src=/opt/xos/synchronizers/monitoring/templates/udpagent.conf.j2 dest=/usr/local/share/udp_proxy/udpagent.conf mode=0777
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+
+  - name: Launch udp_proxy
+    command: python /usr/local/share/udp_proxy/udpagent.py
+    args:
+       chdir: /usr/local/share/udp_proxy/
+    async: 9999999999999999
+    poll: 0
diff --git a/xos/synchronizer/templates/udpagent.conf.j2 b/xos/synchronizer/templates/udpagent.conf.j2
new file mode 100644
index 0000000..fe9cab6
--- /dev/null
+++ b/xos/synchronizer/templates/udpagent.conf.j2
@@ -0,0 +1,30 @@
+[udpservice]
+udp_address = {{ listen_ip_addr }}
+udp_port = 4455
+rabbit_userid = {{ rabbit_user }}
+rabbit_password = {{ rabbit_password }}
+rabbit_hosts = {{ rabbit_host }}
+acord_control_exchange = openstack_infra
+
+[loggers]
+keys=root
+
+[handlers]
+keys=logfile
+
+[formatters]
+keys=logfileformatter
+
+[logger_root]
+level=INFO
+#level=DEBUG
+handlers=logfile
+
+[formatter_logfileformatter]
+format='%(asctime)s %(filename)s %(levelname)s %(message)s'
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+level=NOTSET
+args=('udpagent.log','a',1000000,100)
+formatter=logfileformatter