Monitoring publisher tenant changes
Code changes include:
disable_monitoring_service.yaml for disabling monitoring of the service.
enable_monitoring_service.yaml for enabling monitoring of the service.
install_monitoring_ceilometer.sh for installing the monitoring agent on the ceilometer and compute nodes.
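
For reference, a minimal sketch (with hypothetical URL and target values) of the
agent list that sync_openstackmonitoringpublisher.py feeds into
enable_monitoring_service.yaml:

    fields["agents"] = [{
        "url": "http://10.1.0.17:5004/monitoring/agent/openstack/start",  # hypothetical agent start URL
        "body": json.dumps({"target": "udp://compute-1.cord.lab:4455"}),  # UDP proxy endpoint from Step1
    }]
    run_template("enable_monitoring_service.yaml", fields)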

Change-Id: I7f8d845cc59786eb25382b51573932fc6c9e5fac
diff --git a/xos/synchronizer/steps/enable_monitoring_service.yaml b/xos/synchronizer/steps/enable_monitoring_service.yaml
new file mode 100644
index 0000000..de03646
--- /dev/null
+++ b/xos/synchronizer/steps/enable_monitoring_service.yaml
@@ -0,0 +1,27 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  gather_facts: False
+  user: ubuntu
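+  # Note: this playbook is rendered twice -- first by the synchronizer's
+  # template pass, which expands the agents loop below, and then by Ansible;
+  # the quoted-brace escapes survive the first pass as literal Ansible
+  # expressions.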
+  vars:
+    monitoringagents: 
+        {% for agent in agents %}
+        - url: {{ agent.url }}
+          body: {{ agent.body }}
+        {% endfor %}
+
+  tasks:
+  - name: debug
+    debug: msg=" {{ '{{' }} item {{ '}}' }} "
+    with_items: " {{ '{{' }} monitoringagents {{ '}}' }} "
+    
+  - name: start openstack service monitoring agent
+    uri: 
+      url: "{{ '{{' }} item.url {{ '}}' }}"
+      method: POST
+      body: "{{ '{{' }} item.body {{ '}}' }}"
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
+    with_items: " {{ '{{' }} monitoringagents {{ '}}' }} "
+
diff --git a/xos/synchronizer/steps/sync_monitoringchannel.py b/xos/synchronizer/steps/sync_monitoringchannel.py
index 3b97741..fcb6f17 100644
--- a/xos/synchronizer/steps/sync_monitoringchannel.py
+++ b/xos/synchronizer/steps/sync_monitoringchannel.py
@@ -23,6 +23,10 @@
 
 logger = Logger(level=logging.INFO)
 
+#FIXME: Is this the right approach?
+#Maintain a global SSH tunnel database so that tunnels can be torn down when the corresponding object is deleted
+ssh_tunnel_db = {}
+
 class SSHTunnel:
 
     def __init__(self, localip, localport, key, remoteip, remote_port, jumpuser, jumphost):
@@ -147,7 +151,7 @@
            #Check if ssh tunnel is needed
            proxy_ssh = getattr(Config(), "observer_proxy_ssh", False)
 
-           if proxy_ssh:
+           if proxy_ssh and (not o.ssh_proxy_tunnel):
                proxy_ssh_key = getattr(Config(), "observer_proxy_ssh_key", None)
                proxy_ssh_user = getattr(Config(), "observer_proxy_ssh_user", "root")
                jump_hostname = fields["hostname"]
@@ -159,16 +163,12 @@
                local_port = remote_port
                local_ip = socket.gethostbyname(socket.gethostname())
 
-#               tunnel = SSHTunnelForwarder(jump_hostname,
-#                                      ssh_username=proxy_ssh_user,
-#                                      ssh_pkey=proxy_ssh_key,
-#                                      ssh_private_key_password="",
-#                                      remote_bind_address=(remote_host,remote_port),
-#                                      local_bind_address=(local_ip,local_port),
-#                                      set_keepalive=300)
-#               tunnel.start()
                tunnel = SSHTunnel(local_ip, local_port, proxy_ssh_key, remote_host, remote_port, proxy_ssh_user, jump_hostname)
                tunnel.start()
+               logger.info("SSH Tunnel created for Monitoring channel-%s at local port:%s"%(o.id,local_port))
+
+               #FIXME:Store the tunnel handle in global tunnel database
+               ssh_tunnel_db[o.id] = tunnel
 
                #Update the model with ssh tunnel info
                o.ssh_proxy_tunnel = True
@@ -185,6 +185,14 @@
         #if quick_update:
         #    logger.info("quick_update triggered; skipping ansible recipe")
         #else:
+        if ('delete' in fields) and (fields['delete']):
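+            # Tear down the proxy SSH tunnel (cached in ssh_tunnel_db when it
+            # was created) before the delete playbook runs.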
+            logger.info("Delete for Monitoring channel-%s is getting synchronized"%(o.id))
+            if o.id in ssh_tunnel_db:
+                tunnel = ssh_tunnel_db[o.id]
+                tunnel.stop()
+                logger.info("Deleted SSH Tunnel for Monitoring channel-%s at local port:%s"%(o.id,o.ssh_tunnel_port))
+                o.ssh_proxy_tunnel = False
+                del ssh_tunnel_db[o.id]
         super(SyncMonitoringChannel, self).run_playbook(o, fields)
 
         #o.last_ansible_hash = ansible_hash
diff --git a/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py
new file mode 100644
index 0000000..2c3d74d
--- /dev/null
+++ b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py
@@ -0,0 +1,123 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+import json
+#import threading
+import subprocess
+import random
+import tempfile
+#from sshtunnel import SSHTunnelForwarder
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from services.monitoring.models import CeilometerService, OpenStackServiceMonitoringPublisher
+from xos.logger import Logger, logging
+
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncOpenStackMonitoringPublisher(SyncInstanceUsingAnsible):
+    provides=[OpenStackServiceMonitoringPublisher]
+    observes=OpenStackServiceMonitoringPublisher
+    requested_interval=0
+    template_name = "sync_openstackmonitoringpublisher.yaml"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncOpenStackMonitoringPublisher, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = OpenStackServiceMonitoringPublisher.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = OpenStackServiceMonitoringPublisher.get_deleted_tenant_objects()
+
+        return objs
+
+    def sync_record(self, o):
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
+
+        self.prepare_record(o)
+
+        ceilometer_services = CeilometerService.get_service_objects().filter(id=o.provider_service.id)
+        if not ceilometer_services:
+            raise Exception("No associated Ceilometer service")
+        ceilometer_service = ceilometer_services[0]
+        service_instance = ceilometer_service.get_instance()
+        # sync only when the corresponding service instance is fully synced
+        if not service_instance:
+            self.defer_sync(o, "waiting on associated service instance")
+            return
+        if not service_instance.instance_name:
+            self.defer_sync(o, "waiting on associated service instance.instance_name")
+            return
+
+        # Step1: Orchestrate UDP proxy agent on the compute node where monitoring service VM is spawned
+
+        fields = { "hostname": ceilometer_service.ceilometer_rabbit_compute_node,
+                   "baremetal_ssh": True,
+                   "instance_name": "rootcontext",
+                   "username": "root",
+                   "container_name": None,
+                   "rabbit_host": ceilometer_service.ceilometer_rabbit_host,
+                   "rabbit_user": ceilometer_service.ceilometer_rabbit_user,
+                   "rabbit_password": ceilometer_service.ceilometer_rabbit_password,
+                   "listen_ip_addr": socket.gethostbyname(ceilometer_service.ceilometer_rabbit_compute_node)
+        }
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        key_name = self.get_node_key(service_instance.node)
+        if not os.path.exists(key_name):
+            raise Exception("Node key %s does not exist" % key_name)
+        key = file(key_name).read()
+        fields["private_key"] = key
+
+        template_name = "sync_openstackmonitoringpublisher.yaml"
+        fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id) + "_step1"
+
+        self.run_playbook(o, fields, template_name)
+
+        # Step2: Orchestrate OpenStack publish agent
+        target_uri = "udp://" + ceilometer_service.ceilometer_rabbit_compute_node + ":4455"
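+        # Each publish agent is pointed at the UDP proxy orchestrated in Step1
+        # (the Ceilometer rabbit compute node, port 4455).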
+        fields = {}
+        agent_info = []
+        if o.monitoring_agents:
+           for agent in o.monitoring_agents.all():
+              body = {'target': target_uri}
+              if agent.start_url_json_data:
+                 start_url_dict = json.loads(agent.start_url_json_data)
+                 body.update(start_url_dict)
+              a = {'url': agent.start_url, 'body': json.dumps(body)}
+              agent_info.append(a)
+
+        fields["agents"] = agent_info 
+        #fields["private_key"] = ""
+
+        template_name = "enable_monitoring_service.yaml"
+        fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id) + "_step2"
+
+        run_template(template_name, fields)
+
+        o.save()
+
+    def map_delete_inputs(self, o):
+        fields = {"unique_id": o.id,
+                  "delete": True}
+        return fields
+
+    def delete_record(self, o):
+        pass
+
diff --git a/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml
new file mode 100644
index 0000000..0549b25
--- /dev/null
+++ b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml
@@ -0,0 +1,45 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+  vars:
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      listen_ip_addr: {{ listen_ip_addr }}
+
+  tasks:
+
+  - name: Verify if udpagent ([] is to avoid capturing the shell process) is already running
+    shell: pgrep -f [u]dpagent | wc -l
+    register: udpagent_job_pids_count
+
+  - name: DEBUG
+    debug: var=udpagent_job_pids_count.stdout
+
+  - name: stop /usr/local/share/udp_proxy if already running
+    shell: pkill -f /usr/local/share/udp_proxy/udpagent.py
+    ignore_errors: True
+    when: udpagent_job_pids_count.stdout !=  "0"
+
+  - name: make sure /usr/local/share/udp_proxy exists
+    file: path=/usr/local/share/udp_proxy state=directory owner=root group=root
+
+  - name: Copy udp_proxy component files to destination
+    copy: src=/opt/xos/synchronizers/monitoring/ceilometer/udp_proxy/udpagent.py
+      dest=/usr/local/share/udp_proxy/udpagent.py
+
+  - name: udp_proxy config
+    template: src=/opt/xos/synchronizers/monitoring/templates/udpagent.conf.j2 dest=/usr/local/share/udp_proxy/udpagent.conf mode=0777
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+
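+  # Launch the agent fire-and-forget: async with poll=0 lets the play continue
+  # without waiting on the long-running udpagent process.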
+  - name: Launch udp_proxy
+    command: python /usr/local/share/udp_proxy/udpagent.py
+    args:
+       chdir: /usr/local/share/udp_proxy/
+    async: 9999999999999999
+    poll: 0