Initial commit of PassiveTest
Change-Id: Idcd9a0c72df5eae6b4eedc544e473ebc9763ccdb
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
new file mode 100644
index 0000000..dd23436
--- /dev/null
+++ b/xos/synchronizer/manifest
@@ -0,0 +1,20 @@
+passivetest-synchronizer.yaml
+manifest
+passivetest_config
+monitoring_stats_notifier.py
+passivetest-synchronizer.py
+steps/sync_passivetesttenant.py
+steps/passivetesttenant_playbook.yaml
+steps/sync_monitoring_agent.yaml
+steps/roles/setup_probe/tasks/main.yml
+steps/roles/setup_probe/files/README
+steps/roles/setup_probe/files/debs/docker.io_1.6.2~dfsg1-1ubuntu4~14.04.1_amd64.deb
+steps/roles/setup_probe/files/debs/cgroup-lite_1.11~ubuntu14.04.2_all.deb
+steps/roles/setup_probe/files/debs/liberror-perl_0.17-1.1_all.deb
+steps/roles/setup_probe/files/debs/aufs-tools_3.2+20130722-1.1_amd64.deb
+steps/roles/setup_probe/files/debs/git-man_1.9.1-1ubuntu0.3_all.deb
+steps/roles/setup_probe/files/debs/git_1.9.1-1ubuntu0.3_amd64.deb
+steps/roles/setup_probe/files/passivetest_rsa.pub
+steps/roles/setup_probe/files/viv
+steps/roles/setup_probe/files/xcp.docker
+model-deps
diff --git a/xos/synchronizer/model-deps b/xos/synchronizer/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizer/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizer/monitoring_stats_notifier.py b/xos/synchronizer/monitoring_stats_notifier.py
new file mode 100644
index 0000000..bbfc0db
--- /dev/null
+++ b/xos/synchronizer/monitoring_stats_notifier.py
@@ -0,0 +1,111 @@
+import six, uuid, csv, datetime, threading, socket, shutil, argparse, glob, os, copy, pprint, time, sys
+from kombu.connection import BrokerConnection
+from kombu.messaging import Exchange, Queue, Consumer, Producer
+
+XAGG_CSV_DIR="/xsight/var/opt/xagg/tmp"
+XAGG_CSV_PROCESSED_DIR="/xsight/var/opt/xagg/tmp/processed"
+
+class RabbitMQ:
+ def __init__(self, rabbit_host, rabbit_user, rabbit_password, exchange_name):
+ exchange = Exchange(exchange_name, "topic", durable=False)
+ connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
+ channel = connection.channel()
+ self.producer = Producer(channel, exchange=exchange, routing_key="notifications.info")
+
+ def publish(self, stats):
+ self.producer.publish(stats)
+
+
+class CeilometerStats:
+ def __init__(self, keystone_user_id, keystone_tenant_id):
+ self.message_template = {'publisher_id': "monitoring_on_"+socket.gethostname(),
+ 'priority':'INFO'}
+ self.keystone_user_id = keystone_user_id
+ self.keystone_tenant_id = keystone_tenant_id
+
+ def _get_stat_template(self):
+ retval = copy.copy(self.message_template)
+ retval['message_id'] = six.text_type(uuid.uuid4())
+ retval['timestamp'] = datetime.datetime.now().isoformat()
+ retval['payload'] = {'user_id':self.keystone_user_id,'project_id':self.keystone_tenant_id}
+ return retval
+
+ def get_stat(self,name,event_type,stats={}):
+ retval = self._get_stat_template()
+ retval['event_type']=event_type
+ retval['payload']['resource_id']=name
+ for k,v in stats.iteritems():
+ retval['payload'][k]=v
+ return retval
+
+
+class XaggStatsReader:
+ XAGG_COLUMNS=[
+ {"name":"dn_thruput_min","unit":"kb/s","type":"gauge"},
+ {"name":"dn_thruput_max","unit":"kb/s","type":"gauge"},
+ {"name":"dn_thruput_avg","unit":"kb/s","type":"gauge"},
+ {"name":"up_thruput_min","unit":"kb/s","type":"gauge"},
+ {"name":"up_thruput_max","unit":"kb/s","type":"gauge"},
+ {"name":"up_thruput_avg","unit":"kb/s","type":"gauge"},
+ {"name":"up_byte","unit":"B","type":"cumulative"},
+ {"name":"dn_byte","unit":"B","type":"cumulative"},
+ {"name":"up_pkt","unit":"packet","type":"cumulative"},
+ {"name":"dn_pkt","unit":"packet","type":"cumulative"},
+ {"name":"tcp_rtt","unit":"ms","type":"gauge"},
+ {"name":"tcp_dn_retrans","unit":"packet","type":"gauge"},
+ {"name":"tcp_up_retrans","unit":"packet","type":"gauge"},
+ {"name":"tcp_attempt","unit":"attempt","type":"gauge"},
+ {"name":"tcp_success","unit":"attempt","type":"gauge"}
+ ]
+ CSV_FILE_COLUMNS=["user_src_ip","user_dst_ip","enb_id","customer_group","technology",
+ "handset","os","apn","service_category","service_type","service_name",
+ "application_name","app_attempt","app_success","app_response_time",
+ "dn_byte","dn_thruput_min","dn_thruput_max","dn_thruput_avg","up_byte",
+ "up_thruput_min","up_thruput_max","up_thruput_avg","tcp_dn_retrans",
+ "tcp_up_retrans","dn_pkt","up_pkt","tcp_rtt","tcp_attempt","tcp_success"]
+ def __init__(self, ceilometer_stats):
+ self.stats = ceilometer_stats
+
+ def get_stats(self, csvfile):
+ fp = open(csvfile)
+ f = csv.DictReader(filter(lambda row: row[0] !='#',fp),fieldnames=self.CSV_FILE_COLUMNS)
+ retval = []
+ for row in f:
+ name=row["user_src_ip"]+"_"+row["user_dst_ip"]
+ for stat in self.XAGG_COLUMNS:
+                stat = dict(stat, volume=row[stat["name"]])
+ retval.append(self.stats.get_stat(name,"passivetest.stats",stat))
+ return retval
+
+def periodic_publish(rabbit_mq,xagg_stats_reader):
+ for stats_file in glob.glob(XAGG_CSV_DIR+"/*.csv"):
+ if not os.path.isdir(stats_file):
+ stats = xagg_stats_reader.get_stats(stats_file)
+ for stat in stats:
+ rabbit_mq.publish(stat)
+ shutil.move(stats_file,XAGG_CSV_PROCESSED_DIR)
+
+ # Publish every minute
+ threading.Timer(60, periodic_publish, args=(rabbit_mq, xagg_stats_reader)).start()
+
+def main():
+ parser = argparse.ArgumentParser(description='Process xagg telemetry and send to ceilometer/monitoring service.')
+ for arg in ["keystone-tenant-id","keystone-user-id","rabbit-host","rabbit-user","rabbit-password","rabbit-exchange-name"]:
+ parser.add_argument("--"+arg,required=True)
+
+ args = parser.parse_args()
+
+ while True:
+ try:
+ rabbit_mq = RabbitMQ(args.rabbit_host, args.rabbit_user, args.rabbit_password, args.rabbit_exchange_name)
+ ceilometer_stats = CeilometerStats(args.keystone_user_id, args.keystone_tenant_id)
+ xagg_stats_reader = XaggStatsReader(ceilometer_stats)
+            return periodic_publish(rabbit_mq,xagg_stats_reader)
+ except Exception as e:
+ print(e)
+ sys.stdout.flush()
+ print("Trying again in one minute...")
+ time.sleep(60)
+
+if __name__ == "__main__":
+ exit(main())
diff --git a/xos/synchronizer/passivetest-synchronizer.py b/xos/synchronizer/passivetest-synchronizer.py
new file mode 100644
index 0000000..90d2c98
--- /dev/null
+++ b/xos/synchronizer/passivetest-synchronizer.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+# Runs the standard XOS synchronizer
+
+import importlib
+import os
+import sys
+
+synchronizer_path = os.path.join(os.path.dirname(
+ os.path.realpath(__file__)), "../../synchronizers/base")
+sys.path.append(synchronizer_path)
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
+
diff --git a/xos/synchronizer/passivetest-synchronizer.yaml b/xos/synchronizer/passivetest-synchronizer.yaml
new file mode 100644
index 0000000..b1ad5d3
--- /dev/null
+++ b/xos/synchronizer/passivetest-synchronizer.yaml
@@ -0,0 +1,14 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: This recipe provides additional configuration for the onboarded services.
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+ servicecontroller#passivetest:
+ type: tosca.nodes.ServiceController
+ properties:
+ no-create: true
+ synchronizer_config: /root/setup/files/passivetest_config
diff --git a/xos/synchronizer/passivetest_config b/xos/synchronizer/passivetest_config
new file mode 100644
index 0000000..b0f051a
--- /dev/null
+++ b/xos/synchronizer/passivetest_config
@@ -0,0 +1,30 @@
+# Required by XOS
+[db]
+name=xos
+user=postgres
+password=password
+host=xos_db
+port=5432
+
+# Required by XOS
+[api]
+nova_enabled=True
+
+# Sets options for the synchronizer
+[observer]
+name=passivetest
+dependency_graph=/opt/xos/synchronizers/passivetest/model-deps
+steps_dir=/opt/xos/synchronizers/passivetest/steps
+sys_dir=/opt/xos/synchronizers/passivetest/sys
+logfile=/var/log/xos_backend.log
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+enable_watchers=True
+
+[networking]
+use_vtn=True
+
diff --git a/xos/synchronizer/steps/passivetesttenant_playbook.yaml b/xos/synchronizer/steps/passivetesttenant_playbook.yaml
new file mode 100644
index 0000000..6bbf222
--- /dev/null
+++ b/xos/synchronizer/steps/passivetesttenant_playbook.yaml
@@ -0,0 +1,16 @@
+---
+# passivetesttenant_playbook - sets up the controller
+
+- hosts: "{{ instance_name }}"
+ connection: ssh
+ user: root
+ sudo: yes
+ gather_facts: no
+ vars:
+ - public_ip: "{{ public_ip }}"
+ - synchronizer_ip: "{{ synchronizer_ip }}"
+ - tap_ports: "{{ tap_ports }}"
+ - reset_viv: {{ reset_viv }}
+
+ roles:
+ - setup_probe
diff --git a/xos/synchronizer/steps/roles/setup_probe/files/viv b/xos/synchronizer/steps/roles/setup_probe/files/viv
new file mode 100755
index 0000000..7cd7375
--- /dev/null
+++ b/xos/synchronizer/steps/roles/setup_probe/files/viv
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+import requests, sys, getopt, json
+
+COMMANDS = [ 'show','reset', 'add','delete','deploy',"V_BUFS","NUM_BUFS", "INPUT_MODE", "TX_MODE", "QUEUE_TYPE" ]
+trafficTypes = ['TSA_CP','TSA_UP', 'TAA_CP', 'TAA_UP']
+INPUT_MODES=["mmap","pfring","pfring_zc","dpdk"]
+TX_MODES=["frp_udp","frp_tcp","viv"]
+QUEUE_TYPES=["CLFFIFO","dpdk"]
+RESOURCE_NAMES = [ 'V_BUFS', 'NUM_BUFS', 'INPUT_MODE', 'TX_MODE', 'QUEUE_TYPE']
+
+settings = {
+ 'ipAddress':'127.0.0.1',
+ 'port':'8080',
+ 'api_version':'v1.0',
+ 'resource': 'vivs/1',
+ 'operation':'GET',
+ 'verbose': False,
+ 'data': None
+ }
+
+class restClient:
+
+ def doIt( self, settings ):
+ try:
+ hdrs = { 'Accept':'application/json','content-type':'application/json' }
+ url = "http://%s:%s/%s" % ( settings['ipAddress'], settings['port'], settings['api_version'] )
+ if settings['resource']:
+ url = "%s/%s" % (url, settings['resource'])
+
+ if settings['operation'] == 'GET':
+ r = requests.get( url, headers = hdrs )
+ elif settings['operation'] == 'POST':
+ data = json.loads(settings['data'])
+ r = requests.post( url, data=json.dumps(data), headers = hdrs )
+
+ if r.status_code == requests.codes.ok:
+ if r.headers['content-type'] == 'application/json':
+ print json.dumps( r.json(), indent=4 )
+ else:
+ print "Received unexpected content type: %s" % r.headers['content-type']
+ if settings['verbose']:
+ print r.text
+
+ if settings['verbose']:
+ print "\nOperation:\n %s" % settings['operation']
+ print "\nURL:\n %s" % r.url
+ print "\nHeaders sent:\n %s" % r.request.headers
+ print "\nResponse status code:\n %s" % r.status_code
+
+ except requests.ConnectionError as e:
+ print e
+ raise # re-raise exception
+ except ValueError as e:
+ print "Invalid JSON: %s" % e
+ raise # re-raise exception
+
+
+def stringFromList( p ):
+ """
+ e.g. ['a', 'b', 'c' ] becomes "[a|b|c]"
+ """
+ return str( p ).replace("'","").replace(", ","|")
+
+def usage():
+ print "Usage:"
+ print " $ %s [-i ip][-p port][-a version][-v] command" % sys.argv[0]
+ print ""
+ print "optional arguments:"
+ print " -i <ip> IP address of server. (Default: 127.0.0.1)"
+ print " -p <port> Port on server. (Default: 8080)"
+ print " -a <version> API version to use. (Default: v1.0)"
+ print " -v Be verbose"
+ print ""
+ print "command"
+ print "-------"
+ print " show [<path>]"
+ print " add input <device> <type> [<type> [...]]"
+ print " add output <ip_address> <port> <type>"
+ print " delete input <device> [<type> [<type> [...]]]"
+ print " delete output <ip_address>[:<port>] [<type> [<type> [...]]]"
+ print " reset"
+ print " deploy"
+ print ""
+ print " V_BUFS <value>"
+ print " NUM_BUFS <value>"
+ print " INPUT_MODE <input-mode>"
+ print " TX_MODE <tx-mode>"
+ print " QUEUE_TYPE <queue-type>"
+ print ""
+ print ""
+ print " Where:"
+ print " <type> = %s" % stringFromList( trafficTypes )
+    print "    <input-mode>    = %s" % stringFromList(INPUT_MODES )
+    print "    <tx-mode> = %s" % stringFromList( TX_MODES )
+ print ""
+
+
+def getTrafficTypes( args ):
+ traffic_types=[]
+ while len(args) > 0:
+ t = args.pop(0)
+ if t not in trafficTypes:
+ raise ValueError("Invalid traffic type '%s'" % t )
+ traffic_types.append( t )
+ return traffic_types
+
+def parseAddInputCommand( args ):
+ if len( args ) < 2:
+ raise ValueError("Usage: %s add input <device> <traffic_type> [<traffic_type> [...]]" % sys.argv[0] )
+
+ device = args.pop(0)
+ tt = getTrafficTypes( args )
+ data = { "inputs":[{"device":device,"traffic_types":tt}]}
+ settings['resource']='vivs/1/add'
+ settings['operation']='POST'
+ settings['data'] = json.dumps( data )
+
+def parseAddOutputCommand( args ):
+ if len( args ) != 3:
+ raise ValueError("Usage: %s add output <ip_address> <port> <traffic_type>" % sys.argv[0] )
+
+ ip_address = args.pop(0)
+ port = args.pop(0)
+ traffic_type = args.pop(0)
+ if traffic_type not in trafficTypes:
+ raise ValueError("Invalid traffic type '%s'" % traffic_type )
+
+ data = {"outputs":[{"ip_address":ip_address,"type":traffic_type,"port":port}]}
+ settings['resource']='vivs/1/add'
+ settings['operation']='POST'
+ settings['data'] = json.dumps( data )
+
+def parseDeleteInputCommand( args ):
+ if len( args ) < 1:
+ raise ValueError( "Usage: %s delete input <device> [<traffic_type> [<traffic_type> [...]]]" % sys.argv[0] )
+
+ device = args.pop(0)
+ tt = getTrafficTypes( args )
+ data = {"inputs":[{"device":device,"traffic_types":tt}]}
+ settings['resource']='vivs/1/delete'
+ settings['operation']='POST'
+ settings['data'] = json.dumps( data )
+
+def parseDeleteOutputCommand( args ):
+ if len(args) == 0:
+ raise ValueError( "Usage: %s delete output <ip_address>[:<port>] [<traffic_type> [<traffic_type> [...]]]" % sys.argv[0] )
+
+ ipp = args.pop(0).split(':')
+ ip_address = ipp[0]
+ try:
+ port = ipp[1]
+ except IndexError:
+ # No port was specified
+ # All ports with spec'd traffic types will be removed for
+ # the IP given.
+ port = None
+
+ tt = getTrafficTypes( args )
+ data = {"outputs":[{"ip_address":ip_address,"port":port, "traffic_types":tt}]}
+ settings['resource']='vivs/1/delete'
+ settings['operation']='POST'
+ settings['data'] = json.dumps( data )
+
+def parseArgs( argv ):
+ try:
+ opts,args = getopt.getopt( argv,
+ "i:p:a:vh", ["ip=","port=","api_version="])
+ except getopt.GetoptError as e:
+ print e
+ raise # re-raise exception
+ for opt,arg in opts:
+ if opt == '-h':
+ usage()
+ sys.exit()
+ elif opt == '-v':
+ settings['verbose'] = True
+ elif opt in ("-i","--ip"):
+ settings['ipAddress'] = arg
+ elif opt in ("-p","--port"):
+ settings['port'] = arg
+ elif opt in ("-a","--api_version"):
+ settings['api_version'] = arg
+
+ # process residual non option args
+ if len(args) == 0:
+ raise ValueError( "Expected one of: %s" % str( COMMANDS ) )
+
+ cmd = args.pop(0)
+ if cmd not in COMMANDS:
+ print 'Unknown command', cmd
+ sys.exit(1)
+
+ if cmd in ['show']:
+ if len(args) != 0:
+ settings['resource'] = args.pop(0)
+
+ elif cmd in ['reset']:
+ settings['resource']='vivs/1/reset'
+ settings['operation'] = 'POST'
+ settings['data'] = json.dumps( {} )
+
+ elif cmd in ['add']:
+
+ if len(args) == 0:
+ raise ValueError("Expected 'input' or 'output'")
+
+ direction = args.pop(0)
+ if direction not in ['input','output']:
+ raise ValueError( "expected 'input' or 'output', found '%s'" % direction )
+
+ if direction == 'input':
+ parseAddInputCommand( args )
+ else:
+ parseAddOutputCommand( args )
+
+ elif cmd in ['delete']:
+ if len(args) == 0:
+ raise ValueError("Expected 'input' or 'output'" )
+
+ direction = args.pop(0)
+ if direction not in ['input','output']:
+ raise ValueError("expected 'input' or 'output', found '%s'" % direction )
+
+ if direction == 'input':
+ parseDeleteInputCommand( args )
+ else:
+ parseDeleteOutputCommand( args )
+
+ elif cmd in ['deploy']:
+ settings['resource']='vivs/1/deploy'
+ settings['operation'] = 'POST'
+ settings['data'] = '{}'
+
+ else:
+
+ if cmd in RESOURCE_NAMES:
+ if len( args ) == 0 :
+ raise ValueError( 'No value supplied' )
+
+ val = args.pop(0)
+ # The server will complain if it does not like the value.
+ settings['resource']='vivs/1/%s' % cmd
+ settings['operation'] = 'POST'
+ settings['data'] = json.dumps( { cmd : val } )
+
+
+ return settings
+
+
+def RestClient(argv):
+ settings = parseArgs( argv )
+ client = restClient()
+ client.doIt( settings )
+
+
+if __name__ == '__main__':
+ try:
+ RestClient(sys.argv[1:])
+ except ValueError as e:
+ print e
+ sys.exit(2)
+ except getopt.GetoptError as e:
+ print e
+ sys.exit(3)
+ except requests.ConnectionError as e:
+ print e
+ sys.exit(4)
+
diff --git a/xos/synchronizer/steps/roles/setup_probe/tasks/main.yml b/xos/synchronizer/steps/roles/setup_probe/tasks/main.yml
new file mode 100644
index 0000000..ae2d5fb
--- /dev/null
+++ b/xos/synchronizer/steps/roles/setup_probe/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- name: setup authorized key for user ubuntu since service chaining uses it
+ authorized_key:
+ user: ubuntu
+ key: "{{ lookup('file', 'passivetest_rsa.pub') }}"
+ state: present
+
+- name: copy viv client
+ copy: src=viv dest=/usr/bin mode=0755
+
+- name: reset viv (if needed)
+ shell: viv -i 172.27.0.1 reset
+ when: reset_viv
+
+- name: configure input
+ shell: viv -i 172.27.0.1 add input {{ tap_ports }} TSA_CP
+ ignore_errors: True
+
+- name: register probe with viv
+ shell: viv -i 172.27.0.1 add output {{ synchronizer_ip }} 50002 TSA_CP
+ ignore_errors: True
+
diff --git a/xos/synchronizer/steps/sync_monitoring_agent.yaml b/xos/synchronizer/steps/sync_monitoring_agent.yaml
new file mode 100644
index 0000000..93f805e
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoring_agent.yaml
@@ -0,0 +1,36 @@
+---
+- hosts: "{{ instance_name }}"
+ gather_facts: False
+ connection: ssh
+ user: root
+ vars:
+ keystone_tenant_id: {{ keystone_tenant_id }}
+ keystone_user_id: {{ keystone_user_id }}
+ rabbit_user: {{ rabbit_user }}
+ rabbit_password: {{ rabbit_password }}
+ rabbit_host: {{ rabbit_host }}
+
+ tasks:
+ - name: Verify if monitoring_stats_notifier ([] is to avoid capturing the shell process) job is already running
+ shell: pgrep -f [m]onitoring_stats_notifier | wc -l
+ register: job_pids_count
+
+ - name: make sure /usr/local/share/monitoring_agent exists
+ file: path=/usr/local/share/monitoring_agent state=directory owner=root group=root
+ when: job_pids_count.stdout == "0"
+
+ - name: make a processed file folder
+ file: path=/xsight/var/opt/xagg/tmp/processed state=directory owner=root group=root
+ when: job_pids_count.stdout == "0"
+
+ - name: Copy job to destination
+ copy: src=/opt/xos/synchronizers/passivetest/monitoring_stats_notifier.py
+ dest=/usr/local/share/monitoring_agent/monitoring_stats_notifier.py
+ when: job_pids_count.stdout == "0"
+
+ - name: Initiate monitoring_stats_notifier job
+ command: python /usr/local/share/monitoring_agent/monitoring_stats_notifier.py --keystone-tenant-id={{ keystone_tenant_id }} --keystone-user-id={{ keystone_user_id }} --rabbit-user={{ rabbit_user }} --rabbit-password={{ rabbit_password }} --rabbit-host={{ rabbit_host }} --rabbit-exchange-name='passivetestservice'
+ async: 9999999999999999
+ poll: 0
+ when: job_pids_count.stdout == "0"
+
diff --git a/xos/synchronizer/steps/sync_passivetesttenant.py b/xos/synchronizer/steps/sync_passivetesttenant.py
new file mode 100644
index 0000000..fb60667
--- /dev/null
+++ b/xos/synchronizer/steps/sync_passivetesttenant.py
@@ -0,0 +1,95 @@
+import os
+import sys
+from django.db.models import Q, F
+from services.passivetest.models import PassiveTestService, PassiveTestTenant
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import ModelLink, CoarseTenant, ServiceMonitoringAgentInfo
+from xos.logger import Logger, logging
+from urlparse import urlparse
+
+parentdir = os.path.join(os.path.dirname(__file__), "..")
+sys.path.insert(0, parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncPassiveTestTenant(SyncInstanceUsingAnsible):
+ provides = [PassiveTestTenant]
+ observes = PassiveTestTenant
+ requested_interval = 0
+ template_name = "passivetesttenant_playbook.yaml"
+ service_key_name = "/opt/xos/synchronizers/passivetest/passivetest_private_key"
+ watches = [ModelLink(CoarseTenant,via='coarsetenant'),
+ ModelLink(ServiceMonitoringAgentInfo,via='monitoringagentinfo')]
+
+ def __init__(self, *args, **kwargs):
+ super(SyncPassiveTestTenant, self).__init__(*args, **kwargs)
+
+ def fetch_pending(self, deleted):
+
+ if (not deleted):
+ objs = PassiveTestTenant.get_tenant_objects().filter(
+ Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
+ else:
+ # If this is a deletion we get all of the deleted tenants..
+ objs = PassiveTestTenant.get_deleted_tenant_objects()
+
+ return objs
+
+ def get_passivetestservice(self, o):
+ if not o.provider_service:
+ return None
+
+ passivetestservice = PassiveTestService.get_service_objects().filter(id=o.provider_service.id)
+
+ if not passivetestservice:
+ return None
+
+ return passivetestservice[0]
+
+ # Gets the attributes that are used by the Ansible template but are not
+ # part of the set of default attributes.
+ def get_extra_attributes(self, o):
+ passivetestservice = self.get_passivetestservice(o)
+ return { "public_ip": o.public_ip,
+ "synchronizer_ip": o.synchronizer_ip,
+ "tap_ports": passivetestservice.tap_ports,
+ "reset_viv": passivetestservice.reset_viv}
+
+ def handle_service_monitoringagentinfo_watch_notification(self, monitoring_agent_info):
+ if not monitoring_agent_info.service:
+ logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+ return
+
+ if not monitoring_agent_info.target_uri:
+ logger.info("handle watch notifications for service monitoring agent info...ignoring because target_uri attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+ return
+
+ objs = PassiveTestTenant.get_tenant_objects().all()
+ for obj in objs:
+ if obj.provider_service.id != monitoring_agent_info.service.id:
+ logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is not matching" % (monitoring_agent_info))
+ return
+
+ instance = self.get_instance(obj)
+ if not instance:
+ logger.warn("handle watch notifications for service monitoring agent info...: No valid instance found for object %s" % (str(obj)))
+ return
+
+ logger.info("handling watch notification for monitoring agent info:%s for PassiveTestTenant object:%s" % (monitoring_agent_info, obj))
+
+ #Run ansible playbook to update the routing table entries in the instance
+ fields = self.get_ansible_fields(instance)
+ fields["ansible_tag"] = obj.__class__.__name__ + "_" + str(obj.id) + "_service_monitoring"
+
+ #Parse the monitoring agent target_uri
+ url = urlparse(monitoring_agent_info.target_uri)
+
+ #Assuming target_uri is rabbitmq URI
+ fields["rabbit_user"] = url.username
+ fields["rabbit_password"] = url.password
+ fields["rabbit_host"] = url.hostname
+
+ template_name = "sync_monitoring_agent.yaml"
+ super(SyncPassiveTestTenant, self).run_playbook(obj, fields, template_name)
+ pass
+