Initial commit of PassiveTest

Change-Id: Idcd9a0c72df5eae6b4eedc544e473ebc9763ccdb
(cherry picked from commit 9062322cffd03d2c56b66d040ad13bc562bb6544)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b25c15b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..ded4f63
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 Open Networking Laboratory
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..3a57bd6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,90 @@
+include ~/service-profile/mcord/Makefile
+
+VIAVI_REPO_SERVER="http://10.3.75.178"
+
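+# Main target: onboards the PassiveTest service, registers the mcord_taa image in
+# Glance, and instantiates the service via the pod-passivetest.yaml TOSCA recipe.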
+passivetest: probe_dependencies passivetest-onboard mcord_taa
+	$(RUN_TOSCA) $(SERVICE_DIR)/PassiveTest/pod-passivetest.yaml
+
+rebuild-passivetest: probe_dependencies
+	bash $(COMMON_DIR)/rebuild.sh $(XOS_BOOTSTRAP_PORT) passivetest
+	bash $(COMMON_DIR)/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) xos
+
+passivetest-onboard:
+	cp $(SERVICE_DIR)/PassiveTest/xos/synchronizer/passivetest_config $(CONFIG_DIR)/files/passivetest_config
+	sudo cp $(CONFIG_DIR)/id_rsa $(CONFIG_DIR)/key_import/passivetest_rsa
+	sudo cp $(CONFIG_DIR)/id_rsa.pub $(CONFIG_DIR)/key_import/passivetest_rsa.pub
+	$(RUN_TOSCA_BOOTSTRAP) $(COMMON_DIR)/tosca/disable-onboarding.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/PassiveTest/xos/PassiveTest-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/PassiveTest/xos/synchronizer/passivetest-synchronizer.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(COMMON_DIR)/tosca/enable-onboarding.yaml
+	bash $(COMMON_DIR)/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) services/passivetest
+	bash $(COMMON_DIR)/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) xos
+	bash $(COMMON_DIR)/wait_for_xos_port.sh $(XOS_UI_PORT)
+
+clean: cleanup
+	./cleanup.sh
+	bash -c "source $(CONFIG_DIR)/admin-openrc.sh; nova list --all-tenants; neutron net-list"
+
+passivetest-acord:
+	bash install_monitoring_plugin.sh
+	$(RUN_TOSCA) $(SERVICE_DIR)/PassiveTest/passivetest-acord.yaml
+
+probe_dependencies: manifest
+
+manifest:
+	cp $(CONFIG_DIR)/id_rsa.pub $(SERVICE_DIR)/PassiveTest/xos/synchronizer/steps/roles/setup_probe/files/passivetest_rsa.pub
+	cd $(SERVICE_DIR)/PassiveTest/xos; bash $(SERVICE_DIR)/PassiveTest/xos/make_synchronizer_manifest.sh
+
+viv: viv.docker
+	bash install_viv.sh
+
+viv.docker:
+	wget $(VIAVI_REPO_SERVER)/viv.docker
+
+mcord_taa: mcord_taa.qcow2
+	bash -c "source $(CONFIG_DIR)/admin-openrc.sh; glance image-show mcord_taa || glance image-create --name mcord_taa --container-format bare --disk-format qcow2 --progress --is-public True --file mcord_taa.qcow2"
+
+mcord_taa.qcow2:
+	wget $(VIAVI_REPO_SERVER)/mcord_taa.qcow2
+
+cord-monitoringservice: $(SERVICE_DIR)/monitoring monitoring_files
+	@echo "[Patching CORD-MONITORINGSERVICE]"
+	cp $(SERVICE_DIR)/PassiveTest/ceilometerdashboard.py $(SERVICE_DIR)/monitoring/xos/api/tenant/monitoring/dashboard/ceilometerdashboard.py
+	@echo "[CORD-MONITORINGSERVICE]"
+	sudo cp $(CONFIG_DIR)/id_rsa $(CONFIG_DIR)/key_import/monitoringservice_rsa
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/monitoring/xos/monitoring-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(CONFIG_DIR)/monitoring_synchronizer.yaml
+	bash $(COMMON_DIR)/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) services/monitoring
+	bash $(COMMON_DIR)/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) xos
+	bash $(COMMON_DIR)/wait_for_xos_port.sh $(XOS_UI_PORT)
+	$(RUN_TOSCA) $(CONFIG_DIR)/monitoringservice.yaml
+	$(RUN_TOSCA) $(CONFIG_DIR)/monitoringtenant.yaml
+	@echo "waiting for monitoring service to be fully ready...."
+	ansible-playbook -i local $(SERVICE_DIR)/monitoring/xos/test/monitoring_test_initial.yaml
+
+monitoring_files: monitoring_synchronizer.yaml monitoringservice.yaml monitoringtenant.yaml
+
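+# Pattern rule: refreshes each monitoring*.yaml file in CONFIG_DIR by copying it
+# from the neighboring cord-pod configuration directory.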
+monitoring%.yaml:
+	rm -f $(CONFIG_DIR)/$@
+	cp $(CONFIG_DIR)/../cord-pod/$@ $(CONFIG_DIR)
+
+inframonitoring.yaml:
+	export SETUPDIR=$(CONFIG_DIR); bash $(CONFIG_DIR)/../cord-pod/make-inframonitoring-yaml.sh
+
+rebuild-monitoringservice:
+	bash $(COMMON_DIR)/rebuild.sh $(XOS_BOOTSTRAP_PORT) monitoring
+	bash $(COMMON_DIR)/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) xos
+
+install-inframonitoring-agents:
+	bash $(SERVICE_DIR)/monitoring/xos/synchronizer/ceilometer/monitoring_agent/install_monitoring_ceilometer.sh
+	@echo "Validate if infra metrics are available...."
+	ansible-playbook -i local $(SERVICE_DIR)/monitoring/xos/test/monitoring_test_inframetrics.yaml
+
+enable-inframonitoring: inframonitoring.yaml onos_monitoring_service_endpoints.json
+	$(RUN_TOSCA) $(CONFIG_DIR)/inframonitoring.yaml
+
+onos_monitoring_service_endpoints.json:
+	cp $(CONFIG_DIR)/../cord-pod/$@ $(CONFIG_DIR)/
+
+
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3676543
--- /dev/null
+++ b/README.md
@@ -0,0 +1,34 @@
+# PassiveTest Pre-reqs
+
+PassiveTest requires the following images (in the xos_services/PassiveTest folder):
+- mcord_taa.qcow2 (the main XOS probe service image for PassiveTest)
+- viv.docker (the container image providing the tapping service)
+
+If the images are not in the current folder, the Makefile targets will attempt to download them from a private repository internal to Viavi. If you do not have access to that repository, download the images separately before running this step.
+
+# Onboarding PassiveTest and tapping agent
+- Run `make viv` followed by `make passivetest`
+- Note: `make viv` only needs to be run once. It does not need to be run again when you redeploy PassiveTest or any of the other services, unless a compute node was added, removed, or rebooted.
+- Ensure that the mysite_passivetest_slice VM has two networks (run `nova list --all-tenants`)
+  **Note:** XOS has a race condition in which the PassiveTest VM may come up with only one network
+
+# Onboarding A-CORD
+- Run `make cord-monitoringservice`, then `make install-inframonitoring-agents`, then `make enable-inframonitoring`
+- Ensure that the Ceilometer VMs have two networks (run `nova list --all-tenants`)
+  **Note:** XOS has a race condition in which the VMs may come up with only one network
+
+# Enabling PassiveTest to report meters in A-CORD
+- Run `make passivetest-acord`
+
+# Enabling tapping of an interface
+The Ceilometer Dashboard lists the ports of VMs in the slices by names containing "tap", such as 'tapacb6a527-f8'. To tap such an interface and provide PassiveTest metrics, perform the following:
+- Go to the PassiveTest service (Services->PassiveTest->service_passivetest)
+- Enter the name of the tap port in the form's "Tap ports:" field
+- Click 'Save'
+- Click on the Home->PassiveTest breadcrumb
+- Click on "Change" in the PASSIVETEST Tenants row
+- Click on the 'mysite_passivetest_slice-X' instance
+- Click 'Save'
+  **Note:** The last three steps trigger the synchronizer to add the tap port
+- You should now see statistics in the Ceilometer Dashboard under the service_passivetest XOS service for each IP source/destination pair on that tap interface
+
diff --git a/ceilometerdashboard.py b/ceilometerdashboard.py
new file mode 100644
index 0000000..b684b7a
--- /dev/null
+++ b/ceilometerdashboard.py
@@ -0,0 +1,1586 @@
+import requests
+from six.moves import urllib
+import urllib2
+import pytz
+import datetime
+import time
+import pprint
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework.views import APIView
+from core.models import *
+from services.monitoring.models import MonitoringChannel, CeilometerService
+from django.forms import widgets
+from django.utils import datastructures
+from django.utils.translation import ugettext_lazy as _
+from django.utils import timezone
+from django.core.exceptions import PermissionDenied
+from xos.logger import observer_logger as logger
+import logging
+
+# This REST API endpoint provides information that the ceilometer view needs to display
+logger.setLevel(logging.DEBUG)
+
+def getTenantCeilometerProxyURL(user):
+    monitoring_channel = None
+    for obj in MonitoringChannel.get_tenant_objects().all():
+        if (obj.creator.username == user.username):
+            monitoring_channel = obj
+            break
+    if not monitoring_channel:
+        raise XOSMissingField("Monitoring channel is missing for this tenant...Create one and invoke this REST API")
+    #TODO: Wait until URL is completely UP
+    MAX_ATTEMPTS = 5
+    attempts = 0
+    while True:
+        try:
+            response = urllib2.urlopen(monitoring_channel.ceilometer_url)
+            break
+        except urllib2.HTTPError, e:
+            logger.info('HTTP error %(reason)s' % {'reason':e.reason})
+            break
+        except urllib2.URLError, e:
+            attempts += 1
+            if attempts >= MAX_ATTEMPTS:
+                raise XOSServiceUnavailable("Ceilometer channel is not ready yet...Try again later")
+            logger.info('URL error %(reason)s' % {'reason':e.reason})
+            time.sleep(1)
+            pass
+    logger.info("Ceilometer proxy URL for user %(user)s is %(url)s" % {'user':user.username,'url':monitoring_channel.ceilometer_url})
+    return monitoring_channel.ceilometer_url
+
+def getTenantControllerTenantMap(user, slice=None):
+    tenantmap={}
+    if not slice:
+        slices = Slice.objects.filter(creator=user)
+    else:
+        slices = [slice]
+    for s in slices:
+        for cs in s.controllerslices.all():
+            if cs.tenant_id:
+                tenantmap[cs.tenant_id] = {"slice": cs.slice.name}
+                if cs.slice.service:
+                    tenantmap[cs.tenant_id]["service"] = cs.slice.service.name
+                else:
+                    logger.warn("SRIKANTH: Slice %(slice)s is not associated with any service" % {'slice':cs.slice.name})
+                    tenantmap[cs.tenant_id]["service"] = "Other"
+    if not slice:
+        #TEMPORARY WORKAROUND: Some network resources, such as whitebox switches, do not belong to a specific tenant.
+        #They are all associated with the "default_admin_tenant" tenant
+        tenantmap["default_admin_tenant"] = {"slice": "default_admin_tenant", "service": "NetworkInfra"}
+    return tenantmap
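+# Shape of the mapping returned above (values are illustrative only): keys are the
+# controller tenant ids of the user's slices, e.g.
+#   {"<tenant-id>": {"slice": "mysite_passivetest_slice", "service": "passivetest"},
+#    "default_admin_tenant": {"slice": "default_admin_tenant", "service": "NetworkInfra"}}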
+
+def build_url(path, q, params=None):
+    """Convert list of dicts and a list of params to query url format.
+
+    This will convert the following:
+        "[{field=this,op=le,value=34},
+          {field=that,op=eq,value=foo,type=string}],
+         ['foo=bar','sna=fu']"
+    to:
+        "?q.field=this&q.field=that&
+          q.op=le&q.op=eq&
+          q.type=&q.type=string&
+          q.value=34&q.value=foo&
+          foo=bar&sna=fu"
+    """
+    if q:
+        query_params = {'q.field': [],
+                        'q.value': [],
+                        'q.op': [],
+                        'q.type': []}
+
+        for query in q:
+            for name in ['field', 'op', 'value', 'type']:
+                query_params['q.%s' % name].append(query.get(name, ''))
+
+        # Transform the dict to a sequence of two-element tuples in fixed
+        # order, then the encoded string will be consistent in Python 2&3.
+        new_qparams = sorted(query_params.items(), key=lambda x: x[0])
+        path += "?" + urllib.parse.urlencode(new_qparams, doseq=True)
+
+        if params:
+            for p in params:
+                path += '&%s' % p
+    elif params:
+        path += '?%s' % params[0]
+        for p in params[1:]:
+            path += '&%s' % p
+    return path
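+# Minimal usage sketch of build_url() (the query and params below are hypothetical
+# examples, not values used elsewhere in this module):
+#   build_url('/v2/meters',
+#             [{'field': 'project_id', 'op': 'eq', 'value': 'abc', 'type': 'string'}],
+#             ['limit=10'])
+#   returns '/v2/meters?q.field=project_id&q.op=eq&q.type=string&q.value=abc&limit=10'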
+
+def concat_url(endpoint, url):
+    """Concatenate endpoint and final URL.
+
+    E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to
+    "http://keystone/v2.0/tokens".
+
+    :param endpoint: the base URL
+    :param url: the final URL
+    """
+    return "%s/%s" % (endpoint.rstrip("/"), url.strip("/"))
+
+def resource_list(request, query=None, ceilometer_url=None, ceilometer_usage_object=None):
+    """List the resources."""
+    url = concat_url(ceilometer_url, build_url('/v2/resources', query))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+def sample_list(request, meter_name, ceilometer_url=None, query=None, limit=None):
+    """List the samples for this meter."""
+    params = ['limit=%s' % limit] if limit else []
+    url = concat_url(ceilometer_url, build_url('/v2/samples', query, params))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+def meter_list(request, ceilometer_url=None, query=None):
+    """List the user's meters."""
+    url = concat_url(ceilometer_url, build_url('/v2/meters', query))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+
+def statistic_list(request, meter_name, ceilometer_url=None, query=None, period=None):
+    """List of statistics."""
+    p = ['period=%s' % period] if period else []
+    url = concat_url(ceilometer_url, build_url('/v2/meters/' + meter_name + '/statistics', query, p))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+def diff_lists(a, b):
+    if not a:
+        return []
+    elif not b:
+        return a
+    else:
+        return list(set(a) - set(b))
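+# Note: diff_lists() computes a set difference, so duplicates in `a` are collapsed
+# and the original ordering is not preserved whenever `b` is non-empty.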
+
+def get_resource_map(request, ceilometer_url, query=None):
+    resource_map = {}
+    try:
+        resources = resource_list(request, ceilometer_url=ceilometer_url, query=query)
+        for r in resources:
+            if 'display_name' in r['metadata']:
+                name = r['metadata']['display_name']
+            #elif 'name' in r['metadata']:
+            #    name = r['metadata']['name']
+            #Output of the 'resources' REST query has changed from kilo to mitaka; the conditions below handle the mitaka output
+            elif 'resource_metadata.display_name' in r['metadata']:
+                name = r['metadata']['resource_metadata.display_name']
+            elif 'resource_metadata.name' in r['metadata']:
+                name = r['metadata']['resource_metadata.name']
+            else:
+                name = r['resource_id']
+            resource_map[r['resource_id']] = name
+    except requests.exceptions.RequestException as e:
+        raise e
+
+    return resource_map
+
+class Meters(object):
+    """Class for listing of available meters.
+
+    It lists the meters defined in this class that are available in the
+    Ceilometer meter_list, and stores information that is not available
+    in Ceilometer, i.e. label and description.
+
+    """
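+    # Rough usage sketch of this class (illustrative only; how the request, URL and
+    # maps are obtained is an assumption based on the helpers defined above):
+    #
+    #   tenant_map = getTenantControllerTenantMap(request.user)
+    #   ceilometer_url = getTenantCeilometerProxyURL(request.user)
+    #   resource_map = get_resource_map(request, ceilometer_url)
+    #   meters = Meters(request, ceilometer_url=ceilometer_url,
+    #                   tenant_map=tenant_map, resource_map=resource_map)
+    #   nova_meters = meters.list_nova()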
+
+    def __init__(self, request=None, ceilometer_meter_list=None, ceilometer_url=None, query=None, tenant_map=None, resource_map=None):
+        # Storing the request.
+        self._request = request
+        self.ceilometer_url = ceilometer_url
+        self.tenant_map = tenant_map
+        self.resource_map = resource_map
+
+        # Storing the Ceilometer meter list
+        if ceilometer_meter_list:
+            self._ceilometer_meter_list = ceilometer_meter_list
+        else:
+            try:
+                meter_query=[]
+                if query:
+                    meter_query = query
+                self._ceilometer_meter_list = meter_list(request, self.ceilometer_url, meter_query)
+            except requests.exceptions.RequestException as e:
+                self._ceilometer_meter_list = []
+                raise e
+
+        # Storing the meters info categorized by their services.
+        self._nova_meters_info = self._get_nova_meters_info()
+        self._neutron_meters_info = self._get_neutron_meters_info()
+        self._glance_meters_info = self._get_glance_meters_info()
+        self._cinder_meters_info = self._get_cinder_meters_info()
+        self._swift_meters_info = self._get_swift_meters_info()
+        self._kwapi_meters_info = self._get_kwapi_meters_info()
+        self._ipmi_meters_info = self._get_ipmi_meters_info()
+        self._vcpe_meters_info = self._get_vcpe_meters_info()
+        self._passivetest_meters_info = self._get_passivetest_meters_info()
+        self._volt_meters_info = self._get_volt_meters_info()
+        self._sdn_meters_info = self._get_sdn_meters_info()
+        self._broadview_meters_info = self._get_broadview_meters_info()
+
+        # Storing the meters info of all services together.
+        all_services_meters = (self._nova_meters_info,
+                               self._neutron_meters_info,
+                               self._glance_meters_info,
+                               self._cinder_meters_info,
+                               self._swift_meters_info,
+                               self._kwapi_meters_info,
+                               self._ipmi_meters_info,
+                               self._vcpe_meters_info,
+                               self._passivetest_meters_info,
+                               self._volt_meters_info,
+                               self._sdn_meters_info,
+                               self._broadview_meters_info)
+        self._all_meters_info = {}
+        for service_meters in all_services_meters:
+            self._all_meters_info.update(dict([(meter_name, meter_info)
+                                               for meter_name, meter_info
+                                               in service_meters.items()]))
+
+        # Here will be the cached Meter objects, that will be reused for
+        # repeated listing.
+        self._cached_meters = {}
+
+    def list_all(self, only_meters=None, except_meters=None):
+        """Returns a list of meters based on the meters names.
+
+        :Parameters:
+          - `only_meters`: The list of meter names we want to show.
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=only_meters,
+                          except_meters=except_meters)
+
+    def list_nova(self, except_meters=None):
+        """Returns a list of meters tied to nova.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._nova_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_neutron(self, except_meters=None):
+        """Returns a list of meters tied to neutron.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._neutron_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_glance(self, except_meters=None):
+        """Returns a list of meters tied to glance.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._glance_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_cinder(self, except_meters=None):
+        """Returns a list of meters tied to cinder.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._cinder_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_swift(self, except_meters=None):
+        """Returns a list of meters tied to swift.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._swift_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_kwapi(self, except_meters=None):
+        """Returns a list of meters tied to kwapi.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._kwapi_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_ipmi(self, except_meters=None):
+        """Returns a list of meters tied to ipmi
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._ipmi_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_passivetest(self, except_meters=None):
+        """Returns a list of meters tied to passivetest service
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._passivetest_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_vcpe(self, except_meters=None):
+        """Returns a list of meters tied to vcpe service
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._vcpe_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_volt(self, except_meters=None):
+        """Returns a list of meters tied to volt service
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._volt_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_sdn(self, except_meters=None):
+        """Returns a list of meters tied to sdn service
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._sdn_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_broadview(self, except_meters=None):
+        """Returns a list of meters tied to broadview service
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._broadview_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_other_services(self, except_meters=None):
+        """Returns a list of meters tied to other services
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+        other_service_meters = [m for m in self._ceilometer_meter_list
+                                if m["name"] not in self._all_meters_info.keys()]
+        other_service_meters = diff_lists(other_service_meters, except_meters)
+
+        meters = []
+        for meter in other_service_meters:
+            self._cached_meters[meter["name"]] = meter
+            meters.append(meter)
+        return meters
+
+    def _list(self, only_meters=None, except_meters=None):
+        """Returns a list of meters based on the meters names.
+
+        :Parameters:
+          - `only_meters`: The list of meter names we want to show.
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        # Get all wanted meter names.
+        if only_meters:
+            meter_names = only_meters
+        else:
+            meter_names = [meter_name for meter_name
+                           in self._all_meters_info.keys()]
+
+        meter_names = diff_lists(meter_names, except_meters)
+        # Collect meters for wanted meter names.
+        return self._get_meters(meter_names)
+
+    def _get_meters(self, meter_names):
+        """Obtain meters based on meter_names.
+
+        The meters that do not exist in Ceilometer meter list are left out.
+
+        :Parameters:
+          - `meter_names`: A list of meter names we want to fetch.
+        """
+
+        meters = []
+        for meter_name in meter_names:
+            meter_candidates = self._get_meter(meter_name)
+            if meter_candidates:
+                meters.extend(meter_candidates)
+        return meters
+
+    def _get_meter(self, meter_name):
+        """Obtains a meter.
+
+        Obtains meter either from cache or from Ceilometer meter list
+        joined with statically defined meter info like label and description.
+
+        :Parameters:
+          - `meter_name`: A meter name we want to fetch.
+        """
+        meter_candidates = self._cached_meters.get(meter_name, None)
+
+        if not meter_candidates:
+            meter_candidates = [m for m in self._ceilometer_meter_list
+                                if m["name"] == meter_name]
+
+            if meter_candidates:
+                meter_info = self._all_meters_info.get(meter_name, None)
+                if meter_info:
+                    label = meter_info["label"]
+                    description = meter_info["description"]
+                    meter_category = meter_info["type"]
+                else:
+                    label = ""
+                    description = ""
+                    meter_category = "Other"
+                for meter in meter_candidates:
+                    meter["label"] = label
+                    meter["description"] = description
+                    meter["category"] = meter_category
+                    if meter["project_id"] in self.tenant_map.keys():
+                        meter["slice"] = self.tenant_map[meter["project_id"]]["slice"]
+                        meter["service"] = self.tenant_map[meter["project_id"]]["service"]
+                    else:
+                        meter["slice"] = meter["project_id"]
+                        meter["service"] = "Other"
+                    if meter["resource_id"] in self.resource_map.keys():
+                        meter["resource_name"] = self.resource_map[meter["resource_id"]]
+
+                self._cached_meters[meter_name] = meter_candidates
+
+        return meter_candidates
+
+    def _get_nova_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        meters_info = datastructures.SortedDict([
+            ("instance", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Existence of instance"),
+            }),
+            ("instance:<type>", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Existence of instance <type> "
+                                 "(openstack types)"),
+            }),
+            ("memory", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Volume of RAM"),
+            }),
+            ("memory.usage", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Volume of RAM used"),
+            }),
+            ("cpu", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("CPU time used"),
+            }),
+            ("cpu_util", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average CPU utilization"),
+            }),
+            ("vcpus", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Number of VCPUs"),
+            }),
+            ("disk.read.requests", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Number of read requests"),
+            }),
+            ("disk.write.requests", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Number of write requests"),
+            }),
+            ("disk.read.bytes", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Volume of reads"),
+            }),
+            ("disk.write.bytes", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Volume of writes"),
+            }),
+            ("disk.read.requests.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average rate of read requests"),
+            }),
+            ("disk.write.requests.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average rate of write requests"),
+            }),
+            ("disk.read.bytes.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average rate of reads"),
+            }),
+            ("disk.write.bytes.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average volume of writes"),
+            }),
+            ("disk.root.size", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Size of root disk"),
+            }),
+            ("disk.ephemeral.size", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Size of ephemeral disk"),
+            }),
+            ("network.incoming.bytes", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Number of incoming bytes "
+                                 "on the network for a VM interface"),
+            }),
+            ("network.outgoing.bytes", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Number of outgoing bytes "
+                                 "on the network for a VM interface"),
+            }),
+            ("network.incoming.packets", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Number of incoming "
+                                 "packets for a VM interface"),
+            }),
+            ("network.outgoing.packets", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Number of outgoing "
+                                 "packets for a VM interface"),
+            }),
+            ("network.incoming.bytes.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average rate per sec of incoming "
+                                 "bytes on a VM network interface"),
+            }),
+            ("network.outgoing.bytes.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average rate per sec of outgoing "
+                                 "bytes on a VM network interface"),
+            }),
+            ("network.incoming.packets.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average rate per sec of incoming "
+                                 "packets on a VM network interface"),
+            }),
+            ("network.outgoing.packets.rate", {
+                'type': _("Nova"),
+                'label': '',
+                'description': _("Average rate per sec of outgoing "
+                                 "packets on a VM network interface"),
+            }),
+        ])
+        # Adding flavor based meters into meters_info dict
+        # TODO(lsmola) this kind of meter will be probably deprecated
+        # https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
+        #for flavor in get_flavor_names(self._request):
+        #    name = 'instance:%s' % flavor
+        #    meters_info[name] = dict(meters_info["instance:<type>"])
+
+        #    meters_info[name]['description'] = (
+        #        _('Duration of instance type %s (openstack flavor)') %
+        #        flavor)
+
+        # TODO(lsmola) allow to set specific in local_settings. For all meters
+        # because users can have their own agents and meters.
+        return meters_info
+
+    def _get_neutron_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('network', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Existence of network"),
+            }),
+            ('network.create', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Creation requests for this network"),
+            }),
+            ('network.update', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Update requests for this network"),
+            }),
+            ('subnet', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Existence of subnet"),
+            }),
+            ('subnet.create', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Creation requests for this subnet"),
+            }),
+            ('subnet.update', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Update requests for this subnet"),
+            }),
+            ('port', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Existence of port"),
+            }),
+            ('port.create', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Creation requests for this port"),
+            }),
+            ('port.update', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Update requests for this port"),
+            }),
+            ('router', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Existence of router"),
+            }),
+            ('router.create', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Creation requests for this router"),
+            }),
+            ('router.update', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Update requests for this router"),
+            }),
+            ('ip.floating', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Existence of floating ip"),
+            }),
+            ('ip.floating.create', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Creation requests for this floating ip"),
+            }),
+            ('ip.floating.update', {
+                'type': _("Neutron"),
+                'label': '',
+                'description': _("Update requests for this floating ip"),
+            }),
+        ])
+
+    def _get_glance_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('image', {
+                'type': _("Glance"),
+                'label': '',
+                'description': _("Image existence check"),
+            }),
+            ('image.size', {
+                'type': _("Glance"),
+                'label': '',
+                'description': _("Uploaded image size"),
+            }),
+            ('image.update', {
+                'type': _("Glance"),
+                'label': '',
+                'description': _("Number of image updates"),
+            }),
+            ('image.upload', {
+                'type': _("Glance"),
+                'label': '',
+                'description': _("Number of image uploads"),
+            }),
+            ('image.delete', {
+                'type': _("Glance"),
+                'label': '',
+                'description': _("Number of image deletions"),
+            }),
+            ('image.download', {
+                'type': _("Glance"),
+                'label': '',
+                'description': _("Image is downloaded"),
+            }),
+            ('image.serve', {
+                'type': _("Glance"),
+                'label': '',
+                'description': _("Image is served out"),
+            }),
+        ])
+
+    def _get_cinder_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('volume', {
+                'type': _("Cinder"),
+                'label': '',
+                'description': _("Existence of volume"),
+            }),
+            ('volume.size', {
+                'type': _("Cinder"),
+                'label': '',
+                'description': _("Size of volume"),
+            }),
+        ])
+
+    def _get_swift_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('storage.objects', {
+                'type': _("Swift"),
+                'label': '',
+                'description': _("Number of objects"),
+            }),
+            ('storage.objects.size', {
+                'type': _("Swift"),
+                'label': '',
+                'description': _("Total size of stored objects"),
+            }),
+            ('storage.objects.containers', {
+                'type': _("Swift"),
+                'label': '',
+                'description': _("Number of containers"),
+            }),
+            ('storage.objects.incoming.bytes', {
+                'type': _("Swift"),
+                'label': '',
+                'description': _("Number of incoming bytes"),
+            }),
+            ('storage.objects.outgoing.bytes', {
+                'type': _("Swift"),
+                'label': '',
+                'description': _("Number of outgoing bytes"),
+            }),
+            ('storage.api.request', {
+                'type': _("Swift"),
+                'label': '',
+                'description': _("Number of API requests against swift"),
+            }),
+        ])
+
+    def _get_kwapi_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('energy', {
+                'type': _("Kwapi"),
+                'label': '',
+                'description': _("Amount of energy"),
+            }),
+            ('power', {
+                'type': _("Kwapi"),
+                'label': '',
+                'description': _("Power consumption"),
+            }),
+        ])
+
+    def _get_ipmi_meters_info(self):
+        """Returns additional info for each meter
+
+        That will be used for augmenting the Ceilometer meter
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('hardware.ipmi.node.power', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System Current Power"),
+            }),
+            ('hardware.ipmi.fan', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("Fan RPM"),
+            }),
+            ('hardware.ipmi.temperature', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("Sensor Temperature Reading"),
+            }),
+            ('hardware.ipmi.current', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("Sensor Current Reading"),
+            }),
+            ('hardware.ipmi.voltage', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("Sensor Voltage Reading"),
+            }),
+            ('hardware.ipmi.node.inlet_temperature', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System Inlet Temperature Reading"),
+            }),
+            ('hardware.ipmi.node.outlet_temperature', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System Outlet Temperature Reading"),
+            }),
+            ('hardware.ipmi.node.airflow', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System Airflow Reading"),
+            }),
+            ('hardware.ipmi.node.cups', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System CUPS Reading"),
+            }),
+            ('hardware.ipmi.node.cpu_util', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System CPU Utility Reading"),
+            }),
+            ('hardware.ipmi.node.mem_util', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System Memory Utility Reading"),
+            }),
+            ('hardware.ipmi.node.io_util', {
+                'type': _("IPMI"),
+                'label': '',
+                'description': _("System IO Utility Reading"),
+            }),
+        ])
+
+    def _get_passivetest_meters_info(self):
+        """Returns additional info for each meter
+
+        That will be used for augmenting the Ceilometer meter
+        """
+        vals = [("dn_thruput_min","dn_thruput_min help text"),
+                ("dn_thruput_max","dn_thruput_max help text"),
+                ("dn_thruput_avg","dn_thruput_avg help text"),
+                ("up_thruput_min","up_thruput_min help text"),
+                ("up_thruput_max","up_thruput_max help text"),
+                ("up_thruput_avg","up_thruput_avg help text"),
+                ("up_byte","up_byte help text"),
+                ("dn_byte","dn_byte help text"),
+                ("up_pkt","up_pkt help text"),
+                ("dn_pkt","dn_pkt help text"),
+                ("tcp_rtt","tcp_rtt help text"),
+                ("tcp_dn_retrans","tcp_dn_retrans help text"),
+                ("tcp_up_retrans","tcp_up_retrans help text"),
+                ("tcp_attempt","tcp_attempt help text"),
+                ("tcp_success","tcp_success help text")]
+
+        return datastructures.SortedDict([(v[0], {'type': _("PassiveTest"), 'label': '', 'description': _(v[1])}) for v in vals])
+
+    def _get_vcpe_meters_info(self):
+        """Returns additional info for each meter
+
+        That will be used for augmenting the Ceilometer meter
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('vsg', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("Existence of vsg instance"),
+            }),
+            ('vsg.dns.cache.size', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("Number of entries in DNS cache"),
+            }),
+            ('vsg.dns.total_instered_entries', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("Total number of inserted entries into the cache"),
+            }),
+            ('vsg.dns.replaced_unexpired_entries', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("Unexpired entries that were thrown out of cache"),
+            }),
+            ('vsg.dns.queries_answered_locally', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("Number of cache hits"),
+            }),
+            ('vsg.dns.queries_forwarded', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("Number of cache misses"),
+            }),
+            ('vsg.dns.server.queries_sent', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("For each upstream server, the number of queries sent"),
+            }),
+            ('vsg.dns.server.queries_failed', {
+                'type': _("VSG"),
+                'label': '',
+                'description': _("For each upstream server, the number of queries failed"),
+            }),
+        ])
+
+    def _get_volt_meters_info(self):
+        """Returns additional info for each meter
+
+        That will be used for augmenting the Ceilometer meter
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('volt.device', {
+                'type': _("VOLT"),
+                'label': '',
+                'description': _("Existence of olt device"),
+            }),
+            ('volt.device.disconnect', {
+                'type': _("VOLT"),
+                'label': '',
+                'description': _("Olt device disconnected"),
+            }),
+            ('volt.device.subscriber', {
+                'type': _("VOLT"),
+                'label': '',
+                'description': _("Existence of olt subscriber"),
+            }),
+            ('volt.device.subscriber.unregister', {
+                'type': _("VOLT"),
+                'label': '',
+                'description': _("Olt subscriber unregistered"),
+            }),
+        ])
+
+    def _get_sdn_meters_info(self):
+        """Returns additional info for each meter
+
+        That will be used for augmenting the Ceilometer meter
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('switch', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Existence of switch"),
+            }),
+            ('switch.port', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Existence of port"),
+            }),
+            ('switch.port.receive.packets', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Packets received on port"),
+            }),
+            ('switch.port.transmit.packets', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Packets transmitted on port"),
+            }),
+            ('switch.port.receive.drops', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Drops received on port"),
+            }),
+            ('switch.port.transmit.drops', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Drops transmitted on port"),
+            }),
+            ('switch.port.receive.errors', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Errors received on port"),
+            }),
+            ('switch.port.transmit.errors', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Errors transmitted on port"),
+            }),
+            ('switch.flow', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Duration of flow"),
+            }),
+            ('switch.flow.packets', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Packets received"),
+            }),
+            ('switch.table', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Existence of table"),
+            }),
+            ('switch.table.active.entries', {
+                'type': _("SDN"),
+                'label': '',
+                'description': _("Active entries in table"),
+            }),
+        ])
+
+    def _get_broadview_meters_info(self):
+        """Returns additional info for each meter
+
+        That will be used for augmenting the Ceilometer meter
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('broadview.bst.device', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("Existence of BST device"),
+            }),
+            ('broadview.bst.ingress-port-priority-group', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST ingress port priority group statistics"),
+            }),
+            ('broadview.bst.ingress-port-service-pool', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST ingress port service pool statistics"),
+            }),
+            ('broadview.bst.egress-cpu-queue', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST egress CPU queue statistics"),
+            }),
+            ('broadview.bst.egress-mc-queue', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST egress multicast queue statistics"),
+            }),
+            ('broadview.bst.egress-port-service-pool', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST egress port service pool statistics"),
+            }),
+            ('broadview.bst.egress-rqe-queue', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST egress RQE queue statistics"),
+            }),
+            ('broadview.bst.egress-service-pool', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST egress service pool statistics"),
+            }),
+            ('broadview.bst.egress-uc-queue', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("BST egress unicast queue statistics"),
+            }),
+            ('broadview.pt.packet-trace-lag-resolution', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("Packet trace LAG resolution"),
+            }),
+            ('broadview.pt.packet-trace-ecmp-resolution', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("Packet trace ECMP resolution"),
+            }),
+            ('broadview.pt.packet-trace-profile', {
+                'type': _("BROADVIEW"),
+                'label': '',
+                'description': _("Packet trace profile"),
+            }),
+        ])
+
+def make_query(user_id=None, tenant_id=None, resource_id=None,
+               user_ids=None, tenant_ids=None, resource_ids=None):
+    """Returns query built from given parameters.
+
+    This query can be then used for querying resources, meters and
+    statistics.
+
+    :Parameters:
+      - `user_id`: user_id, has a priority over list of ids
+      - `tenant_id`: tenant_id, has a priority over list of ids
+      - `resource_id`: resource_id, has a priority over list of ids
+      - `user_ids`: list of user_ids
+      - `tenant_ids`: list of tenant_ids
+      - `resource_ids`: list of resource_ids
+    """
+    user_ids = user_ids or []
+    tenant_ids = tenant_ids or []
+    resource_ids = resource_ids or []
+
+    query = []
+    if user_id:
+        user_ids = [user_id]
+    for u_id in user_ids:
+        query.append({"field": "user_id", "op": "eq", "value": u_id})
+
+    if tenant_id:
+        tenant_ids = [tenant_id]
+    for t_id in tenant_ids:
+        query.append({"field": "project_id", "op": "eq", "value": t_id})
+
+    if resource_id:
+        resource_ids = [resource_id]
+    for r_id in resource_ids:
+        query.append({"field": "resource_id", "op": "eq", "value": r_id})
+
+    return query
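+
+# Illustrative sketch (hypothetical values): make_query(tenant_id="tenant-a",
+# resource_id="vm-1") returns
+#   [{"field": "project_id", "op": "eq", "value": "tenant-a"},
+#    {"field": "resource_id", "op": "eq", "value": "vm-1"}]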
+
+def calc_date_args(date_from, date_to, date_options):
+    # TODO(lsmola) all timestamps should probably work with
+    # current timezone. And also show the current timezone in chart.
+    if date_options == "other":
+        try:
+            if date_from:
+                date_from = pytz.utc.localize(
+                    datetime.datetime.strptime(str(date_from), "%Y-%m-%d"))
+            else:
+                # TODO(lsmola) there should be probably the date
+                # of the first sample as default, so it correctly
+                # counts the time window. Though I need ordering
+                # and limit of samples to obtain that.
+                pass
+            if date_to:
+                date_to = pytz.utc.localize(
+                    datetime.datetime.strptime(str(date_to), "%Y-%m-%d"))
+                # It returns the beginning of the day, I want the end of
+                # the day, so I add one day without a second.
+                date_to = (date_to + datetime.timedelta(days=1) -
+                           datetime.timedelta(seconds=1))
+            else:
+                date_to = timezone.now()
+        except Exception:
+            raise ValueError(_("The dates haven't been recognized"))
+    else:
+        try:
+            date_to = timezone.now()
+            date_from = date_to - datetime.timedelta(days=float(date_options))
+        except Exception:
+            raise ValueError(_("The time delta must be a number representing "
+                               "the time span in days"))
+    return date_from, date_to
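+
+# Illustrative sketch (hypothetical values): calc_date_args('', '', "7")
+# returns (now - 7 days, now); calc_date_args("2016-01-01", "2016-01-02",
+# "other") returns the UTC-localized start of Jan 1 and 23:59:59 UTC on Jan 2.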
+
+class MetersList(APIView):
+    method_kind = "list"
+    method_name = "meters"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
+        if (not tenant_ceilometer_url):
+            raise XOSMissingField("Tenant ceilometer URL is missing")
+
+        tenant_id = request.query_params.get('tenant', None)
+        resource_id = request.query_params.get('resource', None)
+
+        query = []
+        if tenant_id:
+            query.extend(make_query(tenant_id=tenant_id))
+        if resource_id:
+            query.extend(make_query(resource_id=resource_id))
+
+        tenant_map = getTenantControllerTenantMap(request.user)
+        resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url, query=query)
+        meters = Meters(request, ceilometer_url=tenant_ceilometer_url, query=query, tenant_map=tenant_map, resource_map=resource_map)
+        services = {
+            _('Nova'): meters.list_nova(),
+            _('Neutron'): meters.list_neutron(),
+            _('VSG'): meters.list_vcpe(),
+            _('PassiveTest'): meters.list_passivetest(),
+            _('VOLT'): meters.list_volt(),
+            _('SDN'): meters.list_sdn(),
+            _('BROADVIEW'): meters.list_broadview(),
+        }
+        meters = []
+        for service, smeters in services.iteritems():
+            meters.extend(smeters)
+        return Response(meters)
+
+class MeterStatisticsList(APIView):
+    method_kind = "list"
+    method_name = "meterstatistics"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
+        if (not tenant_ceilometer_url):
+            raise XOSMissingField("Tenant ceilometer URL is missing")
+        tenant_map = getTenantControllerTenantMap(request.user)
+        
+        date_options = request.query_params.get('period', 1)
+        date_from = request.query_params.get('date_from', '')
+        date_to = request.query_params.get('date_to', '')
+
+        date_from, date_to = calc_date_args(date_from,
+                                            date_to,
+                                            date_options)
+
+        additional_query = []
+        if date_from:
+            additional_query.append({'field': 'timestamp',
+                                     'op': 'ge',
+                                     'value': date_from})
+        if date_to:
+            additional_query.append({'field': 'timestamp',
+                                     'op': 'le',
+                                     'value': date_to})
+
+        meter_name = request.query_params.get('meter', None)
+        tenant_id = request.query_params.get('tenant', None)
+        resource_id = request.query_params.get('resource', None)
+
+        query = []
+        if tenant_id:
+            query.extend(make_query(tenant_id=tenant_id))
+        if resource_id:
+            query.extend(make_query(resource_id=resource_id))
+
+        if meter_name:
+            # Statistics query for a single meter
+            if additional_query:
+                query = query + additional_query
+            statistics = statistic_list(request, meter_name,
+                                        ceilometer_url=tenant_ceilometer_url, query=query, period=3600*24)
+            if not statistics:
+                # No samples were returned for the requested time window
+                return Response({})
+            statistic = statistics[-1]
+            row = {"name": 'none',
+                   "meter": meter_name,
+                   "time": statistic["period_end"],
+                   "value": statistic["avg"]}
+            return Response(row)
+
+        # Statistics query for all meters
+        resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url, query=query)
+        meters = Meters(request, ceilometer_url=tenant_ceilometer_url, query=query, tenant_map=tenant_map, resource_map=resource_map)
+        services = {
+            _('Nova'): meters.list_nova(),
+            _('Neutron'): meters.list_neutron(),
+            _('VSG'): meters.list_vcpe(),
+            _('PassiveTest'): meters.list_passivetest(),
+            _('VOLT'): meters.list_volt(),
+            _('SDN'): meters.list_sdn(),
+            _('BROADVIEW'): meters.list_broadview(),
+        }
+        report_rows = []
+        for service, smeters in services.items():
+            for meter in smeters:
+                query = make_query(tenant_id=meter["project_id"], resource_id=meter["resource_id"])
+                if additional_query:
+                    query = query + additional_query
+                try:
+                    statistics = statistic_list(request, meter["name"],
+                                        ceilometer_url=tenant_ceilometer_url, query=query, period=3600*24)
+                except Exception as e:
+                    logger.error('Exception during statistics query for meter %(meter)s and reason:%(reason)s' % {'meter':meter["name"], 'reason':str(e)})
+                    statistics = None
+
+                if not statistics:
+                    continue
+                statistic = statistics[-1]
+                row = {"name": 'none',
+                       "slice": meter["slice"],
+                       "project_id": meter["project_id"],
+                       "service": meter["service"],
+                       "resource_id": meter["resource_id"],
+                       "resource_name": meter["resource_name"],
+                       "meter": meter["name"],
+                       "description": meter["description"],
+                       "category": service,
+                       "time": statistic["period_end"],
+                       "value": statistic["avg"],
+                       "unit": meter["unit"]}
+                report_rows.append(row)
+
+        return Response(report_rows)
+
+
+class MeterSamplesList(APIView):
+    method_kind = "list"
+    method_name = "metersamples"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
+        if (not tenant_ceilometer_url):
+            raise XOSMissingField("Tenant ceilometer URL is missing")
+        meter_name = request.query_params.get('meter', None)
+        if not meter_name:
+            raise XOSMissingField("Meter name in query params is missing")
+        limit = request.query_params.get('limit', 10)
+        tenant_id = request.query_params.get('tenant', None)
+        resource_id = request.query_params.get('resource', None)
+        query = []
+        if tenant_id:
+            query.extend(make_query(tenant_id=tenant_id))
+        if resource_id:
+            query.extend(make_query(resource_id=resource_id))
+        query.append({"field": "meter", "op": "eq", "value": meter_name})
+        samples = sample_list(request, meter_name,
+                           ceilometer_url=tenant_ceilometer_url, query=query, limit=limit) 
+        if samples:
+            tenant_map = getTenantControllerTenantMap(request.user)
+            resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url)
+            for sample in samples:
+                if sample["project_id"] in tenant_map.keys():
+                    sample["slice"] = tenant_map[sample["project_id"]]["slice"]
+                else:
+                    sample["slice"] = sample["project_id"]
+                if sample["resource_id"] in resource_map.keys():
+                    sample["resource_name"] = resource_map[sample["resource_id"]]
+                else:
+                    sample["resource_name"] = sample["resource_id"]
+        return Response(samples)
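+
+# Illustrative sketch (hypothetical values): a request such as
+# GET <metersamples endpoint>?meter=tcp_rtt&limit=5&tenant=<tenant-id> returns
+# up to 5 samples of the tcp_rtt meter, each annotated with "slice" and
+# "resource_name" fields resolved from the tenant and resource maps.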
+
+class XOSSliceServiceList(APIView):
+    method_kind = "list"
+    method_name = "xos-slice-service-mapping"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_map = getTenantControllerTenantMap(request.user)
+        service_map = {}
+        for k, v in tenant_map.iteritems():
+            if v['service'] not in service_map:
+                service_map[v['service']] = {}
+                service_map[v['service']]['service'] = v['service']
+                service_map[v['service']]['slices'] = []
+            slice_details = {'slice': v['slice'], 'project_id': k}
+            service_map[v['service']]['slices'].append(slice_details)
+        return Response(service_map.values())
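+
+# Illustrative sketch (hypothetical values): the response is a list such as
+#   [{"service": "passivetest",
+#     "slices": [{"slice": "mysite_passivetest_slice",
+#                 "project_id": "<keystone tenant id>"}]}]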
+
+class XOSInstanceStatisticsList(APIView):
+    method_kind = "list"
+    method_name = "xos-instance-statistics"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
+        if (not tenant_ceilometer_url):
+            raise XOSMissingField("Tenant ceilometer URL is missing")
+        instance_uuid = request.query_params.get('instance-uuid', None)
+        if not instance_uuid:
+            raise XOSMissingField("Instance UUID in query params is missing")
+        instances = Instance.objects.filter(instance_uuid=instance_uuid)
+        if not instances:
+            raise XOSMissingField("XOS Instance object is missing for this uuid")
+        xos_instance = instances[0]
+        tenant_map = getTenantControllerTenantMap(request.user, xos_instance.slice)
+        tenant_id = tenant_map.keys()[0]
+        resource_ids = []
+        resource_ids.append(instance_uuid)
+        for p in xos_instance.ports.all():
+            # The neutron port resource id is represented in ceilometer as
+            # "<nova instance-name>-<nova instance-id>-tap<first 11 characters of the port-id>"
+            resource_ids.append(xos_instance.instance_id + "-" + instance_uuid + "-tap" + p.port_id[:11])
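+            # Illustrative example (hypothetical values): for nova instance
+            # name "instance-00000042", instance uuid
+            # "11111111-2222-3333-4444-555555555555" and port id
+            # "aabbccdd-eeff-0011-2233-445566778899", the appended resource id
+            # is "instance-00000042-11111111-2222-3333-4444-555555555555-tapaabbccdd-ee".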
+        
+        date_options = request.query_params.get('period', 1)
+        date_from = request.query_params.get('date_from', '')
+        date_to = request.query_params.get('date_to', '')
+
+        date_from, date_to = calc_date_args(date_from,
+                                            date_to,
+                                            date_options)
+
+        additional_query = []
+        if date_from:
+            additional_query.append({'field': 'timestamp',
+                                     'op': 'ge',
+                                     'value': date_from})
+        if date_to:
+            additional_query.append({'field': 'timestamp',
+                                     'op': 'le',
+                                     'value': date_to})
+
+        report_rows = []
+        for resource_id in resource_ids:
+            query = []
+            if tenant_id:
+                query.extend(make_query(tenant_id=tenant_id))
+            if resource_id:
+                query.extend(make_query(resource_id=resource_id))
+
+            # Statistics query for all meters
+            resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url, query=query)
+            meters = Meters(request, ceilometer_url=tenant_ceilometer_url, query=query, tenant_map=tenant_map, resource_map=resource_map)
+            exclude_nova_meters_info = [ "instance", "instance:<type>", "disk.read.requests", "disk.write.requests",
+                "disk.read.bytes", "disk.write.bytes", "disk.read.requests.rate", "disk.write.requests.rate", "disk.read.bytes.rate",
+                "disk.write.bytes.rate", "disk.root.size", "disk.ephemeral.size"]
+            exclude_neutron_meters_info = [ 'network.create', 'network.update', 'subnet.create',
+                'subnet.update', 'port.create', 'port.update', 'router.create', 'router.update',
+                'ip.floating.create', 'ip.floating.update']
+            services = {
+                _('Nova'): meters.list_nova(except_meters=exclude_nova_meters_info),
+                _('Neutron'): meters.list_neutron(except_meters=exclude_neutron_meters_info),
+                _('VSG'): meters.list_vcpe(),
+                _('PassiveTest'): meters.list_passivetest(),
+                _('VOLT'): meters.list_volt(),
+                _('SDN'): meters.list_sdn(),
+                _('BROADVIEW'): meters.list_broadview(),
+            }
+            for service, smeters in services.items():
+                for meter in smeters:
+                    query = make_query(tenant_id=meter["project_id"], resource_id=meter["resource_id"])
+                    if additional_query:
+                        query = query + additional_query
+                    try:
+                        statistics = statistic_list(request, meter["name"],
+                                            ceilometer_url=tenant_ceilometer_url, query=query, period=3600*24)
+                    except Exception as e:
+                        logger.error('Exception during statistics query for meter %(meter)s and reason:%(reason)s' % {'meter':meter["name"], 'reason':str(e)})
+                        statistics = None
+
+                    if not statistics:
+                        continue
+                    statistic = statistics[-1]
+                    row = {"name": 'none',
+                           "slice": meter["slice"],
+                           "project_id": meter["project_id"],
+                           "service": meter["service"],
+                           "resource_id": meter["resource_id"],
+                           "resource_name": meter["resource_name"],
+                           "meter": meter["name"],
+                           "description": meter["description"],
+                           "category": service,
+                           "time": statistic["period_end"],
+                           "value": statistic["avg"],
+                           "unit": meter["unit"]}
+                    report_rows.append(row)
+
+        return Response(report_rows)
+
+class ServiceAdjustScale(APIView):
+    method_kind = "list"
+    method_name = "serviceadjustscale"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()) or (not request.user.is_admin):
+            raise PermissionDenied("You must be authenticated admin user in order to use this API")
+        service = request.query_params.get('service', None)
+        slice_hint = request.query_params.get('slice_hint', None)
+        scale = request.query_params.get('scale', None)
+        if not service or not slice_hint or not scale:
+            raise XOSMissingField("Mandatory fields missing")
+        services = Service.select_by_user(request.user)
+        logger.info('Services for this user: %(services)s' % {'services': services})
+        if not services:
+            raise XOSMissingField("Service not found")
+        try:
+            service = services.get(name=service)
+        except Exception:
+            raise XOSMissingField("Service not found")
+        service.adjust_scale(slice_hint, int(scale))
+        return Response("Success")
diff --git a/cleanup.sh b/cleanup.sh
new file mode 100755
index 0000000..0b5b2d5
--- /dev/null
+++ b/cleanup.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+function cleanup_network {
+  NETWORK=$1
+  SUBNETS=`neutron net-show $NETWORK | grep -i subnets | awk '{print $4}'`
+  if [[ $SUBNETS != "" ]]; then
+      PORTS=`neutron port-list | grep -i $SUBNETS | awk '{print $2}'`
+      for PORT in $PORTS; do
+          echo "Deleting port $PORT"
+          neutron port-delete $PORT
+      done
+  fi
+  neutron net-delete $NETWORK
+}
+
+source ~/service-profile/cord-pod/admin-openrc.sh
+
+#cleanup_network lan_passivetest_private_network 
+#cleanup_network public
+
+echo "Deleting networks"
+# Delete all networks beginning with mysite_
+NETS=$( neutron net-list --all-tenants|grep mysite|awk '{print $2}' )
+for NET in $NETS
+do
+    neutron net-delete $NET
+done
+
+#neutron net-delete lan_passivetest_private_network || true
+#neutron net-delete public || true
diff --git a/files/ext_services/passivetest/__init__.py b/files/ext_services/passivetest/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/files/ext_services/passivetest/__init__.py
diff --git a/files/ext_services/passivetest/notifications.py b/files/ext_services/passivetest/notifications.py
new file mode 100644
index 0000000..8645820
--- /dev/null
+++ b/files/ext_services/passivetest/notifications.py
@@ -0,0 +1,54 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import oslo_messaging
+from oslo_config import cfg
+
+from ceilometer.agent import plugin_base
+from oslo_log import log
+from ceilometer import sample
+
+OPTS = [
+    cfg.StrOpt('passivetestservice_control_exchange',
+               default='passivetestservice',
+               help="Exchange name for PassiveTest notifications."),
+]
+
+cfg.CONF.register_opts(OPTS)
+
+LOG = log.getLogger(__name__)
+
+class PassiveTestNotification(plugin_base.NotificationBase):
+
+    resource_name = None
+    event_types = ['passivetest.stats']
+
+    def get_targets(self, conf):
+        """Return a sequence of oslo.messaging.Target objects.
+
+        This sequence defines the exchange and topics to be connected for
+        this plugin.
+        """
+
+        return [oslo_messaging.Target(topic=topic,
+                                      exchange=conf.passivetestservice_control_exchange)
+                for topic in self.get_notification_topics(conf)]
+
+
+    def process_notification(self, message):
+        LOG.debug('Received Passivetest event passivetest.stats')
+
+        if message['payload']:
+            message['payload']['volume']=float(message['payload']['volume'])
+            yield sample.Sample.from_notification(
+                message=message,
+                **message['payload'])
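+
+# Illustrative sketch (hypothetical values): a passivetest.stats payload is
+# expected to carry the fields required by sample.Sample.from_notification,
+# e.g.
+#   {'name': 'tcp_rtt', 'type': 'gauge', 'unit': 'ms', 'volume': '12.5',
+#    'user_id': '<keystone user id>', 'project_id': '<keystone tenant id>',
+#    'resource_id': '<probe resource id>'}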
diff --git a/install_monitoring_plugin.sh b/install_monitoring_plugin.sh
new file mode 100644
index 0000000..b2dc91c
--- /dev/null
+++ b/install_monitoring_plugin.sh
@@ -0,0 +1,19 @@
+#! /bin/bash
+#set -x 
+NODE=$( bash -c "source ~/service-profile/cord-pod/admin-openrc.sh; nova list --all-tenants 2>&1 | grep -m 1 ceilometer | cut -d ' ' -f 2 | xargs -I{} nova show {} 2>&1 | grep -m 1 hypervisor | tr -s ' ' | cut -d ' ' -f 4")
+
+INSTANCE_IP=$( bash -c "source ~/service-profile/cord-pod/admin-openrc.sh; nova list --all-tenants 2>&1 | grep -m 1 ceilometer | cut -d ' ' -f 2 | xargs -I{} nova show {} 2>&1 | grep -m 1 management | tr -s ' ' | cut -d ' ' -f 5")
+
+cat <<EOF > install_monitoring_agent.config
+[ssh_connection]
+ssh_args = -o "ProxyCommand ssh -q -i ~/service-profile/cord-pod/node_key -o StrictHostKeyChecking=no ubuntu@$NODE nc $INSTANCE_IP 22"
+scp_if_ssh = True
+pipelining = True
+
+[defaults]
+host_key_checking = False
+timeout = 30
+EOF
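+# The config generated above lets Ansible reach the ceilometer instance by
+# jumping through its compute node: the ProxyCommand opens an SSH session to
+# ubuntu@$NODE using the pod's node_key and then uses nc to forward the
+# connection to port 22 of the instance's management IP.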
+
+ANSIBLE_CONFIG=install_monitoring_agent.config ansible-playbook -vvv -i /etc/maas/ansible/pod-inventory ~/xos_services/PassiveTest/install_monitoring_plugin.yaml -e instance_ip="$INSTANCE_IP" -e instance_host="$NODE"
+
diff --git a/install_monitoring_plugin.yaml b/install_monitoring_plugin.yaml
new file mode 100644
index 0000000..b5bf5f9
--- /dev/null
+++ b/install_monitoring_plugin.yaml
@@ -0,0 +1,27 @@
+---
+- hosts: '{{ instance_host }}'
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+
+  tasks:
+
+  - name: copy plugin
+    copy: src=ext_services/passivetest dest=/usr/lib/python2.7/dist-packages/ceilometer/network/ext_services
+
+  - name: modify entry_points
+    lineinfile:
+        dest: /usr/lib/python2.7/dist-packages/ceilometer-6.0.0.egg-info/entry_points.txt
+        line: passivetest=ceilometer.network.ext_services.passivetest.notifications:PassiveTestNotification
+        insertafter: '^infra=.*'
+
+  - name: restart ceilometer central agent
+    service:
+        name: ceilometer-agent-central
+        state: restarted
+
+  - name: restart ceilometer notification agent
+    service:
+        name: ceilometer-agent-notification
+        state: restarted
diff --git a/install_viv.sh b/install_viv.sh
new file mode 100644
index 0000000..9577102
--- /dev/null
+++ b/install_viv.sh
@@ -0,0 +1,9 @@
+#! /bin/bash
+#set -x 
+COMPUTENODES=$( bash -c "source ~/service-profile/cord-pod/admin-openrc.sh ; nova hypervisor-list" |grep "cord.lab"|awk '{print $4}')
+
+echo $COMPUTENODES
+
+for NODE in $COMPUTENODES; do
+    ansible-playbook -i /etc/maas/ansible/pod-inventory ~/xos_services/PassiveTest/install_viv.yaml -e instance_name=$NODE
+done
diff --git a/install_viv.yaml b/install_viv.yaml
new file mode 100644
index 0000000..d7eb405
--- /dev/null
+++ b/install_viv.yaml
@@ -0,0 +1,30 @@
+---
+- hosts: '{{ instance_name }}'
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+
+  tasks:
+
+  - name: copy viv image
+    copy: src=viv.docker dest=/tmp/viv.docker mode=0644
+
+  - name: check if we have a viv image already
+    register: docker_images
+    shell: docker images
+
+  - name: load viv container
+    shell: docker load -i /tmp/viv.docker
+    when: docker_images.stdout.find('viv') == -1
+
+  - name: check if viv container is already running
+    register: docker_containers
+    shell: docker ps
+
+  - name: launch the viv container
+    shell: docker run -d -p 8080:8080 -p 8082:8082 -v /dev:/dev -v /mnt:/mnt -v /lib/modules:/lib/modules --net=host --privileged -e IMODE="mmap" -e BLOCKING=1 --name=viv viv /opt/startContainer
+    when: docker_containers.stdout.find('viv') == -1
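+    # The run command above gives the viv container host networking and
+    # privileged access, mounts /dev, /mnt and /lib/modules from the host,
+    # requests port mappings for 8080 and 8082, and starts it via
+    # /opt/startContainer with IMODE="mmap" and BLOCKING=1.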
+
+  - name: remove the copied viv image file
+    shell: rm -f /tmp/viv.docker
diff --git a/passivetest-acord.yaml b/passivetest-acord.yaml
new file mode 100644
index 0000000..30cb096
--- /dev/null
+++ b/passivetest-acord.yaml
@@ -0,0 +1,35 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup the PassiveTest on the pod
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/PassiveTest.yaml
+   - custom_types/monitoring_tosca_types.yaml
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    service_passivetest:
+      type: tosca.nodes.PassiveTest
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    passivetest_monitoring_publisher_tenant:
+      description: PassiveTest Monitoring Publisher Tenant
+      type: tosca.nodes.UserServiceMonitoringPublisher
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.TenantOfService
+          - target_service:
+              node: service_passivetest
+              relationship: tosca.relationships.PublishesMonitoringData
diff --git a/pod-passivetest.yaml b/pod-passivetest.yaml
new file mode 100644
index 0000000..0c1bc07
--- /dev/null
+++ b/pod-passivetest.yaml
@@ -0,0 +1,75 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup the PassiveTest on the pod
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/PassiveTest.yaml
+
+topology_template:
+  node_templates:
+
+    m1.large:
+      type: tosca.nodes.Flavor
+
+    mcord_taa:
+      type: tosca.nodes.Image
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    mysite:
+      type: tosca.nodes.Site
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+        no-create: true
+        no-delete: true
+        no-update: true
+
+
+    mysite_passivetest_slice:
+      description: This slice holds the PassiveTest service
+      type: tosca.nodes.Slice
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - management:
+            node: management
+            relationship: tosca.relationships.ConnectsToNetwork
+        - owner:
+            node: service_passivetest
+            relationship: tosca.relationships.MemberOfService
+        - image:
+            node: mcord_taa
+            relationship: tosca.relationships.DefaultImage
+        - default_flavor:
+            node: m1.large
+            relationship: tosca.relationships.DefaultFlavor
+
+    service_passivetest:
+      type: tosca.nodes.PassiveTest
+      requirements:
+        - management:
+            node: management
+            relationship: tosca.relationships.UsesNetwork
+      properties:
+        view_url: /admin/passivetest/passivetestservice/$id$/
+        kind: PassiveTest
+        public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+        private_key_fn: /opt/xos/services/passivetest/keys/passivetest_rsa
+      artifacts:
+        pubkey: /opt/xos/services/passivetest/keys/passivetest_rsa.pub
+
+    tenant#passivetesttenant1:
+      type: tosca.nodes.PassiveTestTenant
+      requirements:
+        - tenant:
+            node: service_passivetest
+            relationship: tosca.relationships.TenantOfService
+        - dependency:
+            node: mysite_passivetest_slice
+            relationship: tosca.relationships.DependsOn
+
diff --git a/xos/PassiveTest-onboard.yaml b/xos/PassiveTest-onboard.yaml
new file mode 100644
index 0000000..5b29c3d
--- /dev/null
+++ b/xos/PassiveTest-onboard.yaml
@@ -0,0 +1,26 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Onboard the PassiveTest
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    passivetest:
+      type: tosca.nodes.ServiceController
+      properties:
+          base_url: file:///opt/xos_services/PassiveTest/xos/
+          # The following will concatenate with base_url automatically, if
+          # base_url is non-null.
+          models: models.py
+          admin: admin.py
+          synchronizer: synchronizer/manifest
+          synchronizer_run: passivetest-synchronizer.py
+          tosca_custom_types: PassiveTest.yaml
+          tosca_resource: tosca/resources/passivetest.py, tosca/resources/passivetesttenant.py
+          rest_service: api/service/passivetest.py
+          rest_tenant: api/tenant/passivetesttenant.py
+          private_key: file:///opt/xos/key_import/passivetest_rsa
+          public_key: file:///opt/xos/key_import/passivetest_rsa.pub
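+          # Per the note above, relative entries such as "models: models.py"
+          # resolve against base_url, e.g.
+          # file:///opt/xos_services/PassiveTest/xos/models.py.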
+
diff --git a/xos/PassiveTest.m4 b/xos/PassiveTest.m4
new file mode 100644
index 0000000..a477bd4
--- /dev/null
+++ b/xos/PassiveTest.m4
@@ -0,0 +1,31 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+# compile this with "m4 PassiveTest.m4 > PassiveTest.yaml"
+
+# include macros
+include(macros.m4)
+
+node_types:
+    tosca.nodes.PassiveTest:
+        derived_from: tosca.nodes.Root
+        description: >
+            PassiveTest Service
+        capabilities:
+            xos_base_service_caps
+        properties:
+            xos_base_props
+            xos_base_service_props
+            tap_ports:
+               type: string
+               required: false
+            reset_viv:
+               type: boolean
+               required: false
+
+    tosca.nodes.PassiveTestTenant:
+        derived_from: tosca.nodes.Root
+        description: >
+            A Tenant of the PassiveTest service
+        properties:
+            xos_base_tenant_props
+
diff --git a/xos/PassiveTest.yaml b/xos/PassiveTest.yaml
new file mode 100644
index 0000000..d3eb368
--- /dev/null
+++ b/xos/PassiveTest.yaml
@@ -0,0 +1,101 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+# compile this with "m4 PassiveTest.m4 > PassiveTest.yaml"
+
+# include macros
+# Note: Tosca derived_from isn't working the way I think it should; it's not
+#    inheriting from the parent template. Until we get that figured out, use
+#    m4 macros to do our inheritance.
+
+
+# Service
+
+
+# Subscriber
+
+
+
+
+# end m4 macros
+
+
+
+node_types:
+    tosca.nodes.PassiveTest:
+        derived_from: tosca.nodes.Root
+        description: >
+            PassiveTest Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            no-delete:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to delete this object
+            no-create:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to create this object
+            no-update:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to update this object
+            replaces:
+                type: string
+                required: false
+                description: Replaces/renames this object
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            tap_ports:
+               type: string
+               required: false
+            reset_viv:
+               type: boolean
+               required: false
+
+    tosca.nodes.PassiveTestTenant:
+        derived_from: tosca.nodes.Root
+        description: >
+            A Tenant of the PassiveTest service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+            service_specific_id:
+                type: string
+                required: false
+                description: Service specific ID opaque to XOS but meaningful to service
+
diff --git a/xos/admin.py b/xos/admin.py
new file mode 100644
index 0000000..e492e58
--- /dev/null
+++ b/xos/admin.py
@@ -0,0 +1,118 @@
+# admin.py - PassiveTest Django Admin
+
+from core.admin import ReadOnlyAwareAdmin, SliceInline
+from core.middleware import get_request
+from core.models import User
+
+from django import forms
+from django.contrib import admin
+
+from services.passivetest.models import *
+
+class PassiveTestForm(forms.ModelForm):
+
+    class Meta:
+        model = PassiveTestService
+        fields = '__all__'
+
+    def __init__(self, *args, **kwargs):
+        super(PassiveTestForm, self).__init__(*args, **kwargs)
+
+        if self.instance:
+            self.fields['tap_ports'].initial = self.instance.tap_ports
+            self.fields['reset_viv'].initial = self.instance.reset_viv
+
+    def save(self, commit=True):
+        self.instance.tap_ports = self.cleaned_data.get('tap_ports')
+        self.instance.reset_viv = self.cleaned_data.get('reset_viv')
+        return super(PassiveTestForm, self).save(commit=commit)
+
+class PassiveTestAdmin(ReadOnlyAwareAdmin):
+
+    model = PassiveTestService
+    verbose_name = PASSIVETEST_SERVICE_NAME_VERBOSE
+    verbose_name_plural = PASSIVETEST_SERVICE_NAME_VERBOSE_PLURAL
+    form = PassiveTestForm
+    inlines = [SliceInline]
+
+    list_display = ('backend_status_icon', 'name', 'tap_ports', 'reset_viv', 'enabled')
+    list_display_links = ('backend_status_icon', 'name', 'tap_ports', 'reset_viv')
+
+    fieldsets = [(None, {
+        'fields': ['backend_status_text', 'name', 'enabled', 'versionNumber', 'tap_ports', 'reset_viv', 'description',],
+        'classes':['suit-tab suit-tab-general',],
+        })]
+
+    readonly_fields = ('backend_status_text', )
+    user_readonly_fields = ['name', 'enabled', 'versionNumber', 'description',]
+
+    extracontext_registered_admins = True
+
+    suit_form_tabs = (
+        ('general', 'PassiveTest Details', ),
+        ('slices', 'Slices',),
+        )
+
+    suit_form_includes = ((
+        'top',
+        'administration'),
+        )
+
+    def get_queryset(self, request):
+        return PassiveTestService.get_service_objects_by_user(request.user)
+
+admin.site.register(PassiveTestService, PassiveTestAdmin)
+
+class PassiveTestTenantForm(forms.ModelForm):
+
+    class Meta:
+        model = PassiveTestTenant
+        fields = '__all__'
+
+    creator = forms.ModelChoiceField(queryset=User.objects.all())
+
+    def __init__(self, *args, **kwargs):
+        super(PassiveTestTenantForm, self).__init__(*args, **kwargs)
+
+        self.fields['kind'].widget.attrs['readonly'] = True
+        self.fields['kind'].initial = PASSIVETEST_KIND
+
+        self.fields['provider_service'].queryset = PassiveTestService.get_service_objects().all()
+
+        if self.instance:
+            self.fields['creator'].initial = self.instance.creator
+
+        if (not self.instance) or (not self.instance.pk):
+            self.fields['creator'].initial = get_request().user
+            if PassiveTestService.get_service_objects().exists():
+                self.fields['provider_service'].initial = PassiveTestService.get_service_objects().all()[0]
+
+    def save(self, commit=True):
+        self.instance.creator = self.cleaned_data.get('creator')
+        return super(PassiveTestTenantForm, self).save(commit=commit)
+
+
+class PassiveTestTenantAdmin(ReadOnlyAwareAdmin):
+
+    verbose_name = PASSIVETEST_TENANT_NAME_VERBOSE
+    verbose_name_plural = PASSIVETEST_TENANT_NAME_VERBOSE_PLURAL
+
+    list_display = ('id', 'backend_status_icon', 'instance')
+    list_display_links = ('backend_status_icon', 'instance', 'id')
+
+    fieldsets = [(None, {
+        'fields': ['backend_status_text', 'kind', 'provider_service', 'instance', 'creator'],
+        'classes': ['suit-tab suit-tab-general'],
+        })]
+
+    readonly_fields = ('backend_status_text', 'instance',)
+
+    form = PassiveTestTenantForm
+
+    suit_form_tabs = (('general', 'Details'),)
+
+    def get_queryset(self, request):
+        return PassiveTestTenant.get_tenant_objects_by_user(request.user)
+
+admin.site.register(PassiveTestTenant, PassiveTestTenantAdmin)
+
diff --git a/xos/api/service/passivetest.py b/xos/api/service/passivetest.py
new file mode 100644
index 0000000..f5b556d
--- /dev/null
+++ b/xos/api/service/passivetest.py
@@ -0,0 +1,38 @@
+from rest_framework.response import Response
+from rest_framework import serializers
+from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
+from services.passivetest.models import PassiveTestService
+
+class PassiveTestSerializer(PlusModelSerializer):
+    id = ReadOnlyField()
+    humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
+    tap_ports = serializers.CharField(required=False)
+    reset_viv = serializers.BooleanField(required=False)
+
+    class Meta:
+        model = PassiveTestService
+        fields = ('humanReadableName',
+                  'id',
+                  'tap_ports',
+                  'reset_viv')
+
+    def getHumanReadableName(self, obj):
+        return obj.__unicode__()
+
+class PassiveTestViewSet(XOSViewSet):
+    base_name = "passivetest"
+    method_name = "passivetest"
+    method_kind = "viewset"
+    queryset = PassiveTestService.get_service_objects().all()
+    serializer_class = PassiveTestSerializer
+
+    @classmethod
+    def get_urlpatterns(self, api_path="^"):
+        patterns = super(PassiveTestViewSet, self).get_urlpatterns(api_path=api_path)
+        return patterns
+
+    def list(self, request):
+        object_list = self.filter_queryset(self.get_queryset())
+        serializer = self.get_serializer(object_list, many=True)
+        return Response(serializer.data)
+
diff --git a/xos/api/tenant/passivetesttenant.py b/xos/api/tenant/passivetesttenant.py
new file mode 100644
index 0000000..21c9b1a
--- /dev/null
+++ b/xos/api/tenant/passivetesttenant.py
@@ -0,0 +1,39 @@
+from rest_framework.response import Response
+from rest_framework import serializers
+from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
+
+from services.passivetest.models import PassiveTestTenant, PassiveTestService
+
+def get_default_passivetest_service():
+    passivetest_services = PassiveTestService.get_service_objects().all()
+    if passivetest_services:
+        return passivetest_services[0]
+    return None
+
+class PassiveTestTenantSerializer(PlusModelSerializer):
+    id = ReadOnlyField()
+    provider_service = serializers.PrimaryKeyRelatedField(queryset=PassiveTestService.get_service_objects().all(), default=get_default_passivetest_service)
+    backend_status = ReadOnlyField()
+    
+    humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
+
+    class Meta:
+        model = PassiveTestTenant
+        fields = ('humanReadableName', 'id', 'provider_service', 'backend_status')
+
+    def getHumanReadableName(self, obj):
+        return obj.__unicode__()
+
+class PassiveTestTenantViewSet(XOSViewSet):
+    base_name = "passivetesttenant"
+    method_name = "passivetesttenant"
+    method_kind = "viewset"
+    queryset = PassiveTestTenant.get_tenant_objects().all()
+    serializer_class = PassiveTestTenantSerializer
+
+
+    def list(self, request):
+        queryset = self.filter_queryset(self.get_queryset())
+        serializer = self.get_serializer(queryset, many=True)
+        return Response(serializer.data)
+
diff --git a/xos/macros.m4 b/xos/macros.m4
new file mode 100644
index 0000000..1f48f10
--- /dev/null
+++ b/xos/macros.m4
@@ -0,0 +1,84 @@
+# Note: Tosca derived_from isn't working the way I think it should; it's not
+#    inheriting from the parent template. Until we get that figured out, use
+#    m4 macros to do our inheritance.
+
+define(xos_base_props,
+            no-delete:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to delete this object
+            no-create:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to create this object
+            no-update:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to update this object
+            replaces:
+                type: string
+                required: false
+                description: Replaces/renames this object)
+# Service
+define(xos_base_service_caps,
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service)
+define(xos_base_service_props,
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.)
+# Subscriber
+define(xos_base_subscriber_caps,
+            subscriber:
+                type: tosca.capabilities.xos.Subscriber)
+define(xos_base_subscriber_props,
+            kind:
+                type: string
+                default: generic
+                description: Kind of subscriber
+            service_specific_id:
+                type: string
+                required: false
+                description: Service specific ID opaque to XOS but meaningful to service)
+define(xos_base_tenant_props,
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+            service_specific_id:
+                type: string
+                required: false
+                description: Service specific ID opaque to XOS but meaningful to service)
+
+# end m4 macros
+
diff --git a/xos/make_synchronizer_manifest.sh b/xos/make_synchronizer_manifest.sh
new file mode 100644
index 0000000..4058982
--- /dev/null
+++ b/xos/make_synchronizer_manifest.sh
@@ -0,0 +1,2 @@
+#! /bin/bash
+find synchronizer -type f | cut -b 14- > synchronizer/manifest 
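+# "cut -b 14-" drops the first 13 bytes of each result, i.e. the leading
+# "synchronizer/" prefix, so the manifest lists paths relative to the
+# synchronizer directory.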
diff --git a/xos/models.py b/xos/models.py
new file mode 100644
index 0000000..53b0d0e
--- /dev/null
+++ b/xos/models.py
@@ -0,0 +1,67 @@
+# models.py - PassiveTest Models
+
+from core.models import Service, TenantWithContainer
+from django.db import models, transaction
+
+PASSIVETEST_KIND = 'PassiveTest'
+PASSIVETEST_SERVICE_NAME = 'passivetest'
+PASSIVETEST_SERVICE_NAME_VERBOSE = 'PASSIVETEST Service'
+PASSIVETEST_SERVICE_NAME_VERBOSE_PLURAL = 'PASSIVETEST Services'
+PASSIVETEST_TENANT_NAME_VERBOSE = 'PASSIVETEST Tenant'
+PASSIVETEST_TENANT_NAME_VERBOSE_PLURAL = 'PASSIVETEST Tenants'
+
+class PassiveTestService(Service):
+
+    KIND = PASSIVETEST_KIND
+
+    class Meta:
+        app_label = PASSIVETEST_SERVICE_NAME
+        verbose_name = PASSIVETEST_SERVICE_NAME_VERBOSE
+
+    tap_ports = models.CharField(max_length=254, help_text="Neutron Port ids of the ports we want to tap (comma delimited)")
+    reset_viv = models.BooleanField(default=False, help_text="Reset the VIV's input and output configurations!")
+    
+
+class PassiveTestTenant(TenantWithContainer):
+
+    KIND = PASSIVETEST_KIND
+
+    class Meta:
+        verbose_name = PASSIVETEST_TENANT_NAME_VERBOSE
+
+    def __init__(self, *args, **kwargs):
+        passivetestservice = PassiveTestService.get_service_objects().all()
+        if passivetestservice:
+            self._meta.get_field('provider_service').default = passivetestservice[0].id
+        super(PassiveTestTenant, self).__init__(*args, **kwargs)
+
+    def save(self, *args, **kwargs):
+        super(PassiveTestTenant, self).save(*args, **kwargs)
+        model_policy_passivetesttenant(self.pk)
+
+    def delete(self, *args, **kwargs):
+        self.cleanup_container()
+        super(PassiveTestTenant, self).delete(*args, **kwargs)
+
+    @property
+    def public_ip(self):
+        for port in self.instance.ports.all():
+            if "public" in port.network.name.lower():
+                return port.ip
+        return None
+
+    @property
+    def synchronizer_ip(self):
+        for port in self.instance.ports.all():
+            if "management" in port.network.name.lower():
+                return port.ip
+        return None
+
+def model_policy_passivetesttenant(pk):
+    with transaction.atomic():
+        tenant = PassiveTestTenant.objects.select_for_update().filter(pk=pk)
+        if not tenant:
+            return
+        tenant = tenant[0]
+        tenant.manage_container()
+
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
new file mode 100644
index 0000000..dd23436
--- /dev/null
+++ b/xos/synchronizer/manifest
@@ -0,0 +1,20 @@
+passivetest-synchronizer.yaml
+manifest
+passivetest_config
+monitoring_stats_notifier.py
+passivetest-synchronizer.py
+steps/sync_passivetesttenant.py
+steps/passivetesttenant_playbook.yaml
+steps/sync_monitoring_agent.yaml
+steps/roles/setup_probe/tasks/main.yml
+steps/roles/setup_probe/files/README
+steps/roles/setup_probe/files/debs/docker.io_1.6.2~dfsg1-1ubuntu4~14.04.1_amd64.deb
+steps/roles/setup_probe/files/debs/cgroup-lite_1.11~ubuntu14.04.2_all.deb
+steps/roles/setup_probe/files/debs/liberror-perl_0.17-1.1_all.deb
+steps/roles/setup_probe/files/debs/aufs-tools_3.2+20130722-1.1_amd64.deb
+steps/roles/setup_probe/files/debs/git-man_1.9.1-1ubuntu0.3_all.deb
+steps/roles/setup_probe/files/debs/git_1.9.1-1ubuntu0.3_amd64.deb
+steps/roles/setup_probe/files/passivetest_rsa.pub
+steps/roles/setup_probe/files/viv
+steps/roles/setup_probe/files/xcp.docker
+model-deps
diff --git a/xos/synchronizer/model-deps b/xos/synchronizer/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizer/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizer/monitoring_stats_notifier.py b/xos/synchronizer/monitoring_stats_notifier.py
new file mode 100644
index 0000000..bbfc0db
--- /dev/null
+++ b/xos/synchronizer/monitoring_stats_notifier.py
@@ -0,0 +1,111 @@
+import six, uuid, csv, datetime, threading, socket, shutil, argparse, glob, os, copy, pprint, time, sys
+from kombu.connection import BrokerConnection
+from kombu.messaging import Exchange, Queue, Consumer, Producer
+
+XAGG_CSV_DIR="/xsight/var/opt/xagg/tmp"
+XAGG_CSV_PROCESSED_DIR="/xsight/var/opt/xagg/tmp/processed"
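+# xagg drops CSV files of per-flow statistics into XAGG_CSV_DIR; after a file
+# has been published it is moved to XAGG_CSV_PROCESSED_DIR so it is not sent twice.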
+
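+# Thin wrapper around kombu: declares a non-durable topic exchange on the given
+# broker and publishes each stats dict with routing key "notifications.info".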
+class RabbitMQ:
+    def __init__(self, rabbit_host, rabbit_user, rabbit_password, exchange_name):
+        exchange = Exchange(exchange_name, "topic", durable=False)
+        connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
+        channel = connection.channel()
+        self.producer = Producer(channel, exchange=exchange, routing_key="notifications.info")
+
+    def publish(self, stats):
+        self.producer.publish(stats)
+
+
+class CeilometerStats:
+    def __init__(self, keystone_user_id, keystone_tenant_id):
+        self.message_template = {'publisher_id': "monitoring_on_"+socket.gethostname(),
+                                 'priority':'INFO'}
+        self.keystone_user_id = keystone_user_id
+        self.keystone_tenant_id = keystone_tenant_id
+
+    def _get_stat_template(self):
+        retval = copy.copy(self.message_template)
+        retval['message_id'] = six.text_type(uuid.uuid4())
+        retval['timestamp'] = datetime.datetime.now().isoformat()
+        retval['payload'] = {'user_id':self.keystone_user_id,'project_id':self.keystone_tenant_id}
+        return retval
+
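+    # Build a single notification: the publisher/priority template plus the
+    # event type, a per-flow resource_id, and the individual counters.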
+    def get_stat(self,name,event_type,stats={}):
+        retval = self._get_stat_template()
+        retval['event_type']=event_type
+        retval['payload']['resource_id']=name
+        for k,v in stats.iteritems():
+            retval['payload'][k]=v
+        return retval
+
+
+class XaggStatsReader:
+    XAGG_COLUMNS=[
+        {"name":"dn_thruput_min","unit":"kb/s","type":"gauge"},
+        {"name":"dn_thruput_max","unit":"kb/s","type":"gauge"},
+        {"name":"dn_thruput_avg","unit":"kb/s","type":"gauge"},
+        {"name":"up_thruput_min","unit":"kb/s","type":"gauge"},
+        {"name":"up_thruput_max","unit":"kb/s","type":"gauge"},
+        {"name":"up_thruput_avg","unit":"kb/s","type":"gauge"},
+        {"name":"up_byte","unit":"B","type":"cumulative"},
+        {"name":"dn_byte","unit":"B","type":"cumulative"},
+        {"name":"up_pkt","unit":"packet","type":"cumulative"},
+        {"name":"dn_pkt","unit":"packet","type":"cumulative"},
+        {"name":"tcp_rtt","unit":"ms","type":"gauge"},
+        {"name":"tcp_dn_retrans","unit":"packet","type":"gauge"},
+        {"name":"tcp_up_retrans","unit":"packet","type":"gauge"},
+        {"name":"tcp_attempt","unit":"attempt","type":"gauge"},
+        {"name":"tcp_success","unit":"attempt","type":"gauge"}
+    ]
+    CSV_FILE_COLUMNS=["user_src_ip","user_dst_ip","enb_id","customer_group","technology",
+                      "handset","os","apn","service_category","service_type","service_name",
+                      "application_name","app_attempt","app_success","app_response_time",
+                      "dn_byte","dn_thruput_min","dn_thruput_max","dn_thruput_avg","up_byte",
+                      "up_thruput_min","up_thruput_max","up_thruput_avg","tcp_dn_retrans",
+                      "tcp_up_retrans","dn_pkt","up_pkt","tcp_rtt","tcp_attempt","tcp_success"]
+    def __init__(self, ceilometer_stats):
+        self.stats = ceilometer_stats
+
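+    # Parse one xagg CSV (lines starting with '#' are skipped) and return a
+    # notification per (flow, metric) combination from XAGG_COLUMNS, keyed by
+    # the flow's source/destination IP pair.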
+    def get_stats(self, csvfile):
+        fp = open(csvfile)
+        f = csv.DictReader(filter(lambda row: row[0] !='#',fp),fieldnames=self.CSV_FILE_COLUMNS)
+        retval = []
+        for row in f:
+            name=row["user_src_ip"]+"_"+row["user_dst_ip"]
+            for stat in self.XAGG_COLUMNS:
+                stat['volume'] = row[stat["name"]]
+                retval.append(self.stats.get_stat(name,"passivetest.stats",stat))
+        return retval
+
+def periodic_publish(rabbit_mq,xagg_stats_reader):
+    for stats_file in glob.glob(XAGG_CSV_DIR+"/*.csv"):
+        if not os.path.isdir(stats_file):
+            stats = xagg_stats_reader.get_stats(stats_file)
+            for stat in stats:
+                rabbit_mq.publish(stat)
+            shutil.move(stats_file,XAGG_CSV_PROCESSED_DIR)
+
+    # Publish every minute
+    threading.Timer(60, periodic_publish, args=(rabbit_mq, xagg_stats_reader)).start()
+
+def main():
+    parser = argparse.ArgumentParser(description='Process xagg telemetry and send to ceilometer/monitoring service.')
+    for arg in ["keystone-tenant-id","keystone-user-id","rabbit-host","rabbit-user","rabbit-password","rabbit-exchange-name"]:
+        parser.add_argument("--"+arg,required=True)
+
+    args = parser.parse_args()
+
+    # Retry setup until the broker connection succeeds; once periodic_publish()
+    # has scheduled its timer chain, publishing continues on that chain and this
+    # loop can stop (the non-daemon timer threads keep the process alive).
+    while True:
+        try:
+            rabbit_mq = RabbitMQ(args.rabbit_host, args.rabbit_user, args.rabbit_password, args.rabbit_exchange_name)
+            ceilometer_stats = CeilometerStats(args.keystone_user_id, args.keystone_tenant_id)
+            xagg_stats_reader = XaggStatsReader(ceilometer_stats)
+            periodic_publish(rabbit_mq, xagg_stats_reader)
+            break
+        except Exception as e:
+            print(e)
+            sys.stdout.flush()
+        print("Trying again in one minute...")
+        time.sleep(60)
+
+if __name__ == "__main__":
+    exit(main())
diff --git a/xos/synchronizer/passivetest-synchronizer.py b/xos/synchronizer/passivetest-synchronizer.py
new file mode 100644
index 0000000..90d2c98
--- /dev/null
+++ b/xos/synchronizer/passivetest-synchronizer.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+# Runs the standard XOS synchronizer
+
+import importlib
+import os
+import sys
+
+synchronizer_path = os.path.join(os.path.dirname(
+    os.path.realpath(__file__)), "../../synchronizers/base")
+sys.path.append(synchronizer_path)
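+# The shared entry point lives in a file with a dash in its name, so it is
+# loaded via importlib rather than a plain import statement.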
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
+
diff --git a/xos/synchronizer/passivetest-synchronizer.yaml b/xos/synchronizer/passivetest-synchronizer.yaml
new file mode 100644
index 0000000..b1ad5d3
--- /dev/null
+++ b/xos/synchronizer/passivetest-synchronizer.yaml
@@ -0,0 +1,14 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: This recipe provides additional configuration for the onboarded services.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    servicecontroller#passivetest:
+      type: tosca.nodes.ServiceController
+      properties:
+        no-create: true
+        synchronizer_config: /root/setup/files/passivetest_config
diff --git a/xos/synchronizer/passivetest_config b/xos/synchronizer/passivetest_config
new file mode 100644
index 0000000..b0f051a
--- /dev/null
+++ b/xos/synchronizer/passivetest_config
@@ -0,0 +1,30 @@
+# Required by XOS
+[db]
+name=xos
+user=postgres
+password=password
+host=xos_db
+port=5432
+
+# Required by XOS
+[api]
+nova_enabled=True
+
+# Sets options for the synchronizer
+[observer]
+name=passivetest
+dependency_graph=/opt/xos/synchronizers/passivetest/model-deps
+steps_dir=/opt/xos/synchronizers/passivetest/steps
+sys_dir=/opt/xos/synchronizers/passivetest/sys
+logfile=/var/log/xos_backend.log
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
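+# proxy_ssh makes the synchronizer reach instances by hopping through the
+# compute node with the node_key above, rather than connecting directly.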
+enable_watchers=True
+
+[networking]
+use_vtn=True
+
diff --git a/xos/synchronizer/steps/passivetesttenant_playbook.yaml b/xos/synchronizer/steps/passivetesttenant_playbook.yaml
new file mode 100644
index 0000000..6bbf222
--- /dev/null
+++ b/xos/synchronizer/steps/passivetesttenant_playbook.yaml
@@ -0,0 +1,16 @@
+---
+# passivetesttenant_playbook - configures the PassiveTest probe on the tenant instance
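+# The vars below are supplied by SyncPassiveTestTenant.get_extra_attributes()
+# (tap_ports and reset_viv come from PassiveTestService, the two addresses from
+# the tenant's public_ip/synchronizer_ip properties); the work is done by the
+# setup_probe role.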
+
+- hosts: "{{ instance_name }}"
+  connection: ssh
+  user: root
+  sudo: yes
+  gather_facts: no
+  vars:
+    - public_ip: "{{ public_ip }}"
+    - synchronizer_ip: "{{ synchronizer_ip }}"
+    - tap_ports: "{{ tap_ports }}"
+    - reset_viv: {{ reset_viv }}
+
+  roles:
+    - setup_probe
diff --git a/xos/synchronizer/steps/roles/setup_probe/files/viv b/xos/synchronizer/steps/roles/setup_probe/files/viv
new file mode 100755
index 0000000..7cd7375
--- /dev/null
+++ b/xos/synchronizer/steps/roles/setup_probe/files/viv
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+import requests, sys, getopt, json
+ 
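+# Command-line client for the VIV REST API: parseArgs() turns the arguments into
+# a GET or POST against http://<ip>:<port>/<api_version>/vivs/1/... and
+# restClient.doIt() sends the request and prints the JSON response.
+#
+# Example invocations (addresses and device names are illustrative only):
+#   viv -i 172.27.0.1 show
+#   viv -i 172.27.0.1 add input eth1 TSA_CP
+#   viv -i 172.27.0.1 add output 10.0.0.5 50002 TSA_CP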
+COMMANDS     = [ 'show','reset', 'add','delete','deploy',"V_BUFS","NUM_BUFS", "INPUT_MODE", "TX_MODE", "QUEUE_TYPE" ]
+trafficTypes = ['TSA_CP','TSA_UP', 'TAA_CP', 'TAA_UP']
+INPUT_MODES=["mmap","pfring","pfring_zc","dpdk"]
+TX_MODES=["frp_udp","frp_tcp","viv"]
+QUEUE_TYPES=["CLFFIFO","dpdk"]
+RESOURCE_NAMES =  [ 'V_BUFS', 'NUM_BUFS', 'INPUT_MODE', 'TX_MODE', 'QUEUE_TYPE']
+
+settings = {
+             'ipAddress':'127.0.0.1',
+             'port':'8080',
+             'api_version':'v1.0',
+             'resource': 'vivs/1',
+             'operation':'GET',
+             'verbose': False,
+             'data': None
+            }
+ 
+class restClient:
+
+  def doIt( self, settings ):
+    try:
+      hdrs = { 'Accept':'application/json','content-type':'application/json' }
+      url = "http://%s:%s/%s" % ( settings['ipAddress'], settings['port'], settings['api_version'] )
+      if settings['resource']:
+        url = "%s/%s" % (url, settings['resource'])
+
+      if settings['operation'] == 'GET':
+        r = requests.get( url, headers = hdrs )
+      elif settings['operation'] == 'POST':
+        data = json.loads(settings['data'])
+        r = requests.post( url, data=json.dumps(data), headers = hdrs )
+
+      if r.status_code == requests.codes.ok:
+        if r.headers['content-type'] == 'application/json':
+          print json.dumps( r.json(), indent=4 )
+        else:
+          print "Received unexpected content type: %s" % r.headers['content-type']
+          if settings['verbose']:
+            print r.text
+
+      if settings['verbose']:
+        print "\nOperation:\n    %s"            % settings['operation']
+        print "\nURL:\n    %s"                  % r.url 
+        print "\nHeaders sent:\n    %s"         % r.request.headers
+        print "\nResponse status code:\n    %s" % r.status_code
+
+    except requests.ConnectionError as e:
+      print e
+      raise  # re-raise exception
+    except ValueError as e:
+      print "Invalid JSON: %s" % e 
+      raise # re-raise exception
+
+
+def stringFromList( p ):
+  """
+  e.g. ['a', 'b', 'c' ] becomes "[a|b|c]"
+  """
+  return str( p ).replace("'","").replace(", ","|")
+
+def usage():
+  print "Usage:"
+  print " $ %s [-i ip][-p port][-a version][-v] command" % sys.argv[0]
+  print ""
+  print "optional arguments:"
+  print "  -i <ip>          IP address of server. (Default: 127.0.0.1)"
+  print "  -p <port>        Port on server.       (Default: 8080)"
+  print "  -a <version>     API version to use.   (Default: v1.0)"
+  print "  -v               Be verbose"
+  print ""
+  print "command"
+  print "-------"
+  print " show [<path>]"
+  print " add input <device> <type> [<type> [...]]"
+  print " add output <ip_address> <port> <type>"
+  print " delete input <device> [<type> [<type> [...]]]"
+  print " delete output <ip_address>[:<port>] [<type> [<type> [...]]]"
+  print " reset"
+  print " deploy"
+  print ""
+  print " V_BUFS <value>"
+  print " NUM_BUFS <value>"
+  print " INPUT_MODE <input-mode>"
+  print " TX_MODE <tx-mode>"
+  print " QUEUE_TYPE <queue-type>"
+  print ""
+  print ""
+  print " Where:"
+  print "    <type>       = %s" % stringFromList( trafficTypes )
+  print "    <input_mode> = %s" % stringFromList(INPUT_MODES )
+  print "    <tx-mode>    = %s" % stringFromList( QUEUE_TYPES )
+  print ""
+
+
+def getTrafficTypes( args ):
+  traffic_types=[]
+  while len(args) > 0:
+    t = args.pop(0)
+    if t not in trafficTypes:
+      raise ValueError("Invalid traffic type '%s'" % t )
+    traffic_types.append( t )
+  return traffic_types
+
+def parseAddInputCommand( args ):
+  if len( args ) < 2:
+    raise ValueError("Usage: %s add input <device> <traffic_type> [<traffic_type> [...]]" % sys.argv[0] )
+
+  device = args.pop(0)
+  tt = getTrafficTypes( args )
+  data = { "inputs":[{"device":device,"traffic_types":tt}]}
+  settings['resource']='vivs/1/add'
+  settings['operation']='POST'
+  settings['data'] = json.dumps( data )
+
+def parseAddOutputCommand( args ):
+  if len( args ) != 3:
+    raise ValueError("Usage: %s add output <ip_address> <port> <traffic_type>" % sys.argv[0] )
+
+  ip_address = args.pop(0)
+  port = args.pop(0)
+  traffic_type = args.pop(0)
+  if traffic_type not in trafficTypes:
+    raise ValueError("Invalid traffic type '%s'" % traffic_type )
+
+  data = {"outputs":[{"ip_address":ip_address,"type":traffic_type,"port":port}]}
+  settings['resource']='vivs/1/add'
+  settings['operation']='POST'
+  settings['data'] = json.dumps( data )
+
+def parseDeleteInputCommand( args ):
+  if len( args ) < 1:
+    raise ValueError( "Usage: %s delete input <device> [<traffic_type> [<traffic_type> [...]]]" % sys.argv[0] )
+
+  device = args.pop(0)
+  tt = getTrafficTypes( args )
+  data = {"inputs":[{"device":device,"traffic_types":tt}]}
+  settings['resource']='vivs/1/delete'
+  settings['operation']='POST'
+  settings['data'] = json.dumps( data )
+
+def parseDeleteOutputCommand( args ):
+  if len(args) == 0:
+    raise ValueError( "Usage: %s delete output <ip_address>[:<port>] [<traffic_type> [<traffic_type> [...]]]" % sys.argv[0] )
+
+  ipp = args.pop(0).split(':')
+  ip_address = ipp[0]
+  try:
+    port = ipp[1]
+  except IndexError:
+    # No port was specified: all ports with the specified traffic types
+    # will be removed for the given IP.
+    port = None
+
+  tt = getTrafficTypes( args )
+  data = {"outputs":[{"ip_address":ip_address,"port":port, "traffic_types":tt}]}
+  settings['resource']='vivs/1/delete'
+  settings['operation']='POST'
+  settings['data'] = json.dumps( data )
+
+def parseArgs( argv ):
+  try:
+    opts,args = getopt.getopt( argv,
+        "i:p:a:vh", ["ip=","port=","api_version="])
+  except getopt.GetoptError as e:
+    print e
+    raise    # re-raise exception
+  for opt,arg in opts:
+    if opt == '-h':
+      usage()
+      sys.exit()
+    elif opt == '-v':
+      settings['verbose'] = True
+    elif opt in ("-i","--ip"):
+      settings['ipAddress'] = arg
+    elif opt in ("-p","--port"):
+      settings['port'] = arg
+    elif opt in ("-a","--api_version"):
+      settings['api_version'] = arg
+
+  # process residual non option args
+  if len(args) == 0:
+    raise ValueError( "Expected one of: %s" % str( COMMANDS ) )
+
+  cmd = args.pop(0)
+  if cmd not in COMMANDS:
+    print 'Unknown command', cmd
+    sys.exit(1)
+
+  if cmd in ['show']:
+    if len(args) != 0:
+      settings['resource'] = args.pop(0)
+
+  elif cmd in ['reset']:
+    settings['resource']='vivs/1/reset' 
+    settings['operation'] = 'POST'
+    settings['data'] = json.dumps( {} )
+
+  elif cmd in ['add']:
+
+    if len(args) == 0:
+      raise ValueError("Expected 'input' or 'output'")
+
+    direction = args.pop(0)
+    if direction not in ['input','output']:
+      raise ValueError( "expected 'input' or 'output', found '%s'" % direction )
+ 
+    if direction == 'input':
+      parseAddInputCommand( args )
+    else:
+      parseAddOutputCommand( args )
+
+  elif cmd in ['delete']:
+    if len(args) == 0:
+      raise ValueError("Expected 'input' or 'output'" )
+
+    direction = args.pop(0)
+    if direction not in ['input','output']:
+      raise ValueError("expected 'input' or 'output', found '%s'" % direction )
+
+    if direction == 'input':
+      parseDeleteInputCommand( args )
+    else:
+      parseDeleteOutputCommand( args )
+
+  elif cmd in ['deploy']:
+    settings['resource']='vivs/1/deploy'
+    settings['operation'] = 'POST'
+    settings['data'] = '{}'
+
+  else:
+
+    if cmd in RESOURCE_NAMES:
+      if len( args ) == 0 :
+          raise ValueError( 'No value supplied' )
+
+      val = args.pop(0)
+      # The server will complain if it does not like the value.
+      settings['resource']='vivs/1/%s' % cmd
+      settings['operation'] = 'POST'
+      settings['data'] = json.dumps( { cmd : val } )
+
+
+  return settings
+
+
+def RestClient(argv):
+  settings = parseArgs( argv )
+  client = restClient()
+  client.doIt( settings )
+
+
+if __name__ == '__main__':
+  try:
+    RestClient(sys.argv[1:])
+  except ValueError as e:
+    print e
+    sys.exit(2)
+  except getopt.GetoptError as e:
+    print e
+    sys.exit(3)
+  except requests.ConnectionError as e:
+    print e
+    sys.exit(4)
+
diff --git a/xos/synchronizer/steps/roles/setup_probe/tasks/main.yml b/xos/synchronizer/steps/roles/setup_probe/tasks/main.yml
new file mode 100644
index 0000000..ae2d5fb
--- /dev/null
+++ b/xos/synchronizer/steps/roles/setup_probe/tasks/main.yml
@@ -0,0 +1,22 @@
+---
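+# Runs on the tenant instance: install the viv client, optionally reset the VIV,
+# then register the tapped ports as input and this probe (at its management
+# address, port 50002) as output. The VIV API is assumed to be reachable from
+# the instance at 172.27.0.1.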
+- name: set up the authorized key for the ubuntu user (service chaining uses this account)
+  authorized_key:
+    user: ubuntu
+    key: "{{ lookup('file', 'passivetest_rsa.pub') }}"
+    state: present
+
+- name: copy viv client
+  copy: src=viv dest=/usr/bin mode=0755
+
+- name: reset viv (if needed)
+  shell: viv -i 172.27.0.1 reset
+  when: reset_viv
+
+- name: configure input
+  shell: viv -i 172.27.0.1 add input {{ tap_ports }} TSA_CP
+  ignore_errors: True
+
+- name: register probe with viv
+  shell: viv -i 172.27.0.1 add output {{ synchronizer_ip }} 50002 TSA_CP
+  ignore_errors: True
+   
diff --git a/xos/synchronizer/steps/sync_monitoring_agent.yaml b/xos/synchronizer/steps/sync_monitoring_agent.yaml
new file mode 100644
index 0000000..93f805e
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoring_agent.yaml
@@ -0,0 +1,36 @@
+---
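+# Rendered by SyncPassiveTestTenant.handle_service_monitoringagentinfo_watch_notification():
+# copies monitoring_stats_notifier.py to the instance and starts it (only if it
+# is not already running) with the RabbitMQ settings parsed from the monitoring
+# agent's target_uri.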
+- hosts: "{{ instance_name }}"
+  gather_facts: False
+  connection: ssh
+  user: root
+  vars:
+      keystone_tenant_id: "{{ keystone_tenant_id }}"
+      keystone_user_id: "{{ keystone_user_id }}"
+      rabbit_user: "{{ rabbit_user }}"
+      rabbit_password: "{{ rabbit_password }}"
+      rabbit_host: "{{ rabbit_host }}"
+
+  tasks:
+  - name: Check whether the monitoring_stats_notifier job is already running ([m] in the pattern keeps pgrep from matching this shell command itself)
+    shell: pgrep -f [m]onitoring_stats_notifier | wc -l
+    register: job_pids_count
+
+  - name: make sure /usr/local/share/monitoring_agent exists
+    file: path=/usr/local/share/monitoring_agent state=directory owner=root group=root
+    when: job_pids_count.stdout == "0"
+
+  - name: make a processed file folder
+    file: path=/xsight/var/opt/xagg/tmp/processed state=directory owner=root group=root
+    when: job_pids_count.stdout == "0"
+
+  - name: Copy the monitoring_stats_notifier job to the instance
+    copy: src=/opt/xos/synchronizers/passivetest/monitoring_stats_notifier.py
+      dest=/usr/local/share/monitoring_agent/monitoring_stats_notifier.py
+    when: job_pids_count.stdout == "0"
+
+  - name: Initiate monitoring_stats_notifier job
+    command: python /usr/local/share/monitoring_agent/monitoring_stats_notifier.py --keystone-tenant-id={{ keystone_tenant_id }} --keystone-user-id={{ keystone_user_id }} --rabbit-user={{ rabbit_user }} --rabbit-password={{ rabbit_password }} --rabbit-host={{ rabbit_host }} --rabbit-exchange-name='passivetestservice'
+    async: 9999999999999999
+    poll: 0
+    when: job_pids_count.stdout == "0"
+
diff --git a/xos/synchronizer/steps/sync_passivetesttenant.py b/xos/synchronizer/steps/sync_passivetesttenant.py
new file mode 100644
index 0000000..fb60667
--- /dev/null
+++ b/xos/synchronizer/steps/sync_passivetesttenant.py
@@ -0,0 +1,95 @@
+import os
+import sys
+from django.db.models import Q, F
+from services.passivetest.models import PassiveTestService, PassiveTestTenant
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import ModelLink, CoarseTenant, ServiceMonitoringAgentInfo
+from xos.logger import Logger, logging
+from urlparse import urlparse
+
+parentdir = os.path.join(os.path.dirname(__file__), "..")
+sys.path.insert(0, parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncPassiveTestTenant(SyncInstanceUsingAnsible):
+    provides = [PassiveTestTenant]
+    observes = PassiveTestTenant
+    requested_interval = 0
+    template_name = "passivetesttenant_playbook.yaml"
+    service_key_name = "/opt/xos/synchronizers/passivetest/passivetest_private_key"
+    watches = [ModelLink(CoarseTenant,via='coarsetenant'), 
+               ModelLink(ServiceMonitoringAgentInfo,via='monitoringagentinfo')]
+
+    def __init__(self, *args, **kwargs):
+        super(SyncPassiveTestTenant, self).__init__(*args, **kwargs)
+
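+    # Standard SyncInstanceUsingAnsible hook: return tenants that are new or have
+    # been updated since they were last enacted or, on the deletion pass, the
+    # tenants that have been deleted.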
+    def fetch_pending(self, deleted):
+
+        if (not deleted):
+            objs = PassiveTestTenant.get_tenant_objects().filter(
+                Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
+        else:
+            # If this is a deletion we get all of the deleted tenants..
+            objs = PassiveTestTenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_passivetestservice(self, o):
+        if not o.provider_service:
+            return None
+
+        passivetestservice = PassiveTestService.get_service_objects().filter(id=o.provider_service.id)
+
+        if not passivetestservice:
+            return None
+
+        return passivetestservice[0]
+
+    # Gets the attributes that are used by the Ansible template but are not
+    # part of the set of default attributes.
+    def get_extra_attributes(self, o):
+        passivetestservice = self.get_passivetestservice(o)
+        return { "public_ip": o.public_ip,
+                 "synchronizer_ip": o.synchronizer_ip,
+                 "tap_ports": passivetestservice.tap_ports,
+                 "reset_viv": passivetestservice.reset_viv}
+
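+    # Invoked through the ServiceMonitoringAgentInfo entry in 'watches': when the
+    # monitoring service publishes its RabbitMQ target_uri, push the stats
+    # notifier to each tenant instance of this service.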
+    def handle_service_monitoringagentinfo_watch_notification(self, monitoring_agent_info):
+        if not monitoring_agent_info.service:
+            logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+            return
+
+        if not monitoring_agent_info.target_uri:
+            logger.info("handle watch notifications for service monitoring agent info...ignoring because target_uri attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+            return
+
+        objs = PassiveTestTenant.get_tenant_objects().all()
+        for obj in objs:
+            if obj.provider_service.id != monitoring_agent_info.service.id:
+                logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is not matching" % (monitoring_agent_info))
+                return
+
+            instance = self.get_instance(obj)
+            if not instance:
+               logger.warn("handle watch notifications for service monitoring agent info...: No valid instance found for object %s" % (str(obj)))
+               return
+
+            logger.info("handling watch notification for monitoring agent info:%s for PassiveTestTenant object:%s" % (monitoring_agent_info, obj))
+
+            # Build the Ansible fields and run the monitoring agent playbook on the instance
+            fields = self.get_ansible_fields(instance)
+            fields["ansible_tag"] = obj.__class__.__name__ + "_" + str(obj.id) + "_service_monitoring"
+
+            # Parse the monitoring agent's target_uri, assuming it is a RabbitMQ URI,
+            # and pass the broker credentials to the playbook
+            url = urlparse(monitoring_agent_info.target_uri)
+
+            fields["rabbit_user"] = url.username
+            fields["rabbit_password"] = url.password
+            fields["rabbit_host"] = url.hostname
+
+            template_name = "sync_monitoring_agent.yaml"
+            super(SyncPassiveTestTenant, self).run_playbook(obj, fields, template_name)
+
diff --git a/xos/tosca/resources/passivetest.py b/xos/tosca/resources/passivetest.py
new file mode 100644
index 0000000..9ec5d11
--- /dev/null
+++ b/xos/tosca/resources/passivetest.py
@@ -0,0 +1,30 @@
+from core.models import CoarseTenant
+from services.passivetest.models import PassiveTestService
+
+from xosresource import XOSResource
+
+class XOSPassiveTestService(XOSResource):
+    provides = "tosca.nodes.PassiveTest"
+    xos_model = PassiveTestService
+    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "private_key_fn", "versionNumber", "tap_ports", "reset_viv"]
+
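+    # After the service is created or updated, ensure a CoarseTenant (service
+    # composition) link exists to every provider service named by a
+    # tosca.relationships.TenantOfService requirement.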
+    def postprocess(self, obj):
+        for provider_service_name in self.get_requirements("tosca.relationships.TenantOfService"):
+            provider_service = self.get_xos_object(PassiveTestService, name=provider_service_name)
+
+            existing_tenancy = CoarseTenant.get_tenant_objects().filter(provider_service = provider_service, subscriber_service = obj)
+            if existing_tenancy:
+                self.info("Tenancy relationship from %s to %s already exists" % (str(obj), str(provider_service)))
+            else:
+                tenancy = CoarseTenant(provider_service = provider_service,
+                                       subscriber_service = obj)
+                tenancy.save()
+
+                self.info("Created Tenancy relationship  from %s to %s" % (str(obj), str(provider_service)))
+
+    def can_delete(self, obj):
+        if obj.slices.exists():
+            self.info("Service %s has passive slices; skipping delete" % obj.name)
+            return False
+        return super(XOSPassiveTestService, self).can_delete(obj)
+
diff --git a/xos/tosca/resources/passivetesttenant.py b/xos/tosca/resources/passivetesttenant.py
new file mode 100644
index 0000000..84ac28f
--- /dev/null
+++ b/xos/tosca/resources/passivetesttenant.py
@@ -0,0 +1,29 @@
+from core.models import Tenant, Service
+from services.passivetest.models import PassiveTestTenant
+
+from xosresource import XOSResource
+
+class XOSPassiveTestTenant(XOSResource):
+    provides = "tosca.nodes.PassiveTestTenant"
+    xos_model = PassiveTestTenant
+    name_field = "service_specific_id"
+    copyin_props = ("tenant_message",)
+
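+    # Resolve the provider service named by the TenantOfService requirement into
+    # the provider_service argument that PassiveTestTenant requires.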
+    def get_xos_args(self, throw_exception=True):
+        args = super(XOSPassiveTestTenant, self).get_xos_args()
+
+        # PassiveTestTenant must always have a provider_service
+        provider_name = self.get_requirement("tosca.relationships.TenantOfService", throw_exception=True)
+        if provider_name:
+            args["provider_service"] = self.get_xos_object(Service, throw_exception=True, name=provider_name)
+
+        return args
+
+    def get_existing_objs(self):
+        args = self.get_xos_args(throw_exception=False)
+        return PassiveTestTenant.get_tenant_objects().filter(provider_service=args["provider_service"], service_specific_id=args["service_specific_id"])
+
+    def can_delete(self, obj):
+        return super(XOSPassiveTestTenant, self).can_delete(obj)
+