move ceilometer service over from xos repo
Change-Id: I9402876545cf552675cd0a836e02e2c72fea5f6b
diff --git a/xos/admin.py b/xos/admin.py
new file mode 100644
index 0000000..062a2ae
--- /dev/null
+++ b/xos/admin.py
@@ -0,0 +1,214 @@
+from django.contrib import admin
+
+from services.ceilometer.models import *
+from django import forms
+from django.utils.safestring import mark_safe
+from django.contrib.auth.admin import UserAdmin
+from django.contrib.admin.widgets import FilteredSelectMultiple
+from django.contrib.auth.forms import ReadOnlyPasswordHashField
+from django.contrib.auth.signals import user_logged_in
+from django.utils import timezone
+from django.contrib.contenttypes import generic
+from suit.widgets import LinkedSelect
+from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline, TenantAttrAsTabInline
+from core.middleware import get_request
+
+from functools import update_wrapper
+from django.contrib.admin.views.main import ChangeList
+from django.core.urlresolvers import reverse
+from django.contrib.admin.utils import quote
+
+class CeilometerServiceForm(forms.ModelForm):
+ ceilometer_pub_sub_url = forms.CharField(required=False, max_length=1024, help_text="REST URL of ceilometer PUB/SUB component in http://IP:port/ format")
+
+ def __init__(self,*args,**kwargs):
+ super (CeilometerServiceForm,self ).__init__(*args,**kwargs)
+ if self.instance:
+ # fields for the attributes
+ self.fields['ceilometer_pub_sub_url'].initial = self.instance.ceilometer_pub_sub_url
+
+ def save(self, commit=True):
+ self.instance.ceilometer_pub_sub_url = self.cleaned_data.get("ceilometer_pub_sub_url")
+ return super(CeilometerServiceForm, self).save(commit=commit)
+
+ class Meta:
+ model = CeilometerService
+
+class CeilometerServiceAdmin(ReadOnlyAwareAdmin):
+ model = CeilometerService
+ verbose_name = "Ceilometer Service"
+ verbose_name_plural = "Ceilometer Service"
+ list_display = ("backend_status_icon", "name", "enabled")
+ list_display_links = ('backend_status_icon', 'name', )
+ fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description','ceilometer_pub_sub_url', "view_url","icon_url" ], 'classes':['suit-tab suit-tab-general']})]
+ readonly_fields = ('backend_status_text', )
+ inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
+ form = CeilometerServiceForm
+
+ extracontext_registered_admins = True
+
+ user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
+
+ suit_form_tabs =(('general', 'Ceilometer Service Details'),
+ ('administration', 'Administration'),
+ ('slices','Slices'),
+ ('serviceattrs','Additional Attributes'),
+ ('serviceprivileges','Privileges'),
+ )
+
+ suit_form_includes = (('ceilometeradmin.html', 'top', 'administration'),
+ )
+
+ def queryset(self, request):
+ return CeilometerService.get_service_objects_by_user(request.user)
+
+class MonitoringChannelForm(forms.ModelForm):
+ creator = forms.ModelChoiceField(queryset=User.objects.all())
+
+ def __init__(self,*args,**kwargs):
+ super (MonitoringChannelForm,self ).__init__(*args,**kwargs)
+ self.fields['kind'].widget.attrs['readonly'] = True
+ self.fields['provider_service'].queryset = CeilometerService.get_service_objects().all()
+ if self.instance:
+ # fields for the attributes
+ self.fields['creator'].initial = self.instance.creator
+ if (not self.instance) or (not self.instance.pk):
+ # default fields for an 'add' form
+ self.fields['kind'].initial = CEILOMETER_KIND
+ self.fields['creator'].initial = get_request().user
+ if CeilometerService.get_service_objects().exists():
+ self.fields["provider_service"].initial = CeilometerService.get_service_objects().all()[0]
+
+
+ def save(self, commit=True):
+ self.instance.creator = self.cleaned_data.get("creator")
+ return super(MonitoringChannelForm, self).save(commit=commit)
+
+ class Meta:
+ model = MonitoringChannel
+
+class MonitoringChannelAdmin(ReadOnlyAwareAdmin):
+ list_display = ('backend_status_icon', 'id', )
+ list_display_links = ('backend_status_icon', 'id')
+ fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'service_specific_attribute',
+ 'ceilometer_url', 'tenant_list_str',
+ 'instance', 'creator'],
+ 'classes':['suit-tab suit-tab-general']})]
+ readonly_fields = ('backend_status_text', 'instance', 'service_specific_attribute', 'ceilometer_url', 'tenant_list_str')
+ form = MonitoringChannelForm
+
+ suit_form_tabs = (('general','Details'),)
+ actions=['delete_selected_objects']
+
+ def get_actions(self, request):
+ actions = super(MonitoringChannelAdmin, self).get_actions(request)
+ if 'delete_selected' in actions:
+ del actions['delete_selected']
+ return actions
+
+ def delete_selected_objects(self, request, queryset):
+ for obj in queryset:
+ obj.delete()
+ delete_selected_objects.short_description = "Delete Selected MonitoringChannel Objects"
+
+ def queryset(self, request):
+ return MonitoringChannel.get_tenant_objects_by_user(request.user)
+
+class SFlowServiceForm(forms.ModelForm):
+ sflow_port = forms.IntegerField(required=False)
+ sflow_api_port = forms.IntegerField(required=False)
+
+ def __init__(self,*args,**kwargs):
+ super (SFlowServiceForm,self ).__init__(*args,**kwargs)
+ if self.instance:
+ # fields for the attributes
+ self.fields['sflow_port'].initial = self.instance.sflow_port
+ self.fields['sflow_api_port'].initial = self.instance.sflow_api_port
+ if (not self.instance) or (not self.instance.pk):
+ # default fields for an 'add' form
+ self.fields['sflow_port'].initial = SFLOW_PORT
+ self.fields['sflow_api_port'].initial = SFLOW_API_PORT
+
+ def save(self, commit=True):
+ self.instance.sflow_port = self.cleaned_data.get("sflow_port")
+ self.instance.sflow_api_port = self.cleaned_data.get("sflow_api_port")
+ return super(SFlowServiceForm, self).save(commit=commit)
+
+ class Meta:
+ model = SFlowService
+
+class SFlowServiceAdmin(ReadOnlyAwareAdmin):
+ model = SFlowService
+ verbose_name = "SFlow Service"
+ verbose_name_plural = "SFlow Service"
+ list_display = ("backend_status_icon", "name", "enabled")
+ list_display_links = ('backend_status_icon', 'name', )
+ fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description',"view_url","sflow_port","sflow_api_port","icon_url" ], 'classes':['suit-tab suit-tab-general']})]
+ readonly_fields = ('backend_status_text', )
+ inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
+ form = SFlowServiceForm
+
+ extracontext_registered_admins = True
+
+ user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
+
+ suit_form_tabs =(('general', 'SFlow Service Details'),
+ ('administration', 'Administration'),
+ ('slices','Slices'),
+ ('serviceattrs','Additional Attributes'),
+ ('serviceprivileges','Privileges'),
+ )
+
+ suit_form_includes = (('sflowadmin.html', 'top', 'administration'),
+ )
+
+ def queryset(self, request):
+ return SFlowService.get_service_objects_by_user(request.user)
+
+class SFlowTenantForm(forms.ModelForm):
+ creator = forms.ModelChoiceField(queryset=User.objects.all())
+ listening_endpoint = forms.CharField(max_length=1024, help_text="sFlow listening endpoint in udp://IP:port format")
+
+ def __init__(self,*args,**kwargs):
+ super (SFlowTenantForm,self ).__init__(*args,**kwargs)
+ self.fields['kind'].widget.attrs['readonly'] = True
+ self.fields['provider_service'].queryset = SFlowService.get_service_objects().all()
+ if self.instance:
+ # fields for the attributes
+ self.fields['creator'].initial = self.instance.creator
+ self.fields['listening_endpoint'].initial = self.instance.listening_endpoint
+ if (not self.instance) or (not self.instance.pk):
+ # default fields for an 'add' form
+ self.fields['kind'].initial = SFLOW_KIND
+ self.fields['creator'].initial = get_request().user
+ if SFlowService.get_service_objects().exists():
+ self.fields["provider_service"].initial = SFlowService.get_service_objects().all()[0]
+
+ def save(self, commit=True):
+ self.instance.creator = self.cleaned_data.get("creator")
+ self.instance.listening_endpoint = self.cleaned_data.get("listening_endpoint")
+ return super(SFlowTenantForm, self).save(commit=commit)
+
+ class Meta:
+ model = SFlowTenant
+
+class SFlowTenantAdmin(ReadOnlyAwareAdmin):
+ list_display = ('backend_status_icon', 'creator', 'listening_endpoint' )
+ list_display_links = ('backend_status_icon', 'listening_endpoint')
+ fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_service', 'service_specific_attribute', 'listening_endpoint',
+ 'creator'],
+ 'classes':['suit-tab suit-tab-general']})]
+ readonly_fields = ('backend_status_text', 'instance', 'service_specific_attribute')
+ inlines = [TenantAttrAsTabInline]
+ form = SFlowTenantForm
+
+ suit_form_tabs = (('general','Details'), ('tenantattrs', 'Attributes'))
+
+ def queryset(self, request):
+ return SFlowTenant.get_tenant_objects_by_user(request.user)
+
+admin.site.register(CeilometerService, CeilometerServiceAdmin)
+admin.site.register(SFlowService, SFlowServiceAdmin)
+admin.site.register(MonitoringChannel, MonitoringChannelAdmin)
+admin.site.register(SFlowTenant, SFlowTenantAdmin)
+
diff --git a/xos/api/tenant/ceilometer/monitoringchannel.py b/xos/api/tenant/ceilometer/monitoringchannel.py
new file mode 100644
index 0000000..43e1636
--- /dev/null
+++ b/xos/api/tenant/ceilometer/monitoringchannel.py
@@ -0,0 +1,94 @@
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework import status
+from core.models import *
+from django.forms import widgets
+from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
+from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
+
+from services.ceilometer.models import MonitoringChannel, CeilometerService
+
+def get_default_ceilometer_service():
+ ceilometer_services = CeilometerService.get_service_objects().all()
+ if ceilometer_services:
+ return ceilometer_services[0].id
+ return None
+
+class MonitoringChannelForAPI(MonitoringChannel):
+ class Meta:
+ proxy = True
+ app_label = "ceilometer"
+
+ @property
+ def related(self):
+ related = {}
+ if self.creator:
+ related["creator"] = self.creator.username
+ if self.instance:
+ related["instance_id"] = self.instance.id
+ related["instance_name"] = self.instance.name
+ if self.instance.node:
+ related["compute_node_name"] = self.instance.node.name
+ return related
+
+class MonitoringChannelSerializer(PlusModelSerializer):
+ id = ReadOnlyField()
+ service_specific_attribute = ReadOnlyField()
+ ceilometer_url = ReadOnlyField()
+ tenant_list_str = ReadOnlyField()
+ #creator = ReadOnlyField()
+ #instance = ReadOnlyField()
+ provider_service = serializers.PrimaryKeyRelatedField(queryset=CeilometerService.get_service_objects().all(), default=get_default_ceilometer_service)
+
+ humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
+ related = serializers.DictField(required=False)
+
+ #computeNodeName = serializers.SerializerMethodField("getComputeNodeName")
+
+ class Meta:
+ model = MonitoringChannelForAPI
+ fields = ('humanReadableName', 'id', 'provider_service', 'service_specific_attribute', 'ceilometer_url', 'tenant_list_str', 'related' )
+
+ def getHumanReadableName(self, obj):
+ return obj.__unicode__()
+
+ #def getComputeNodeName(self, obj):
+ # instance = obj.instance
+ # if not instance:
+ # return None
+ # return instance.node.name
+
+class MonitoringChannelSet(XOSViewSet):
+ base_name = "monitoringchannel"
+ method_name = "monitoringchannel"
+ method_kind = "viewset"
+ queryset = MonitoringChannelForAPI.get_tenant_objects().all()
+ serializer_class = MonitoringChannelSerializer
+
+ def get_queryset(self):
+ queryset = MonitoringChannelForAPI.get_tenant_objects().all()
+
+ current_user = self.request.user.username
+ if current_user is not None:
+ ids = [x.id for x in queryset if x.creator.username==current_user]
+ queryset = queryset.filter(id__in=ids)
+
+ return queryset
+
+ def create(self, request):
+ current_user = request.user.username
+ existing_obj = None
+ for obj in MonitoringChannelForAPI.get_tenant_objects().all():
+ if (obj.creator.username == current_user):
+ existing_obj = obj
+ break
+
+ if existing_obj:
+ serializer = MonitoringChannelSerializer(existing_obj)
+ headers = self.get_success_headers(serializer.data)
+ return Response( serializer.data, status=status.HTTP_200_OK )
+
+ return super(MonitoringChannelSet, self).create(request)
diff --git a/xos/ceilometer-onboard.yaml b/xos/ceilometer-onboard.yaml
new file mode 100644
index 0000000..a657f86
--- /dev/null
+++ b/xos/ceilometer-onboard.yaml
@@ -0,0 +1,25 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Onboard the ceilometer service
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+ servicecontroller#ceilometer:
+ type: tosca.nodes.ServiceController
+ properties:
+ base_url: file:///opt/xos_services/ceilometer/xos/
+ # The following will concatenate with base_url automatically, if
+ # base_url is non-null.
+ models: models.py
+ admin: admin.py
+ admin_template: templates/ceilometeradmin.html, templates/sflowadmin.html
+ synchronizer: synchronizer/manifest
+ synchronizer_run: monitoring_channel_synchronizer.py
+ tosca_resource: tosca/resources/ceilometerservice.py, tosca/resources/ceilometertenant.py, tosca/resources/sflowservice.py
+ rest_tenant: subdirectory:ceilometer api/tenant/ceilometer/monitoringchannel.py
+ private_key: file:///opt/xos/key_import/monitoring_channel_rsa
+ public_key: file:///opt/xos/key_import/monitoring_channel_rsa.pub
+
diff --git a/xos/models.py b/xos/models.py
new file mode 100644
index 0000000..5285bd7
--- /dev/null
+++ b/xos/models.py
@@ -0,0 +1,307 @@
+from django.db import models
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
+from core.models.plcorebase import StrippedCharField
+import os
+from django.db import models, transaction
+from django.forms.models import model_to_dict
+from django.db.models import Q
+from operator import itemgetter, attrgetter, methodcaller
+import traceback
+from xos.exceptions import *
+from core.models import SlicePrivilege, SitePrivilege
+from sets import Set
+from urlparse import urlparse
+
+CEILOMETER_KIND = "ceilometer"
+
+class CeilometerService(Service):
+ KIND = CEILOMETER_KIND
+
+ class Meta:
+ app_label = "ceilometer"
+ verbose_name = "Ceilometer Service"
+ proxy = True
+
+ @property
+ def ceilometer_pub_sub_url(self):
+ return self.get_attribute("ceilometer_pub_sub_url", None)
+
+ @ceilometer_pub_sub_url.setter
+ def ceilometer_pub_sub_url(self, value):
+ self.set_attribute("ceilometer_pub_sub_url", value)
+
+class MonitoringChannel(TenantWithContainer): # aka 'CeilometerTenant'
+ class Meta:
+ proxy = True
+
+ KIND = CEILOMETER_KIND
+ LOOK_FOR_IMAGES=[ #"trusty-server-multi-nic-docker", # CloudLab
+ "ceilometer-trusty-server-multi-nic",
+ #"trusty-server-multi-nic",
+ ]
+
+
+ sync_attributes = ("private_ip", "private_mac",
+ "ceilometer_ip", "ceilometer_mac",
+ "nat_ip", "nat_mac", "ceilometer_port",)
+
+ default_attributes = {}
+ def __init__(self, *args, **kwargs):
+ ceilometer_services = CeilometerService.get_service_objects().all()
+ if ceilometer_services:
+ self._meta.get_field("provider_service").default = ceilometer_services[0].id
+ super(MonitoringChannel, self).__init__(*args, **kwargs)
+ self.set_attribute("use_same_instance_for_multiple_tenants", True)
+
+ def can_update(self, user):
+ #Allow creation of this model instances for non-admin users also
+ return True
+
+ def save(self, *args, **kwargs):
+ if not self.creator:
+ if not getattr(self, "caller", None):
+ # caller must be set when creating a monitoring channel since it creates a slice
+ raise XOSProgrammingError("MonitoringChannel's self.caller was not set")
+ self.creator = self.caller
+ if not self.creator:
+ raise XOSProgrammingError("MonitoringChannel's self.creator was not set")
+
+ if self.pk is None:
+ #Allow only one monitoring channel per user
+ channel_count = sum ( [1 for channel in MonitoringChannel.objects.filter(kind=CEILOMETER_KIND) if (channel.creator == self.creator)] )
+ if channel_count > 0:
+ raise XOSValidationError("Already %s channels exist for user Can only create max 1 MonitoringChannel instance per user" % str(channel_count))
+
+ super(MonitoringChannel, self).save(*args, **kwargs)
+ model_policy_monitoring_channel(self.pk)
+
+ def delete(self, *args, **kwargs):
+ self.cleanup_container()
+ super(MonitoringChannel, self).delete(*args, **kwargs)
+
+ @property
+ def addresses(self):
+ if (not self.id) or (not self.instance):
+ return {}
+
+ addresses = {}
+ for ns in self.instance.ports.all():
+ if "private" in ns.network.name.lower():
+ addresses["private"] = (ns.ip, ns.mac)
+ elif ("nat" in ns.network.name.lower()) or ("management" in ns.network.name.lower()):
+ addresses["nat"] = (ns.ip, ns.mac)
+ #TODO: Do we need this client_access_network. Revisit in VTN context
+ #elif "ceilometer_client_access" in ns.network.labels.lower():
+ # addresses["ceilometer"] = (ns.ip, ns.mac)
+ return addresses
+
+ @property
+ def nat_ip(self):
+ return self.addresses.get("nat", (None, None))[0]
+
+ @property
+ def nat_mac(self):
+ return self.addresses.get("nat", (None, None))[1]
+
+ @property
+ def private_ip(self):
+ return self.addresses.get("nat", (None, None))[0]
+
+ @property
+ def private_mac(self):
+ return self.addresses.get("nat", (None, None))[1]
+
+ @property
+ def ceilometer_ip(self):
+ return self.addresses.get("ceilometer", (None, None))[0]
+
+ @property
+ def ceilometer_mac(self):
+ return self.addresses.get("ceilometer", (None, None))[1]
+
+ @property
+ def site_tenant_list(self):
+ tenant_ids = Set()
+ for sp in SitePrivilege.objects.filter(user=self.creator):
+ site = sp.site
+ for cs in site.controllersite.all():
+ if cs.tenant_id:
+ tenant_ids.add(cs.tenant_id)
+ return tenant_ids
+
+ @property
+ def slice_tenant_list(self):
+ tenant_ids = Set()
+ for sp in SlicePrivilege.objects.filter(user=self.creator):
+ slice = sp.slice
+ for cs in slice.controllerslices.all():
+ if cs.tenant_id:
+ tenant_ids.add(cs.tenant_id)
+ for slice in Slice.objects.filter(creator=self.creator):
+ for cs in slice.controllerslices.all():
+ if cs.tenant_id:
+ tenant_ids.add(cs.tenant_id)
+ if self.creator.is_admin:
+ #TODO: Ceilometer publishes the SDN meters without associating to any tenant IDs.
+            #For now, ceilometer code is changed to publish all such meters with tenant
+            #id as "default_admin_tenant". Here add that default tenant as authorized tenant_id
+ #for all admin users.
+ tenant_ids.add("default_admin_tenant")
+ return tenant_ids
+
+ @property
+ def tenant_list(self):
+ return self.slice_tenant_list | self.site_tenant_list
+
+ @property
+ def tenant_list_str(self):
+ return ", ".join(self.tenant_list)
+
+ @property
+ def ceilometer_port(self):
+ # TODO: Find a better logic to choose unique ceilometer port number for each instance
+ if not self.id:
+ return None
+ return 8888+self.id
+
+ @property
+ def ceilometer_url(self):
+ if not self.private_ip:
+ return None
+ return "http://" + self.private_ip + ":" + str(self.ceilometer_port) + "/"
+
+def model_policy_monitoring_channel(pk):
+ # TODO: this should be made in to a real model_policy
+ with transaction.atomic():
+ mc = MonitoringChannel.objects.select_for_update().filter(pk=pk)
+ if not mc:
+ return
+ mc = mc[0]
+ mc.manage_container()
+
+
+SFLOW_KIND = "sflow"
+SFLOW_PORT = 6343
+SFLOW_API_PORT = 33333
+
+class SFlowService(Service):
+ KIND = SFLOW_KIND
+
+ class Meta:
+ app_label = "ceilometer"
+ verbose_name = "sFlow Collection Service"
+ proxy = True
+
+ default_attributes = {"sflow_port": SFLOW_PORT, "sflow_api_port": SFLOW_API_PORT}
+
+ sync_attributes = ("sflow_port", "sflow_api_port",)
+
+ @property
+ def sflow_port(self):
+ return self.get_attribute("sflow_port", self.default_attributes["sflow_port"])
+
+ @sflow_port.setter
+ def sflow_port(self, value):
+ self.set_attribute("sflow_port", value)
+
+ @property
+ def sflow_api_port(self):
+ return self.get_attribute("sflow_api_port", self.default_attributes["sflow_api_port"])
+
+ @sflow_api_port.setter
+ def sflow_api_port(self, value):
+ self.set_attribute("sflow_api_port", value)
+
+ def get_instance(self):
+ if self.slices.exists():
+ slice = self.slices.all()[0]
+ if slice.instances.exists():
+ return slice.instances.all()[0]
+
+ return None
+
+ @property
+ def sflow_api_url(self):
+ if not self.get_instance():
+ return None
+ return "http://" + self.get_instance().get_ssh_ip() + ":" + str(self.sflow_api_port) + "/"
+
+class SFlowTenant(Tenant):
+ class Meta:
+ proxy = True
+
+ KIND = SFLOW_KIND
+
+ sync_attributes = ("listening_endpoint", )
+
+ default_attributes = {}
+ def __init__(self, *args, **kwargs):
+ sflow_services = SFlowService.get_service_objects().all()
+ if sflow_services:
+ self._meta.get_field("provider_service").default = sflow_services[0].id
+ super(SFlowTenant, self).__init__(*args, **kwargs)
+
+ @property
+ def creator(self):
+ from core.models import User
+ if getattr(self, "cached_creator", None):
+ return self.cached_creator
+ creator_id=self.get_attribute("creator_id")
+ if not creator_id:
+ return None
+ users=User.objects.filter(id=creator_id)
+ if not users:
+ return None
+ user=users[0]
+ self.cached_creator = users[0]
+ return user
+
+ @creator.setter
+ def creator(self, value):
+ if value:
+ value = value.id
+ if (value != self.get_attribute("creator_id", None)):
+ self.cached_creator=None
+ self.set_attribute("creator_id", value)
+
+ @property
+ def listening_endpoint(self):
+ return self.get_attribute("listening_endpoint", None)
+
+ @listening_endpoint.setter
+ def listening_endpoint(self, value):
+ if urlparse(value).scheme != 'udp':
+ raise XOSProgrammingError("SFlowTenant: Only UDP listening endpoint URLs are accepted...valid syntax is: udp://ip:port")
+ self.set_attribute("listening_endpoint", value)
+
+ def save(self, *args, **kwargs):
+ if not self.creator:
+ if not getattr(self, "caller", None):
+ # caller must be set when creating a SFlow tenant since it creates a slice
+ raise XOSProgrammingError("SFlowTenant's self.caller was not set")
+ self.creator = self.caller
+ if not self.creator:
+ raise XOSProgrammingError("SFlowTenant's self.creator was not set")
+
+ if not self.listening_endpoint:
+ raise XOSProgrammingError("SFlowTenant's self.listening_endpoint was not set")
+
+ if self.pk is None:
+ #Allow only one sflow channel per user and listening_endpoint
+ channel_count = sum ( [1 for channel in SFlowTenant.objects.filter(kind=SFLOW_KIND) if ((channel.creator == self.creator) and (channel.listening_endpoint == self.listening_endpoint))] )
+ if channel_count > 0:
+ raise XOSValidationError("Already %s sflow channels exist for user Can only create max 1 tenant per user and listening endpoint" % str(channel_count))
+
+ super(SFlowTenant, self).save(*args, **kwargs)
+
+ def delete(self, *args, **kwargs):
+ super(MonitoringChannel, self).delete(*args, **kwargs)
+
+ @property
+ def authorized_resource_list(self):
+ return ['all']
+
+ @property
+ def authorized_resource_list_str(self):
+ return ", ".join(self.authorized_resource_list)
+
diff --git a/xos/synchronizer/files/docker.list b/xos/synchronizer/files/docker.list
new file mode 100644
index 0000000..0ee9ae0
--- /dev/null
+++ b/xos/synchronizer/files/docker.list
@@ -0,0 +1 @@
+deb https://get.docker.com/ubuntu docker main
diff --git a/xos/synchronizer/files/vm-resolv.conf b/xos/synchronizer/files/vm-resolv.conf
new file mode 100644
index 0000000..cae093a
--- /dev/null
+++ b/xos/synchronizer/files/vm-resolv.conf
@@ -0,0 +1 @@
+nameserver 8.8.8.8
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
new file mode 100644
index 0000000..c679225
--- /dev/null
+++ b/xos/synchronizer/manifest
@@ -0,0 +1,26 @@
+templates/Dockerfile.monitoring_channel
+templates/ceilometer_proxy_config.j2
+templates/Dockerfile.sflowpubsub
+templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
+templates/sflow_pub_sub/README
+templates/sflow_pub_sub/sflow_sub_records.py
+templates/sflow_pub_sub/start_sflow_pub_sub
+templates/sflow_pub_sub/sflow_pub_sub_main.py
+templates/sflow_pub_sub/sflow_pub_sub_config.j2
+templates/start-monitoring-channel.sh.j2
+templates/monitoring-channel.conf.j2
+templates/ceilometer_proxy_server.py
+templates/start_ceilometer_proxy
+manifest
+monitoring_channel_synchronizer_config
+steps/sync_sflowtenant.yaml
+steps/sync_sflowtenant.py
+steps/sync_monitoringchannel.yaml
+steps/sync_monitoringchannel.py
+steps/sync_sflowservice.yaml
+steps/sync_sflowservice.py
+files/vm-resolv.conf
+files/docker.list
+model-deps
+supervisor/monitoring_channel_observer.conf
+monitoring_channel_synchronizer.py
diff --git a/xos/synchronizer/model-deps b/xos/synchronizer/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizer/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizer/monitoring_channel_synchronizer.py b/xos/synchronizer/monitoring_channel_synchronizer.py
new file mode 100755
index 0000000..84bec4f
--- /dev/null
+++ b/xos/synchronizer/monitoring_channel_synchronizer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs ../../xos-observer.py
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
diff --git a/xos/synchronizer/monitoring_channel_synchronizer_config b/xos/synchronizer/monitoring_channel_synchronizer_config
new file mode 100644
index 0000000..8c6578f
--- /dev/null
+++ b/xos/synchronizer/monitoring_channel_synchronizer_config
@@ -0,0 +1,41 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=monitoring_channel
+dependency_graph=/opt/xos/synchronizers/monitoring_channel/model-deps
+steps_dir=/opt/xos/synchronizers/monitoring_channel/steps
+sys_dir=/opt/xos/synchronizers/monitoring_channel/sys
+deleters_dir=/opt/xos/synchronizers/monitoring_channel/deleters
+log_file=console
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+proxy_ssh=False
+full_setup=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/xos/synchronizer/steps/sync_monitoringchannel.py b/xos/synchronizer/steps/sync_monitoringchannel.py
new file mode 100644
index 0000000..2c0ba10
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoringchannel.py
@@ -0,0 +1,84 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from services.ceilometer.models import CeilometerService, MonitoringChannel
+from xos.logger import Logger, logging
+
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncMonitoringChannel(SyncInstanceUsingAnsible):
+ provides=[MonitoringChannel]
+ observes=MonitoringChannel
+ requested_interval=0
+ template_name = "sync_monitoringchannel.yaml"
+ service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
+
+ def __init__(self, *args, **kwargs):
+ super(SyncMonitoringChannel, self).__init__(*args, **kwargs)
+
+ def fetch_pending(self, deleted):
+ if (not deleted):
+ objs = MonitoringChannel.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+ else:
+ objs = MonitoringChannel.get_deleted_tenant_objects()
+
+ return objs
+
+ def get_extra_attributes(self, o):
+ # This is a place to include extra attributes. In the case of Monitoring Channel, we need to know
+ # 1) Allowed tenant ids
+ # 2) Ceilometer API service endpoint URL if running externally
+ # 3) Credentials to access Ceilometer API service
+
+ ceilometer_services = CeilometerService.get_service_objects().filter(id=o.provider_service.id)
+ if not ceilometer_services:
+ raise "No associated Ceilometer service"
+ ceilometer_service = ceilometer_services[0]
+ ceilometer_pub_sub_url = ceilometer_service.ceilometer_pub_sub_url
+ if not ceilometer_pub_sub_url:
+ ceilometer_pub_sub_url = ''
+ instance = self.get_instance(o)
+
+ try:
+ full_setup = Config().observer_full_setup
+ except:
+ full_setup = True
+
+ fields = {"unique_id": o.id,
+ "allowed_tenant_ids": o.tenant_list,
+ "auth_url":instance.controller.auth_url,
+ "admin_user":instance.controller.admin_user,
+ "admin_password":instance.controller.admin_password,
+ "admin_tenant":instance.controller.admin_tenant,
+ "ceilometer_pub_sub_url": ceilometer_pub_sub_url,
+ "full_setup": full_setup}
+
+ return fields
+
+ def run_playbook(self, o, fields):
+ #ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
+ #quick_update = (o.last_ansible_hash == ansible_hash)
+
+ #if quick_update:
+ # logger.info("quick_update triggered; skipping ansible recipe")
+ #else:
+ super(SyncMonitoringChannel, self).run_playbook(o, fields)
+
+ #o.last_ansible_hash = ansible_hash
+
+ def map_delete_inputs(self, o):
+ fields = {"unique_id": o.id,
+ "delete": True}
+ return fields
diff --git a/xos/synchronizer/steps/sync_monitoringchannel.yaml b/xos/synchronizer/steps/sync_monitoringchannel.yaml
new file mode 100644
index 0000000..ca72c5f
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoringchannel.yaml
@@ -0,0 +1,145 @@
+---
+- hosts: {{ instance_name }}
+ gather_facts: False
+ connection: ssh
+ user: ubuntu
+ sudo: yes
+ vars:
+ unique_id: {{ unique_id }}
+ auth_url: {{ auth_url }}
+ admin_user: {{ admin_user }}
+ admin_password: {{ admin_password }}
+ admin_tenant: {{ admin_tenant }}
+ shared_lan_ip: {{ private_ip }}
+ shared_lan_mac: {{ private_mac }}
+ headnode_flat_lan_ip: {{ rabbit_host }}
+ ceilometer_client_acess_ip: {{ ceilometer_ip }}
+ ceilometer_client_acess_mac: {{ ceilometer_mac }}
+ ceilometer_host_port: {{ ceilometer_port }}
+ ceilometer_pub_sub_url: {{ ceilometer_pub_sub_url }}
+ allowed_tenant_ids:
+ {% for allowed_tenant_id in allowed_tenant_ids %}
+ - {{ allowed_tenant_id }}
+ {% endfor %}
+
+ tasks:
+{% if delete %}
+ - name: Remove tenant
+# FIXME: Adding dummy template action to avoid "action attribute missing in task" error
+ template: src=/opt/xos/synchronizers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+ notify:
+ - stop monitoring-channel
+ - remove container
+{% else %}
+{% if full_setup %}
+# - name: Docker repository
+# copy: src=/opt/xos/synchronizers/monitoring_channel/files/docker.list
+# dest=/etc/apt/sources.list.d/docker.list
+#
+# - name: Import the repository key
+# apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
+#
+# - name: install Docker
+# apt: name=lxc-docker state=present update_cache=yes
+#
+# - name: install python-setuptools
+# apt: name=python-setuptools state=present
+#
+# - name: install pip
+# easy_install: name=pip
+#
+# - name: install docker-py
+# pip: name=docker-py version=0.5.3
+#
+# - name: install Pipework
+# get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+# dest=/usr/local/bin/pipework
+# mode=0755
+#
+# - name: Disable resolvconf service
+# shell: service resolvconf stop
+# shell: echo manual > /etc/init/resolvconf.override
+# shell: rm -f /etc/resolv.conf
+#
+# - name: Install resolv.conf
+# copy: src=/opt/xos/synchronizers/monitoring_channel/files/vm-resolv.conf
+# dest=/etc/resolv.conf
+{% endif %}
+
+# FIXME: Temporary workaround to delete the monitoring-channel_ceilometer_proxy_config file always
+# to trigger ansible notify handlers in the following task.
+# Due to some issue, ansible "changed" flag is set to false even though there is a change of
+# ceilometer configuration file, because of which the configuration change is not reflecting in
+# ceilometer containers
+# - file: path=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config state=absent
+
+ - name: ceilometer proxy config
+ template: src=/opt/xos/synchronizers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+ notify:
+ - copy ceilo-config-file
+ - restart monitoring-channel container
+# - stop monitoring-channel
+# - remove container
+# - start monitoring-channel
+
+ - name: Monitoring channel upstart
+ template: src=/opt/xos/synchronizers/monitoring_channel/templates/monitoring-channel.conf.j2 dest=/etc/init/monitoring-channel-{{ unique_id }}.conf
+
+ - name: Monitoring channel startup script
+ template: src=/opt/xos/synchronizers/monitoring_channel/templates/start-monitoring-channel.sh.j2 dest=/usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh mode=0755
+ notify:
+# - restart monitoring-channel
+ - stop monitoring-channel
+ - remove container
+ - start monitoring-channel
+
+# - name: Start monitoring-channel container
+# docker:
+# docker_api_version: "1.18"
+# name: monitoring-channel-{{ unique_id }}
+# # was: reloaded
+# state: running
+# image: srikanthvavila/monitoring-channel
+# expose:
+# - 8000
+# ports:
+# - "{{ ceilometer_port }}:8000"
+# volumes:
+# - /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config
+#
+# - name: Get Docker IP
+# #TODO: copy dockerip.sh to monitoring service synchronizer
+# script: /opt/xos/synchronizers/onos/scripts/dockerip.sh monitoring-channel-{{ unique_id }}
+# register: dockerip
+#
+# - name: Wait for Monitoring channel to come up
+# wait_for:
+# host={{ '{{' }} dockerip.stdout {{ '}}' }}
+# port={{ '{{' }} item {{ '}}' }}
+# state=present
+# with_items:
+# - {{ ceilometer_port }}
+# These are samples, not necessary for correct function of demo
+
+ - name: Make sure Monitoring channel service is running
+ service: name=monitoring-channel-{{ unique_id }} state=started
+{% endif %}
+
+ handlers:
+ - name: copy ceilo-config-file
+ shell: docker cp /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config monitoring-channel-{{ unique_id }}:/usr/local/share/ceilometer_proxy_config
+
+ - name: restart monitoring-channel container
+ shell: docker restart monitoring-channel-{{ unique_id }}
+
+ - name: restart monitoring-channel
+ shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service monitoring-channel-{{ unique_id }} start
+
+ - name: stop monitoring-channel
+ service: name=monitoring-channel-{{ unique_id }} state=stopped
+
+ - name: remove container
+ docker: name=monitoring-channel-{{ unique_id }} state=absent image=monitoring-channel
+
+ - name: start monitoring-channel
+ service: name=monitoring-channel-{{ unique_id }} state=started
diff --git a/xos/synchronizer/steps/sync_sflowservice.py b/xos/synchronizer/steps/sync_sflowservice.py
new file mode 100644
index 0000000..154c5ab
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowservice.py
@@ -0,0 +1,74 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from services.ceilometer.models import SFlowService
+from xos.logger import Logger, logging
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncSFlowService(SyncInstanceUsingAnsible):
+    # Sync step that provisions the sFlow pub/sub service container on an
+    # instance owned by the SFlowService's slice.
+    provides=[SFlowService]
+    observes=SFlowService
+    requested_interval=0
+    template_name = "sync_sflowservice.yaml"
+    service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncSFlowService, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        # Pending = changed since last enacted (or never enacted) and not
+        # lazy-blocked; deleted objects are handled separately.
+        if (not deleted):
+            objs = SFlowService.get_service_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = SFlowService.get_deleted_service_objects()
+
+        return objs
+
+    def get_instance(self, o):
+        # We assume the sFlow service owns a slice, so pick one of the instances
+        # inside that slice to sync to.  (Original comment said "ONOS" -- this
+        # code was copied from the ONOS synchronizer.)
+
+        serv = o
+
+        if serv.slices.exists():
+            slice = serv.slices.all()[0]
+            if slice.instances.exists():
+                return slice.instances.all()[0]
+
+        return None
+
+    def get_extra_attributes(self, o):
+        # Extra playbook variables: where the sFlow pub/sub container listens.
+        fields={}
+        fields["instance_hostname"] = self.get_instance(o).instance_name.replace("_","-")
+        fields["sflow_port"] = o.sflow_port
+        fields["sflow_api_port"] = o.sflow_api_port
+        fields["sflow_container"] = "sflowpubsub"
+        return fields
+
+    def sync_fields(self, o, fields):
+        # the super causes the playbook to be run
+        super(SyncSFlowService, self).sync_fields(o, fields)
+
+    def run_playbook(self, o, fields):
+        instance = self.get_instance(o)
+        if (instance.isolation=="container"):
+            # If the instance is itself a container there is nothing to
+            # provision here, so skip the playbook.
+            return
+        super(SyncSFlowService, self).run_playbook(o, fields)
+
+    def delete_record(self, m):
+        # No teardown action implemented for deleted services.
+        pass
diff --git a/xos/synchronizer/steps/sync_sflowservice.yaml b/xos/synchronizer/steps/sync_sflowservice.yaml
new file mode 100644
index 0000000..8d853a2
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowservice.yaml
@@ -0,0 +1,74 @@
+---
+- hosts: {{ instance_name }}
+ gather_facts: False
+ connection: ssh
+ user: ubuntu
+ sudo: yes
+ vars:
+ sflow_port: {{ sflow_port }}
+ sflow_api_port: {{ sflow_api_port }}
+
+ tasks:
+
+ - name: Fix /etc/hosts
+ lineinfile:
+ dest=/etc/hosts
+ regexp="127.0.0.1 localhost"
+ line="127.0.0.1 localhost {{ instance_hostname }}"
+
+ - name: Add repo key
+ apt_key:
+ keyserver=hkp://pgp.mit.edu:80
+ id=58118E89F3A912897C070ADBF76221572C52609D
+
+ - name: Install Docker repo
+ apt_repository:
+ repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
+ state=present
+
+ - name: Install Docker
+ apt:
+ name={{ '{{' }} item {{ '}}' }}
+ state=latest
+ update_cache=yes
+ with_items:
+ - docker-engine
+ - python-pip
+ - python-httplib2
+
+ - name: Install docker-py
+ pip:
+ name=docker-py
+ state=latest
+
+ - name: sflow pub-sub config
+ template: src=/opt/xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_pub_sub_config.j2 dest=/usr/local/share/sflow_pub_sub.conf mode=0777
+
+ - name: Start SFLOW pub-sub container
+ docker:
+ docker_api_version: "1.18"
+ name: {{ sflow_container }}
+ # was: reloaded
+ state: running
+ image: srikanthvavila/sflowpubsub
+ expose:
+ - {{ sflow_api_port }}
+ - {{ sflow_port }}/udp
+ ports:
+ - "{{ sflow_port }}:{{ sflow_port }}/udp"
+ - "{{ sflow_api_port }}:{{ sflow_api_port }}"
+ volumes:
+ - /usr/local/share/sflow_pub_sub.conf:/usr/local/share/sflow_pub_sub/sflow_pub_sub.conf
+
+ - name: Get Docker IP
+ #TODO: copy dockerip.sh to monitoring service synchronizer
+ script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ sflow_container }}
+ register: dockerip
+
+ - name: Wait for SFlow service to come up
+ wait_for:
+ host={{ '{{' }} dockerip.stdout {{ '}}' }}
+ port={{ '{{' }} item {{ '}}' }}
+ state=present
+ with_items:
+ - {{ sflow_api_port }}
diff --git a/xos/synchronizer/steps/sync_sflowtenant.py b/xos/synchronizer/steps/sync_sflowtenant.py
new file mode 100644
index 0000000..a15fa54
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowtenant.py
@@ -0,0 +1,82 @@
+import hashlib
+import os
+import socket
+import socket
+import sys
+import base64
+import time
+import re
+import json
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice, ControllerSlice, ControllerUser
+from services.ceilometer.models import SFlowService, SFlowTenant
+from xos.logger import Logger, logging
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncSFlowTenant(SyncInstanceUsingAnsible):
+ provides=[SFlowTenant]
+ observes=SFlowTenant
+ requested_interval=0
+ template_name = "sync_sflowtenant.yaml"
+ service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
+
+ def __init__(self, *args, **kwargs):
+ super(SyncSFlowTenant, self).__init__(*args, **kwargs)
+
+ def fetch_pending(self, deleted):
+ if (not deleted):
+ objs = SFlowTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+ else:
+ objs = SFlowTenant.get_deleted_tenant_objects()
+
+ return objs
+
+ def get_sflow_service(self, o):
+ sflows = SFlowService.get_service_objects().filter(id=o.provider_service.id)
+ if not sflows:
+ raise "No associated SFlow service"
+
+ return sflows[0]
+
+ def get_instance(self, o):
+ # We assume the SFlow service owns a slice, so pick one of the instances
+ # inside that slice to sync to.
+
+ serv = self.get_sflow_service(o)
+
+ if serv.slices.exists():
+ slice = serv.slices.all()[0]
+ if slice.instances.exists():
+ return slice.instances.all()[0]
+
+ return None
+
+ def get_extra_attributes(self, o):
+ instance = self.get_instance(o)
+
+ fields={}
+ fields["sflow_api_base_url"] = self.get_sflow_service(o).sflow_api_url
+ fields["sflow_api_port"] = self.get_sflow_service(o).sflow_api_port
+ fields["listening_endpoint"] = o.listening_endpoint
+ fields["sflow_container"] = "sflowpubsub"
+
+ return fields
+
+ def sync_fields(self, o, fields):
+ # the super causes the playbook to be run
+ super(SyncSFlowTenant, self).sync_fields(o, fields)
+
+ def run_playbook(self, o, fields):
+ super(SyncSFlowTenant, self).run_playbook(o, fields)
+
+ def delete_record(self, m):
+ pass
diff --git a/xos/synchronizer/steps/sync_sflowtenant.yaml b/xos/synchronizer/steps/sync_sflowtenant.yaml
new file mode 100644
index 0000000..701ce5c
--- /dev/null
+++ b/xos/synchronizer/steps/sync_sflowtenant.yaml
@@ -0,0 +1,28 @@
+---
+- hosts: {{ instance_name }}
+ gather_facts: False
+ connection: ssh
+ user: {{ username }}
+ sudo: yes
+
+ tasks:
+
+ - name: Get Docker IP
+ #TODO: copy dockerip.sh to monitoring service synchronizer
+ script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ sflow_container }}
+ register: sflowserviceaddr
+
+ - name: Wait for SFlow service to come up
+ wait_for:
+ host={{ '{{' }} sflowserviceaddr.stdout {{ '}}' }}
+ port={{ '{{' }} item {{ '}}' }}
+ state=present
+ with_items:
+ - {{ sflow_api_port }}
+
+ - name: Invoke SFlow service REST API to subscribe
+ uri:
+ url: http://{{ '{{' }} sflowserviceaddr.stdout {{ '}}' }}:{{ sflow_api_port }}/subscribe
+ body: "{{ listening_endpoint }}"
+ body_format: raw
+ method: POST
diff --git a/xos/synchronizer/supervisor/monitoring_channel_observer.conf b/xos/synchronizer/supervisor/monitoring_channel_observer.conf
new file mode 100644
index 0000000..1c2dd42
--- /dev/null
+++ b/xos/synchronizer/supervisor/monitoring_channel_observer.conf
@@ -0,0 +1,2 @@
+[program:monitoring_channel_observer]
+command=python /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer.py -C /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config
diff --git a/xos/synchronizer/templates/Dockerfile.monitoring_channel b/xos/synchronizer/templates/Dockerfile.monitoring_channel
new file mode 100644
index 0000000..45defb8
--- /dev/null
+++ b/xos/synchronizer/templates/Dockerfile.monitoring_channel
@@ -0,0 +1,26 @@
+FROM ubuntu:14.04.2
+MAINTAINER Andy Bavier <acb@cs.princeton.edu>
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN apt-get update && apt-get install -y \
+ python-pip \
+ python-dev
+
+RUN pip install web.py
+RUN pip install wsgilog
+RUN pip install python-ceilometerclient
+RUN mkdir -p /usr/local/share
+ADD ceilometer_proxy_server.py /usr/local/share/
+RUN chmod +x /usr/local/share/ceilometer_proxy_server.py
+ADD start_ceilometer_proxy /usr/local/sbin/
+RUN chmod +x /usr/local/sbin/start_ceilometer_proxy
+EXPOSE 8000
+WORKDIR /usr/local/share
+CMD /usr/local/sbin/start_ceilometer_proxy
diff --git a/xos/synchronizer/templates/Dockerfile.sflowpubsub b/xos/synchronizer/templates/Dockerfile.sflowpubsub
new file mode 100644
index 0000000..c9025ee
--- /dev/null
+++ b/xos/synchronizer/templates/Dockerfile.sflowpubsub
@@ -0,0 +1,22 @@
+FROM ubuntu:14.04.2
+MAINTAINER Andy Bavier <acb@cs.princeton.edu>
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN apt-get update && apt-get install -y \
+ python-pip \
+ python-dev
+
+RUN pip install Flask
+RUN mkdir -p /usr/local/share/
+ADD sflow_pub_sub /usr/local/share/sflow_pub_sub
+RUN chmod +x /usr/local/share/sflow_pub_sub/sflow_pub_sub_main.py
+RUN chmod +x /usr/local/share/sflow_pub_sub/start_sflow_pub_sub
+WORKDIR /usr/local/share/sflow_pub_sub/
+CMD /usr/local/share/sflow_pub_sub/start_sflow_pub_sub
diff --git a/xos/synchronizer/templates/ceilometer_proxy_config.j2 b/xos/synchronizer/templates/ceilometer_proxy_config.j2
new file mode 100644
index 0000000..bd6c521
--- /dev/null
+++ b/xos/synchronizer/templates/ceilometer_proxy_config.j2
@@ -0,0 +1,17 @@
+# This file autogenerated by monitoring-channel observer
+# It contains a list of attributes to be used by ceilometer proxy web server
+# syntax: key=value
+
+[default]
+auth_url={{ auth_url }}
+admin_user={{ admin_user }}
+admin_tenant={{ admin_tenant }}
+admin_password={{ admin_password }}
+ceilometer_pub_sub_url={{ ceilometer_pub_sub_url }}
+
+[allowed_tenants]
+{% if allowed_tenant_ids %}
+{% for tenant_id in allowed_tenant_ids %}
+{{ tenant_id }}
+{% endfor %}
+{% endif %}
diff --git a/xos/synchronizer/templates/ceilometer_proxy_server.py b/xos/synchronizer/templates/ceilometer_proxy_server.py
new file mode 100644
index 0000000..c81b941
--- /dev/null
+++ b/xos/synchronizer/templates/ceilometer_proxy_server.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python
+import web
+import ConfigParser
+import io
+import json
+from ceilometerclient import client
+import logging
+import urllib
+import urllib2
+from urlparse import urlparse
+from wsgilog import WsgiLog
+
+web.config.debug=False
+
+logfile = "ceilometer_proxy_server.log"
+level=logging.INFO
+logger=logging.getLogger('ceilometer_proxy_server')
+logger.setLevel(level)
+handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
+logger.addHandler(handler)
+
+class FileLog(WsgiLog):
+ def __init__(self, application):
+ WsgiLog.__init__(
+ self,
+ application,
+ logformat = '%(message)s',
+ tofile = True,
+ toprint = True,
+ prnlevel = level,
+ file = logfile,
+ backups =1
+ )
+ def __call__(self, environ, start_response):
+ def hstart_response(status, response_headers, *args):
+ out = start_response(status, response_headers, *args)
+ try:
+ logline=environ["SERVER_PROTOCOL"]+" "+environ["REQUEST_METHOD"]+" "+environ["REQUEST_URI"]+" - "+status
+ except err:
+ logline="Could not log <%s> due to err <%s>" % (str(environ), err)
+ logger.info(logline)
+
+ return out
+
+ return super(FileLog, self).__call__(environ, hstart_response)
+
+#TODOs:
+#-See if we can avoid using python-ceilometerclient and instead use the REST calls directly with AuthToken
+#
+# URL routing table: mirrors a subset of the ceilometer v2 REST API.
+urls = (
+    r'^/v2/meters$', 'meter_list',
+    r'^/v2/meters/(?P<meter_name>[A-Za-z0-9_:.\-]+)/statistics$', 'statistics_list',
+    r'^/v2/samples$', 'sample_list',
+    r'^/v2/resources$', 'resource_list',
+    r'^/v2/subscribe$', 'pubsub_handler',
+)
+
+app = web.application(urls, globals())
+
+# Lazily-initialized module state; see parse_ceilometer_proxy_config() and
+# ceilometerclient() below.
+config = None
+ceilometer_client = None
+
+
+def parse_ceilometer_proxy_config():
+    # (Re)load 'ceilometer_proxy_config' from the working directory into the
+    # module-level `config`.  allow_no_value lets the [allowed_tenants]
+    # section hold bare tenant-id keys with no value.
+    global config
+    config = ConfigParser.RawConfigParser(allow_no_value=True)
+    config.read('ceilometer_proxy_config')
+
+def ceilometerclient():
+    # Return the cached ceilometer v2 client, building it on first use from
+    # the keystone credentials in the proxy config.
+    global config, ceilometer_client
+    if ceilometer_client:
+        return ceilometer_client
+
+    if not config:
+        parse_ceilometer_proxy_config()
+
+    keystone = {}
+    keystone['os_username']=config.get('default','admin_user')
+    keystone['os_password']=config.get('default','admin_password')
+    keystone['os_auth_url']=config.get('default','auth_url')
+    keystone['os_tenant_name']=config.get('default','admin_tenant')
+    ceilometer_client = client.get_client(2,**keystone)
+    logger.info('ceilometer get_client is successful')
+    return ceilometer_client
+
+def make_query(user_id=None, tenant_id=None, resource_id=None,
+               user_ids=None, tenant_ids=None, resource_ids=None):
+    """Returns query built from given parameters.
+
+    This query can be then used for querying resources, meters and
+    statistics.
+
+    :Parameters:
+      - `user_id`: user_id, has a priority over list of ids
+      - `tenant_id`: tenant_id, has a priority over list of ids
+      - `resource_id`: resource_id, has a priority over list of ids
+      - `user_ids`: list of user_ids
+      - `tenant_ids`: list of tenant_ids
+      - `resource_ids`: list of resource_ids
+    """
+    user_ids = user_ids or []
+    tenant_ids = tenant_ids or []
+    resource_ids = resource_ids or []
+
+    # Each constraint becomes a ceilometer "field op value" filter dict.
+    query = []
+    if user_id:
+        user_ids = [user_id]
+    for u_id in user_ids:
+        query.append({"field": "user_id", "op": "eq", "value": u_id})
+
+    if tenant_id:
+        tenant_ids = [tenant_id]
+    for t_id in tenant_ids:
+        query.append({"field": "project_id", "op": "eq", "value": t_id})
+
+    if resource_id:
+        resource_ids = [resource_id]
+    for r_id in resource_ids:
+        query.append({"field": "resource_id", "op": "eq", "value": r_id})
+
+    return query
+
+def filter_query_params(query_params):
+    # Split the caller-supplied query: project/tenant constraints are pulled
+    # out (the proxy enforces its own allowed-tenant list), everything else
+    # passes through unchanged.  Returns (filtered_query, requested_tenants).
+    new_query=[]
+    i=0
+    user_specified_tenants=[]
+    for field in query_params['q.field']:
+        if (field != 'project_id') and (field != 'project'):
+            query = {}
+            query['field']=field
+            if query_params['q.op'][i] != '':
+                query['op']=query_params['q.op'][i]
+            query['value']=query_params['q.value'][i]
+            new_query.append(query)
+        else:
+            user_specified_tenants.append(query_params['q.value'][i])
+        i=i+1
+    return new_query,user_specified_tenants
+
+class meter_list:
+    # GET /v2/meters: list meters for every allowed tenant (optionally
+    # narrowed to tenants the caller asked for), merging the results.
+    def GET(self):
+        global config
+        keyword_args = {
+            "q.field": [],
+            "q.op": [],
+            "q.type": [],
+            "q.value": [],
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        meters=[]
+        for (k,v) in config.items('allowed_tenants'):
+            # k is the allowed tenant id; skip tenants the caller filtered out
+            if user_specified_tenants and (k not in user_specified_tenants):
+                continue
+            final_query=[]
+            final_query.extend(new_query)
+            query = make_query(tenant_id=k)
+            final_query.extend(query)
+            logger.debug('final query=%s',final_query)
+            results = client.meters.list(q=final_query)
+            meters.extend(results)
+        return json.dumps([ob._info for ob in meters])
+
+class statistics_list:
+    # GET /v2/meters/<meter_name>/statistics: per-tenant statistics for the
+    # given meter, merged across all allowed tenants.
+    def GET(self, meter_name):
+        global config
+        keyword_args = {
+            "q.field": [],
+            "q.op": [],
+            "q.type": [],
+            "q.value": [],
+            "period": None
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        period = query_params.period
+        statistics = []
+        for (k,v) in config.items('allowed_tenants'):
+            # k is the allowed tenant id; skip tenants the caller filtered out
+            if user_specified_tenants and (k not in user_specified_tenants):
+                continue
+            final_query=[]
+            final_query.extend(new_query)
+            query = make_query(tenant_id=k)
+            final_query.extend(query)
+            logger.debug('final query=%s',final_query)
+            results = client.statistics.list(meter_name=meter_name, q=final_query, period=period)
+            statistics.extend(results)
+        return json.dumps([ob._info for ob in statistics])
+
+class sample_list:
+    # GET /v2/samples: list samples for every allowed tenant, merged.
+    def GET(self):
+        global config
+        keyword_args = {
+            "q.field": [],
+            "q.op": [],
+            "q.type": [],
+            "q.value": [],
+            "limit": None,
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        limit=query_params.limit
+        samples=[]
+        for (k,v) in config.items('allowed_tenants'):
+            # k is the allowed tenant id; skip tenants the caller filtered out
+            if user_specified_tenants and (k not in user_specified_tenants):
+                continue
+            final_query=[]
+            final_query.extend(new_query)
+            query = make_query(tenant_id=k)
+            final_query.extend(query)
+            logger.debug('final query=%s',final_query)
+            # NOTE(review): new_samples appears to be the v2 sample manager of
+            # python-ceilometerclient (distinct from the older meter-scoped
+            # samples API) -- confirm against the client version in use.
+            results = client.new_samples.list(q=final_query,limit=limit)
+            samples.extend(results)
+        return json.dumps([ob._info for ob in samples])
+
+class resource_list:
+    # GET /v2/resources: list resources for every allowed tenant, merged.
+    def GET(self):
+        global config
+        keyword_args = {
+            "q.field": [],
+            "q.op": [],
+            "q.type": [],
+            "q.value": [],
+            "limit": None,
+            "links": None,
+        }
+        query_params = web.input(**keyword_args)
+        new_query, user_specified_tenants = filter_query_params(query_params)
+
+        client = ceilometerclient()
+        limit=query_params.limit
+        links=query_params.links
+        resources=[]
+        for (k,v) in config.items('allowed_tenants'):
+            # k is the allowed tenant id; skip tenants the caller filtered out
+            if user_specified_tenants and (k not in user_specified_tenants):
+                continue
+            final_query=[]
+            final_query.extend(new_query)
+            query = make_query(tenant_id=k)
+            final_query.extend(query)
+            logger.debug('final query=%s',final_query)
+            results = client.resources.list(q=final_query, limit=limit, links=links)
+            resources.extend(results)
+        return json.dumps([ob._info for ob in resources])
+
+class pubsub_handler:
+ def POST(self):
+ global config
+ parse_ceilometer_proxy_config()
+ ceilometer_pub_sub_url = config.get('default', 'ceilometer_pub_sub_url')
+ url = urlparse(ceilometer_pub_sub_url)
+ if (not url.scheme) or (not url.netloc):
+ raise Exception("Ceilometer PUB/SUB URL not set")
+ ceilometer_pub_sub_url = url.scheme + "://" + url.netloc + "/subscribe"
+ data_str = unicode(web.data(),'iso-8859-1')
+ post_data = json.loads(data_str)
+ final_query=[]
+ for (k,v) in config.items('allowed_tenants'):
+ query = make_query(tenant_id=k)
+ final_query.extend(query)
+ if not final_query:
+ raise Exception("Not allowed to subscribe to any meters")
+ post_data["query"] = final_query
+ #TODO: The PUB/SUB url needs to be read from config
+ put_request = urllib2.Request(ceilometer_pub_sub_url, json.dumps(post_data))
+ put_request.get_method = lambda: 'SUB'
+ put_request.add_header('Content-Type', 'application/json')
+ response = urllib2.urlopen(put_request)
+ response_text = response.read()
+ return json.dumps(response_text)
+
+ def DELETE(self):
+ ceilometer_pub_sub_url = config.get('default', 'ceilometer_pub_sub_url')
+ url = urlparse(ceilometer_pub_sub_url)
+ if (not url.scheme) or (not url.netloc):
+ raise Exception("Ceilometer PUB/SUB URL not set")
+ ceilometer_pub_sub_url = url.scheme + "://" + url.netloc + "/unsubscribe"
+ data_str = web.data()
+ #TODO: The PUB/SUB url needs to be read from config
+ put_request = urllib2.Request(ceilometer_pub_sub_url, data_str)
+ put_request.get_method = lambda: 'UNSUB'
+ put_request.add_header('Content-Type', 'application/json')
+ response = urllib2.urlopen(put_request)
+ response_text = response.read()
+ return json.dumps(response_text)
+
+if __name__ == "__main__":
+    # Serve through the FileLog wsgilog wrapper so requests are access-logged.
+    app.run(FileLog)
diff --git a/xos/synchronizer/templates/monitoring-channel.conf.j2 b/xos/synchronizer/templates/monitoring-channel.conf.j2
new file mode 100644
index 0000000..eb937ac
--- /dev/null
+++ b/xos/synchronizer/templates/monitoring-channel.conf.j2
@@ -0,0 +1,10 @@
+# Upstart script for Monitoring channel
+description "Upstart script for Monitoring channel container"
+author "andy@onlab.us"
+start on filesystem and started docker
+stop on runlevel [!2345]
+respawn
+
+script
+ /usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh
+end script
diff --git a/xos/synchronizer/templates/sflow_pub_sub/README b/xos/synchronizer/templates/sflow_pub_sub/README
new file mode 100644
index 0000000..ee8ad9b
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/README
@@ -0,0 +1,37 @@
+
+Subscribe-Publish Framework:
+1. Command to install the Flask web server framework:
+  sudo pip install Flask
+
+ Along with flask we need the following packages:
+ msgpack
+ fnmatch
+ operator
+ logging
+ oslo_utils
+ ConfigParser
+
+2.Files: i.sub_main.py
+ ii.pubrecords.py
+ iii.pub_sub.conf
+
+3. Command to start the server:
+  #python sub_main.py
+4.Command for subscription:
+ i.app_id:Application ID,should be unique.
+ ii.target:
+ Presently only udp is supported.
+ a.udp:<ip:portno>
+ b.kafka:<kafkaip:kafkaport>
+  iii.sub_info: Subscription notifications, e.g. cpu_util, cpu_*
+ iv.query:
+     The following information needs to be provided as part of the query:
+      a.field: fields such as user id, project id, etc.
+      b.op: "eq", "gt", "lt", etc.
+      c.value: value of the field.
+ Example:
+ curl -i -H "Content-Type: application/json" -X SUB -d '{"app_id":"10","target":"udp://10.11.10.1:5006","sub_info":"cpu_util","query":[{"field":"user_id","op":"eq","value":"e1271a86bd4e413c87248baf2e5f01e0"},{"field":"project_id","op":"eq","value":"b1a3bf16d2014b47be9aefea88087318"},{"field":"resource_id","op":"eq","value":"658cd03f-d0f0-4f55-9f48-39e7222a8646"}]}' -L http://10.11.10.1:4455/subscribe
+
+5. Command for unsubscription:
+  For unsubscription only the app_id is needed.
+ curl -i -H "Content-Type: application/json" -X UNSUB -d '{"app_id":"10"}' http://10.11.10.1:4455/unsubscribe
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample b/xos/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
new file mode 100644
index 0000000..40b5bf5
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
@@ -0,0 +1,11 @@
+[LOGGING]
+level = DEBUG
+filename = sflow_pub_sub.log
+
+[WEB_SERVER]
+webserver_host = 0.0.0.0
+webserver_port = 33333
+
+[SFLOW]
+listening_ip_addr = 0.0.0.0
+listening_port = 6343
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2 b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2
new file mode 100644
index 0000000..1c5c88c
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2
@@ -0,0 +1,15 @@
+# This file autogenerated by sflow service synchronizer
+# It contains a list of attributes to be used by sflow service
+# syntax: key=value
+
+[LOGGING]
+level = DEBUG
+filename = sflow_pub_sub.log
+
+[WEB_SERVER]
+webserver_host = 0.0.0.0
+webserver_port = {{ sflow_api_port }}
+
+[SFLOW]
+listening_ip_addr = 0.0.0.0
+listening_port = {{ sflow_port }}
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py
new file mode 100644
index 0000000..1276721
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# sFlow pub/sub server: accepts subscriptions over a small Flask REST API and
+# replicates incoming sFlow UDP datagrams to every registered subscriber.
+import socket,thread
+import sys
+import fnmatch
+import operator
+import logging
+import ConfigParser
+from urlparse import urlparse
+from sflow_sub_records import *
+
+from flask import request, Request, jsonify
+from flask import Flask
+from flask import make_response
+app = Flask(__name__)
+
+# Comparator names accepted in query filters.
+COMPARATORS = {
+    'gt': operator.gt,
+    'lt': operator.lt,
+    'ge': operator.ge,
+    'le': operator.le,
+    'eq': operator.eq,
+    'ne': operator.ne,
+}
+
+# Map config-file log level names onto the logging module's constants.
+LEVELS = {'DEBUG': logging.DEBUG,
+          'INFO': logging.INFO,
+          'WARNING': logging.WARNING,
+          'ERROR': logging.ERROR,
+          'CRITICAL': logging.CRITICAL}
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+@app.route('/subscribe',methods=['POST'])
+def subscribe():
+    # Register a subscriber.  The request body is a target URL such as
+    # udp://ip:port; only the udp scheme is implemented (kafka/file are
+    # placeholders below).
+    logging.debug(" SUB data:%s",request.data)
+    target = request.data
+    parse_target=urlparse(target)
+    if not parse_target.netloc:
+        err_str = "Error:Invalid target format"
+        logging.error("* Invalid target format")
+        return err_str
+
+    status = ""
+    if parse_target.scheme == "udp" :
+        host=parse_target.hostname
+        port=parse_target.port
+        scheme = parse_target.scheme
+        app_ip = host
+        app_port = port
+
+        if host == None or port == None :
+            err_str = "* Error: Invalid IP Address format"
+            logging.error("* Invalid IP Address format")
+            return err_str
+
+        # Record the subscriber; duplicates are rejected inside
+        # add_sflow_sub_record.
+        subscrip_obj=sflow_sub_record(scheme,None,app_ip,app_port,None,None)
+        status = add_sflow_sub_record(subscrip_obj)
+        print_sflow_sub_records()
+
+    if parse_target.scheme == "kafka" :
+        pass
+    if parse_target.scheme == "file" :
+        pass
+    return status
+
+@app.route('/unsubscribe',methods=['POST'])
+def unsubscribe():
+    # Remove a subscriber.  The request body is the same udp://ip:port target
+    # used at subscribe time; errors are returned as the response text.
+    try :
+        target = request.data
+        parse_target=urlparse(target)
+        if not parse_target.netloc:
+            err_str = "Error:Invalid target format"
+            logging.error("* Invalid target format")
+            return err_str
+
+        status = ""
+        if parse_target.scheme == "udp" :
+            host=parse_target.hostname
+            port=parse_target.port
+            scheme = parse_target.scheme
+            app_ip = host
+            app_port = port
+
+            # raises if no matching subscription exists; caught below
+            delete_sflow_sub_record(app_ip, app_port)
+    except Exception as e:
+        logging.error("* %s",e.__str__())
+        return e.__str__()
+    return "UnSubscrition is sucessful! \n"
+
+@app.errorhandler(404)
+def not_found(error):
+    # Return a JSON 404 body instead of Flask's default HTML error page.
+    return make_response(jsonify({'error': 'Not found'}), 404)
+
+def sflow_recv(host,port):
+    # Blocking loop: bind a UDP socket on (host, port) and fan each received
+    # sFlow datagram out to every subscriber in sflow_sub_database.
+    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
+    udp.bind((host, port))
+    logging.info("Started sflow receive thread on %s:%s",host, str(port))
+
+    while True:
+        data, source = udp.recvfrom(64000)
+        for obj in sflow_sub_database:
+            target_host = obj.ipaddress
+            target_port = int(obj.portno)
+            try:
+                logging.debug("Replicating the sFlow data to:%s:%s",target_host, str(target_port))
+                udp.sendto(data,(target_host,target_port))
+            except Exception:
+                logging.error ("Unable to send sFlow data to target %s:%s ",target_host,str(target_port))
+    # NOTE(review): unreachable (the loop above never breaks), and the original
+    # indentation of this line looks inconsistent -- verify against the source.
+  logging.warn("Exiting sflow receive thread")
+
+
+def initialize(host,port):
+    # Run the UDP receive/replicate loop in a background thread.
+    thread.start_new(sflow_recv,(host,port,))
+
+if __name__ == "__main__":
+
+ try:
+ config = ConfigParser.ConfigParser()
+ config.read('sflow_pub_sub.conf')
+ webserver_host = config.get('WEB_SERVER','webserver_host')
+ webserver_port = int (config.get('WEB_SERVER','webserver_port'))
+ sflow_listening_ip_addr = config.get('SFLOW','listening_ip_addr')
+ sflow_listening_port = int (config.get('SFLOW','listening_port'))
+
+ log_level = config.get('LOGGING','level')
+ log_file = config.get('LOGGING','filename')
+
+ level = LEVELS.get(log_level, logging.NOTSET)
+ logging.basicConfig(filename=log_file,format='%(asctime)s %(levelname)s %(message)s',\
+ datefmt=_DEFAULT_LOG_DATE_FORMAT,level=level)
+ except Exception as e:
+ print("* Error in config file:",e.__str__())
+ logging.error("* Error in confing file:%s",e.__str__())
+ else:
+ initialize(sflow_listening_ip_addr,sflow_listening_port)
+ app.run(host=webserver_host,port=webserver_port,debug=False)
diff --git a/xos/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py b/xos/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py
new file mode 100644
index 0000000..f8b0038
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+import fnmatch
+import logging
+
class sflow_sub_record:
    """A single sFlow subscription entry.

    Holds the subscriber endpoint (scheme://ipaddress:portno), the calling
    application's identifier, and the raw subscription request details.
    """

    def __init__(self, scheme, app_id, app_ip, app_port, subscription_info, sub_info_filter):
        logging.debug("* Updating subscription_info ")
        # Endpoint that sFlow datagrams will be replicated to.
        self.scheme = scheme
        self.ipaddress = app_ip
        self.portno = app_port
        # Caller identity plus the original request payload and filter.
        self.app_id = app_id
        self.subscription_info = subscription_info
        self.sub_info_filter = sub_info_filter
+
# Module-level registry of active sflow_sub_record entries.
sflow_sub_database = []


def add_sflow_sub_record(record):
    """Register a subscriber, rejecting duplicates on (ipaddress, portno)."""
    logging.info("* inside %s", add_sflow_sub_record.__name__)
    if not sflow_sub_database:
        logging.debug("* -----------List is EMpty -------------")
        sflow_sub_database.append(record)
        logging.debug("* Subscription is sucessful")
        return "Subscription is sucessful \n"
    # Same endpoint already registered?  (ip, port) is the identity key.
    duplicate = any(record.ipaddress == entry.ipaddress and record.portno == entry.portno
                    for entry in sflow_sub_database)
    if duplicate:
        logging.warning("* entry already exists\n")
        return "entry already exists \n"
    sflow_sub_database.append(record)
    return "Subscription is sucessful \n"
+
def delete_sflow_sub_record(ip, port):
    """Remove the subscriber matching (ip, port); raise if none is registered."""
    logging.info("* inside %s", delete_sflow_sub_record.__name__)
    for entry in sflow_sub_database:
        if ip == entry.ipaddress and port == entry.portno:
            # Safe to mutate here: we return immediately after the remove.
            sflow_sub_database.remove(entry)
            logging.debug("* Un-Subscription is sucessful")
            return "Un-Subscription is sucessful \n"
    err_str = "No subscription exists with target: udp://" + ip + ":" + str(port) + "\n"
    logging.error(err_str)
    raise Exception(err_str)
+
def print_sflow_sub_records():
    """Dump every registered subscription to the debug log."""
    logging.info("* inside %s", print_sflow_sub_records.__name__)
    # NOTE(review): "portno" is listed twice to reproduce the original output.
    fields = ("scheme", "app_id", "portno", "ipaddress", "portno",
              "subscription_info", "sub_info_filter")
    for rec in sflow_sub_database:
        logging.debug("* ------------------------------------------------")
        for name in fields:
            logging.debug("* %s:%s", name, getattr(rec, name))
        logging.debug("* ------------------------------------------------")
+
def get_sflow_sub_records(notif_subscription_info):
    """Return all subscribers whose subscription_info equals the given value."""
    logging.info("* inside %s", get_sflow_sub_records.__name__)
    return [rec for rec in sflow_sub_database
            if rec.subscription_info == notif_subscription_info]
diff --git a/xos/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub b/xos/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub
new file mode 100644
index 0000000..e2edda2
--- /dev/null
+++ b/xos/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub
@@ -0,0 +1 @@
+/usr/local/share/sflow_pub_sub/sflow_pub_sub_main.py
diff --git a/xos/synchronizer/templates/start-monitoring-channel.sh.j2 b/xos/synchronizer/templates/start-monitoring-channel.sh.j2
new file mode 100755
index 0000000..4486985
--- /dev/null
+++ b/xos/synchronizer/templates/start-monitoring-channel.sh.j2
@@ -0,0 +1,49 @@
+#!/bin/bash
+
function mac_to_iface {
    # Print the interface whose ifconfig output contains MAC $1, skipping
    # dotted (VLAN/alias) interface names.
    MAC=$1
    # Quote $MAC to prevent word splitting / glob expansion (shellcheck SC2086).
    ifconfig|grep "$MAC"| awk '{print $1}'|grep -v '\.'
}
+
function generate_mac_from_ip {
    # Derive a locally-administered MAC (02:42:aa:bb:cc:dd) by hex-encoding
    # each octet of a dotted-quad IP.
    IP=$1
    # $(...) replaces the legacy backticks; the command substitution is left
    # unquoted on purpose so the four octets become four printf arguments.
    printf "02:42:%02x:%02x:%02x:%02x\n" $(echo "$IP"|awk -F '.' '{print $1, $2, $3, $4}')
}
+
# NOTE(review): listing the chains looks like a no-op done to pre-load the
# iptables/ip6tables kernel modules before docker needs them — confirm.
iptables -L > /dev/null
ip6tables -L > /dev/null

# These values are substituted by the synchronizer's Jinja2 templating pass.
MONITORING_CHANNEL=monitoring-channel-{{ unique_id }}
HEADNODEFLATLANIP={{ headnode_flat_lan_ip }}
HOST_FORWARDING_PORT_FOR_CEILOMETER={{ ceilometer_host_port }}

# docker inspect exits non-zero (1) when the container does not exist yet.
docker inspect $MONITORING_CHANNEL > /dev/null 2>&1
if [ "$?" == 1 ]
then
    #sudo docker build -t monitoring-channel -f Dockerfile.monitoring_channel .
    #sudo docker pull srikanthvavila/monitoring-channel
# Only add the "ctl" host alias when a head-node IP was actually templated in
# (an empty string or the literal "None" means no head node was configured).
if [ -z "$HEADNODEFLATLANIP" ] || [ "$HEADNODEFLATLANIP" == "None" ]
then
# docker run -d --name=$MONITORING_CHANNEL --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 -v /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config srikanthvavila/monitoring-channel
    docker run -d --name=$MONITORING_CHANNEL --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
else
# docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 -v /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config srikanthvavila/monitoring-channel
    docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
fi
else
    # Container already exists: just (re)start it.
    docker start $MONITORING_CHANNEL
fi

# Set up networking via pipework
#SHARED_LAN_IFACE=$( mac_to_iface {{ shared_lan_mac }} )
#docker exec $MONITORING_CHANNEL ifconfig eth0 >> /dev/null || pipework $SHARED_LAN_IFACE -i eth0 $MONITORING_CHANNEL 192.168.0.1/24

# Make sure VM's eth0 (hpc_client) has no IP address
#ifconfig $HPC_IFACE 0.0.0.0

# Now copy ceilometer proxy configuration to container
#cat /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config | sudo docker exec -i $MONITORING_CHANNEL bash -c 'cat > /usr/local/share/ceilometer_proxy_config'
docker cp /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config $MONITORING_CHANNEL:/usr/local/share/ceilometer_proxy_config

# Attach to container
docker start -a $MONITORING_CHANNEL
diff --git a/xos/synchronizer/templates/start_ceilometer_proxy b/xos/synchronizer/templates/start_ceilometer_proxy
new file mode 100644
index 0000000..ddaa9c8
--- /dev/null
+++ b/xos/synchronizer/templates/start_ceilometer_proxy
@@ -0,0 +1 @@
+/usr/local/share/ceilometer_proxy_server.py 8000
diff --git a/xos/templates/ceilometeradmin.html b/xos/templates/ceilometeradmin.html
new file mode 100644
index 0000000..40f57e8
--- /dev/null
+++ b/xos/templates/ceilometeradmin.html
@@ -0,0 +1,6 @@
<div class="row text-center">
+ <div class="col-xs-12">
+ <a class="btn btn-primary" href="/admin/ceilometer/monitoringchannel/">Monitoring Channels</a>
+ </div>
+</div>
+
diff --git a/xos/templates/sflowadmin.html b/xos/templates/sflowadmin.html
new file mode 100644
index 0000000..3cbb333
--- /dev/null
+++ b/xos/templates/sflowadmin.html
@@ -0,0 +1,6 @@
<div class="row text-center">
+ <div class="col-xs-12">
+ <a class="btn btn-primary" href="/admin/ceilometer/sflowtenant/">sFlow Tenants</a>
+ </div>
+</div>
+
diff --git a/xos/tosca/resources/ceilometerservice.py b/xos/tosca/resources/ceilometerservice.py
new file mode 100644
index 0000000..e77fa55
--- /dev/null
+++ b/xos/tosca/resources/ceilometerservice.py
@@ -0,0 +1,41 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+
+from core.models import ServiceAttribute
+from services.ceilometer.models import CeilometerService
+
+from service import XOSService
+
class XOSCeilometerService(XOSService):
    """TOSCA handler mapping tosca.nodes.CeilometerService onto CeilometerService."""
    provides = "tosca.nodes.CeilometerService"
    xos_model = CeilometerService
    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", "ceilometer_pub_sub_url"]

    def set_service_attr(self, obj, prop_name, value):
        """Create or update the ServiceAttribute prop_name on obj.

        Falsy values (after intrinsic-function resolution) are skipped, and the
        database is only touched when the stored value actually differs.
        """
        value = self.try_intrinsic_function(value)
        if not value:
            return
        existing = ServiceAttribute.objects.filter(service=obj, name=prop_name)
        if existing:
            attr = existing[0]
            if attr.value != value:
                self.info("updating attribute %s" % prop_name)
                attr.value = value
                attr.save()
        else:
            self.info("adding attribute %s" % prop_name)
            ServiceAttribute(service=obj, name=prop_name, value=value).save()

    def postprocess(self, obj):
        """Copy every config_* / rest_* TOSCA property into a ServiceAttribute."""
        for name, prop in self.nodetemplate.get_properties().items():
            if name.startswith(("config_", "rest_")):
                self.set_service_attr(obj, name, prop.value)
+
diff --git a/xos/tosca/resources/ceilometertenant.py b/xos/tosca/resources/ceilometertenant.py
new file mode 100644
index 0000000..cb3a623
--- /dev/null
+++ b/xos/tosca/resources/ceilometertenant.py
@@ -0,0 +1,39 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+import pdb
+
+from services.ceilometer.models import MonitoringChannel, CeilometerService
+
+from xosresource import XOSResource
+
class XOSCeilometerTenant(XOSResource):
    """TOSCA handler mapping tosca.nodes.CeilometerTenant onto MonitoringChannel."""
    provides = "tosca.nodes.CeilometerTenant"
    xos_model = MonitoringChannel
    name_field = None

    def get_xos_args(self, throw_exception=True):
        """Build MonitoringChannel constructor kwargs from the TOSCA node."""
        args = super(XOSCeilometerTenant, self).get_xos_args()

        # The owning service comes from the MemberOfService relationship.
        provider_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
        if provider_name:
            args["provider_service"] = self.get_xos_object(CeilometerService, throw_exception=throw_exception, name=provider_name)

        return args

    def get_existing_objs(self):
        """Return the MonitoringChannel already bound to this provider service, if any."""
        args = self.get_xos_args(throw_exception=False)
        # BUG FIX: get_xos_args() stores the service under "provider_service",
        # but this method read "provider", so the lookup always returned [] and
        # a duplicate tenant was created on every TOSCA run.  Also pass the
        # model class, matching the get_xos_object(CeilometerService, ...)
        # call above — NOTE(review): confirm against XOSResource.get_xos_object.
        provider_service = args.get("provider_service", None)
        if provider_service:
            return [ self.get_xos_object(MonitoringChannel, provider_service=provider_service) ]
        return []

    def postprocess(self, obj):
        # Nothing to do after creation for this tenant type.
        pass

    def can_delete(self, obj):
        return super(XOSCeilometerTenant, self).can_delete(obj)
+
diff --git a/xos/tosca/resources/sflowservice.py b/xos/tosca/resources/sflowservice.py
new file mode 100644
index 0000000..272518e
--- /dev/null
+++ b/xos/tosca/resources/sflowservice.py
@@ -0,0 +1,41 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+
+from core.models import ServiceAttribute
+from services.ceilometer.models import SFlowService
+
+from service import XOSService
+
class XOSSFlowService(XOSService):
    """TOSCA handler mapping tosca.nodes.SFlowService onto SFlowService."""
    provides = "tosca.nodes.SFlowService"
    xos_model = SFlowService
    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", "sflow_port", "sflow_api_port"]

    def set_service_attr(self, obj, prop_name, value):
        """Create or update the ServiceAttribute prop_name on obj.

        Falsy values (after intrinsic-function resolution) are skipped, and the
        database is only touched when the stored value actually differs.
        """
        value = self.try_intrinsic_function(value)
        if not value:
            return
        existing = ServiceAttribute.objects.filter(service=obj, name=prop_name)
        if existing:
            attr = existing[0]
            if attr.value != value:
                self.info("updating attribute %s" % prop_name)
                attr.value = value
                attr.save()
        else:
            self.info("adding attribute %s" % prop_name)
            ServiceAttribute(service=obj, name=prop_name, value=value).save()

    def postprocess(self, obj):
        """Copy every config_* / rest_* TOSCA property into a ServiceAttribute."""
        for name, prop in self.nodetemplate.get_properties().items():
            if name.startswith(("config_", "rest_")):
                self.set_service_attr(obj, name, prop.value)
+