Monitoring publisher tenant changes
Code changes include:
disable_monitoring_service.yaml for disabling monitoring of the service.
enable_monitoring_service.yaml for enabling monitoring of the service.
install_monitoring_ceilometer.sh for installing monitoring agent in ceilometer and compute nodes.
Change-Id: I7f8d845cc59786eb25382b51573932fc6c9e5fac
diff --git a/xos/admin.py b/xos/admin.py
index b2a9242..fdc7e46 100644
--- a/xos/admin.py
+++ b/xos/admin.py
@@ -10,7 +10,7 @@
from django.utils import timezone
from django.contrib.contenttypes import generic
from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline, TenantAttrAsTabInline
+from core.admin import XOSBaseAdmin,ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline, TenantAttrAsTabInline, UploadTextareaWidget
from core.middleware import get_request
from functools import update_wrapper
@@ -59,6 +59,18 @@
suit_form_includes = (('ceilometeradmin.html', 'top', 'administration'),
)
+ #actions=['delete_selected_objects']
+
+ #def get_actions(self, request):
+ # actions = super(CeilometerServiceAdmin, self).get_actions(request)
+ # if 'delete_selected' in actions:
+ # del actions['delete_selected']
+ # return actions
+
+ #def delete_selected_objects(self, request, queryset):
+ # for obj in queryset:
+ # obj.delete()
+ #delete_selected_objects.short_description = "Delete Selected Ceilometer Service Objects"
def get_queryset(self, request):
return CeilometerService.get_service_objects_by_user(request.user)
@@ -100,18 +112,18 @@
form = MonitoringChannelForm
suit_form_tabs = (('general','Details'),)
- actions=['delete_selected_objects']
+ #actions=['delete_selected_objects']
- def get_actions(self, request):
- actions = super(MonitoringChannelAdmin, self).get_actions(request)
- if 'delete_selected' in actions:
- del actions['delete_selected']
- return actions
+ #def get_actions(self, request):
+ # actions = super(MonitoringChannelAdmin, self).get_actions(request)
+ # if 'delete_selected' in actions:
+ # del actions['delete_selected']
+ # return actions
- def delete_selected_objects(self, request, queryset):
- for obj in queryset:
- obj.delete()
- delete_selected_objects.short_description = "Delete Selected MonitoringChannel Objects"
+ #def delete_selected_objects(self, request, queryset):
+ # for obj in queryset:
+ # obj.delete()
+ #delete_selected_objects.short_description = "Delete Selected MonitoringChannel Objects"
def get_queryset(self, request):
return MonitoringChannel.get_tenant_objects_by_user(request.user)
@@ -211,8 +223,204 @@
def get_queryset(self, request):
return SFlowTenant.get_tenant_objects_by_user(request.user)
+class OpenStackServiceMonitoringPublisherForm(forms.ModelForm):
+ creator = forms.ModelChoiceField(queryset=User.objects.all())
+
+ def __init__(self,*args,**kwargs):
+ super (OpenStackServiceMonitoringPublisherForm,self ).__init__(*args,**kwargs)
+ self.fields['kind'].widget.attrs['readonly'] = True
+ self.fields['provider_service'].queryset = CeilometerService.get_service_objects().all()
+ if self.instance:
+ # fields for the attributes
+ self.fields['creator'].initial = self.instance.creator
+ if (not self.instance) or (not self.instance.pk):
+ # default fields for an 'add' form
+ self.fields['kind'].initial = CEILOMETER_PUBLISH_TENANT_OS_KIND
+ self.fields['creator'].initial = get_request().user
+ if CeilometerService.get_service_objects().exists():
+ self.fields["provider_service"].initial = CeilometerService.get_service_objects().all()[0]
+
+ def save(self, commit=True):
+ self.instance.creator = self.cleaned_data.get("creator")
+ return super(OpenStackServiceMonitoringPublisherForm, self).save(commit=commit)
+
+ class Meta:
+ model = OpenStackServiceMonitoringPublisher
+ fields = '__all__'
+
+class OpenStackServiceMonitoringPublisherAdmin(ReadOnlyAwareAdmin):
+ list_display = ('backend_status_icon', 'id', )
+ list_display_links = ('backend_status_icon', 'id')
+ fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'service_specific_attribute', 'creator'],
+ 'classes':['suit-tab suit-tab-general']})]
+ readonly_fields = ('backend_status_text', 'service_specific_attribute' )
+ form = OpenStackServiceMonitoringPublisherForm
+
+ suit_form_tabs = (('general','Details'),)
+ actions=['delete_selected_objects']
+
+ def get_actions(self, request):
+ actions = super(OpenStackServiceMonitoringPublisherAdmin, self).get_actions(request)
+ if 'delete_selected' in actions:
+ del actions['delete_selected']
+ return actions
+
+ def delete_selected_objects(self, request, queryset):
+ for obj in queryset:
+ obj.delete()
+ delete_selected_objects.short_description = "Delete Selected OpenStackServiceMonitoringPublisher Objects"
+
+ def get_queryset(self, request):
+ return OpenStackServiceMonitoringPublisher.get_tenant_objects_by_user(request.user)
+
+class ONOSServiceMonitoringPublisherForm(forms.ModelForm):
+ creator = forms.ModelChoiceField(queryset=User.objects.all())
+ onos_service_endpoints = forms.CharField(max_length=1024, help_text="IP addresses of all the ONOS services to be monitored")
+
+ def __init__(self,*args,**kwargs):
+ super (ONOSServiceMonitoringPublisherForm,self ).__init__(*args,**kwargs)
+ self.fields['kind'].widget.attrs['readonly'] = True
+ self.fields['provider_service'].queryset = CeilometerService.get_service_objects().all()
+ if self.instance:
+ # fields for the attributes
+ self.fields['creator'].initial = self.instance.creator
+ self.fields['onos_service_endpoints'].initial = self.instance.onos_service_endpoints
+ if (not self.instance) or (not self.instance.pk):
+ # default fields for an 'add' form
+ self.fields['kind'].initial = CEILOMETER_PUBLISH_TENANT_ONOS_KIND
+ self.fields['creator'].initial = get_request().user
+ if CeilometerService.get_service_objects().exists():
+ self.fields["provider_service"].initial = CeilometerService.get_service_objects().all()[0]
+
+ def save(self, commit=True):
+ self.instance.creator = self.cleaned_data.get("creator")
+ self.instance.onos_service_endpoints = self.cleaned_data.get("onos_service_endpoints")
+ return super(ONOSServiceMonitoringPublisherForm, self).save(commit=commit)
+
+ class Meta:
+ model = ONOSServiceMonitoringPublisher
+ fields = '__all__'
+
+class ONOSServiceMonitoringPublisherAdmin(ReadOnlyAwareAdmin):
+ list_display = ('backend_status_icon', 'id', )
+ list_display_links = ('backend_status_icon', 'id')
+ fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'service_specific_attribute', 'creator', 'onos_service_endpoints'],
+ 'classes':['suit-tab suit-tab-general']})]
+ readonly_fields = ('backend_status_text', 'service_specific_attribute' )
+ form = ONOSServiceMonitoringPublisherForm
+
+ suit_form_tabs = (('general','Details'),)
+ actions=['delete_selected_objects']
+
+ def get_actions(self, request):
+ actions = super(ONOSServiceMonitoringPublisherAdmin, self).get_actions(request)
+ if 'delete_selected' in actions:
+ del actions['delete_selected']
+ return actions
+
+ def delete_selected_objects(self, request, queryset):
+ for obj in queryset:
+ obj.delete()
+    delete_selected_objects.short_description = "Delete Selected ONOSServiceMonitoringPublisher Objects"
+
+ def get_queryset(self, request):
+ return ONOSServiceMonitoringPublisher.get_tenant_objects_by_user(request.user)
+
+class UserServiceMonitoringPublisherForm(forms.ModelForm):
+ creator = forms.ModelChoiceField(queryset=User.objects.all())
+ exclude_service_list = ['ceilometer', 'onos', 'VTN', 'vROUTER', 'vOLT', 'vTR']
+ target_service = forms.ModelChoiceField(queryset=Service.objects.all().exclude(kind__in=exclude_service_list))
+
+ def __init__(self,*args,**kwargs):
+ super (UserServiceMonitoringPublisherForm,self ).__init__(*args,**kwargs)
+ self.fields['kind'].widget.attrs['readonly'] = True
+ self.fields['provider_service'].queryset = CeilometerService.get_service_objects().all()
+ if self.instance:
+ # fields for the attributes
+ self.fields['creator'].initial = self.instance.creator
+ self.fields['target_service'].initial = self.instance.target_service
+ if (not self.instance) or (not self.instance.pk):
+ # default fields for an 'add' form
+ self.fields['kind'].initial = CEILOMETER_PUBLISH_TENANT_USER_KIND
+ self.fields['creator'].initial = get_request().user
+ if CeilometerService.get_service_objects().exists():
+ self.fields["provider_service"].initial = CeilometerService.get_service_objects().all()[0]
+
+ def save(self, commit=True):
+ self.instance.creator = self.cleaned_data.get("creator")
+ self.instance.target_service = self.cleaned_data.get("target_service")
+ return super(UserServiceMonitoringPublisherForm, self).save(commit=commit)
+
+ class Meta:
+ model = UserServiceMonitoringPublisher
+ fields = '__all__'
+
+class UserServiceMonitoringPublisherAdmin(ReadOnlyAwareAdmin):
+ list_display = ('backend_status_icon', 'id', )
+ list_display_links = ('backend_status_icon', 'id')
+ fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'service_specific_attribute', 'creator', 'target_service'],
+ 'classes':['suit-tab suit-tab-general']})]
+ readonly_fields = ('backend_status_text', 'service_specific_attribute' )
+ form = UserServiceMonitoringPublisherForm
+
+ suit_form_tabs = (('general','Details'),)
+ actions=['delete_selected_objects']
+
+ def get_actions(self, request):
+ actions = super(UserServiceMonitoringPublisherAdmin, self).get_actions(request)
+ if 'delete_selected' in actions:
+ del actions['delete_selected']
+ return actions
+
+ def delete_selected_objects(self, request, queryset):
+ for obj in queryset:
+ obj.delete()
+ delete_selected_objects.short_description = "Delete Selected UserServiceMonitoringPublisher Objects"
+
+ def get_queryset(self, request):
+ return UserServiceMonitoringPublisher.get_tenant_objects_by_user(request.user)
+
+class InfraMonitoringAgentInfoForm(forms.ModelForm):
+ class Meta:
+ model = InfraMonitoringAgentInfo
+ widgets = {
+ 'start_url_json_data': UploadTextareaWidget(attrs={'rows': 5, 'cols': 80, 'class': "input-xxlarge"}),
+ }
+ fields = '__all__'
+
+class InfraMonitoringAgentInfoAdmin(XOSBaseAdmin):
+ list_display = ('backend_status_icon', 'name', 'id', )
+ list_display_links = ('backend_status_icon', 'name', 'id')
+ fieldsets = [ (None, {'fields': ['name', 'start_url', 'start_url_json_data', 'stop_url', 'monitoring_publisher'],
+ 'classes':['suit-tab suit-tab-general']})]
+ form = InfraMonitoringAgentInfoForm
+
+ suit_form_tabs = (('general','Details'),)
+
+class MonitoringCollectorPluginInfoForm(forms.ModelForm):
+ class Meta:
+ model = MonitoringCollectorPluginInfo
+ #widgets = {
+ # 'plugin_notification_handlers_json': UploadTextareaWidget(attrs={'rows': 5, 'cols': 80, 'class': "input-xxlarge"}),
+ #}
+ fields = '__all__'
+
+class MonitoringCollectorPluginInfoAdmin(XOSBaseAdmin):
+ list_display = ('backend_status_icon', 'name', 'id', )
+ list_display_links = ('backend_status_icon', 'name', 'id')
+ fieldsets = [ (None, {'fields': ['name', 'plugin_folder_path', 'plugin_rabbit_exchange', 'monitoring_publisher'],
+ 'classes':['suit-tab suit-tab-general']})]
+ form = MonitoringCollectorPluginInfoForm
+
+ suit_form_tabs = (('general','Details'),)
+
admin.site.register(CeilometerService, CeilometerServiceAdmin)
admin.site.register(SFlowService, SFlowServiceAdmin)
admin.site.register(MonitoringChannel, MonitoringChannelAdmin)
admin.site.register(SFlowTenant, SFlowTenantAdmin)
+admin.site.register(OpenStackServiceMonitoringPublisher, OpenStackServiceMonitoringPublisherAdmin)
+admin.site.register(ONOSServiceMonitoringPublisher, ONOSServiceMonitoringPublisherAdmin)
+admin.site.register(UserServiceMonitoringPublisher, UserServiceMonitoringPublisherAdmin)
+admin.site.register(InfraMonitoringAgentInfo, InfraMonitoringAgentInfoAdmin)
+admin.site.register(MonitoringCollectorPluginInfo, MonitoringCollectorPluginInfoAdmin)
diff --git a/xos/models.py b/xos/models.py
index 0960152..cf7a23b 100644
--- a/xos/models.py
+++ b/xos/models.py
@@ -1,5 +1,6 @@
from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
+from django.core.validators import URLValidator
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, CoarseTenant, ServiceMonitoringAgentInfo
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
@@ -13,6 +14,11 @@
from urlparse import urlparse
CEILOMETER_KIND = "ceilometer"
+#Ensure the length of name for 'kind' attribute is below 30
+CEILOMETER_PUBLISH_TENANT_KIND = "ceilo-publish-tenant"
+CEILOMETER_PUBLISH_TENANT_OS_KIND = "ceilo-os-publish-tenant"
+CEILOMETER_PUBLISH_TENANT_ONOS_KIND = "ceilo-onos-publish-tenant"
+CEILOMETER_PUBLISH_TENANT_USER_KIND = "ceilo-user-publish-tenant"
class CeilometerService(Service):
KIND = CEILOMETER_KIND
@@ -113,8 +119,48 @@
return self.get_controller().admin_tenant
return 'admin'
+ @property
+ def ceilometer_rabbit_compute_node(self):
+ if not self.get_instance():
+ return None
+ return self.get_instance().node.name
+
+ @property
+ def ceilometer_rabbit_host(self):
+ if not self.get_instance():
+ return None
+ return self.nat_ip
+
+ @property
+ def ceilometer_rabbit_user(self):
+ if not self.get_instance():
+ return None
+ return 'openstack'
+
+ @property
+ def ceilometer_rabbit_password(self):
+ if not self.get_instance():
+ return None
+ return 'password'
+
+ @property
+ def ceilometer_rabbit_uri(self):
+ if not self.get_instance():
+ return None
+ if not self.private_ip:
+ return None
+ return 'rabbit://openstack:password@' + self.private_ip + ':5672'
+
+ def delete(self, *args, **kwargs):
+ instance = self.get_instance()
+ if instance:
+ instance.delete()
+ super(CeilometerService, self).delete(*args, **kwargs)
+
+
class MonitoringChannel(TenantWithContainer): # aka 'CeilometerTenant'
class Meta:
+ app_label = "monitoring"
proxy = True
KIND = CEILOMETER_KIND
@@ -151,7 +197,7 @@
if self.pk is None:
#Allow only one monitoring channel per user
- channel_count = sum ( [1 for channel in MonitoringChannel.objects.filter(kind=CEILOMETER_KIND) if (channel.creator == self.creator)] )
+ channel_count = sum ( [1 for channel in MonitoringChannel.get_tenant_objects().all() if (channel.creator == self.creator)] )
if channel_count > 0:
raise XOSValidationError("Already %s channels exist for user Can only create max 1 MonitoringChannel instance per user" % str(channel_count))
@@ -293,6 +339,256 @@
mc = mc[0]
mc.manage_container()
+#@receiver(models.signals.post_delete, sender=MonitoringChannel)
+#def cleanup_monitoring_channel(sender, o, *args, **kwargs):
+# #o.cleanup_container()
+# #Temporary change only, remove the below code after testing
+# if o.instance:
+# o.instance.delete()
+# o.instance = None
+
+class MonitoringPublisher(Tenant):
+ class Meta:
+ app_label = "monitoring"
+ proxy = True
+
+ KIND = CEILOMETER_PUBLISH_TENANT_KIND
+
+ default_attributes = {}
+ def __init__(self, *args, **kwargs):
+ ceilometer_services = CeilometerService.get_service_objects().all()
+ if ceilometer_services:
+ self._meta.get_field("provider_service").default = ceilometer_services[0].id
+ super(MonitoringPublisher, self).__init__(*args, **kwargs)
+
+ def can_update(self, user):
+ #Allow creation of this model instances for non-admin users also
+ return True
+
+ @property
+ def creator(self):
+ from core.models import User
+ if getattr(self, "cached_creator", None):
+ return self.cached_creator
+ creator_id=self.get_attribute("creator_id")
+ if not creator_id:
+ return None
+ users=User.objects.filter(id=creator_id)
+ if not users:
+ return None
+ user=users[0]
+ self.cached_creator = users[0]
+ return user
+
+ @creator.setter
+ def creator(self, value):
+ if value:
+ value = value.id
+ if (value != self.get_attribute("creator_id", None)):
+ self.cached_creator=None
+ self.set_attribute("creator_id", value)
+
+class OpenStackServiceMonitoringPublisher(MonitoringPublisher):
+ class Meta:
+ app_label = "monitoring"
+ proxy = True
+
+ KIND = CEILOMETER_PUBLISH_TENANT_OS_KIND
+
+ def __init__(self, *args, **kwargs):
+ super(OpenStackServiceMonitoringPublisher, self).__init__(*args, **kwargs)
+
+ def can_update(self, user):
+ #Don't allow creation of this model instances for non-admin users also
+ return False
+
+ def save(self, *args, **kwargs):
+ if not self.creator:
+ if not getattr(self, "caller", None):
+                # caller must be set when creating an OpenStackServiceMonitoringPublisher
+ raise XOSProgrammingError("OpenStackServiceMonitoringPublisher's self.caller was not set")
+ self.creator = self.caller
+ if not self.creator:
+ raise XOSProgrammingError("OpenStackServiceMonitoringPublisher's self.creator was not set")
+
+ if self.pk is None:
+ #Allow only one openstack monitoring publisher per admin user
+ publisher_count = sum ( [1 for ospublisher in OpenStackServiceMonitoringPublisher.get_tenant_objects().all() if (ospublisher.creator == self.creator)] )
+ if publisher_count > 0:
+ raise XOSValidationError("Already %s openstack publishers exist for user Can only create max 1 OpenStackServiceMonitoringPublisher instance per user" % str(publisher_count))
+
+ super(OpenStackServiceMonitoringPublisher, self).save(*args, **kwargs)
+
+class ONOSServiceMonitoringPublisher(MonitoringPublisher):
+ class Meta:
+ app_label = "monitoring"
+ proxy = True
+
+ KIND = CEILOMETER_PUBLISH_TENANT_ONOS_KIND
+
+ def __init__(self, *args, **kwargs):
+ super(ONOSServiceMonitoringPublisher, self).__init__(*args, **kwargs)
+
+ def can_update(self, user):
+ #Don't allow creation of this model instances for non-admin users also
+ return False
+
+ def save(self, *args, **kwargs):
+ if not self.creator:
+ if not getattr(self, "caller", None):
+                # caller must be set when creating an ONOSServiceMonitoringPublisher
+ raise XOSProgrammingError("ONOSServiceMonitoringPublisher's self.caller was not set")
+ self.creator = self.caller
+ if not self.creator:
+ raise XOSProgrammingError("ONOSServiceMonitoringPublisher's self.creator was not set")
+
+ if self.pk is None:
+            #Allow only one ONOS monitoring publisher per admin user
+ publisher_count = sum ( [1 for onospublisher in ONOSServiceMonitoringPublisher.get_tenant_objects().all() if (onospublisher.creator == self.creator)] )
+ if publisher_count > 0:
+                raise XOSValidationError("Already %s ONOS publishers exist for user. Can only create max 1 ONOSServiceMonitoringPublisher instance per user" % str(publisher_count))
+
+ super(ONOSServiceMonitoringPublisher, self).save(*args, **kwargs)
+
+class UserServiceMonitoringPublisher(MonitoringPublisher):
+ class Meta:
+ app_label = "monitoring"
+ proxy = True
+
+ KIND = CEILOMETER_PUBLISH_TENANT_USER_KIND
+
+ def __init__(self, *args, **kwargs):
+ self.cached_target_service = None
+ self.cached_tenancy_from_target_service = None
+ self.cached_service_monitoring_agent = None
+ super(UserServiceMonitoringPublisher, self).__init__(*args, **kwargs)
+
+ def can_update(self, user):
+        #Allow creation of this model instances for non-admin users also
+ return True
+
+ @property
+ def target_service(self):
+ if getattr(self, "cached_target_service", None):
+ return self.cached_target_service
+ target_service_id = self.get_attribute("target_service_id")
+ if not target_service_id:
+ return None
+ services = Service.objects.filter(id=target_service_id)
+ if not services:
+ return None
+ target_service = services[0]
+ self.cached_target_service = target_service
+ return target_service
+
+ @target_service.setter
+ def target_service(self, value):
+ if value:
+ value = value.id
+ if (value != self.get_attribute("target_service_id", None)):
+ self.cached_target_service = None
+ self.set_attribute("target_service_id", value)
+
+ @property
+ def tenancy_from_target_service(self):
+ if getattr(self, "cached_tenancy_from_target_service", None):
+ return self.cached_tenancy_from_target_service
+ tenancy_from_target_service_id = self.get_attribute("tenancy_from_target_service_id")
+ if not tenancy_from_target_service_id:
+ return None
+ tenancy_from_target_services = CoarseTenant.objects.filter(id=tenancy_from_target_service_id)
+ if not tenancy_from_target_services:
+ return None
+ tenancy_from_target_service = tenancy_from_target_services[0]
+ self.cached_tenancy_from_target_service = tenancy_from_target_service
+ return tenancy_from_target_service
+
+ @tenancy_from_target_service.setter
+ def tenancy_from_target_service(self, value):
+ if value:
+ value = value.id
+ if (value != self.get_attribute("tenancy_from_target_service_id", None)):
+ self.cached_tenancy_from_target_service = None
+ self.set_attribute("tenancy_from_target_service_id", value)
+
+ @property
+ def service_monitoring_agent(self):
+ if getattr(self, "cached_service_monitoring_agent", None):
+ return self.cached_service_monitoring_agent
+ service_monitoring_agent_id = self.get_attribute("service_monitoring_agent")
+ if not service_monitoring_agent_id:
+ return None
+ service_monitoring_agents = CoarseTenant.objects.filter(id=service_monitoring_agent_id)
+ if not service_monitoring_agents:
+ return None
+ service_monitoring_agent = service_monitoring_agents[0]
+ self.cached_service_monitoring_agent = service_monitoring_agent
+ return service_monitoring_agent
+
+ @service_monitoring_agent.setter
+ def service_monitoring_agent(self, value):
+ if value:
+ value = value.id
+ if (value != self.get_attribute("service_monitoring_agent", None)):
+ self.cached_service_monitoring_agent = None
+ self.set_attribute("service_monitoring_agent", value)
+
+ def save(self, *args, **kwargs):
+ if not self.creator:
+ if not getattr(self, "caller", None):
+                # caller must be set when creating a UserServiceMonitoringPublisher
+ raise XOSProgrammingError("UserServiceMonitoringPublisher's self.caller was not set")
+ self.creator = self.caller
+ if not self.creator:
+ raise XOSProgrammingError("UserServiceMonitoringPublisher's self.creator was not set")
+
+ tenancy_from_target_service = None
+ if self.pk is None:
+ if self.target_service is None:
+ raise XOSValidationError("Target service is not specified in UserServiceMonitoringPublisher")
+ #Allow only one monitoring publisher for a given service
+ publisher_count = sum ( [1 for publisher in UserServiceMonitoringPublisher.get_tenant_objects().all() if (publisher.target_service.id == self.target_service.id)] )
+ if publisher_count > 0:
+ raise XOSValidationError("Already %s publishers exist for service. Can only create max 1 UserServiceMonitoringPublisher instances" % str(publisher_count))
+ #Create Service composition object here
+ tenancy_from_target_service = CoarseTenant(provider_service = self.provider_service,
+ subscriber_service = self.target_service)
+ tenancy_from_target_service.save()
+ self.tenancy_from_target_service = tenancy_from_target_service
+
+ target_uri = CeilometerService.objects.get(id=self.provider_service.id).ceilometer_rabbit_uri
+ if target_uri is None:
+ raise XOSProgrammingError("Unable to get the Target_URI for Monitoring Agent")
+ service_monitoring_agent = ServiceMonitoringAgentInfo(service = self.target_service,
+ target_uri = target_uri)
+ service_monitoring_agent.save()
+ self.service_monitoring_agent = service_monitoring_agent
+
+ try:
+ super(UserServiceMonitoringPublisher, self).save(*args, **kwargs)
+ except:
+ if tenancy_from_target_service:
+ tenancy_from_target_service.delete()
+ if service_monitoring_agent:
+ service_monitoring_agent.delete()
+ raise
+
+class InfraMonitoringAgentInfo(ServiceMonitoringAgentInfo):
+ class Meta:
+ app_label = "monitoring"
+ start_url = models.TextField(validators=[URLValidator()], help_text="URL/API to be used to start monitoring agent")
+ start_url_json_data = models.TextField(help_text="Metadata to be passed along with start API")
+ stop_url = models.TextField(validators=[URLValidator()], help_text="URL/API to be used to stop monitoring agent")
+ monitoring_publisher = models.ForeignKey(MonitoringPublisher, related_name="monitoring_agents", null=True, blank=True)
+
+class MonitoringCollectorPluginInfo(PlCoreBase):
+ class Meta:
+ app_label = "monitoring"
+ name = models.CharField(max_length=32)
+ plugin_folder_path = StrippedCharField(blank=True, null=True, max_length=1024, help_text="Path pointing to plugin files. e.g. /opt/xos/synchronizers/monitoring/ceilometer/ceilometer-plugins/network/ext_services/vsg/")
+ plugin_rabbit_exchange = StrippedCharField(blank=True, null=True, max_length=100)
+ #plugin_notification_handlers_json = models.TextField(blank=True, null=True, help_text="Dictionary of notification handler classes. e.g {\"name\":\"plugin handler main class\"}")
+ monitoring_publisher = models.OneToOneField(MonitoringPublisher, related_name="monitoring_collector_plugin", null=True, blank=True)
SFLOW_KIND = "sflow"
SFLOW_PORT = 6343
@@ -402,7 +698,7 @@
if self.pk is None:
#Allow only one sflow channel per user and listening_endpoint
- channel_count = sum ( [1 for channel in SFlowTenant.objects.filter(kind=SFLOW_KIND) if ((channel.creator == self.creator) and (channel.listening_endpoint == self.listening_endpoint))] )
+ channel_count = sum ( [1 for channel in SFlowTenant.get_tenant_objects().all() if ((channel.creator == self.creator) and (channel.listening_endpoint == self.listening_endpoint))] )
if channel_count > 0:
raise XOSValidationError("Already %s sflow channels exist for user Can only create max 1 tenant per user and listening endpoint" % str(channel_count))
diff --git a/xos/monitoring-onboard.yaml b/xos/monitoring-onboard.yaml
index 65ef2da..21a6174 100644
--- a/xos/monitoring-onboard.yaml
+++ b/xos/monitoring-onboard.yaml
@@ -18,7 +18,8 @@
admin_template: templates/ceilometeradmin.html, templates/sflowadmin.html
synchronizer: synchronizer/manifest
synchronizer_run: monitoring_synchronizer.py
- tosca_resource: tosca/resources/ceilometerservice.py, tosca/resources/ceilometertenant.py, tosca/resources/sflowservice.py
+ tosca_custom_types: monitoring_tosca_types.yaml
+ tosca_resource: tosca/resources/ceilometerservice.py, tosca/resources/ceilometertenant.py, tosca/resources/sflowservice.py, tosca/resources/openstackmonitoringpublisher.py, tosca/resources/onosmonitoringpublisher.py, tosca/resources/userservicemonitoringpublisher.py, tosca/resources/inframonitoringagentinfo.py, tosca/resources/monitoringcollectorplugininfo.py
rest_tenant: subdirectory:monitoring api/tenant/monitoring/monitoringchannel.py, subdirectory:monitoring/dashboard api/tenant/monitoring/dashboard/ceilometerdashboard.py
private_key: file:///opt/xos/key_import/monitoringservice_rsa
public_key: file:///opt/xos/key_import/monitoringservice_rsa.pub
diff --git a/xos/monitoring_tosca_types.yaml b/xos/monitoring_tosca_types.yaml
new file mode 100644
index 0000000..928bb2c
--- /dev/null
+++ b/xos/monitoring_tosca_types.yaml
@@ -0,0 +1,243 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+# compile this with "m4 monitoring_custom_tosca_types.m4 > monitoring_custom_tosca_types.yaml"
+
+# include macros
+# Note: Tosca derived_from isn't working the way I think it should, it's not
+# inheriting from the parent template. Until we get that figured out, use
+# m4 macros do our inheritance
+
+
+# Service
+
+
+# Subscriber
+
+
+
+
+# end m4 macros
+
+
+
+node_types:
+ tosca.nodes.SFlowService:
+ derived_from: tosca.nodes.Root
+ description: >
+ XOS SFlow Collection Service
+ capabilities:
+ scalable:
+ type: tosca.capabilities.Scalable
+ service:
+ type: tosca.capabilities.xos.Service
+ properties:
+ no-delete:
+ type: boolean
+ default: false
+ description: Do not allow Tosca to delete this object
+ no-create:
+ type: boolean
+ default: false
+ description: Do not allow Tosca to create this object
+ no-update:
+ type: boolean
+ default: false
+ description: Do not allow Tosca to update this object
+ kind:
+ type: string
+ default: generic
+ description: Type of service.
+ view_url:
+ type: string
+ required: false
+ description: URL to follow when icon is clicked in the Service Directory.
+ icon_url:
+ type: string
+ required: false
+ description: ICON to display in the Service Directory.
+ enabled:
+ type: boolean
+ default: true
+ published:
+ type: boolean
+ default: true
+ description: If True then display this Service in the Service Directory.
+ public_key:
+ type: string
+ required: false
+ description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
+ versionNumber:
+ type: string
+ required: false
+ description: Version number of Service.
+ sflow_port:
+ type: integer
+ required: false
+ default: 6343
+ description: sFlow listening port
+ sflow_api_port:
+ type: integer
+ required: false
+ default: 33333
+ description: sFlow publish subscribe api listening port
+
+ tosca.nodes.CeilometerService:
+ derived_from: tosca.nodes.Root
+ description: >
+ XOS Ceilometer Service
+ capabilities:
+ scalable:
+ type: tosca.capabilities.Scalable
+ service:
+ type: tosca.capabilities.xos.Service
+ properties:
+ no-delete:
+ type: boolean
+ default: false
+ description: Do not allow Tosca to delete this object
+ no-create:
+ type: boolean
+ default: false
+ description: Do not allow Tosca to create this object
+ no-update:
+ type: boolean
+ default: false
+ description: Do not allow Tosca to update this object
+ kind:
+ type: string
+ default: generic
+ description: Type of service.
+ view_url:
+ type: string
+ required: false
+ description: URL to follow when icon is clicked in the Service Directory.
+ icon_url:
+ type: string
+ required: false
+ description: ICON to display in the Service Directory.
+ enabled:
+ type: boolean
+ default: true
+ published:
+ type: boolean
+ default: true
+ description: If True then display this Service in the Service Directory.
+ public_key:
+ type: string
+ required: false
+ description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
+ versionNumber:
+ type: string
+ required: false
+ description: Version number of Service.
+ ceilometer_pub_sub_url:
+ type: string
+ required: false
+ description: REST URL of ceilometer PUB/SUB component
+
+ tosca.nodes.CeilometerTenant:
+ derived_from: tosca.nodes.Root
+ description: >
+ CORD: A Tenant of the Ceilometer Service.
+ properties:
+ kind:
+ type: string
+ default: generic
+ description: Kind of tenant
+
+ tosca.nodes.InfraMonitoringAgentInfo:
+ derived_from: tosca.nodes.Root
+ description: >
+ Infra Monitoring agent info
+ capabilities:
+ inframonitoringagentinfo:
+ type: tosca.capabilities.xos.InfraMonitoringAgentInfo
+ properties:
+ start_url:
+ type: string
+ required: true
+ description: REST URL to be invoked to start monitoring agent
+ start_url_json_data:
+ type: string
+ required: false
+ description: Meta data to be sent along with start API
+ stop_url:
+ type: string
+ required: true
+ description: REST URL to be invoked to stop monitoring agent
+
+ tosca.relationships.ProvidesInfraMonitoringAgentInfo:
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.xos.InfraMonitoringAgentInfo ]
+
+ tosca.capabilities.xos.InfraMonitoringAgentInfo:
+ derived_from: tosca.capabilities.Root
+ description: An XOS Infra MonitoringAgentInfo
+
+ tosca.nodes.MonitoringCollectorPluginInfo:
+ derived_from: tosca.nodes.Root
+ description: >
+ Monitoring collector plugin info
+ capabilities:
+ monitoringcollectorplugininfo:
+ type: tosca.capabilities.xos.MonitoringCollectorPluginInfo
+ properties:
+ plugin_folder_path:
+ type: string
+ required: true
+ description: Folder path pointing to plugin files.
+ plugin_rabbit_exchange:
+ type: string
+ required: true
+ description: RabbitMQ exchange used by Monitoring collector notification plugin
+
+ tosca.relationships.ProvidesMonitoringCollectorPluginInfo:
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.xos.MonitoringCollectorPluginInfo ]
+
+ tosca.capabilities.xos.MonitoringCollectorPluginInfo:
+ derived_from: tosca.capabilities.Root
+ description: An XOS MonitoringCollectorPluginInfo
+
+ tosca.nodes.OpenStackMonitoringPublisher:
+ derived_from: tosca.nodes.Root
+ description: >
+ A OpenStack Publish Tenant of the Monitoring service
+ properties:
+ kind:
+ type: string
+ default: generic
+ description: Kind of tenant
+
+ tosca.nodes.ONOSMonitoringPublisher:
+ derived_from: tosca.nodes.Root
+ description: >
+ A ONOS Publish Tenant of the Monitoring service
+ properties:
+ kind:
+ type: string
+ default: generic
+ description: Kind of tenant
+
+ tosca.relationships.PublishesMonitoringData:
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.xos.Service ]
+
+ tosca.nodes.UserServiceMonitoringPublisher:
+ derived_from: tosca.nodes.Root
+ description: >
+ A Publish Tenant of the Monitoring service user defined services
+ properties:
+ kind:
+ type: string
+ default: generic
+ description: Kind of tenant
+
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/README b/xos/synchronizer/ceilometer/monitoring_agent/README
new file mode 100644
index 0000000..c367075
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/README
@@ -0,0 +1,9 @@
+Sample curl commands to test Monitoring agent:
+---------------------------------------------
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}}' -L http://nova-compute-1:5004/monitoring/agent/onos/start
+
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455"}' -L http://nova-compute-1:5004/monitoring/agent/openstack/start
+
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455"}' -L http://nova-compute-1:5004/monitoring/agent/openstack/stop
+
+curl -i -H "Content-Type: application/json" -X POST -d '{"target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}}' -L http://nova-compute-1:5004/monitoring/agent/onos/stop
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml b/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml
new file mode 100644
index 0000000..9d36bcb
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml
@@ -0,0 +1,79 @@
+---
+- hosts: '{{ instance_name }}'
+ gather_facts: False
+ connection: ssh
+ user: ubuntu
+ sudo: yes
+
+ tasks:
+
+ - name: Installing python-dev
+ apt: name=python-dev state=present update_cache=yes
+
+ - name: Installing Flask
+ pip: name=Flask
+
+ - name: Verify if ([monitoring_agent] is to avoid capturing the shell process) is already running
+ shell: pgrep -f [m]onitoring_agent | wc -l
+ register: monitoringagent_job_pids_count
+
+ - name: DEBUG
+ debug: var=monitoringagent_job_pids_count.stdout
+
+ - name: stop /usr/local/share/monitoring_agent if already running
+ shell: pkill -f /usr/local/share/monitoring_agent/monitoring_agent.py
+ ignore_errors: True
+ when: monitoringagent_job_pids_count.stdout != "0"
+
+ - name: Deleting monitoring agent folder(if already exists)
+ file: path=/usr/local/share/monitoring_agent state=absent owner=root group=root
+
+ - name: make sure /usr/local/share/monitoring_agent exists
+ file: path=/usr/local/share/monitoring_agent state=directory owner=root group=root
+
+ - name: Copying monitoring agent conf file
+ when : "'ceilometer' in instance_name"
+ set_fact: ceilometer_services="ceilometer-agent-central,ceilometer-agent-notification,ceilometer-collector,ceilometer-api"
+
+ - name: Copying monitoring agent conf file
+ when : "'ceilometer' not in instance_name"
+ set_fact: ceilometer_services="ceilometer-agent-compute"
+
+ - name : DEBUG
+ debug: var=ceilometer_services
+
+ - name: Copying monitoring agent conf file
+ template: src=monitoring_agent.conf.j2 dest=/usr/local/share/monitoring_agent/monitoring_agent.conf mode=0777
+
+ - name: Copying file to /usr/local/share
+ copy: src=monitoring_agent.py dest=/usr/local/share/monitoring_agent/monitoring_agent.py mode=0777
+
+ - name: Copying file to /usr/local/share
+ copy: src=generate_pipeline.py dest=/usr/local/share/monitoring_agent/generate_pipeline.py mode=0777
+
+ - name: Copying file to /usr/local/share
+ copy: src=pipeline.yaml.j2 dest=/usr/local/share/monitoring_agent/pipeline.yaml.j2 mode=0777
+
+ - name: Copying file to /usr/local/share
+ copy: src=start_monitoring_agent.sh dest=/usr/local/share/monitoring_agent/start_monitoring_agent.sh mode=0777
+
+ - name: Starting monitoring agent
+ command: nohup python /usr/local/share/monitoring_agent/monitoring_agent.py &
+ args:
+ chdir: /usr/local/share/monitoring_agent/
+ async: 9999999999999999
+ poll: 0
+
+ - name: Configuring monitoring agent
+ shell: /usr/local/share/monitoring_agent/start_monitoring_agent.sh
+
+#TODO:
+#Copy ONOS notification handlers
+#from ~/xos_services/monitoring/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos
+#to /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/onos in the headnode ceilometer node
+#Copy a file from ~/xos_services/monitoring/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/__init__.py
+#to /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/ folder
+#Also, update the entry_points.txt with the following changes:
+#[network.statistics.drivers]
+#....
+#onos = ceilometer.network.statistics.onos.driver:ONOSDriver
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/disable_monitoring_service.yaml b/xos/synchronizer/ceilometer/monitoring_agent/disable_monitoring_service.yaml
new file mode 100644
index 0000000..4f4bc7f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/disable_monitoring_service.yaml
@@ -0,0 +1,23 @@
+---
+- hosts: '{{ instance_name }}'
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  tasks:
+  - name : stopping onos service on {{ instance_name }}
+    uri:
+      url: http://{{ instance_name }}:5004/monitoring/agent/onos/stop
+      method: POST
+      body: '{"target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}}'
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
+  - name: stopping openstack service on {{ instance_name }}
+    uri:
+      url: http://{{ instance_name }}:5004/monitoring/agent/openstack/stop
+      method: POST
+      body: '{"target":"udp://9.9.9.9:4455"}'
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/enable_monitoring_service.yaml b/xos/synchronizer/ceilometer/monitoring_agent/enable_monitoring_service.yaml
new file mode 100644
index 0000000..f4d4de5
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/enable_monitoring_service.yaml
@@ -0,0 +1,24 @@
+---
+- hosts: '{{ instance_name }}'
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  tasks:
+  - name : starting onos service on {{ instance_name }}
+    uri:
+      url: http://{{ instance_name }}:5004/monitoring/agent/onos/start
+      method: POST
+      body: '{ "target":"udp://9.9.9.9:4455", "meta_data": { "resources": ["onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http","onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http"]}}'
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
+  - name: starting openstack service on {{ instance_name }}
+    uri:
+      url: http://{{ instance_name }}:5004/monitoring/agent/openstack/start
+      method: POST
+      body: '{"target":"udp://9.9.9.9:4455"}'
+      force_basic_auth: yes
+      status_code: 200
+      body_format: json
+
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/generate_pipeline.py b/xos/synchronizer/ceilometer/monitoring_agent/generate_pipeline.py
new file mode 100644
index 0000000..beab6d6
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/generate_pipeline.py
@@ -0,0 +1,125 @@
+from jinja2 import Environment, FileSystemLoader
+from urlparse import urlparse
+import os
+
+# Capture our current directory
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+openstack_service_info=[]
+onos_service_info=[]
+class Openstack_Service():
+ def __init__(self,service_name,target):
+ self.service_name=service_name
+ self.target=target
+ self.service_enable=True
+ def update_openstack_service_info(self):
+ if not openstack_service_info:
+ openstack_service_info.append(self)
+ else:
+ for obj in openstack_service_info:
+ openstack_service_info.remove(obj)
+ openstack_service_info.append(self)
+ #openstack_service_info[0].target.append(target)
+
+class Onos_Service():
+ def __init__(self,service_name,target,resources):
+ self.service_name=service_name
+ self.target=target
+ self.resources=resources
+ self.service_enable=True
+ def update_onos_service_info(self):
+ if not onos_service_info:
+ onos_service_info.append(self)
+ else:
+ for obj in onos_service_info:
+ onos_service_info.remove(obj)
+ onos_service_info.append(self)
+ #onos_service_info[0].target.append(target)
+
+def generate_pipeline_yaml_for_openstack(target,Flag):
+ # Create the jinja2 environment.
+ # Notice the use of trim_blocks, which greatly helps control whitespace.
+ op_service=Openstack_Service("OPENSTACK",target)
+ op_service.update_openstack_service_info()
+ parse_target=urlparse(target)
+ host = parse_target.hostname
+ port = parse_target.port
+ with open("pipeline.yaml", 'w') as f:
+ j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+ trim_blocks=True)
+ context = {
+ 'openstack' : Flag,
+ 'listen_ip_addr': host,
+ 'port_number' : port
+ }
+ fp = j2_env.get_template('pipeline.yaml.j2').render (
+ context)
+ f.write(fp)
+
+def generate_pipeline_yaml_for_onos(target,resources,Flag):
+
+ onos_service=Onos_Service("ONOS",target,resources)
+ onos_service.update_onos_service_info()
+ with open("pipeline.yaml", 'w') as f:
+ j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+ trim_blocks=True)
+ context = {
+ 'onos' : Flag,
+ 'onos_endpoints' : resources,
+ 'onos_target' : target,
+ 'new_line': '\n',
+ 'new_tab': ' '
+ }
+ fp = j2_env.get_template('pipeline.yaml.j2').render (
+ context)
+ f.write(fp)
+
+def generate_pipeline_yaml_for_openstack_onos(target,Flag):
+
+ op_service=Openstack_Service("OPENSTACK",target)
+ op_service.update_openstack_service_info()
+ parse_target=urlparse(target)
+ host = parse_target.hostname
+ port = parse_target.port
+ with open("pipeline.yaml", 'w') as f:
+ j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+ trim_blocks=True)
+ context = {
+ 'openstack' : Flag,
+ 'listen_ip_addr': host,
+ 'port_number' : port,
+ 'onos' : Flag,
+ 'onos_endpoints' : onos_service_info[0].resources,
+ 'onos_target' : onos_service_info[0].target,
+ 'new_line': '\n',
+ 'new_tab': ' '
+ }
+ fp = j2_env.get_template('pipeline.yaml.j2').render (
+ context)
+ f.write(fp)
+
+def generate_pipeline_yaml_for_onos_openstack(target,resources,Flag):
+
+ onos_service=Onos_Service("ONOS",target,resources)
+ onos_service.update_onos_service_info()
+
+ parse_target=urlparse(openstack_service_info[0].target)
+ host = parse_target.hostname
+ port = parse_target.port
+
+ with open("pipeline.yaml", 'w') as f:
+ j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
+ trim_blocks=True)
+ context = {
+ 'onos' : Flag,
+ 'onos_endpoints' : resources,
+ 'onos_target' : target,
+ 'new_line': '\n',
+ 'new_tab': ' ',
+ 'openstack' : Flag,
+ 'listen_ip_addr': host,
+ 'port_number' : port
+ }
+ fp = j2_env.get_template('pipeline.yaml.j2').render (
+ context)
+ f.write(fp)
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/install_monitoring_ceilometer.sh b/xos/synchronizer/ceilometer/monitoring_agent/install_monitoring_ceilometer.sh
new file mode 100755
index 0000000..8cec15f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/install_monitoring_ceilometer.sh
@@ -0,0 +1,12 @@
+#! /bin/bash
+#set -x
+COMPUTENODES=$( bash -c "source ~/service-profile/cord-pod/admin-openrc.sh ; nova hypervisor-list" |grep "cord.lab"|awk '{print $4}')
+
+echo $COMPUTENODES
+
+for NODE in $COMPUTENODES; do
+ ansible-playbook -i /etc/maas/ansible/pod-inventory ~/xos_services/monitoring/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml -e instance_name=$NODE
+done
+
+CEILOMETERNODE="ceilometer-1"
+ansible-playbook ~/xos_services/monitoring/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml -e instance_name=$CEILOMETERNODE
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/monitoring_agent.py b/xos/synchronizer/ceilometer/monitoring_agent/monitoring_agent.py
new file mode 100644
index 0000000..5128d50
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/monitoring_agent.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+from flask import request, Request, jsonify
+from flask import Flask
+from flask import make_response
+import logging
+import logging.handlers
+import logging.config
+import subprocess
+import ConfigParser
+import generate_pipeline
+app = Flask(__name__)
+
+
+@app.route('/monitoring/agent/openstack/start',methods=['POST'])
+def openstack_start():
+ try:
+        # TODO: validate user inputs for all of the endpoint handlers
+ target = request.json['target']
+ logging.debug("target:%s",target)
+ if not generate_pipeline.onos_service_info:
+ logging.debug (" ONOS Service is not enalble,Only openstack need to be enabled ")
+ generate_pipeline.generate_pipeline_yaml_for_openstack(target,True)
+ else:
+ logging.debug(" ONOS Service is also enabled ,please generate yaml file for both onos and openstack")
+ generate_pipeline.generate_pipeline_yaml_for_openstack_onos(target,True)
+ restart_ceilometer_services()
+ return "Openstack start service called \n"
+ except Exception as e:
+ return e.__str__()
+
+@app.route('/monitoring/agent/onos/start',methods=['POST'])
+def onos_start():
+ try:
+ target = request.json['target']
+ logging.debug("target:%s",target)
+ metadata = request.json['meta_data']
+ logging.debug("metadata:%s",metadata)
+ logging.debug(type(target))
+ resources = metadata['resources']
+ logging.debug("resources:%s",resources)
+ if not generate_pipeline.openstack_service_info:
+ logging.debug("Openstak Service is not enabled,Only ONOS need to be enabled")
+ generate_pipeline.generate_pipeline_yaml_for_onos(target,resources,True)
+ else:
+ logging.debug(" Openstack Service is also enabled ,please generate yaml file for both onos and openstack")
+ generate_pipeline.generate_pipeline_yaml_for_onos_openstack(target,resources,True)
+
+ restart_ceilometer_services()
+ return "ONOS start service called \n"
+ except Exception as e:
+ return e.__str__()
+
+@app.route('/monitoring/agent/vsg/start',methods=['POST'])
+def vsg_start():
+ try:
+ target = request.json['target']
+ logging.debug("target:%s",target)
+ return "vsg start service called \n"
+ except Exception as e:
+ return e.__str__()
+
+
+@app.route('/monitoring/agent/openstack/stop',methods=['POST'])
+def openstack_stop():
+ try:
+ target = request.json['target']
+ logging.debug("target:%s",target)
+ if not generate_pipeline.onos_service_info:
+ generate_pipeline.generate_pipeline_yaml_for_openstack(target,False)
+ else:
+ generate_pipeline.generate_pipeline_yaml_for_onos(generate_pipeline.onos_service_info[0].target,generate_pipeline.onos_service_info[0].resources,True)
+ logging.debug("Delete Openstack object")
+ for obj in generate_pipeline.openstack_service_info:
+ generate_pipeline.openstack_service_info.remove(obj)
+
+ restart_ceilometer_services()
+ return "Openstack stop service called \n"
+
+ except Exception as e:
+ return e.__str__()
+
+@app.route('/monitoring/agent/onos/stop',methods=['POST'])
+def onos_stop():
+ try:
+ target = request.json['target']
+ logging.debug("target:%s",target)
+ metadata = request.json['meta_data']
+ logging.debug("metadata:%s",metadata)
+ resources = metadata['resources']
+ logging.debug("resources:%s",resources)
+
+ if not generate_pipeline.openstack_service_info:
+ generate_pipeline.generate_pipeline_yaml_for_onos(target,resources,False)
+ else:
+ generate_pipeline.generate_pipeline_yaml_for_openstack(generate_pipeline.openstack_service_info[0].target,True)
+
+ logging.debug("Delete ONOS Object")
+ for obj in generate_pipeline.onos_service_info:
+ generate_pipeline.onos_service_info.remove(obj)
+
+ restart_ceilometer_services()
+ return "ONOS stop service called \n"
+ except Exception as e:
+ return e.__str__()
+
+@app.route('/monitoring/agent/vsg/stop',methods=['POST'])
+def vsg_stop():
+ try:
+ target = request.json['target']
+ logging.debug("target:%s",target)
+ return "vsg stop service called \n"
+ except Exception as e:
+ return e.__str__()
+
+
+def restart_ceilometer_services():
+ try :
+ config = ConfigParser.ConfigParser()
+ config.read('monitoring_agent.conf')
+ services = config.get('SERVICE','Ceilometer_service')
+ service = services.split(",")
+ subprocess.call("sudo cp pipeline.yaml /etc/ceilometer/pipeline.yaml",shell=True)
+ except Exception as e:
+ logging.error("* Error in confing file:%s",e.__str__())
+ return False
+ else :
+ for service_name in service:
+ command = ['service',service_name, 'restart'];
+ logging.debug("Executing: %s command",command)
+            # shell=False (argument-list form) is required for sudo to work.
+ try :
+ subprocess.call(command, shell=False)
+ except Exception as e:
+ logging.error("* %s command execution failed with error %s",command,e.__str__())
+ return False
+ return True
+
+if __name__ == "__main__":
+ logging.config.fileConfig('monitoring_agent.conf', disable_existing_loggers=False)
+ app.run(host="0.0.0.0",port=5004,debug=False)
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/pipeline.yaml.j2 b/xos/synchronizer/ceilometer/monitoring_agent/pipeline.yaml.j2
new file mode 100644
index 0000000..977847b
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/pipeline.yaml.j2
@@ -0,0 +1,110 @@
+---
+sources:
+ - name: meter_source
+ interval: 600
+ meters:
+ - "*"
+ sinks:
+ - meter_sink
+ - name: cpu_source
+ interval: 600
+ meters:
+ - "cpu"
+ sinks:
+ - cpu_sink
+ - name: disk_source
+ interval: 600
+ meters:
+ - "disk.read.bytes"
+ - "disk.read.requests"
+ - "disk.write.bytes"
+ - "disk.write.requests"
+ - "disk.device.read.bytes"
+ - "disk.device.read.requests"
+ - "disk.device.write.bytes"
+ - "disk.device.write.requests"
+ sinks:
+ - disk_sink
+ - name: network_source
+ interval: 600
+ meters:
+ - "network.incoming.bytes"
+ - "network.incoming.packets"
+ - "network.outgoing.bytes"
+ - "network.outgoing.packets"
+ sinks:
+ - network_sink
+{% if onos %}
+ - name: sdn_source1
+ interval: 600
+ meters:
+ - "switch"
+ - "switch.*"
+ resources: {{ new_line }}
+{%- for urls in onos_endpoints %}
+ - {{ urls }} {{ new_line}}
+{%- if loop.last -%}
+{{ new_tab }}sinks:
+ - sdn_sink
+{%- endif -%}
+{%- endfor -%}
+{% endif %}
+sinks:
+ - name: meter_sink
+ transformers:
+ publishers:
+ - notifier://
+{% if openstack %}
+ - udp://{{ listen_ip_addr }}:{{ port_number }}
+{% endif %}
+ - name: cpu_sink
+ transformers:
+ - name: "rate_of_change"
+ parameters:
+ target:
+ name: "cpu_util"
+ unit: "%"
+ type: "gauge"
+ scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
+ publishers:
+ - notifier://
+{% if openstack %}
+ - udp://{{ listen_ip_addr }}:4455
+{% endif %}
+ - name: disk_sink
+ transformers:
+ - name: "rate_of_change"
+ parameters:
+ source:
+ map_from:
+ name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
+ unit: "(B|request)"
+ target:
+ map_to:
+ name: "\\1.\\2.\\3.rate"
+ unit: "\\1/s"
+ type: "gauge"
+ publishers:
+ - notifier://
+ - name: network_sink
+ transformers:
+ - name: "rate_of_change"
+ parameters:
+ source:
+ map_from:
+ name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
+ unit: "(B|packet)"
+ target:
+ map_to:
+ name: "network.\\1.\\2.rate"
+ unit: "\\1/s"
+ type: "gauge"
+ publishers:
+ - notifier://
+{% if onos %}
+ - name: sdn_sink
+ transformers:
+ publishers:
+ - notifier://
+ - {{ onos_target }}
+{% endif %}
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/start_monitoring_agent.sh b/xos/synchronizer/ceilometer/monitoring_agent/start_monitoring_agent.sh
new file mode 100755
index 0000000..f7dedc2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/start_monitoring_agent.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+sudo apt-get update
+sudo apt-get install -y python-dev
+sudo pip install Flask
+cd /home/ubuntu/monitoring_agent
+chmod +x monitoring_agent.py
+nohup python monitoring_agent.py &
diff --git a/xos/synchronizer/ceilometer/monitoring_agent/templates/monitoring_agent.conf.j2 b/xos/synchronizer/ceilometer/monitoring_agent/templates/monitoring_agent.conf.j2
new file mode 100755
index 0000000..dc7da38
--- /dev/null
+++ b/xos/synchronizer/ceilometer/monitoring_agent/templates/monitoring_agent.conf.j2
@@ -0,0 +1,25 @@
+[SERVICE]
+Ceilometer_service = {{ ceilometer_services }}
+
+
+[loggers]
+keys=root
+
+[handlers]
+keys=logfile
+
+[formatters]
+keys=logfileformatter
+
+[logger_root]
+level=DEBUG
+handlers=logfile
+
+[formatter_logfileformatter]
+format='%(asctime)s %(filename)s %(levelname)s %(message)s'
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+level=NOTSET
+args=('monitoring_agent.log','a',10000000,5)
+formatter=logfileformatter
diff --git a/xos/synchronizer/ceilometer/udp_proxy/udpagent.py b/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
index 81826ad..1cac784 100644
--- a/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
+++ b/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
@@ -39,6 +39,10 @@
event_data = {'event_type': 'infra','message_id':six.text_type(uuid.uuid4()),'publisher_id': 'cpe_publisher_id','timestamp':datetime.datetime.now().isoformat(),'priority':'INFO','payload':msg}
return event_data
+ def errback(self, exc, interval):
+ logging.error('Error: %r', exc, exc_info=1)
+ logging.info('Retry in %s seconds.', interval)
+
def setup_rabbit_mq_channel(self):
service_exchange = Exchange(self.acord_control_exchange, "topic", durable=False)
# connections/channels
@@ -47,6 +51,8 @@
channel = connection.channel()
# produce
self.producer = Producer(channel, exchange=service_exchange, routing_key='notifications.info')
+ self.publish = connection.ensure(self.producer, self.producer.publish, errback=self.errback, max_retries=3)
+
def start_udp(self):
address_family = socket.AF_INET
@@ -70,12 +76,16 @@
else:
try:
if sample.has_key("event_type"):
- logging.debug("recevied event :%s",sample)
- self.producer.publish(sample)
+ #logging.debug("recevied event :%s",sample)
+ logging.debug("recevied event :%s",sample['event_type'])
+ #self.producer.publish(sample)
+ self.publish(sample)
else:
- logging.debug("recevied Sample :%s",sample)
+ #logging.debug("recevied Sample :%s",sample)
+ logging.debug("recevied Sample :%s",sample['counter_name'])
msg = self.convert_sample_to_event_data(sample)
- self.producer.publish(msg)
+ #self.producer.publish(msg)
+ self.publish(msg)
except Exception:
logging.exception("UDP: Unable to publish msg")
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
index 9b9e144..f002ef4 100644
--- a/xos/synchronizer/manifest
+++ b/xos/synchronizer/manifest
@@ -12,6 +12,12 @@
templates/ceilometer_proxy_server.py
templates/start_ceilometer_proxy
templates/update-keystone-endpoints.py.j2
+ceilometer/udp_proxy/udpagent.py
+templates/udpagent.conf.j2
+ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/notifications.py
+ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/__init__.py
+ceilometer/ceilometer-plugins/network/ext_services/vcpe/notifications.py
+ceilometer/ceilometer-plugins/network/ext_services/vcpe/__init__.py
manifest
run.sh
monitoring_synchronizer_config
@@ -23,6 +29,9 @@
steps/sync_sflowservice.py
steps/sync_ceilometerservice.yaml
steps/sync_ceilometerservice.py
+steps/sync_openstackmonitoringpublisher.py
+steps/sync_openstackmonitoringpublisher.yaml
+steps/enable_monitoring_service.yaml
files/vm-resolv.conf
files/docker.list
model-deps
diff --git a/xos/synchronizer/steps/enable_monitoring_service.yaml b/xos/synchronizer/steps/enable_monitoring_service.yaml
new file mode 100644
index 0000000..de03646
--- /dev/null
+++ b/xos/synchronizer/steps/enable_monitoring_service.yaml
@@ -0,0 +1,27 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: False
+ user: ubuntu
+ vars:
+ monitoringagents:
+ {% for agent in agents %}
+ - url: {{ agent.url }}
+ body: {{ agent.body }}
+ {% endfor %}
+
+ tasks:
+ - name: debug
+ debug: msg=" {{ '{{' }} item {{ '}}' }} "
+ with_items: " {{ '{{' }} monitoringagents {{ '}}' }} "
+
+ - name: starting openstack service monitoring agent
+ uri:
+ url: "{{ '{{' }} item.url {{ '}}' }}"
+ method: POST
+ body: "{{ '{{' }} item.body {{ '}}' }}"
+ force_basic_auth: yes
+ status_code: 200
+ body_format: json
+ with_items: " {{ '{{' }} monitoringagents {{ '}}' }} "
+
diff --git a/xos/synchronizer/steps/sync_monitoringchannel.py b/xos/synchronizer/steps/sync_monitoringchannel.py
index 3b97741..fcb6f17 100644
--- a/xos/synchronizer/steps/sync_monitoringchannel.py
+++ b/xos/synchronizer/steps/sync_monitoringchannel.py
@@ -23,6 +23,10 @@
logger = Logger(level=logging.INFO)
+#FIXME: Is this the right approach?
+#Maintain a global SSH tunnel database so that tunnels can be torn down when the object is deleted
+ssh_tunnel_db = {}
+
class SSHTunnel:
def __init__(self, localip, localport, key, remoteip, remote_port, jumpuser, jumphost):
@@ -147,7 +151,7 @@
#Check if ssh tunnel is needed
proxy_ssh = getattr(Config(), "observer_proxy_ssh", False)
- if proxy_ssh:
+ if proxy_ssh and (not o.ssh_proxy_tunnel):
proxy_ssh_key = getattr(Config(), "observer_proxy_ssh_key", None)
proxy_ssh_user = getattr(Config(), "observer_proxy_ssh_user", "root")
jump_hostname = fields["hostname"]
@@ -159,16 +163,12 @@
local_port = remote_port
local_ip = socket.gethostbyname(socket.gethostname())
-# tunnel = SSHTunnelForwarder(jump_hostname,
-# ssh_username=proxy_ssh_user,
-# ssh_pkey=proxy_ssh_key,
-# ssh_private_key_password="",
-# remote_bind_address=(remote_host,remote_port),
-# local_bind_address=(local_ip,local_port),
-# set_keepalive=300)
-# tunnel.start()
tunnel = SSHTunnel(local_ip, local_port, proxy_ssh_key, remote_host, remote_port, proxy_ssh_user, jump_hostname)
tunnel.start()
+ logger.info("SSH Tunnel created for Monitoring channel-%s at local port:%s"%(o.id,local_port))
+
+ #FIXME:Store the tunnel handle in global tunnel database
+ ssh_tunnel_db[o.id] = tunnel
#Update the model with ssh tunnel info
o.ssh_proxy_tunnel = True
@@ -185,6 +185,14 @@
#if quick_update:
# logger.info("quick_update triggered; skipping ansible recipe")
#else:
+ if ('delete' in fields) and (fields['delete']):
+ logger.info("Delete for Monitoring channel-%s is getting synchronized"%(o.id))
+ if o.id in ssh_tunnel_db:
+ tunnel = ssh_tunnel_db[o.id]
+ tunnel.stop()
+ logger.info("Deleted SSH Tunnel for Monitoring channel-%s at local port:%s"%(o.id,o.ssh_tunnel_port))
+ o.ssh_proxy_tunnel = False
+ del ssh_tunnel_db[o.id]
super(SyncMonitoringChannel, self).run_playbook(o, fields)
#o.last_ansible_hash = ansible_hash
diff --git a/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py
new file mode 100644
index 0000000..2c3d74d
--- /dev/null
+++ b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.py
@@ -0,0 +1,123 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+import json
+#import threading
+import subprocess
+import random
+import tempfile
+#from sshtunnel import SSHTunnelForwarder
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice
+from services.monitoring.models import CeilometerService, OpenStackServiceMonitoringPublisher
+from xos.logger import Logger, logging
+
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncOpenStackMonitoringPublisher(SyncInstanceUsingAnsible):
+ provides=[OpenStackServiceMonitoringPublisher]
+ observes=OpenStackServiceMonitoringPublisher
+ requested_interval=0
+ template_name = "sync_openstackmonitoringpublisher.yaml"
+
+ def __init__(self, *args, **kwargs):
+ super(SyncOpenStackMonitoringPublisher, self).__init__(*args, **kwargs)
+
+ def fetch_pending(self, deleted):
+ if (not deleted):
+ objs = OpenStackServiceMonitoringPublisher.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+ else:
+ objs = OpenStackServiceMonitoringPublisher.get_deleted_tenant_objects()
+
+ return objs
+
+ def sync_record(self, o):
+ logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
+
+ self.prepare_record(o)
+
+ ceilometer_services = CeilometerService.get_service_objects().filter(id=o.provider_service.id)
+ if not ceilometer_services:
+ raise "No associated Ceilometer service"
+ ceilometer_service = ceilometer_services[0]
+ service_instance = ceilometer_service.get_instance()
+ # sync only when the corresponding service instance is fully synced
+ if not service_instance:
+ self.defer_sync(o, "waiting on associated service instance")
+ return
+ if not service_instance.instance_name:
+ self.defer_sync(o, "waiting on associated service instance.instance_name")
+ return
+
+ # Step1: Orchestrate UDP proxy agent on the compute node where monitoring service VM is spawned
+
+ fields = { "hostname": ceilometer_service.ceilometer_rabbit_compute_node,
+ "baremetal_ssh": True,
+ "instance_name": "rootcontext",
+ "username": "root",
+ "container_name": None,
+ "rabbit_host": ceilometer_service.ceilometer_rabbit_host,
+ "rabbit_user": ceilometer_service.ceilometer_rabbit_user,
+ "rabbit_password": ceilometer_service.ceilometer_rabbit_password,
+ "listen_ip_addr": socket.gethostbyname(ceilometer_service.ceilometer_rabbit_compute_node)
+ }
+
+ # If 'o' defines a 'sync_attributes' list, then we'll copy those
+ # attributes into the Ansible recipe's field list automatically.
+ if hasattr(o, "sync_attributes"):
+ for attribute_name in o.sync_attributes:
+ fields[attribute_name] = getattr(o, attribute_name)
+
+ key_name = self.get_node_key(service_instance.node)
+ if not os.path.exists(key_name):
+ raise Exception("Node key %s does not exist" % key_name)
+ key = file(key_name).read()
+ fields["private_key"] = key
+
+ template_name = "sync_openstackmonitoringpublisher.yaml"
+ fields["ansible_tag"] = o.__class__.__name__ + "_" + str(o.id) + "_step1"
+
+ self.run_playbook(o, fields, template_name)
+
+ # Step2: Orchestrate OpenStack publish agent
+ target_uri = "udp://" + ceilometer_service.ceilometer_rabbit_compute_node + ":4455"
+ fields = {}
+ agent_info = []
+ if o.monitoring_agents:
+ for agent in o.monitoring_agents.all():
+ body = {'target': target_uri}
+ if agent.start_url_json_data:
+ start_url_dict = json.loads(agent.start_url_json_data)
+ body.update(agent.start_url_dict)
+ a = {'url': agent.start_url, 'body': json.dumps(body)}
+ agent_info.append(a)
+
+ fields["agents"] = agent_info
+ #fields["private_key"] = ""
+
+ template_name = "enable_monitoring_service.yaml"
+ fields["ansible_tag"] = o.__class__.__name__ + "_" + str(o.id) + "_step2"
+
+ run_template(template_name, fields)
+
+ o.save()
+
+ def map_delete_inputs(self, o):
+ fields = {"unique_id": o.id,
+ "delete": True}
+ return fields
+
+ def delete_record(self, o):
+ pass
+
diff --git a/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml
new file mode 100644
index 0000000..0549b25
--- /dev/null
+++ b/xos/synchronizer/steps/sync_openstackmonitoringpublisher.yaml
@@ -0,0 +1,45 @@
+---
+- hosts: {{ instance_name }}
+ gather_facts: False
+ connection: ssh
+ user: {{ username }}
+ sudo: yes
+ vars:
+ rabbit_user: {{ rabbit_user }}
+ rabbit_password: {{ rabbit_password }}
+ rabbit_host: {{ rabbit_host }}
+ listen_ip_addr: {{ listen_ip_addr }}
+
+ tasks:
+
+ - name: Verify if udpagent ([] is to avoid capturing the shell process) is already running
+ shell: pgrep -f [u]dpagent | wc -l
+ register: udpagent_job_pids_count
+
+ - name: DEBUG
+ debug: var=udpagent_job_pids_count.stdout
+
+ - name: stop /usr/local/share/udp_proxy if already running
+ shell: pkill -f /usr/local/share/udp_proxy/udpagent.py
+ ignore_errors: True
+ when: udpagent_job_pids_count.stdout != "0"
+
+ - name: make sure /usr/local/share/udp_proxy exists
+ file: path=/usr/local/share/udp_proxy state=directory owner=root group=root
+
+ - name: Copy udp_proxy component files to destination
+ copy: src=/opt/xos/synchronizers/monitoring/ceilometer/udp_proxy/udpagent.py
+ dest=/usr/local/share/udp_proxy/udpagent.py
+
+ - name: udp_proxy config
+ template: src=/opt/xos/synchronizers/monitoring/templates/udpagent.conf.j2 dest=/usr/local/share/udp_proxy/udpagent.conf mode=0777
+
+ - name: install python-kombu
+ apt: name=python-kombu state=present
+
+ - name: Launch udp_proxy
+ command: python /usr/local/share/udp_proxy/udpagent.py
+ args:
+ chdir: /usr/local/share/udp_proxy/
+ async: 9999999999999999
+ poll: 0
diff --git a/xos/synchronizer/templates/udpagent.conf.j2 b/xos/synchronizer/templates/udpagent.conf.j2
new file mode 100644
index 0000000..fe9cab6
--- /dev/null
+++ b/xos/synchronizer/templates/udpagent.conf.j2
@@ -0,0 +1,30 @@
+[udpservice]
+udp_address = {{ listen_ip_addr }}
+udp_port = 4455
+rabbit_userid = {{ rabbit_user }}
+rabbit_password = {{ rabbit_password }}
+rabbit_hosts = {{ rabbit_host }}
+acord_control_exchange = openstack_infra
+
+[loggers]
+keys=root
+
+[handlers]
+keys=logfile
+
+[formatters]
+keys=logfileformatter
+
+[logger_root]
+level=INFO
+#level=DEBUG
+handlers=logfile
+
+[formatter_logfileformatter]
+format=%(asctime)s %(filename)s %(levelname)s %(message)s
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+level=NOTSET
+args=('udpagent.log','a',1000000,100)
+formatter=logfileformatter
diff --git a/xos/tosca/resources/inframonitoringagentinfo.py b/xos/tosca/resources/inframonitoringagentinfo.py
new file mode 100644
index 0000000..36748ba
--- /dev/null
+++ b/xos/tosca/resources/inframonitoringagentinfo.py
@@ -0,0 +1,7 @@
+from xosresource import XOSResource
+from services.monitoring.models import InfraMonitoringAgentInfo
+
+class XOSInfraMonitoringAgentInfo(XOSResource):
+ provides = "tosca.nodes.InfraMonitoringAgentInfo"
+ xos_model = InfraMonitoringAgentInfo
+ copyin_props = ["start_url", "start_url_json_data", "stop_url"]
diff --git a/xos/tosca/resources/monitoringcollectorplugininfo.py b/xos/tosca/resources/monitoringcollectorplugininfo.py
new file mode 100644
index 0000000..1d7f88f
--- /dev/null
+++ b/xos/tosca/resources/monitoringcollectorplugininfo.py
@@ -0,0 +1,7 @@
+from xosresource import XOSResource
+from services.monitoring.models import MonitoringCollectorPluginInfo
+
+class XOSMonitoringCollectorPluginInfo(XOSResource):
+ provides = "tosca.nodes.MonitoringCollectorPluginInfo"
+ xos_model = MonitoringCollectorPluginInfo
+ copyin_props = ["plugin_folder_path", "plugin_rabbit_exchange"]
diff --git a/xos/tosca/resources/onosmonitoringpublisher.py b/xos/tosca/resources/onosmonitoringpublisher.py
new file mode 100644
index 0000000..a38b556
--- /dev/null
+++ b/xos/tosca/resources/onosmonitoringpublisher.py
@@ -0,0 +1,36 @@
+from xosresource import XOSResource
+from core.models import Tenant, Service
+from services.monitoring.models import ONOSServiceMonitoringPublisher, CeilometerService, InfraMonitoringAgentInfo, MonitoringCollectorPluginInfo
+
+class XOSONOSMonitoringPublisher(XOSResource):
+ provides = "tosca.nodes.ONOSMonitoringPublisher"
+ xos_model = ONOSServiceMonitoringPublisher
+ name_field = None
+
+ def get_xos_args(self, throw_exception=True):
+ args = super(XOSONOSMonitoringPublisher, self).get_xos_args()
+
+ # PublisherTenant must always have a provider_service
+        provider_name = self.get_requirement("tosca.relationships.TenantOfService", throw_exception=throw_exception)
+        if provider_name:
+            args["provider_service"] = self.get_xos_object(Service, throw_exception=throw_exception, name=provider_name)
+
+ return args
+
+ def get_existing_objs(self):
+ args = self.get_xos_args(throw_exception=False)
+ return ONOSServiceMonitoringPublisher.get_tenant_objects().filter(provider_service=args["provider_service"])
+
+ def postprocess(self, obj):
+ for ma_name in self.get_requirements("tosca.relationships.ProvidesInfraMonitoringAgentInfo"):
+ ma = self.get_xos_object(InfraMonitoringAgentInfo, name=ma_name)
+ ma.monitoring_publisher = obj
+ ma.save()
+ for mcp_name in self.get_requirements("tosca.relationships.ProvidesMonitoringCollectorPluginInfo"):
+ mcp = self.get_xos_object(MonitoringCollectorPluginInfo, name=mcp_name)
+ mcp.monitoring_publisher = obj
+ mcp.save()
+
+ def can_delete(self, obj):
+ return super(XOSONOSMonitoringPublisher, self).can_delete(obj)
+
diff --git a/xos/tosca/resources/openstackmonitoringpublisher.py b/xos/tosca/resources/openstackmonitoringpublisher.py
new file mode 100644
index 0000000..1b30555
--- /dev/null
+++ b/xos/tosca/resources/openstackmonitoringpublisher.py
@@ -0,0 +1,36 @@
+from xosresource import XOSResource
+from core.models import Tenant, Service
+from services.monitoring.models import OpenStackServiceMonitoringPublisher, CeilometerService, InfraMonitoringAgentInfo, MonitoringCollectorPluginInfo
+
+class XOSOpenStackMonitoringPublisher(XOSResource):
+ provides = "tosca.nodes.OpenStackMonitoringPublisher"
+ xos_model = OpenStackServiceMonitoringPublisher
+ name_field = None
+
+ def get_xos_args(self, throw_exception=True):
+ args = super(XOSOpenStackMonitoringPublisher, self).get_xos_args()
+
+ # PublisherTenant must always have a provider_service
+        provider_name = self.get_requirement("tosca.relationships.TenantOfService", throw_exception=throw_exception)
+        if provider_name:
+            args["provider_service"] = self.get_xos_object(Service, throw_exception=throw_exception, name=provider_name)
+
+ return args
+
+ def postprocess(self, obj):
+ for ma_name in self.get_requirements("tosca.relationships.ProvidesInfraMonitoringAgentInfo"):
+ ma = self.get_xos_object(InfraMonitoringAgentInfo, name=ma_name)
+ ma.monitoring_publisher = obj
+ ma.save()
+ for mcp_name in self.get_requirements("tosca.relationships.ProvidesMonitoringCollectorPluginInfo"):
+ mcp = self.get_xos_object(MonitoringCollectorPluginInfo, name=mcp_name)
+ mcp.monitoring_publisher = obj
+ mcp.save()
+
+ def get_existing_objs(self):
+ args = self.get_xos_args(throw_exception=False)
+ return OpenStackServiceMonitoringPublisher.get_tenant_objects().filter(provider_service=args["provider_service"])
+
+ def can_delete(self, obj):
+ return super(XOSOpenStackMonitoringPublisher, self).can_delete(obj)
+
diff --git a/xos/tosca/resources/userservicemonitoringpublisher.py b/xos/tosca/resources/userservicemonitoringpublisher.py
new file mode 100644
index 0000000..efac81d
--- /dev/null
+++ b/xos/tosca/resources/userservicemonitoringpublisher.py
@@ -0,0 +1,40 @@
+from xosresource import XOSResource
+from core.models import Tenant, Service
+from services.monitoring.models import UserServiceMonitoringPublisher, CeilometerService, InfraMonitoringAgentInfo, MonitoringCollectorPluginInfo
+
+class XOSUserServiceMonitoringPublisher(XOSResource):
+ provides = "tosca.nodes.UserServiceMonitoringPublisher"
+ xos_model = UserServiceMonitoringPublisher
+ name_field = None
+
+ def get_xos_args(self, throw_exception=True):
+ args = super(XOSUserServiceMonitoringPublisher, self).get_xos_args()
+
+ # PublisherTenant must always have a provider_service
+        provider_name = self.get_requirement("tosca.relationships.TenantOfService", throw_exception=throw_exception)
+        if provider_name:
+            args["provider_service"] = self.get_xos_object(Service, throw_exception=throw_exception, name=provider_name)
+
+        target_service_name = self.get_requirement("tosca.relationships.PublishesMonitoringData", throw_exception=throw_exception)
+        if target_service_name:
+            args["target_service"] = self.get_xos_object(Service, throw_exception=throw_exception, name=target_service_name)
+
+ return args
+
+ def get_existing_objs(self):
+ args = self.get_xos_args(throw_exception=False)
+ return [publisher for publisher in UserServiceMonitoringPublisher.get_tenant_objects().all() if (publisher.target_service.id == args["target_service"].id)]
+
+ def postprocess(self, obj):
+        #for ma_name in self.get_requirements("tosca.relationships.ProvidesInfraMonitoringAgentInfo"):
+        #    ma = self.get_xos_object(InfraMonitoringAgentInfo, name=ma_name)
+        #    ma.monitoring_publisher = obj
+        #    ma.save()
+ for mcp_name in self.get_requirements("tosca.relationships.ProvidesMonitoringCollectorPluginInfo"):
+ mcp = self.get_xos_object(MonitoringCollectorPluginInfo, name=mcp_name)
+ mcp.monitoring_publisher = obj
+ mcp.save()
+
+ def can_delete(self, obj):
+ return super(XOSUserServiceMonitoringPublisher, self).can_delete(obj)
+