Move ceilometer and openvpn to separate repos
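
The ceilometer/monitoring code no longer lives under xos/onboard; it is
maintained in its own repo and cloned on demand. Makefile.services gains
a monitoring_services target that clones
https://gerrit.opencord.org/p/monitoring.git into $(SERVICE_DIR), and the
cord-pod Makefile onboards the service from
/opt/xos_services/monitoring/xos/ceilometer-onboard.yaml instead of the
old /opt/xos/onboard/ceilometer path.

A minimal sketch of the resulting flow (assuming the cord-pod
configuration directory and network access to gerrit.opencord.org):

    cd xos/configurations/cord-pod
    make cord-ceilometer   # fetches the monitoring repo, onboards ceilometer, then runs ceilometer.yaml
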
Change-Id: I95af703f7994a4db1b7303c2a54985eb2f48ad79
diff --git a/xos/configurations/common/Makefile.services b/xos/configurations/common/Makefile.services
index b5ee431..e212f41 100644
--- a/xos/configurations/common/Makefile.services
+++ b/xos/configurations/common/Makefile.services
@@ -10,6 +10,8 @@
$(SERVICE_DIR)/onos-service \
$(SERVICE_DIR)/fabric
+monitoring_services: $(SERVICE_DIR)/monitoring
+
$(SERVICE_DIR):
mkdir -p $(SERVICE_DIR)
@@ -37,6 +39,9 @@
$(SERVICE_DIR)/fabric:
git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/fabric.git
+$(SERVICE_DIR)/monitoring:
+ git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/monitoring.git
+
cleanup:
rm -rf $(SERVICE_DIR)/*
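The new target can also be exercised on its own; a minimal sketch,
assuming it is run from xos/configurations/common with SERVICE_DIR set as
elsewhere in this Makefile:

    make -f Makefile.services monitoring_services

which clones https://gerrit.opencord.org/p/monitoring.git into
$(SERVICE_DIR)/monitoring.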
diff --git a/xos/configurations/cord-pod/Makefile b/xos/configurations/cord-pod/Makefile
index 6911818..4ff18b2 100644
--- a/xos/configurations/cord-pod/Makefile
+++ b/xos/configurations/cord-pod/Makefile
@@ -99,14 +99,17 @@
cord-ceilometer: ceilometer_custom_images cord onboard-ceilometer
sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/ceilometer.yaml
-onboard-ceilometer:
+onboard-ceilometer: download-ceilometer
sudo cp id_rsa key_import/monitoring_channel_rsa
sudo cp id_rsa.pub key_import/monitoring_channel_rsa.pub
- sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None /opt/xos/onboard/ceilometer/ceilometer-onboard.yaml
+ sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None /opt/xos_services/monitoring/xos/ceilometer-onboard.yaml
bash ../common/wait_for_onboarding_ready.sh 81 services/ceilometer
bash ../common/wait_for_onboarding_ready.sh 81 xos
bash ../common/wait_for_xos_port.sh 80
+download-ceilometer:
+ make -f ../common/Makefile.services monitoring_services
+
nodes.yaml:
export SETUPDIR=.; bash ../common/make-nodes-yaml.sh
diff --git a/xos/onboard/README.md b/xos/onboard/README.md
deleted file mode 100644
index 2030708..0000000
--- a/xos/onboard/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-This directory is a temporary placeholder for services that can be on-boarded.
-
-Once we move to Gerrit and service-per-repo, this directory will be removed.
diff --git a/xos/onboard/ceilometer/admin.py b/xos/onboard/ceilometer/admin.py
deleted file mode 100644
index 062a2ae..0000000
--- a/xos/onboard/ceilometer/admin.py
+++ /dev/null
@@ -1,214 +0,0 @@
-from django.contrib import admin
-
-from services.ceilometer.models import *
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline, TenantAttrAsTabInline
-from core.middleware import get_request
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-class CeilometerServiceForm(forms.ModelForm):
- ceilometer_pub_sub_url = forms.CharField(required=False, max_length=1024, help_text="REST URL of ceilometer PUB/SUB component in http://IP:port/ format")
-
- def __init__(self,*args,**kwargs):
- super (CeilometerServiceForm,self ).__init__(*args,**kwargs)
- if self.instance:
- # fields for the attributes
- self.fields['ceilometer_pub_sub_url'].initial = self.instance.ceilometer_pub_sub_url
-
- def save(self, commit=True):
- self.instance.ceilometer_pub_sub_url = self.cleaned_data.get("ceilometer_pub_sub_url")
- return super(CeilometerServiceForm, self).save(commit=commit)
-
- class Meta:
- model = CeilometerService
-
-class CeilometerServiceAdmin(ReadOnlyAwareAdmin):
- model = CeilometerService
- verbose_name = "Ceilometer Service"
- verbose_name_plural = "Ceilometer Service"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description','ceilometer_pub_sub_url', "view_url","icon_url" ], 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
- form = CeilometerServiceForm
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'Ceilometer Service Details'),
- ('administration', 'Administration'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = (('ceilometeradmin.html', 'top', 'administration'),
- )
-
- def queryset(self, request):
- return CeilometerService.get_service_objects_by_user(request.user)
-
-class MonitoringChannelForm(forms.ModelForm):
- creator = forms.ModelChoiceField(queryset=User.objects.all())
-
- def __init__(self,*args,**kwargs):
- super (MonitoringChannelForm,self ).__init__(*args,**kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['provider_service'].queryset = CeilometerService.get_service_objects().all()
- if self.instance:
- # fields for the attributes
- self.fields['creator'].initial = self.instance.creator
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = CEILOMETER_KIND
- self.fields['creator'].initial = get_request().user
- if CeilometerService.get_service_objects().exists():
- self.fields["provider_service"].initial = CeilometerService.get_service_objects().all()[0]
-
-
- def save(self, commit=True):
- self.instance.creator = self.cleaned_data.get("creator")
- return super(MonitoringChannelForm, self).save(commit=commit)
-
- class Meta:
- model = MonitoringChannel
-
-class MonitoringChannelAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', )
- list_display_links = ('backend_status_icon', 'id')
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'service_specific_attribute',
- 'ceilometer_url', 'tenant_list_str',
- 'instance', 'creator'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'instance', 'service_specific_attribute', 'ceilometer_url', 'tenant_list_str')
- form = MonitoringChannelForm
-
- suit_form_tabs = (('general','Details'),)
- actions=['delete_selected_objects']
-
- def get_actions(self, request):
- actions = super(MonitoringChannelAdmin, self).get_actions(request)
- if 'delete_selected' in actions:
- del actions['delete_selected']
- return actions
-
- def delete_selected_objects(self, request, queryset):
- for obj in queryset:
- obj.delete()
- delete_selected_objects.short_description = "Delete Selected MonitoringChannel Objects"
-
- def queryset(self, request):
- return MonitoringChannel.get_tenant_objects_by_user(request.user)
-
-class SFlowServiceForm(forms.ModelForm):
- sflow_port = forms.IntegerField(required=False)
- sflow_api_port = forms.IntegerField(required=False)
-
- def __init__(self,*args,**kwargs):
- super (SFlowServiceForm,self ).__init__(*args,**kwargs)
- if self.instance:
- # fields for the attributes
- self.fields['sflow_port'].initial = self.instance.sflow_port
- self.fields['sflow_api_port'].initial = self.instance.sflow_api_port
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['sflow_port'].initial = SFLOW_PORT
- self.fields['sflow_api_port'].initial = SFLOW_API_PORT
-
- def save(self, commit=True):
- self.instance.sflow_port = self.cleaned_data.get("sflow_port")
- self.instance.sflow_api_port = self.cleaned_data.get("sflow_api_port")
- return super(SFlowServiceForm, self).save(commit=commit)
-
- class Meta:
- model = SFlowService
-
-class SFlowServiceAdmin(ReadOnlyAwareAdmin):
- model = SFlowService
- verbose_name = "SFlow Service"
- verbose_name_plural = "SFlow Service"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description',"view_url","sflow_port","sflow_api_port","icon_url" ], 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
- form = SFlowServiceForm
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'SFlow Service Details'),
- ('administration', 'Administration'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = (('sflowadmin.html', 'top', 'administration'),
- )
-
- def queryset(self, request):
- return SFlowService.get_service_objects_by_user(request.user)
-
-class SFlowTenantForm(forms.ModelForm):
- creator = forms.ModelChoiceField(queryset=User.objects.all())
- listening_endpoint = forms.CharField(max_length=1024, help_text="sFlow listening endpoint in udp://IP:port format")
-
- def __init__(self,*args,**kwargs):
- super (SFlowTenantForm,self ).__init__(*args,**kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['provider_service'].queryset = SFlowService.get_service_objects().all()
- if self.instance:
- # fields for the attributes
- self.fields['creator'].initial = self.instance.creator
- self.fields['listening_endpoint'].initial = self.instance.listening_endpoint
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = SFLOW_KIND
- self.fields['creator'].initial = get_request().user
- if SFlowService.get_service_objects().exists():
- self.fields["provider_service"].initial = SFlowService.get_service_objects().all()[0]
-
- def save(self, commit=True):
- self.instance.creator = self.cleaned_data.get("creator")
- self.instance.listening_endpoint = self.cleaned_data.get("listening_endpoint")
- return super(SFlowTenantForm, self).save(commit=commit)
-
- class Meta:
- model = SFlowTenant
-
-class SFlowTenantAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'creator', 'listening_endpoint' )
- list_display_links = ('backend_status_icon', 'listening_endpoint')
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_service', 'service_specific_attribute', 'listening_endpoint',
- 'creator'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'instance', 'service_specific_attribute')
- inlines = [TenantAttrAsTabInline]
- form = SFlowTenantForm
-
- suit_form_tabs = (('general','Details'), ('tenantattrs', 'Attributes'))
-
- def queryset(self, request):
- return SFlowTenant.get_tenant_objects_by_user(request.user)
-
-admin.site.register(CeilometerService, CeilometerServiceAdmin)
-admin.site.register(SFlowService, SFlowServiceAdmin)
-admin.site.register(MonitoringChannel, MonitoringChannelAdmin)
-admin.site.register(SFlowTenant, SFlowTenantAdmin)
-
diff --git a/xos/onboard/ceilometer/api/tenant/ceilometer/monitoringchannel.py b/xos/onboard/ceilometer/api/tenant/ceilometer/monitoringchannel.py
deleted file mode 100644
index 43e1636..0000000
--- a/xos/onboard/ceilometer/api/tenant/ceilometer/monitoringchannel.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import status
-from core.models import *
-from django.forms import widgets
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-
-from services.ceilometer.models import MonitoringChannel, CeilometerService
-
-def get_default_ceilometer_service():
- ceilometer_services = CeilometerService.get_service_objects().all()
- if ceilometer_services:
- return ceilometer_services[0].id
- return None
-
-class MonitoringChannelForAPI(MonitoringChannel):
- class Meta:
- proxy = True
- app_label = "ceilometer"
-
- @property
- def related(self):
- related = {}
- if self.creator:
- related["creator"] = self.creator.username
- if self.instance:
- related["instance_id"] = self.instance.id
- related["instance_name"] = self.instance.name
- if self.instance.node:
- related["compute_node_name"] = self.instance.node.name
- return related
-
-class MonitoringChannelSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- service_specific_attribute = ReadOnlyField()
- ceilometer_url = ReadOnlyField()
- tenant_list_str = ReadOnlyField()
- #creator = ReadOnlyField()
- #instance = ReadOnlyField()
- provider_service = serializers.PrimaryKeyRelatedField(queryset=CeilometerService.get_service_objects().all(), default=get_default_ceilometer_service)
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- related = serializers.DictField(required=False)
-
- #computeNodeName = serializers.SerializerMethodField("getComputeNodeName")
-
- class Meta:
- model = MonitoringChannelForAPI
- fields = ('humanReadableName', 'id', 'provider_service', 'service_specific_attribute', 'ceilometer_url', 'tenant_list_str', 'related' )
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
- #def getComputeNodeName(self, obj):
- # instance = obj.instance
- # if not instance:
- # return None
- # return instance.node.name
-
-class MonitoringChannelSet(XOSViewSet):
- base_name = "monitoringchannel"
- method_name = "monitoringchannel"
- method_kind = "viewset"
- queryset = MonitoringChannelForAPI.get_tenant_objects().all()
- serializer_class = MonitoringChannelSerializer
-
- def get_queryset(self):
- queryset = MonitoringChannelForAPI.get_tenant_objects().all()
-
- current_user = self.request.user.username
- if current_user is not None:
- ids = [x.id for x in queryset if x.creator.username==current_user]
- queryset = queryset.filter(id__in=ids)
-
- return queryset
-
- def create(self, request):
- current_user = request.user.username
- existing_obj = None
- for obj in MonitoringChannelForAPI.get_tenant_objects().all():
- if (obj.creator.username == current_user):
- existing_obj = obj
- break
-
- if existing_obj:
- serializer = MonitoringChannelSerializer(existing_obj)
- headers = self.get_success_headers(serializer.data)
- return Response( serializer.data, status=status.HTTP_200_OK )
-
- return super(MonitoringChannelSet, self).create(request)
diff --git a/xos/onboard/ceilometer/ceilometer-onboard.yaml b/xos/onboard/ceilometer/ceilometer-onboard.yaml
deleted file mode 100644
index 82c955f..0000000
--- a/xos/onboard/ceilometer/ceilometer-onboard.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the ceilometer service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#ceilometer:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/ceilometer/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/ceilometeradmin.html, templates/sflowadmin.html
- synchronizer: synchronizer/manifest
- synchronizer_run: monitoring_channel_synchronizer.py
- tosca_resource: tosca/resources/ceilometerservice.py, tosca/resources/ceilometertenant.py, tosca/resources/sflowservice.py
- rest_tenant: subdirectory:ceilometer api/tenant/ceilometer/monitoringchannel.py
- private_key: file:///opt/xos/key_import/monitoring_channel_rsa
- public_key: file:///opt/xos/key_import/monitoring_channel_rsa.pub
-
diff --git a/xos/onboard/ceilometer/models.py b/xos/onboard/ceilometer/models.py
deleted file mode 100644
index 5285bd7..0000000
--- a/xos/onboard/ceilometer/models.py
+++ /dev/null
@@ -1,307 +0,0 @@
-from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
-from core.models.plcorebase import StrippedCharField
-import os
-from django.db import models, transaction
-from django.forms.models import model_to_dict
-from django.db.models import Q
-from operator import itemgetter, attrgetter, methodcaller
-import traceback
-from xos.exceptions import *
-from core.models import SlicePrivilege, SitePrivilege
-from sets import Set
-from urlparse import urlparse
-
-CEILOMETER_KIND = "ceilometer"
-
-class CeilometerService(Service):
- KIND = CEILOMETER_KIND
-
- class Meta:
- app_label = "ceilometer"
- verbose_name = "Ceilometer Service"
- proxy = True
-
- @property
- def ceilometer_pub_sub_url(self):
- return self.get_attribute("ceilometer_pub_sub_url", None)
-
- @ceilometer_pub_sub_url.setter
- def ceilometer_pub_sub_url(self, value):
- self.set_attribute("ceilometer_pub_sub_url", value)
-
-class MonitoringChannel(TenantWithContainer): # aka 'CeilometerTenant'
- class Meta:
- proxy = True
-
- KIND = CEILOMETER_KIND
- LOOK_FOR_IMAGES=[ #"trusty-server-multi-nic-docker", # CloudLab
- "ceilometer-trusty-server-multi-nic",
- #"trusty-server-multi-nic",
- ]
-
-
- sync_attributes = ("private_ip", "private_mac",
- "ceilometer_ip", "ceilometer_mac",
- "nat_ip", "nat_mac", "ceilometer_port",)
-
- default_attributes = {}
- def __init__(self, *args, **kwargs):
- ceilometer_services = CeilometerService.get_service_objects().all()
- if ceilometer_services:
- self._meta.get_field("provider_service").default = ceilometer_services[0].id
- super(MonitoringChannel, self).__init__(*args, **kwargs)
- self.set_attribute("use_same_instance_for_multiple_tenants", True)
-
- def can_update(self, user):
- #Allow creation of this model instances for non-admin users also
- return True
-
- def save(self, *args, **kwargs):
- if not self.creator:
- if not getattr(self, "caller", None):
- # caller must be set when creating a monitoring channel since it creates a slice
- raise XOSProgrammingError("MonitoringChannel's self.caller was not set")
- self.creator = self.caller
- if not self.creator:
- raise XOSProgrammingError("MonitoringChannel's self.creator was not set")
-
- if self.pk is None:
- #Allow only one monitoring channel per user
- channel_count = sum ( [1 for channel in MonitoringChannel.objects.filter(kind=CEILOMETER_KIND) if (channel.creator == self.creator)] )
- if channel_count > 0:
- raise XOSValidationError("Already %s channels exist for user Can only create max 1 MonitoringChannel instance per user" % str(channel_count))
-
- super(MonitoringChannel, self).save(*args, **kwargs)
- model_policy_monitoring_channel(self.pk)
-
- def delete(self, *args, **kwargs):
- self.cleanup_container()
- super(MonitoringChannel, self).delete(*args, **kwargs)
-
- @property
- def addresses(self):
- if (not self.id) or (not self.instance):
- return {}
-
- addresses = {}
- for ns in self.instance.ports.all():
- if "private" in ns.network.name.lower():
- addresses["private"] = (ns.ip, ns.mac)
- elif ("nat" in ns.network.name.lower()) or ("management" in ns.network.name.lower()):
- addresses["nat"] = (ns.ip, ns.mac)
- #TODO: Do we need this client_access_network. Revisit in VTN context
- #elif "ceilometer_client_access" in ns.network.labels.lower():
- # addresses["ceilometer"] = (ns.ip, ns.mac)
- return addresses
-
- @property
- def nat_ip(self):
- return self.addresses.get("nat", (None, None))[0]
-
- @property
- def nat_mac(self):
- return self.addresses.get("nat", (None, None))[1]
-
- @property
- def private_ip(self):
- return self.addresses.get("nat", (None, None))[0]
-
- @property
- def private_mac(self):
- return self.addresses.get("nat", (None, None))[1]
-
- @property
- def ceilometer_ip(self):
- return self.addresses.get("ceilometer", (None, None))[0]
-
- @property
- def ceilometer_mac(self):
- return self.addresses.get("ceilometer", (None, None))[1]
-
- @property
- def site_tenant_list(self):
- tenant_ids = Set()
- for sp in SitePrivilege.objects.filter(user=self.creator):
- site = sp.site
- for cs in site.controllersite.all():
- if cs.tenant_id:
- tenant_ids.add(cs.tenant_id)
- return tenant_ids
-
- @property
- def slice_tenant_list(self):
- tenant_ids = Set()
- for sp in SlicePrivilege.objects.filter(user=self.creator):
- slice = sp.slice
- for cs in slice.controllerslices.all():
- if cs.tenant_id:
- tenant_ids.add(cs.tenant_id)
- for slice in Slice.objects.filter(creator=self.creator):
- for cs in slice.controllerslices.all():
- if cs.tenant_id:
- tenant_ids.add(cs.tenant_id)
- if self.creator.is_admin:
- #TODO: Ceilometer publishes the SDN meters without associating to any tenant IDs.
- #For now, ceilometer code is changed to publish all such meters with tenant
- #id as "default_admin_tenant". Here add that default tenant as authorized tenant_id
- #for all admin users.
- tenant_ids.add("default_admin_tenant")
- return tenant_ids
-
- @property
- def tenant_list(self):
- return self.slice_tenant_list | self.site_tenant_list
-
- @property
- def tenant_list_str(self):
- return ", ".join(self.tenant_list)
-
- @property
- def ceilometer_port(self):
- # TODO: Find a better logic to choose unique ceilometer port number for each instance
- if not self.id:
- return None
- return 8888+self.id
-
- @property
- def ceilometer_url(self):
- if not self.private_ip:
- return None
- return "http://" + self.private_ip + ":" + str(self.ceilometer_port) + "/"
-
-def model_policy_monitoring_channel(pk):
- # TODO: this should be made in to a real model_policy
- with transaction.atomic():
- mc = MonitoringChannel.objects.select_for_update().filter(pk=pk)
- if not mc:
- return
- mc = mc[0]
- mc.manage_container()
-
-
-SFLOW_KIND = "sflow"
-SFLOW_PORT = 6343
-SFLOW_API_PORT = 33333
-
-class SFlowService(Service):
- KIND = SFLOW_KIND
-
- class Meta:
- app_label = "ceilometer"
- verbose_name = "sFlow Collection Service"
- proxy = True
-
- default_attributes = {"sflow_port": SFLOW_PORT, "sflow_api_port": SFLOW_API_PORT}
-
- sync_attributes = ("sflow_port", "sflow_api_port",)
-
- @property
- def sflow_port(self):
- return self.get_attribute("sflow_port", self.default_attributes["sflow_port"])
-
- @sflow_port.setter
- def sflow_port(self, value):
- self.set_attribute("sflow_port", value)
-
- @property
- def sflow_api_port(self):
- return self.get_attribute("sflow_api_port", self.default_attributes["sflow_api_port"])
-
- @sflow_api_port.setter
- def sflow_api_port(self, value):
- self.set_attribute("sflow_api_port", value)
-
- def get_instance(self):
- if self.slices.exists():
- slice = self.slices.all()[0]
- if slice.instances.exists():
- return slice.instances.all()[0]
-
- return None
-
- @property
- def sflow_api_url(self):
- if not self.get_instance():
- return None
- return "http://" + self.get_instance().get_ssh_ip() + ":" + str(self.sflow_api_port) + "/"
-
-class SFlowTenant(Tenant):
- class Meta:
- proxy = True
-
- KIND = SFLOW_KIND
-
- sync_attributes = ("listening_endpoint", )
-
- default_attributes = {}
- def __init__(self, *args, **kwargs):
- sflow_services = SFlowService.get_service_objects().all()
- if sflow_services:
- self._meta.get_field("provider_service").default = sflow_services[0].id
- super(SFlowTenant, self).__init__(*args, **kwargs)
-
- @property
- def creator(self):
- from core.models import User
- if getattr(self, "cached_creator", None):
- return self.cached_creator
- creator_id=self.get_attribute("creator_id")
- if not creator_id:
- return None
- users=User.objects.filter(id=creator_id)
- if not users:
- return None
- user=users[0]
- self.cached_creator = users[0]
- return user
-
- @creator.setter
- def creator(self, value):
- if value:
- value = value.id
- if (value != self.get_attribute("creator_id", None)):
- self.cached_creator=None
- self.set_attribute("creator_id", value)
-
- @property
- def listening_endpoint(self):
- return self.get_attribute("listening_endpoint", None)
-
- @listening_endpoint.setter
- def listening_endpoint(self, value):
- if urlparse(value).scheme != 'udp':
- raise XOSProgrammingError("SFlowTenant: Only UDP listening endpoint URLs are accepted...valid syntax is: udp://ip:port")
- self.set_attribute("listening_endpoint", value)
-
- def save(self, *args, **kwargs):
- if not self.creator:
- if not getattr(self, "caller", None):
- # caller must be set when creating a SFlow tenant since it creates a slice
- raise XOSProgrammingError("SFlowTenant's self.caller was not set")
- self.creator = self.caller
- if not self.creator:
- raise XOSProgrammingError("SFlowTenant's self.creator was not set")
-
- if not self.listening_endpoint:
- raise XOSProgrammingError("SFlowTenant's self.listening_endpoint was not set")
-
- if self.pk is None:
- #Allow only one sflow channel per user and listening_endpoint
- channel_count = sum ( [1 for channel in SFlowTenant.objects.filter(kind=SFLOW_KIND) if ((channel.creator == self.creator) and (channel.listening_endpoint == self.listening_endpoint))] )
- if channel_count > 0:
- raise XOSValidationError("Already %s sflow channels exist for user Can only create max 1 tenant per user and listening endpoint" % str(channel_count))
-
- super(SFlowTenant, self).save(*args, **kwargs)
-
- def delete(self, *args, **kwargs):
- super(SFlowTenant, self).delete(*args, **kwargs)
-
- @property
- def authorized_resource_list(self):
- return ['all']
-
- @property
- def authorized_resource_list_str(self):
- return ", ".join(self.authorized_resource_list)
-
diff --git a/xos/onboard/ceilometer/synchronizer/files/docker.list b/xos/onboard/ceilometer/synchronizer/files/docker.list
deleted file mode 100644
index 0ee9ae0..0000000
--- a/xos/onboard/ceilometer/synchronizer/files/docker.list
+++ /dev/null
@@ -1 +0,0 @@
-deb https://get.docker.com/ubuntu docker main
diff --git a/xos/onboard/ceilometer/synchronizer/files/vm-resolv.conf b/xos/onboard/ceilometer/synchronizer/files/vm-resolv.conf
deleted file mode 100644
index cae093a..0000000
--- a/xos/onboard/ceilometer/synchronizer/files/vm-resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 8.8.8.8
diff --git a/xos/onboard/ceilometer/synchronizer/manifest b/xos/onboard/ceilometer/synchronizer/manifest
deleted file mode 100644
index c679225..0000000
--- a/xos/onboard/ceilometer/synchronizer/manifest
+++ /dev/null
@@ -1,26 +0,0 @@
-templates/Dockerfile.monitoring_channel
-templates/ceilometer_proxy_config.j2
-templates/Dockerfile.sflowpubsub
-templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
-templates/sflow_pub_sub/README
-templates/sflow_pub_sub/sflow_sub_records.py
-templates/sflow_pub_sub/start_sflow_pub_sub
-templates/sflow_pub_sub/sflow_pub_sub_main.py
-templates/sflow_pub_sub/sflow_pub_sub_config.j2
-templates/start-monitoring-channel.sh.j2
-templates/monitoring-channel.conf.j2
-templates/ceilometer_proxy_server.py
-templates/start_ceilometer_proxy
-manifest
-monitoring_channel_synchronizer_config
-steps/sync_sflowtenant.yaml
-steps/sync_sflowtenant.py
-steps/sync_monitoringchannel.yaml
-steps/sync_monitoringchannel.py
-steps/sync_sflowservice.yaml
-steps/sync_sflowservice.py
-files/vm-resolv.conf
-files/docker.list
-model-deps
-supervisor/monitoring_channel_observer.conf
-monitoring_channel_synchronizer.py
diff --git a/xos/onboard/ceilometer/synchronizer/model-deps b/xos/onboard/ceilometer/synchronizer/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/onboard/ceilometer/synchronizer/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/onboard/ceilometer/synchronizer/monitoring_channel_synchronizer.py b/xos/onboard/ceilometer/synchronizer/monitoring_channel_synchronizer.py
deleted file mode 100755
index 84bec4f..0000000
--- a/xos/onboard/ceilometer/synchronizer/monitoring_channel_synchronizer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../synchronizers/base/xos-synchronizer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/onboard/ceilometer/synchronizer/monitoring_channel_synchronizer_config b/xos/onboard/ceilometer/synchronizer/monitoring_channel_synchronizer_config
deleted file mode 100644
index 8c6578f..0000000
--- a/xos/onboard/ceilometer/synchronizer/monitoring_channel_synchronizer_config
+++ /dev/null
@@ -1,41 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=monitoring_channel
-dependency_graph=/opt/xos/synchronizers/monitoring_channel/model-deps
-steps_dir=/opt/xos/synchronizers/monitoring_channel/steps
-sys_dir=/opt/xos/synchronizers/monitoring_channel/sys
-deleters_dir=/opt/xos/synchronizers/monitoring_channel/deleters
-log_file=console
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-proxy_ssh=False
-full_setup=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/onboard/ceilometer/synchronizer/steps/sync_monitoringchannel.py b/xos/onboard/ceilometer/synchronizer/steps/sync_monitoringchannel.py
deleted file mode 100644
index 2c0ba10..0000000
--- a/xos/onboard/ceilometer/synchronizer/steps/sync_monitoringchannel.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice
-from services.ceilometer.models import CeilometerService, MonitoringChannel
-from xos.logger import Logger, logging
-
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncMonitoringChannel(SyncInstanceUsingAnsible):
- provides=[MonitoringChannel]
- observes=MonitoringChannel
- requested_interval=0
- template_name = "sync_monitoringchannel.yaml"
- service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
-
- def __init__(self, *args, **kwargs):
- super(SyncMonitoringChannel, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = MonitoringChannel.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
- else:
- objs = MonitoringChannel.get_deleted_tenant_objects()
-
- return objs
-
- def get_extra_attributes(self, o):
- # This is a place to include extra attributes. In the case of Monitoring Channel, we need to know
- # 1) Allowed tenant ids
- # 2) Ceilometer API service endpoint URL if running externally
- # 3) Credentials to access Ceilometer API service
-
- ceilometer_services = CeilometerService.get_service_objects().filter(id=o.provider_service.id)
- if not ceilometer_services:
- raise "No associated Ceilometer service"
- ceilometer_service = ceilometer_services[0]
- ceilometer_pub_sub_url = ceilometer_service.ceilometer_pub_sub_url
- if not ceilometer_pub_sub_url:
- ceilometer_pub_sub_url = ''
- instance = self.get_instance(o)
-
- try:
- full_setup = Config().observer_full_setup
- except:
- full_setup = True
-
- fields = {"unique_id": o.id,
- "allowed_tenant_ids": o.tenant_list,
- "auth_url":instance.controller.auth_url,
- "admin_user":instance.controller.admin_user,
- "admin_password":instance.controller.admin_password,
- "admin_tenant":instance.controller.admin_tenant,
- "ceilometer_pub_sub_url": ceilometer_pub_sub_url,
- "full_setup": full_setup}
-
- return fields
-
- def run_playbook(self, o, fields):
- #ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
- #quick_update = (o.last_ansible_hash == ansible_hash)
-
- #if quick_update:
- # logger.info("quick_update triggered; skipping ansible recipe")
- #else:
- super(SyncMonitoringChannel, self).run_playbook(o, fields)
-
- #o.last_ansible_hash = ansible_hash
-
- def map_delete_inputs(self, o):
- fields = {"unique_id": o.id,
- "delete": True}
- return fields
diff --git a/xos/onboard/ceilometer/synchronizer/steps/sync_monitoringchannel.yaml b/xos/onboard/ceilometer/synchronizer/steps/sync_monitoringchannel.yaml
deleted file mode 100644
index ca72c5f..0000000
--- a/xos/onboard/ceilometer/synchronizer/steps/sync_monitoringchannel.yaml
+++ /dev/null
@@ -1,145 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: ubuntu
- sudo: yes
- vars:
- unique_id: {{ unique_id }}
- auth_url: {{ auth_url }}
- admin_user: {{ admin_user }}
- admin_password: {{ admin_password }}
- admin_tenant: {{ admin_tenant }}
- shared_lan_ip: {{ private_ip }}
- shared_lan_mac: {{ private_mac }}
- headnode_flat_lan_ip: {{ rabbit_host }}
- ceilometer_client_acess_ip: {{ ceilometer_ip }}
- ceilometer_client_acess_mac: {{ ceilometer_mac }}
- ceilometer_host_port: {{ ceilometer_port }}
- ceilometer_pub_sub_url: {{ ceilometer_pub_sub_url }}
- allowed_tenant_ids:
- {% for allowed_tenant_id in allowed_tenant_ids %}
- - {{ allowed_tenant_id }}
- {% endfor %}
-
- tasks:
-{% if delete %}
- - name: Remove tenant
-# FIXME: Adding dummy template action to avoid "action attribute missing in task" error
- template: src=/opt/xos/synchronizers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
- notify:
- - stop monitoring-channel
- - remove container
-{% else %}
-{% if full_setup %}
-# - name: Docker repository
-# copy: src=/opt/xos/synchronizers/monitoring_channel/files/docker.list
-# dest=/etc/apt/sources.list.d/docker.list
-#
-# - name: Import the repository key
-# apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
-#
-# - name: install Docker
-# apt: name=lxc-docker state=present update_cache=yes
-#
-# - name: install python-setuptools
-# apt: name=python-setuptools state=present
-#
-# - name: install pip
-# easy_install: name=pip
-#
-# - name: install docker-py
-# pip: name=docker-py version=0.5.3
-#
-# - name: install Pipework
-# get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
-# dest=/usr/local/bin/pipework
-# mode=0755
-#
-# - name: Disable resolvconf service
-# shell: service resolvconf stop
-# shell: echo manual > /etc/init/resolvconf.override
-# shell: rm -f /etc/resolv.conf
-#
-# - name: Install resolv.conf
-# copy: src=/opt/xos/synchronizers/monitoring_channel/files/vm-resolv.conf
-# dest=/etc/resolv.conf
-{% endif %}
-
-# FIXME: Temporary workaround to delete the monitoring-channel_ceilometer_proxy_config file always
-# to trigger ansible notify handlers in the following task.
-# Due to some issue, the ansible "changed" flag is set to false even though the
-# ceilometer configuration file has changed, so the configuration change is not
-# reflected in the ceilometer containers
-# - file: path=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config state=absent
-
- - name: ceilometer proxy config
- template: src=/opt/xos/synchronizers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
- notify:
- - copy ceilo-config-file
- - restart monitoring-channel container
-# - stop monitoring-channel
-# - remove container
-# - start monitoring-channel
-
- - name: Monitoring channel upstart
- template: src=/opt/xos/synchronizers/monitoring_channel/templates/monitoring-channel.conf.j2 dest=/etc/init/monitoring-channel-{{ unique_id }}.conf
-
- - name: Monitoring channel startup script
- template: src=/opt/xos/synchronizers/monitoring_channel/templates/start-monitoring-channel.sh.j2 dest=/usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh mode=0755
- notify:
-# - restart monitoring-channel
- - stop monitoring-channel
- - remove container
- - start monitoring-channel
-
-# - name: Start monitoring-channel container
-# docker:
-# docker_api_version: "1.18"
-# name: monitoring-channel-{{ unique_id }}
-# # was: reloaded
-# state: running
-# image: srikanthvavila/monitoring-channel
-# expose:
-# - 8000
-# ports:
-# - "{{ ceilometer_port }}:8000"
-# volumes:
-# - /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config
-#
-# - name: Get Docker IP
-# #TODO: copy dockerip.sh to monitoring service synchronizer
-# script: /opt/xos/synchronizers/onos/scripts/dockerip.sh monitoring-channel-{{ unique_id }}
-# register: dockerip
-#
-# - name: Wait for Monitoring channel to come up
-# wait_for:
-# host={{ '{{' }} dockerip.stdout {{ '}}' }}
-# port={{ '{{' }} item {{ '}}' }}
-# state=present
-# with_items:
-# - {{ ceilometer_port }}
-# These are samples, not necessary for correct function of demo
-
- - name: Make sure Monitoring channel service is running
- service: name=monitoring-channel-{{ unique_id }} state=started
-{% endif %}
-
- handlers:
- - name: copy ceilo-config-file
- shell: docker cp /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config monitoring-channel-{{ unique_id }}:/usr/local/share/ceilometer_proxy_config
-
- - name: restart monitoring-channel container
- shell: docker restart monitoring-channel-{{ unique_id }}
-
- - name: restart monitoring-channel
- shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service monitoring-channel-{{ unique_id }} start
-
- - name: stop monitoring-channel
- service: name=monitoring-channel-{{ unique_id }} state=stopped
-
- - name: remove container
- docker: name=monitoring-channel-{{ unique_id }} state=absent image=monitoring-channel
-
- - name: start monitoring-channel
- service: name=monitoring-channel-{{ unique_id }} state=started
diff --git a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowservice.py b/xos/onboard/ceilometer/synchronizer/steps/sync_sflowservice.py
deleted file mode 100644
index 154c5ab..0000000
--- a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowservice.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice
-from services.ceilometer.models import SFlowService
-from xos.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncSFlowService(SyncInstanceUsingAnsible):
- provides=[SFlowService]
- observes=SFlowService
- requested_interval=0
- template_name = "sync_sflowservice.yaml"
- service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
-
- def __init__(self, *args, **kwargs):
- super(SyncSFlowService, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = SFlowService.get_service_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
- else:
- objs = SFlowService.get_deleted_service_objects()
-
- return objs
-
- def get_instance(self, o):
- # We assume the SFlow service owns a slice, so pick one of the instances
- # inside that slice to sync to.
-
- serv = o
-
- if serv.slices.exists():
- slice = serv.slices.all()[0]
- if slice.instances.exists():
- return slice.instances.all()[0]
-
- return None
-
- def get_extra_attributes(self, o):
- fields={}
- fields["instance_hostname"] = self.get_instance(o).instance_name.replace("_","-")
- fields["sflow_port"] = o.sflow_port
- fields["sflow_api_port"] = o.sflow_api_port
- fields["sflow_container"] = "sflowpubsub"
- return fields
-
- def sync_fields(self, o, fields):
- # the super causes the playbook to be run
- super(SyncSFlowService, self).sync_fields(o, fields)
-
- def run_playbook(self, o, fields):
- instance = self.get_instance(o)
- if (instance.isolation=="container"):
- # If the instance is already a container, there is nothing to
- # install, so skip the playbook.
- return
- super(SyncSFlowService, self).run_playbook(o, fields)
-
- def delete_record(self, m):
- pass
diff --git a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowservice.yaml b/xos/onboard/ceilometer/synchronizer/steps/sync_sflowservice.yaml
deleted file mode 100644
index 8d853a2..0000000
--- a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowservice.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: ubuntu
- sudo: yes
- vars:
- sflow_port: {{ sflow_port }}
- sflow_api_port: {{ sflow_api_port }}
-
- tasks:
-
- - name: Fix /etc/hosts
- lineinfile:
- dest=/etc/hosts
- regexp="127.0.0.1 localhost"
- line="127.0.0.1 localhost {{ instance_hostname }}"
-
- - name: Add repo key
- apt_key:
- keyserver=hkp://pgp.mit.edu:80
- id=58118E89F3A912897C070ADBF76221572C52609D
-
- - name: Install Docker repo
- apt_repository:
- repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
- state=present
-
- - name: Install Docker
- apt:
- name={{ '{{' }} item {{ '}}' }}
- state=latest
- update_cache=yes
- with_items:
- - docker-engine
- - python-pip
- - python-httplib2
-
- - name: Install docker-py
- pip:
- name=docker-py
- state=latest
-
- - name: sflow pub-sub config
- template: src=/opt/xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_pub_sub_config.j2 dest=/usr/local/share/sflow_pub_sub.conf mode=0777
-
- - name: Start SFLOW pub-sub container
- docker:
- docker_api_version: "1.18"
- name: {{ sflow_container }}
- # was: reloaded
- state: running
- image: srikanthvavila/sflowpubsub
- expose:
- - {{ sflow_api_port }}
- - {{ sflow_port }}/udp
- ports:
- - "{{ sflow_port }}:{{ sflow_port }}/udp"
- - "{{ sflow_api_port }}:{{ sflow_api_port }}"
- volumes:
- - /usr/local/share/sflow_pub_sub.conf:/usr/local/share/sflow_pub_sub/sflow_pub_sub.conf
-
- - name: Get Docker IP
- #TODO: copy dockerip.sh to monitoring service synchronizer
- script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ sflow_container }}
- register: dockerip
-
- - name: Wait for SFlow service to come up
- wait_for:
- host={{ '{{' }} dockerip.stdout {{ '}}' }}
- port={{ '{{' }} item {{ '}}' }}
- state=present
- with_items:
- - {{ sflow_api_port }}
diff --git a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowtenant.py b/xos/onboard/ceilometer/synchronizer/steps/sync_sflowtenant.py
deleted file mode 100644
index a15fa54..0000000
--- a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowtenant.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import hashlib
-import os
-import socket
-import socket
-import sys
-import base64
-import time
-import re
-import json
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice, ControllerSlice, ControllerUser
-from services.ceilometer.models import SFlowService, SFlowTenant
-from xos.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncSFlowTenant(SyncInstanceUsingAnsible):
- provides=[SFlowTenant]
- observes=SFlowTenant
- requested_interval=0
- template_name = "sync_sflowtenant.yaml"
- service_key_name = "/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key"
-
- def __init__(self, *args, **kwargs):
- super(SyncSFlowTenant, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = SFlowTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
- else:
- objs = SFlowTenant.get_deleted_tenant_objects()
-
- return objs
-
- def get_sflow_service(self, o):
- sflows = SFlowService.get_service_objects().filter(id=o.provider_service.id)
- if not sflows:
- raise "No associated SFlow service"
-
- return sflows[0]
-
- def get_instance(self, o):
- # We assume the SFlow service owns a slice, so pick one of the instances
- # inside that slice to sync to.
-
- serv = self.get_sflow_service(o)
-
- if serv.slices.exists():
- slice = serv.slices.all()[0]
- if slice.instances.exists():
- return slice.instances.all()[0]
-
- return None
-
- def get_extra_attributes(self, o):
- instance = self.get_instance(o)
-
- fields={}
- fields["sflow_api_base_url"] = self.get_sflow_service(o).sflow_api_url
- fields["sflow_api_port"] = self.get_sflow_service(o).sflow_api_port
- fields["listening_endpoint"] = o.listening_endpoint
- fields["sflow_container"] = "sflowpubsub"
-
- return fields
-
- def sync_fields(self, o, fields):
- # the super causes the playbook to be run
- super(SyncSFlowTenant, self).sync_fields(o, fields)
-
- def run_playbook(self, o, fields):
- super(SyncSFlowTenant, self).run_playbook(o, fields)
-
- def delete_record(self, m):
- pass
diff --git a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowtenant.yaml b/xos/onboard/ceilometer/synchronizer/steps/sync_sflowtenant.yaml
deleted file mode 100644
index 701ce5c..0000000
--- a/xos/onboard/ceilometer/synchronizer/steps/sync_sflowtenant.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: {{ username }}
- sudo: yes
-
- tasks:
-
- - name: Get Docker IP
- #TODO: copy dockerip.sh to monitoring service synchronizer
- script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ sflow_container }}
- register: sflowserviceaddr
-
- - name: Wait for SFlow service to come up
- wait_for:
- host={{ '{{' }} sflowserviceaddr.stdout {{ '}}' }}
- port={{ '{{' }} item {{ '}}' }}
- state=present
- with_items:
- - {{ sflow_api_port }}
-
- - name: Invoke SFlow service REST API to subscribe
- uri:
- url: http://{{ '{{' }} sflowserviceaddr.stdout {{ '}}' }}:{{ sflow_api_port }}/subscribe
- body: "{{ listening_endpoint }}"
- body_format: raw
- method: POST
diff --git a/xos/onboard/ceilometer/synchronizer/supervisor/monitoring_channel_observer.conf b/xos/onboard/ceilometer/synchronizer/supervisor/monitoring_channel_observer.conf
deleted file mode 100644
index 1c2dd42..0000000
--- a/xos/onboard/ceilometer/synchronizer/supervisor/monitoring_channel_observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:monitoring_channel_observer]
-command=python /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer.py -C /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config
diff --git a/xos/onboard/ceilometer/synchronizer/templates/Dockerfile.monitoring_channel b/xos/onboard/ceilometer/synchronizer/templates/Dockerfile.monitoring_channel
deleted file mode 100644
index 45defb8..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/Dockerfile.monitoring_channel
+++ /dev/null
@@ -1,26 +0,0 @@
-FROM ubuntu:14.04.2
-MAINTAINER Andy Bavier <acb@cs.princeton.edu>
-
-# XXX Workaround for docker bug:
-# https://github.com/docker/docker/issues/6345
-# Kernel 3.15 breaks docker, use the line below as a workaround
-# until there is a fix
-RUN ln -s -f /bin/true /usr/bin/chfn
-# XXX End workaround
-
-# Install.
-RUN apt-get update && apt-get install -y \
- python-pip \
- python-dev
-
-RUN pip install web.py
-RUN pip install wsgilog
-RUN pip install python-ceilometerclient
-RUN mkdir -p /usr/local/share
-ADD ceilometer_proxy_server.py /usr/local/share/
-RUN chmod +x /usr/local/share/ceilometer_proxy_server.py
-ADD start_ceilometer_proxy /usr/local/sbin/
-RUN chmod +x /usr/local/sbin/start_ceilometer_proxy
-EXPOSE 8000
-WORKDIR /usr/local/share
-CMD /usr/local/sbin/start_ceilometer_proxy
diff --git a/xos/onboard/ceilometer/synchronizer/templates/Dockerfile.sflowpubsub b/xos/onboard/ceilometer/synchronizer/templates/Dockerfile.sflowpubsub
deleted file mode 100644
index c9025ee..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/Dockerfile.sflowpubsub
+++ /dev/null
@@ -1,22 +0,0 @@
-FROM ubuntu:14.04.2
-MAINTAINER Andy Bavier <acb@cs.princeton.edu>
-
-# XXX Workaround for docker bug:
-# https://github.com/docker/docker/issues/6345
-# Kernel 3.15 breaks docker, use the line below as a workaround
-# until there is a fix
-RUN ln -s -f /bin/true /usr/bin/chfn
-# XXX End workaround
-
-# Install.
-RUN apt-get update && apt-get install -y \
- python-pip \
- python-dev
-
-RUN pip install Flask
-RUN mkdir -p /usr/local/share/
-ADD sflow_pub_sub /usr/local/share/sflow_pub_sub
-RUN chmod +x /usr/local/share/sflow_pub_sub/sflow_pub_sub_main.py
-RUN chmod +x /usr/local/share/sflow_pub_sub/start_sflow_pub_sub
-WORKDIR /usr/local/share/sflow_pub_sub/
-CMD /usr/local/share/sflow_pub_sub/start_sflow_pub_sub
diff --git a/xos/onboard/ceilometer/synchronizer/templates/ceilometer_proxy_config.j2 b/xos/onboard/ceilometer/synchronizer/templates/ceilometer_proxy_config.j2
deleted file mode 100644
index bd6c521..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/ceilometer_proxy_config.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is autogenerated by the monitoring-channel observer
-# It contains a list of attributes to be used by ceilometer proxy web server
-# syntax: key=value
-
-[default]
-auth_url={{ auth_url }}
-admin_user={{ admin_user }}
-admin_tenant={{ admin_tenant }}
-admin_password={{ admin_password }}
-ceilometer_pub_sub_url={{ ceilometer_pub_sub_url }}
-
-[allowed_tenants]
-{% if allowed_tenant_ids %}
-{% for tenant_id in allowed_tenant_ids %}
-{{ tenant_id }}
-{% endfor %}
-{% endif %}
diff --git a/xos/onboard/ceilometer/synchronizer/templates/ceilometer_proxy_server.py b/xos/onboard/ceilometer/synchronizer/templates/ceilometer_proxy_server.py
deleted file mode 100644
index c81b941..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/ceilometer_proxy_server.py
+++ /dev/null
@@ -1,294 +0,0 @@
-#!/usr/bin/env python
-import web
-import ConfigParser
-import io
-import json
-from ceilometerclient import client
-import logging
-import urllib
-import urllib2
-from urlparse import urlparse
-from wsgilog import WsgiLog
-
-web.config.debug=False
-
-logfile = "ceilometer_proxy_server.log"
-level=logging.INFO
-logger=logging.getLogger('ceilometer_proxy_server')
-logger.setLevel(level)
-handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
-logger.addHandler(handler)
-
-class FileLog(WsgiLog):
- def __init__(self, application):
- WsgiLog.__init__(
- self,
- application,
- logformat = '%(message)s',
- tofile = True,
- toprint = True,
- prnlevel = level,
- file = logfile,
- backups =1
- )
- def __call__(self, environ, start_response):
- def hstart_response(status, response_headers, *args):
- out = start_response(status, response_headers, *args)
- try:
- logline=environ["SERVER_PROTOCOL"]+" "+environ["REQUEST_METHOD"]+" "+environ["REQUEST_URI"]+" - "+status
- except Exception as err:
- logline="Could not log <%s> due to err <%s>" % (str(environ), err)
- logger.info(logline)
-
- return out
-
- return super(FileLog, self).__call__(environ, hstart_response)
-
-#TODOs:
-#-See if we can avoid using python-ceilometerclient and instead use the REST calls directly with AuthToken
-#
-urls = (
- r'^/v2/meters$', 'meter_list',
- r'^/v2/meters/(?P<meter_name>[A-Za-z0-9_:.\-]+)/statistics$', 'statistics_list',
- r'^/v2/samples$', 'sample_list',
- r'^/v2/resources$', 'resource_list',
- r'^/v2/subscribe$', 'pubsub_handler',
-)
-
-app = web.application(urls, globals())
-
-config = None
-ceilometer_client = None
-
-
-def parse_ceilometer_proxy_config():
- global config
- config = ConfigParser.RawConfigParser(allow_no_value=True)
- config.read('ceilometer_proxy_config')
-
-def ceilometerclient():
- global config, ceilometer_client
- if ceilometer_client:
- return ceilometer_client
-
- if not config:
- parse_ceilometer_proxy_config()
-
- keystone = {}
- keystone['os_username']=config.get('default','admin_user')
- keystone['os_password']=config.get('default','admin_password')
- keystone['os_auth_url']=config.get('default','auth_url')
- keystone['os_tenant_name']=config.get('default','admin_tenant')
- ceilometer_client = client.get_client(2,**keystone)
- logger.info('ceilometer get_client is successful')
- return ceilometer_client
-
-def make_query(user_id=None, tenant_id=None, resource_id=None,
- user_ids=None, tenant_ids=None, resource_ids=None):
- """Returns query built from given parameters.
-
- This query can be then used for querying resources, meters and
- statistics.
-
- :Parameters:
- - `user_id`: user_id, has a priority over list of ids
- - `tenant_id`: tenant_id, has a priority over list of ids
- - `resource_id`: resource_id, has a priority over list of ids
- - `user_ids`: list of user_ids
- - `tenant_ids`: list of tenant_ids
- - `resource_ids`: list of resource_ids
- """
- user_ids = user_ids or []
- tenant_ids = tenant_ids or []
- resource_ids = resource_ids or []
-
- query = []
- if user_id:
- user_ids = [user_id]
- for u_id in user_ids:
- query.append({"field": "user_id", "op": "eq", "value": u_id})
-
- if tenant_id:
- tenant_ids = [tenant_id]
- for t_id in tenant_ids:
- query.append({"field": "project_id", "op": "eq", "value": t_id})
-
- if resource_id:
- resource_ids = [resource_id]
- for r_id in resource_ids:
- query.append({"field": "resource_id", "op": "eq", "value": r_id})
-
- return query
-
-def filter_query_params(query_params):
- new_query=[]
- i=0
- user_specified_tenants=[]
- for field in query_params['q.field']:
- if (field != 'project_id') and (field != 'project'):
- query = {}
- query['field']=field
- if query_params['q.op'][i] != '':
- query['op']=query_params['q.op'][i]
- query['value']=query_params['q.value'][i]
- new_query.append(query)
- else:
- user_specified_tenants.append(query_params['q.value'][i])
- i=i+1
- return new_query,user_specified_tenants
-
-class meter_list:
- def GET(self):
- global config
- keyword_args = {
- "q.field": [],
- "q.op": [],
- "q.type": [],
- "q.value": [],
- }
- query_params = web.input(**keyword_args)
- new_query, user_specified_tenants = filter_query_params(query_params)
-
- client = ceilometerclient()
- meters=[]
- for (k,v) in config.items('allowed_tenants'):
- if user_specified_tenants and (k not in user_specified_tenants):
- continue
- final_query=[]
- final_query.extend(new_query)
- query = make_query(tenant_id=k)
- final_query.extend(query)
- logger.debug('final query=%s',final_query)
- results = client.meters.list(q=final_query)
- meters.extend(results)
- return json.dumps([ob._info for ob in meters])
-
-class statistics_list:
- def GET(self, meter_name):
- global config
- keyword_args = {
- "q.field": [],
- "q.op": [],
- "q.type": [],
- "q.value": [],
- "period": None
- }
- query_params = web.input(**keyword_args)
- new_query, user_specified_tenants = filter_query_params(query_params)
-
- client = ceilometerclient()
- period = query_params.period
- statistics = []
- for (k,v) in config.items('allowed_tenants'):
- if user_specified_tenants and (k not in user_specified_tenants):
- continue
- final_query=[]
- final_query.extend(new_query)
- query = make_query(tenant_id=k)
- final_query.extend(query)
- logger.debug('final query=%s',final_query)
- results = client.statistics.list(meter_name=meter_name, q=final_query, period=period)
- statistics.extend(results)
- return json.dumps([ob._info for ob in statistics])
-
-class sample_list:
- def GET(self):
- global config
- keyword_args = {
- "q.field": [],
- "q.op": [],
- "q.type": [],
- "q.value": [],
- "limit": None,
- }
- query_params = web.input(**keyword_args)
- new_query, user_specified_tenants = filter_query_params(query_params)
-
- client = ceilometerclient()
- limit=query_params.limit
- samples=[]
- for (k,v) in config.items('allowed_tenants'):
- if user_specified_tenants and (k not in user_specified_tenants):
- continue
- final_query=[]
- final_query.extend(new_query)
- query = make_query(tenant_id=k)
- final_query.extend(query)
- logger.debug('final query=%s',final_query)
- results = client.new_samples.list(q=final_query,limit=limit)
- samples.extend(results)
- return json.dumps([ob._info for ob in samples])
-
-class resource_list:
- def GET(self):
- global config
- keyword_args = {
- "q.field": [],
- "q.op": [],
- "q.type": [],
- "q.value": [],
- "limit": None,
- "links": None,
- }
- query_params = web.input(**keyword_args)
- new_query, user_specified_tenants = filter_query_params(query_params)
-
- client = ceilometerclient()
- limit=query_params.limit
- links=query_params.links
- resources=[]
- for (k,v) in config.items('allowed_tenants'):
- if user_specified_tenants and (k not in user_specified_tenants):
- continue
- final_query=[]
- final_query.extend(new_query)
- query = make_query(tenant_id=k)
- final_query.extend(query)
- logger.debug('final query=%s',final_query)
- results = client.resources.list(q=final_query, limit=limit, links=links)
- resources.extend(results)
- return json.dumps([ob._info for ob in resources])
-
-class pubsub_handler:
- def POST(self):
- global config
- parse_ceilometer_proxy_config()
- ceilometer_pub_sub_url = config.get('default', 'ceilometer_pub_sub_url')
- url = urlparse(ceilometer_pub_sub_url)
- if (not url.scheme) or (not url.netloc):
- raise Exception("Ceilometer PUB/SUB URL not set")
- ceilometer_pub_sub_url = url.scheme + "://" + url.netloc + "/subscribe"
- data_str = unicode(web.data(),'iso-8859-1')
- post_data = json.loads(data_str)
- final_query=[]
- for (k,v) in config.items('allowed_tenants'):
- query = make_query(tenant_id=k)
- final_query.extend(query)
- if not final_query:
- raise Exception("Not allowed to subscribe to any meters")
- post_data["query"] = final_query
- #TODO: The PUB/SUB url needs to be read from config
- put_request = urllib2.Request(ceilometer_pub_sub_url, json.dumps(post_data))
- put_request.get_method = lambda: 'SUB'
- put_request.add_header('Content-Type', 'application/json')
- response = urllib2.urlopen(put_request)
- response_text = response.read()
- return json.dumps(response_text)
-
- def DELETE(self):
- ceilometer_pub_sub_url = config.get('default', 'ceilometer_pub_sub_url')
- url = urlparse(ceilometer_pub_sub_url)
- if (not url.scheme) or (not url.netloc):
- raise Exception("Ceilometer PUB/SUB URL not set")
- ceilometer_pub_sub_url = url.scheme + "://" + url.netloc + "/unsubscribe"
- data_str = web.data()
- #TODO: The PUB/SUB url needs to be read from config
- put_request = urllib2.Request(ceilometer_pub_sub_url, data_str)
- put_request.get_method = lambda: 'UNSUB'
- put_request.add_header('Content-Type', 'application/json')
- response = urllib2.urlopen(put_request)
- response_text = response.read()
- return json.dumps(response_text)
-
-if __name__ == "__main__":
- app.run(FileLog)
diff --git a/xos/onboard/ceilometer/synchronizer/templates/monitoring-channel.conf.j2 b/xos/onboard/ceilometer/synchronizer/templates/monitoring-channel.conf.j2
deleted file mode 100644
index eb937ac..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/monitoring-channel.conf.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for Monitoring channel
-description "Upstart script for Monitoring channel container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
- /usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh
-end script
diff --git a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/README b/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/README
deleted file mode 100644
index ee8ad9b..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/README
+++ /dev/null
@@ -1,37 +0,0 @@
-
-Subscribe-Publish Framework:
-1.Command to install the Flask web server framework:
- sudo pip install Flask
-
- Along with Flask we need the following packages:
- msgpack
- fnmatch
- operator
- logging
- oslo_utils
- ConfigParser
-
-2.Files: i.sub_main.py
- ii.pubrecords.py
- iii.pub_sub.conf
-
-3.Command to start the server:
- #python sub_main.py
-4.Command for subscription:
- i.app_id:Application ID, should be unique.
- ii.target:
- Presently only udp is supported.
- a.udp:<ip:portno>
- b.kafka:<kafkaip:kafkaport>
- iii.sub_info:Subscription notifications, e.g. cpu_util,cpu_*
- iv.query:
- The following information needs to be provided as part of the query.
- a.field:fields such as user id, project id etc.,
- b.op:"eq","gt","lt" etc.,
- c.value:value of the fields.
- Example:
- curl -i -H "Content-Type: application/json" -X SUB -d '{"app_id":"10","target":"udp://10.11.10.1:5006","sub_info":"cpu_util","query":[{"field":"user_id","op":"eq","value":"e1271a86bd4e413c87248baf2e5f01e0"},{"field":"project_id","op":"eq","value":"b1a3bf16d2014b47be9aefea88087318"},{"field":"resource_id","op":"eq","value":"658cd03f-d0f0-4f55-9f48-39e7222a8646"}]}' -L http://10.11.10.1:4455/subscribe
-
-5.Command for unsubscription:
- For unsubscription only the app_id is needed.
- curl -i -H "Content-Type: application/json" -X UNSUB -d '{"app_id":"10"}' http://10.11.10.1:4455/unsubscribe
diff --git a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample b/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
deleted file mode 100644
index 40b5bf5..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sample_sflow_pub_sub.conf_sample
+++ /dev/null
@@ -1,11 +0,0 @@
-[LOGGING]
-level = DEBUG
-filename = sflow_pub_sub.log
-
-[WEB_SERVER]
-webserver_host = 0.0.0.0
-webserver_port = 33333
-
-[SFLOW]
-listening_ip_addr = 0.0.0.0
-listening_port = 6343
diff --git a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2 b/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2
deleted file mode 100644
index 1c5c88c..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_config.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file autogenerated by sflow service synchronizer
-# It contains a list of attributes to be used by sflow service
-# syntax: key=value
-
-[LOGGING]
-level = DEBUG
-filename = sflow_pub_sub.log
-
-[WEB_SERVER]
-webserver_host = 0.0.0.0
-webserver_port = {{ sflow_api_port }}
-
-[SFLOW]
-listening_ip_addr = 0.0.0.0
-listening_port = {{ sflow_port }}
diff --git a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py b/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py
deleted file mode 100644
index 1276721..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_pub_sub_main.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/python
-import socket,thread
-import sys
-import fnmatch
-import operator
-import logging
-import ConfigParser
-from urlparse import urlparse
-from sflow_sub_records import *
-
-from flask import request, Request, jsonify
-from flask import Flask
-from flask import make_response
-app = Flask(__name__)
-
-COMPARATORS = {
- 'gt': operator.gt,
- 'lt': operator.lt,
- 'ge': operator.ge,
- 'le': operator.le,
- 'eq': operator.eq,
- 'ne': operator.ne,
-}
-
-LEVELS = {'DEBUG': logging.DEBUG,
- 'INFO': logging.INFO,
- 'WARNING': logging.WARNING,
- 'ERROR': logging.ERROR,
- 'CRITICAL': logging.CRITICAL}
-
-_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-@app.route('/subscribe',methods=['POST'])
-def subscribe():
- logging.debug(" SUB data:%s",request.data)
- target = request.data
- parse_target=urlparse(target)
- if not parse_target.netloc:
- err_str = "Error:Invalid target format"
- logging.error("* Invalid target format")
- return err_str
-
- status = ""
- if parse_target.scheme == "udp" :
- host=parse_target.hostname
- port=parse_target.port
- scheme = parse_target.scheme
- app_ip = host
- app_port = port
-
- if host == None or port == None :
- err_str = "* Error: Invalid IP Address format"
- logging.error("* Invalid IP Address format")
- return err_str
-
- subscrip_obj=sflow_sub_record(scheme,None,app_ip,app_port,None,None)
- status = add_sflow_sub_record(subscrip_obj)
- print_sflow_sub_records()
-
- if parse_target.scheme == "kafka" :
- pass
- if parse_target.scheme == "file" :
- pass
- return status
-
-@app.route('/unsubscribe',methods=['POST'])
-def unsubscribe():
- try :
- target = request.data
- parse_target=urlparse(target)
- if not parse_target.netloc:
- err_str = "Error:Invalid target format"
- logging.error("* Invalid target format")
- return err_str
-
- status = ""
- if parse_target.scheme == "udp" :
- host=parse_target.hostname
- port=parse_target.port
- scheme = parse_target.scheme
- app_ip = host
- app_port = port
-
- delete_sflow_sub_record(app_ip, app_port)
- except Exception as e:
- logging.error("* %s",e.__str__())
- return e.__str__()
- return "UnSubscrition is sucessful! \n"
-
-@app.errorhandler(404)
-def not_found(error):
- return make_response(jsonify({'error': 'Not found'}), 404)
-
-def sflow_recv(host,port):
- udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
- udp.bind((host, port))
- logging.info("Started sflow receive thread on %s:%s",host, str(port))
-
- while True:
- data, source = udp.recvfrom(64000)
- for obj in sflow_sub_database:
- target_host = obj.ipaddress
- target_port = int(obj.portno)
- try:
- logging.debug("Replicating the sFlow data to:%s:%s",target_host, str(target_port))
- udp.sendto(data,(target_host,target_port))
- except Exception:
- logging.error ("Unable to send sFlow data to target %s:%s ",target_host,str(target_port))
- logging.warn("Exiting sflow receive thread")
-
-
-def initialize(host,port):
- thread.start_new(sflow_recv,(host,port,))
-
-if __name__ == "__main__":
-
- try:
- config = ConfigParser.ConfigParser()
- config.read('sflow_pub_sub.conf')
- webserver_host = config.get('WEB_SERVER','webserver_host')
- webserver_port = int (config.get('WEB_SERVER','webserver_port'))
- sflow_listening_ip_addr = config.get('SFLOW','listening_ip_addr')
- sflow_listening_port = int (config.get('SFLOW','listening_port'))
-
- log_level = config.get('LOGGING','level')
- log_file = config.get('LOGGING','filename')
-
- level = LEVELS.get(log_level, logging.NOTSET)
- logging.basicConfig(filename=log_file,format='%(asctime)s %(levelname)s %(message)s',\
- datefmt=_DEFAULT_LOG_DATE_FORMAT,level=level)
- except Exception as e:
- print("* Error in config file:",e.__str__())
- logging.error("* Error in config file:%s",e.__str__())
- else:
- initialize(sflow_listening_ip_addr,sflow_listening_port)
- app.run(host=webserver_host,port=webserver_port,debug=False)
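The receive loop above replicates each incoming sFlow datagram to every registered UDP target; a hedged sketch of a minimal listener on the subscriber side (address and port illustrative, matching the udp://10.11.10.1:5006 target used in the README example) is:

    # Sketch only: a minimal UDP listener acting as a replication target.
    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("10.11.10.1", 5006))
    while True:
        datagram, source = sock.recvfrom(64000)
        print("received %d bytes of sFlow data from %s" % (len(datagram), source))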
diff --git a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py b/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py
deleted file mode 100644
index f8b0038..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/sflow_sub_records.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/python
-import fnmatch
-import logging
-
-class sflow_sub_record:
- def __init__(self,scheme,app_id,app_ip,app_port,subscription_info,sub_info_filter):
- logging.debug("* Updating subscription_info ")
- self.scheme = scheme
- self.app_id = app_id
- self.ipaddress = app_ip
- self.portno = app_port
- self.subscription_info = subscription_info
- self.sub_info_filter = sub_info_filter
-
-sflow_sub_database=[]
-def add_sflow_sub_record(record):
- logging.info("* inside %s",add_sflow_sub_record.__name__)
- if not sflow_sub_database:
- logging.debug("* -----------List is Empty -------------")
- sflow_sub_database.append(record)
- logging.debug("* Subscription is successful")
- return "Subscription is successful \n"
- for x in sflow_sub_database:
- if (record.ipaddress == x.ipaddress) and (record.portno == x.portno) :
- logging.warning("* entry already exists\n")
- return "entry already exists \n"
- sflow_sub_database.append(record)
- return "Subscription is sucessful \n"
-
-def delete_sflow_sub_record(ip,port):
- logging.info("* inside %s",delete_sflow_sub_record.__name__)
- Flag = False
- for x in sflow_sub_database:
- if (ip == x.ipaddress) and (port == x.portno) :
- sflow_sub_database.remove(x)
- Flag = True
- logging.debug("* Un-Subscription is successful")
- return "Un-Subscription is successful \n"
- if not Flag :
- err_str = "No subscription exists with target: udp://" + ip + ":" + str(port) + "\n"
- logging.error(err_str)
- raise Exception (err_str)
-
-def print_sflow_sub_records():
- logging.info("* inside %s",print_sflow_sub_records.__name__)
- for obj in sflow_sub_database:
- logging.debug("* ------------------------------------------------")
- logging.debug("* scheme:%s",obj.scheme)
- logging.debug("* app_id:%s",obj.app_id)
- logging.debug("* portno:%s",obj.portno )
- logging.debug("* ipaddress:%s",obj.ipaddress)
- logging.debug("* portno:%s",obj.portno)
- logging.debug("* subscription_info:%s",obj.subscription_info)
- logging.debug("* sub_info_filter:%s",obj.sub_info_filter)
- logging.debug("* ------------------------------------------------")
-
-def get_sflow_sub_records(notif_subscription_info):
- logging.info("* inside %s",get_sflow_sub_records.__name__)
- sub_list=[]
- for obj in sflow_sub_database:
- if obj.subscription_info == notif_subscription_info:
- sub_list.append(obj)
- return sub_list
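A hedged usage sketch of the subscription-record helpers above (scheme, address and port illustrative):

    # Sketch only: exercising the record helpers defined above.
    rec = sflow_sub_record("udp", "10", "10.11.10.1", 5006, "cpu_util", None)
    add_sflow_sub_record(rec)                  # registers the target, returns a status string
    print_sflow_sub_records()                  # logs every registered record
    matches = get_sflow_sub_records("cpu_util")
    delete_sflow_sub_record("10.11.10.1", 5006)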
diff --git a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub b/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub
deleted file mode 100644
index e2edda2..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/sflow_pub_sub/start_sflow_pub_sub
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/share/sflow_pub_sub/sflow_pub_sub_main.py
diff --git a/xos/onboard/ceilometer/synchronizer/templates/start-monitoring-channel.sh.j2 b/xos/onboard/ceilometer/synchronizer/templates/start-monitoring-channel.sh.j2
deleted file mode 100755
index 4486985..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/start-monitoring-channel.sh.j2
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-function mac_to_iface {
- MAC=$1
- ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
-}
-
-function generate_mac_from_ip {
- IP=$1
- printf "02:42:%02x:%02x:%02x:%02x\n" `echo $IP|awk -F '.' '{print $1, $2, $3, $4}'`
-}
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-MONITORING_CHANNEL=monitoring-channel-{{ unique_id }}
-HEADNODEFLATLANIP={{ headnode_flat_lan_ip }}
-HOST_FORWARDING_PORT_FOR_CEILOMETER={{ ceilometer_host_port }}
-
-docker inspect $MONITORING_CHANNEL > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
- #sudo docker build -t monitoring-channel -f Dockerfile.monitoring_channel .
- #sudo docker pull srikanthvavila/monitoring-channel
-if [ -z "$HEADNODEFLATLANIP" ] || [ "$HEADNODEFLATLANIP" == "None" ]
-then
-# docker run -d --name=$MONITORING_CHANNEL --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 -v /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config srikanthvavila/monitoring-channel
- docker run -d --name=$MONITORING_CHANNEL --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
-else
-# docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 -v /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config:/usr/local/share/ceilometer_proxy_config srikanthvavila/monitoring-channel
- docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
-fi
-else
- docker start $MONITORING_CHANNEL
-fi
-
-# Set up networking via pipework
-#SHARED_LAN_IFACE=$( mac_to_iface {{ shared_lan_mac }} )
-#docker exec $MONITORING_CHANNEL ifconfig eth0 >> /dev/null || pipework $SHARED_LAN_IFACE -i eth0 $MONITORING_CHANNEL 192.168.0.1/24
-
-# Make sure VM's eth0 (hpc_client) has no IP address
-#ifconfig $HPC_IFACE 0.0.0.0
-
-# Now copy ceilometer proxy configuration to container
-#cat /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config | sudo docker exec -i $MONITORING_CHANNEL bash -c 'cat > /usr/local/share/ceilometer_proxy_config'
-docker cp /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config $MONITORING_CHANNEL:/usr/local/share/ceilometer_proxy_config
-
-# Attach to container
-docker start -a $MONITORING_CHANNEL
diff --git a/xos/onboard/ceilometer/synchronizer/templates/start_ceilometer_proxy b/xos/onboard/ceilometer/synchronizer/templates/start_ceilometer_proxy
deleted file mode 100644
index ddaa9c8..0000000
--- a/xos/onboard/ceilometer/synchronizer/templates/start_ceilometer_proxy
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/share/ceilometer_proxy_server.py 8000
diff --git a/xos/onboard/ceilometer/templates/ceilometeradmin.html b/xos/onboard/ceilometer/templates/ceilometeradmin.html
deleted file mode 100644
index 40f57e8..0000000
--- a/xos/onboard/ceilometer/templates/ceilometeradmin.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<div class = "row text-center">
- <div class="col-xs-12">
- <a class="btn btn-primary" href="/admin/ceilometer/monitoringchannel/">Monitoring Channels</a>
- </div>
-</div>
-
diff --git a/xos/onboard/ceilometer/templates/sflowadmin.html b/xos/onboard/ceilometer/templates/sflowadmin.html
deleted file mode 100644
index 3cbb333..0000000
--- a/xos/onboard/ceilometer/templates/sflowadmin.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<div class = "row text-center">
- <div class="col-xs-12">
- <a class="btn btn-primary" href="/admin/ceilometer/sflowtenant/">sFlow Tenants</a>
- </div>
-</div>
-
diff --git a/xos/onboard/ceilometer/tosca/resources/ceilometerservice.py b/xos/onboard/ceilometer/tosca/resources/ceilometerservice.py
deleted file mode 100644
index e77fa55..0000000
--- a/xos/onboard/ceilometer/tosca/resources/ceilometerservice.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from core.models import ServiceAttribute
-from services.ceilometer.models import CeilometerService
-
-from service import XOSService
-
-class XOSCeilometerService(XOSService):
- provides = "tosca.nodes.CeilometerService"
- xos_model = CeilometerService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", "ceilometer_pub_sub_url"]
-
- def set_service_attr(self, obj, prop_name, value):
- value = self.try_intrinsic_function(value)
- if value:
- attrs = ServiceAttribute.objects.filter(service=obj, name=prop_name)
- if attrs:
- attr = attrs[0]
- if attr.value != value:
- self.info("updating attribute %s" % prop_name)
- attr.value = value
- attr.save()
- else:
- self.info("adding attribute %s" % prop_name)
- ta = ServiceAttribute(service=obj, name=prop_name, value=value)
- ta.save()
-
- def postprocess(self, obj):
- props = self.nodetemplate.get_properties()
- for (k,d) in props.items():
- v = d.value
- if k.startswith("config_"):
- self.set_service_attr(obj, k, v)
- elif k.startswith("rest_"):
- self.set_service_attr(obj, k, v)
-
diff --git a/xos/onboard/ceilometer/tosca/resources/ceilometertenant.py b/xos/onboard/ceilometer/tosca/resources/ceilometertenant.py
deleted file mode 100644
index cb3a623..0000000
--- a/xos/onboard/ceilometer/tosca/resources/ceilometertenant.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-import pdb
-
-from services.ceilometer.models import MonitoringChannel, CeilometerService
-
-from xosresource import XOSResource
-
-class XOSCeilometerTenant(XOSResource):
- provides = "tosca.nodes.CeilometerTenant"
- xos_model = MonitoringChannel
- name_field = None
-
- def get_xos_args(self, throw_exception=True):
- args = super(XOSCeilometerTenant, self).get_xos_args()
-
- provider_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
- if provider_name:
- args["provider_service"] = self.get_xos_object(CeilometerService, throw_exception=throw_exception, name=provider_name)
-
- return args
-
- def get_existing_objs(self):
- args = self.get_xos_args(throw_exception=False)
- provider_service = args.get("provider", None)
- if provider_service:
- return [ self.get_xos_object(provider_service=provider_service) ]
- return []
-
- def postprocess(self, obj):
- pass
-
- def can_delete(self, obj):
- return super(XOSCeilometerTenant, self).can_delete(obj)
-
diff --git a/xos/onboard/ceilometer/tosca/resources/sflowservice.py b/xos/onboard/ceilometer/tosca/resources/sflowservice.py
deleted file mode 100644
index 272518e..0000000
--- a/xos/onboard/ceilometer/tosca/resources/sflowservice.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from core.models import ServiceAttribute
-from services.ceilometer.models import SFlowService
-
-from service import XOSService
-
-class XOSSFlowService(XOSService):
- provides = "tosca.nodes.SFlowService"
- xos_model = SFlowService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", "sflow_port", "sflow_api_port"]
-
- def set_service_attr(self, obj, prop_name, value):
- value = self.try_intrinsic_function(value)
- if value:
- attrs = ServiceAttribute.objects.filter(service=obj, name=prop_name)
- if attrs:
- attr = attrs[0]
- if attr.value != value:
- self.info("updating attribute %s" % prop_name)
- attr.value = value
- attr.save()
- else:
- self.info("adding attribute %s" % prop_name)
- ta = ServiceAttribute(service=obj, name=prop_name, value=value)
- ta.save()
-
- def postprocess(self, obj):
- props = self.nodetemplate.get_properties()
- for (k,d) in props.items():
- v = d.value
- if k.startswith("config_"):
- self.set_service_attr(obj, k, v)
- elif k.startswith("rest_"):
- self.set_service_attr(obj, k, v)
-
diff --git a/xos/onboard/exampleservice-old/admin.py b/xos/onboard/exampleservice-old/admin.py
deleted file mode 100644
index f679e4e..0000000
--- a/xos/onboard/exampleservice-old/admin.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# admin.py - ExampleService Django Admin
-
-from core.admin import ReadOnlyAwareAdmin, SliceInline
-from core.middleware import get_request
-from core.models import User
-
-from django import forms
-from django.contrib import admin
-
-from services.exampleservice.models import *
-
-class ExampleServiceForm(forms.ModelForm):
-
- class Meta:
- model = ExampleService
-
- def __init__(self, *args, **kwargs):
- super(ExampleServiceForm, self).__init__(*args, **kwargs)
-
- if self.instance:
- self.fields['service_message'].initial = self.instance.service_message
-
- def save(self, commit=True):
- self.instance.service_message = self.cleaned_data.get('service_message')
- return super(ExampleServiceForm, self).save(commit=commit)
-
-class ExampleServiceAdmin(ReadOnlyAwareAdmin):
-
- model = ExampleService
- verbose_name = SERVICE_NAME_VERBOSE
- verbose_name_plural = SERVICE_NAME_VERBOSE_PLURAL
- form = ExampleServiceForm
- inlines = [SliceInline]
-
- list_display = ('backend_status_icon', 'name', 'service_message', 'enabled')
- list_display_links = ('backend_status_icon', 'name', 'service_message' )
-
- fieldsets = [(None, {
- 'fields': ['backend_status_text', 'name', 'enabled', 'versionNumber', 'service_message', 'description',],
- 'classes':['suit-tab suit-tab-general',],
- })]
-
- readonly_fields = ('backend_status_text', )
- user_readonly_fields = ['name', 'enabled', 'versionNumber', 'description',]
-
- extracontext_registered_admins = True
-
- suit_form_tabs = (
- ('general', 'Example Service Details', ),
- ('slices', 'Slices',),
- )
-
- suit_form_includes = ((
- 'top',
- 'administration'),
- )
-
- def queryset(self, request):
- return ExampleService.get_service_objects_by_user(request.user)
-
-admin.site.register(ExampleService, ExampleServiceAdmin)
-
-class ExampleTenantForm(forms.ModelForm):
-
- class Meta:
- model = ExampleTenant
-
- creator = forms.ModelChoiceField(queryset=User.objects.all())
-
- def __init__(self, *args, **kwargs):
- super(ExampleTenantForm, self).__init__(*args, **kwargs)
-
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['kind'].initial = SERVICE_NAME
-
- self.fields['provider_service'].queryset = ExampleService.get_service_objects().all()
-
- if self.instance:
- self.fields['creator'].initial = self.instance.creator
- self.fields['tenant_message'].initial = self.instance.tenant_message
-
- if (not self.instance) or (not self.instance.pk):
- self.fields['creator'].initial = get_request().user
- if ExampleService.get_service_objects().exists():
- self.fields['provider_service'].initial = ExampleService.get_service_objects().all()[0]
-
- def save(self, commit=True):
- self.instance.creator = self.cleaned_data.get('creator')
- self.instance.tenant_message = self.cleaned_data.get('tenant_message')
- return super(ExampleTenantForm, self).save(commit=commit)
-
-
-class ExampleTenantAdmin(ReadOnlyAwareAdmin):
-
- verbose_name = TENANT_NAME_VERBOSE
- verbose_name_plural = TENANT_NAME_VERBOSE_PLURAL
-
- list_display = ('id', 'backend_status_icon', 'instance', 'tenant_message')
- list_display_links = ('backend_status_icon', 'instance', 'tenant_message', 'id')
-
- fieldsets = [(None, {
- 'fields': ['backend_status_text', 'kind', 'provider_service', 'instance', 'creator', 'tenant_message'],
- 'classes': ['suit-tab suit-tab-general'],
- })]
-
- readonly_fields = ('backend_status_text', 'instance',)
-
- form = ExampleTenantForm
-
- suit_form_tabs = (('general', 'Details'),)
-
- def queryset(self, request):
- return ExampleTenant.get_tenant_objects_by_user(request.user)
-
-admin.site.register(ExampleTenant, ExampleTenantAdmin)
-
diff --git a/xos/onboard/exampleservice-old/api/service/exampleservice.py b/xos/onboard/exampleservice-old/api/service/exampleservice.py
deleted file mode 100644
index d8fe23a..0000000
--- a/xos/onboard/exampleservice-old/api/service/exampleservice.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import viewsets
-from rest_framework import status
-from rest_framework.decorators import detail_route, list_route
-from rest_framework.views import APIView
-from core.models import *
-from django.forms import widgets
-from django.conf.urls import patterns, url
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-from django.shortcuts import get_object_or_404
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from xos.exceptions import *
-import json
-import subprocess
-from services.exampleservice.models import ExampleService
-
-class ExampleServiceSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- service_message = serializers.CharField(required=False)
-
- class Meta:
- model = ExampleService
- fields = ('humanReadableName',
- 'id',
- 'service_message')
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-class ExampleServiceViewSet(XOSViewSet):
- base_name = "exampleservice"
- method_name = "exampleservice"
- method_kind = "viewset"
- queryset = ExampleService.get_service_objects().all()
- serializer_class = ExampleServiceSerializer
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(ExampleServiceViewSet, self).get_urlpatterns(api_path=api_path)
-
- return patterns
-
- def list(self, request):
- object_list = self.filter_queryset(self.get_queryset())
-
- serializer = self.get_serializer(object_list, many=True)
-
- return Response(serializer.data)
-
diff --git a/xos/onboard/exampleservice-old/api/tenant/exampletenant.py b/xos/onboard/exampleservice-old/api/tenant/exampletenant.py
deleted file mode 100644
index f4778cc..0000000
--- a/xos/onboard/exampleservice-old/api/tenant/exampletenant.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import status
-from core.models import *
-from django.forms import widgets
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-
-from services.exampleservice.models import ExampleTenant, ExampleService
-
-def get_default_example_service():
- example_services = ExampleService.get_service_objects().all()
- if example_services:
- return example_services[0]
- return None
-
-class ExampleTenantSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- provider_service = serializers.PrimaryKeyRelatedField(queryset=ExampleService.get_service_objects().all(), default=get_default_example_service)
- tenant_message = serializers.CharField(required=False)
- backend_status = ReadOnlyField()
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
-
- class Meta:
- model = ExampleTenant
- fields = ('humanReadableName', 'id', 'provider_service', 'tenant_message', 'backend_status')
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-class ExampleTenantViewSet(XOSViewSet):
- base_name = "exampletenant"
- method_name = "exampletenant"
- method_kind = "viewset"
- queryset = ExampleTenant.get_tenant_objects().all()
- serializer_class = ExampleTenantSerializer
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(ExampleTenantViewSet, self).get_urlpatterns(api_path=api_path)
-
- # example to demonstrate adding a custom endpoint
- patterns.append( self.detail_url("message/$", {"get": "get_message", "put": "set_message"}, "message") )
-
- return patterns
-
- def list(self, request):
- queryset = self.filter_queryset(self.get_queryset())
-
- serializer = self.get_serializer(queryset, many=True)
-
- return Response(serializer.data)
-
- def get_message(self, request, pk=None):
- example_tenant = self.get_object()
- return Response({"tenant_message": example_tenant.tenant_message})
-
- def set_message(self, request, pk=None):
- example_tenant = self.get_object()
- example_tenant.tenant_message = request.data["tenant_message"]
- example_tenant.save()
- return Response({"tenant_message": example_tenant.tenant_message})
-
diff --git a/xos/onboard/exampleservice-old/exampleservice-onboard-longform.yaml b/xos/onboard/exampleservice-old/exampleservice-onboard-longform.yaml
deleted file mode 100644
index 0eddd51..0000000
--- a/xos/onboard/exampleservice-old/exampleservice-onboard-longform.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the exampleservice
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- exampleservice:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:/opt/xos/onboard/exampleservice/
-
- exampleservice_models:
- type: tosca.nodes.ServiceControllerResource
- properties:
- kind: models
- format: python
- url: models.py
- requirements:
- - controller:
- node: exampleservice
- relationship: tosca.relationships.UsedByController
-
- exampleservice_admin:
- type: tosca.nodes.ServiceControllerResource
- properties:
- kind: admin
- format: python
- url: admin.py
- requirements:
- - controller:
- node: exampleservice
- relationship: tosca.relationships.UsedByController
-
- exampleservice_synchronizer:
- type: tosca.nodes.ServiceControllerResource
- properties:
- kind: synchronizer
- format: manifest
- url: synchronizer/manifest
- requirements:
- - controller:
- node: exampleservice
- relationship: tosca.relationships.UsedByController
-
- exampleservice_tosca_types:
- type: tosca.nodes.ServiceControllerResource
- properties:
- kind: tosca_custom_types
- format: yaml
- url: exampleservice.yaml
- requirements:
- - controller:
- node: exampleservice
- relationship: tosca.relationships.UsedByController
diff --git a/xos/onboard/exampleservice-old/exampleservice-onboard.yaml b/xos/onboard/exampleservice-old/exampleservice-onboard.yaml
deleted file mode 100644
index 9999a38..0000000
--- a/xos/onboard/exampleservice-old/exampleservice-onboard.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the exampleservice
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- exampleservice:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/exampleservice/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- synchronizer: synchronizer/manifest
- tosca_custom_types: exampleservice.yaml
- tosca_resource: tosca/resources/exampleservice.py, tosca/resources/exampletenant.py
- rest_service: api/service/exampleservice.py
- rest_tenant: api/tenant/exampletenant.py
- private_key: file:///opt/xos/key_import/exampleservice_rsa
- public_key: file:///opt/xos/key_import/exampleservice_rsa.pub
-
diff --git a/xos/onboard/exampleservice-old/exampleservice.m4 b/xos/onboard/exampleservice-old/exampleservice.m4
deleted file mode 100644
index 720913e..0000000
--- a/xos/onboard/exampleservice-old/exampleservice.m4
+++ /dev/null
@@ -1,31 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-# compile this with "m4 exampleservice.m4 > exampleservice.yaml"
-
-# include macros
-include(macros.m4)
-
-node_types:
- tosca.nodes.ExampleService:
- derived_from: tosca.nodes.Root
- description: >
- Example Service
- capabilities:
- xos_base_service_caps
- properties:
- xos_base_props
- xos_base_service_props
- service_message:
- type: string
- required: false
-
- tosca.nodes.ExampleTenant:
- derived_from: tosca.nodes.Root
- description: >
- A Tenant of the example service
- properties:
- xos_base_tenant_props
- tenant_message:
- type: string
- required: false
-
diff --git a/xos/onboard/exampleservice-old/exampleservice.yaml b/xos/onboard/exampleservice-old/exampleservice.yaml
deleted file mode 100644
index 2cd70dd..0000000
--- a/xos/onboard/exampleservice-old/exampleservice.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-# compile this with "m4 exampleservice.m4 > exampleservice.yaml"
-
-# include macros
-# Note: Tosca derived_from isn't working the way I think it should, it's not
-# inheriting from the parent template. Until we get that figured out, use
-# m4 macros to do our inheritance
-
-
-# Service
-
-
-# Subscriber
-
-
-
-
-# end m4 macros
-
-
-
-node_types:
- tosca.nodes.ExampleService:
- derived_from: tosca.nodes.Root
- description: >
- Example Service
- capabilities:
- scalable:
- type: tosca.capabilities.Scalable
- service:
- type: tosca.capabilities.xos.Service
- properties:
- no-delete:
- type: boolean
- default: false
- description: Do not allow Tosca to delete this object
- no-create:
- type: boolean
- default: false
- description: Do not allow Tosca to create this object
- no-update:
- type: boolean
- default: false
- description: Do not allow Tosca to update this object
- replaces:
- type: string
- required: false
- description: Replaces/renames this object
- kind:
- type: string
- default: generic
- description: Type of service.
- view_url:
- type: string
- required: false
- description: URL to follow when icon is clicked in the Service Directory.
- icon_url:
- type: string
- required: false
- description: ICON to display in the Service Directory.
- enabled:
- type: boolean
- default: true
- published:
- type: boolean
- default: true
- description: If True then display this Service in the Service Directory.
- public_key:
- type: string
- required: false
- description: Public key to install into Instances to allow Services to SSH into them.
- private_key_fn:
- type: string
- required: false
- description: Location of private key file
- versionNumber:
- type: string
- required: false
- description: Version number of Service.
- service_message:
- type: string
- required: false
-
- tosca.nodes.ExampleTenant:
- derived_from: tosca.nodes.Root
- description: >
- A Tenant of the example service
- properties:
- kind:
- type: string
- default: generic
- description: Kind of tenant
- service_specific_id:
- type: string
- required: false
- description: Service specific ID opaque to XOS but meaningful to service
- tenant_message:
- type: string
- required: false
-
diff --git a/xos/onboard/exampleservice-old/macros.m4 b/xos/onboard/exampleservice-old/macros.m4
deleted file mode 100644
index 1f48f10..0000000
--- a/xos/onboard/exampleservice-old/macros.m4
+++ /dev/null
@@ -1,84 +0,0 @@
-# Note: Tosca derived_from isn't working the way I think it should, it's not
-# inheriting from the parent template. Until we get that figured out, use
-# m4 macros to do our inheritance
-
-define(xos_base_props,
- no-delete:
- type: boolean
- default: false
- description: Do not allow Tosca to delete this object
- no-create:
- type: boolean
- default: false
- description: Do not allow Tosca to create this object
- no-update:
- type: boolean
- default: false
- description: Do not allow Tosca to update this object
- replaces:
- type: string
- required: false
- description: Replaces/renames this object)
-# Service
-define(xos_base_service_caps,
- scalable:
- type: tosca.capabilities.Scalable
- service:
- type: tosca.capabilities.xos.Service)
-define(xos_base_service_props,
- kind:
- type: string
- default: generic
- description: Type of service.
- view_url:
- type: string
- required: false
- description: URL to follow when icon is clicked in the Service Directory.
- icon_url:
- type: string
- required: false
- description: ICON to display in the Service Directory.
- enabled:
- type: boolean
- default: true
- published:
- type: boolean
- default: true
- description: If True then display this Service in the Service Directory.
- public_key:
- type: string
- required: false
- description: Public key to install into Instances to allow Services to SSH into them.
- private_key_fn:
- type: string
- required: false
- description: Location of private key file
- versionNumber:
- type: string
- required: false
- description: Version number of Service.)
-# Subscriber
-define(xos_base_subscriber_caps,
- subscriber:
- type: tosca.capabilities.xos.Subscriber)
-define(xos_base_subscriber_props,
- kind:
- type: string
- default: generic
- description: Kind of subscriber
- service_specific_id:
- type: string
- required: false
- description: Service specific ID opaque to XOS but meaningful to service)
-define(xos_base_tenant_props,
- kind:
- type: string
- default: generic
- description: Kind of tenant
- service_specific_id:
- type: string
- required: false
- description: Service specific ID opaque to XOS but meaningful to service)
-
-# end m4 macros
-
diff --git a/xos/onboard/exampleservice-old/make_synchronizer_manifest.sh b/xos/onboard/exampleservice-old/make_synchronizer_manifest.sh
deleted file mode 100644
index 4058982..0000000
--- a/xos/onboard/exampleservice-old/make_synchronizer_manifest.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#! /bin/bash
-find synchronizer -type f | cut -b 14- > synchronizer/manifest
diff --git a/xos/onboard/exampleservice-old/models.py b/xos/onboard/exampleservice-old/models.py
deleted file mode 100644
index 5d3e258..0000000
--- a/xos/onboard/exampleservice-old/models.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# models.py - ExampleService Models
-
-from core.models import Service, TenantWithContainer
-from django.db import models, transaction
-
-SERVICE_NAME = 'exampleservice'
-SERVICE_NAME_VERBOSE = 'Example Service'
-SERVICE_NAME_VERBOSE_PLURAL = 'Example Services'
-TENANT_NAME_VERBOSE = 'Example Tenant'
-TENANT_NAME_VERBOSE_PLURAL = 'Example Tenants'
-
-class ExampleService(Service):
-
- KIND = SERVICE_NAME
-
- class Meta:
- app_label = SERVICE_NAME
- verbose_name = SERVICE_NAME_VERBOSE
-
- service_message = models.CharField(max_length=254, help_text="Service Message to Display")
-
-class ExampleTenant(TenantWithContainer):
-
- KIND = SERVICE_NAME
-
- class Meta:
- verbose_name = TENANT_NAME_VERBOSE
-
- tenant_message = models.CharField(max_length=254, help_text="Tenant Message to Display")
-
- def __init__(self, *args, **kwargs):
- exampleservice = ExampleService.get_service_objects().all()
- if exampleservice:
- self._meta.get_field('provider_service').default = exampleservice[0].id
- super(ExampleTenant, self).__init__(*args, **kwargs)
-
- def save(self, *args, **kwargs):
- super(ExampleTenant, self).save(*args, **kwargs)
- model_policy_exampletenant(self.pk)
-
- def delete(self, *args, **kwargs):
- self.cleanup_container()
- super(ExampleTenant, self).delete(*args, **kwargs)
-
-
-def model_policy_exampletenant(pk):
- with transaction.atomic():
- tenant = ExampleTenant.objects.select_for_update().filter(pk=pk)
- if not tenant:
- return
- tenant = tenant[0]
- tenant.manage_container()
-
diff --git a/xos/onboard/exampleservice-old/synchronizer/exampleservice-synchronizer.py b/xos/onboard/exampleservice-old/synchronizer/exampleservice-synchronizer.py
deleted file mode 100644
index 90d2c98..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/exampleservice-synchronizer.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-
-# Runs the standard XOS synchronizer
-
-import importlib
-import os
-import sys
-
-synchronizer_path = os.path.join(os.path.dirname(
- os.path.realpath(__file__)), "../../synchronizers/base")
-sys.path.append(synchronizer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
-
diff --git a/xos/onboard/exampleservice-old/synchronizer/exampleservice_config b/xos/onboard/exampleservice-old/synchronizer/exampleservice_config
deleted file mode 100644
index 7e59fdd..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/exampleservice_config
+++ /dev/null
@@ -1,24 +0,0 @@
-# Required by XOS
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-# Required by XOS
-[api]
-nova_enabled=True
-
-# Sets options for the synchronizer
-[observer]
-name=exampleservice
-dependency_graph=/opt/xos/synchronizers/exampleservice/model-deps
-steps_dir=/opt/xos/synchronizers/exampleservice/steps
-sys_dir=/opt/xos/synchronizers/exampleservice/sys
-logfile=/var/log/xos_backend.log
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-proxy_ssh=False
-
diff --git a/xos/onboard/exampleservice-old/synchronizer/manifest b/xos/onboard/exampleservice-old/synchronizer/manifest
deleted file mode 100644
index 8f43610..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/manifest
+++ /dev/null
@@ -1,10 +0,0 @@
-manifest
-steps/sync_exampletenant.py
-steps/roles/install_apache/tasks/main.yml
-steps/roles/create_index/templates/index.html.j2
-steps/roles/create_index/tasks/main.yml
-steps/exampletenant_playbook.yaml
-exampleservice-synchronizer.py
-model-deps
-run.sh
-exampleservice_config
diff --git a/xos/onboard/exampleservice-old/synchronizer/model-deps b/xos/onboard/exampleservice-old/synchronizer/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/onboard/exampleservice-old/synchronizer/run.sh b/xos/onboard/exampleservice-old/synchronizer/run.sh
deleted file mode 100755
index e6da8f6..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python exampleservice-synchronizer.py -C $XOS_DIR/synchronizers/exampleservice/exampleservice_config
diff --git a/xos/onboard/exampleservice-old/synchronizer/steps/exampletenant_playbook.yaml b/xos/onboard/exampleservice-old/synchronizer/steps/exampletenant_playbook.yaml
deleted file mode 100644
index 89e4617..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/steps/exampletenant_playbook.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# exampletenant_playbook
-
-- hosts: "{{ instance_name }}"
- connection: ssh
- user: ubuntu
- sudo: yes
- gather_facts: no
- vars:
- - tenant_message: "{{ tenant_message }}"
- - service_message: "{{ service_message }}"
-
- roles:
- - install_apache
- - create_index
-
diff --git a/xos/onboard/exampleservice-old/synchronizer/steps/roles/create_index/tasks/main.yml b/xos/onboard/exampleservice-old/synchronizer/steps/roles/create_index/tasks/main.yml
deleted file mode 100644
index 91c6029..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/steps/roles/create_index/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-- name: Write index.html file to apache document root
- template:
- src=index.html.j2
- dest=/var/www/html/index.html
-
diff --git a/xos/onboard/exampleservice-old/synchronizer/steps/roles/create_index/templates/index.html.j2 b/xos/onboard/exampleservice-old/synchronizer/steps/roles/create_index/templates/index.html.j2
deleted file mode 100644
index 9cec084..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/steps/roles/create_index/templates/index.html.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-ExampleService
- Service Message: "{{ service_message }}"
- Tenant Message: "{{ tenant_message }}"
-
diff --git a/xos/onboard/exampleservice-old/synchronizer/steps/roles/install_apache/tasks/main.yml b/xos/onboard/exampleservice-old/synchronizer/steps/roles/install_apache/tasks/main.yml
deleted file mode 100644
index d9a155c..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/steps/roles/install_apache/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-- name: Install apache using apt
- apt:
- name=apache2
- update_cache=yes
-
diff --git a/xos/onboard/exampleservice-old/synchronizer/steps/sync_exampletenant.py b/xos/onboard/exampleservice-old/synchronizer/steps/sync_exampletenant.py
deleted file mode 100644
index fbde96f..0000000
--- a/xos/onboard/exampleservice-old/synchronizer/steps/sync_exampletenant.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-import sys
-from django.db.models import Q, F
-from services.exampleservice.models import ExampleService, ExampleTenant
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-
-parentdir = os.path.join(os.path.dirname(__file__), "..")
-sys.path.insert(0, parentdir)
-
-class SyncExampleTenant(SyncInstanceUsingAnsible):
-
- provides = [ExampleTenant]
-
- observes = ExampleTenant
-
- requested_interval = 0
-
- template_name = "exampletenant_playbook.yaml"
-
- service_key_name = "/opt/xos/synchronizers/exampleservice/exampleservice_private_key"
-
- def __init__(self, *args, **kwargs):
- super(SyncExampleTenant, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
-
- if (not deleted):
- objs = ExampleTenant.get_tenant_objects().filter(
- Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
- else:
- # If this is a deletion we get all of the deleted tenants..
- objs = ExampleTenant.get_deleted_tenant_objects()
-
- return objs
-
- def get_exampleservice(self, o):
- if not o.provider_service:
- return None
-
- exampleservice = ExampleService.get_service_objects().filter(id=o.provider_service.id)
-
- if not exampleservice:
- return None
-
- return exampleservice[0]
-
- # Gets the attributes that are used by the Ansible template but are not
- # part of the set of default attributes.
- def get_extra_attributes(self, o):
- fields = {}
- fields['tenant_message'] = o.tenant_message
- exampleservice = self.get_exampleservice(o)
- fields['service_message'] = exampleservice.service_message
- return fields
-
diff --git a/xos/onboard/exampleservice-old/tosca/resources/exampleservice.py b/xos/onboard/exampleservice-old/tosca/resources/exampleservice.py
deleted file mode 100644
index f26b8b7..0000000
--- a/xos/onboard/exampleservice-old/tosca/resources/exampleservice.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-import pdb
-
-from core.models import Service,User,CoarseTenant
-from services.exampleservice.models import ExampleService
-
-from xosresource import XOSResource
-
-class XOSExampleService(XOSResource):
- provides = "tosca.nodes.ExampleService"
- xos_model = ExampleService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "private_key_fn", "versionNumber", "service_message"]
-
- def postprocess(self, obj):
- for provider_service_name in self.get_requirements("tosca.relationships.TenantOfService"):
- provider_service = self.get_xos_object(ExampleService, name=provider_service_name)
-
- existing_tenancy = CoarseTenant.get_tenant_objects().filter(provider_service = provider_service, subscriber_service = obj)
- if existing_tenancy:
- self.info("Tenancy relationship from %s to %s already exists" % (str(obj), str(provider_service)))
- else:
- tenancy = CoarseTenant(provider_service = provider_service,
- subscriber_service = obj)
- tenancy.save()
-
- self.info("Created Tenancy relationship from %s to %s" % (str(obj), str(provider_service)))
-
- def can_delete(self, obj):
- if obj.slices.exists():
- self.info("Service %s has active slices; skipping delete" % obj.name)
- return False
- return super(XOSExampleService, self).can_delete(obj)
-
diff --git a/xos/onboard/exampleservice-old/tosca/resources/exampletenant.py b/xos/onboard/exampleservice-old/tosca/resources/exampletenant.py
deleted file mode 100644
index d9239f9..0000000
--- a/xos/onboard/exampleservice-old/tosca/resources/exampletenant.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import importlib
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-from core.models import Tenant, Service
-from services.exampleservice.models import ExampleTenant, SERVICE_NAME as EXAMPLETENANT_KIND
-
-from xosresource import XOSResource
-
-class XOSExampleTenant(XOSResource):
- provides = "tosca.nodes.ExampleTenant"
- xos_model = ExampleTenant
- name_field = "service_specific_id"
- copyin_props = ("tenant_message",)
-
- def get_xos_args(self, throw_exception=True):
- args = super(XOSExampleTenant, self).get_xos_args()
-
- # ExampleTenant must always have a provider_service
- provider_name = self.get_requirement("tosca.relationships.TenantOfService", throw_exception=True)
- if provider_name:
- args["provider_service"] = self.get_xos_object(Service, throw_exception=True, name=provider_name)
-
- return args
-
- def get_existing_objs(self):
- args = self.get_xos_args(throw_exception=False)
- return ExampleTenant.get_tenant_objects().filter(provider_service=args["provider_service"], service_specific_id=args["service_specific_id"])
- return []
-
- def can_delete(self, obj):
- return super(XOSExampleTenant, self).can_delete(obj)
-
diff --git a/xos/onboard/fabric-old/admin.py b/xos/onboard/fabric-old/admin.py
deleted file mode 100644
index e372a7d..0000000
--- a/xos/onboard/fabric-old/admin.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from django.contrib import admin
-
-from services.fabric.models import *
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.models import AddressPool
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline
-from core.middleware import get_request
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-class FabricServiceForm(forms.ModelForm):
- def __init__(self,*args,**kwargs):
- super (FabricServiceForm,self ).__init__(*args,**kwargs)
-
- def save(self, commit=True):
- return super(FabricServiceForm, self).save(commit=commit)
-
- class Meta:
- model = FabricService
-
-class FabricServiceAdmin(ReadOnlyAwareAdmin):
- model = FabricService
- verbose_name = "Fabric Service"
- verbose_name_plural = "Fabric Services"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description', "view_url", "icon_url", "autoconfig", ],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
- form = FabricServiceForm
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'Fabric Service Details'),
- ('administration', 'Administration'),
- #('tools', 'Tools'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = (('fabricadmin.html', 'top', 'administration'),
- )
-
- def queryset(self, request):
- return FabricService.get_service_objects_by_user(request.user)
-
-admin.site.register(FabricService, FabricServiceAdmin)
diff --git a/xos/onboard/fabric-old/fabric-onboard.yaml b/xos/onboard/fabric-old/fabric-onboard.yaml
deleted file mode 100644
index e0f0fa7..0000000
--- a/xos/onboard/fabric-old/fabric-onboard.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the fabric
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#fabric:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/fabric/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/fabricadmin.html
- synchronizer: synchronizer/manifest
- synchronizer_run: fabric-synchronizer.py
- tosca_resource: tosca/resources/fabricservice.py
- #private_key: file:///opt/xos/key_import/vsg_rsa
- #public_key: file:///opt/xos/key_import/vsg_rsa.pub
-
diff --git a/xos/onboard/fabric-old/models.py b/xos/onboard/fabric-old/models.py
deleted file mode 100644
index bd37416..0000000
--- a/xos/onboard/fabric-old/models.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from django.db import models
-from core.models import Service
-import traceback
-from xos.exceptions import *
-from xos.config import Config
-
-FABRIC_KIND = "fabric"
-
-class FabricService(Service):
- KIND = FABRIC_KIND
-
- class Meta:
- app_label = "fabric"
- verbose_name = "Fabric Service"
-
- autoconfig = models.BooleanField(default=True, help_text="Autoconfigure the fabric")
diff --git a/xos/onboard/fabric-old/synchronizer/fabric-synchronizer.py b/xos/onboard/fabric-old/synchronizer/fabric-synchronizer.py
deleted file mode 100755
index 84bec4f..0000000
--- a/xos/onboard/fabric-old/synchronizer/fabric-synchronizer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/onboard/fabric-old/synchronizer/fabric_synchronizer_config b/xos/onboard/fabric-old/synchronizer/fabric_synchronizer_config
deleted file mode 100644
index 2ed56fe..0000000
--- a/xos/onboard/fabric-old/synchronizer/fabric_synchronizer_config
+++ /dev/null
@@ -1,23 +0,0 @@
-# Required by XOS
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-# Required by XOS
-[api]
-nova_enabled=True
-
-# Sets options for the synchronizer
-[observer]
-name=fabric
-dependency_graph=/opt/xos/synchronizers/fabric/model-deps
-steps_dir=/opt/xos/synchronizers/fabric/steps
-sys_dir=/opt/xos/synchronizers/fabric/sys
-logfile=console
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-proxy_ssh=False
diff --git a/xos/onboard/fabric-old/synchronizer/manifest b/xos/onboard/fabric-old/synchronizer/manifest
deleted file mode 100644
index 62a0722..0000000
--- a/xos/onboard/fabric-old/synchronizer/manifest
+++ /dev/null
@@ -1,9 +0,0 @@
-manifest
-fabric_synchronizer_config
-steps/sync_host.yaml
-steps/sync_vroutertenant.py
-start.sh
-stop.sh
-model-deps
-run.sh
-fabric-synchronizer.py
diff --git a/xos/onboard/fabric-old/synchronizer/model-deps b/xos/onboard/fabric-old/synchronizer/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/onboard/fabric-old/synchronizer/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/onboard/fabric-old/synchronizer/run.sh b/xos/onboard/fabric-old/synchronizer/run.sh
deleted file mode 100755
index 4e0c214..0000000
--- a/xos/onboard/fabric-old/synchronizer/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python fabric-synchronizer.py -C $XOS_DIR/synchronizers/fabric/fabric_synchronizer_config
diff --git a/xos/onboard/fabric-old/synchronizer/start.sh b/xos/onboard/fabric-old/synchronizer/start.sh
deleted file mode 100755
index 8d02bf3..0000000
--- a/xos/onboard/fabric-old/synchronizer/start.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-nohup python fabric-synchronizer.py -C $XOS_DIR/synchronizers/fabric/fabric_synchronizer_config > /dev/null 2>&1 &
diff --git a/xos/onboard/fabric-old/synchronizer/steps/sync_host.yaml b/xos/onboard/fabric-old/synchronizer/steps/sync_host.yaml
deleted file mode 100644
index 58bccba..0000000
--- a/xos/onboard/fabric-old/synchronizer/steps/sync_host.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- vars:
- rest_hostname: {{ rest_hostname }}
- rest_port: {{ rest_port }}
- rest_endpoint: {{ rest_endpoint }}
- rest_json: '{{ rest_json }}'
-
- tasks:
- - debug: var=rest_json
-
- - name: Call Fabric REST API
- uri:
- url: http://{{ '{{' }} rest_hostname {{ '}}' }}:{{ '{{' }} rest_port {{ '}}' }}/{{ '{{' }} rest_endpoint {{ '}}' }} #http://localhost:8181/onos/v1/network/configuration/
- body: "{{ '{{' }} rest_json {{ '}}' }}"
- body_format: raw
- method: POST
- user: karaf
- password: karaf
diff --git a/xos/onboard/fabric-old/synchronizer/steps/sync_vroutertenant.py b/xos/onboard/fabric-old/synchronizer/steps/sync_vroutertenant.py
deleted file mode 100644
index fd77ca2..0000000
--- a/xos/onboard/fabric-old/synchronizer/steps/sync_vroutertenant.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models import Controller
-from core.models import Image, ControllerImages
-from xos.logger import observer_logger as logger
-from synchronizers.base.ansible import *
-from services.vrouter.models import VRouterTenant
-from services.onos.models import ONOSService
-from services.fabric.models import FabricService
-import json
-
-class SyncVRouterTenant(SyncStep):
- provides=[VRouterTenant]
- observes = VRouterTenant
- requested_interval=30
- playbook='sync_host.yaml'
-
- def get_fabric_onos_service(self):
- fos = None
- fs = FabricService.get_service_objects().all()[0]
- if fs.subscribed_tenants.exists():
- app = fs.subscribed_tenants.all()[0]
- if app.provider_service:
- ps = app.provider_service
- fos = ONOSService.get_service_objects().filter(id=ps.id)[0]
- return fos
-
- def get_node_tag(self, node, tagname):
- tags = Tag.select_by_content_object(node).filter(name=tagname)
- return tags[0].value
-
- def fetch_pending(self, deleted):
- fs = FabricService.get_service_objects().all()[0]
- if not fs.autoconfig:
- return None
-
- if (not deleted):
- objs = VRouterTenant.get_tenant_objects().filter(Q(lazy_blocked=False))
- else:
- objs = VRouterTenant.get_deleted_tenant_objects()
-
- return objs
-
- def map_sync_inputs(self, vroutertenant):
-
- fos = self.get_fabric_onos_service()
-
- name = None
- instance = None
- # VRouterTenant setup is kind of hacky right now, we'll
- # need to revisit. The idea is:
- # * Look up the instance corresponding to the address
- # * Look up the node running the instance
- # * Get the "location" tag, push to the fabric
- #
- # Do we have a vCPE subscriber_tenant?
- if (vroutertenant.subscriber_tenant):
- sub = vroutertenant.subscriber_tenant
- if (sub.kind == 'vCPE'):
- instance_id = sub.get_attribute("instance_id")
- if instance_id:
- instance = Instance.objects.filter(id=instance_id)[0]
- name = str(sub)
- else:
- # Maybe the VRouterTenant is for an instance
- instance_id = vroutertenant.get_attribute("tenant_for_instance_id")
- if instance_id:
- instance = Instance.objects.filter(id=instance_id)[0]
- name = str(instance)
-
- node = instance.node
- location = self.get_node_tag(node, "location")
-
- # Is it a POST or DELETE?
-
- # Create JSON
- data = {
- "%s/-1"%vroutertenant.public_mac : {
- "basic" : {
- "ips" : [ vroutertenant.public_ip ],
- "location" : location
- }
- }
- }
-
- rest_json = json.dumps(data, indent=4)
-
- fields = {
- 'rest_hostname': fos.rest_hostname,
- 'rest_port': fos.rest_port,
- 'rest_json': rest_json,
- 'rest_endpoint': "onos/v1/network/configuration/hosts",
- 'ansible_tag': '%s'%name, # tag used to label this ansible run
- }
- return fields
-
- def map_sync_outputs(self, controller_image, res):
- pass
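
For reference, the fields that map_sync_inputs() above hands to the sync_host.yaml playbook wrap an ONOS host-config document keyed by MAC address. A minimal standalone sketch of that payload, with hypothetical values for public_mac, public_ip and location:

import json

public_mac = "00:00:00:00:00:10"    # hypothetical; normally taken from the VRouterTenant
public_ip = "10.6.1.129"            # hypothetical public IP of the tenant
location = "of:0000000000000001/3"  # "location" tag of the node running the instance

data = {
    "%s/-1" % public_mac: {
        "basic": {
            "ips": [public_ip],
            "location": location,
        }
    }
}

# This JSON is what the playbook POSTs to onos/v1/network/configuration/hosts.
print(json.dumps(data, indent=4))
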
diff --git a/xos/onboard/fabric-old/synchronizer/stop.sh b/xos/onboard/fabric-old/synchronizer/stop.sh
deleted file mode 100755
index d35b057..0000000
--- a/xos/onboard/fabric-old/synchronizer/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f fabric-synchronizer.py
diff --git a/xos/onboard/fabric-old/templates/fabricadmin.html b/xos/onboard/fabric-old/templates/fabricadmin.html
deleted file mode 100644
index e69de29..0000000
--- a/xos/onboard/fabric-old/templates/fabricadmin.html
+++ /dev/null
diff --git a/xos/onboard/fabric-old/tosca/resources/fabricservice.py b/xos/onboard/fabric-old/tosca/resources/fabricservice.py
deleted file mode 100644
index 0c9cfb4..0000000
--- a/xos/onboard/fabric-old/tosca/resources/fabricservice.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.fabric.models import FabricService
-
-from service import XOSService
-
-class FabricService(XOSService):
- provides = "tosca.nodes.FabricService"
- xos_model = FabricService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber"]
-
diff --git a/xos/onboard/onos-old/admin.py b/xos/onboard/onos-old/admin.py
deleted file mode 100644
index fb0f1d7..0000000
--- a/xos/onboard/onos-old/admin.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from django.contrib import admin
-
-from services.onos.models import *
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline, TenantAttrAsTabInline
-from core.middleware import get_request
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-class ONOSServiceForm(forms.ModelForm):
- rest_hostname = forms.CharField(required=False)
- rest_port = forms.CharField(required=False)
- no_container = forms.BooleanField(required=False)
-# external_hostname = forms.CharField(required=False)
-# external_container = forms.CharField(required=False)
-
- def __init__(self,*args,**kwargs):
- super (ONOSServiceForm,self ).__init__(*args,**kwargs)
- if self.instance:
- # fields for the attributes
- self.fields['rest_hostname'].initial = self.instance.rest_hostname
- self.fields['rest_port'].initial = self.instance.rest_port
- self.fields['no_container'].initial = self.instance.no_container
-# self.fields['external_hostname'].initial = self.instance.external_hostname
-# self.fields['external_container'].initial = self.instance.external_hostname
-
- def save(self, commit=True):
- self.instance.rest_hostname = self.cleaned_data.get("rest_hostname")
- self.instance.rest_port = self.cleaned_data.get("rest_port")
- self.instance.no_container = self.cleaned_data.get("no_container")
-# self.instance.external_hostname = self.cleaned_data.get("external_hostname")
-# self.instance.external_container = self.cleaned_data.get("external_container")
- return super(ONOSServiceForm, self).save(commit=commit)
-
- class Meta:
- model = ONOSService
-
-class ONOSServiceAdmin(ReadOnlyAwareAdmin):
- model = ONOSService
- verbose_name = "ONOS Service"
- verbose_name_plural = "ONOS Services"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description',"view_url","icon_url", "rest_hostname", "rest_port", "no_container" ], 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
- form = ONOSServiceForm
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'ONOS Service Details'),
- ('administration', 'Administration'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = (('onosadmin.html', 'top', 'administration'),
- )
-
- def queryset(self, request):
- return ONOSService.get_service_objects_by_user(request.user)
-
-class ONOSAppForm(forms.ModelForm):
- creator = forms.ModelChoiceField(queryset=User.objects.all())
- name = forms.CharField()
- dependencies = forms.CharField(required=False)
-
- def __init__(self,*args,**kwargs):
- super (ONOSAppForm,self ).__init__(*args,**kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['provider_service'].queryset = ONOSService.get_service_objects().all()
- if self.instance:
- # fields for the attributes
- self.fields['creator'].initial = self.instance.creator
- self.fields['name'].initial = self.instance.name
- self.fields['dependencies'].initial = self.instance.dependencies
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = ONOS_KIND
- self.fields['creator'].initial = get_request().user
- if ONOSService.get_service_objects().exists():
- self.fields["provider_service"].initial = ONOSService.get_service_objects().all()[0]
-
- def save(self, commit=True):
- self.instance.creator = self.cleaned_data.get("creator")
- self.instance.name = self.cleaned_data.get("name")
- self.instance.dependencies = self.cleaned_data.get("dependencies")
- return super(ONOSAppForm, self).save(commit=commit)
-
- class Meta:
- model = ONOSApp
-
-class ONOSAppAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'name', )
- list_display_links = ('backend_status_icon', 'name')
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'name', 'provider_service', 'subscriber_service', 'service_specific_attribute', "dependencies",
- 'creator'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'instance', 'service_specific_attribute')
- inlines = [TenantAttrAsTabInline]
- form = ONOSAppForm
-
- suit_form_tabs = (('general','Details'), ('tenantattrs', 'Attributes'))
-
- def queryset(self, request):
- return ONOSApp.get_tenant_objects_by_user(request.user)
-
-admin.site.register(ONOSService, ONOSServiceAdmin)
-admin.site.register(ONOSApp, ONOSAppAdmin)
-
diff --git a/xos/onboard/onos-old/api/service/onos.py b/xos/onboard/onos-old/api/service/onos.py
deleted file mode 100644
index a143b3d..0000000
--- a/xos/onboard/onos-old/api/service/onos.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import status
-from core.models import *
-from django.forms import widgets
-from services.onos.models import ONOSService
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-
-class ONOSServiceSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- rest_hostname = serializers.CharField(required=False)
- rest_port = serializers.CharField(default="8181")
- no_container = serializers.BooleanField(default=False)
- node_key = serializers.CharField(required=False)
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- class Meta:
- model = ONOSService
- fields = ('humanReadableName', 'id', 'rest_hostname', 'rest_port', 'no_container', 'node_key')
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-class ServiceAttributeSerializer(serializers.Serializer):
- id = ReadOnlyField()
- name = serializers.CharField(required=False)
- value = serializers.CharField(required=False)
-
-class ONOSServiceViewSet(XOSViewSet):
- base_name = "onos"
- method_name = "onos"
- method_kind = "viewset"
- queryset = ONOSService.get_service_objects().all()
- serializer_class = ONOSServiceSerializer
-
- custom_serializers = {"set_attribute": ServiceAttributeSerializer}
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(ONOSServiceViewSet, self).get_urlpatterns(api_path=api_path)
-
- patterns.append( self.detail_url("attributes/$", {"get": "get_attributes", "post": "add_attribute"}, "attributes") )
- patterns.append( self.detail_url("attributes/(?P<attribute>[0-9]+)/$", {"get": "get_attribute", "put": "set_attribute", "delete": "delete_attribute"}, "attribute") )
-
- return patterns
-
- def get_attributes(self, request, pk=None):
- svc = self.get_object()
- return Response(ServiceAttributeSerializer(svc.serviceattributes.all(), many=True).data)
-
- def add_attribute(self, request, pk=None):
- svc = self.get_object()
- ser = ServiceAttributeSerializer(data=request.data)
- ser.is_valid(raise_exception = True)
- att = ServiceAttribute(service=svc, **ser.validated_data)
- att.save()
- return Response(ServiceAttributeSerializer(att).data)
-
- def get_attribute(self, request, pk=None, attribute=None):
- svc = self.get_object()
- att = ServiceAttribute.objects.get(pk=attribute)
- return Response(ServiceAttributeSerializer(att).data)
-
- def set_attribute(self, request, pk=None, attribute=None):
- svc = self.get_object()
- att = ServiceAttribute.objects.get(pk=attribute)
- ser = ServiceAttributeSerializer(att, data=request.data)
- ser.is_valid(raise_exception = True)
- att.name = ser.validated_data.get("name", att.name)
- att.value = ser.validated_data.get("value", att.value)
- att.save()
- return Response(ServiceAttributeSerializer(att).data)
-
- def delete_attribute(self, request, pk=None, attribute=None):
- att = ServiceAttribute.objects.get(pk=attribute)
- att.delete()
- return Response(status=status.HTTP_204_NO_CONTENT)
-
-
-
-
-
-
diff --git a/xos/onboard/onos-old/api/tenant/onos/app.py b/xos/onboard/onos-old/api/tenant/onos/app.py
deleted file mode 100644
index 481057d..0000000
--- a/xos/onboard/onos-old/api/tenant/onos/app.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import status
-from core.models import *
-from django.forms import widgets
-from services.onos.models import ONOSService, ONOSApp
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-
-def get_default_onos_service():
- onos_services = ONOSService.get_service_objects().all()
- if onos_services:
- return onos_services[0].id
- return None
-
-class ONOSAppSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- name = serializers.CharField()
- dependencies = serializers.CharField()
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- class Meta:
- model = ONOSApp
- fields = ('humanReadableName', 'id', 'name', 'dependencies')
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-class TenantAttributeSerializer(serializers.Serializer):
- id = ReadOnlyField()
- name = serializers.CharField(required=False)
- value = serializers.CharField(required=False)
-
-class ONOSAppViewSet(XOSViewSet):
- base_name = "app"
- method_name = "app"
- method_kind = "viewset"
- queryset = ONOSApp.get_tenant_objects().all()
- serializer_class = ONOSAppSerializer
-
- custom_serializers = {"set_attribute": TenantAttributeSerializer}
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(ONOSAppViewSet, self).get_urlpatterns(api_path=api_path)
-
- patterns.append( self.detail_url("attributes/$", {"get": "get_attributes", "post": "add_attribute"}, "attributes") )
- patterns.append( self.detail_url("attributes/(?P<attribute>[0-9]+)/$", {"get": "get_attribute", "put": "set_attribute", "delete": "delete_attribute"}, "attribute") )
-
- return patterns
-
- def get_attributes(self, request, pk=None):
- app = self.get_object()
- return Response(TenantAttributeSerializer(app.tenantattributes.all(), many=True).data)
-
- def add_attribute(self, request, pk=None):
- app = self.get_object()
- ser = TenantAttributeSerializer(data=request.data)
- ser.is_valid(raise_exception = True)
- att = TenantAttribute(tenant=app, **ser.validated_data)
- att.save()
- return Response(TenantAttributeSerializer(att).data)
-
- def get_attribute(self, request, pk=None, attribute=None):
- app = self.get_object()
- att = TenantAttribute.objects.get(pk=attribute)
- return Response(TenantAttributeSerializer(att).data)
-
- def set_attribute(self, request, pk=None, attribute=None):
- app = self.get_object()
- att = TenantAttribute.objects.get(pk=attribute)
- ser = TenantAttributeSerializer(att, data=request.data)
- ser.is_valid(raise_exception = True)
- att.name = ser.validated_data.get("name", att.name)
- att.value = ser.validated_data.get("value", att.value)
- att.save()
- return Response(TenantAttributeSerializer(att).data)
-
- def delete_attribute(self, request, pk=None, attribute=None):
- att = TenantAttribute.objects.get(pk=attribute)
- att.delete()
- return Response(status=status.HTTP_204_NO_CONTENT)
-
-
-
-
-
-
diff --git a/xos/onboard/onos-old/models.py b/xos/onboard/onos-old/models.py
deleted file mode 100644
index 20fa73f..0000000
--- a/xos/onboard/onos-old/models.py
+++ /dev/null
@@ -1,146 +0,0 @@
-from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
-from core.models.plcorebase import StrippedCharField
-import os
-from django.db import models, transaction
-from django.forms.models import model_to_dict
-from django.db.models import Q
-from operator import itemgetter, attrgetter, methodcaller
-import traceback
-from xos.exceptions import *
-from core.models import SlicePrivilege, SitePrivilege
-from sets import Set
-
-ONOS_KIND = "onos"
-
-class ONOSService(Service):
- KIND = ONOS_KIND
-
- class Meta:
- app_label = "onos"
- verbose_name = "ONOS Service"
- proxy = True
-
- default_attributes = {"rest_hostname": "",
- "rest_port": "8181",
- "no_container": False,
- "node_key": ""}
-
- @property
- def rest_hostname(self):
- return self.get_attribute("rest_hostname", self.default_attributes["rest_hostname"])
-
- @rest_hostname.setter
- def rest_hostname(self, value):
- self.set_attribute("rest_hostname", value)
-
- @property
- def rest_port(self):
- return self.get_attribute("rest_port", self.default_attributes["rest_port"])
-
- @rest_port.setter
- def rest_port(self, value):
- self.set_attribute("rest_port", value)
-
- @property
- def no_container(self):
- return self.get_attribute("no_container", self.default_attributes["no_container"])
-
- @no_container.setter
- def no_container(self, value):
- self.set_attribute("no_container", value)
-
- @property
- def node_key(self):
- return self.get_attribute("node_key", self.default_attributes["node_key"])
-
- @node_key.setter
- def node_key(self, value):
- self.set_attribute("node_key", value)
-
-
-class ONOSApp(Tenant): # aka 'ONOSTenant'
- class Meta:
- proxy = True
-
- KIND = ONOS_KIND
-
- default_attributes = {"name": "",
- "install_dependencies": "",
- "dependencies": ""}
- def __init__(self, *args, **kwargs):
- onos_services = ONOSService.get_service_objects().all()
- if onos_services:
- self._meta.get_field("provider_service").default = onos_services[0].id
- super(ONOSApp, self).__init__(*args, **kwargs)
-
- @property
- def creator(self):
- from core.models import User
- if getattr(self, "cached_creator", None):
- return self.cached_creator
- creator_id=self.get_attribute("creator_id")
- if not creator_id:
- return None
- users=User.objects.filter(id=creator_id)
- if not users:
- return None
- user=users[0]
- self.cached_creator = users[0]
- return user
-
- @creator.setter
- def creator(self, value):
- if value:
- value = value.id
- if (value != self.get_attribute("creator_id", None)):
- self.cached_creator=None
- self.set_attribute("creator_id", value)
-
- @property
- def name(self):
- return self.get_attribute("name", self.default_attributes["name"])
-
- @name.setter
- def name(self, value):
- self.set_attribute("name", value)
-
- @property
- def dependencies(self):
- return self.get_attribute("dependencies", self.default_attributes["dependencies"])
-
- @dependencies.setter
- def dependencies(self, value):
- self.set_attribute("dependencies", value)
-
- @property
- def install_dependencies(self):
- return self.get_attribute("install_dependencies", self.default_attributes["install_dependencies"])
-
- @install_dependencies.setter
- def install_dependencies(self, value):
- self.set_attribute("install_dependencies", value)
-
- def save(self, *args, **kwargs):
- if not self.creator:
- if not getattr(self, "caller", None):
- # caller must be set when creating a vCPE since it creates a slice
- raise XOSProgrammingError("ONOSApp's self.caller was not set")
- self.creator = self.caller
- if not self.creator:
- raise XOSProgrammingError("ONOSApp's self.creator was not set")
-
- super(ONOSApp, self).save(*args, **kwargs)
- model_policy_onos_app(self.pk)
-
-# TODO: Probably don't need this...
-def model_policy_onos_app(pk):
- # TODO: this should be made in to a real model_policy
- with transaction.atomic():
- oa = ONOSApp.objects.select_for_update().filter(pk=pk)
- if not oa:
- return
- oa = oa[0]
- #oa.manage_container()
-
-
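
For reference, ONOSService above keeps rest_hostname, rest_port, no_container and node_key in the XOS attribute store rather than as model columns, exposing each one through a property getter/setter pair. A minimal standalone sketch of that pattern, with a plain dict standing in for the attribute store (class name hypothetical):

class AttrBackedService(object):
    default_attributes = {"rest_hostname": "", "rest_port": "8181"}

    def __init__(self):
        self._attrs = {}  # stand-in for ServiceAttribute rows

    def get_attribute(self, name, default=None):
        return self._attrs.get(name, default)

    def set_attribute(self, name, value):
        self._attrs[name] = value

    @property
    def rest_hostname(self):
        return self.get_attribute("rest_hostname", self.default_attributes["rest_hostname"])

    @rest_hostname.setter
    def rest_hostname(self, value):
        self.set_attribute("rest_hostname", value)


svc = AttrBackedService()
svc.rest_hostname = "onos-cord"
print(svc.rest_hostname)  # -> onos-cord
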
diff --git a/xos/onboard/onos-old/onos-onboard.yaml b/xos/onboard/onos-old/onos-onboard.yaml
deleted file mode 100644
index 3d4ac3b..0000000
--- a/xos/onboard/onos-old/onos-onboard.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the onos service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#onos:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/onos/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/onosadmin.html
- synchronizer: synchronizer/manifest
- synchronizer_run: onos-synchronizer.py
- #tosca_custom_types: exampleservice.yaml
- tosca_resource: tosca/resources/onosservice.py, tosca/resources/onosapp.py
- rest_service: api/service/onos.py
- rest_tenant: subdirectory:onos api/tenant/onos/app.py
- private_key: file:///opt/xos/key_import/onos_rsa
- public_key: file:///opt/xos/key_import/onos_rsa.pub
-
diff --git a/xos/onboard/onos-old/synchronizer/manifest b/xos/onboard/onos-old/synchronizer/manifest
deleted file mode 100644
index b96216a..0000000
--- a/xos/onboard/onos-old/synchronizer/manifest
+++ /dev/null
@@ -1,16 +0,0 @@
-manifest
-onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
-scripts/dockerip.sh
-steps/sync_onosapp.py
-steps/sync_onosapp_nocontainer.yaml
-steps/sync_onosservice.py
-steps/sync_onosservice.yaml
-steps/sync_onosapp.yaml
-onos-ext-notifier-1.0-SNAPSHOT.oar
-start.sh
-stop.sh
-model-deps
-onos_synchronizer_config
-supervisor/onos-observer.conf
-run.sh
-onos-synchronizer.py
diff --git a/xos/onboard/onos-old/synchronizer/model-deps b/xos/onboard/onos-old/synchronizer/model-deps
deleted file mode 100644
index 2da80e0..0000000
--- a/xos/onboard/onos-old/synchronizer/model-deps
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "ONOSApp": [
- "ONOSService"
- ]
-}
diff --git a/xos/onboard/onos-old/synchronizer/onos-ext-notifier-1.0-SNAPSHOT.oar b/xos/onboard/onos-old/synchronizer/onos-ext-notifier-1.0-SNAPSHOT.oar
deleted file mode 100644
index 23c6fcd..0000000
--- a/xos/onboard/onos-old/synchronizer/onos-ext-notifier-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/xos/onboard/onos-old/synchronizer/onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar b/xos/onboard/onos-old/synchronizer/onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
deleted file mode 100644
index 244f589..0000000
--- a/xos/onboard/onos-old/synchronizer/onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/xos/onboard/onos-old/synchronizer/onos-synchronizer.py b/xos/onboard/onos-old/synchronizer/onos-synchronizer.py
deleted file mode 100755
index 84bec4f..0000000
--- a/xos/onboard/onos-old/synchronizer/onos-synchronizer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../synchronizers/base/xos-synchronizer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/onboard/onos-old/synchronizer/onos_synchronizer_config b/xos/onboard/onos-old/synchronizer/onos_synchronizer_config
deleted file mode 100644
index c6ceece..0000000
--- a/xos/onboard/onos-old/synchronizer/onos_synchronizer_config
+++ /dev/null
@@ -1,41 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=onos
-dependency_graph=/opt/xos/synchronizers/onos/model-deps
-steps_dir=/opt/xos/synchronizers/onos/steps
-sys_dir=/opt/xos/synchronizers/onos/sys
-deleters_dir=/opt/xos/synchronizers/onos/deleters
-log_file=console
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-proxy_ssh=False
-full_setup=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/onboard/onos-old/synchronizer/run.sh b/xos/onboard/onos-old/synchronizer/run.sh
deleted file mode 100755
index b108d5b..0000000
--- a/xos/onboard/onos-old/synchronizer/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-# ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-python onos-synchronizer.py -C $XOS_DIR/synchronizers/onos/onos_synchronizer_config
diff --git a/xos/onboard/onos-old/synchronizer/scripts/dockerip.sh b/xos/onboard/onos-old/synchronizer/scripts/dockerip.sh
deleted file mode 100644
index 732c3fe..0000000
--- a/xos/onboard/onos-old/synchronizer/scripts/dockerip.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-MODE=`docker inspect --format '{{ .HostConfig.NetworkMode }}' $1 | tr -d '\n' | tr -d '\r'`
-if [[ "$MODE" == "host" ]]; then
- echo -n "127.0.0.1"
-else
- docker inspect --format '{{ .NetworkSettings.IPAddress }}' $1 | tr -d '\n' | tr -d '\r'
-fi
-
diff --git a/xos/onboard/onos-old/synchronizer/start.sh b/xos/onboard/onos-old/synchronizer/start.sh
deleted file mode 100755
index f0a1535..0000000
--- a/xos/onboard/onos-old/synchronizer/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-# ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-nohup python onos-synchronizer.py -C $XOS_DIR/synchronizers/onos/onos_synchronizer_config > /dev/null 2>&1 &
diff --git a/xos/onboard/onos-old/synchronizer/steps/sync_onosapp.py b/xos/onboard/onos-old/synchronizer/steps/sync_onosapp.py
deleted file mode 100644
index 78a8cc8..0000000
--- a/xos/onboard/onos-old/synchronizer/steps/sync_onosapp.py
+++ /dev/null
@@ -1,536 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-import re
-import json
-from collections import OrderedDict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.ansible import run_template
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice, Controller, ControllerSlice, ControllerUser, Node, TenantAttribute, Tag
-from services.onos.models import ONOSService, ONOSApp
-from xos.logger import Logger, logging
-from services.vrouter.models import VRouterService
-from services.vtn.models import VTNService
-from services.volt.models import VOLTService, VOLTDevice, AccessDevice
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncONOSApp(SyncInstanceUsingAnsible):
- provides=[ONOSApp]
- observes=ONOSApp
- requested_interval=0
- template_name = "sync_onosapp.yaml"
- #service_key_name = "/opt/xos/synchronizers/onos/onos_key"
-
- def __init__(self, *args, **kwargs):
- super(SyncONOSApp, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = ONOSApp.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
- else:
- objs = ONOSApp.get_deleted_tenant_objects()
-
- return objs
-
- def get_instance(self, o):
- # We assume the ONOS service owns a slice, so pick one of the instances
- # inside that slice to sync to.
-
- serv = self.get_onos_service(o)
-
- if serv.no_container:
- raise Exception("get_instance() was called on a service that was marked no_container")
-
- if serv.slices.exists():
- slice = serv.slices.all()[0]
- if slice.instances.exists():
- return slice.instances.all()[0]
-
- return None
-
- def get_onos_service(self, o):
- if not o.provider_service:
- return None
-
- onoses = ONOSService.get_service_objects().filter(id=o.provider_service.id)
- if not onoses:
- return None
-
- return onoses[0]
-
- def is_no_container(self, o):
- return self.get_onos_service(o).no_container
-
- def skip_ansible_fields(self, o):
- return self.is_no_container(o)
-
- def get_files_dir(self, o):
- if not hasattr(Config(), "observer_steps_dir"):
- # make steps_dir mandatory; there's no valid reason for it to not
- # be defined.
- raise Exception("observer_steps_dir is not defined in config file")
-
- step_dir = Config().observer_steps_dir
-
- return os.path.join(step_dir, "..", "files", str(self.get_onos_service(o).id), o.name)
-
- def get_cluster_configuration(self, o):
- instance = self.get_instance(o)
- if not instance:
- raise Exception("No instance for ONOS App")
- node_ips = [socket.gethostbyname(instance.node.name)]
-
- ipPrefix = ".".join(node_ips[0].split(".")[:3]) + ".*"
- result = '{ "nodes": ['
- result = result + ",".join(['{ "ip": "%s"}' % ip for ip in node_ips])
- result = result + '], "ipPrefix": "%s"}' % ipPrefix
- return result
-
- def get_dynamic_parameter_value(self, o, param):
- instance = self.get_instance(o)
- if not instance:
- raise Exception("No instance for ONOS App")
- if param == 'rabbit_host':
- return instance.controller.rabbit_host
- if param == 'rabbit_user':
- return instance.controller.rabbit_user
- if param == 'rabbit_password':
- return instance.controller.rabbit_password
- if param == 'keystone_tenant_id':
- cslice = ControllerSlice.objects.get(slice=instance.slice)
- if not cslice:
- raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
- return cslice.tenant_id
- if param == 'keystone_user_id':
- cuser = ControllerUser.objects.get(user=instance.creator)
- if not cuser:
- raise Exception("Controller user object for %s does not exist" % instance.creator)
- return cuser.kuser_id
-
- def get_node_tag(self, o, node, tagname):
- tags = Tag.select_by_content_object(node).filter(name=tagname)
- return tags[0].value
-
- # Scan attrs for attribute name
- # If it's not present, save it as a TenantAttribute
- def attribute_default(self, tenant, attrs, name, default):
- if name in attrs:
- value = attrs[name]
- else:
- value = default
- logger.info("saving default value %s for attribute %s" % (value, name))
- ta = TenantAttribute(tenant=tenant, name=name, value=value)
- ta.save()
- return value
-
- # This function currently assumes a single Deployment and Site
- def get_vtn_config(self, o, attrs):
-
- privateGatewayMac = None
- localManagementIp = None
- ovsdbPort = None
- sshPort = None
- sshUser = None
- sshKeyFile = None
- mgmtSubnetBits = None
- xosEndpoint = None
- xosUser = None
- xosPassword = None
-
- # VTN-specific configuration from the VTN Service
- vtns = VTNService.get_service_objects().all()
- if vtns:
- vtn = vtns[0]
- privateGatewayMac = vtn.privateGatewayMac
- localManagementIp = vtn.localManagementIp
- ovsdbPort = vtn.ovsdbPort
- sshPort = vtn.sshPort
- sshUser = vtn.sshUser
- sshKeyFile = vtn.sshKeyFile
- mgmtSubnetBits = vtn.mgmtSubnetBits
- xosEndpoint = vtn.xosEndpoint
- xosUser = vtn.xosUser
- xosPassword = vtn.xosPassword
-
- # OpenStack endpoints and credentials
- keystone_server = "http://keystone:5000/v2.0/"
- user_name = "admin"
- password = "ADMIN_PASS"
- controllers = Controller.objects.all()
- if controllers:
- controller = controllers[0]
- keystone_server = controller.auth_url
- user_name = controller.admin_user
- tenant_name = controller.admin_tenant
- password = controller.admin_password
-
- data = {
- "apps" : {
- "org.onosproject.cordvtn" : {
- "cordvtn" : {
- "privateGatewayMac" : privateGatewayMac,
- "localManagementIp": localManagementIp,
- "ovsdbPort": ovsdbPort,
- "ssh": {
- "sshPort": sshPort,
- "sshUser": sshUser,
- "sshKeyFile": sshKeyFile
- },
- "openstack": {
- "endpoint": keystone_server,
- "tenant": tenant_name,
- "user": user_name,
- "password": password
- },
- "xos": {
- "endpoint": xosEndpoint,
- "user": xosUser,
- "password": xosPassword
- },
- "publicGateways": [],
- "nodes" : []
- }
- }
- }
- }
-
- # Generate apps->org.onosproject.cordvtn->cordvtn->nodes
- nodes = Node.objects.all()
- for node in nodes:
- nodeip = socket.gethostbyname(node.name)
-
- try:
- bridgeId = self.get_node_tag(o, node, "bridgeId")
- dataPlaneIntf = self.get_node_tag(o, node, "dataPlaneIntf")
- dataPlaneIp = self.get_node_tag(o, node, "dataPlaneIp")
- except:
- logger.error("not adding node %s to the VTN configuration" % node.name)
- continue
-
- node_dict = {
- "hostname": node.name,
- "hostManagementIp": "%s/%s" % (nodeip, mgmtSubnetBits),
- "bridgeId": bridgeId,
- "dataPlaneIntf": dataPlaneIntf,
- "dataPlaneIp": dataPlaneIp
- }
- data["apps"]["org.onosproject.cordvtn"]["cordvtn"]["nodes"].append(node_dict)
-
- # Generate apps->org.onosproject.cordvtn->cordvtn->publicGateways
- # Pull the gateway information from vRouter
- vrouters = VRouterService.get_service_objects().all()
- if vrouters:
- for gateway in vrouters[0].get_gateways():
- gatewayIp = gateway['gateway_ip'].split('/',1)[0]
- gatewayMac = gateway['gateway_mac']
- gateway_dict = {
- "gatewayIp": gatewayIp,
- "gatewayMac": gatewayMac
- }
- data["apps"]["org.onosproject.cordvtn"]["cordvtn"]["publicGateways"].append(gateway_dict)
-
- return json.dumps(data, indent=4, sort_keys=True)
-
- def get_volt_network_config(self, o, attrs):
- try:
- volt = VOLTService.get_service_objects().all()[0]
- except:
- return None
-
- devices = []
- for voltdev in volt.volt_devices.all():
- access_devices = []
- for access in voltdev.access_devices.all():
- access_device = {
- "uplink" : access.uplink,
- "vlan" : access.vlan
- }
- access_devices.append(access_device)
-
- if voltdev.access_agent:
- agent = voltdev.access_agent
- olts = {}
- for port_mapping in agent.port_mappings.all():
- olts[port_mapping.port] = port_mapping.mac
- agent_config = {
- "olts" : olts,
- "mac" : agent.mac
- }
-
- device = {
- voltdev.openflow_id : {
- "accessDevice" : access_devices,
- "accessAgent" : agent_config
- },
- "basic" : {
- "driver" : voltdev.driver
- }
- }
- devices.append(device)
-
- data = {
- "devices" : devices
- }
- return json.dumps(data, indent=4, sort_keys=True)
-
- def get_volt_component_config(self, o, attrs):
- data = {
- "org.ciena.onos.ext_notifier.KafkaNotificationBridge":{
- "rabbit.user": "<rabbit_user>",
- "rabbit.password": "<rabbit_password>",
- "rabbit.host": "<rabbit_host>",
- "publish.kafka": "false",
- "publish.rabbit": "true",
- "volt.events.rabbit.topic": "notifications.info",
- "volt.events.rabbit.exchange": "voltlistener",
- "volt.events.opaque.info": "{project_id: <keystone_tenant_id>, user_id: <keystone_user_id>}",
- "publish.volt.events": "true"
- }
- }
- return json.dumps(data, indent=4, sort_keys=True)
-
- def get_vrouter_network_config(self, o, attrs):
- # From the onosproject wiki:
- # https://wiki.onosproject.org/display/ONOS/vRouter
- data = {
- "devices" : {
- "of:00000000000000b1" : {
- "basic" : {
- "driver" : "softrouter"
- }
- }
- },
- "ports" : {
- "of:00000000000000b1/1" : {
- "interfaces" : [
- {
- "name" : "b1-1",
- "ips" : [ "10.0.1.2/24" ],
- "mac" : "00:00:00:00:00:01"
- }
- ]
- },
- "of:00000000000000b1/2" : {
- "interfaces" : [
- {
- "name" : "b1-2",
- "ips" : [ "10.0.2.2/24" ],
- "mac" : "00:00:00:00:00:01"
- }
- ]
- },
- "of:00000000000000b1/3" : {
- "interfaces" : [
- {
- "name" : "b1-3",
- "ips" : [ "10.0.3.2/24" ],
- "mac" : "00:00:00:00:00:01"
- }
- ]
- },
- "of:00000000000000b1/4" : {
- "interfaces" : [
- {
- "name" : "b1-4",
- "ips" : [ "10.0.4.2/24" ],
- "mac" : "00:00:00:00:00:02",
- "vlan" : "100"
- }
- ]
- }
- },
- "apps" : {
- "org.onosproject.router" : {
- "router" : {
- "controlPlaneConnectPoint" : "of:00000000000000b1/5",
- "ospfEnabled" : "true",
- "interfaces" : [ "b1-1", "b1-2", "b1-2", "b1-4" ]
- }
- }
- }
- }
- return json.dumps(data, indent=4, sort_keys=True)
-
- def write_configs(self, o):
- o.config_fns = []
- o.rest_configs = []
- o.component_configs = []
- o.files_dir = self.get_files_dir(o)
-
- if not os.path.exists(o.files_dir):
- os.makedirs(o.files_dir)
-
- # Combine the service attributes with the tenant attributes. Tenant
- # attribute can override service attributes.
- attrs = o.provider_service.serviceattribute_dict
- attrs.update(o.tenantattribute_dict)
-
- ordered_attrs = attrs.keys()
-
- onos = self.get_onos_service(o)
- if onos.node_key:
- file(os.path.join(o.files_dir, "node_key"),"w").write(onos.node_key)
- o.node_key_fn="node_key"
- else:
- o.node_key_fn=None
-
- o.early_rest_configs=[]
- if ("cordvtn" in o.dependencies) and (not self.is_no_container(o)):
- # For VTN, since it's running in a docker host container, we need
- # to make sure it configures the cluster using the right ip addresses.
- # NOTE: rest_onos/v1/cluster/configuration/ will reboot the cluster and
- # must go first.
- name="rest_onos/v1/cluster/configuration/"
- value= self.get_cluster_configuration(o)
- fn = name[5:].replace("/","_")
- endpoint = name[5:]
- file(os.path.join(o.files_dir, fn),"w").write(" " +value)
- o.early_rest_configs.append( {"endpoint": endpoint, "fn": fn} )
-
- # Generate config files and save them to the appropriate tenant attributes
- configs = []
- for key, value in attrs.iteritems():
- if key == "autogenerate" and value:
- for config in value.split(','):
- configs.append(config.strip())
-
- for label in configs:
- config = None
- value = None
- if label == "vtn-network-cfg":
- # Generate the VTN config file... where should this live?
- config = "rest_onos/v1/network/configuration/"
- value = self.get_vtn_config(o, attrs)
- elif label == "volt-network-cfg":
- config = "rest_onos/v1/network/configuration/"
- value = self.get_volt_network_config(o, attrs)
- elif label == "volt-component-cfg":
- config = "component_config"
- value = self.get_volt_component_config(o, attrs)
- elif label == "vrouter-network-cfg":
- config = "rest_onos/v1/network/configuration/"
- value = self.get_vrouter_network_config(o, attrs)
-
- if config:
- tas = TenantAttribute.objects.filter(tenant=o, name=config)
- if tas:
- ta = tas[0]
- if ta.value != value:
- logger.info("updating %s with autogenerated config" % config)
- ta.value = value
- ta.save()
- attrs[config] = value
- else:
- logger.info("saving autogenerated config %s" % config)
- ta = TenantAttribute(tenant=o, name=config, value=value)
- ta.save()
- attrs[config] = value
-
- for name in attrs.keys():
- value = attrs[name]
- if name.startswith("config_"):
- fn = name[7:] # .replace("_json",".json")
- o.config_fns.append(fn)
- file(os.path.join(o.files_dir, fn),"w").write(value)
- if name.startswith("rest_"):
- fn = name[5:].replace("/","_")
- endpoint = name[5:]
- # Ansible goes out of its way to make our life difficult. If
- # 'lookup' sees a file that it thinks contains json, then it'll
- # insist on parsing and return a json object. We just want
- # a string, so prepend a space and then strip the space off
- # later.
- file(os.path.join(o.files_dir, fn),"w").write(" " +value)
- o.rest_configs.append( {"endpoint": endpoint, "fn": fn} )
- if name.startswith("component_config"):
- components = json.loads(value,object_pairs_hook=OrderedDict)
- for component in components.keys():
- config = components[component]
- for key in config.keys():
- config_val = config[key]
- found = re.findall('<(.+?)>',config_val)
- for x in found:
- #Get value corresponding to that string
- val = self.get_dynamic_parameter_value(o, x)
- if val:
- config_val = re.sub('<'+x+'>', val, config_val)
- #TODO: else raise an exception?
- o.component_configs.append( {"component": component, "config_params": "'{\""+key+"\":\""+config_val+"\"}'"} )
-
- def prepare_record(self, o):
- self.write_configs(o)
-
- def get_extra_attributes_common(self, o):
- fields = {}
-
- # These are attributes that are not dependent on Instance. For example,
- # REST API stuff.
-
- onos = self.get_onos_service(o)
-
- fields["files_dir"] = o.files_dir
- fields["appname"] = o.name
- fields["rest_configs"] = o.rest_configs
- fields["rest_hostname"] = onos.rest_hostname
- fields["rest_port"] = onos.rest_port
-
- if o.dependencies:
- fields["dependencies"] = [x.strip() for x in o.dependencies.split(",")]
- else:
- fields["dependencies"] = []
-
- return fields
-
- def get_extra_attributes_full(self, o):
- instance = self.get_instance(o)
-
- fields = self.get_extra_attributes_common(o)
-
- fields["config_fns"] = o.config_fns
- fields["early_rest_configs"] = o.early_rest_configs
- fields["component_configs"] = o.component_configs
- fields["node_key_fn"] = o.node_key_fn
-
- if o.install_dependencies:
- fields["install_dependencies"] = [x.strip() for x in o.install_dependencies.split(",")]
- else:
- fields["install_dependencies"] = []
-
- if (instance.isolation=="container"):
- fields["ONOS_container"] = "%s-%s" % (instance.slice.name, str(instance.id))
- else:
- fields["ONOS_container"] = "ONOS"
- return fields
-
- def get_extra_attributes(self, o):
- if self.is_no_container(o):
- return self.get_extra_attributes_common(o)
- else:
- return self.get_extra_attributes_full(o)
-
- def sync_fields(self, o, fields):
- # the super causes the playbook to be run
- super(SyncONOSApp, self).sync_fields(o, fields)
-
- def run_playbook(self, o, fields):
- if self.is_no_container(o):
- # There is no machine to SSH to, so use the synchronizer's
- # run_template method directly.
- run_template("sync_onosapp_nocontainer.yaml", fields)
- else:
- super(SyncONOSApp, self).run_playbook(o, fields)
-
- def delete_record(self, m):
- pass
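
For reference, write_configs() above routes each service/tenant attribute by name prefix: config_* attributes become files copied into the ONOS container, while rest_* attributes become request bodies POSTed to the named ONOS REST endpoint. A small standalone sketch of that mapping, using hypothetical attribute names:

attrs = {
    "config_network-cfg.json": "{}",
    "rest_onos/v1/network/configuration/": "{}",
}

config_fns = []
rest_configs = []
for name in sorted(attrs):
    if name.startswith("config_"):
        config_fns.append(name[len("config_"):])        # file copied into ONOS
    elif name.startswith("rest_"):
        endpoint = name[len("rest_"):]                   # ONOS REST endpoint
        rest_configs.append({"endpoint": endpoint,
                             "fn": endpoint.replace("/", "_")})  # file holding the request body

print(config_fns)    # ['network-cfg.json']
print(rest_configs)  # [{'endpoint': 'onos/v1/network/configuration/', 'fn': 'onos_v1_network_configuration_'}]
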
diff --git a/xos/onboard/onos-old/synchronizer/steps/sync_onosapp.yaml b/xos/onboard/onos-old/synchronizer/steps/sync_onosapp.yaml
deleted file mode 100644
index 8235286..0000000
--- a/xos/onboard/onos-old/synchronizer/steps/sync_onosapp.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: {{ username }}
- sudo: yes
- vars:
- appname: {{ appname }}
- dependencies: {{ dependencies }}
-{% if component_configs %}
- component_configs:
-{% for component_config in component_configs %}
- - component: {{ component_config.component }}
- config_params: {{ component_config.config_params }}
-{% endfor %}
-{% endif %}
-{% if rest_configs %}
- rest_configs:
-{% for rest_config in rest_configs %}
- - endpoint: {{ rest_config.endpoint }}
- body: "{{ '{{' }} lookup('file', '{{ files_dir }}/{{ rest_config.fn }}') {{ '}}' }}"
-{% endfor %}
-{% endif %}
-{% if early_rest_configs %}
- early_rest_configs:
-{% for early_rest_config in early_rest_configs %}
- - endpoint: {{ early_rest_config.endpoint }}
- body: "{{ '{{' }} lookup('file', '{{ files_dir }}/{{ early_rest_config.fn }}') {{ '}}' }}"
-{% endfor %}
-{% endif %}
-
- tasks:
-
- - name: Get Docker IP
- script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ ONOS_container }}
- register: onosaddr
-
- - name: Wait for ONOS to come up
- wait_for:
- host={{ '{{' }} onosaddr.stdout {{ '}}' }}
- port={{ '{{' }} item {{ '}}' }}
- state=present
- with_items:
- - 8101
- - 8181
- - 9876
-
- - name: Config file directory
- file:
- path=/home/ubuntu/{{ appname }}/
- state=directory
-
-{% if node_key_fn %}
- - name: Copy over key
- copy:
- src={{ files_dir }}/{{ node_key_fn }}
- dest=/home/ubuntu/node_key
-
- - name: Copy node key into container
- shell: docker cp /home/ubuntu/node_key {{ ONOS_container }}:/root/node_key
-{% endif %}
-
-{% if config_fns %}
- - name: Copy over configuration files
- copy:
- src={{ files_dir }}/{{ '{{' }} item {{ '}}' }}
- dest=/home/ubuntu/{{ appname }}/{{ '{{' }} item {{ '}}' }}
- with_items:
- {% for config_fn in config_fns %}
- - {{ config_fn }}
- {% endfor %}
-
- - name: Make sure config directory exists
- shell: docker exec {{ ONOS_container }} mkdir -p /root/onos/config/
- sudo: yes
-
- - name: Copy config files into container
- shell: docker cp {{ appname }}/{{ '{{' }} item {{ '}}' }} {{ ONOS_container }}:/root/onos/config/
- sudo: yes
- with_items:
- {% for config_fn in config_fns %}
- - {{ config_fn }}
- {% endfor %}
-{% endif %}
-
- # Don't know how to check for this condition, just wait
- - name: Wait for ONOS to install the apps
- wait_for: timeout=15
-
-{% if early_rest_configs %}
- - name: Add ONOS early configuration values
- uri:
- url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/{{ '{{' }} item.endpoint {{ '}}' }}
- body: "{{ '{{' }} item.body {{ '}}' }}"
- body_format: raw
- method: POST
- user: karaf
- password: karaf
- with_items: "early_rest_configs"
-
- # Don't know how to check for this condition, just wait
- - name: Wait for ONOS to restart
- wait_for: timeout=15
-{% endif %}
-
-{% if install_dependencies %}
- - name: Install app file directory
- file:
- path=/home/ubuntu/{{ appname }}/apps/
- state=directory
-
- - name: Copy over app install files to ONOS host
- copy:
- src=/opt/xos/synchronizers/onos/{{ '{{' }} item {{ '}}' }}
- dest=/home/ubuntu/{{ appname }}/apps/{{ '{{' }} item {{ '}}' }}
- with_items:
- {% for install_app in install_dependencies %}
- - {{ install_app }}
- {% endfor %}
-
- - name: POST onos-app install command
- command: >
- curl -XPOST -HContent-Type:application/octet-stream -u karaf:karaf --data-binary @/home/ubuntu/{{ appname }}/apps/{{ '{{' }} item {{ '}}' }} http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/onos/v1/applications
- with_items:
- {% for dependency in install_dependencies %}
- - {{ dependency }}
- {% endfor %}
-{% endif %}
-
-{% if dependencies %}
- - name: Add dependencies to ONOS
- uri:
- url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/onos/v1/applications/{{ '{{' }} item {{ '}}' }}/active
- method: POST
- user: karaf
- password: karaf
- with_items:
- {% for dependency in dependencies %}
- - {{ dependency }}
- {% endfor %}
-{% endif %}
-
-{% if component_configs %}
- - name: Add ONOS component configuration values
- command: >
- curl -XPOST -HContent-Type:application/json -u karaf:karaf -d {{ '{{' }} item.config_params | to_json {{ '}}' }} http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/onos/v1/configuration/{{
- '{{' }} item.component {{ '}}' }}
- with_items: "component_configs"
-
-# uri:
-# url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/onos/v1/configuration/{{ '{{' }} item.component {{ '}}' }} #http://localhost:8181/onos/v1/configuration/
-# body: "{{ '{{' }} item.config_params | to_json {{ '}}' }}"
-# body_format: json
-# method: POST
-# user: karaf
-# password: karaf
-# with_items: "component_configs"
-{% endif %}
-
-{% if rest_configs %}
-# Do this after services have been activated, or it will cause an exception.
-# vOLT will re-read its net config; vbng may not.
- - name: Add ONOS configuration values
- uri:
- url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/{{ '{{' }} item.endpoint {{ '}}' }} #http://localhost:8181/onos/v1/network/configuration/
- body: "{{ '{{' }} item.body {{ '}}' }}"
- body_format: raw
- method: POST
- user: karaf
- password: karaf
- with_items: "rest_configs"
-{% endif %}
diff --git a/xos/onboard/onos-old/synchronizer/steps/sync_onosapp_nocontainer.yaml b/xos/onboard/onos-old/synchronizer/steps/sync_onosapp_nocontainer.yaml
deleted file mode 100644
index 5aad569..0000000
--- a/xos/onboard/onos-old/synchronizer/steps/sync_onosapp_nocontainer.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- vars:
- appname: {{ appname }}
- dependencies: {{ dependencies }}
-{% if component_configs %}
- component_configs:
-{% for component_config in component_configs %}
- - component: {{ component_config.component }}
- config_params: {{ component_config.config_params }}
-{% endfor %}
-{% endif %}
-{% if rest_configs %}
- rest_configs:
-{% for rest_config in rest_configs %}
- - endpoint: {{ rest_config.endpoint }}
- body: "{{ '{{' }} lookup('file', '{{ files_dir }}/{{ rest_config.fn }}') {{ '}}' }}"
-{% endfor %}
-{% endif %}
-{% if early_rest_configs %}
- early_rest_configs:
-{% for early_rest_config in early_rest_configs %}
- - endpoint: {{ early_rest_config.endpoint }}
- body: "{{ '{{' }} lookup('file', '{{ files_dir }}/{{ early_rest_config.fn }}') {{ '}}' }}"
-{% endfor %}
-{% endif %}
- rest_hostname: {{ rest_hostname }}
- rest_port: {{ rest_port }}
-
- tasks:
-{% if dependencies %}
- - name: Add dependencies to ONOS
- uri:
- url: http://{{ '{{' }} rest_hostname {{ '}}' }}:{{ '{{' }} rest_port {{ '}}' }}/onos/v1/applications/{{ '{{' }} item {{ '}}' }}/active
- method: POST
- user: karaf
- password: karaf
- with_items:
- {% for dependency in dependencies %}
- - {{ dependency }}
- {% endfor %}
-{% endif %}
-
-{% if rest_configs %}
-# Do this after services have been activated, or it will cause an exception.
-# vOLT will re-read its net config; vbng may not.
- - name: Add ONOS configuration values
- uri:
- url: http://{{ '{{' }} rest_hostname {{ '}}' }}:{{ '{{' }} rest_port {{ '}}' }}/{{ '{{' }} item.endpoint {{ '}}' }} #http://localhost:8181/onos/v1/network/configuration/
- body: "{{ '{{' }} item.body {{ '}}' }}"
- body_format: raw
- method: POST
- user: karaf
- password: karaf
- with_items: "rest_configs"
-{% endif %}
diff --git a/xos/onboard/onos-old/synchronizer/steps/sync_onosservice.py b/xos/onboard/onos-old/synchronizer/steps/sync_onosservice.py
deleted file mode 100644
index ce446cf..0000000
--- a/xos/onboard/onos-old/synchronizer/steps/sync_onosservice.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice
-from services.onos.models import ONOSService, ONOSApp
-from xos.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncONOSService(SyncInstanceUsingAnsible):
- provides=[ONOSService]
- observes=ONOSService
- requested_interval=0
- template_name = "sync_onosservice.yaml"
- #service_key_name = "/opt/xos/synchronizers/onos/onos_key"
-
- def __init__(self, *args, **kwargs):
- super(SyncONOSService, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = ONOSService.get_service_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
- else:
- objs = ONOSService.get_deleted_service_objects()
-
- return objs
-
- def get_instance(self, o):
- # We assume the ONOS service owns a slice, so pick one of the instances
- # inside that slice to sync to.
-
- serv = o
-
- if serv.slices.exists():
- slice = serv.slices.all()[0]
- if slice.instances.exists():
- return slice.instances.all()[0]
-
- return None
-
- def get_extra_attributes(self, o):
- fields={}
- fields["instance_hostname"] = self.get_instance(o).instance_name.replace("_","-")
- fields["appname"] = o.name
- fields["ONOS_container"] = "ONOS"
- return fields
-
- def sync_record(self, o):
- if o.no_container:
- logger.info("no work to do for onos service, because o.no_container is set",extra=o.tologdict())
- o.save()
- else:
- super(SyncONOSService, self).sync_record(o)
-
- def sync_fields(self, o, fields):
- # the super causes the playbook to be run
- super(SyncONOSService, self).sync_fields(o, fields)
-
- def run_playbook(self, o, fields):
- instance = self.get_instance(o)
- if (instance.isolation=="container"):
- # If the instance is already a container, then we don't need to
- # install ONOS.
- return
- super(SyncONOSService, self).run_playbook(o, fields)
-
- def delete_record(self, m):
- pass
diff --git a/xos/onboard/onos-old/synchronizer/steps/sync_onosservice.yaml b/xos/onboard/onos-old/synchronizer/steps/sync_onosservice.yaml
deleted file mode 100644
index a51fde5..0000000
--- a/xos/onboard/onos-old/synchronizer/steps/sync_onosservice.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: ubuntu
- sudo: yes
-
- tasks:
-
- - name: Fix /etc/hosts
- lineinfile:
- dest=/etc/hosts
- regexp="127.0.0.1 localhost"
- line="127.0.0.1 localhost {{ instance_hostname }}"
-
- - name: Add repo key
- apt_key:
- keyserver=hkp://pgp.mit.edu:80
- id=58118E89F3A912897C070ADBF76221572C52609D
-
- - name: Install Docker repo
- apt_repository:
- repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
- state=present
-
- - name: Install Docker
- apt:
- name={{ '{{' }} item {{ '}}' }}
- state=latest
- update_cache=yes
- with_items:
- - docker-engine
- - python-pip
- - python-httplib2
-
- - name: Install docker-py
- pip:
- name=docker-py
- state=latest
-
- - name: Start ONOS container
- docker:
- docker_api_version: "1.18"
- name: {{ ONOS_container }}
- # was: reloaded
- state: running
- image: onosproject/onos
- ports:
- - "6653:6653"
- - "8101:8101"
- - "8181:8181"
- - "9876:9876"
-
- - name: Get Docker IP
- script: /opt/xos/synchronizers/onos/scripts/dockerip.sh {{ ONOS_container }}
- register: dockerip
-
- - name: Wait for ONOS to come up
- wait_for:
- host={{ '{{' }} dockerip.stdout {{ '}}' }}
- port={{ '{{' }} item {{ '}}' }}
- state=present
- with_items:
- - 8101
- - 8181
- - 9876
diff --git a/xos/onboard/onos-old/synchronizer/stop.sh b/xos/onboard/onos-old/synchronizer/stop.sh
deleted file mode 100755
index 17d6eb7..0000000
--- a/xos/onboard/onos-old/synchronizer/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f onos-observer.py
diff --git a/xos/onboard/onos-old/synchronizer/supervisor/onos-observer.conf b/xos/onboard/onos-old/synchronizer/supervisor/onos-observer.conf
deleted file mode 100644
index 995644e..0000000
--- a/xos/onboard/onos-old/synchronizer/supervisor/onos-observer.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[supervisord]
-logfile=/var/log/supervisord.log ; (main log file;default $CWD/supervisord.log)
-pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
-nodaemon=true
-
-[program:synchronizer]
-command=python /opt/xos/synchronizers/onos/onos-synchronizer.py -C /opt/xos/synchronizers/onos/onos_synchronizer_config
-stderr_logfile=/var/log/supervisor/synchronizer.err.log
-stdout_logfile=/var/log/supervisor/synchronizer.out.log
diff --git a/xos/onboard/onos-old/templates/onosadmin.html b/xos/onboard/onos-old/templates/onosadmin.html
deleted file mode 100644
index e50660e..0000000
--- a/xos/onboard/onos-old/templates/onosadmin.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<div class = "row text-center">
- <div class="col-xs-12">
- <a class="btn btn-primary" href="/admin/onos/onosapp/">ONOS Apps</a>
- </div>
-</div>
-
diff --git a/xos/onboard/onos-old/tosca/resources/onosapp.py b/xos/onboard/onos-old/tosca/resources/onosapp.py
deleted file mode 100644
index a65c717..0000000
--- a/xos/onboard/onos-old/tosca/resources/onosapp.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-import pdb
-
-from core.models import User, TenantAttribute, Service
-from services.onos.models import ONOSApp, ONOSService
-
-from xosresource import XOSResource
-
-class XOSONOSApp(XOSResource):
- provides = ["tosca.nodes.ONOSApp", "tosca.nodes.ONOSvBNGApp", "tosca.nodes.ONOSvOLTApp", "tosca.nodes.ONOSVTNApp", "tosca.nodes.ONOSvRouterApp"]
- xos_model = ONOSApp
- copyin_props = ["service_specific_id", "dependencies", "install_dependencies"]
-
- def get_xos_args(self, throw_exception=True):
- args = super(XOSONOSApp, self).get_xos_args()
-
- # provider_service is mandatory and must be the ONOS Service
- provider_name = self.get_requirement("tosca.relationships.TenantOfService", throw_exception=throw_exception)
- if provider_name:
- args["provider_service"] = self.get_xos_object(ONOSService, throw_exception=throw_exception, name=provider_name)
-
- # subscriber_service is optional and can be any service
- subscriber_name = self.get_requirement("tosca.relationships.UsedByService", throw_exception=False)
- if subscriber_name:
- args["subscriber_service"] = self.get_xos_object(Service, throw_exception=throw_exception, name=subscriber_name)
-
- return args
-
- def get_existing_objs(self):
- objs = ONOSApp.get_tenant_objects().all()
- objs = [x for x in objs if x.name == self.obj_name]
- return objs
-
- def set_tenant_attr(self, obj, prop_name, value):
- value = self.try_intrinsic_function(value)
- if value:
- attrs = TenantAttribute.objects.filter(tenant=obj, name=prop_name)
- if attrs:
- attr = attrs[0]
- if attr.value != value:
- self.info("updating attribute %s" % prop_name)
- attr.value = value
- attr.save()
- else:
- self.info("adding attribute %s" % prop_name)
- ta = TenantAttribute(tenant=obj, name=prop_name, value=value)
- ta.save()
-
- def postprocess(self, obj):
- props = self.nodetemplate.get_properties()
- for (k,d) in props.items():
- v = d.value
- if k.startswith("config_"):
- self.set_tenant_attr(obj, k, v)
- elif k.startswith("rest_") and (k!="rest_hostname") and (k!="rest_port"):
- self.set_tenant_attr(obj, k, v)
- elif k.startswith("component_config"):
- self.set_tenant_attr(obj, k, v)
- elif k == "autogenerate":
- self.set_tenant_attr(obj, k, v)
-
- def can_delete(self, obj):
- return super(XOSONOSApp, self).can_delete(obj)
diff --git a/xos/onboard/onos-old/tosca/resources/onosservice.py b/xos/onboard/onos-old/tosca/resources/onosservice.py
deleted file mode 100644
index 3540dd0..0000000
--- a/xos/onboard/onos-old/tosca/resources/onosservice.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from core.models import ServiceAttribute
-from services.onos.models import ONOSService
-
-from service import XOSService
-
-class XOSONOSService(XOSService):
- provides = "tosca.nodes.ONOSService"
- xos_model = ONOSService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", "rest_hostname", "rest_port", "no_container", "node_key"]
-
- def set_service_attr(self, obj, prop_name, value):
- value = self.try_intrinsic_function(value)
- if value:
- attrs = ServiceAttribute.objects.filter(service=obj, name=prop_name)
- if attrs:
- attr = attrs[0]
- if attr.value != value:
- self.info("updating attribute %s" % prop_name)
- attr.value = value
- attr.save()
- else:
- self.info("adding attribute %s" % prop_name)
- ta = ServiceAttribute(service=obj, name=prop_name, value=value)
- ta.save()
-
- def postprocess(self, obj):
- props = self.nodetemplate.get_properties()
- for (k,d) in props.items():
- v = d.value
- if k.startswith("config_"):
- self.set_service_attr(obj, k, v)
- elif k.startswith("rest_") and (k!="rest_hostname") and (k!="rest_port"):
- self.set_service_attr(obj, k, v)
-
diff --git a/xos/onboard/openvpn/admin.py b/xos/onboard/openvpn/admin.py
deleted file mode 100644
index 28e778d..0000000
--- a/xos/onboard/openvpn/admin.py
+++ /dev/null
@@ -1,229 +0,0 @@
-from django import forms
-from django.contrib import admin
-
-from core.admin import ReadOnlyAwareAdmin, SliceInline, TenantPrivilegeInline
-from core.middleware import get_request
-from core.models import User
-from services.openvpn.models import OPENVPN_KIND, OpenVPNService, OpenVPNTenant
-from xos.exceptions import XOSValidationError
-
-
-class OpenVPNServiceForm(forms.ModelForm):
-
- exposed_ports = forms.CharField(required=True)
-
- def __init__(self, *args, **kwargs):
- super(OpenVPNServiceForm, self).__init__(*args, **kwargs)
-
- if self.instance:
- self.fields['exposed_ports'].initial = (
- self.instance.exposed_ports_str)
-
- def save(self, commit=True):
- self.instance.exposed_ports = self.cleaned_data['exposed_ports']
- return super(OpenVPNServiceForm, self).save(commit=commit)
-
- def clean_exposed_ports(self):
- exposed_ports = self.cleaned_data['exposed_ports']
- self.instance.exposed_ports_str = exposed_ports
- port_mapping = {"udp": [], "tcp": []}
- parts = exposed_ports.split(",")
- for part in parts:
- part = part.strip()
- if "/" in part:
- (protocol, ports) = part.split("/", 1)
- elif " " in part:
- (protocol, ports) = part.split(None, 1)
- else:
- raise XOSValidationError(
- 'malformed port specifier %s, format example: ' +
- '"tcp 123, tcp 201:206, udp 333"' % part)
-
- protocol = protocol.strip()
- ports = ports.strip()
-
- if not (protocol in ["udp", "tcp"]):
- raise XOSValidationError('unknown protocol %s' % protocol)
-
- if "-" in ports:
- port_mapping[protocol].extend(
- self.parse_port_range(ports, "-"))
- elif ":" in ports:
- port_mapping[protocol].extend(
- self.parse_port_range(ports, ":"))
- else:
- port_mapping[protocol].append(int(ports))
-
- return port_mapping
-
- def parse_port_range(self, port_str, split_str):
- (first, last) = port_str.split(split_str)
- first = int(first.strip())
- last = int(last.strip())
- return list(range(first, last))
-
- class Meta:
- model = OpenVPNService
-
-
-class OpenVPNServiceAdmin(ReadOnlyAwareAdmin):
- """Defines the admin for the OpenVPNService."""
- model = OpenVPNService
- form = OpenVPNServiceForm
- verbose_name = "OpenVPN Service"
-
- list_display = ("backend_status_icon", "name", "enabled")
-
- list_display_links = ('backend_status_icon', 'name', )
-
- fieldsets = [(None, {'fields': ['backend_status_text', 'name', 'enabled',
- 'versionNumber', 'description', "view_url",
- 'exposed_ports'],
- 'classes':['suit-tab suit-tab-general']})]
-
- readonly_fields = ('backend_status_text', )
-
- inlines = [SliceInline]
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs = (('general', 'VPN Service Details'),
- ('slices', 'Slices'),)
-
- def queryset(self, request):
- return OpenVPNService.get_service_objects_by_user(request.user)
-
-
-class OpenVPNTenantForm(forms.ModelForm):
- """The form used to create and edit a OpenVPNTenant.
-
- Attributes:
- creator (forms.ModelChoiceField): The XOS user that created this
- tenant.
- server_network (forms.GenericIPAddressField): The IP address of the VPN network.
- vpn_subnet (forms.GenericIPAddressField): The subnet used by the VPN network.
- is_persistent (forms.BooleanField): Determines if this Tenant keeps
- this connection alive through failures.
- clients_can_see_each_other (forms.BooleanField): Determines if the clients on the VPN can
- communicate with each other.
- failover_servers (forms.ModelMultipleChoiceField): The other OpenVPNTenants to use as failover
- servers.
- protocol (forms.ChoiceField): The protocol to use.
- use_ca_from (forms.ModelChoiceField): Another OpenVPNTenant to use the CA of; this is a very
- hacky way to let VPNs share the same clients.
- """
- creator = forms.ModelChoiceField(queryset=User.objects.all())
- server_network = forms.GenericIPAddressField(
- protocol="IPv4", required=True)
- vpn_subnet = forms.GenericIPAddressField(protocol="IPv4", required=True)
- is_persistent = forms.BooleanField(required=False)
- clients_can_see_each_other = forms.BooleanField(required=False)
- failover_servers = forms.ModelMultipleChoiceField(
- required=False, queryset=OpenVPNTenant.get_tenant_objects())
- protocol = forms.ChoiceField(required=True, choices=[
- ("tcp", "tcp"), ("udp", "udp")])
- use_ca_from = forms.ModelChoiceField(
- queryset=OpenVPNTenant.get_tenant_objects(), required=False)
-
- def __init__(self, *args, **kwargs):
- super(OpenVPNTenantForm, self).__init__(*args, **kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['failover_servers'].widget.attrs['rows'] = 300
- self.fields[
- 'provider_service'].queryset = (
- OpenVPNService.get_service_objects().all())
-
- self.fields['kind'].initial = OPENVPN_KIND
-
- if self.instance:
- self.fields['creator'].initial = self.instance.creator
- self.fields['vpn_subnet'].initial = self.instance.vpn_subnet
- self.fields[
- 'server_network'].initial = self.instance.server_network
- self.fields[
- 'clients_can_see_each_other'].initial = (
- self.instance.clients_can_see_each_other)
- self.fields['is_persistent'].initial = self.instance.is_persistent
- self.initial['protocol'] = self.instance.protocol
- self.fields['failover_servers'].queryset = (
- OpenVPNTenant.get_tenant_objects().exclude(pk=self.instance.pk))
- self.initial['failover_servers'] = OpenVPNTenant.get_tenant_objects().filter(
- pk__in=self.instance.failover_server_ids)
- self.fields['use_ca_from'].queryset = (
- OpenVPNTenant.get_tenant_objects().exclude(pk=self.instance.pk))
- if (self.instance.use_ca_from_id):
- self.initial['use_ca_from'] = (
- OpenVPNTenant.get_tenant_objects().filter(pk=self.instance.use_ca_from_id)[0])
-
- if (not self.instance) or (not self.instance.pk):
- self.fields['creator'].initial = get_request().user
- self.fields['vpn_subnet'].initial = "255.255.255.0"
- self.fields['server_network'].initial = "10.66.77.0"
- self.fields['clients_can_see_each_other'].initial = True
- self.fields['is_persistent'].initial = True
- self.fields['failover_servers'].queryset = (
- OpenVPNTenant.get_tenant_objects())
- if OpenVPNService.get_service_objects().exists():
- self.fields["provider_service"].initial = (
- OpenVPNService.get_service_objects().all()[0])
-
- def save(self, commit=True):
- self.instance.creator = self.cleaned_data.get("creator")
- self.instance.is_persistent = self.cleaned_data.get('is_persistent')
- self.instance.vpn_subnet = self.cleaned_data.get("vpn_subnet")
- self.instance.server_network = self.cleaned_data.get('server_network')
- self.instance.clients_can_see_each_other = self.cleaned_data.get(
- 'clients_can_see_each_other')
-
- self.instance.failover_server_ids = [
- tenant.id for tenant in self.cleaned_data.get('failover_servers')]
-
- # Do not acquire a new port number if the protocol hasn't changed
- if ((not self.instance.protocol) or
- (self.instance.protocol != self.cleaned_data.get("protocol"))):
- self.instance.protocol = self.cleaned_data.get("protocol")
- self.instance.port_number = (
- self.instance.provider_service.get_next_available_port(
- self.instance.protocol))
-
- if (self.cleaned_data.get('use_ca_from')):
- self.instance.use_ca_from_id = self.cleaned_data.get(
- 'use_ca_from').id
- else:
- self.instance.use_ca_from_id = None
-
- return super(OpenVPNTenantForm, self).save(commit=commit)
-
- class Meta:
- model = OpenVPNTenant
-
-
-class OpenVPNTenantAdmin(ReadOnlyAwareAdmin):
- verbose_name = "OpenVPN Tenant Admin"
- list_display = ('id', 'backend_status_icon', 'instance',
- 'server_network', 'vpn_subnet')
- list_display_links = ('id', 'backend_status_icon',
- 'instance', 'server_network', 'vpn_subnet')
- fieldsets = [(None, {'fields': ['backend_status_text', 'kind',
- 'provider_service', 'instance', 'creator',
- 'server_network', 'vpn_subnet',
- 'is_persistent', 'use_ca_from',
- 'clients_can_see_each_other',
- 'failover_servers', "protocol"],
- 'classes': ['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'instance')
- form = OpenVPNTenantForm
- inlines = [TenantPrivilegeInline]
-
- suit_form_tabs = (('general', 'Details'),
- ('tenantprivileges', 'Privileges'))
-
- def queryset(self, request):
- return OpenVPNTenant.get_tenant_objects_by_user(request.user)
-
-
-# Associate the admin forms with the models.
-admin.site.register(OpenVPNService, OpenVPNServiceAdmin)
-admin.site.register(OpenVPNTenant, OpenVPNTenantAdmin)
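The clean_exposed_ports() method deleted above accepts a comma-separated specifier such as "tcp 123, tcp 201:206, udp 333" and turns it into a protocol-to-ports mapping. A standalone sketch of that grammar (a hypothetical helper, not the form method; for simplicity it treats ranges as inclusive, whereas the original parse_port_range() uses range(first, last) and so excludes the upper bound):

    def parse_exposed_ports(spec):
        mapping = {"udp": [], "tcp": []}
        for part in (p.strip() for p in spec.split(",")):
            # Each entry is "<protocol> <port>", "<protocol> <low>:<high>" or "<protocol>/<port>".
            protocol, ports = part.split("/", 1) if "/" in part else part.split(None, 1)
            protocol, ports = protocol.strip(), ports.strip()
            if protocol not in mapping:
                raise ValueError("unknown protocol %s" % protocol)
            if ":" in ports or "-" in ports:
                sep = ":" if ":" in ports else "-"
                low, high = (int(p) for p in ports.split(sep))
                mapping[protocol].extend(range(low, high + 1))
            else:
                mapping[protocol].append(int(ports))
        return mapping

    assert parse_exposed_ports("tcp 123, tcp 201:203, udp 333") == \
        {"udp": [333], "tcp": [123, 201, 202, 203]}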
diff --git a/xos/onboard/openvpn/api/tenant/openvpn/openvpn.py b/xos/onboard/openvpn/api/tenant/openvpn/openvpn.py
deleted file mode 100644
index 9cc13f0..0000000
--- a/xos/onboard/openvpn/api/tenant/openvpn/openvpn.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import jinja2
-
-from api.xosapi_helpers import PlusModelSerializer, ReadOnlyField, XOSViewSet
-from core.models import TenantPrivilege
-from rest_framework import serializers
-from services.openvpn.models import OpenVPNService, OpenVPNTenant
-
-
-def get_default_openvpn_service():
- openvpn_services = OpenVPNService.get_service_objects().all()
- if openvpn_services:
- return openvpn_services[0].id
- return None
-
-
-class OpenVPNTenantSerializer(PlusModelSerializer):
- """A Serializer for the OpenVPNTenant that has the minimum information required for clients.
-
- Attributes:
- id (ReadOnlyField): The ID of OpenVPNTenant.
- server_network (ReadOnlyField): The network of the VPN.
- vpn_subnet (ReadOnlyField): The subnet of the VPN.
- script_text (SerializerMethodField): The text of the script for the client to use to
- connect.
- """
- id = ReadOnlyField()
- server_network = ReadOnlyField()
- vpn_subnet = ReadOnlyField()
- script_text = serializers.SerializerMethodField()
-
- class Meta:
- model = OpenVPNTenant
- fields = ('id', 'service_specific_attribute', 'vpn_subnet',
- 'server_network', 'script_text')
-
- def get_script_text(self, obj):
- """Gets the text of the client script for the requesting user.
-
- Parameters:
- obj (services.openvpn.models.OpenVPNTenant): The OpenVPNTenant to connect to.
-
- Returns:
- str: The client script as a str.
- """
- env = jinja2.Environment(
- loader=jinja2.FileSystemLoader("/opt/xos/services/openvpn/templates"))
- template = env.get_template("connect.vpn.j2")
- client_name = self.context['request'].user.email + "-" + str(obj.id)
- remote_ids = list(obj.failover_server_ids)
- remote_ids.insert(0, obj.id)
- remotes = OpenVPNTenant.get_tenant_objects().filter(pk__in=remote_ids)
- pki_dir = OpenVPNService.get_pki_dir(obj)
- fields = {"client_name": client_name,
- "remotes": remotes,
- "is_persistent": obj.is_persistent,
- "ca_crt": obj.get_ca_crt(pki_dir),
- "client_crt": obj.get_client_cert(client_name, pki_dir),
- "client_key": obj.get_client_key(client_name, pki_dir)
- }
- return template.render(fields)
-
-
-class OpenVPNTenantViewSet(XOSViewSet):
- """Class that provides a list of OpenVPNTenants that the user has permission to access."""
- base_name = "openvpn"
- method_kind = "viewset"
- method_name = "list"
- serializer_class = OpenVPNTenantSerializer
-
- def get_queryset(self):
- # Get every privilege for this user
- tenants_privs = TenantPrivilege.objects.all().filter(
- user=self.request.user)
- vpn_tenants = []
- for priv in tenants_privs:
- vpn_tenants.append(
- OpenVPNTenant.get_tenant_objects().filter(pk=priv.tenant.pk)[0])
- return vpn_tenants
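get_script_text() above renders the on-disk connect.vpn.j2 template with a per-client context. A hedged sketch of the same rendering step, using an inline template string so it runs standalone; the field names match the fields dict the serializer builds, and the values are invented for illustration:

    import jinja2

    template = jinja2.Template(
        "cert {{ client_name }}.crt\n"
        "{% for tenant in remotes %}remote {{ tenant.nat_ip }} {{ tenant.port_number }}\n{% endfor %}"
    )

    class Remote(object):
        nat_ip, port_number = "203.0.113.10", 1101   # hypothetical remote server values

    print(template.render(client_name="padmin@vicci.org-3", remotes=[Remote()], is_persistent=True))
    # cert padmin@vicci.org-3.crt
    # remote 203.0.113.10 1101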
diff --git a/xos/onboard/openvpn/models.py b/xos/onboard/openvpn/models.py
deleted file mode 100644
index 8aaa825..0000000
--- a/xos/onboard/openvpn/models.py
+++ /dev/null
@@ -1,316 +0,0 @@
-from subprocess import PIPE, Popen
-
-from django.db import transaction
-
-from core.models import Service, TenantWithContainer
-from xos.exceptions import XOSConfigurationError, XOSValidationError
-
-OPENVPN_KIND = "openvpn"
-
-
-class OpenVPNService(Service):
- """Defines the Service for creating VPN servers."""
- KIND = OPENVPN_KIND
- OPENVPN_PREFIX = "/opt/openvpn/"
- """The location of the openvpn EASY RSA files and PKIs."""
- SERVER_PREFIX = OPENVPN_PREFIX + "server-"
- """The prefix for server PKIs."""
- VARS = OPENVPN_PREFIX + "vars"
- """The location of the vars file with information for using EASY RSA."""
- EASYRSA_LOC = OPENVPN_PREFIX + "easyrsa3/easyrsa"
- """The location of the EASY RSA binary."""
- EASYRSA_COMMAND_PREFIX = EASYRSA_LOC + " --vars=" + VARS
- """Prefix for EASY RSA commands."""
-
- @classmethod
- def execute_easyrsa_command(cls, pki_dir, command):
- """Executes the given EASY RSA command using the given PKI.
-
- Parameters:
- pki_dir (str): The directory for the pki to execute the command on.
- command (str): The command to execute using EASY RSA.
- """
- full_command = (
- OpenVPNService.EASYRSA_COMMAND_PREFIX + " --pki-dir=" +
- pki_dir + " " + command)
- proc = Popen(
- full_command, shell=True, stdout=PIPE, stderr=PIPE
- )
- (stdout, stderr) = proc.communicate()
- if (proc.returncode != 0):
- raise XOSConfigurationError(
- full_command + " failed with standard out:" + str(stdout) +
- " and stderr: " + str(stderr))
-
- @classmethod
- def get_pki_dir(cls, tenant):
- """Gets the directory of the PKI for the given tenant.
-
- Parameters:
- tenant (services.openvpn.models.OpenVPNTenant): The tenant to get the PKI directory for.
-
- Returns:
- str: The pki directory for the tenant.
- """
- return OpenVPNService.SERVER_PREFIX + str(tenant.id)
-
- class Meta:
- proxy = True
- # The name used to find this service, all directories are named this
- app_label = "openvpn"
- verbose_name = "OpenVPN Service"
-
- default_attributes = {'exposed_ports': None,
- 'exposed_ports_str': None}
-
- @property
- def exposed_ports(self):
- """Mapping[str, list(str)]: maps protocols to a list of ports for that protocol."""
- return self.get_attribute("exposed_ports",
- self.default_attributes["exposed_ports"])
-
- @exposed_ports.setter
- def exposed_ports(self, value):
- self.set_attribute("exposed_ports", value)
-
- @property
- def exposed_ports_str(self):
- """str: a raw str representing the exposed ports."""
- return self.get_attribute("exposed_ports_str",
- self.default_attributes["exposed_ports_str"])
-
- @exposed_ports_str.setter
- def exposed_ports_str(self, value):
- self.set_attribute("exposed_ports_str", value)
-
- def get_next_available_port(self, protocol):
- """Gets the next free port for the given protocol.
-
- Parameters:
- protocol (str): The protocol to get a port for, must be tcp or udp.
-
- Returns:
- int: a port number.
-
- Raises:
- xos.exceptions.XOSValidationError: If the protocol is not udp or tcp.
- xos.exceptions.XOSValidationError: If there are no available ports for the protocol.
- """
- if protocol != "udp" and protocol != "tcp":
- raise XOSValidationError("Port protocol must be udp or tcp")
- if not self.exposed_ports[protocol]:
- raise XOSValidationError(
- "No availble ports for protocol: " + protocol)
- tenants = [
- tenant for tenant in OpenVPNTenant.get_tenant_objects().all()
- if tenant.protocol == protocol]
- port_numbers = self.exposed_ports[protocol]
- for port_number in port_numbers:
- if (
- len([
- tenant for tenant in tenants
- if tenant.port_number == port_number]) == 0):
- return port_number
-
-
-class OpenVPNTenant(TenantWithContainer):
- """Defines the Tenant for creating VPN servers."""
-
- class Meta:
- proxy = True
- verbose_name = "OpenVPN Tenant"
-
- KIND = OPENVPN_KIND
-
- sync_attributes = ("nat_ip", "nat_mac",)
-
- default_attributes = {'vpn_subnet': None,
- 'server_network': None,
- 'clients_can_see_each_other': True,
- 'is_persistent': True,
- 'port': None,
- 'use_ca_from_id': None,
- 'failover_server_ids': list(),
- 'protocol': None}
-
- def __init__(self, *args, **kwargs):
- vpn_services = OpenVPNService.get_service_objects().all()
- if vpn_services:
- self._meta.get_field(
- "provider_service").default = vpn_services[0].id
- super(OpenVPNTenant, self).__init__(*args, **kwargs)
-
- def save(self, *args, **kwargs):
- super(OpenVPNTenant, self).save(*args, **kwargs)
- model_policy_vpn_tenant(self.pk)
-
- def delete(self, *args, **kwargs):
- self.cleanup_container()
- super(OpenVPNTenant, self).delete(*args, **kwargs)
-
- @property
- def protocol(self):
- """str: The protocol that this tenant is listening on."""
- return self.get_attribute(
- "protocol", self.default_attributes["protocol"])
-
- @protocol.setter
- def protocol(self, value):
- self.set_attribute("protocol", value)
-
- @property
- def use_ca_from_id(self):
- """int: The ID of OpenVPNTenant to use to obtain a CA."""
- return self.get_attribute(
- "use_ca_from_id", self.default_attributes["use_ca_from_id"])
-
- @use_ca_from_id.setter
- def use_ca_from_id(self, value):
- self.set_attribute("use_ca_from_id", value)
-
- @property
- def addresses(self):
- """Mapping[str, str]: The ip, mac address, and subnet of the NAT
- network of this Tenant."""
- if (not self.id) or (not self.instance):
- return {}
-
- addresses = {}
- for ns in self.instance.ports.all():
- if "nat" in ns.network.name.lower():
- addresses["ip"] = ns.ip
- addresses["mac"] = ns.mac
- break
-
- return addresses
-
- # This getter is necessary because nat_ip is a sync_attribute
- @property
- def nat_ip(self):
- """str: The IP of this Tenant on the NAT network."""
- return self.addresses.get("ip", None)
-
- # This getter is necessary because nat_mac is a sync_attribute
- @property
- def nat_mac(self):
- """str: The MAC address of this Tenant on the NAT network."""
- return self.addresses.get("mac", None)
-
- @property
- def server_network(self):
- """str: The IP address of the server on the VPN."""
- return self.get_attribute(
- 'server_network',
- self.default_attributes['server_network'])
-
- @server_network.setter
- def server_network(self, value):
- self.set_attribute("server_network", value)
-
- @property
- def vpn_subnet(self):
- """str: The IP address of the client on the VPN."""
- return self.get_attribute(
- 'vpn_subnet',
- self.default_attributes['vpn_subnet'])
-
- @vpn_subnet.setter
- def vpn_subnet(self, value):
- self.set_attribute("vpn_subnet", value)
-
- @property
- def is_persistent(self):
- """bool: True if the VPN connection is persistence, false otherwise."""
- return self.get_attribute(
- "is_persistent",
- self.default_attributes['is_persistent'])
-
- @is_persistent.setter
- def is_persistent(self, value):
- self.set_attribute("is_persistent", value)
-
- @property
- def failover_server_ids(self):
- """list(int): The IDs of the OpenVPNTenants to use as failover servers."""
- return self.get_attribute(
- "failover_server_ids", self.default_attributes["failover_server_ids"])
-
- @failover_server_ids.setter
- def failover_server_ids(self, value):
- self.set_attribute("failover_server_ids", value)
-
- @property
- def clients_can_see_each_other(self):
- """bool: True if the client can see the subnet of the server, false
- otherwise."""
- return self.get_attribute(
- "clients_can_see_each_other",
- self.default_attributes['clients_can_see_each_other'])
-
- @clients_can_see_each_other.setter
- def clients_can_see_each_other(self, value):
- self.set_attribute("clients_can_see_each_other", value)
-
- @property
- def port_number(self):
- """int: the integer representing the port number for this server"""
- return self.get_attribute("port", self.default_attributes['port'])
-
- @port_number.setter
- def port_number(self, value):
- self.set_attribute("port", value)
-
- def get_ca_crt(self, pki_dir):
- """Gets the lines fo the ca.crt file for this OpenVPNTenant.
-
- Parameters:
- pki_dir (str): The PKI directory to look in.
-
- Returns:
- list(str): The lines of the ca.crt file for this OpenVPNTenant.
- """
- with open(pki_dir + "/ca.crt", 'r') as f:
- return f.readlines()
-
- def get_client_cert(self, client_name, pki_dir):
- """Gets the lines fo the crt file for a client.
-
- Parameters:
- pki_dir (str): The PKI directory to look in.
- client_name (str): The client name to use.
-
- Returns:
- list(str): The lines of the crt file for the client.
- """
- with open(pki_dir + "/issued/" + client_name + ".crt", 'r') as f:
- return f.readlines()
-
- def get_client_key(self, client_name, pki_dir):
- """Gets the lines fo the key file for a client.
-
- Parameters:
- pki_dir (str): The PKI directory to look in.
- client_name (str): The client name to use.
-
- Returns:
- list(str): The lines of the key file for the client.
- """
- with open(pki_dir + "/private/" + client_name + ".key", 'r') as f:
- return f.readlines()
-
-
-def model_policy_vpn_tenant(pk):
- """Manages the container for the VPN Tenant.
-
- Parameters
- pk (int): The ID of this OpenVPNTenant.
- """
- # This section of code is atomic to prevent race conditions
- with transaction.atomic():
- # We find all of the tenants that are waiting to update
- tenant = OpenVPNTenant.objects.select_for_update().filter(pk=pk)
- if not tenant:
- return
- # Since this code is atomic it is safe to always use the first tenant
- tenant = tenant[0]
- tenant.manage_container()
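The port bookkeeping in the model deleted above is simple but easy to misread: get_next_available_port() walks the service-level exposed_ports list for the requested protocol and returns the first port no existing tenant of that protocol has claimed. A plain-Python sketch of the same selection, decoupled from the Django models:

    def next_available_port(exposed_ports, used_ports, protocol):
        if protocol not in ("udp", "tcp"):
            raise ValueError("Port protocol must be udp or tcp")
        if not exposed_ports.get(protocol):
            raise ValueError("No available ports for protocol: " + protocol)
        for port in exposed_ports[protocol]:
            if port not in used_ports:
                return port
        return None   # like the original, exhausting the list yields no port

    exposed = {"tcp": [1101, 1102, 1103], "udp": [1194]}
    assert next_available_port(exposed, used_ports={1101}, protocol="tcp") == 1102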
diff --git a/xos/onboard/openvpn/openvpn-onboard.yaml b/xos/onboard/openvpn/openvpn-onboard.yaml
deleted file mode 100644
index 980e0b5..0000000
--- a/xos/onboard/openvpn/openvpn-onboard.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the OpenVPN Service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#openvpn:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/vsg/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/connect.vpn.j2
- synchronizer: synchronizer/manifest
- synchronizer_run: openvpn-synchronizer.py
- #tosca_custom_types: exampleservice.yaml
- rest_tenant: subdirectory:openvpn api/tenant/openvpn/openvpn.py
- private_key: file:///opt/xos/key_import/openvpn_rsa
- public_key: file:///opt/xos/key_import/openvpn_rsa.pub
-
diff --git a/xos/onboard/openvpn/synchronizer/__init__.py b/xos/onboard/openvpn/synchronizer/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/xos/onboard/openvpn/synchronizer/__init__.py
+++ /dev/null
diff --git a/xos/onboard/openvpn/synchronizer/manifest b/xos/onboard/openvpn/synchronizer/manifest
deleted file mode 100644
index 44d6986..0000000
--- a/xos/onboard/openvpn/synchronizer/manifest
+++ /dev/null
@@ -1,14 +0,0 @@
-manifest
-openvpn_config
-__init__.py
-steps/sync_tenantprivilege.py
-steps/sync_openvpntenant.yaml
-steps/__init__.py
-steps/roles/openvpn/templates/server.conf.j2
-steps/roles/openvpn/handlers/main.yml
-steps/roles/openvpn/tasks/main.yml
-steps/sync_openvpntenant.py
-stop.sh
-model-deps
-openvpn-synchronizer.py
-run.sh
diff --git a/xos/onboard/openvpn/synchronizer/model-deps b/xos/onboard/openvpn/synchronizer/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/onboard/openvpn/synchronizer/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/onboard/openvpn/synchronizer/openvpn-synchronizer.py b/xos/onboard/openvpn/synchronizer/openvpn-synchronizer.py
deleted file mode 100755
index 3227ed9..0000000
--- a/xos/onboard/openvpn/synchronizer/openvpn-synchronizer.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(
- os.path.realpath(__file__)), "../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/onboard/openvpn/synchronizer/openvpn_config b/xos/onboard/openvpn/synchronizer/openvpn_config
deleted file mode 100644
index 8a58b52..0000000
--- a/xos/onboard/openvpn/synchronizer/openvpn_config
+++ /dev/null
@@ -1,23 +0,0 @@
-# Required by XOS
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-# Required by XOS
-[api]
-nova_enabled=True
-
-# Sets options for the synchronizer
-[observer]
-name=openvpn
-dependency_graph=/opt/xos/synchronizers/openvpn/model-deps
-steps_dir=/opt/xos/synchronizers/openvpn/steps
-sys_dir=/opt/xos/synchronizers/openvpn/sys
-logfile=/var/log/xos_backend.log
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-proxy_ssh=False
diff --git a/xos/onboard/openvpn/synchronizer/run.sh b/xos/onboard/openvpn/synchronizer/run.sh
deleted file mode 100755
index a5d90c9..0000000
--- a/xos/onboard/openvpn/synchronizer/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python openvpn-synchronizer.py -C $XOS_DIR/synchronizers/openvpn/openvpn_config
diff --git a/xos/onboard/openvpn/synchronizer/steps/__init__.py b/xos/onboard/openvpn/synchronizer/steps/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/xos/onboard/openvpn/synchronizer/steps/__init__.py
+++ /dev/null
diff --git a/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/handlers/main.yml b/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/handlers/main.yml
deleted file mode 100644
index 8725e29..0000000
--- a/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/handlers/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-
-- name: restart openvpn
- shell: (kill -9 $(cat {{ pki_dir }}/pid) || true) && (openvpn {{ pki_dir }}/server.conf &)
diff --git a/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/tasks/main.yml b/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/tasks/main.yml
deleted file mode 100644
index 47093b2..0000000
--- a/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/tasks/main.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-
-- name: install openvpn
- apt: name=openvpn state=present update_cache=yes
-
-- name: make sure /opt/openvpn exists
- file: path=/opt/openvpn state=directory
-
-- name: make sure directory for this server exists
- file: path={{ pki_dir }} state=directory
-
-- name: get server key
- copy: src={{ pki_dir }}/private/server.key dest={{ pki_dir }}/server.key
- notify:
- - restart openvpn
-
-- name: get server crt
- copy: src={{ pki_dir }}/issued/server.crt dest={{ pki_dir }}/server.crt
- notify:
- - restart openvpn
-
-- name: get ca crt
- copy: src={{ pki_dir }}/ca.crt dest={{ pki_dir }}/ca.crt
- notify:
- - restart openvpn
-
-- name: get crl
- copy: src={{ pki_dir }}/crl.pem dest={{ pki_dir }}/crl.pem
-
-- name: get dh
- copy: src={{ pki_dir }}/dh.pem dest={{ pki_dir }}/dh.pem
- notify:
- - restart openvpn
-
-- name: write config
- template: src=server.conf.j2 dest={{ pki_dir }}/server.conf owner=root group=root
- notify:
- - restart openvpn
diff --git a/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/templates/server.conf.j2 b/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/templates/server.conf.j2
deleted file mode 100644
index 4766e7b..0000000
--- a/xos/onboard/openvpn/synchronizer/steps/roles/openvpn/templates/server.conf.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file autogenerated by OpenVPNTenant synchronizer
-# It contains the OPENVPN config file for the server
-script-security 3 system
-port {{ port_number }}
-proto {{ protocol }}
-dev tun
-writepid {{ pki_dir }}/pid
-ca {{ pki_dir }}/ca.crt
-cert {{ pki_dir }}/server.crt
-key {{ pki_dir }}/server.key
-dh {{ pki_dir }}/dh.pem
-crl-verify {{ pki_dir }}/crl.pem
-server {{ server_network }} {{ vpn_subnet }}
-ifconfig-pool-persist {{ pki_dir }}/ipp.txt
-status {{ pki_dir }}/openvpn-status.log
-verb 3
-{% if is_persistent %}
-keepalive 10 60
-persist-tun
-persist-key
-{% endif %}
-{% if clients_can_see_each_other %}
-client-to-client
-{% endif %}
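A hedged illustration of what the server.conf.j2 template above produces: the synchronizer fills port, protocol, and paths from get_extra_attributes(), and the two optional blocks are switched by is_persistent and clients_can_see_each_other. The values below are invented for illustration:

    fields = {"port_number": 1101, "protocol": "udp",
              "server_network": "10.66.77.0", "vpn_subnet": "255.255.255.0",
              "is_persistent": True, "clients_can_see_each_other": False}

    conf = ["port %(port_number)s" % fields,
            "proto %(protocol)s" % fields,
            "server %(server_network)s %(vpn_subnet)s" % fields]
    if fields["is_persistent"]:
        conf += ["keepalive 10 60", "persist-tun", "persist-key"]
    if fields["clients_can_see_each_other"]:
        conf.append("client-to-client")
    print("\n".join(conf))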
diff --git a/xos/onboard/openvpn/synchronizer/steps/sync_openvpntenant.py b/xos/onboard/openvpn/synchronizer/steps/sync_openvpntenant.py
deleted file mode 100644
index b58dd94..0000000
--- a/xos/onboard/openvpn/synchronizer/steps/sync_openvpntenant.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import os
-import shutil
-import sys
-
-from django.db.models import F, Q
-
-from services.openvpn.models import OpenVPNService, OpenVPNTenant
-from synchronizers.base.SyncInstanceUsingAnsible import \
- SyncInstanceUsingAnsible
-
-parentdir = os.path.join(os.path.dirname(__file__), "..")
-sys.path.insert(0, parentdir)
-
-
-class SyncOpenVPNTenant(SyncInstanceUsingAnsible):
- """Class for syncing a OpenVPNTenant using Ansible.
-
- This SyncStep creates any necessary files for the OpenVPNTenant using EASY RSA and then runs the
- Ansible template to start the server on an instance.
- """
- provides = [OpenVPNTenant]
- observes = OpenVPNTenant
- requested_interval = 0
- template_name = "sync_openvpntenant.yaml"
- service_key_name = "/opt/xos/synchronizers/openvpn/openvpn_private_key"
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = OpenVPNTenant.get_tenant_objects().filter(
- Q(enacted__lt=F('updated')) |
- Q(enacted=None), Q(lazy_blocked=False))
- else:
- objs = OpenVPNTenant.get_deleted_tenant_objects()
-
- return objs
-
- def get_extra_attributes(self, tenant):
- return {"is_persistent": tenant.is_persistent,
- "vpn_subnet": tenant.vpn_subnet,
- "server_network": tenant.server_network,
- "clients_can_see_each_other": (
- tenant.clients_can_see_each_other),
- "port_number": tenant.port_number,
- "protocol": tenant.protocol,
- "pki_dir": OpenVPNService.get_pki_dir(tenant)
- }
-
- def sync_fields(self, o, fields):
- pki_dir = OpenVPNService.get_pki_dir(o)
-
- if (not os.path.isdir(pki_dir)):
- OpenVPNService.execute_easyrsa_command(pki_dir, "init-pki")
- OpenVPNService.execute_easyrsa_command(
- pki_dir, "--req-cn=XOS build-ca nopass")
-
- # Very hacky way to handle VPNs that need to share CAs
- if (o.use_ca_from_id):
- tenant = OpenVPNTenant.get_tenant_objects().filter(
- pk=o.use_ca_from_id)[0]
- other_pki_dir = OpenVPNService.get_pki_dir(tenant)
- shutil.copy2(other_pki_dir + "/ca.crt", pki_dir)
- shutil.copy2(other_pki_dir + "/private/ca.key",
- pki_dir + "/private")
-
- # If the server has to be built then we need to build it
- if (not os.path.isfile(pki_dir + "/issued/server.crt")):
- OpenVPNService.execute_easyrsa_command(
- pki_dir, "build-server-full server nopass")
- OpenVPNService.execute_easyrsa_command(pki_dir, "gen-dh")
-
- # Get the most recent list of revoked clients
- OpenVPNService.execute_easyrsa_command(pki_dir, "gen-crl")
-
- # Super runs the playbook
- super(SyncOpenVPNTenant, self).sync_fields(o, fields)
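sync_fields() above bootstraps a per-tenant PKI by driving Easy-RSA through OpenVPNService.execute_easyrsa_command(), with guards so steps are skipped when their outputs already exist. A hedged sketch of the command sequence it issues (the binary and vars paths are taken from the OpenVPNService constants earlier in this diff; the existence checks are omitted, and dry_run just prints the commands):

    import subprocess

    EASYRSA = "/opt/openvpn/easyrsa3/easyrsa --vars=/opt/openvpn/vars"

    def bootstrap_server_pki(pki_dir, dry_run=True):
        commands = ["init-pki",                         # only when pki_dir is missing
                    "--req-cn=XOS build-ca nopass",     # only when pki_dir is missing
                    "build-server-full server nopass",  # only when server.crt is missing
                    "gen-dh",                           # only when server.crt is missing
                    "gen-crl"]                          # always, to refresh revocations
        for command in commands:
            full = "%s --pki-dir=%s %s" % (EASYRSA, pki_dir, command)
            print(full) if dry_run else subprocess.check_call(full, shell=True)

    bootstrap_server_pki("/opt/openvpn/server-7")   # hypothetical tenant id 7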
diff --git a/xos/onboard/openvpn/synchronizer/steps/sync_openvpntenant.yaml b/xos/onboard/openvpn/synchronizer/steps/sync_openvpntenant.yaml
deleted file mode 100644
index e36f51b..0000000
--- a/xos/onboard/openvpn/synchronizer/steps/sync_openvpntenant.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: ubuntu
- sudo: yes
- vars:
- server_network: {{ server_network }}
- is_persistent: {{ is_persistent }}
- vpn_subnet: {{ vpn_subnet }}
- clients_can_see_each_other: {{ clients_can_see_each_other }}
- port_number: {{ port_number }}
- protocol: {{ protocol }}
- pki_dir: {{ pki_dir }}
-
- roles:
- - openvpn
diff --git a/xos/onboard/openvpn/synchronizer/steps/sync_tenantprivilege.py b/xos/onboard/openvpn/synchronizer/steps/sync_tenantprivilege.py
deleted file mode 100644
index 51ee6df..0000000
--- a/xos/onboard/openvpn/synchronizer/steps/sync_tenantprivilege.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-import sys
-
-from core.models import TenantPrivilege
-from services.openvpn.models import OPENVPN_KIND, OpenVPNService, OpenVPNTenant
-from synchronizers.base.syncstep import DeferredException, SyncStep
-
-parentdir = os.path.join(os.path.dirname(__file__), "..")
-sys.path.insert(0, parentdir)
-
-
-class SyncTenantPrivilege(SyncStep):
- """Class for syncing a TenantPrivilege for a OpenVPNTenant.
-
- This SyncStep isolates the updated TenantPrivileges that are for OpenVPNTenants and performs
- actions if the TenantPrivilege has been added or deleted. For added privileges a new client
- certificate and key are made, signed with the ca.crt file used by this OpenVPNTenant. For deleted
- privileges the client certificate is revoked and the files associated are deleted. In both
- cases the associated OpenVPNTenant is saved causing the OpenVPNTenant synchronizer to run.
- """
- provides = [TenantPrivilege]
- observes = TenantPrivilege
- requested_interval = 0
-
- def fetch_pending(self, deleted):
- privs = super(SyncTenantPrivilege, self).fetch_pending(deleted)
- # Get only the TenantPrivileges that relate to OpenVPNTenants
- privs = [priv for priv in privs if priv.tenant.kind == OPENVPN_KIND]
- return privs
-
- def sync_record(self, record):
- if (not record.tenant.id):
- raise DeferredException("Privilege waiting on VPN Tenant ID")
- certificate = self.get_certificate_name(record)
- tenant = OpenVPNTenant.get_tenant_objects().filter(pk=record.tenant.id)[0]
- if (not tenant):
- raise DeferredException("Privilege waiting on VPN Tenant")
- # Only add a certificate if one does not yet exist
- pki_dir = OpenVPNService.get_pki_dir(tenant)
- if (not os.path.isfile(pki_dir + "/issued/" + certificate + ".crt")):
- OpenVPNService.execute_easyrsa_command(
- pki_dir, "build-client-full " + certificate + " nopass")
- tenant.save()
- record.save()
-
- def delete_record(self, record):
- if (not record.tenant.id):
- return
- certificate = self.get_certificate_name(record)
- tenant = OpenVPNTenant.get_tenant_objects().filter(pk=record.tenant.id)[0]
- if (not tenant):
- return
- # If the client has already been revoked, don't do it again
- pki_dir = OpenVPNService.get_pki_dir(tenant)
- if (os.path.isfile(pki_dir + "/issued/" + certificate + ".crt")):
- OpenVPNService.execute_easyrsa_command(
- pki_dir, "revoke " + certificate)
- # Revoking a client cert does not delete any of the files;
- # to make sure that we can add this user again we need to
- # delete all of the files created by easyrsa.
- os.remove(pki_dir + "/issued/" + certificate + ".crt")
- os.remove(pki_dir + "/private/" + certificate + ".key")
- os.remove(pki_dir + "/reqs/" + certificate + ".req")
- tenant.save()
-
- record.delete()
-
- def get_certificate_name(self, tenant_privilege):
- """Gets the name of a certificate for the given TenantPrivilege
-
- Parameters:
- tenant_privilege (core.models.TenantPrivilege): The TenantPrivilege to use to generate
- the certificate name.
-
- Returns:
- str: The certificate name.
- """
- return (str(tenant_privilege.user.email) +
- "-" + str(tenant_privilege.tenant.id))
diff --git a/xos/onboard/openvpn/synchronizer/stop.sh b/xos/onboard/openvpn/synchronizer/stop.sh
deleted file mode 100755
index 4a83aca..0000000
--- a/xos/onboard/openvpn/synchronizer/stop.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-# Kill the observer
-pkill -9 -f openvpn-synchronizer.py
diff --git a/xos/onboard/openvpn/templates/connect.vpn.j2 b/xos/onboard/openvpn/templates/connect.vpn.j2
deleted file mode 100644
index 2028cd9..0000000
--- a/xos/onboard/openvpn/templates/connect.vpn.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#! /bin/bash
-# This file autogenerated by OpenVPNTenant.
-# It contains a script used to generate the OPENVPN client files.
-printf "%b" "client
-dev tun
-remote-cert-tls server
-resolv-retry 60
-nobind
-ca ca.crt
-cert {{ client_name }}.crt
-key {{ client_name }}.key
-verb 3
-{% for tenant in remotes %}remote {{ tenant.nat_ip }} {{ tenant.port_number }} {{ tenant.protocol }}{% endfor %}
-{% if is_persistent %}
-persist-tun
-persist-key
-{% endif %}
-" > client.conf
-printf "%b" "{% for line in ca_crt %}{{ line }}{% endfor %}" > ca.crt
-printf "%b" "{% for line in client_crt %}{{ line }}{% endfor %}" > {{ client_name }}.crt
-printf "%b" "{% for line in client_key %}{{ line }}{% endfor %}" > {{ client_name }}.key
-apt-get update
-apt-get install openvpn -y
-openvpn client.conf
diff --git a/xos/onboard/volt-old/admin.py b/xos/onboard/volt-old/admin.py
deleted file mode 100644
index cf5dfa6..0000000
--- a/xos/onboard/volt-old/admin.py
+++ /dev/null
@@ -1,237 +0,0 @@
-from django.contrib import admin
-
-from services.volt.models import *
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline
-from core.middleware import get_request
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-#-----------------------------------------------------------------------------
-# vOLT
-#-----------------------------------------------------------------------------
-
-class VOLTServiceAdmin(ReadOnlyAwareAdmin):
- model = VOLTService
- verbose_name = "vOLT Service"
- verbose_name_plural = "vOLT Service"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description',"view_url","icon_url" ], 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'vOLT Service Details'),
- ('administration', 'Administration'),
- #('tools', 'Tools'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = (('voltadmin.html', 'top', 'administration'),
- ) #('hpctools.html', 'top', 'tools') )
-
- def queryset(self, request):
- return VOLTService.get_service_objects_by_user(request.user)
-
-class VOLTTenantForm(forms.ModelForm):
- s_tag = forms.CharField()
- c_tag = forms.CharField()
- creator = forms.ModelChoiceField(queryset=User.objects.all())
-
- def __init__(self,*args,**kwargs):
- super (VOLTTenantForm,self ).__init__(*args,**kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['provider_service'].queryset = VOLTService.get_service_objects().all()
- if self.instance:
- # fields for the attributes
- self.fields['c_tag'].initial = self.instance.c_tag
- self.fields['s_tag'].initial = self.instance.s_tag
- self.fields['creator'].initial = self.instance.creator
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = VOLT_KIND
- self.fields['creator'].initial = get_request().user
- if VOLTService.get_service_objects().exists():
- self.fields["provider_service"].initial = VOLTService.get_service_objects().all()[0]
-
- def save(self, commit=True):
- self.instance.s_tag = self.cleaned_data.get("s_tag")
- self.instance.c_tag = self.cleaned_data.get("c_tag")
- self.instance.creator = self.cleaned_data.get("creator")
- return super(VOLTTenantForm, self).save(commit=commit)
-
- class Meta:
- model = VOLTTenant
-
-class VOLTTenantAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', 'service_specific_id', 's_tag', 'c_tag', 'subscriber_root' )
- list_display_links = ('backend_status_icon', 'id')
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_root', 'service_specific_id', # 'service_specific_attribute',
- 's_tag', 'c_tag', 'creator'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'service_specific_attribute')
- form = VOLTTenantForm
-
- suit_form_tabs = (('general','Details'),)
-
- def queryset(self, request):
- return VOLTTenant.get_tenant_objects_by_user(request.user)
-
-class AccessDeviceInline(XOSTabularInline):
- model = AccessDevice
- fields = ['volt_device','uplink','vlan']
- readonly_fields = []
- extra = 0
-# max_num = 0
- suit_classes = 'suit-tab suit-tab-accessdevices'
-
-# @property
-# def selflink_reverse_path(self):
-# return "admin:cord_volttenant_change"
-
-class VOLTDeviceAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'name', 'openflow_id', 'driver' )
- list_display_links = ('backend_status_icon', 'name', 'openflow_id')
- fieldsets = [ (None, {'fields': ['backend_status_text','name','volt_service','openflow_id','driver','access_agent'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text',)
- inlines = [AccessDeviceInline]
-
- suit_form_tabs = (('general','Details'), ('accessdevices','Access Devices'))
-
-class AccessDeviceAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', 'volt_device', 'uplink', 'vlan' )
- list_display_links = ('backend_status_icon', 'id')
- fieldsets = [ (None, {'fields': ['backend_status_text','volt_device','uplink','vlan'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text',)
-
- suit_form_tabs = (('general','Details'),)
-
-class AgentPortMappingInline(XOSTabularInline):
- model = AgentPortMapping
- fields = ['access_agent', 'mac', 'port']
- readonly_fields = []
- extra = 0
-# max_num = 0
- suit_classes = 'suit-tab suit-tab-accessportmaps'
-
-# @property
-# def selflink_reverse_path(self):
-# return "admin:cord_volttenant_change"
-
-class AccessAgentAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'name', 'mac' )
- list_display_links = ('backend_status_icon', 'name')
- fieldsets = [ (None, {'fields': ['backend_status_text','name','volt_service','mac'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text',)
- inlines= [AgentPortMappingInline]
-
- suit_form_tabs = (('general','Details'), ('accessportmaps', 'Port Mappings'))
-
-# -------------------------------------------
-# CORDSubscriberRoot
-# -------------------------------------------
-
-class VOLTTenantInline(XOSTabularInline):
- model = VOLTTenant
- fields = ['provider_service', 'subscriber_root', 'service_specific_id']
- readonly_fields = ['provider_service', 'subscriber_root', 'service_specific_id']
- extra = 0
- max_num = 0
- suit_classes = 'suit-tab suit-tab-volttenants'
- fk_name = 'subscriber_root'
- verbose_name = 'subscribed tenant'
- verbose_name_plural = 'subscribed tenants'
-
- @property
- def selflink_reverse_path(self):
- return "admin:cord_volttenant_change"
-
- def queryset(self, request):
- qs = super(VOLTTenantInline, self).queryset(request)
- return qs.filter(kind=VOLT_KIND)
-
-class CordSubscriberRootForm(forms.ModelForm):
- url_filter_level = forms.CharField(required = False)
- uplink_speed = forms.CharField(required = False)
- downlink_speed = forms.CharField(required = False)
- status = forms.ChoiceField(choices=CordSubscriberRoot.status_choices, required=True)
- enable_uverse = forms.BooleanField(required=False)
- cdn_enable = forms.BooleanField(required=False)
-
- def __init__(self,*args,**kwargs):
- super (CordSubscriberRootForm,self ).__init__(*args,**kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- if self.instance:
- self.fields['url_filter_level'].initial = self.instance.url_filter_level
- self.fields['uplink_speed'].initial = self.instance.uplink_speed
- self.fields['downlink_speed'].initial = self.instance.downlink_speed
- self.fields['status'].initial = self.instance.status
- self.fields['enable_uverse'].initial = self.instance.enable_uverse
- self.fields['cdn_enable'].initial = self.instance.cdn_enable
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = CORD_SUBSCRIBER_KIND
- self.fields['uplink_speed'].initial = CordSubscriberRoot.get_default_attribute("uplink_speed")
- self.fields['downlink_speed'].initial = CordSubscriberRoot.get_default_attribute("downlink_speed")
- self.fields['status'].initial = CordSubscriberRoot.get_default_attribute("status")
- self.fields['enable_uverse'].initial = CordSubscriberRoot.get_default_attribute("enable_uverse")
- self.fields['cdn_enable'].initial = CordSubscriberRoot.get_default_attribute("cdn_enable")
-
- def save(self, commit=True):
- self.instance.url_filter_level = self.cleaned_data.get("url_filter_level")
- self.instance.uplink_speed = self.cleaned_data.get("uplink_speed")
- self.instance.downlink_speed = self.cleaned_data.get("downlink_speed")
- self.instance.status = self.cleaned_data.get("status")
- self.instance.enable_uverse = self.cleaned_data.get("enable_uverse")
- self.instance.cdn_enable = self.cleaned_data.get("cdn_enable")
- return super(CordSubscriberRootForm, self).save(commit=commit)
-
- class Meta:
- model = CordSubscriberRoot
-
-class CordSubscriberRootAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', 'name', )
- list_display_links = ('backend_status_icon', 'id', 'name', )
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'name', 'service_specific_id', # 'service_specific_attribute',
- 'url_filter_level', "uplink_speed", "downlink_speed", "status", "enable_uverse", "cdn_enable"],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'service_specific_attribute',)
- form = CordSubscriberRootForm
- inlines = (VOLTTenantInline, TenantRootPrivilegeInline)
-
- suit_form_tabs =(('general', 'Cord Subscriber Root Details'),
- ('volttenants','VOLT Tenancy'),
- ('tenantrootprivileges','Privileges')
- )
-
- def queryset(self, request):
- return CordSubscriberRoot.get_tenant_objects_by_user(request.user)
-
-admin.site.register(VOLTService, VOLTServiceAdmin)
-admin.site.register(VOLTTenant, VOLTTenantAdmin)
-admin.site.register(VOLTDevice, VOLTDeviceAdmin)
-admin.site.register(AccessDevice, AccessDeviceAdmin)
-admin.site.register(AccessAgent, AccessAgentAdmin)
-
-admin.site.register(CordSubscriberRoot, CordSubscriberRootAdmin)
-
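The volt-old admin module deleted above repeats the same attribute-backed form pattern as the OpenVPN admin: extra form fields are seeded from the instance's service-specific attributes in __init__() and copied back onto the instance in save(). A plain-Python sketch of that pattern (no Django, hypothetical field names) for readers unfamiliar with it:

    class AttrBackedForm(object):
        fields = ("s_tag", "c_tag")

        def __init__(self, instance, data=None):
            self.instance = instance
            self.initial = {f: getattr(instance, f, None) for f in self.fields}
            self.cleaned_data = dict(self.initial, **(data or {}))

        def save(self):
            for f in self.fields:
                setattr(self.instance, f, self.cleaned_data.get(f))
            return self.instance

    class Tenant(object):     # stand-in for VOLTTenant's attribute-backed properties
        s_tag, c_tag = "222", "432"

    t = AttrBackedForm(Tenant(), data={"c_tag": "433"}).save()
    assert (t.s_tag, t.c_tag) == ("222", "433")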
diff --git a/xos/onboard/volt-old/api/tenant/cord/subscriber.py b/xos/onboard/volt-old/api/tenant/cord/subscriber.py
deleted file mode 100644
index 52f9b63..0000000
--- a/xos/onboard/volt-old/api/tenant/cord/subscriber.py
+++ /dev/null
@@ -1,384 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import viewsets
-from rest_framework import status
-from rest_framework.decorators import detail_route, list_route
-from rest_framework.views import APIView
-from core.models import *
-from django.forms import widgets
-from django.conf.urls import patterns, url
-from services.volt.models import VOLTTenant, CordSubscriberRoot
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-from django.shortcuts import get_object_or_404
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from xos.exceptions import *
-import json
-import subprocess
-from django.views.decorators.csrf import ensure_csrf_cookie
-
-class CordSubscriberNew(CordSubscriberRoot):
- class Meta:
- proxy = True
- app_label = "cord"
-
- def __init__(self, *args, **kwargs):
- super(CordSubscriberNew, self).__init__(*args, **kwargs)
-
- def __unicode__(self):
- return u"cordSubscriber-%s" % str(self.id)
-
- @property
- def features(self):
- return {"cdn": self.cdn_enable,
- "uplink_speed": self.uplink_speed,
- "downlink_speed": self.downlink_speed,
- "uverse": self.enable_uverse,
- "status": self.status}
-
- @features.setter
- def features(self, value):
- self.cdn_enable = value.get("cdn", self.get_default_attribute("cdn_enable"))
- self.uplink_speed = value.get("uplink_speed", self.get_default_attribute("uplink_speed"))
- self.downlink_speed = value.get("downlink_speed", self.get_default_attribute("downlink_speed"))
- self.enable_uverse = value.get("uverse", self.get_default_attribute("enable_uverse"))
- self.status = value.get("status", self.get_default_attribute("status"))
-
-
- def update_features(self, value):
- d=self.features
- d.update(value)
- self.features = d
-
- @property
- def identity(self):
- return {"account_num": self.service_specific_id,
- "name": self.name}
-
- @identity.setter
- def identity(self, value):
- self.service_specific_id = value.get("account_num", self.service_specific_id)
- self.name = value.get("name", self.name)
-
- def update_identity(self, value):
- d=self.identity
- d.update(value)
- self.identity = d
-
- @property
- def related(self):
- related = {}
- if self.volt:
- related["volt_id"] = self.volt.id
- related["s_tag"] = self.volt.s_tag
- related["c_tag"] = self.volt.c_tag
- if self.volt.vcpe:
- related["vsg_id"] = self.volt.vcpe.id
- if self.volt.vcpe.instance:
- related["instance_id"] = self.volt.vcpe.instance.id
- related["instance_name"] = self.volt.vcpe.instance.name
- related["wan_container_ip"] = self.volt.vcpe.wan_container_ip
- if self.volt.vcpe.instance.node:
- related["compute_node_name"] = self.volt.vcpe.instance.node.name
- return related
-
- def save(self, *args, **kwargs):
- super(CordSubscriberNew, self).save(*args, **kwargs)
-
-class CordDevice(object):
- def __init__(self, d={}, subscriber=None):
- self.d = d
- self.subscriber = subscriber
-
- @property
- def mac(self):
- return self.d.get("mac", None)
-
- @mac.setter
- def mac(self, value):
- self.d["mac"] = value
-
- @property
- def identity(self):
- return {"name": self.d.get("name", None)}
-
- @identity.setter
- def identity(self, value):
- self.d["name"] = value.get("name", None)
-
- @property
- def features(self):
- return {"uplink_speed": self.d.get("uplink_speed", None),
- "downlink_speed": self.d.get("downlink_speed", None)}
-
- @features.setter
- def features(self, value):
- self.d["uplink_speed"] = value.get("uplink_speed", None)
- self.d["downlink_speed"] = value.get("downlink_speed", None)
-
- def update_features(self, value):
- d=self.features
- d.update(value)
- self.features = d
-
- def update_identity(self, value):
- d=self.identity
- d.update(value)
- self.identity = d
-
- def save(self):
- if self.subscriber:
- dev=self.subscriber.find_device(self.mac)
- if dev:
- self.subscriber.update_device(**self.d)
- else:
- self.subscriber.create_device(**self.d)
-
-# Add some structure to the REST API by subdividing the object into
-# features, identity, and related.
-
-class FeatureSerializer(serializers.Serializer):
- cdn = serializers.BooleanField(required=False)
- uplink_speed = serializers.IntegerField(required=False)
- downlink_speed = serializers.IntegerField(required=False)
- uverse = serializers.BooleanField(required=False)
- status = serializers.CharField(required=False)
-
-class IdentitySerializer(serializers.Serializer):
- account_num = serializers.CharField(required=False)
- name = serializers.CharField(required=False)
-
-class DeviceFeatureSerializer(serializers.Serializer):
- uplink_speed = serializers.IntegerField(required=False)
- downlink_speed = serializers.IntegerField(required=False)
-
-class DeviceIdentitySerializer(serializers.Serializer):
- name = serializers.CharField(required=False)
-
-class DeviceSerializer(serializers.Serializer):
- mac = serializers.CharField(required=True)
- identity = DeviceIdentitySerializer(required=False)
- features = DeviceFeatureSerializer(required=False)
-
- class Meta:
- fields = ('mac', 'identity', 'features')
-
-class CordSubscriberSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- features = FeatureSerializer(required=False)
- identity = IdentitySerializer(required=False)
- related = serializers.DictField(required=False)
-
- nested_fields = ["features", "identity"]
-
- class Meta:
- model = CordSubscriberNew
- fields = ('humanReadableName',
- 'id',
- 'features',
- 'identity',
- 'related')
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-# @ensure_csrf_cookie
-class CordSubscriberViewSet(XOSViewSet):
- base_name = "subscriber"
- method_name = "subscriber"
- method_kind = "viewset"
- queryset = CordSubscriberNew.get_tenant_objects().select_related().all()
- serializer_class = CordSubscriberSerializer
-
- custom_serializers = {"set_features": FeatureSerializer,
- "set_feature": FeatureSerializer,
- "set_identities": IdentitySerializer,
- "set_identity": IdentitySerializer,
- "get_devices": DeviceSerializer,
- "add_device": DeviceSerializer,
- "get_device_feature": DeviceFeatureSerializer,
- "set_device_feature": DeviceFeatureSerializer}
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(CordSubscriberViewSet, self).get_urlpatterns(api_path=api_path)
- patterns.append( self.detail_url("features/$", {"get": "get_features", "put": "set_features"}, "features") )
- patterns.append( self.detail_url("features/(?P<feature>[a-zA-Z0-9\-_]+)/$", {"get": "get_feature", "put": "set_feature"}, "get_feature") )
- patterns.append( self.detail_url("identity/$", {"get": "get_identities", "put": "set_identities"}, "identities") )
- patterns.append( self.detail_url("identity/(?P<identity>[a-zA-Z0-9\-_]+)/$", {"get": "get_identity", "put": "set_identity"}, "get_identity") )
-
- patterns.append( self.detail_url("devices/$", {"get": "get_devices", "post": "add_device"}, "devicees") )
- patterns.append( self.detail_url("devices/(?P<mac>[a-zA-Z0-9\-_:]+)/$", {"get": "get_device", "delete": "delete_device"}, "getset_device") )
- patterns.append( self.detail_url("devices/(?P<mac>[a-zA-Z0-9\-_:]+)/features/(?P<feature>[a-zA-Z0-9\-_]+)/$", {"get": "get_device_feature", "put": "set_device_feature"}, "getset_device_feature") )
- patterns.append( self.detail_url("devices/(?P<mac>[a-zA-Z0-9\-_:]+)/identity/(?P<identity>[a-zA-Z0-9\-_]+)/$", {"get": "get_device_identity", "put": "set_device_identity"}, "getset_device_identity") )
-
- patterns.append( url(self.api_path + "account_num_lookup/(?P<account_num>[0-9\-]+)/$", self.as_view({"get": "account_num_detail"}), name="account_num_detail") )
-
- patterns.append( url(self.api_path + "ssidmap/(?P<ssid>[0-9\-]+)/$", self.as_view({"get": "ssiddetail"}), name="ssiddetail") )
- patterns.append( url(self.api_path + "ssidmap/$", self.as_view({"get": "ssidlist"}), name="ssidlist") )
-
- return patterns
-
- def list(self, request):
- object_list = self.filter_queryset(self.get_queryset())
-
- serializer = self.get_serializer(object_list, many=True)
-
- return Response(serializer.data)
-
- def get_features(self, request, pk=None):
- subscriber = self.get_object()
- return Response(FeatureSerializer(subscriber.features).data)
-
- def set_features(self, request, pk=None):
- subscriber = self.get_object()
- ser = FeatureSerializer(subscriber.features, data=request.data)
- ser.is_valid(raise_exception = True)
- subscriber.update_features(ser.validated_data)
- subscriber.save()
- return Response(FeatureSerializer(subscriber.features).data)
-
- def get_feature(self, request, pk=None, feature=None):
- subscriber = self.get_object()
- return Response({feature: FeatureSerializer(subscriber.features).data[feature]})
-
- def set_feature(self, request, pk=None, feature=None):
- subscriber = self.get_object()
- if [feature] != request.data.keys():
- raise serializers.ValidationError("feature %s does not match keys in request body (%s)" % (feature, ",".join(request.data.keys())))
- ser = FeatureSerializer(subscriber.features, data=request.data)
- ser.is_valid(raise_exception = True)
- subscriber.update_features(ser.validated_data)
- subscriber.save()
- return Response({feature: FeatureSerializer(subscriber.features).data[feature]})
-
- def get_identities(self, request, pk=None):
- subscriber = self.get_object()
- return Response(IdentitySerializer(subscriber.identity).data)
-
- def set_identities(self, request, pk=None):
- subscriber = self.get_object()
- ser = IdentitySerializer(subscriber.identity, data=request.data)
- ser.is_valid(raise_exception = True)
- subscriber.update_identity(ser.validated_data)
- subscriber.save()
- return Response(IdentitySerializer(subscriber.identity).data)
-
- def get_identity(self, request, pk=None, identity=None):
- subscriber = self.get_object()
- return Response({identity: IdentitySerializer(subscriber.identity).data[identity]})
-
- def set_identity(self, request, pk=None, identity=None):
- subscriber = self.get_object()
- if [identity] != request.data.keys():
- raise serializers.ValidationError("identity %s does not match keys in request body (%s)" % (identity, ",".join(request.data.keys())))
- ser = IdentitySerializer(subscriber.identity, data=request.data)
- ser.is_valid(raise_exception = True)
- subscriber.update_identity(ser.validated_data)
- subscriber.save()
- return Response({identity: IdentitySerializer(subscriber.identity).data[identity]})
-
- def get_devices(self, request, pk=None):
- subscriber = self.get_object()
- result = []
- for device in subscriber.devices:
- device = CordDevice(device, subscriber)
- result.append(DeviceSerializer(device).data)
- return Response(result)
-
- def add_device(self, request, pk=None):
- subscriber = self.get_object()
- ser = DeviceSerializer(subscriber.devices, data=request.data)
- ser.is_valid(raise_exception = True)
- newdevice = CordDevice(subscriber.create_device(**ser.validated_data), subscriber)
- subscriber.save()
- return Response(DeviceSerializer(newdevice).data)
-
- def get_device(self, request, pk=None, mac=None):
- subscriber = self.get_object()
- device = subscriber.find_device(mac)
- if not device:
- return Response("Failed to find device %s" % mac, status=status.HTTP_404_NOT_FOUND)
- return Response(DeviceSerializer(CordDevice(device, subscriber)).data)
-
- def delete_device(self, request, pk=None, mac=None):
- subscriber = self.get_object()
- device = subscriber.find_device(mac)
- if not device:
- return Response("Failed to find device %s" % mac, status=status.HTTP_404_NOT_FOUND)
- subscriber.delete_device(mac)
- subscriber.save()
- return Response("Okay")
-
- def get_device_feature(self, request, pk=None, mac=None, feature=None):
- subscriber = self.get_object()
- device = subscriber.find_device(mac)
- if not device:
- return Response("Failed to find device %s" % mac, status=status.HTTP_404_NOT_FOUND)
- return Response({feature: DeviceFeatureSerializer(CordDevice(device, subscriber).features).data[feature]})
-
- def set_device_feature(self, request, pk=None, mac=None, feature=None):
- subscriber = self.get_object()
- device = subscriber.find_device(mac)
- if not device:
- return Response("Failed to find device %s" % mac, status=status.HTTP_404_NOT_FOUND)
- if [feature] != request.data.keys():
- raise serializers.ValidationError("feature %s does not match keys in request body (%s)" % (feature, ",".join(request.data.keys())))
- device = CordDevice(device, subscriber)
- ser = DeviceFeatureSerializer(device.features, data=request.data)
- ser.is_valid(raise_exception = True)
- device.update_features(ser.validated_data)
- device.save()
- subscriber.save()
- return Response({feature: DeviceFeatureSerializer(device.features).data[feature]})
-
- def get_device_identity(self, request, pk=None, mac=None, identity=None):
- subscriber = self.get_object()
- device = subscriber.find_device(mac)
- if not device:
- return Response("Failed to find device %s" % mac, status=status.HTTP_404_NOT_FOUND)
- return Response({identity: DeviceIdentitySerializer(CordDevice(device, subscriber).identity).data[identity]})
-
- def set_device_identity(self, request, pk=None, mac=None, identity=None):
- subscriber = self.get_object()
- device = subscriber.find_device(mac)
- if not device:
- return Response("Failed to find device %s" % mac, status=status.HTTP_404_NOT_FOUND)
- if [identity] != request.data.keys():
- raise serializers.ValidationError("identity %s does not match keys in request body (%s)" % (feature, ",".join(request.data.keys())))
- device = CordDevice(device, subscriber)
- ser = DeviceIdentitySerializer(device.identity, data=request.data)
- ser.is_valid(raise_exception = True)
- device.update_identity(ser.validated_data)
- device.save()
- subscriber.save()
- return Response({identity: DeviceIdentitySerializer(device.identity).data[identity]})
-
- def account_num_detail(self, pk=None, account_num=None):
- object_list = CordSubscriberNew.get_tenant_objects().all()
- object_list = [x for x in object_list if x.service_specific_id == account_num]
- if not object_list:
- return Response("Failed to find account_num %s" % account_num, status=status.HTTP_404_NOT_FOUND)
-
- return Response( object_list[0].id )
-
- def ssidlist(self, request):
- object_list = CordSubscriberNew.get_tenant_objects().all()
-
- ssidmap = [ {"service_specific_id": x.service_specific_id, "subscriber_id": x.id} for x in object_list ]
-
- return Response({"ssidmap": ssidmap})
-
- def ssiddetail(self, pk=None, ssid=None):
- object_list = CordSubscriberNew.get_tenant_objects().all()
-
- ssidmap = [ {"service_specific_id": x.service_specific_id, "subscriber_id": x.id} for x in object_list if str(x.service_specific_id)==str(ssid) ]
-
- if len(ssidmap)==0:
- raise XOSNotFound("didn't find ssid %s" % str(ssid))
-
- return Response( ssidmap[0] )
-
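A minimal sketch of how the subscriber sub-resource routes registered above could be exercised over HTTP. The host, credentials, and subscriber id below are illustrative assumptions, not values from this repo.

    import requests

    BASE = "http://xos.example.org/api/tenant/cord/subscriber"   # assumed host
    AUTH = ("padmin@vicci.org", "letmein")                        # assumed credentials

    # Read the whole feature bundle, then flip one feature via its own URL.
    print(requests.get("%s/17/features/" % BASE, auth=AUTH).json())
    requests.put("%s/17/features/cdn/" % BASE, json={"cdn": True}, auth=AUTH)

    # Devices form a nested collection keyed by MAC address.
    requests.post("%s/17/devices/" % BASE, auth=AUTH,
                  json={"mac": "01:02:03:04:05:06", "identity": {"name": "STB"}})
    print(requests.get("%s/17/devices/" % BASE, auth=AUTH).json())
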
diff --git a/xos/onboard/volt-old/api/tenant/cord/volt.py b/xos/onboard/volt-old/api/tenant/cord/volt.py
deleted file mode 100644
index 5c9634a..0000000
--- a/xos/onboard/volt-old/api/tenant/cord/volt.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import status
-from core.models import *
-from django.forms import widgets
-from services.volt.models import VOLTTenant, VOLTService, CordSubscriberRoot
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-
-def get_default_volt_service():
- volt_services = VOLTService.get_service_objects().all()
- if volt_services:
- return volt_services[0].id
- return None
-
-class VOLTTenantForAPI(VOLTTenant):
- class Meta:
- proxy = True
- app_label = "cord"
-
- @property
- def subscriber(self):
- return self.subscriber_root.id
-
- @subscriber.setter
- def subscriber(self, value):
- self.subscriber_root = value # CordSubscriberRoot.get_tenant_objects().get(id=value)
-
- @property
- def related(self):
- related = {}
- if self.vcpe:
- related["vsg_id"] = self.vcpe.id
- if self.vcpe.instance:
- related["instance_id"] = self.vcpe.instance.id
- related["instance_name"] = self.vcpe.instance.name
- related["wan_container_ip"] = self.vcpe.wan_container_ip
- if self.vcpe.instance.node:
- related["compute_node_name"] = self.vcpe.instance.node.name
- return related
-
-class VOLTTenantSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- service_specific_id = serializers.CharField(required=False)
- s_tag = serializers.CharField()
- c_tag = serializers.CharField()
- subscriber = serializers.PrimaryKeyRelatedField(queryset=CordSubscriberRoot.get_tenant_objects().all(), required=False)
- related = serializers.DictField(required=False)
-
- property_fields=["subscriber"]
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- class Meta:
- model = VOLTTenantForAPI
- fields = ('humanReadableName', 'id', 'service_specific_id', 's_tag', 'c_tag', 'subscriber', 'related' )
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-class VOLTTenantViewSet(XOSViewSet):
- base_name = "volt"
- method_name = "volt"
- method_kind = "viewset"
- queryset = VOLTTenantForAPI.get_tenant_objects().all() # select_related().all()
- serializer_class = VOLTTenantSerializer
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(VOLTTenantViewSet, self).get_urlpatterns(api_path=api_path)
-
- return patterns
-
- def list(self, request):
- queryset = self.filter_queryset(self.get_queryset())
-
- c_tag = self.request.query_params.get('c_tag', None)
- if c_tag is not None:
- ids = [x.id for x in queryset if x.get_attribute("c_tag", None)==c_tag]
- queryset = queryset.filter(id__in=ids)
-
- s_tag = self.request.query_params.get('s_tag', None)
- if s_tag is not None:
- ids = [x.id for x in queryset if x.get_attribute("s_tag", None)==s_tag]
- queryset = queryset.filter(id__in=ids)
-
- serializer = self.get_serializer(queryset, many=True)
-
- return Response(serializer.data)
-
-
-
-
-
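The list() handler above accepts optional s_tag and c_tag query parameters and narrows the queryset accordingly. A hedged usage sketch; host, credentials, and tag values are assumptions:

    import requests

    resp = requests.get("http://xos.example.org/api/tenant/cord/volt/",   # assumed host
                        params={"s_tag": "222", "c_tag": "432"},
                        auth=("padmin@vicci.org", "letmein"))              # assumed credentials
    for tenant in resp.json():
        print(tenant["id"], tenant["s_tag"], tenant["c_tag"], tenant["subscriber"])
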
diff --git a/xos/onboard/volt-old/models.py b/xos/onboard/volt-old/models.py
deleted file mode 100644
index 8f3cc1f..0000000
--- a/xos/onboard/volt-old/models.py
+++ /dev/null
@@ -1,367 +0,0 @@
-from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port, AddressPool, User
-from core.models.plcorebase import StrippedCharField
-import os
-from django.db import models, transaction
-from django.forms.models import model_to_dict
-from django.db.models import Q
-from operator import itemgetter, attrgetter, methodcaller
-from core.models import Tag
-from core.models.service import LeastLoadedNodeScheduler
-from services.vrouter.models import VRouterService, VRouterTenant
-import traceback
-from xos.exceptions import *
-from xos.config import Config
-
-class ConfigurationError(Exception):
- pass
-
-VOLT_KIND = "vOLT"
-CORD_SUBSCRIBER_KIND = "CordSubscriberRoot"
-
-CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
-
-# -------------------------------------------
-# CordSubscriberRoot
-# -------------------------------------------
-
-class CordSubscriberRoot(Subscriber):
- class Meta:
- proxy = True
-
- KIND = CORD_SUBSCRIBER_KIND
-
- status_choices = (("enabled", "Enabled"),
- ("suspended", "Suspended"),
- ("delinquent", "Delinquent"),
- ("copyrightviolation", "Copyright Violation"))
-
- # 'simple_attributes' will be expanded into properties and setters that
- # store the attribute using self.set_attribute / self.get_attribute.
-
- simple_attributes = ( ("firewall_enable", False),
- ("firewall_rules", "accept all anywhere anywhere"),
- ("url_filter_enable", False),
- ("url_filter_rules", "allow all"),
- ("url_filter_level", "PG"),
- ("cdn_enable", False),
- ("devices", []),
- ("is_demo_user", False),
-
- ("uplink_speed", 1000000000), # 1 gigabit, a reasonable default?
- ("downlink_speed", 1000000000),
- ("enable_uverse", True) )
-
- default_attributes = {"status": "enabled"}
-
- sync_attributes = ("firewall_enable",
- "firewall_rules",
- "url_filter_enable",
- "url_filter_rules",
- "cdn_enable",
- "uplink_speed",
- "downlink_speed",
- "enable_uverse",
- "status")
-
- def __init__(self, *args, **kwargs):
- super(CordSubscriberRoot, self).__init__(*args, **kwargs)
- self.cached_volt = None
- self._initial_url_filter_enable = self.url_filter_enable
-
- @property
- def volt(self):
- volt = self.get_newest_subscribed_tenant(VOLTTenant)
- if not volt:
- return None
-
- # always return the same object when possible
- if (self.cached_volt) and (self.cached_volt.id == volt.id):
- return self.cached_volt
-
- #volt.caller = self.creator
- self.cached_volt = volt
- return volt
-
- @property
- def status(self):
- return self.get_attribute("status", self.default_attributes["status"])
-
- @status.setter
- def status(self, value):
- if not value in [x[0] for x in self.status_choices]:
- raise Exception("invalid status %s" % value)
- self.set_attribute("status", value)
-
- def find_device(self, mac):
- for device in self.devices:
- if device["mac"] == mac:
- return device
- return None
-
- def update_device(self, mac, **kwargs):
- # kwargs may be "level" or "mac"
- # Setting one of these to None will cause None to be stored in the db
- devices = self.devices
- for device in devices:
- if device["mac"] == mac:
- for arg in kwargs.keys():
- device[arg] = kwargs[arg]
- self.devices = devices
- return device
- raise ValueError("Device with mac %s not found" % mac)
-
- def create_device(self, **kwargs):
- if "mac" not in kwargs:
- raise XOSMissingField("The mac field is required")
-
- if self.find_device(kwargs['mac']):
- raise XOSDuplicateKey("Device with mac %s already exists" % kwargs["mac"])
-
- device = kwargs.copy()
-
- devices = self.devices
- devices.append(device)
- self.devices = devices
-
- return device
-
- def delete_device(self, mac):
- devices = self.devices
- for device in devices:
- if device["mac"]==mac:
- devices.remove(device)
- self.devices = devices
- return
-
- raise ValueError("Device with mac %s not found" % mac)
-
- #--------------------------------------------------------------------------
- # Deprecated -- devices used to be called users
-
- def find_user(self, uid):
- return self.find_device(uid)
-
- def update_user(self, uid, **kwargs):
- return self.update_device(uid, **kwargs)
-
- def create_user(self, **kwargs):
- return self.create_device(**kwargs)
-
- def delete_user(self, uid):
- return self.delete_device(uid)
-
- # ------------------------------------------------------------------------
-
- @property
- def services(self):
- return {"cdn": self.cdn_enable,
- "url_filter": self.url_filter_enable,
- "firewall": self.firewall_enable}
-
- @services.setter
- def services(self, value):
- pass
-
- def save(self, *args, **kwargs):
- self.validate_unique_service_specific_id(none_okay=True)
- if (not hasattr(self, 'caller') or not self.caller.is_admin):
- if (self.has_field_changed("service_specific_id")):
- raise XOSPermissionDenied("You do not have permission to change service_specific_id")
- super(CordSubscriberRoot, self).save(*args, **kwargs)
- if (self.volt) and (self.volt.vcpe): # and (self._initial_url_filter_enabled != self.url_filter_enable):
- # 1) trigger manage_bbs_account to run
- # 2) trigger vcpe observer to wake up
- self.volt.vcpe.save()
-
-CordSubscriberRoot.setup_simple_attributes()
-
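The simple_attributes tuples above are expanded by setup_simple_attributes() into properties backed by get_attribute/set_attribute. The real helper lives in XOS core; the following standalone approximation only illustrates the idea:

    class AttributeStoreMixin(object):
        """Stand-in for the JSON attribute store behind get/set_attribute."""
        def __init__(self):
            self._attrs = {}
        def get_attribute(self, name, default=None):
            return self._attrs.get(name, default)
        def set_attribute(self, name, value):
            self._attrs[name] = value

    def setup_simple_attributes(cls):
        # One property per (name, default) pair, reading and writing the store.
        for name, default in cls.simple_attributes:
            def getter(self, name=name, default=default):
                return self.get_attribute(name, default)
            def setter(self, value, name=name):
                self.set_attribute(name, value)
            setattr(cls, name, property(getter, setter))

    class DemoSubscriber(AttributeStoreMixin):
        simple_attributes = (("cdn_enable", False), ("uplink_speed", 1000000000))

    setup_simple_attributes(DemoSubscriber)
    s = DemoSubscriber()
    s.cdn_enable = True
    print(s.cdn_enable, s.uplink_speed)   # True 1000000000
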
-# -------------------------------------------
-# VOLT
-# -------------------------------------------
-
-class VOLTService(Service):
- KIND = VOLT_KIND
-
- class Meta:
- app_label = "volt"
- verbose_name = "vOLT Service"
-
-class VOLTTenant(Tenant):
- KIND = VOLT_KIND
-
- class Meta:
- app_label = "volt"
- verbose_name = "vOLT Tenant"
-
- s_tag = models.IntegerField(null=True, blank=True, help_text="s-tag")
- c_tag = models.IntegerField(null=True, blank=True, help_text="c-tag")
-
- # at some point, this should probably end up part of Tenant.
- creator = models.ForeignKey(User, related_name='created_volts', blank=True, null=True)
-
- def __init__(self, *args, **kwargs):
- volt_services = VOLTService.get_service_objects().all()
- if volt_services:
- self._meta.get_field("provider_service").default = volt_services[0].id
- super(VOLTTenant, self).__init__(*args, **kwargs)
- self.cached_vcpe = None
-
- @property
- def vcpe(self):
- from services.vsg.models import VSGTenant
- vcpe = self.get_newest_subscribed_tenant(VSGTenant)
- if not vcpe:
- return None
-
- # always return the same object when possible
- if (self.cached_vcpe) and (self.cached_vcpe.id == vcpe.id):
- return self.cached_vcpe
-
- vcpe.caller = self.creator
- self.cached_vcpe = vcpe
- return vcpe
-
- @vcpe.setter
- def vcpe(self, value):
- raise XOSConfigurationError("vOLT.vCPE cannot be set this way -- create a new vCPE object and set its subscriber_tenant instead")
-
- @property
- def subscriber(self):
- if not self.subscriber_root:
- return None
- subs = CordSubscriberRoot.objects.filter(id=self.subscriber_root.id)
- if not subs:
- return None
- return subs[0]
-
- def manage_vcpe(self):
- # Each VOLT object owns exactly one VCPE object
-
- if self.deleted:
- return
-
- if self.vcpe is None:
- from services.vsg.models import VSGService, VSGTenant
- vsgServices = VSGService.get_service_objects().all()
- if not vsgServices:
- raise XOSConfigurationError("No VSG Services available")
-
- vcpe = VSGTenant(provider_service = vsgServices[0],
- subscriber_tenant = self)
- vcpe.caller = self.creator
- vcpe.save()
-
- def manage_subscriber(self):
- if (self.subscriber_root is None):
- # The vOLT is not connected to a Subscriber, so either find an
- # existing subscriber with the same SSID, or autogenerate a new
- # subscriber.
- #
- # TODO: This probably goes away when we rethink the ONOS-to-XOS
- # vOLT API.
-
- subs = CordSubscriberRoot.get_tenant_objects().filter(service_specific_id = self.service_specific_id)
- if subs:
- sub = subs[0]
- else:
- sub = CordSubscriberRoot(service_specific_id = self.service_specific_id,
- name = "autogenerated-for-vOLT-%s" % self.id)
- sub.save()
- self.subscriber_root = sub
- self.save()
-
- def cleanup_vcpe(self):
- if self.vcpe:
- # print "XXX cleanup vcpe", self.vcpe
- self.vcpe.delete()
-
- def cleanup_orphans(self):
- from services.vsg.models import VSGTenant
- # ensure vOLT only has one vCPE
- cur_vcpe = self.vcpe
- for vcpe in list(self.get_subscribed_tenants(VSGTenant)):
- if (not cur_vcpe) or (vcpe.id != cur_vcpe.id):
- # print "XXX clean up orphaned vcpe", vcpe
- vcpe.delete()
-
- def save(self, *args, **kwargs):
- # VOLTTenant probably doesn't need a SSID anymore; that will be handled
- # by CORDSubscriberRoot...
- # self.validate_unique_service_specific_id()
-
- if (self.subscriber_root is not None):
- subs = self.subscriber_root.get_subscribed_tenants(VOLTTenant)
- if (subs) and (self not in subs):
- raise XOSDuplicateKey("Subscriber should only be linked to one vOLT")
-
- if not self.creator:
- if not getattr(self, "caller", None):
- # caller must be set when creating a vCPE since it creates a slice
- raise XOSProgrammingError("VOLTTenant's self.caller was not set")
- self.creator = self.caller
- if not self.creator:
- raise XOSProgrammingError("VOLTTenant's self.creator was not set")
-
- super(VOLTTenant, self).save(*args, **kwargs)
- model_policy_volt(self.pk)
-
- def delete(self, *args, **kwargs):
- self.cleanup_vcpe()
- super(VOLTTenant, self).delete(*args, **kwargs)
-
-def model_policy_volt(pk):
- # TODO: this should be made in to a real model_policy
- with transaction.atomic():
- volt = VOLTTenant.objects.select_for_update().filter(pk=pk)
- if not volt:
- return
- volt = volt[0]
- volt.manage_vcpe()
- volt.manage_subscriber()
- volt.cleanup_orphans()
-
-class VOLTDevice(PlCoreBase):
- class Meta:
- app_label = "volt"
-
- name = models.CharField(max_length=254, help_text="name of device", null=False, blank=False)
- volt_service = models.ForeignKey(VOLTService, related_name='volt_devices')
- openflow_id = models.CharField(max_length=254, help_text="OpenFlow ID", null=True, blank=True)
- driver = models.CharField(max_length=254, help_text="driver", null=True, blank=True)
- access_agent = models.ForeignKey("AccessAgent", related_name='volt_devices', blank=True, null=True)
-
- def __unicode__(self): return u'%s' % (self.name)
-
-class AccessDevice(PlCoreBase):
- class Meta:
- app_label = "volt"
-
- volt_device = models.ForeignKey(VOLTDevice, related_name='access_devices')
- uplink = models.IntegerField(null=True, blank=True)
- vlan = models.IntegerField(null=True, blank=True)
-
- def __unicode__(self): return u'%s-%d:%d' % (self.volt_device.name,self.uplink,self.vlan)
-
-class AccessAgent(PlCoreBase):
- class Meta:
- app_label = "volt"
-
- name = models.CharField(max_length=254, help_text="name of agent", null=False, blank=False)
- volt_service = models.ForeignKey(VOLTService, related_name='access_agents')
- mac = models.CharField(max_length=32, help_text="MAC Address or Access Agent", null=True, blank=True)
-
- def __unicode__(self): return u'%s' % (self.name)
-
-class AgentPortMapping(PlCoreBase):
- class Meta:
- app_label = "volt"
-
- access_agent = models.ForeignKey(AccessAgent, related_name='port_mappings')
- mac = models.CharField(max_length=32, help_text="MAC Address", null=True, blank=True)
- port = models.CharField(max_length=32, help_text="Openflow port ID", null=True, blank=True)
-
- def __unicode__(self): return u'%s-%s-%s' % (self.access_agent.name, self.port, self.mac)
-
-
-
diff --git a/xos/onboard/volt-old/templates/voltadmin.html b/xos/onboard/volt-old/templates/voltadmin.html
deleted file mode 100644
index 807ab2c..0000000
--- a/xos/onboard/volt-old/templates/voltadmin.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<div class = "row text-center">
- <div class="col-xs-12">
- <a href="/admin/cord/volttenant/">vOLT Tenants</a>
- </div><div class="col-xs-12">
- <a href="/admin/cord/voltdevice/">vOLT Devices</a>
- </div><div class="col-xs-12">
- <a href="/admin/cord/accessagent/">vOLT Access Agents</a>
- </div>
-</div>
-
diff --git a/xos/onboard/volt-old/tosca/resources/CORDSubscriber.py b/xos/onboard/volt-old/tosca/resources/CORDSubscriber.py
deleted file mode 100644
index 5cdb2ef..0000000
--- a/xos/onboard/volt-old/tosca/resources/CORDSubscriber.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-import pdb
-
-from core.models import User, TenantRootPrivilege, TenantRootRole
-from services.volt.models import CordSubscriberRoot
-
-from xosresource import XOSResource
-
-class XOSCORDSubscriber(XOSResource):
- provides = "tosca.nodes.CORDSubscriber"
- xos_model = CordSubscriberRoot
- copyin_props = ["service_specific_id", "firewall_enable", "url_filter_enable", "cdn_enable", "url_filter_level"]
-
- def postprocess(self, obj):
- rolemap = ( ("tosca.relationships.AdminPrivilege", "admin"), ("tosca.relationships.AccessPrivilege", "access"), )
- self.postprocess_privileges(TenantRootRole, TenantRootPrivilege, rolemap, obj, "tenant_root")
-
- def can_delete(self, obj):
- return super(XOSCORDSubscriber, self).can_delete(obj)
-
diff --git a/xos/onboard/volt-old/tosca/resources/CORDUser.py b/xos/onboard/volt-old/tosca/resources/CORDUser.py
deleted file mode 100644
index d1ae1cc..0000000
--- a/xos/onboard/volt-old/tosca/resources/CORDUser.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-import pdb
-
-from core.models import User
-from services.volt.models import CordSubscriberRoot
-
-from xosresource import XOSResource
-
-class XOSCORDUser(XOSResource):
- provides = "tosca.nodes.CORDUser"
-
- def get_model_class_name(self):
- return "CORDUser"
-
- def get_subscriber_root(self, throw_exception=True):
- sub_name = self.get_requirement("tosca.relationships.SubscriberDevice", throw_exception=throw_exception)
- sub = self.get_xos_object(CordSubscriberRoot, name=sub_name, throw_exception=throw_exception)
- return sub
-
- def get_existing_objs(self):
- result = []
- sub = self.get_subscriber_root(throw_exception=False)
- if not sub:
- return []
- for user in sub.devices:
- if user["name"] == self.obj_name:
- result.append(user)
- return result
-
- def get_xos_args(self):
- args = {"name": self.obj_name,
- "level": self.get_property("level"),
- "mac": self.get_property("mac")}
- return args
-
-
- def create(self):
- xos_args = self.get_xos_args()
- sub = self.get_subscriber_root()
-
- sub.create_device(**xos_args)
- sub.save()
-
- self.info("Created CORDUser %s for Subscriber %s" % (self.obj_name, sub.name))
-
- def update(self, obj):
- pass
-
- def delete(self, obj):
- if (self.can_delete(obj)):
- self.info("destroying CORDUser %s" % obj["name"])
- sub = self.get_subscriber_root()
- sub.delete_user(obj["id"])
- sub.save()
-
- def can_delete(self, obj):
- return True
-
diff --git a/xos/onboard/volt-old/tosca/resources/VOLTTenant.py b/xos/onboard/volt-old/tosca/resources/VOLTTenant.py
deleted file mode 100644
index cbc3837..0000000
--- a/xos/onboard/volt-old/tosca/resources/VOLTTenant.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-import pdb
-
-from core.models import User
-from services.volt.models import VOLTTenant, VOLTService, CordSubscriberRoot, VOLT_KIND
-
-from xosresource import XOSResource
-
-class XOSVOLTTenant(XOSResource):
- provides = "tosca.nodes.VOLTTenant"
- xos_model = VOLTTenant
- copyin_props = ["service_specific_id", "s_tag", "c_tag"]
- name_field = None
-
- def get_xos_args(self, throw_exception=True):
- args = super(XOSVOLTTenant, self).get_xos_args()
-
- provider_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
- if provider_name:
- args["provider_service"] = self.get_xos_object(VOLTService, throw_exception=throw_exception, name=provider_name)
-
- subscriber_name = self.get_requirement("tosca.relationships.BelongsToSubscriber")
- if subscriber_name:
- args["subscriber_root"] = self.get_xos_object(CordSubscriberRoot, throw_exception=throw_exception, name=subscriber_name)
-
- return args
-
- def get_existing_objs(self):
- args = self.get_xos_args(throw_exception=False)
- provider_service = args.get("provider_service", None)
- service_specific_id = args.get("service_specific_id", None)
- if (provider_service) and (service_specific_id):
- existing_obj = self.get_xos_object(VOLTTenant, kind=VOLT_KIND, provider_service=provider_service, service_specific_id=service_specific_id, throw_exception=False)
- if existing_obj:
- return [ existing_obj ]
- return []
-
- def postprocess(self, obj):
- pass
-
- def can_delete(self, obj):
- return super(XOSVOLTTenant, self).can_delete(obj)
-
diff --git a/xos/onboard/volt-old/tosca/resources/accessagent.py b/xos/onboard/volt-old/tosca/resources/accessagent.py
deleted file mode 100644
index e40a1cb..0000000
--- a/xos/onboard/volt-old/tosca/resources/accessagent.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.volt.models import AccessAgent, VOLTDevice, VOLTService, AgentPortMapping
-from xosresource import XOSResource
-
-class XOSAccessAgent(XOSResource):
- provides = "tosca.nodes.AccessAgent"
- xos_model = AccessAgent
- copyin_props = ["mac"]
-
- def get_xos_args(self, throw_exception=True):
- args = super(XOSAccessAgent, self).get_xos_args()
-
- volt_service_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
- if volt_service_name:
- args["volt_service"] = self.get_xos_object(VOLTService, throw_exception=throw_exception, name=volt_service_name)
-
- return args
-
- def postprocess(self, obj):
- # For convenience, allow the port mappings to be specified by a Tosca
- # string with commas between lines.
- # <port> <mac>,
- # <port> <mac>,
- # ...
- # <port> <mac>
-
- port_mappings_str = self.get_property("port_mappings")
- port_mappings = []
- if port_mappings_str:
- lines = [x.strip() for x in port_mappings_str.split(",")]
- for line in lines:
- if not (" " in line):
- raise "Malformed port mapping `%s`", line
- (port, mac) = line.split(" ")
- port=port.strip()
- mac=mac.strip()
- port_mappings.append( (port, mac) )
-
- for apm in list(AgentPortMapping.objects.filter(access_agent=obj)):
- if (apm.port, apm.mac) not in port_mappings:
- print "Deleting AgentPortMapping '%s'" % apm
- apm.delete()
-
- for port_mapping in port_mappings:
- existing_objs = AgentPortMapping.objects.filter(access_agent=obj, port=port_mapping[0], mac=port_mapping[1])
- if not existing_objs:
- apm = AgentPortMapping(access_agent=obj, port=port_mapping[0], mac=port_mapping[1])
- apm.save()
- print "Created AgentPortMapping '%s'" % apm
-
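The postprocess() above parses a comma-separated "<port> <mac>" string into (port, mac) pairs before reconciling AgentPortMapping rows. A self-contained sketch of just the parsing step, handy for checking the format outside of TOSCA (the function name is ours, not from this repo):

    def parse_port_mappings(port_mappings_str):
        """Parse "port mac, port mac, ..." into a list of (port, mac) tuples."""
        mappings = []
        if not port_mappings_str:
            return mappings
        for line in (x.strip() for x in port_mappings_str.split(",")):
            if " " not in line:
                raise ValueError("Malformed port mapping '%s'" % line)
            port, mac = line.split(" ", 1)
            mappings.append((port.strip(), mac.strip()))
        return mappings

    print(parse_port_mappings("uni-1 00:25:90:b3:15:fa, uni-2 00:25:90:b3:15:fb"))
    # [('uni-1', '00:25:90:b3:15:fa'), ('uni-2', '00:25:90:b3:15:fb')]
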
diff --git a/xos/onboard/volt-old/tosca/resources/accessdevice.py b/xos/onboard/volt-old/tosca/resources/accessdevice.py
deleted file mode 100644
index f31b37a..0000000
--- a/xos/onboard/volt-old/tosca/resources/accessdevice.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.volt.models import AccessDevice, VOLTDevice
-from xosresource import XOSResource
-
-class XOSAccessDevice(XOSResource):
- provides = "tosca.nodes.AccessDevice"
- xos_model = AccessDevice
- copyin_props = ["uplink", "vlan"]
- name_field = None
-
- def get_xos_args(self, throw_exception=True):
- args = super(XOSAccessDevice, self).get_xos_args()
-
- volt_device_name = self.get_requirement("tosca.relationships.MemberOfDevice", throw_exception=throw_exception)
- if volt_device_name:
- args["volt_device"] = self.get_xos_object(VOLTDevice, throw_exception=throw_exception, name=volt_device_name)
-
- return args
-
- # AccessDevice has no name field, so we rely on matching the keys. We assume
- # that for a given VOLTDevice, there is only one AccessDevice per (uplink, vlan)
- # pair.
-
- def get_existing_objs(self):
- args = self.get_xos_args(throw_exception=False)
- volt_device = args.get("volt_device", None)
- uplink = args.get("uplink", None)
- vlan = args.get("vlan", None)
- if (volt_device is not None) and (uplink is not None) and (vlan is not None):
- existing_obj = self.get_xos_object(AccessDevice, volt_device=volt_device, uplink=uplink, vlan=vlan, throw_exception=False)
- if existing_obj:
- return [ existing_obj ]
- return []
-
diff --git a/xos/onboard/volt-old/tosca/resources/voltdevice.py b/xos/onboard/volt-old/tosca/resources/voltdevice.py
deleted file mode 100644
index 9665b85..0000000
--- a/xos/onboard/volt-old/tosca/resources/voltdevice.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.volt.models import VOLTDevice, VOLTService, AccessDevice, AccessAgent
-from xosresource import XOSResource
-
-class XOSVOLTDevice(XOSResource):
- provides = "tosca.nodes.VOLTDevice"
- xos_model = VOLTDevice
- copyin_props = ["openflow_id", "driver"]
-
- def get_xos_args(self, throw_exception=True):
- args = super(XOSVOLTDevice, self).get_xos_args()
-
- volt_service_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
- if volt_service_name:
- args["volt_service"] = self.get_xos_object(VOLTService, throw_exception=throw_exception, name=volt_service_name)
-
- agent_name = self.get_requirement("tosca.relationships.UsesAgent", throw_exception=throw_exception)
- if agent_name:
- args["access_agent"] = self.get_xos_object(AccessAgent, throw_exception=throw_exception, name=agent_name)
-
- return args
-
- def postprocess(self, obj):
- access_devices_str = self.get_property("access_devices")
- access_devices = []
- if access_devices_str:
- lines = [x.strip() for x in access_devices_str.split(",")]
- for line in lines:
- if not (" " in line):
- raise "Malformed access device `%s`", line
- (uplink, vlan) = line.split(" ")
- uplink=int(uplink.strip())
- vlan=int(vlan.strip())
- access_devices.append( (uplink, vlan) )
-
- for ad in list(AccessDevice.objects.filter(volt_device=obj)):
- if (ad.uplink, ad.vlan) not in access_devices:
- print "Deleting AccessDevice '%s'" % ad
- ad.delete()
-
- for access_device in access_devices:
- existing_objs = AccessDevice.objects.filter(volt_device=obj, uplink=access_device[0], vlan=access_device[1])
- if not existing_objs:
- ad = AccessDevice(volt_device=obj, uplink=access_device[0], vlan=access_device[1])
- ad.save()
- print "Created AccessDevice '%s'" % ad
diff --git a/xos/onboard/volt-old/tosca/resources/voltservice.py b/xos/onboard/volt-old/tosca/resources/voltservice.py
deleted file mode 100644
index 9df4259..0000000
--- a/xos/onboard/volt-old/tosca/resources/voltservice.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.volt.models import VOLTService
-
-from service import XOSService
-
-class XOSVOLTService(XOSService):
- provides = "tosca.nodes.VOLTService"
- xos_model = VOLTService
- copyin_props = ["view_url", "icon_url", "kind", "enabled", "published", "public_key", "private_key_fn", "versionNumber"]
diff --git a/xos/onboard/volt-old/volt-onboard.yaml b/xos/onboard/volt-old/volt-onboard.yaml
deleted file mode 100644
index e91ea93..0000000
--- a/xos/onboard/volt-old/volt-onboard.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the vOLT service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#volt:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/volt/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/voltadmin.html
- #synchronizer: synchronizer/manifest
- tosca_resource: tosca/resources/voltdevice.py, tosca/resources/voltservice.py, tosca/resources/CORDSubscriber.py, tosca/resources/CORDUser.py, tosca/resources/VOLTTenant.py, tosca/resources/accessagent.py, tosca/resources/accessdevice.py
- rest_tenant: subdirectory:cord api/tenant/cord/volt.py, subdirectory:cord api/tenant/cord/subscriber.py
- private_key: file:///opt/xos/key_import/volt_rsa
- public_key: file:///opt/xos/key_import/volt_rsa.pub
-
diff --git a/xos/onboard/vrouter-old/admin.py b/xos/onboard/vrouter-old/admin.py
deleted file mode 100644
index 4bd99b6..0000000
--- a/xos/onboard/vrouter-old/admin.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from django.contrib import admin
-
-from services.vrouter.models import *
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.models import AddressPool
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, AddressPoolInline
-from core.middleware import get_request
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-class VRouterServiceForm(forms.ModelForm):
- def __init__(self,*args,**kwargs):
- super (VRouterServiceForm,self ).__init__(*args,**kwargs)
-
- def save(self, commit=True):
- return super(VRouterServiceForm, self).save(commit=commit)
-
- class Meta:
- model = VRouterService
-
-class VRouterServiceAdmin(ReadOnlyAwareAdmin):
- model = VRouterService
- verbose_name = "vRouter Service"
- verbose_name_plural = "vRouter Service"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description', "view_url", "icon_url", ],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline,AddressPoolInline]
- form = VRouterServiceForm
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'vRouter Service Details'),
- ('administration', 'Administration'),
- ('addresspools', 'Addresses'),
- #('tools', 'Tools'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = (('vrouteradmin.html', 'top', 'administration'),
- ) #('hpctools.html', 'top', 'tools') )
-
- def queryset(self, request):
- return VRouterService.get_service_objects_by_user(request.user)
-
-class VRouterTenantForm(forms.ModelForm):
- public_ip = forms.CharField(required=True)
- public_mac = forms.CharField(required=True)
- gateway_ip = forms.CharField(required=False)
- gateway_mac = forms.CharField(required=False)
- cidr = forms.CharField(required=False)
- address_pool = forms.ModelChoiceField(queryset=AddressPool.objects.all(),required=False)
-
- def __init__(self,*args,**kwargs):
- super (VRouterTenantForm,self ).__init__(*args,**kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['provider_service'].queryset = VRouterService.get_service_objects().all()
- if self.instance:
- # fields for the attributes
- self.fields['address_pool'].initial = self.instance.address_pool
- self.fields['public_ip'].initial = self.instance.public_ip
- self.fields['public_mac'].initial = self.instance.public_mac
- self.fields['gateway_ip'].initial = self.instance.gateway_ip
- self.fields['gateway_mac'].initial = self.instance.gateway_mac
- self.fields['cidr'].initial = self.instance.cidr
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = VROUTER_KIND
- if VRouterService.get_service_objects().exists():
- self.fields["provider_service"].initial = VRouterService.get_service_objects().all()[0]
-
- def save(self, commit=True):
- self.instance.public_ip = self.cleaned_data.get("public_ip")
- self.instance.public_mac = self.cleaned_data.get("public_mac")
- self.instance.address_pool = self.cleaned_data.get("address_pool")
- return super(VRouterTenantForm, self).save(commit=commit)
-
- class Meta:
- model = VRouterTenant
-
-class VRouterTenantAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', 'subscriber_tenant', 'public_ip' )
- list_display_links = ('backend_status_icon', 'id')
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_tenant', 'subscriber_service',
- 'address_pool', 'public_ip', 'public_mac', 'gateway_ip', 'gateway_mac', 'cidr'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'service_specific_attribute', 'gateway_ip', 'gateway_mac', 'cidr')
- form = VRouterTenantForm
-
- suit_form_tabs = (('general','Details'),)
-
- def queryset(self, request):
- return VRouterTenant.get_tenant_objects_by_user(request.user)
-
-admin.site.register(VRouterService, VRouterServiceAdmin)
-admin.site.register(VRouterTenant, VRouterTenantAdmin)
-
diff --git a/xos/onboard/vrouter-old/models.py b/xos/onboard/vrouter-old/models.py
deleted file mode 100644
index d302b13..0000000
--- a/xos/onboard/vrouter-old/models.py
+++ /dev/null
@@ -1,143 +0,0 @@
-from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port, AddressPool
-from core.models.plcorebase import StrippedCharField
-import os
-from django.db import models, transaction
-from django.forms.models import model_to_dict
-from django.db.models import Q
-from operator import itemgetter, attrgetter, methodcaller
-from core.models import Tag
-from core.models.service import LeastLoadedNodeScheduler
-import traceback
-from xos.exceptions import *
-from xos.config import Config
-
-class ConfigurationError(Exception):
- pass
-
-
-VROUTER_KIND = "vROUTER"
-
-# NOTE: don't change VROUTER_KIND unless you also change the reference to it
-# in tosca/resources/network.py
-
-CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
-
-class VRouterService(Service):
- KIND = VROUTER_KIND
-
- class Meta:
- app_label = "vrouter"
- verbose_name = "vRouter Service"
- proxy = True
-
- def ip_to_mac(self, ip):
- (a, b, c, d) = ip.split('.')
- return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
-
- def get_gateways(self):
- gateways=[]
-
- aps = self.addresspools.all()
- for ap in aps:
- gateways.append( {"gateway_ip": ap.gateway_ip, "gateway_mac": ap.gateway_mac} )
-
- return gateways
-
- def get_address_pool(self, name):
- ap = AddressPool.objects.filter(name=name, service=self)
- if not ap:
- raise Exception("vRouter unable to find addresspool %s" % name)
- return ap[0]
-
- def get_tenant(self, **kwargs):
- address_pool_name = kwargs.pop("address_pool_name")
-
- ap = self.get_address_pool(address_pool_name)
-
- ip = ap.get_address()
- if not ip:
- raise Exception("AddressPool '%s' has run out of addresses." % ap.name)
-
- t = VRouterTenant(provider_service=self, **kwargs)
- t.public_ip = ip
- t.public_mac = self.ip_to_mac(ip)
- t.address_pool_id = ap.id
- t.save()
-
- return t
-
-#VRouterService.setup_simple_attributes()
-
-class VRouterTenant(Tenant):
- class Meta:
- proxy = True
-
- KIND = VROUTER_KIND
-
- simple_attributes = ( ("public_ip", None),
- ("public_mac", None),
- ("address_pool_id", None),
- )
-
- @property
- def gateway_ip(self):
- if not self.address_pool:
- return None
- return self.address_pool.gateway_ip
-
- @property
- def gateway_mac(self):
- if not self.address_pool:
- return None
- return self.address_pool.gateway_mac
-
- @property
- def cidr(self):
- if not self.address_pool:
- return None
- return self.address_pool.cidr
-
- @property
- def netbits(self):
- # return number of bits in the network portion of the cidr
- if self.cidr:
- parts = self.cidr.split("/")
- if len(parts)==2:
- return int(parts[1].strip())
- return None
-
- @property
- def address_pool(self):
- if getattr(self, "cached_address_pool", None):
- return self.cached_address_pool
- if not self.address_pool_id:
- return None
- aps=AddressPool.objects.filter(id=self.address_pool_id)
- if not aps:
- return None
- ap=aps[0]
- self.cached_address_pool = ap
- return ap
-
- @address_pool.setter
- def address_pool(self, value):
- if value:
- value = value.id
- if (value != self.get_attribute("address_pool_id", None)):
- self.cached_address_pool=None
- self.set_attribute("address_pool_id", value)
-
- def cleanup_addresspool(self):
- if self.address_pool_id:
- ap = AddressPool.objects.filter(id=self.address_pool_id)
- if ap:
- ap[0].put_address(self.public_ip)
- self.public_ip = None
-
- def delete(self, *args, **kwargs):
- self.cleanup_addresspool()
- super(VRouterTenant, self).delete(*args, **kwargs)
-
-VRouterTenant.setup_simple_attributes()
-
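VRouterService.get_tenant() above allocates a public IP from an AddressPool and derives a locally-administered MAC from it. A minimal sketch of those two pieces in isolation (DemoAddressPool is a stand-in, not the XOS AddressPool model):

    def ip_to_mac(ip):
        # Same deterministic mapping as VRouterService.ip_to_mac().
        a, b, c, d = ip.split(".")
        return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))

    class DemoAddressPool(object):
        """Stand-in for AddressPool.get_address()/put_address()."""
        def __init__(self, addresses):
            self.free = list(addresses)
        def get_address(self):
            return self.free.pop(0) if self.free else None
        def put_address(self, addr):
            self.free.append(addr)

    pool = DemoAddressPool(["10.168.0.3", "10.168.0.4"])
    ip = pool.get_address()
    print(ip, ip_to_mac(ip))   # 10.168.0.3 02:42:0a:a8:00:03
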
diff --git a/xos/onboard/vrouter-old/templates/vrouteradmin.html b/xos/onboard/vrouter-old/templates/vrouteradmin.html
deleted file mode 100644
index e69de29..0000000
--- a/xos/onboard/vrouter-old/templates/vrouteradmin.html
+++ /dev/null
diff --git a/xos/onboard/vrouter-old/tosca/resources/vrouterservice.py b/xos/onboard/vrouter-old/tosca/resources/vrouterservice.py
deleted file mode 100644
index 14dabcc..0000000
--- a/xos/onboard/vrouter-old/tosca/resources/vrouterservice.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.vrouter.models import VRouterService
-
-from service import XOSService
-
-class XOSVRouterService(XOSService):
- provides = "tosca.nodes.VRouterService"
- xos_model = VRouterService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber"]
-
diff --git a/xos/onboard/vrouter-old/vrouter-onboard.yaml b/xos/onboard/vrouter-old/vrouter-onboard.yaml
deleted file mode 100644
index e956c96..0000000
--- a/xos/onboard/vrouter-old/vrouter-onboard.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the vRouter Service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#vrouter:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/vrouter/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/vrouteradmin.html
- tosca_resource: tosca/resources/vrouterservice.py
-
diff --git a/xos/onboard/vsg-old/admin.py b/xos/onboard/vsg-old/admin.py
deleted file mode 100644
index 92fa51d..0000000
--- a/xos/onboard/vsg-old/admin.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from django.contrib import admin
-
-from services.vsg.models import *
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline
-from core.middleware import get_request
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-#-----------------------------------------------------------------------------
-# vSG
-#-----------------------------------------------------------------------------
-
-class VSGServiceForm(forms.ModelForm):
- bbs_api_hostname = forms.CharField(required=False)
- bbs_api_port = forms.IntegerField(required=False)
- bbs_server = forms.CharField(required=False)
- backend_network_label = forms.CharField(required=False)
- bbs_slice = forms.ModelChoiceField(queryset=Slice.objects.all(), required=False)
- dns_servers = forms.CharField(required=False)
- url_filter_kind = forms.ChoiceField(choices=VSGService.URL_FILTER_KIND_CHOICES, required=False)
- node_label = forms.CharField(required=False)
-
- def __init__(self,*args,**kwargs):
- super (VSGServiceForm,self ).__init__(*args,**kwargs)
- if self.instance:
- self.fields['bbs_api_hostname'].initial = self.instance.bbs_api_hostname
- self.fields['bbs_api_port'].initial = self.instance.bbs_api_port
- self.fields['bbs_server'].initial = self.instance.bbs_server
- self.fields['backend_network_label'].initial = self.instance.backend_network_label
- self.fields['bbs_slice'].initial = self.instance.bbs_slice
- self.fields['dns_servers'].initial = self.instance.dns_servers
- self.fields['url_filter_kind'].initial = self.instance.url_filter_kind
- self.fields['node_label'].initial = self.instance.node_label
-
- def save(self, commit=True):
- self.instance.bbs_api_hostname = self.cleaned_data.get("bbs_api_hostname")
- self.instance.bbs_api_port = self.cleaned_data.get("bbs_api_port")
- self.instance.bbs_server = self.cleaned_data.get("bbs_server")
- self.instance.backend_network_label = self.cleaned_data.get("backend_network_label")
- self.instance.bbs_slice = self.cleaned_data.get("bbs_slice")
- self.instance.dns_servers = self.cleaned_data.get("dns_servers")
- self.instance.url_filter_kind = self.cleaned_data.get("url_filter_kind")
- self.instance.node_label = self.cleaned_data.get("node_label")
- return super(VSGServiceForm, self).save(commit=commit)
-
- class Meta:
- model = VSGService
-
-class VSGServiceAdmin(ReadOnlyAwareAdmin):
- model = VSGService
- verbose_name = "vSG Service"
- verbose_name_plural = "vSG Service"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description', "view_url", "icon_url", "service_specific_attribute", "node_label"],
- 'classes':['suit-tab suit-tab-general']}),
- ("backend config", {'fields': [ "backend_network_label", "url_filter_kind", "bbs_api_hostname", "bbs_api_port", "bbs_server", "bbs_slice"],
- 'classes':['suit-tab suit-tab-backend']}),
- ("vSG config", {'fields': ["dns_servers"],
- 'classes':['suit-tab suit-tab-vsg']}) ]
- readonly_fields = ('backend_status_text', "service_specific_attribute")
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
- form = VSGServiceForm
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'Service Details'),
- ('backend', 'Backend Config'),
- ('vsg', 'vSG Config'),
- ('administration', 'Administration'),
- #('tools', 'Tools'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges') ,
- )
-
- suit_form_includes = (('vcpeadmin.html', 'top', 'administration'),
- ) #('hpctools.html', 'top', 'tools') )
-
- def queryset(self, request):
- return VSGService.get_service_objects_by_user(request.user)
-
-class VSGTenantForm(forms.ModelForm):
- bbs_account = forms.CharField(required=False)
- creator = forms.ModelChoiceField(queryset=User.objects.all())
- instance = forms.ModelChoiceField(queryset=Instance.objects.all(),required=False)
- last_ansible_hash = forms.CharField(required=False)
- wan_container_ip = forms.CharField(required=False)
- wan_container_mac = forms.CharField(required=False)
-
- def __init__(self,*args,**kwargs):
- super (VSGTenantForm,self ).__init__(*args,**kwargs)
- self.fields['kind'].widget.attrs['readonly'] = True
- self.fields['provider_service'].queryset = VSGService.get_service_objects().all()
- if self.instance:
- # fields for the attributes
- self.fields['bbs_account'].initial = self.instance.bbs_account
- self.fields['creator'].initial = self.instance.creator
- self.fields['instance'].initial = self.instance.instance
- self.fields['last_ansible_hash'].initial = self.instance.last_ansible_hash
- self.fields['wan_container_ip'].initial = self.instance.wan_container_ip
- self.fields['wan_container_mac'].initial = self.instance.wan_container_mac
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = VCPE_KIND
- self.fields['creator'].initial = get_request().user
- if VSGService.get_service_objects().exists():
- self.fields["provider_service"].initial = VSGService.get_service_objects().all()[0]
-
- def save(self, commit=True):
- self.instance.creator = self.cleaned_data.get("creator")
- self.instance.instance = self.cleaned_data.get("instance")
- self.instance.last_ansible_hash = self.cleaned_data.get("last_ansible_hash")
- return super(VSGTenantForm, self).save(commit=commit)
-
- class Meta:
- model = VSGTenant
-
-class VSGTenantAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', 'subscriber_tenant' )
- list_display_links = ('backend_status_icon', 'id')
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_tenant', 'service_specific_id', # 'service_specific_attribute',
- 'wan_container_ip', 'wan_container_mac', 'bbs_account', 'creator', 'instance', 'last_ansible_hash'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'service_specific_attribute', 'bbs_account', 'wan_container_ip', 'wan_container_mac')
- form = VSGTenantForm
-
- suit_form_tabs = (('general','Details'),)
-
- def queryset(self, request):
- return VSGTenant.get_tenant_objects_by_user(request.user)
-
-
-admin.site.register(VSGService, VSGServiceAdmin)
-admin.site.register(VSGTenant, VSGTenantAdmin)
-
diff --git a/xos/onboard/vsg-old/api/service/vsg/vsgservice.py b/xos/onboard/vsg-old/api/service/vsg/vsgservice.py
deleted file mode 100644
index a04fb3e..0000000
--- a/xos/onboard/vsg-old/api/service/vsg/vsgservice.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import viewsets
-from rest_framework import status
-from rest_framework.decorators import detail_route, list_route
-from rest_framework.views import APIView
-from core.models import *
-from django.forms import widgets
-from django.conf.urls import patterns, url
-from services.vsg.models import VSGService
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-from django.shortcuts import get_object_or_404
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from xos.exceptions import *
-import json
-import subprocess
-from django.views.decorators.csrf import ensure_csrf_cookie
-
-class VSGServiceForApi(VSGService):
- class Meta:
- proxy = True
- app_label = "cord"
-
- def __init__(self, *args, **kwargs):
- super(VSGServiceForApi, self).__init__(*args, **kwargs)
-
- def save(self, *args, **kwargs):
- super(VSGServiceForApi, self).save(*args, **kwargs)
-
- def __init__(self, *args, **kwargs):
- super(VSGService, self).__init__(*args, **kwargs)
-
-class VSGServiceSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- wan_container_gateway_ip = serializers.CharField(required=False)
- wan_container_gateway_mac = serializers.CharField(required=False)
- dns_servers = serializers.CharField(required=False)
- url_filter_kind = serializers.CharField(required=False)
- node_label = serializers.CharField(required=False)
-
- class Meta:
- model = VSGServiceForApi
- fields = ('humanReadableName',
- 'id',
- 'wan_container_gateway_ip',
- 'wan_container_gateway_mac',
- 'dns_servers',
- 'url_filter_kind',
- 'node_label')
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-# @ensure_csrf_cookie
-class VSGServiceViewSet(XOSViewSet):
- base_name = "vsgservice"
- method_name = None # use the api endpoint /api/service/vsg/
- method_kind = "viewset"
- queryset = VSGService.get_service_objects().select_related().all()
- serializer_class = VSGServiceSerializer
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(VSGServiceViewSet, self).get_urlpatterns(api_path=api_path)
-
- return patterns
-
- def list(self, request):
- object_list = self.filter_queryset(self.get_queryset())
-
- serializer = self.get_serializer(object_list, many=True)
-
- return Response(serializer.data)
-
diff --git a/xos/onboard/vsg-old/api/tenant/cord/vsg.py b/xos/onboard/vsg-old/api/tenant/cord/vsg.py
deleted file mode 100644
index c6a4247..0000000
--- a/xos/onboard/vsg-old/api/tenant/cord/vsg.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import status
-from core.models import *
-from django.forms import widgets
-from services.vsg.models import VSGTenant, VSGService
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-
-def get_default_vsg_service():
- vsg_services = VSGService.get_service_objects().all()
- if vsg_services:
- return vsg_services[0].id
- return None
-
-class VSGTenantForAPI(VSGTenant):
- class Meta:
- proxy = True
- app_label = "cord"
-
- @property
- def related(self):
- related = {}
- if self.instance:
- related["instance_id"] = self.instance.id
- return related
-
-class VSGTenantSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- wan_container_ip = serializers.CharField()
- wan_container_mac = ReadOnlyField()
- related = serializers.DictField(required=False)
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- class Meta:
- model = VSGTenantForAPI
- fields = ('humanReadableName', 'id', 'wan_container_ip', 'wan_container_mac', 'related' )
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-class VSGTenantViewSet(XOSViewSet):
- base_name = "vsg"
- method_name = "vsg"
- method_kind = "viewset"
- queryset = VSGTenantForAPI.get_tenant_objects().all()
- serializer_class = VSGTenantSerializer
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(VSGTenantViewSet, self).get_urlpatterns(api_path=api_path)
-
- return patterns
-
-
-
-
-
-
diff --git a/xos/onboard/vsg-old/models.py b/xos/onboard/vsg-old/models.py
deleted file mode 100644
index ad25c98..0000000
--- a/xos/onboard/vsg-old/models.py
+++ /dev/null
@@ -1,448 +0,0 @@
-from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port, AddressPool, User
-from core.models.plcorebase import StrippedCharField
-import os
-from django.db import models, transaction
-from django.forms.models import model_to_dict
-from django.db.models import Q
-from operator import itemgetter, attrgetter, methodcaller
-from core.models import Tag
-from core.models.service import LeastLoadedNodeScheduler
-from services.vrouter.models import VRouterService, VRouterTenant
-import traceback
-from xos.exceptions import *
-from xos.config import Config
-
-class ConfigurationError(Exception):
- pass
-
-VCPE_KIND = "vCPE"
-CORD_SUBSCRIBER_KIND = "CordSubscriberRoot"
-
-CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
-
-# -------------------------------------------
-# VCPE
-# -------------------------------------------
-
-class VSGService(Service):
- KIND = VCPE_KIND
-
- URL_FILTER_KIND_CHOICES = ( (None, "None"), ("safebrowsing", "Safe Browsing"), ("answerx", "AnswerX") )
-
- simple_attributes = ( ("bbs_api_hostname", None),
- ("bbs_api_port", None),
- ("bbs_server", None),
- ("backend_network_label", "hpc_client"),
- ("dns_servers", "8.8.8.8"),
- ("url_filter_kind", None),
- ("node_label", None) )
-
- def __init__(self, *args, **kwargs):
- super(VSGService, self).__init__(*args, **kwargs)
-
- class Meta:
- app_label = "vsg"
- verbose_name = "vSG Service"
- proxy = True
-
- def allocate_bbs_account(self):
- vcpes = VSGTenant.get_tenant_objects().all()
- bbs_accounts = [vcpe.bbs_account for vcpe in vcpes]
-
- # There's a bit of a race here; some other user could be trying to
- # allocate a bbs_account at the same time we are.
-
- for i in range(2,21):
- account_name = "bbs%02d@onlab.us" % i
- if (account_name not in bbs_accounts):
- return account_name
-
- raise XOSConfigurationError("We've run out of available broadbandshield accounts. Delete some vcpe and try again.")
-
- @property
- def bbs_slice(self):
- bbs_slice_id=self.get_attribute("bbs_slice_id")
- if not bbs_slice_id:
- return None
- bbs_slices=Slice.objects.filter(id=bbs_slice_id)
- if not bbs_slices:
- return None
- return bbs_slices[0]
-
- @bbs_slice.setter
- def bbs_slice(self, value):
- if value:
- value = value.id
- self.set_attribute("bbs_slice_id", value)
-
-VSGService.setup_simple_attributes()
-
-class VSGTenant(TenantWithContainer):
- class Meta:
- proxy = True
-
- KIND = VCPE_KIND
-
- sync_attributes = ("wan_container_ip", "wan_container_mac", "wan_container_netbits",
- "wan_container_gateway_ip", "wan_container_gateway_mac",
- "wan_vm_ip", "wan_vm_mac")
-
- default_attributes = {"instance_id": None,
- "container_id": None,
- "users": [],
- "bbs_account": None,
- "last_ansible_hash": None,
- "wan_container_ip": None}
-
- def __init__(self, *args, **kwargs):
- super(VSGTenant, self).__init__(*args, **kwargs)
- self.cached_vrouter=None
-
- @property
- def vbng(self):
- # not supported
- return None
-
- @vbng.setter
- def vbng(self, value):
- raise XOSConfigurationError("vCPE.vBNG cannot be set this way -- create a new vBNG object and set it's subscriber_tenant instead")
-
- @property
- def vrouter(self):
- vrouter = self.get_newest_subscribed_tenant(VRouterTenant)
- if not vrouter:
- return None
-
- # always return the same object when possible
- if (self.cached_vrouter) and (self.cached_vrouter.id == vrouter.id):
- return self.cached_vrouter
-
- vrouter.caller = self.creator
- self.cached_vrouter = vrouter
- return vrouter
-
- @vrouter.setter
- def vrouter(self, value):
- raise XOSConfigurationError("vCPE.vRouter cannot be set this way -- create a new vRuter object and set its subscriber_tenant instead")
-
- @property
- def volt(self):
- from services.volt.models import VOLTTenant
- if not self.subscriber_tenant:
- return None
- volts = VOLTTenant.objects.filter(id=self.subscriber_tenant.id)
- if not volts:
- return None
- return volts[0]
-
- @property
- def bbs_account(self):
- return self.get_attribute("bbs_account", self.default_attributes["bbs_account"])
-
- @bbs_account.setter
- def bbs_account(self, value):
- return self.set_attribute("bbs_account", value)
-
- @property
- def last_ansible_hash(self):
- return self.get_attribute("last_ansible_hash", self.default_attributes["last_ansible_hash"])
-
- @last_ansible_hash.setter
- def last_ansible_hash(self, value):
- return self.set_attribute("last_ansible_hash", value)
-
- @property
- def ssh_command(self):
- if self.instance:
- return self.instance.get_ssh_command()
- else:
- return "no-instance"
-
- @ssh_command.setter
- def ssh_command(self, value):
- pass
-
- def get_vrouter_field(self, name, default=None):
- if self.vrouter:
- return getattr(self.vrouter, name, default)
- else:
- return default
-
- @property
- def wan_container_ip(self):
- return self.get_vrouter_field("public_ip", None)
-
- @property
- def wan_container_mac(self):
- return self.get_vrouter_field("public_mac", None)
-
- @property
- def wan_container_netbits(self):
- return self.get_vrouter_field("netbits", None)
-
- @property
- def wan_container_gateway_ip(self):
- return self.get_vrouter_field("gateway_ip", None)
-
- @property
- def wan_container_gateway_mac(self):
- return self.get_vrouter_field("gateway_mac", None)
-
- @property
- def wan_vm_ip(self):
- tags = Tag.select_by_content_object(self.instance).filter(name="vm_vrouter_tenant")
- if tags:
- tenant = VRouterTenant.objects.get(id=tags[0].value)
- return tenant.public_ip
- else:
- raise Exception("no vm_vrouter_tenant tag for instance %s" % o.instance)
-
- @property
- def wan_vm_mac(self):
- tags = Tag.select_by_content_object(self.instance).filter(name="vm_vrouter_tenant")
- if tags:
- tenant = VRouterTenant.objects.get(id=tags[0].value)
- return tenant.public_mac
- else:
- raise Exception("no vm_vrouter_tenant tag for instance %s" % o.instance)
-
- @property
- def is_synced(self):
- return (self.enacted is not None) and (self.enacted >= self.updated)
-
- @is_synced.setter
- def is_synced(self, value):
- pass
-
- def get_vrouter_service(self):
- vrouterServices = VRouterService.get_service_objects().all()
- if not vrouterServices:
- raise XOSConfigurationError("No VROUTER Services available")
- return vrouterServices[0]
-
- def manage_vrouter(self):
- # Each vCPE object owns exactly one vRouterTenant object
-
- if self.deleted:
- return
-
- if self.vrouter is None:
- vrouter = self.get_vrouter_service().get_tenant(address_pool_name="addresses_vsg", subscriber_tenant = self)
- vrouter.caller = self.creator
- vrouter.save()
-
- def cleanup_vrouter(self):
- if self.vrouter:
- # print "XXX cleanup vrouter", self.vrouter
- self.vrouter.delete()
-
- def cleanup_orphans(self):
- # ensure vCPE only has one vRouter
- cur_vrouter = self.vrouter
- for vrouter in list(self.get_subscribed_tenants(VRouterTenant)):
- if (not cur_vrouter) or (vrouter.id != cur_vrouter.id):
- # print "XXX clean up orphaned vrouter", vrouter
- vrouter.delete()
-
- if self.orig_instance_id and (self.orig_instance_id != self.get_attribute("instance_id")):
- instances=Instance.objects.filter(id=self.orig_instance_id)
- if instances:
- # print "XXX clean up orphaned instance", instances[0]
- instances[0].delete()
-
- def get_slice(self):
- if not self.provider_service.slices.count():
- print self, "dio porco"
- raise XOSConfigurationError("The service has no slices")
- slice = self.provider_service.slices.all()[0]
- return slice
-
- def get_vsg_service(self):
- return VSGService.get_service_objects().get(id=self.provider_service.id)
-
- def find_instance_for_s_tag(self, s_tag):
- #s_tags = STagBlock.objects.find(s_s_tag)
- #if s_tags:
- # return s_tags[0].instance
-
- tags = Tag.objects.filter(name="s_tag", value=s_tag)
- if tags:
- return tags[0].content_object
-
- return None
-
- def find_or_make_instance_for_s_tag(self, s_tag):
- instance = self.find_instance_for_s_tag(self.volt.s_tag)
- if instance:
- return instance
-
- flavors = Flavor.objects.filter(name="m1.small")
- if not flavors:
- raise XOSConfigurationError("No m1.small flavor")
-
- slice = self.provider_service.slices.all()[0]
-
- if slice.default_isolation == "container_vm":
- (node, parent) = ContainerVmScheduler(slice).pick()
- else:
- (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service().node_label).pick()
-
- instance = Instance(slice = slice,
- node = node,
- image = self.image,
- creator = self.creator,
- deployment = node.site_deployment.deployment,
- flavor = flavors[0],
- isolation = slice.default_isolation,
- parent = parent)
-
- self.save_instance(instance)
-
- return instance
-
- def manage_container(self):
- from core.models import Instance, Flavor
-
- if self.deleted:
- return
-
- # For container or container_vm isolation, use what TenantWithContainer
- # provides us
- slice = self.get_slice()
- if slice.default_isolation in ["container_vm", "container"]:
- super(VSGTenant,self).manage_container()
- return
-
- if not self.volt:
- raise XOSConfigurationError("This vCPE container has no volt")
-
- if self.instance:
- # We're good.
- return
-
- instance = self.find_or_make_instance_for_s_tag(self.volt.s_tag)
- self.instance = instance
- super(TenantWithContainer, self).save()
-
- def cleanup_container(self):
- if self.get_slice().default_isolation in ["container_vm", "container"]:
- super(VSGTenant,self).cleanup_container()
-
- # To-do: cleanup unused instances
- pass
-
- def manage_bbs_account(self):
- if self.deleted:
- return
-
- if self.volt and self.volt.subscriber and self.volt.subscriber.url_filter_enable:
- if not self.bbs_account:
- # make sure we use the proxied VSGService object, not the generic Service object
- vcpe_service = VSGService.objects.get(id=self.provider_service.id)
- self.bbs_account = vcpe_service.allocate_bbs_account()
- super(VSGTenant, self).save()
- else:
- if self.bbs_account:
- self.bbs_account = None
- super(VSGTenant, self).save()
-
- def find_or_make_port(self, instance, network, **kwargs):
- port = Port.objects.filter(instance=instance, network=network)
- if port:
- port = port[0]
- else:
- port = Port(instance=instance, network=network, **kwargs)
- port.save()
- return port
-
- def get_lan_network(self, instance):
- slice = self.provider_service.slices.all()[0]
- if CORD_USE_VTN:
- # there should be only one private network, and its template should not be the management template
- lan_networks = [x for x in slice.networks.all() if x.template.visibility=="private" and (not "management" in x.template.name)]
- if len(lan_networks)>1:
- raise XOSProgrammingError("The vSG slice should only have one non-management private network")
- else:
- lan_networks = [x for x in slice.networks.all() if "lan" in x.name]
- if not lan_networks:
- raise XOSProgrammingError("No lan_network")
- return lan_networks[0]
-
- def save_instance(self, instance):
- with transaction.atomic():
- instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
- super(VSGTenant, self).save_instance(instance)
-
- if instance.isolation in ["container", "container_vm"]:
- lan_network = self.get_lan_network(instance)
- port = self.find_or_make_port(instance, lan_network, ip="192.168.0.1", port_id="unmanaged")
- port.set_parameter("c_tag", self.volt.c_tag)
- port.set_parameter("s_tag", self.volt.s_tag)
- port.set_parameter("device", "eth1")
- port.set_parameter("bridge", "br-lan")
-
- wan_networks = [x for x in instance.slice.networks.all() if "wan" in x.name]
- if not wan_networks:
- raise XOSProgrammingError("No wan_network")
- port = self.find_or_make_port(instance, wan_networks[0])
- port.set_parameter("next_hop", value="10.0.1.253") # FIX ME
- port.set_parameter("device", "eth0")
-
- if instance.isolation in ["vm"]:
- lan_network = self.get_lan_network(instance)
- port = self.find_or_make_port(instance, lan_network)
- port.set_parameter("c_tag", self.volt.c_tag)
- port.set_parameter("s_tag", self.volt.s_tag)
- port.set_parameter("neutron_port_name", "stag-%s" % self.volt.s_tag)
- port.save()
-
- # tag the instance with the s-tag, so we can easily find the
- # instance later
- if self.volt and self.volt.s_tag:
- tags = Tag.objects.filter(name="s_tag", value=self.volt.s_tag)
- if not tags:
- tag = Tag(service=self.provider_service, content_object=instance, name="s_tag", value=self.volt.s_tag)
- tag.save()
-
- # VTN-CORD needs a WAN address for the VM, so that the VM can
- # be configured.
- if CORD_USE_VTN:
- tags = Tag.select_by_content_object(instance).filter(name="vm_vrouter_tenant")
- if not tags:
- vrouter = self.get_vrouter_service().get_tenant(address_pool_name="addresses_vsg", subscriber_service = self.provider_service)
- vrouter.set_attribute("tenant_for_instance_id", instance.id)
- vrouter.save()
- tag = Tag(service=self.provider_service, content_object=instance, name="vm_vrouter_tenant", value="%d" % vrouter.id)
- tag.save()
-
- def save(self, *args, **kwargs):
- if not self.creator:
- if not getattr(self, "caller", None):
- # caller must be set when creating a vCPE since it creates a slice
- raise XOSProgrammingError("VSGTenant's self.caller was not set")
- self.creator = self.caller
- if not self.creator:
- raise XOSProgrammingError("VSGTenant's self.creator was not set")
-
- super(VSGTenant, self).save(*args, **kwargs)
- model_policy_vcpe(self.pk)
-
- def delete(self, *args, **kwargs):
- self.cleanup_vrouter()
- self.cleanup_container()
- super(VSGTenant, self).delete(*args, **kwargs)
-
-def model_policy_vcpe(pk):
- # TODO: this should be made in to a real model_policy
- with transaction.atomic():
- vcpe = VSGTenant.objects.select_for_update().filter(pk=pk)
- if not vcpe:
- return
- vcpe = vcpe[0]
- vcpe.manage_container()
- vcpe.manage_vrouter()
- vcpe.manage_bbs_account()
- vcpe.cleanup_orphans()
-
-
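Much of the model above is the attribute-backed property pattern from TenantWithContainer: simple values such as bbs_account and last_ansible_hash live in a serialized attribute store and are exposed as properties with declared defaults. A dependency-free sketch of that mechanism, assuming a plain dict in place of the real store:

class AttributeBacked(object):
    """Toy stand-in for TenantWithContainer's get_attribute/set_attribute store."""
    default_attributes = {"bbs_account": None, "last_ansible_hash": None}

    def __init__(self):
        self._attrs = {}

    def get_attribute(self, name, default=None):
        return self._attrs.get(name, default)

    def set_attribute(self, name, value):
        self._attrs[name] = value

    @property
    def bbs_account(self):
        return self.get_attribute("bbs_account", self.default_attributes["bbs_account"])

    @bbs_account.setter
    def bbs_account(self, value):
        self.set_attribute("bbs_account", value)

tenant = AttributeBacked()
assert tenant.bbs_account is None           # falls back to the declared default
tenant.bbs_account = "bbs02@onlab.us"
assert tenant.bbs_account == "bbs02@onlab.us"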
diff --git a/xos/onboard/vsg-old/synchronizer/broadbandshield.py b/xos/onboard/vsg-old/synchronizer/broadbandshield.py
deleted file mode 100644
index dd2f00b..0000000
--- a/xos/onboard/vsg-old/synchronizer/broadbandshield.py
+++ /dev/null
@@ -1,396 +0,0 @@
-import requests
-import logging
-import json
-import sys
-from rest_framework.exceptions import APIException
-
-""" format of settings
-
- ["settings"]
- ["watershed"]
- ["rating"]
- ["categories"]
- ["blocklist"]
- ["allowlist"]
-
- ["users"]
- array
- ["account_id"] - 58
- ["reporting"] - False
- ["name"] - Scott1
- ["devices"]
- ["settings"] -
- ["watershed"]
- ["rating"]
- ["categories"]
- ["blocklist"]
- ["allowlist"]
-
- ["devices"]
- array
- ["username"] - "Scott1" or "" if whole-house
- ["uuid"] - empty
- ["mac_address"] - mac address as hex digits in ascii
- ["type"] - "laptop"
- ["name"] - human readable name of device ("Scott's laptop")
- ["settings"]
- ["watershed"]
- array
- array
- ["rating"]
- ["category"]
- ["rating"] - ["G" | "NONE"]
- ["categories"] - list of categories set by rating
- ["blocklist"] - []
- ["allowlist"] - []
-"""
-
-class BBS_Failure(APIException):
- status_code=400
- def __init__(self, why="broadbandshield error", fields={}):
- APIException.__init__(self, {"error": "BBS_Failure",
- "specific_error": why,
- "fields": fields})
-
-
-class BBS:
- level_map = {"PG_13": "PG13",
- "NONE": "OFF",
- "ALL": "NONE",
- None: "NONE"}
-
- def __init__(self, username, password, bbs_hostname=None, bbs_port=None):
- self.username = username
- self.password = password
-
- # XXX not tested on port 80
- #self.bbs_hostname = "www.broadbandshield.com"
- #self.bbs_port = 80
-
- if not bbs_hostname:
- bbs_hostname = "cordcompute01.onlab.us"
- if not bbs_port:
- bbs_port = 8018
-
- self.bbs_hostname = bbs_hostname
- self.bbs_port = int(bbs_port)
-
- self.api = "http://%s:%d/api" % (self.bbs_hostname, self.bbs_port)
- self.nic_update = "http://%s:%d/nic/update" % (self.bbs_hostname, self.bbs_port)
-
- self.session = None
- self.settings = None
-
- def login(self):
- self.session = requests.Session()
- r = self.session.post(self.api + "/login", data = json.dumps({"email": self.username, "password": self.password}))
- if (r.status_code != 200):
- raise BBS_Failure("Failed to login (%d)" % r.status_code)
-
- def get_account(self):
- if not self.session:
- self.login()
-
- r = self.session.get(self.api + "/account")
- if (r.status_code != 200):
- raise BBS_Failure("Failed to get account settings (%d)" % r.status_code)
- self.settings = r.json()
-
- return self.settings
-
- def post_account(self):
- if not self.settings:
- raise XOSProgrammingError("no settings to post")
-
- r = self.session.post(self.api + "/account/settings", data= json.dumps(self.settings))
- if (r.status_code != 200):
- raise BBS_Failure("Failed to set account settings (%d)" % r.status_code)
-
- def add_device(self, name, mac, type="tablet", username=""):
- data = {"name": name, "mac_address": mac, "type": type, "username": username}
- r = self.session.post(self.api + "/device", data = json.dumps(data))
- if (r.status_code != 200):
- raise BBS_Failure("Failed to add device (%d)" % r.status_code)
-
- def delete_device(self, data):
- r = self.session.delete(self.api + "/device", data = json.dumps(data))
- if (r.status_code != 200):
- raise BBS_Failure("Failed to delete device (%d)" % r.status_code)
-
- def add_user(self, name, rating="NONE", categories=[]):
- data = {"name": name, "settings": {"rating": rating, "categories": categories}}
- r = self.session.post(self.api + "/users", data = json.dumps(data))
- if (r.status_code != 200):
- raise BBS_Failure("Failed to add user (%d)" % r.status_code)
-
- def delete_user(self, data):
- r = self.session.delete(self.api + "/users", data = json.dumps(data))
- if (r.status_code != 200):
- raise BBS_Failure("Failed to delete user (%d)" % r.status_code)
-
- def clear_users_and_devices(self):
- if not self.settings:
- self.get_account()
-
- for device in self.settings["devices"]:
- self.delete_device(device)
-
- for user in self.settings["users"]:
- self.delete_user(user)
-
- def get_whole_home_level(self):
- if not self.settings:
- self.get_account()
-
- return self.settings["settings"]["rating"]
-
- def sync(self, whole_home_level, users):
- if not self.settings:
- self.get_account()
-
- vcpe_users = {}
- for user in users:
- user = user.copy()
- user["level"] = self.level_map.get(user["level"], user["level"])
- user["mac"] = user.get("mac", "")
- vcpe_users[user["name"]] = user
-
- whole_home_level = self.level_map.get(whole_home_level, whole_home_level)
-
- if (whole_home_level != self.settings["settings"]["rating"]):
- print "*** set whole_home", whole_home_level, "***"
- self.settings["settings"]["rating"] = whole_home_level
- self.post_account()
-
- bbs_usernames = [bbs_user["name"] for bbs_user in self.settings["users"]]
- bbs_devicenames = [bbs_device["name"] for bbs_device in self.settings["devices"]]
-
- add_users = []
- add_devices = []
- delete_users = []
- delete_devices = []
-
- for bbs_user in self.settings["users"]:
- bbs_username = bbs_user["name"]
- if bbs_username in vcpe_users.keys():
- vcpe_user = vcpe_users[bbs_username]
- if bbs_user["settings"]["rating"] != vcpe_user["level"]:
- print "set user", vcpe_user["name"], "rating", vcpe_user["level"]
- #bbs_user["settings"]["rating"] = vcpe_user["level"]
- # add can be used as an update
- add_users.append(vcpe_user)
- else:
- delete_users.append(bbs_user)
-
- for bbs_device in self.settings["devices"]:
- bbs_devicename = bbs_device["name"]
- if bbs_devicename in vcpe_users.keys():
- vcpe_user = vcpe_users[bbs_devicename]
- if bbs_device["mac_address"] != vcpe_user["mac"]:
- print "set device", vcpe_user["name"], "mac", vcpe_user["mac"]
- #bbs_device["mac_address"] = vcpe_user["mac"]
- # add of a device can't be used as an update, as you'll end
- # up with two of them.
- delete_devices.append(bbs_device)
- add_devices.append(vcpe_user)
- else:
- delete_devices.append(bbs_device)
-
- for (username, user) in vcpe_users.iteritems():
- if not username in bbs_usernames:
- add_users.append(user)
- if not username in bbs_devicenames:
- add_devices.append(user)
-
- for bbs_user in delete_users:
- print "delete user", bbs_user["name"]
- self.delete_user(bbs_user)
-
- for bbs_device in delete_devices:
- print "delete device", bbs_device["name"]
- self.delete_device(bbs_device)
-
- for vcpe_user in add_users:
- print "add user", vcpe_user["name"], "level", vcpe_user["level"]
- self.add_user(vcpe_user["name"], vcpe_user["level"])
-
- for vcpe_user in add_devices:
- print "add device", vcpe_user["name"], "mac", vcpe_user["mac"]
- self.add_device(vcpe_user["name"], vcpe_user["mac"], "tablet", vcpe_user["name"])
-
- def get_whole_home_rating(self):
- return self.settings["settings"]["rating"]
-
- def get_user(self, name):
- for user in self.settings["users"]:
- if user["name"]==name:
- return user
- return None
-
- def get_device(self, name):
- for device in self.settings["devices"]:
- if device["name"]==name:
- return device
- return None
-
- def dump(self):
- if not self.settings:
- self.get_account()
-
- print "whole_home_rating:", self.settings["settings"]["rating"]
- print "users:"
- for user in self.settings["users"]:
- print " user", user["name"], "rating", user["settings"]["rating"]
-
- print "devices:"
- for device in self.settings["devices"]:
- print " device", device["name"], "user", device["username"], "rating", device["settings"]["rating"], "mac", device["mac_address"]
-
- def associate(self, ip):
- bbs_hostname = "cordcompute01.onlab.us"
- r = requests.get(self.nic_update, params={"hostname": "onlab.us"}, headers={"X-Forwarded-For": ip}, auth=requests.auth.HTTPBasicAuth(self.username,self.password))
- if (r.status_code != 200):
- raise BBS_Failure("Failed to associate account with ip (%d)" % r.status_code)
-
-def dump():
- bbs = BBS(sys.argv[2], sys.argv[3])
- bbs.dump()
-
-def associate():
- if len(sys.argv)<5:
- print "you need to specify IP address"
- sys.exit(-1)
-
- bbs = BBS(sys.argv[2], sys.argv[3])
- bbs.associate(sys.argv[4])
-
-def self_test():
- bbs = BBS(sys.argv[2], sys.argv[3])
-
- print "*** initial ***"
- bbs.dump()
-
- open("bbs.json","w").write(json.dumps(bbs.settings))
-
- # a new BBS account will throw a 500 error if it has no rating
- bbs.settings["settings"]["rating"] = "R"
- #bbs.settings["settings"]["category"] = [u'PORNOGRAPHY', u'ADULT', u'ILLEGAL', u'WEAPONS', u'DRUGS', u'GAMBLING', u'CYBERBULLY', u'ANONYMIZERS', u'SUICIDE', u'MALWARE']
- #bbs.settings["settings"]["blocklist"] = []
- #bbs.settings["settings"]["allowlist"] = []
- #for water in bbs.settings["settings"]["watershed"];
- # water["categories"]=[]
- # delete everything
- bbs.post_account()
- bbs.clear_users_and_devices()
-
- print "*** cleared ***"
- bbs.settings=None
- bbs.dump()
-
- users = [{"name": "Moms pc", "level": "R", "mac": "010203040506"},
- {"name": "Dads pc", "level": "R", "mac": "010203040507"},
- {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
- {"name": "Jills iphone", "level": "G", "mac": "010203040509"}]
-
- print "*** syncing mom-R, Dad-R, jack-PG, Jill-G, wholehome-PG-13 ***"
-
- bbs.settings = None
- bbs.sync("PG-13", users)
-
- print "*** after sync ***"
- bbs.settings=None
- bbs.dump()
- assert(bbs.get_whole_home_rating() == "PG-13")
- assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
- assert(bbs.get_user("Dads pc")["settings"]["rating"] == "R")
- assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
- assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "G")
- assert(bbs.get_device("Moms pc")["mac_address"] == "010203040506")
- assert(bbs.get_device("Dads pc")["mac_address"] == "010203040507")
- assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
- assert(bbs.get_device("Jills iphone")["mac_address"] == "010203040509")
-
- print "*** update whole home level ***"
- bbs.settings=None
- bbs.get_account()
- bbs.settings["settings"]["rating"] = "PG"
- bbs.post_account()
-
- print "*** after sync ***"
- bbs.settings=None
- bbs.dump()
- assert(bbs.get_whole_home_rating() == "PG")
- assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
- assert(bbs.get_user("Dads pc")["settings"]["rating"] == "R")
- assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
- assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "G")
- assert(bbs.get_device("Moms pc")["mac_address"] == "010203040506")
- assert(bbs.get_device("Dads pc")["mac_address"] == "010203040507")
- assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
- assert(bbs.get_device("Jills iphone")["mac_address"] == "010203040509")
-
- print "*** delete dad, change moms IP, change jills level to PG, change whole home to PG-13 ***"
- users = [{"name": "Moms pc", "level": "R", "mac": "010203040511"},
- {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
- {"name": "Jills iphone", "level": "PG", "mac": "010203040509"}]
-
- bbs.settings = None
- bbs.sync("PG-13", users)
-
- print "*** after sync ***"
- bbs.settings=None
- bbs.dump()
- assert(bbs.get_whole_home_rating() == "PG-13")
- assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
- assert(bbs.get_user("Dads pc") == None)
- assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
- assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "PG")
- assert(bbs.get_device("Moms pc")["mac_address"] == "010203040511")
- assert(bbs.get_device("Dads pc") == None)
- assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
-
- print "add dad's laptop"
- users = [{"name": "Moms pc", "level": "R", "mac": "010203040511"},
- {"name": "Dads laptop", "level": "PG-13", "mac": "010203040512"},
- {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
- {"name": "Jills iphone", "level": "PG", "mac": "010203040509"}]
-
- bbs.settings = None
- bbs.sync("PG-13", users)
-
- print "*** after sync ***"
- bbs.settings=None
- bbs.dump()
- assert(bbs.get_whole_home_rating() == "PG-13")
- assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
- assert(bbs.get_user("Dads pc") == None)
- assert(bbs.get_user("Dads laptop")["settings"]["rating"] == "PG-13")
- assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
- assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "PG")
- assert(bbs.get_device("Moms pc")["mac_address"] == "010203040511")
- assert(bbs.get_device("Dads pc") == None)
- assert(bbs.get_device("Dads laptop")["mac_address"] == "010203040512")
- assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
-
- #bbs.add_user("tom", "G", [u'PORNOGRAPHY', u'ADULT', u'ILLEGAL', u'WEAPONS', u'DRUGS', u'GAMBLING', u'SOCIAL', u'CYBERBULLY', u'GAMES', u'ANONYMIZERS', u'SUICIDE', u'MALWARE'])
- #bbs.add_device(name="tom's iphone", mac="010203040506", type="tablet", username="tom")
-
-def main():
- if len(sys.argv)<4:
- print "syntax: broadbandshield.py <operation> <email> <password>"
- print " operation = [dump | selftest | assocate"
- sys.exit(-1)
-
- operation = sys.argv[1]
-
- if operation=="dump":
- dump()
- elif operation=="selftest":
- self_test()
- elif operation=="associate":
- associate()
-
-if __name__ == "__main__":
- main()
-
-
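The core of the deleted broadbandshield.py is BBS.sync(): it diffs the desired vCPE user list against the users and devices BBS already knows, re-adds users whose rating changed (a user add doubles as an update), and deletes then re-adds devices whose MAC changed (a device add does not). A standalone sketch of that reconciliation step, using plain dicts in place of the live REST session:

def plan_sync(desired_users, bbs_users, bbs_devices):
    """Compute add/delete actions the way BBS.sync() does.

    desired_users: {name: {"level": ..., "mac": ...}}
    bbs_users:     {name: rating}
    bbs_devices:   {name: mac}
    """
    add_users, add_devices, delete_users, delete_devices = [], [], [], []

    for name, rating in bbs_users.items():
        if name in desired_users:
            if rating != desired_users[name]["level"]:
                add_users.append(name)          # a user add acts as an update
        else:
            delete_users.append(name)

    for name, mac in bbs_devices.items():
        if name in desired_users:
            if mac != desired_users[name]["mac"]:
                delete_devices.append(name)     # a device add is not an update,
                add_devices.append(name)        # so delete first, then re-add
        else:
            delete_devices.append(name)

    for name in desired_users:
        if name not in bbs_users:
            add_users.append(name)
        if name not in bbs_devices:
            add_devices.append(name)

    return add_users, add_devices, delete_users, delete_devices

desired = {"Moms pc": {"level": "R", "mac": "010203040511"}}
current_users = {"Moms pc": "PG", "Dads pc": "R"}
current_devices = {"Moms pc": "010203040506"}
adds_u, adds_d, dels_u, dels_d = plan_sync(desired, current_users, current_devices)
assert adds_u == ["Moms pc"] and dels_u == ["Dads pc"]
assert adds_d == ["Moms pc"] and dels_d == ["Moms pc"]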
diff --git a/xos/onboard/vsg-old/synchronizer/files/docker.list b/xos/onboard/vsg-old/synchronizer/files/docker.list
deleted file mode 100644
index 0ee9ae0..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/docker.list
+++ /dev/null
@@ -1 +0,0 @@
-deb https://get.docker.com/ubuntu docker main
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/rc.local b/xos/onboard/vsg-old/synchronizer/files/etc/rc.local
deleted file mode 100755
index 2c7588f..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/rc.local
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh -e
-#
-# rc.local
-#
-# This script is executed at the end of each multiuser runlevel.
-# Make sure that the script will "exit 0" on success or any other
-# value on error.
-#
-# In order to enable or disable this script just change the execution
-# bits.
-#
-# By default this script does nothing.
-
-ufw enable
-ufw allow bootps
-ufw allow from 192.168.0.0/24
-ufw route allow in on eth1 out on eth0
-ufw route allow in on eth1 out on eth2
-
-BWLIMIT=/usr/local/sbin/bwlimit.sh
-[ -e $BWLIMIT ] && $BWLIMIT start || true
-
-exit 0
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/service/message/run b/xos/onboard/vsg-old/synchronizer/files/etc/service/message/run
deleted file mode 100755
index 7b587d8..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/service/message/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/python
-
-import BaseHTTPServer
-
-
-class HTTPHandlerOne(BaseHTTPServer.BaseHTTPRequestHandler):
- def do_GET(self):
- with open('./message.html', 'r') as msgfile:
- message = msgfile.read()
- self.wfile.write(message)
-
-
-def run(server_class=BaseHTTPServer.HTTPServer,
- handler_class=BaseHTTPServer.BaseHTTPRequestHandler):
- server_address = ('192.168.0.1', 8000)
- httpd = server_class(server_address, handler_class)
- httpd.serve_forever()
-
-run(handler_class=HTTPHandlerOne)
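The deleted message server above answers every GET without ever sending a status line or headers, which stricter HTTP clients will reject. A hedged sketch of the same idea with a proper response, assuming localhost and an inline placeholder body instead of the container's 192.168.0.1 address and message.html file:

#!/usr/bin/python
# Sketch only: same idea as the deleted redirect-message server, with a
# complete HTTP response and a localhost bind for easy local testing.
import BaseHTTPServer

MESSAGE = "<html><body>Service suspended</body></html>"  # placeholder body

class MessageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
        body = MESSAGE
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == "__main__":
    httpd = BaseHTTPServer.HTTPServer(("127.0.0.1", 8000), MessageHandler)
    httpd.serve_forever()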
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after.init b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after.init
deleted file mode 100644
index e89217d..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after.init
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-#
-# after.init: if executable, called by ufw-init. See 'man ufw-framework' for
-# details. Note that output from these scripts is not seen via the
-# the ufw command, but instead via ufw-init.
-#
-# Copyright 2013 Canonical Ltd.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3,
-# as published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-set -e
-
-case "$1" in
-start)
- # typically required
- ;;
-stop)
- # typically required
- ;;
-status)
- # optional
- ;;
-flush-all)
- # optional
- ;;
-*)
- echo "'$1' not supported"
- echo "Usage: after.init {start|stop|flush-all|status}"
- ;;
-esac
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after.rules b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after.rules
deleted file mode 100644
index 0d6c646..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after.rules
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# rules.input-after
-#
-# Rules that should be run after the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-# ufw-after-input
-# ufw-after-output
-# ufw-after-forward
-#
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw-after-input - [0:0]
-:ufw-after-output - [0:0]
-:ufw-after-forward - [0:0]
-# End required lines
-
-# don't log noisy services by default
--A ufw-after-input -p udp --dport 137 -j ufw-skip-to-policy-input
--A ufw-after-input -p udp --dport 138 -j ufw-skip-to-policy-input
--A ufw-after-input -p tcp --dport 139 -j ufw-skip-to-policy-input
--A ufw-after-input -p tcp --dport 445 -j ufw-skip-to-policy-input
--A ufw-after-input -p udp --dport 67 -j ufw-skip-to-policy-input
--A ufw-after-input -p udp --dport 68 -j ufw-skip-to-policy-input
-
-# don't log noisy broadcast
--A ufw-after-input -m addrtype --dst-type BROADCAST -j ufw-skip-to-policy-input
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after6.rules b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after6.rules
deleted file mode 100644
index 0d99672..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/after6.rules
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# rules.input-after
-#
-# Rules that should be run after the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-# ufw6-after-input
-# ufw6-after-output
-# ufw6-after-forward
-#
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw6-after-input - [0:0]
-:ufw6-after-output - [0:0]
-:ufw6-after-forward - [0:0]
-# End required lines
-
-# don't log noisy services by default
--A ufw6-after-input -p udp --dport 137 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p udp --dport 138 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p tcp --dport 139 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p tcp --dport 445 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p udp --dport 546 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p udp --dport 547 -j ufw6-skip-to-policy-input
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/applications.d/openssh-server b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/applications.d/openssh-server
deleted file mode 100644
index 9bbe906..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/applications.d/openssh-server
+++ /dev/null
@@ -1,4 +0,0 @@
-[OpenSSH]
-title=Secure shell server, an rshd replacement
-description=OpenSSH is a free implementation of the Secure Shell protocol.
-ports=22/tcp
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/before.init b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/before.init
deleted file mode 100644
index 1348cb1..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/before.init
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-#
-# before.init: if executable, called by ufw-init. See 'man ufw-framework' for
-# details. Note that output from these scripts is not seen via the
-# the ufw command, but instead via ufw-init.
-#
-# Copyright 2013 Canonical Ltd.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3,
-# as published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-set -e
-
-case "$1" in
-start)
- # typically required
- ;;
-stop)
- # typically required
- ;;
-status)
- # optional
- ;;
-flush-all)
- # optional
- ;;
-*)
- echo "'$1' not supported"
- echo "Usage: before.init {start|stop|flush-all|status}"
- ;;
-esac
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/before6.rules b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/before6.rules
deleted file mode 100644
index 0b26ed8..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/before6.rules
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-# rules.before
-#
-# Rules that should be run before the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-# ufw6-before-input
-# ufw6-before-output
-# ufw6-before-forward
-#
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw6-before-input - [0:0]
-:ufw6-before-output - [0:0]
-:ufw6-before-forward - [0:0]
-# End required lines
-
-
-# allow all on loopback
--A ufw6-before-input -i lo -j ACCEPT
--A ufw6-before-output -o lo -j ACCEPT
-
-# drop packets with RH0 headers
--A ufw6-before-input -m rt --rt-type 0 -j DROP
--A ufw6-before-forward -m rt --rt-type 0 -j DROP
--A ufw6-before-output -m rt --rt-type 0 -j DROP
-
-# for stateless autoconfiguration (restrict NDP messages to hop limit of 255)
--A ufw6-before-input -p icmpv6 --icmpv6-type neighbor-solicitation -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-output -p icmpv6 --icmpv6-type neighbor-solicitation -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type neighbor-advertisement -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-output -p icmpv6 --icmpv6-type neighbor-advertisement -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type router-solicitation -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type router-advertisement -m hl --hl-eq 255 -j ACCEPT
-
-# quickly process packets for which we already have a connection
--A ufw6-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw6-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw6-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-
-# for multicast ping replies from link-local addresses (these don't have an
-# associated connection and would otherwise be marked INVALID)
--A ufw6-before-input -p icmpv6 --icmpv6-type echo-reply -s fe80::/10 -j ACCEPT
-
-# drop INVALID packets (logs these in loglevel medium and higher)
--A ufw6-before-input -m conntrack --ctstate INVALID -j ufw6-logging-deny
--A ufw6-before-input -m conntrack --ctstate INVALID -j DROP
-
-# ok icmp codes for INPUT
--A ufw6-before-input -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type echo-request -j ACCEPT
-
-# ok icmp code for FORWARD
--A ufw6-before-forward -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type echo-request -j ACCEPT
-
-# allow dhcp client to work
--A ufw6-before-input -p udp -s fe80::/10 --sport 547 -d fe80::/10 --dport 546 -j ACCEPT
-
-# allow MULTICAST mDNS for service discovery
--A ufw6-before-input -p udp -d ff02::fb --dport 5353 -j ACCEPT
-
-# allow MULTICAST UPnP for service discovery
--A ufw6-before-input -p udp -d ff02::f --dport 1900 -j ACCEPT
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/sysctl.conf b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/sysctl.conf
deleted file mode 100644
index 8707032..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/sysctl.conf
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Configuration file for setting network variables. Please note these settings
-# override /etc/sysctl.conf. If you prefer to use /etc/sysctl.conf, please
-# adjust IPT_SYSCTL in /etc/default/ufw.
-#
-
-# Uncomment this to allow this host to route packets between interfaces
-#net/ipv4/ip_forward=1
-#net/ipv6/conf/default/forwarding=1
-#net/ipv6/conf/all/forwarding=1
-
-# Turn on Source Address Verification in all interfaces to prevent some
-# spoofing attacks
-net/ipv4/conf/default/rp_filter=1
-net/ipv4/conf/all/rp_filter=1
-
-# Do not accept IP source route packets (we are not a router)
-net/ipv4/conf/default/accept_source_route=0
-net/ipv4/conf/all/accept_source_route=0
-net/ipv6/conf/default/accept_source_route=0
-net/ipv6/conf/all/accept_source_route=0
-
-# Disable ICMP redirects. ICMP redirects are rarely used but can be used in
-# MITM (man-in-the-middle) attacks. Disabling ICMP may disrupt legitimate
-# traffic to those sites.
-net/ipv4/conf/default/accept_redirects=0
-net/ipv4/conf/all/accept_redirects=0
-net/ipv6/conf/default/accept_redirects=0
-net/ipv6/conf/all/accept_redirects=0
-
-# Ignore bogus ICMP errors
-net/ipv4/icmp_echo_ignore_broadcasts=1
-net/ipv4/icmp_ignore_bogus_error_responses=1
-net/ipv4/icmp_echo_ignore_all=0
-
-# Don't log Martian Packets (impossible packets)
-net/ipv4/conf/default/log_martians=0
-net/ipv4/conf/all/log_martians=0
-
-# Change to '1' to enable TCP/IP SYN cookies This disables TCP Window Scaling
-# (http://lkml.org/lkml/2008/2/5/167)
-net/ipv4/tcp_syncookies=0
-
-#net/ipv4/tcp_fin_timeout=30
-#net/ipv4/tcp_keepalive_intvl=1800
-
-# normally allowing tcp_sack is ok, but if going through OpenBSD 3.8 RELEASE or
-# earlier pf firewall, should set this to 0
-net/ipv4/tcp_sack=1
-
-# Uncomment this to turn off ipv6 autoconfiguration
-#net/ipv6/conf/default/autoconf=0
-#net/ipv6/conf/all/autoconf=0
-
-# Uncomment this to enable ipv6 privacy addressing
-#net/ipv6/conf/default/use_tempaddr=2
-#net/ipv6/conf/all/use_tempaddr=2
diff --git a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/ufw.conf b/xos/onboard/vsg-old/synchronizer/files/etc/ufw/ufw.conf
deleted file mode 100644
index 28fe534..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/etc/ufw/ufw.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# /etc/ufw/ufw.conf
-#
-
-# Set to yes to start on boot. If setting this remotely, be sure to add a rule
-# to allow your remote connection before starting ufw. Eg: 'ufw allow 22/tcp'
-ENABLED=yes
-
-# Please use the 'ufw' command to set the loglevel. Eg: 'ufw logging medium'.
-# See 'man ufw' for details.
-LOGLEVEL=low
diff --git a/xos/onboard/vsg-old/synchronizer/files/vcpe.conf b/xos/onboard/vsg-old/synchronizer/files/vcpe.conf
deleted file mode 100644
index 752c57a..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/vcpe.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for vCPE
-description "vCPE container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
- /usr/local/sbin/start-vcpe.sh
-end script
diff --git a/xos/onboard/vsg-old/synchronizer/files/vcpe.dnsmasq b/xos/onboard/vsg-old/synchronizer/files/vcpe.dnsmasq
deleted file mode 100644
index 2b2687b..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/vcpe.dnsmasq
+++ /dev/null
@@ -1,2 +0,0 @@
-listen-address=192.168.0.1
-dhcp-range=192.168.0.2,192.168.0.254,6
diff --git a/xos/onboard/vsg-old/synchronizer/files/vm-resolv.conf b/xos/onboard/vsg-old/synchronizer/files/vm-resolv.conf
deleted file mode 100644
index cae093a..0000000
--- a/xos/onboard/vsg-old/synchronizer/files/vm-resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 8.8.8.8
diff --git a/xos/onboard/vsg-old/synchronizer/manifest b/xos/onboard/vsg-old/synchronizer/manifest
deleted file mode 100644
index d13ee05..0000000
--- a/xos/onboard/vsg-old/synchronizer/manifest
+++ /dev/null
@@ -1,49 +0,0 @@
-templates/bwlimit.sh.j2
-templates/vlan_sample.j2
-templates/before.rules.j2
-templates/start-vcpe.sh.j2
-templates/dnsmasq_safe_servers.j2
-templates/firewall_sample.j2
-templates/rc.local.j2
-templates/vcpe.conf.j2
-templates/message.html.j2
-templates/dnsmasq_servers.j2
-templates/start-vcpe-vtn.sh.j2
-manifest
-broadbandshield.py
-observer_ansible_test.py
-vcpe_synchronizer_config
-start-bbs.sh
-steps/sync_vcpetenant.py
-steps/sync_vcpetenant_new.yaml
-steps/sync_vcpetenant_vtn.yaml
-steps/sync_vcpetenant.yaml
-steps/test.yaml
-steps/ansible_test/README
-steps/ansible_test/test.yaml
-steps/ansible_test/xos.py
-steps/ansible_test/test.sh
-steps/ansible_test/inventory.txt
-start.sh
-files/vcpe.conf
-files/etc/service/message/run
-files/etc/rc.local
-files/etc/ufw/after6.rules
-files/etc/ufw/applications.d/openssh-server
-files/etc/ufw/sysctl.conf
-files/etc/ufw/ufw.conf
-files/etc/ufw/before6.rules
-files/etc/ufw/after.init
-files/etc/ufw/before.init
-files/etc/ufw/after.rules
-files/vm-resolv.conf
-files/docker.list
-files/vcpe.dnsmasq
-run-vtn.sh
-stop.sh
-vcpe-synchronizer.py
-model-deps
-supervisor/vcpe-observer.conf
-run.sh
-vtn_vcpe_synchronizer_config
-vcpe_stats_notifier.py
diff --git a/xos/onboard/vsg-old/synchronizer/model-deps b/xos/onboard/vsg-old/synchronizer/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/onboard/vsg-old/synchronizer/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/onboard/vsg-old/synchronizer/observer_ansible_test.py b/xos/onboard/vsg-old/synchronizer/observer_ansible_test.py
deleted file mode 100644
index b28da63..0000000
--- a/xos/onboard/vsg-old/synchronizer/observer_ansible_test.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-import os
-import sys
-
-sys.path.append("../..")
-import synchronizers.base.ansible
-
-print sys.argv
-
-private_key="""-----BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEAtJiuarud5S4Y2quDeWyaS0UCQGQtfuSzzNhplFwujYnJGL65
-e14REtv+UuHGymyr/SfkTrBd8vH5NI2UZ/4sZW13ieI/1d97OeVe2+ct0Y4BaFEI
-3Hja6DIpsY3Q2cBQsWUwcQzbMIF9jIq8SzwR1zk8UtZi09fNxqjCchRPlBvbiLKX
-g0/yirN237WbaKzK++8EPy3nuv83216MXHFFSjuxfICe/RhjaqMzVp7isSbv1suU
-geyvNNzU71c/K13gTggdcIXeRQBiJYio2Sn3h2nsV6AFqFH4fjERxWG55Q4e3jeE
-tWM/Dw+hqYKg/25UcmM862a6sUmuDCmM5S3VEQIDAQABAoIBACH88iZoNOki6c6N
-pOq/Q7KSxD+2GtHc3PHacNRZHPHKUqxziJjtNS1lddHHaBBEr4GvdkpQ6v2ECLCZ
-TKrdrSFRnsO2bukjbB+TSWz9byQXI7CsP4yuuhQlDK+7zuiMRyN7tcgw8TeJx0Uh
-/xnxrjHhNbcpXeQcoz+WFzI9HFT1MEGmMS4Lyp/zLB/pmfY9h7V9d+EeRZDi78jq
-Vir6MI6iCTa0T02dvHUFOg+wXLb0nb8V1xKDL+6cAJla7LzwoG8lTnvp5DSYCojI
-5JrILYafeO8RbBV2GWmaE5mkHgeBkFZ+qZQ7K0MjR30Yh6tajB7P3+F/Max8FUgW
-xLHr8AECgYEA2+o0ge3HtZcepEFBKKYnLTwoEpPCfLElWZHzUJYDz259s4JLsfak
-tROANFEdsJUjpmWG52MCL+bgKFFOedDkt4p1jgcIneaHk0jvoU11wG7W3jZZVy1q
-WjQNH5vDU+hg5tm/CREwm7lbUxR9Xuj9K63CNAAGp8KO7h2tOH8woIECgYEA0jrb
-LUg30RxO3+vrq9dUYohrDRisk5zKXuRLfxRA+E+ruvZ7CctG2OpM+658/qZM/w95
-7pOj6zz3//w7tAvH9erY+JOISnzaYKx04sYC1MfbFiFkq5j0gpuYm/MULDYNvFqr
-NU2Buj4dW+ZB+SeficsQOqm5QeNxh1kgiDCs7JECgYEAjSLGCAzeesA9vhTTCI95
-3SIaZbHGw9e8rLtqeHGOiHXU3nvksJYmJsAZK3pTn5xXgNbvuVhlcvCtM7LatntG
-DjUiNMB22z+0CuZoRBE+XP3FkF84/yX6d2Goenyw4wzkA8QDQoJxu789yRgBTgQh
-VwLw/AZ4PvoyWMdbAENApgECgYEAvFikosYP09XTyIPaKaOKY5iqqBoSC1GucSOB
-jAG+T3k5dxB6nQS0nYQUomvqak7drqnT6O33Lrr5ySrW5nCjnmvgJZwvv+Rp1bDM
-K5uRT8caPpJ+Wcp4TUdPi3BVA2MOHVDyEJg3AH/D1+DL/IgGQ/JcwOHsKt61iLhO
-EBXj5zECgYEAk+HuwksUPkSxg/AiJGbapGDK6XGymEUzo2duWlnofRqGcZ3NT3bB
-/kDI1KxQdlpODXSi4/BuTpbQiFOrzcEq5e5ytoMxlCHh3Fl3Jxl+JlgO21vAUvP6
-4SET7Q/6LxmfBlCVRg0dXDwcfJLgbnWxyvprIcz4e0FSFVZTBs/6tFk=
------END RSA PRIVATE KEY-----
-"""
-
-observer.ansible.run_template_ssh("test.yaml",
- {"instance_name": "onlab_test405-378",
- "instance_id": "instance-0000004d",
- "hostname": "node67.washington.vicci.org",
- "private_key": private_key})
-
diff --git a/xos/onboard/vsg-old/synchronizer/run-vtn.sh b/xos/onboard/vsg-old/synchronizer/run-vtn.sh
deleted file mode 100755
index c4c3b00..0000000
--- a/xos/onboard/vsg-old/synchronizer/run-vtn.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-# ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-cp /root/setup/node_key $XOS_DIR/synchronizers/vcpe/node_key
-chmod 0600 $XOS_DIR/synchronizers/vcpe/node_key
-python vcpe-synchronizer.py -C $XOS_DIR/synchronizers/vcpe/vtn_vcpe_synchronizer_config
diff --git a/xos/onboard/vsg-old/synchronizer/run.sh b/xos/onboard/vsg-old/synchronizer/run.sh
deleted file mode 100755
index f7c670d..0000000
--- a/xos/onboard/vsg-old/synchronizer/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-# ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-python vcpe-synchronizer.py -C $XOS_DIR/synchronizers/vcpe/vcpe_synchronizer_config
diff --git a/xos/onboard/vsg-old/synchronizer/start-bbs.sh b/xos/onboard/vsg-old/synchronizer/start-bbs.sh
deleted file mode 100755
index c8ee147..0000000
--- a/xos/onboard/vsg-old/synchronizer/start-bbs.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#! /bin/bash
-
-# put this in /opt/xerocole/start-bbs.sh
-# make sure it's executable
-# set it up in crontab
-# @reboot /opt/xerocole/start-bbs.sh
-
-ulimit -n 200000
-cd /opt/xerocole/answerx
-/opt/xerocole/answerx/startStop checkconfig answerx
-/opt/xerocole/answerx/startStop start answerx
-cd /opt/xerocole/namecontrols
-nohup /opt/xerocole/namecontrols/broadbandshield &
-nohup socat TCP-LISTEN:80,bind=0.0.0.0,fork TCP4:127.0.0.1:8018 &
diff --git a/xos/onboard/vsg-old/synchronizer/start.sh b/xos/onboard/vsg-old/synchronizer/start.sh
deleted file mode 100755
index dc5619b..0000000
--- a/xos/onboard/vsg-old/synchronizer/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-# ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-nohup python vcpe-synchronizer.py -C $XOS_DIR/synchronizers/vcpe/vcpe_synchronizer_config > /dev/null 2>&1 &
diff --git a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/README b/xos/onboard/vsg-old/synchronizer/steps/ansible_test/README
deleted file mode 100644
index d3b2c54..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/README
+++ /dev/null
@@ -1,4 +0,0 @@
-Some scripts used while testing the Ansible instance configuration observer
-
-xos.py was probably the prototype of an XOS SSH module for Ansible that understood how to SSH into the instances
-without needing to play config file and environment tricks.
diff --git a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/inventory.txt b/xos/onboard/vsg-old/synchronizer/steps/ansible_test/inventory.txt
deleted file mode 100644
index bd5b542..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/inventory.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-[onlab_hpc-355]
-node67.washington.vicci.org instance_id=instance-00000045 instance_name=onlab_hpc-355
-
-[onlab_test405-372]
-node67.washington.vicci.org instance_id=instance-0000004c instance_name=onlab_test405-372
-
-[onlab_test405-376]
-node1.cs.arizona.edu
-
-[onlab_test405-378]
-node67.washington.vicci.org ansible_ssh_private_key_file=/home/smbaker/.ssh/id_rsa
-#/home/smbaker/projects/vicci/keys/test_service_key_rsa
-
-[mysite_test2-48]
-cordcompute02.onlab.us ansible_ssh_private_key_file=/home/smbaker/projects/vicci/keys/demo_admin.rsa
-
diff --git a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/test.sh b/xos/onboard/vsg-old/synchronizer/steps/ansible_test/test.sh
deleted file mode 100755
index 157ba9c..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/test.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#! /bin/bash
-ansible-playbook --private-key /home/smbaker/.ssh/id_rsa -i ./inventory.txt test.yaml
diff --git a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/test.yaml b/xos/onboard/vsg-old/synchronizer/steps/ansible_test/test.yaml
deleted file mode 100644
index 6a29d56..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/test.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- hosts: onlab_test405-372
- connection: xos
- user: ubuntu
- vars:
- foo: 25
-# instance_name: instance-00000045
-# slice_name: onlab_hpc-355
-
- tasks:
- - name: foobar
- shell: echo foo > /tmp/foobar
diff --git a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/xos.py b/xos/onboard/vsg-old/synchronizer/steps/ansible_test/xos.py
deleted file mode 100755
index eb4f3eb..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/ansible_test/xos.py
+++ /dev/null
@@ -1,444 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import re
-import subprocess
-import shlex
-import pipes
-import random
-import select
-import fcntl
-import hmac
-import pwd
-import gettext
-import pty
-from hashlib import sha1
-import ansible.constants as C
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
-
-class Connection(object):
- ''' ssh based connections '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.ipv6 = ':' in self.host
- self.port = port
- self.user = str(user)
- self.password = password
- self.private_key_file = private_key_file
- self.HASHED_KEY_MAGIC = "|1|"
- self.has_pipelining = True
- #self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
- #self.instance_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "instance_name", "SLIVER_NAME", None)
-
- inject={}
- inject= utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
-
- self.instance_id = inject["instance_id"]
- self.instance_name = inject["instance_name"]
-
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
- def connect(self):
- ''' connect to the remote host '''
-
- vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
-
- self.common_args = []
- extra_args = C.ANSIBLE_SSH_ARGS
- if extra_args is not None:
- # make sure there is no empty string added as this can produce weird errors
- self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
- else:
- self.common_args += ["-o", "ControlMaster=auto",
- "-o", "ControlPersist=60s",
- "-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- self.common_args += ["-o", "ProxyCommand ssh -q -i %s %s@%s" % (self.private_key_file, self.instance_id, self.host)]
-
- cp_in_use = False
- cp_path_set = False
- for arg in self.common_args:
- if "ControlPersist" in arg:
- cp_in_use = True
- if "ControlPath" in arg:
- cp_path_set = True
-
- if cp_in_use and not cp_path_set:
- self.common_args += ["-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- if not C.HOST_KEY_CHECKING:
- self.common_args += ["-o", "StrictHostKeyChecking=no"]
-
- if self.port is not None:
- self.common_args += ["-o", "Port=%d" % (self.port)]
- if self.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
- elif self.runner.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
- if self.password:
- self.common_args += ["-o", "GSSAPIAuthentication=no",
- "-o", "PubkeyAuthentication=no"]
- else:
- self.common_args += ["-o", "KbdInteractiveAuthentication=no",
- "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
- "-o", "PasswordAuthentication=no"]
- if self.user != pwd.getpwuid(os.geteuid())[0]:
- self.common_args += ["-o", "User="+self.user]
- self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
-
- return self
-
- def _run(self, cmd, indata):
- if indata:
- # do not use pseudo-pty
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
- else:
- # try to use a pseudo-pty
- try:
- # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
- master, slave = pty.openpty()
- p = subprocess.Popen(cmd, stdin=slave,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = os.fdopen(master, 'w', 0)
- os.close(slave)
- except:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
-
- return (p, stdin)
-
- def _password_cmd(self):
- if self.password:
- try:
- p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
- except OSError:
- raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
- (self.rfd, self.wfd) = os.pipe()
- return ["sshpass", "-d%d" % self.rfd]
- return []
-
- def _send_password(self):
- if self.password:
- os.close(self.rfd)
- os.write(self.wfd, "%s\n" % self.password)
- os.close(self.wfd)
-
- def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- # We can't use p.communicate here because the ControlMaster may have stdout open as well
- stdout = ''
- stderr = ''
- rpipes = [p.stdout, p.stderr]
- if indata:
- try:
- stdin.write(indata)
- stdin.close()
- except:
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
- # Read stdout/stderr from process
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
-
- # fail early if the sudo/su password is wrong
- if self.runner.sudo and sudoable:
- if self.runner.sudo_pass:
- incorrect_password = gettext.dgettext(
- "sudo", "Sorry, try again.")
- if stdout.endswith("%s\r\n%s" % (incorrect_password,
- prompt)):
- raise errors.AnsibleError('Incorrect sudo password')
-
- if stdout.endswith(prompt):
- raise errors.AnsibleError('Missing sudo password')
-
- if self.runner.su and su and self.runner.su_pass:
- incorrect_password = gettext.dgettext(
- "su", "Sorry")
- if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect su password')
-
- if p.stdout in rfd:
- dat = os.read(p.stdout.fileno(), 9000)
- stdout += dat
- if dat == '':
- rpipes.remove(p.stdout)
- if p.stderr in rfd:
- dat = os.read(p.stderr.fileno(), 9000)
- stderr += dat
- if dat == '':
- rpipes.remove(p.stderr)
- # only break out if no pipes are left to read or
- # the pipes are completely read and
- # the process is terminated
- if (not rpipes or not rfd) and p.poll() is not None:
- break
- # No pipes are left to read but process is not yet terminated
- # Only then it is safe to wait for the process to be finished
- # NOTE: Actually p.poll() is always None here if rpipes is empty
- elif not rpipes and p.poll() == None:
- p.wait()
- # The process is terminated. Since no pipes to read from are
- # left, there is no need to call select() again.
- break
- # close stdin after process is terminated and stdout/stderr are read
- # completely (see also issue #848)
- stdin.close()
- return (p.returncode, stdout, stderr)
-
- def not_in_host_file(self, host):
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
- try:
- host_fh = open(hf)
- except IOError, e:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- if (hfiles_not_found == len(host_file_list)):
- vvv("EXEC previous known host file not found for %s" % host)
- return True
-
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False):
- ''' run a command on the remote host '''
-
- ssh_cmd = self._password_cmd()
- ssh_cmd += ["ssh", "-C"]
- if not in_data:
- # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
- # inside a tty automatically invokes the python interactive-mode but the modules are not
- # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
- ssh_cmd += ["-tt"]
- if utils.VERBOSITY > 3:
- ssh_cmd += ["-vvv"]
- else:
- ssh_cmd += ["-q"]
- ssh_cmd += self.common_args
-
- if self.ipv6:
- ssh_cmd += ['-6']
- #ssh_cmd += [self.host]
- ssh_cmd += [self.instance_name]
-
- if su and su_user:
- sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
- prompt_re = re.compile(prompt)
- ssh_cmd.append(sudocmd)
- elif not self.runner.sudo or not sudoable:
- prompt = None
- if executable:
- ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
- else:
- ssh_cmd.append(cmd)
- else:
- sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd)
- ssh_cmd.append(sudocmd)
-
- vvv("EXEC %s" % ssh_cmd, host=self.host)
-
- not_in_host_file = self.not_in_host_file(self.host)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
-
- # create process
- (p, stdin) = self._run(ssh_cmd, in_data)
-
- self._send_password()
-
- if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
- (self.runner.su and su and self.runner.su_pass):
- # several cases are handled for sudo privileges with password
- # * NOPASSWD (tty & no-tty): detect success_key on stdout
- # * without NOPASSWD:
- # * detect prompt on stdout (tty)
- # * detect prompt on stderr (no-tty)
- fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- sudo_output = ''
- sudo_errput = ''
-
- while True:
- if success_key in sudo_output or \
- (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
- (self.runner.su_pass and prompt_re.match(sudo_output)):
- break
-
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- [p.stdout], self.runner.timeout)
- if p.stderr in rfd:
- chunk = p.stderr.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
- sudo_errput += chunk
- incorrect_password = gettext.dgettext(
- "sudo", "Sorry, try again.")
- if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
- raise errors.AnsibleError('Incorrect sudo password')
- elif sudo_errput.endswith(prompt):
- stdin.write(self.runner.sudo_pass + '\n')
-
- if p.stdout in rfd:
- chunk = p.stdout.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
- sudo_output += chunk
-
- if not rfd:
- # timeout. wrap up process communication
- stdout = p.communicate()
- raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt')
-
- if success_key not in sudo_output:
- if sudoable:
- stdin.write(self.runner.sudo_pass + '\n')
- elif su:
- stdin.write(self.runner.su_pass + '\n')
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
- controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
- 'unknown configuration option: ControlPersist' in stderr
-
- if C.HOST_KEY_CHECKING:
- if ssh_cmd[0] == "sshpass" and p.returncode == 6:
- raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
-
- if p.returncode != 0 and controlpersisterror:
- raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
- if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
-
- return (p.returncode, '', stdout, stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [in_path,host + ":" + pipes.quote(out_path)]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
-
- (p, stdin) = self._run(cmd, indata)
-
- self._send_password()
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
-
- if returncode != 0:
- raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from remote to local '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [host + ":" + in_path, out_path]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "get %s %s\n" % (in_path, out_path)
-
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- self._send_password()
- stdout, stderr = p.communicate(indata)
-
- if p.returncode != 0:
- raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
-
- def close(self):
- ''' not applicable since we're executing openssh binaries '''
- pass
-
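The plugin deleted above is a copy of Ansible's stock ssh connection plugin, modified to reach an XOS instance by hopping through its compute node: it reads instance_id and instance_name from the inventory variables and splices a ProxyCommand into the common ssh arguments. A minimal sketch of that argument assembly, using a hypothetical helper name and example hosts that are not part of the deleted plugin:

    import pipes

    def build_proxy_ssh_args(private_key_file, instance_id, node_host, instance_name, cmd):
        # First hop: ssh to the instance's account on the compute node; the
        # second hop resolves the instance by its name.
        proxy = "ProxyCommand ssh -q -i %s %s@%s" % (private_key_file, instance_id, node_host)
        return ["ssh", "-C",
                "-o", proxy,
                "-o", "StrictHostKeyChecking=no",
                instance_name,
                "/bin/sh -c " + pipes.quote(cmd)]

    # build_proxy_ssh_args("/opt/key", "instance-00000045",
    #                      "node1.example.org", "onlab_hpc-355", "uptime")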
diff --git a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant.py b/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant.py
deleted file mode 100644
index 0b777c7..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice, Tag
-from services.vsg.models import VSGService, VSGTenant
-from services.hpc.models import HpcService, CDNPrefix
-from xos.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from broadbandshield import BBS
-
-logger = Logger(level=logging.INFO)
-
-ENABLE_QUICK_UPDATE=False
-
-CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
-
-class SyncVSGTenant(SyncInstanceUsingAnsible):
- provides=[VSGTenant]
- observes=VSGTenant
- requested_interval=0
- template_name = "sync_vcpetenant.yaml"
-
- def __init__(self, *args, **kwargs):
- super(SyncVSGTenant, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = VSGTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
- else:
- objs = VSGTenant.get_deleted_tenant_objects()
-
- return objs
-
- def get_vcpe_service(self, o):
- if not o.provider_service:
- return None
-
- vcpes = VSGService.get_service_objects().filter(id=o.provider_service.id)
- if not vcpes:
- return None
-
- return vcpes[0]
-
- def get_extra_attributes(self, o):
- # This is a place to include extra attributes that aren't part of the
- # object itself. In the case of vCPE, we need to know:
- # 1) the addresses of dnsdemux, to setup dnsmasq in the vCPE
- # 2) CDN prefixes, so we know what URLs to send to dnsdemux
- # 3) BroadBandShield server addresses, for parental filtering
- # 4) vlan_ids, for setting up networking in the vCPE VM
-
- vcpe_service = self.get_vcpe_service(o)
-
- dnsdemux_ip = None
- cdn_prefixes = []
-
- cdn_config_fn = "/opt/xos/synchronizers/vsg/cdn_config"
- if os.path.exists(cdn_config_fn):
- # manual CDN configuration
- # the first line is the address of dnsredir
- # the remaining lines are domain names, one per line
- lines = file(cdn_config_fn).readlines()
- if len(lines)>=2:
- dnsdemux_ip = lines[0].strip()
- cdn_prefixes = [x.strip() for x in lines[1:] if x.strip()]
- else:
- # automatic CDN configuration
- # it learns everything from CDN objects in XOS
- # not tested on pod.
- if vcpe_service.backend_network_label:
- # Connect to dnsdemux using the network specified by
- # vcpe_service.backend_network_label
- for service in HpcService.objects.all():
- for slice in service.slices.all():
- if "dnsdemux" in slice.name:
- for instance in slice.instances.all():
- for ns in instance.ports.all():
- if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
- dnsdemux_ip = ns.ip
- if not dnsdemux_ip:
- logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label,extra=o.tologdict())
- else:
- # Connect to dnsdemux using the instance's public address
- for service in HpcService.objects.all():
- for slice in service.slices.all():
- if "dnsdemux" in slice.name:
- for instance in slice.instances.all():
- if not dnsdemux_ip:
- try:
- dnsdemux_ip = socket.gethostbyname(instance.node.name)
- except:
- pass
- if not dnsdemux_ip:
- logger.info("failed to find a dnsdemux with a public address",extra=o.tologdict())
-
- for prefix in CDNPrefix.objects.all():
- cdn_prefixes.append(prefix.prefix)
-
- dnsdemux_ip = dnsdemux_ip or "none"
-
- # Broadbandshield can either be set up internally, using vcpe_service.bbs_slice,
- # or it can be setup externally using vcpe_service.bbs_server.
-
- bbs_addrs = []
- if vcpe_service.bbs_slice:
- if vcpe_service.backend_network_label:
- for bbs_instance in vcpe_service.bbs_slice.instances.all():
- for ns in bbs_instance.ports.all():
- if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
- bbs_addrs.append(ns.ip)
- else:
- logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not",extra=o.tologdict())
- if not bbs_addrs:
- logger.info("failed to find any usable addresses on bbs_slice",extra=o.tologdict())
- elif vcpe_service.bbs_server:
- bbs_addrs.append(vcpe_service.bbs_server)
- else:
- logger.info("neither bbs_slice nor bbs_server is configured in the vCPE",extra=o.tologdict())
-
- s_tags = []
- c_tags = []
- if o.volt:
- s_tags.append(o.volt.s_tag)
- c_tags.append(o.volt.c_tag)
-
- try:
- full_setup = Config().observer_full_setup
- except:
- full_setup = True
-
- safe_macs=[]
- if vcpe_service.url_filter_kind == "safebrowsing":
- if o.volt and o.volt.subscriber:
- for user in o.volt.subscriber.devices:
- level = user.get("level",None)
- mac = user.get("mac",None)
- if level in ["G", "PG"]:
- if mac:
- safe_macs.append(mac)
-
- fields = {"s_tags": s_tags,
- "c_tags": c_tags,
- "dnsdemux_ip": dnsdemux_ip,
- "cdn_prefixes": cdn_prefixes,
- "bbs_addrs": bbs_addrs,
- "full_setup": full_setup,
- "isolation": o.instance.isolation,
- "safe_browsing_macs": safe_macs,
- "container_name": "vcpe-%s-%s" % (s_tags[0], c_tags[0]),
- "dns_servers": [x.strip() for x in vcpe_service.dns_servers.split(",")],
- "url_filter_kind": vcpe_service.url_filter_kind }
-
- # add in the sync_attributes that come from the SubscriberRoot object
-
- if o.volt and o.volt.subscriber and hasattr(o.volt.subscriber, "sync_attributes"):
- for attribute_name in o.volt.subscriber.sync_attributes:
- fields[attribute_name] = getattr(o.volt.subscriber, attribute_name)
-
- return fields
-
- def sync_fields(self, o, fields):
- # the super causes the playbook to be run
-
- super(SyncVSGTenant, self).sync_fields(o, fields)
-
- # now do all of our broadbandshield stuff...
-
- service = self.get_vcpe_service(o)
- if not service:
- # Ansible uses the service's keypair in order to SSH into the
- # instance. It would be bad if the slice had no service.
-
- raise Exception("Slice %s is not associated with a service" % instance.slice.name)
-
- # Make sure the slice is configured properly
- if (service != o.instance.slice.service):
- raise Exception("Slice %s is associated with some service that is not %s" % (str(instance.slice), str(service)))
-
- # only enable filtering if we have a subscriber object (see below)
- url_filter_enable = False
-
- # for attributes that come from CordSubscriberRoot
- if o.volt and o.volt.subscriber:
- url_filter_enable = o.volt.subscriber.url_filter_enable
- url_filter_level = o.volt.subscriber.url_filter_level
- url_filter_users = o.volt.subscriber.devices
-
- if service.url_filter_kind == "broadbandshield":
- # disable url_filter if there are no bbs_addrs
- if url_filter_enable and (not fields.get("bbs_addrs",[])):
- logger.info("disabling url_filter because there are no bbs_addrs",extra=o.tologdict())
- url_filter_enable = False
-
- if url_filter_enable:
- bbs_hostname = None
- if service.bbs_api_hostname and service.bbs_api_port:
- bbs_hostname = service.bbs_api_hostname
- else:
- # TODO: extract from slice
- bbs_hostname = "cordcompute01.onlab.us"
-
- if service.bbs_api_port:
- bbs_port = service.bbs_api_port
- else:
- bbs_port = 8018
-
- if not bbs_hostname:
- logger.info("broadbandshield is not configured",extra=o.tologdict())
- else:
- tStart = time.time()
- bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
- bbs.sync(url_filter_level, url_filter_users)
-
- if o.hpc_client_ip:
- logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip),extra=o.tologdict())
- bbs.associate(o.hpc_client_ip)
- else:
- logger.info("no hpc_client_ip to associate",extra=o.tologdict())
-
- logger.info("bbs update time %d" % int(time.time()-tStart),extra=o.tologdict())
-
-
- def run_playbook(self, o, fields):
- ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
- quick_update = (o.last_ansible_hash == ansible_hash)
-
- if ENABLE_QUICK_UPDATE and quick_update:
- logger.info("quick_update triggered; skipping ansible recipe",extra=o.tologdict())
- else:
- if o.instance.isolation in ["container", "container_vm"]:
- super(SyncVSGTenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")
- else:
- if CORD_USE_VTN:
- super(SyncVSGTenant, self).run_playbook(o, fields, template_name="sync_vcpetenant_vtn.yaml")
- else:
- super(SyncVSGTenant, self).run_playbook(o, fields)
-
- o.last_ansible_hash = ansible_hash
-
- def delete_record(self, m):
- pass
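SyncVSGTenant above follows the standard XOS SyncInstanceUsingAnsible pattern: fetch_pending selects tenants whose enacted timestamp lags their updated timestamp, get_extra_attributes turns the tenant into a dict of Ansible variables, and run_playbook hashes that dict so an unchanged tenant can skip the playbook when ENABLE_QUICK_UPDATE is turned on. A small runnable sketch of that hash check (the helper name is invented for illustration):

    import hashlib

    def needs_playbook_run(fields, last_ansible_hash):
        # Same digest the synchronizer stores in o.last_ansible_hash.
        ansible_hash = hashlib.md5(repr(sorted(fields.items())).encode()).hexdigest()
        return ansible_hash != last_ansible_hash, ansible_hash

    # changed, new_hash = needs_playbook_run({"s_tags": [222], "c_tags": [111]}, None)
    # changed is True on the first pass; the new hash is stored on the tenant afterwards.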
diff --git a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant.yaml b/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant.yaml
deleted file mode 100644
index 880895e..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant.yaml
+++ /dev/null
@@ -1,179 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: ubuntu
- sudo: yes
- vars:
- cdn_enable: {{ cdn_enable }}
- dnsdemux_ip: {{ dnsdemux_ip }}
- firewall_enable: {{ firewall_enable }}
- url_filter_enable: {{ url_filter_enable }}
- c_tags:
- {% for c_tag in c_tags %}
- - {{ c_tag }}
- {% endfor %}
- s_tags:
- {% for s_tag in s_tags %}
- - {{ s_tag }}
- {% endfor %}
- firewall_rules:
- {% for firewall_rule in firewall_rules.split("\n") %}
- - {{ firewall_rule }}
- {% endfor %}
- cdn_prefixes:
- {% for prefix in cdn_prefixes %}
- - {{ prefix }}
- {% endfor %}
- bbs_addrs:
- {% for bbs_addr in bbs_addrs %}
- - {{ bbs_addr }}
- {% endfor %}
- dns_servers:
- {% for dns_server in dns_servers %}
- - {{ dns_server }}
- {% endfor %}
- nat_ip: {{ nat_ip }}
- nat_mac: {{ nat_mac }}
- lan_ip: {{ lan_ip }}
- lan_mac: {{ lan_mac }}
- wan_ip: {{ wan_ip }}
- wan_mac: {{ wan_mac }}
- wan_container_mac: {{ wan_container_mac }}
- wan_next_hop: 10.0.1.253 # FIX ME
- private_ip: {{ private_ip }}
- private_mac: {{ private_mac }}
- hpc_client_ip: {{ hpc_client_ip }}
- hpc_client_mac: {{ hpc_client_mac }}
- keystone_tenant_id: {{ keystone_tenant_id }}
- keystone_user_id: {{ keystone_user_id }}
- rabbit_user: {{ rabbit_user }}
- rabbit_password: {{ rabbit_password }}
- rabbit_host: {{ rabbit_host }}
- safe_browsing:
- {% for mac in safe_browsing_macs %}
- - {{ mac }}
- {% endfor %}
- uplink_speed: {{ uplink_speed }}
- downlink_speed: {{ downlink_speed }}
- status: {{ status }}
- enable_uverse: {{ enable_uverse }}
- url_filter_kind: {{ url_filter_kind }}
-
- tasks:
-{% if full_setup %}
- - name: Docker repository
- copy: src=/opt/xos/synchronizers/vsg/files/docker.list
- dest=/etc/apt/sources.list.d/docker.list
-
- - name: Import the repository key
- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
-
- - name: install Docker
- apt: name=lxc-docker state=present update_cache=yes
-
- - name: install python-setuptools
- apt: name=python-setuptools state=present
-
- - name: install pip
- easy_install: name=pip
-
- - name: install docker-py
- pip: name=docker-py version=0.5.3
-
- - name: install Pipework
- get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
- dest=/usr/local/bin/pipework
- mode=0755
-
- - name: make sure /etc/dnsmasq.d exists
- file: path=/etc/dnsmasq.d state=directory owner=root group=root
-
- - name: Disable resolvconf service
- shell: service resolvconf stop && echo manual > /etc/init/resolvconf.override && rm -f /etc/resolv.conf
-
- - name: Install resolv.conf
- copy: src=/opt/xos/synchronizers/vsg/files/vm-resolv.conf
- dest=/etc/resolv.conf
-
- - name: Verify whether the vcpe_stats_notifier cron job is already running (the [] in the pgrep pattern avoids matching this shell)
- shell: pgrep -f [v]cpe_stats_notifier | wc -l
- register: cron_job_pids_count
-
-# - name: DEBUG
-# debug: var=cron_job_pids_count.stdout
-
-# - name: make sure ~/bin exists
-# file: path=~/bin state=directory owner=root group=root
-# when: cron_job_pids_count.stdout == "0"
-
- - name: Copy cron job to destination
- copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
- dest=/usr/local/sbin/vcpe_stats_notifier.py
- when: cron_job_pids_count.stdout == "0"
-
- - name: install python-kombu
- apt: name=python-kombu state=present
- when: cron_job_pids_count.stdout == "0"
-
- - name: Initiate vcpe_stats_notifier cron job
- command: sudo python /usr/local/sbin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
- async: 9999999999999999
- poll: 0
- when: cron_job_pids_count.stdout == "0"
-{% endif %}
-
- - name: vCPE upstart
- template: src=/opt/xos/synchronizers/vsg/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
-
- - name: vCPE startup script
- template: src=/opt/xos/synchronizers/vsg/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
- notify:
-# - restart vcpe
- - stop vcpe
- - remove container
- - start vcpe
-
- - name: create /etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
- file: path=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
-
- - name: vCPE basic dnsmasq config
- copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: dnsmasq config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
-# These are samples, not necessary for correct function of demo
-
-# - name: networking info
-# template: src=/opt/xos/synchronizers/vsg/templates/vlan_sample.j2 dest=/etc/vlan_sample owner=root group=root
-
-# - name: firewall info
-# template: src=/opt/xos/synchronizers/vsg/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
-
- - name: Make sure vCPE service is running
- service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
- handlers:
- # Dnsmasq is automatically restarted in the container
- - name: restart dnsmasq
- shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} killall dnsmasq
-
- - name: restart vcpe
- shell: service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} start
-
- - name: stop vcpe
- service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
-
- - name: remove container
- docker: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-vcpe
-
- - name: start vcpe
- service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
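The playbook deleted above is not plain YAML: it is a Jinja2 template that the synchronizer renders with the tenant's field dict (the framework's run_template_ssh helper imported in sync_vcpetenant.py) before handing the result to Ansible, which is why {% for %} loops sit alongside literal task definitions. A minimal sketch of that rendering step, assuming the jinja2 package and a made-up playbook fragment:

    from jinja2 import Template

    PLAY_FRAGMENT = """
    - hosts: {{ instance_name }}
      vars:
        dns_servers:
    {% for dns_server in dns_servers %}
        - {{ dns_server }}
    {% endfor %}
    """

    fields = {"instance_name": "mysite_vsg-1", "dns_servers": ["8.8.8.8", "8.8.4.4"]}
    print(Template(PLAY_FRAGMENT).render(**fields))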
diff --git a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant_new.yaml b/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant_new.yaml
deleted file mode 100644
index 9c59280..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant_new.yaml
+++ /dev/null
@@ -1,136 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: {{ username }}
- sudo: yes
- vars:
- container_name: {{ container_name }}
- cdn_enable: {{ cdn_enable }}
- dnsdemux_ip: {{ dnsdemux_ip }}
- firewall_enable: {{ firewall_enable }}
- url_filter_enable: {{ url_filter_enable }}
- c_tags:
- {% for c_tag in c_tags %}
- - {{ c_tag }}
- {% endfor %}
- s_tags:
- {% for s_tag in s_tags %}
- - {{ s_tag }}
- {% endfor %}
- firewall_rules:
- {% for firewall_rule in firewall_rules.split("\n") %}
- - {{ firewall_rule }}
- {% endfor %}
- cdn_prefixes:
- {% for prefix in cdn_prefixes %}
- - {{ prefix }}
- {% endfor %}
- bbs_addrs:
- {% for bbs_addr in bbs_addrs %}
- - {{ bbs_addr }}
- {% endfor %}
- dns_servers:
- {% for dns_server in dns_servers %}
- - {{ dns_server }}
- {% endfor %}
- nat_ip: {{ nat_ip }}
- nat_mac: {{ nat_mac }}
- lan_ip: {{ lan_ip }}
- lan_mac: {{ lan_mac }}
- wan_ip: {{ wan_ip }}
- wan_mac: {{ wan_mac }}
- wan_container_mac: {{ wan_container_mac }}
- wan_next_hop: 10.0.1.253 # FIX ME
- private_ip: {{ private_ip }}
- private_mac: {{ private_mac }}
- hpc_client_ip: {{ hpc_client_ip }}
- hpc_client_mac: {{ hpc_client_mac }}
- keystone_tenant_id: {{ keystone_tenant_id }}
- keystone_user_id: {{ keystone_user_id }}
- rabbit_user: {{ rabbit_user }}
- rabbit_password: {{ rabbit_password }}
- rabbit_host: {{ rabbit_host }}
- safe_browsing:
- {% for mac in safe_browsing_macs %}
- - {{ mac }}
- {% endfor %}
- uplink_speed: {{ uplink_speed }}
- downlink_speed: {{ downlink_speed }}
- status: {{ status }}
- enable_uverse: {{ enable_uverse }}
- url_filter_kind: {{ url_filter_kind }}
-
- tasks:
- - name: Verify whether the vcpe_stats_notifier cron job is already running (the [] in the pgrep pattern avoids matching this shell)
- shell: pgrep -f [v]cpe_stats_notifier | wc -l
- register: cron_job_pids_count
-
-# - name: DEBUG
-# debug: var=cron_job_pids_count.stdout
-
- - name: make sure ~/bin exists
- file: path=~/bin state=directory owner=root group=root
- when: cron_job_pids_count.stdout == "0"
-
- - name: Copy cron job to destination
- copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
- dest=~/bin/vcpe_stats_notifier.py
- when: cron_job_pids_count.stdout == "0"
-
- - name: install python-kombu
- apt: name=python-kombu state=present
- when: cron_job_pids_count.stdout == "0"
-
- - name: Initiate vcpe_stats_notifier cron job
- command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
- async: 9999999999999999
- poll: 0
- when: cron_job_pids_count.stdout == "0"
-
- - name: vCPE basic dnsmasq config
- copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: dnsmasq config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: create directory for "safe" config
- file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory
-
- - name: dnsmasq "safe" config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: copy base ufw files
- synchronize: src=/opt/xos/synchronizers/vsg/files/etc/ufw/ dest=/var/container_volumes/{{ container_name }}/etc/ufw/
- notify:
- - reload ufw
-
- - name: redirection rules for safe DNS
- template: src=/opt/xos/synchronizers/vsg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/etc/ufw/before.rules owner=root group=root
- notify:
- - reload ufw
-
- - name: base ufw setup uses /etc/rc.local
- copy: src=/opt/xos/synchronizers/vsg/files/etc/rc.local dest=/var/container_volumes/{{ container_name }}/etc/ owner=root group=root
- notify:
- - copy in /etc/rc.local
-
- handlers:
- # Dnsmasq is automatically restarted in the container
- - name: restart dnsmasq
- shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
-
- - name: reload ufw
- shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /usr/sbin/ufw reload"
-
- # Use docker cp instead of single-file volume
- # The reason is that changes to external file volume don't show up inside the container
- # Probably Ansible deletes and then recreates the external file, and container has old version
- - name: copy in /etc/rc.local
- shell: docker cp /var/container_volumes/{{ container_name }}/etc/rc.local {{ container_name }}:/etc/
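This container-based variant writes all configuration under /var/container_volumes/<container_name>/ on the host and relies on bind mounts, except for /etc/rc.local, which the final handler pushes with docker cp: when Ansible replaces a single bind-mounted file on the host, the container keeps the old inode, as the comment above notes. A sketch of that handler as a hypothetical Python helper, shown only to spell out the command:

    import subprocess

    def copy_rc_local(container_name):
        src = "/var/container_volumes/%s/etc/rc.local" % container_name
        # docker cp re-copies the file into the container's filesystem, so the
        # container sees the new contents even though the host-side inode changed.
        subprocess.check_call(["docker", "cp", src, "%s:/etc/" % container_name])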
diff --git a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant_vtn.yaml b/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant_vtn.yaml
deleted file mode 100644
index 0226354..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/sync_vcpetenant_vtn.yaml
+++ /dev/null
@@ -1,244 +0,0 @@
----
-- hosts: {{ instance_name }}
- #gather_facts: False
- connection: ssh
- user: ubuntu
- sudo: yes
- vars:
- container_name: {{ container_name }}
- cdn_enable: {{ cdn_enable }}
- dnsdemux_ip: {{ dnsdemux_ip }}
- firewall_enable: {{ firewall_enable }}
- url_filter_enable: {{ url_filter_enable }}
- c_tags:
- {% for c_tag in c_tags %}
- - {{ c_tag }}
- {% endfor %}
- s_tags:
- {% for s_tag in s_tags %}
- - {{ s_tag }}
- {% endfor %}
- firewall_rules:
- {% for firewall_rule in firewall_rules.split("\n") %}
- - {{ firewall_rule }}
- {% endfor %}
- cdn_prefixes:
- {% for prefix in cdn_prefixes %}
- - {{ prefix }}
- {% endfor %}
- bbs_addrs:
- {% for bbs_addr in bbs_addrs %}
- - {{ bbs_addr }}
- {% endfor %}
- dns_servers:
- {% for dns_server in dns_servers %}
- - {{ dns_server }}
- {% endfor %}
- nat_ip: {{ nat_ip }}
- nat_mac: {{ nat_mac }}
- lan_ip: {{ lan_ip }}
- lan_mac: {{ lan_mac }}
- wan_ip: {{ wan_ip }}
- wan_mac: {{ wan_mac }}
- wan_container_ip: {{ wan_container_ip }}
- wan_container_netbits: {{ wan_container_netbits }}
- wan_container_mac: {{ wan_container_mac }}
- wan_container_gateway_ip: {{ wan_container_gateway_ip }}
- wan_vm_ip: {{ wan_vm_ip }}
- wan_vm_mac: {{ wan_vm_mac }}
- wan_next_hop: 10.0.1.253 # FIX ME
- private_ip: {{ private_ip }}
- private_mac: {{ private_mac }}
- hpc_client_ip: {{ hpc_client_ip }}
- hpc_client_mac: {{ hpc_client_mac }}
- keystone_tenant_id: {{ keystone_tenant_id }}
- keystone_user_id: {{ keystone_user_id }}
- rabbit_user: {{ rabbit_user }}
- rabbit_password: {{ rabbit_password }}
- rabbit_host: {{ rabbit_host }}
- safe_browsing:
- {% for mac in safe_browsing_macs %}
- - {{ mac }}
- {% endfor %}
- uplink_speed: {{ uplink_speed }}
- downlink_speed: {{ downlink_speed }}
- status: {{ status }}
- enable_uverse: {{ enable_uverse }}
- url_filter_kind: {{ url_filter_kind }}
-
-
- tasks:
- - name: Add hostname to /etc/hosts
- lineinfile: dest=/etc/hosts
- regexp='^127\.0\.0\.1'
- line="127.0.0.1 localhost {{ '{{' }} ansible_hostname {{ '}}' }}"
- owner=root group=root mode=0644
-
- - name: Verify that bridge-utils is installed
- shell: stat /sbin/brctl
-
- - name: Verify that docker is installed
- shell: stat /usr/bin/docker
-
- - name: Check to see if network is setup
- stat: path=/root/network_is_setup
- register: network_is_setup
-
- - name: set up the network
- shell: "{{ '{{' }} item {{ '}}' }}"
- with_items:
- - ip link del link eth0 eth0.500 || true
- - ip link add link eth0 eth0.500 type vlan id 500
- - ip link set eth0.500 up
- - ifconfig br-wan down || true
- - brctl delbr br-wan || true
- - brctl addbr br-wan
- - brctl addif br-wan eth0.500
- - ifconfig br-wan hw ether {{ wan_vm_mac }}
- - ip addr add {{ wan_vm_ip }}/{{ wan_container_netbits }} dev br-wan
- - ip link set br-wan up
- - ip route del default || true
- - ip route add default via {{ wan_container_gateway_ip }}
- - ip link set dev br-wan promisc on
- when: network_is_setup.stat.exists == False
-
- - name: Remember that the network is setup, so we never do the above again
- shell: touch /root/network_is_setup
-
-{% if full_setup %}
- - name: Check to see if environment is setup
- stat: path=/root/environment_is_setup
- register: environment_is_setup
-
-# Everything here is now baked into the vCPE image
-# Leave this spot in place for future temporary setup stuff
-
- - name: Remember that the environment is setup, so we never do the above again
- shell: touch /root/environment_is_setup
-
- - name: Verify whether the vcpe_stats_notifier cron job is already running (the [] in the pgrep pattern avoids matching this shell)
- shell: pgrep -f [v]cpe_stats_notifier | wc -l
- register: cron_job_pids_count
-
-# - name: DEBUG
-# debug: var=cron_job_pids_count.stdout
-
-# - name: make sure ~/bin exists
-# file: path=~/bin state=directory owner=root group=root
-# when: cron_job_pids_count.stdout == "0"
-
-# - name: Copy cron job to destination
-# copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
-# dest=/usr/local/sbin/vcpe_stats_notifier.py
-# when: cron_job_pids_count.stdout == "0"
-
-# - name: install python-kombu
-# apt: name=python-kombu state=present
-# when: cron_job_pids_count.stdout == "0"
-
-# - name: Initiate vcpe_stats_notifier cron job
-# command: sudo python /usr/local/sbin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
-# async: 9999999999999999
-# poll: 0
-# when: cron_job_pids_count.stdout == "0"
-{% endif %}
-
- - name: vCPE upstart
- template: src=/opt/xos/synchronizers/vsg/templates/vcpe.conf.j2 dest=/etc/init/{{ container_name }}.conf
-
- - name: vCPE startup script
- template: src=/opt/xos/synchronizers/vsg/templates/start-vcpe-vtn.sh.j2 dest=/usr/local/sbin/start-{{ container_name }}.sh mode=0755
- notify:
-# - restart vcpe
- - stop vcpe
- - remove container
- - start vcpe
-
- - name: create /var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/
- file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory owner=root group=root
-
- - name: vCPE basic dnsmasq config
- copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: dnsmasq config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: dnsmasq "safe" config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: create /var/container_volumes/{{ container_name }}/mount/
- file: path=/var/container_volumes/{{ container_name }}/mount state=directory owner=root group=root
-
- - name: redirection rules for safe DNS
- template: src=/opt/xos/synchronizers/vsg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/mount/before.rules owner=root group=root mode=0644
- notify:
- - reload ufw
-
- - name: base ufw setup uses /etc/rc.local
- template: src=/opt/xos/synchronizers/vsg/templates/rc.local.j2 dest=/var/container_volumes/{{ container_name }}/mount/rc.local owner=root group=root mode=0755
- notify:
- - rerun /etc/rc.local
-
- - name: create directory for local programs
- file: path=/var/container_volumes/{{ container_name }}/usr/local/sbin state=directory
-
- - name: bandwidth limit script
- template: src=/opt/xos/synchronizers/vsg/templates/bwlimit.sh.j2 dest=/var/container_volumes/{{ container_name }}/usr/local/sbin/bwlimit.sh owner=root group=root mode=0755
- notify:
- - reset bwlimits
-
- - name: create directory for simple webserver
- file: path=/var/container_volumes/{{ container_name }}/etc/service/message state=directory
-
- - name: copy simple webserver
- copy: src=/opt/xos/synchronizers/vsg/files/etc/service/ dest=/var/container_volumes/{{ container_name }}/etc/service/ owner=root group=root
- when: status != "enabled"
-
- - name: make webserver script executable
- file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run mode=0755
- when: status != "enabled"
-
- - name: generate the message page
- template: src=/opt/xos/synchronizers/vsg/templates/message.html.j2 dest=/var/container_volumes/{{ container_name }}/etc/service/message/message.html owner=root group=root mode=0644
- when: status != "enabled"
- #notify: restart vcpe
-
- - name: remove simple webserver
- file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run state=absent
- when: status == "enabled"
- #notify: restart vcpe
-
- - name: Make sure vCPE service is running
- service: name={{ container_name }} state=started
-
- handlers:
- # Dnsmasq is automatically restarted in the container
- - name: restart dnsmasq
- shell: docker exec {{ container_name }} killall dnsmasq
-
- - name: stop vcpe
- service: name={{ container_name }} state=stopped
-
- - name: remove container
- docker: name={{ container_name }} state=absent image=docker-vcpe
-
- - name: start vcpe
- service: name={{ container_name }} state=started
-
- - name: reload ufw
- shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /sbin/iptables -t nat -F POSTROUTING; /usr/sbin/ufw reload"
-
- - name: rerun /etc/rc.local
- shell: docker exec {{ container_name }} bash -c "/etc/rc.local"
-
- - name: reset bwlimits
- shell: docker exec {{ container_name }} bash -c "/usr/local/sbin/bwlimit.sh restart"
-
- - name: restart vcpe
- shell: service {{ container_name }} stop; sleep 1; service {{ container_name }} start
diff --git a/xos/onboard/vsg-old/synchronizer/steps/test.yaml b/xos/onboard/vsg-old/synchronizer/steps/test.yaml
deleted file mode 100644
index fc8251d..0000000
--- a/xos/onboard/vsg-old/synchronizer/steps/test.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- hosts: {{ instance_name }}
- connection: ssh
- user: ubuntu
- tasks:
- - name: foobar
- shell: echo foo > /tmp/foobar
diff --git a/xos/onboard/vsg-old/synchronizer/stop.sh b/xos/onboard/vsg-old/synchronizer/stop.sh
deleted file mode 100755
index e90e16c..0000000
--- a/xos/onboard/vsg-old/synchronizer/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f vcpe-observer.py
diff --git a/xos/onboard/vsg-old/synchronizer/supervisor/vcpe-observer.conf b/xos/onboard/vsg-old/synchronizer/supervisor/vcpe-observer.conf
deleted file mode 100644
index 2d90293..0000000
--- a/xos/onboard/vsg-old/synchronizer/supervisor/vcpe-observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:vcpe-observer]
-command=python /opt/xos/synchronizers/vsg/vcpe-synchronizer.py -C /opt/xos/synchronizers/vsg/vcpe_synchronizer_config
diff --git a/xos/onboard/vsg-old/synchronizer/templates/before.rules.j2 b/xos/onboard/vsg-old/synchronizer/templates/before.rules.j2
deleted file mode 100644
index b60aaef..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/before.rules.j2
+++ /dev/null
@@ -1,101 +0,0 @@
-#
-# rules.before
-#
-# Rules that should be run before the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-# ufw-before-input
-# ufw-before-output
-# ufw-before-forward
-#
-
-# nat Table rules
-*nat
-:POSTROUTING ACCEPT [0:0]
-
-# Forward traffic from eth1 through eth0.
--A POSTROUTING -o eth0 -j MASQUERADE
-
-# Set up NAT for CDN services
--A POSTROUTING -o eth2 -j MASQUERADE
-
-# DNS safe browsing
-{% if safe_browsing %}
-{% for mac in safe_browsing %}
--A PREROUTING -i eth1 -m mac --mac-source {{ mac }} -p udp --dport 53 -j REDIRECT --to-port 5353
--A PREROUTING -i eth1 -m mac --mac-source {{ mac }} -p tcp --dport 53 -j REDIRECT --to-port 5353
-{% endfor %}
-{% endif %}
-
-{% if status != "enabled" %}
--A PREROUTING -i eth1 -p tcp --dport 80 -j REDIRECT --to-port 8000
-{% endif %}
-
-# don't delete the 'COMMIT' line or these nat table rules won't be processed
-COMMIT
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw-before-input - [0:0]
-:ufw-before-output - [0:0]
-:ufw-before-forward - [0:0]
-:ufw-not-local - [0:0]
-# End required lines
-
-# allow all on loopback
--A ufw-before-input -i lo -j ACCEPT
--A ufw-before-output -o lo -j ACCEPT
-
-# quickly process packets for which we already have a connection
--A ufw-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-
-# drop INVALID packets (logs these in loglevel medium and higher)
--A ufw-before-input -m conntrack --ctstate INVALID -j ufw-logging-deny
--A ufw-before-input -m conntrack --ctstate INVALID -j DROP
-
-# ok icmp codes for INPUT
--A ufw-before-input -p icmp --icmp-type destination-unreachable -j ACCEPT
--A ufw-before-input -p icmp --icmp-type source-quench -j ACCEPT
--A ufw-before-input -p icmp --icmp-type time-exceeded -j ACCEPT
--A ufw-before-input -p icmp --icmp-type parameter-problem -j ACCEPT
--A ufw-before-input -p icmp --icmp-type echo-request -j ACCEPT
-
-# ok icmp code for FORWARD
--A ufw-before-forward -p icmp --icmp-type destination-unreachable -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type source-quench -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type time-exceeded -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type parameter-problem -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type echo-request -j ACCEPT
-
-# allow dhcp client to work
--A ufw-before-input -p udp --sport 67 --dport 68 -j ACCEPT
-
-#
-# ufw-not-local
-#
--A ufw-before-input -j ufw-not-local
-
-# if LOCAL, RETURN
--A ufw-not-local -m addrtype --dst-type LOCAL -j RETURN
-
-# if MULTICAST, RETURN
--A ufw-not-local -m addrtype --dst-type MULTICAST -j RETURN
-
-# if BROADCAST, RETURN
--A ufw-not-local -m addrtype --dst-type BROADCAST -j RETURN
-
-# all other non-local packets are dropped
--A ufw-not-local -m limit --limit 3/min --limit-burst 10 -j ufw-logging-deny
--A ufw-not-local -j DROP
-
-# allow MULTICAST mDNS for service discovery (be sure the MULTICAST line above
-# is uncommented)
--A ufw-before-input -p udp -d 224.0.0.251 --dport 5353 -j ACCEPT
-
-# allow MULTICAST UPnP for service discovery (be sure the MULTICAST line above
-# is uncommented)
--A ufw-before-input -p udp -d 239.255.255.250 --dport 1900 -j ACCEPT
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
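The ufw before.rules template deleted above does two things per vCPE: for each device MAC on the safe-browsing list it adds PREROUTING REDIRECT rules steering DNS (port 53, udp and tcp) to dnsmasq's alternate listener on port 5353, and when the subscriber is not enabled it redirects LAN HTTP traffic to the local message page on port 8000. A plain-Python sketch of the per-MAC rule generation (the helper name is invented for illustration):

    def generate_safe_dns_rules(safe_macs, dns_port=5353):
        rules = []
        for mac in safe_macs:
            for proto in ("udp", "tcp"):
                rules.append(
                    "-A PREROUTING -i eth1 -m mac --mac-source %s "
                    "-p %s --dport 53 -j REDIRECT --to-port %d" % (mac, proto, dns_port))
        return rules

    # generate_safe_dns_rules(["02:42:ac:11:00:02"]) -> two REDIRECT rules, one per protocol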
diff --git a/xos/onboard/vsg-old/synchronizer/templates/bwlimit.sh.j2 b/xos/onboard/vsg-old/synchronizer/templates/bwlimit.sh.j2
deleted file mode 100644
index b267ada..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/bwlimit.sh.j2
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/bin/bash
-# tc uses the following units when passed as a parameter.
-# kbps: Kilobytes per second
-# mbps: Megabytes per second
-# kbit: Kilobits per second
-# mbit: Megabits per second
-# bps: Bytes per second
-# Amounts of data can be specified in:
-# kb or k: Kilobytes
-# mb or m: Megabytes
-# mbit: Megabits
-# kbit: Kilobits
-# To get the byte figure from bits, divide the number by 8 bit
-#
-
-TC=/sbin/tc
-
-WAN=eth0 # External (WAN side) interface
-LAN=eth1 # Customer (LAN side) interface
-
-MAXRATE=10gbit # Maximum upload/download rate
-DNLD={{ downlink_speed }} # DOWNLOAD Limit
-UPLD={{ uplink_speed }} # UPLOAD Limit
-
-[ "$DNLD" == "None" ] && DNLD=$MAXRATE
-[ "$UPLD" == "None" ] && UPLD=$MAXRATE
-
-start() {
-
-# We'll use Hierarchical Token Bucket (HTB) to shape bandwidth.
-# For detailed configuration options, please consult Linux man
-# page.
-
- #
- # WAN side (upload limiting)
- #
- $TC qdisc add dev $WAN root handle 1: htb default 30
- $TC class add dev $WAN parent 1: classid 1:1 htb rate $MAXRATE burst 15k
-
- # The default class
- $TC class add dev $WAN parent 1:1 classid 1:30 htb rate 1kbit ceil $UPLD burst 15k
- $TC qdisc add dev $WAN parent 1:30 handle 30: sfq perturb 10
-
- # This class is exempt from the upload limit
- $TC class add dev $WAN parent 1:1 classid 1:50 htb rate 1kbit ceil $MAXRATE burst 15k
- $TC qdisc add dev $WAN parent 1:50 handle 50: sfq perturb 10
-
- #
- # LAN side (download limiting)
- #
- $TC qdisc add dev $LAN root handle 1: htb default 30
- $TC class add dev $LAN parent 1: classid 1:1 htb rate $MAXRATE burst 15k
-
- # The default class
- $TC class add dev $LAN parent 1:1 classid 1:30 htb rate 1kbit ceil $DNLD burst 15k
- $TC qdisc add dev $LAN parent 1:30 handle 30: sfq perturb 10
-
- # This class is exempt from the download limit
- $TC class add dev $LAN parent 1:1 classid 1:50 htb rate 1kbit ceil $MAXRATE burst 15k
- $TC qdisc add dev $LAN parent 1:50 handle 50: sfq perturb 10
-
-}
-
-stop() {
-
-# Stop the bandwidth shaping.
- $TC qdisc del dev $WAN root
- $TC qdisc del dev $LAN root
-
-}
-
-restart() {
-
-# Self-explanatory.
- stop
- sleep 1
- start
-
-}
-
-show() {
-
-# Display status of traffic control status.
- echo "Download ($LAN):"
- $TC -s class show dev $LAN
-
- echo ""
- echo "Upload ($WAN):"
- $TC -s class show dev $WAN
-
-}
-
-case "$1" in
-
- start)
-
- echo -n "Starting bandwidth shaping: "
- start
- echo "done"
- ;;
-
- stop)
-
- echo -n "Stopping bandwidth shaping: "
- stop
- echo "done"
- ;;
-
- restart)
-
- echo -n "Restarting bandwidth shaping: "
- restart
- echo "done"
- ;;
-
- show)
-
- echo "Bandwidth shaping status:"
- show
- echo ""
- ;;
-
- *)
-
- pwd=$(pwd)
- echo "Usage: tc.bash {start|stop|restart|show}"
- ;;
-
-esac
-
-exit 0
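The shaping script deleted above caps upload on the WAN interface and download on the LAN interface with Hierarchical Token Bucket: each interface gets an HTB root qdisc whose default class (1:30) is ceiled at the subscriber's rate, plus an exempt class (1:50) ceiled at MAXRATE. A sketch of the tc sequence for one interface, expressed as a hypothetical Python helper rather than the deleted shell:

    def build_htb_commands(dev, limit, maxrate="10gbit"):
        return [
            "tc qdisc add dev %s root handle 1: htb default 30" % dev,
            "tc class add dev %s parent 1: classid 1:1 htb rate %s burst 15k" % (dev, maxrate),
            # default class: unclassified traffic is ceiled at the subscriber limit
            "tc class add dev %s parent 1:1 classid 1:30 htb rate 1kbit ceil %s burst 15k" % (dev, limit),
            "tc qdisc add dev %s parent 1:30 handle 30: sfq perturb 10" % dev,
        ]

    # build_htb_commands("eth0", "10mbit") mirrors the upload-limiting branch of start().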
diff --git a/xos/onboard/vsg-old/synchronizer/templates/dnsmasq_safe_servers.j2 b/xos/onboard/vsg-old/synchronizer/templates/dnsmasq_safe_servers.j2
deleted file mode 100644
index 0b3c807..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/dnsmasq_safe_servers.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file autogenerated by vCPE observer
-# It contains a list of DNS servers for dnsmasq to use.
-no-resolv
-
-{% if cdn_enable %}
-{% if cdn_prefixes %}
-# CDN
-{% for prefix in cdn_prefixes %}
-server=/{{ prefix }}/{{ dnsdemux_ip }}
-{% endfor %}
-{% endif %}
-{% endif %}
-
-# use OpenDNS service
-server=208.67.222.123
-server=208.67.220.123
diff --git a/xos/onboard/vsg-old/synchronizer/templates/dnsmasq_servers.j2 b/xos/onboard/vsg-old/synchronizer/templates/dnsmasq_servers.j2
deleted file mode 100644
index 7ecb319..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/dnsmasq_servers.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file autogenerated by vCPE observer
-# It contains a list of DNS servers for dnsmasq to use.
-no-resolv
-
-{% if cdn_enable %}
-{% if cdn_prefixes %}
-# CDN
-{% for prefix in cdn_prefixes %}
-server=/{{ prefix }}/{{ dnsdemux_ip }}
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if url_filter_kind=="answerx" %}
-cache-size=0
-add-mac
-{% endif %}
-
-# temporary for ONS demo
-address=/z.cdn.turner.com/207.141.192.134
-
-# use the configured upstream DNS servers
-{% for dns_server in dns_servers %}
-server={{ dns_server }}
-{% endfor %}
-
diff --git a/xos/onboard/vsg-old/synchronizer/templates/firewall_sample.j2 b/xos/onboard/vsg-old/synchronizer/templates/firewall_sample.j2
deleted file mode 100644
index ce85e68..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/firewall_sample.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-firewall_enable = {{ firewall_enable }}
-
-{% for firewall_rule in firewall_rules %}
-{{ firewall_rule }}
-{% endfor %}
diff --git a/xos/onboard/vsg-old/synchronizer/templates/message.html.j2 b/xos/onboard/vsg-old/synchronizer/templates/message.html.j2
deleted file mode 100644
index eb4497a..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/message.html.j2
+++ /dev/null
@@ -1,111 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
- <meta charset="UTF-8">
- <title>Service Suspended</title>
- <style>
-
- .row {
- width: 100%;
- }
-
- .col-sm-offset-2 {
- margin-left: 16.66666667%;
- }
-
- .col-sm-8 {
- width: 66.66666667%;
- }
-
- .alert-danger {
- color: #a94442;
- background-color: #f2dede;
- border-color: #a94442;
- }
- .alert {
- padding: 15px;
- margin-bottom: 20px;
- border: 1px solid transparent;
- border-radius: 4px;
- }
-
- body {
- background-size: cover;
- background-color: #00BFEC;
- font-family: "Helvetica Neue",Helvetica,Arial,sans-serif;
- font-size: 18px;
- line-height: 1.42857143;
- }
-
- .vertical-center {
- min-height: 100%; /* Fallback for browsers that do NOT support the vh unit */
- min-height: 100vh; /* These two lines are counted as one :-) */
-
- display: flex;
- align-items: center;
- }
-
- .jumbotron {
- padding: 60px;
- border-radius: 6px;
- background-color: #eee;
- box-shadow: 4px 4px 18px black;
- }
-
- .cord-logo-title{
- font-size: 150px;
- display: inline-block;
- color: #007EC4;
- }
-
- path {
- fill: #B2181E;
- }
-
- #cord-logo {
- transform: scale(1.3)
- }
-
- @media only screen
- and (min-device-width : 768px)
- and (max-device-width : 1024px) {
- #cord-logo {
- transform: scale(1.2)
- }
- .cord-logo-title{
- font-size: 100px;
- }
- }
- </style>
-</head>
-<body>
-
- <div class="container vertical-center">
- <div class="row">
- <div class="col-sm-8 col-sm-offset-2">
- <div class="jumbotron">
- <div class="cord-logo-title">
- <svg height="150" width="150">
- <path id="cord-logo" d="M92.5,62.3l-33,33,2.5,2.5c4.1,4.1,7.4,3.6,11.2-.1L95.9,75l-4.5-4.5,4.7-4.7-3.6-3.6Zm2.6,7L98.4,66l3.3,3.3-3.3,3.3-3.3-3.3ZM94.5,60l4.9-4.9,4.9,4.9-4.9,4.9ZM36.2,36.1L18.6,53.8c-7.8,7.8-5.8,17.4-2.4,22l-2.2-2.2c-10.6-10.6-11.2-20,0-31.2L28.2,28.1L31.3,25l8,8-3.1,3.1ZM55.5,55.4l3.6-3.6L66.9,44l-8-8l-2.5,2.5-5.2,5.2l-3.6,3.6L33.2,61.6C22,72.7,22.5,82.2,33.2,92.8L35.4,95c-3.4-4.5-5.4-14.1,2.4-22L55.5,55.4ZM50.7,21.7l-8-8L35,21.2l8,8,7.6-7.6ZM62.8,9.6L55.4,17l-8-8,7.4-7.4,8,8Zm0.7,18.3-7.6,7.6-8-8,7.6-7.6,8,8Zm26.1-6.6-8.1,8.1-8-8,8.1-8.1,8,8ZM79.3,31.5l-7.4,7.4-8-8,7.4-7.4,8,8ZM45.7,45.6L54.3,37l-8-8-8.6,8.6L23.4,51.8C12.2,63,12.8,72.4,23.4,83l2.2,2.2c-3.4-4.5-5.4-14.1,2.4-22ZM34.9,80.7l20.6,20.5c2,2,4.6,4.1,7.9,3.2-2.9,2.9-8.9,1.7-11.9-1.3L35.1,86.8,35,86.6H34.9l-0.8-.8a15,15,0,0,1,.1-1.9,14.7,14.7,0,0,1,.7-3.2Zm-0.6,7.4a21.3,21.3,0,0,0,5.9,11.7l5.7,5.7c3,3,9,4.1,11.9,1.3-3.3.9-5.9-1.2-7.9-3.2L34.3,88.1Zm3.5-12.4a16.6,16.6,0,0,0-2.3,3.6L57,100.8c3,3,9,4.1,11.9,1.3-3.3.9-5.9-1.2-7.9-3.2Z" />
- Sorry, your browser does not support inline SVG.
- </svg>
- CORD
- </div>
- <div class="alert alert-danger">
- {% if status == "delinquent" %}
- Your account is delinquent. Please visit the customer portal to pay your bill.
- {% elif status == "copyrightviolation" %}
- Someone in your home has been illegally downloading copyrighted material.
- Please visit the customer portal and perform the Copyright Training course.
- {% else %}
- Your service has been suspended. Please visit the customer portal to resume.
- {% endif %}
- </div>
- </div>
- </div>
- </div>
- </div>
-
-
-</body>
-</html>
\ No newline at end of file
diff --git a/xos/onboard/vsg-old/synchronizer/templates/rc.local.j2 b/xos/onboard/vsg-old/synchronizer/templates/rc.local.j2
deleted file mode 100755
index 4226a48..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/rc.local.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh -e
-#
-# rc.local
-#
-# This script is executed at the end of each multiuser runlevel.
-# Make sure that the script will "exit 0" on success or any other
-# value on error.
-#
-# In order to enable or disable this script just change the execution
-# bits.
-#
-# By default this script does nothing.
-
-ufw enable
-ufw allow bootps
-ufw allow from 192.168.0.0/24
-{% if status == "enabled" %}
-ufw route allow in on eth1 out on eth0
-ufw route allow in on eth1 out on eth2
-{% else %}
-ufw route deny in on eth1 out on eth0
-ufw route deny in on eth1 out on eth2
-{% endif %}
-
-BWLIMIT=/usr/local/sbin/bwlimit.sh
-[ -e $BWLIMIT ] && $BWLIMIT restart || true
-
-exit 0
diff --git a/xos/onboard/vsg-old/synchronizer/templates/start-vcpe-vtn.sh.j2 b/xos/onboard/vsg-old/synchronizer/templates/start-vcpe-vtn.sh.j2
deleted file mode 100644
index dfdce0a..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/start-vcpe-vtn.sh.j2
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-function mac_to_iface {
- MAC=$1
- ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
-}
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-STAG={{ s_tags[0] }}
-CTAG={{ c_tags[0] }}
-VCPE=vcpe-$STAG-$CTAG
-
-docker inspect $VCPE > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
- docker pull andybavier/docker-vcpe
- docker run -d --name=$VCPE --privileged=true --net=none \
- -v /var/container_volumes/$VCPE/mount:/mount:ro \
- -v /var/container_volumes/$VCPE/etc/dnsmasq.d:/etc/dnsmasq.d:ro \
- -v /var/container_volumes/$VCPE/etc/service/message:/etc/service/message \
- -v /var/container_volumes/$VCPE/usr/local/sbin:/usr/local/sbin:ro \
- andybavier/docker-vcpe
-else
- docker start $VCPE
-fi
-
-# Set up networking via pipework
-WAN_IFACE=br-wan
-docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_container_ip }}/{{ wan_container_netbits }}@{{ wan_container_gateway_ip }} {{ wan_container_mac }}
-
-LAN_IFACE=eth0
-ifconfig $LAN_IFACE >> /dev/null
-if [ "$?" == 0 ]
-then
- ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
- ifconfig $LAN_IFACE.$STAG up
- docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
-fi
-
-#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
-
-# Make sure VM's eth0 (hpc_client) has no IP address
-#ifconfig $HPC_IFACE 0.0.0.0
-
-# Attach to container
-docker start -a $VCPE
diff --git a/xos/onboard/vsg-old/synchronizer/templates/start-vcpe.sh.j2 b/xos/onboard/vsg-old/synchronizer/templates/start-vcpe.sh.j2
deleted file mode 100755
index c4128f3..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/start-vcpe.sh.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-function mac_to_iface {
- MAC=$1
- ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
-}
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-STAG={{ s_tags[0] }}
-CTAG={{ c_tags[0] }}
-VCPE=vcpe-$STAG-$CTAG
-
-docker inspect $VCPE > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
- docker pull andybavier/docker-vcpe
- docker run -d --name=$VCPE --privileged=true --net=none -v /etc/$VCPE/dnsmasq.d:/etc/dnsmasq.d andybavier/docker-vcpe
-else
- docker start $VCPE
-fi
-
-# Set up networking via pipework
-WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
-docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
-
-# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
-# Assumes that br-lan has been set up appropriately by a previous step
-LAN_IFACE=br-lan
-ifconfig $LAN_IFACE >> /dev/null
-if [ "$?" == 0 ]
-then
- ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
- ifconfig $LAN_IFACE.$STAG up
- docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
-fi
-
-#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
-
-# Make sure VM's eth0 (hpc_client) has no IP address
-#ifconfig $HPC_IFACE 0.0.0.0
-
-# Now can start up dnsmasq
-docker exec $VCPE service dnsmasq start
-
-# Attach to container
-docker start -a $VCPE
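
Both start-vcpe templates resolve a MAC address to a host interface by grepping ifconfig output (the mac_to_iface helper). A rough Python equivalent, assuming a Linux host that exposes /sys/class/net:

import os

def mac_to_iface(mac):
    """Return the interface whose hardware address matches mac, or None."""
    mac = mac.lower()
    for iface in os.listdir("/sys/class/net"):
        try:
            with open(os.path.join("/sys/class/net", iface, "address")) as f:
                if f.read().strip().lower() == mac:
                    return iface
        except IOError:
            continue
    return None

# Example: mac_to_iface("02:42:ac:11:00:02") -> "eth0" (host-dependent)
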
diff --git a/xos/onboard/vsg-old/synchronizer/templates/vcpe.conf.j2 b/xos/onboard/vsg-old/synchronizer/templates/vcpe.conf.j2
deleted file mode 100644
index fa7885e..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/vcpe.conf.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for vCPE
-description "vCPE container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
- /usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
-end script
diff --git a/xos/onboard/vsg-old/synchronizer/templates/vlan_sample.j2 b/xos/onboard/vsg-old/synchronizer/templates/vlan_sample.j2
deleted file mode 100644
index b73954b..0000000
--- a/xos/onboard/vsg-old/synchronizer/templates/vlan_sample.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-# below is a list of all vlan_ids associated with this vcpe
-
-{% for vlan_id in c_tags %}
-{{ vlan_id }}
-{% endfor %}
diff --git a/xos/onboard/vsg-old/synchronizer/vcpe-synchronizer.py b/xos/onboard/vsg-old/synchronizer/vcpe-synchronizer.py
deleted file mode 100755
index 84bec4f..0000000
--- a/xos/onboard/vsg-old/synchronizer/vcpe-synchronizer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/onboard/vsg-old/synchronizer/vcpe_stats_notifier.py b/xos/onboard/vsg-old/synchronizer/vcpe_stats_notifier.py
deleted file mode 100644
index 4d2cc76..0000000
--- a/xos/onboard/vsg-old/synchronizer/vcpe_stats_notifier.py
+++ /dev/null
@@ -1,344 +0,0 @@
-import six
-import uuid
-import datetime
-from kombu.connection import BrokerConnection
-from kombu.messaging import Exchange, Queue, Consumer, Producer
-import subprocess
-import re
-import time, threading
-import sys, getopt
-import logging, logging.handlers
-import os
-
-
-logfile = "vcpe_stats_notifier.log"
-level=logging.INFO
-logger=logging.getLogger('vcpe_stats_notifier')
-logger.setLevel(level)
-# create formatter
-formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s")
-handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
-# add formatter to handler
-handler.setFormatter(formatter)
-logger.addHandler(handler)
-
-def get_all_docker_containers():
- p = subprocess.Popen('docker ps --no-trunc', shell=True, stdout=subprocess.PIPE)
- firstline = True
- dockercontainers = {}
- while True:
- out = p.stdout.readline()
- if out == '' and p.poll() != None:
- break
- if out != '':
- if firstline is True:
- firstline = False
- else:
- fields = out.split()
- container_fields = {}
- container_fields['id'] = fields[0]
- dockercontainers[fields[-1]] = container_fields
- return dockercontainers
-
-def extract_compute_stats_from_all_vcpes(dockercontainers):
- for k,v in dockercontainers.iteritems():
- cmd = 'sudo docker stats --no-stream=true ' + v['id']
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
- firstline = True
- while True:
- out = p.stdout.readline()
- if out == '' and p.poll() != None:
- break
- if out != '':
- if firstline is True:
- firstline = False
- else:
- fields = out.split()
- #['CONTAINER_ID', 'CPU%', 'MEMUSE', 'UNITS', '/', 'MEMLIMIT', 'UNITS', 'MEM%', 'NET I/O', 'UNITS', '/', 'NET I/O LIMIT', 'UNITS', 'BLOCK I/O', 'UNITS', '/', 'BLOCK I/O LIMIT', 'UNITS']
- v['cpu_util'] = fields[1][:-1]
- if fields[6] == 'GB':
- v['memory'] = str(float(fields[5]) * 1000)
- else:
- v['memory'] = fields[5]
- if fields[3] == 'GB':
- v['memory_usage'] = str(float(fields[2]) * 1000)
- else:
- v['memory_usage'] = fields[2]
- v['network_stats'] = []
- for intf in ['eth0', 'eth1']:
- cmd = 'sudo docker exec ' + v['id'] + ' ifconfig ' + intf
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
- out,err = p.communicate()
- if out:
- intf_stats = {}
- m = re.search("RX bytes:(\d+)", str(out))
- if m:
- intf_stats['rx_bytes'] = m.group(1)
- m = re.search("TX bytes:(\d+)", str(out))
- if m:
- intf_stats['tx_bytes'] = m.group(1)
- m = re.search("RX packets:(\d+)", str(out))
- if m:
- intf_stats['rx_packets'] = m.group(1)
- m = re.search("TX packets:(\d+)", str(out))
- if m:
- intf_stats['tx_packets'] = m.group(1)
- if intf_stats:
- intf_stats['intf'] = intf
- v['network_stats'].append(intf_stats)
-
-def extract_dns_stats_from_all_vcpes(dockercontainers):
- for k,v in dockercontainers.iteritems():
- cmd = 'docker exec ' + v['id'] + ' killall -10 dnsmasq'
- p = subprocess.Popen (cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
- (output, error) = p.communicate()
- if error:
- logger.error("killall dnsmasq command failed with error = %s",error)
- continue
- cmd = 'docker exec ' + v['id'] + ' tail -7 /var/log/syslog'
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
- (output, error) = p.communicate()
- if error:
- logger.error("tail on dnsmasq log command failed with error = %s",error)
- continue
- log_list = output.splitlines()
- i = 0
- while i < len(log_list):
- m = re.search('(?<=:\scache size\s)(\S*)(?=,\s),\s(\S*)(?=/)/(\S*)(?=\scache insertions re-used unexpired cache entries)', log_list[i])
- if m == None:
- i = i+1
- continue;
- v['cache_size'] = m.group(1)
- v['replaced_unexpired_entries'] = m.group(2)
- v['total_inserted_entries'] = m.group(3)
- i = i+1
- m = re.search('(?<=:\squeries forwarded\s)(\S*)(?=,),\squeries answered locally\s(\S*)(?=$)', log_list[i])
- v['queries_forwarded'] = m.group(1)
- v['queries_answered_locally'] = m.group(2)
- break;
- i = i+2
- v['server_stats'] = []
- while i < len(log_list):
- m = re.search('(?<=:\sserver\s)(\S*)(?=#)#\d*:\squeries sent\s(\S*)(?=,),\sretried or failed\s(\S*)(?=$)', log_list[i])
- if m == None:
- i = i+1
- continue
- dns_server = {}
- dns_server['id'] = m.group(1)
- dns_server['queries_sent'] = m.group(2)
- dns_server['queries_failed'] = m.group(3)
- v['server_stats'].append(dns_server)
- i = i+1
- return dockercontainers
-
-
-keystone_tenant_id='3a397e70f64e4e40b69b6266c634d9d0'
-keystone_user_id='1e3ce043029547f1a61c1996d1a531a2'
-rabbit_user='openstack'
-rabbit_password='80608318c273f348a7c3'
-rabbit_host='10.11.10.1'
-vcpeservice_rabbit_exchange='vcpeservice'
-cpe_publisher_id='vcpe_publisher'
-
-producer = None
-
-def setup_rabbit_mq_channel():
- global producer
- global rabbit_user, rabbit_password, rabbit_host, vcpeservice_rabbit_exchange,cpe_publisher_id
- vcpeservice_exchange = Exchange(vcpeservice_rabbit_exchange, "topic", durable=False)
- # connections/channels
- connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
- logger.info('Connection to RabbitMQ server successful')
- channel = connection.channel()
- # produce
- producer = Producer(channel, exchange=vcpeservice_exchange, routing_key='notifications.info')
- p = subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE)
- (hostname, error) = p.communicate()
- cpe_publisher_id = cpe_publisher_id + '_on_' + hostname
- logger.info('cpe_publisher_id=%s',cpe_publisher_id)
-
-def publish_cpe_stats():
- global producer
- global keystone_tenant_id, keystone_user_id, cpe_publisher_id
-
- logger.debug('publish_cpe_stats invoked')
-
- dockercontainers = get_all_docker_containers()
- cpe_container_compute_stats = extract_compute_stats_from_all_vcpes(dockercontainers)
- cpe_container_dns_stats = extract_dns_stats_from_all_vcpes(dockercontainers)
-
- for k,v in cpe_container_dns_stats.iteritems():
- msg = {'event_type': 'vcpe',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe event: %s', msg)
-
- compute_payload = {}
- if 'cpu_util' in v:
- compute_payload['cpu_util']= v['cpu_util']
- if 'memory' in v:
- compute_payload['memory']= v['memory']
- if 'memory_usage' in v:
- compute_payload['memory_usage']= v['memory_usage']
- if ('network_stats' in v) and (v['network_stats']):
- compute_payload['network_stats']= v['network_stats']
- if compute_payload:
- compute_payload['vcpe_id'] = k
- compute_payload['user_id'] = keystone_user_id
- compute_payload['tenant_id'] = keystone_tenant_id
- msg = {'event_type': 'vcpe.compute.stats',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': compute_payload
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.cache.size event: %s', msg)
-
- if 'cache_size' in v:
- msg = {'event_type': 'vcpe.dns.cache.size',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id,
- 'cache_size':v['cache_size']
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.cache.size event: %s', msg)
-
- if 'total_inserted_entries' in v:
- msg = {'event_type': 'vcpe.dns.total_inserted_entries',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id,
- 'total_inserted_entries':v['total_inserted_entries']
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.total_inserted_entries event: %s', msg)
-
- if 'replaced_unexpired_entries' in v:
- msg = {'event_type': 'vcpe.dns.replaced_unexpired_entries',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id,
- 'replaced_unexpired_entries':v['replaced_unexpired_entries']
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.replaced_unexpired_entries event: %s', msg)
-
- if 'queries_forwarded' in v:
- msg = {'event_type': 'vcpe.dns.queries_forwarded',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id,
- 'queries_forwarded':v['queries_forwarded']
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.queries_forwarded event: %s', msg)
-
- if 'queries_answered_locally' in v:
- msg = {'event_type': 'vcpe.dns.queries_answered_locally',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id,
- 'queries_answered_locally':v['queries_answered_locally']
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.queries_answered_locally event: %s', msg)
-
- if 'server_stats' in v:
- for server in v['server_stats']:
- msg = {'event_type': 'vcpe.dns.server.queries_sent',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id,
- 'upstream_server':server['id'],
- 'queries_sent':server['queries_sent']
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.server.queries_sent event: %s', msg)
-
- msg = {'event_type': 'vcpe.dns.server.queries_failed',
- 'message_id':six.text_type(uuid.uuid4()),
- 'publisher_id': cpe_publisher_id,
- 'timestamp':datetime.datetime.now().isoformat(),
- 'priority':'INFO',
- 'payload': {'vcpe_id':k,
- 'user_id':keystone_user_id,
- 'tenant_id':keystone_tenant_id,
- 'upstream_server':server['id'],
- 'queries_failed':server['queries_failed']
- }
- }
- producer.publish(msg)
- logger.debug('Publishing vcpe.dns.server.queries_failed event: %s', msg)
-
-def periodic_publish():
- publish_cpe_stats()
- #Publish every 5minutes
- threading.Timer(300, periodic_publish).start()
-
-def main(argv):
- global keystone_tenant_id, keystone_user_id, rabbit_user, rabbit_password, rabbit_host, vcpeservice_rabbit_exchange
- try:
- opts, args = getopt.getopt(argv,"",["keystone_tenant_id=","keystone_user_id=","rabbit_host=","rabbit_user=","rabbit_password=","vcpeservice_rabbit_exchange="])
- except getopt.GetoptError:
- print 'vcpe_stats_notifier.py keystone_tenant_id=<keystone_tenant_id> keystone_user_id=<keystone_user_id> rabbit_host=<IP addr> rabbit_user=<user> rabbit_password=<password> vcpeservice_rabbit_exchange=<exchange name>'
- sys.exit(2)
- for opt, arg in opts:
- if opt in ("--keystone_tenant_id"):
- keystone_tenant_id = arg
- elif opt in ("--keystone_user_id"):
- keystone_user_id = arg
- elif opt in ("--rabbit_user"):
- rabbit_user = arg
- elif opt in ("--rabbit_password"):
- rabbit_password = arg
- elif opt in ("--rabbit_host"):
- rabbit_host = arg
- elif opt in ("--vcpeservice_rabbit_exchange"):
- vcpeservice_rabbit_exchange = arg
- logger.info("vcpe_stats_notifier args:keystone_tenant_id=%s keystone_user_id=%s rabbit_user=%s rabbit_host=%s vcpeservice_rabbit_exchange=%s",keystone_tenant_id,keystone_user_id,rabbit_user,rabbit_host,vcpeservice_rabbit_exchange)
- setup_rabbit_mq_channel()
- periodic_publish()
-
-if __name__ == "__main__":
- main(sys.argv[1:])
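
The notifier above publishes per-vSG compute and dnsmasq statistics as Ceilometer-style notifications over RabbitMQ. A pared-down sketch of that publishing path, assuming the kombu package and placeholder broker credentials:

import datetime
import uuid

from kombu import Connection, Exchange, Producer

def publish_sample(host, user, password, exchange_name, payload):
    """Publish one notification on a topic exchange (illustrative only)."""
    exchange = Exchange(exchange_name, "topic", durable=False)
    with Connection(host, user, password) as conn:
        producer = Producer(conn.channel(), exchange=exchange,
                            routing_key="notifications.info")
        producer.publish({
            "event_type": "vcpe.compute.stats",
            "message_id": str(uuid.uuid4()),
            "publisher_id": "vcpe_publisher",
            "timestamp": datetime.datetime.now().isoformat(),
            "priority": "INFO",
            "payload": payload,
        })

# publish_sample("10.11.10.1", "openstack", "secret", "vcpeservice",
#                {"vcpe_id": "vcpe-222-111", "cpu_util": "1.5"})
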
diff --git a/xos/onboard/vsg-old/synchronizer/vcpe_synchronizer_config b/xos/onboard/vsg-old/synchronizer/vcpe_synchronizer_config
deleted file mode 100644
index e41dc9a..0000000
--- a/xos/onboard/vsg-old/synchronizer/vcpe_synchronizer_config
+++ /dev/null
@@ -1,42 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vcpe
-dependency_graph=/opt/xos/synchronizers/vsg/model-deps
-steps_dir=/opt/xos/synchronizers/vsg/steps
-sys_dir=/opt/xos/synchronizers/vsg/sys
-deleters_dir=/opt/xos/synchronizers/vsg/deleters
-log_file=console
-#/var/log/hpc.log
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-proxy_ssh=False
-full_setup=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/onboard/vsg-old/synchronizer/vtn_vcpe_synchronizer_config b/xos/onboard/vsg-old/synchronizer/vtn_vcpe_synchronizer_config
deleted file mode 100644
index b43d831..0000000
--- a/xos/onboard/vsg-old/synchronizer/vtn_vcpe_synchronizer_config
+++ /dev/null
@@ -1,47 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vcpe
-dependency_graph=/opt/xos/synchronizers/vsg/model-deps
-steps_dir=/opt/xos/synchronizers/vsg/steps
-sys_dir=/opt/xos/synchronizers/vsg/sys
-deleters_dir=/opt/xos/synchronizers/vsg/deleters
-log_file=console
-#/var/log/hpc.log
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-full_setup=True
-proxy_ssh=True
-proxy_ssh_key=/opt/xos/synchronizers/vsg/node_key
-proxy_ssh_user=root
-
-[networking]
-use_vtn=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
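
Both synchronizer config files are plain INI. A small sketch of reading the sections that drive the observer, using the standard library parser rather than the XOS Config class (illustration only; the option names come from the file above):

try:
    import configparser                   # Python 3
except ImportError:
    import ConfigParser as configparser   # Python 2

cfg = configparser.ConfigParser()
cfg.read("vtn_vcpe_synchronizer_config")

observer_name = cfg.get("observer", "name")   # "vcpe"
steps_dir = cfg.get("observer", "steps_dir")  # "/opt/xos/synchronizers/vsg/steps"
use_vtn = cfg.get("networking", "use_vtn")    # stored as the string "True"

print("%s %s %s" % (observer_name, steps_dir, use_vtn))
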
diff --git a/xos/onboard/vsg-old/templates/vcpeadmin.html b/xos/onboard/vsg-old/templates/vcpeadmin.html
deleted file mode 100644
index 334a3e8..0000000
--- a/xos/onboard/vsg-old/templates/vcpeadmin.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<div class = "row text-center">
- <div class="col-xs-6">
- <a class="btn btn-primary" href="/admin/cord/vsgtenant/">vSG Tenants</a>
- </div>
- <div class="col-xs-6">
- <a class="btn btn-primary" href="/admin/dashboard/subscribers/">Subscriber View</a>
- </div>
-</div>
-
diff --git a/xos/onboard/vsg-old/tosca/resources/vcpeservice.py b/xos/onboard/vsg-old/tosca/resources/vcpeservice.py
deleted file mode 100644
index 956d888..0000000
--- a/xos/onboard/vsg-old/tosca/resources/vcpeservice.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.vsg.models import VSGService
-
-from service import XOSService
-
-class XOSVsgService(XOSService):
- provides = "tosca.nodes.VSGService"
- xos_model = VSGService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key",
- "private_key_fn", "versionNumber", "backend_network_label",
- "dns_servers", "node_label"]
-
diff --git a/xos/onboard/vsg-old/vsg-onboard.yaml b/xos/onboard/vsg-old/vsg-onboard.yaml
deleted file mode 100644
index 48f6ad4..0000000
--- a/xos/onboard/vsg-old/vsg-onboard.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the vSG service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#vsg:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/vsg/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/vcpeadmin.html
- synchronizer: synchronizer/manifest
- synchronizer_run: vcpe-synchronizer.py
- #tosca_custom_types: exampleservice.yaml
- tosca_resource: tosca/resources/vcpeservice.py
- rest_service: subdirectory:vsg api/service/vsg/vsgservice.py
- rest_tenant: subdirectory:cord api/tenant/cord/vsg.py
- private_key: file:///opt/xos/key_import/vsg_rsa
- public_key: file:///opt/xos/key_import/vsg_rsa.pub
-
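
The recipe's comment notes that relative artifact paths are concatenated with base_url whenever base_url is non-null. A tiny sketch of that resolution rule as described (an assumption drawn from the comment, not the onboarding code itself):

def resolve_artifact(base_url, path):
    """Join a recipe artifact path with base_url, per the comment above."""
    if base_url:
        return base_url.rstrip("/") + "/" + path
    return path

# resolve_artifact("file:///opt/xos/onboard/vsg/", "models.py")
#   -> "file:///opt/xos/onboard/vsg/models.py"
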
diff --git a/xos/onboard/vtn-old/admin.py b/xos/onboard/vtn-old/admin.py
deleted file mode 100644
index 464f197..0000000
--- a/xos/onboard/vtn-old/admin.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from django.contrib import admin
-
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline
-from core.middleware import get_request
-
-from services.vtn.models import *
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-class VTNServiceForm(forms.ModelForm):
- privateGatewayMac = forms.CharField(required=False)
- localManagementIp = forms.CharField(required=False)
- ovsdbPort = forms.CharField(required=False)
- sshPort = forms.CharField(required=False)
- sshUser = forms.CharField(required=False)
- sshKeyFile = forms.CharField(required=False)
- mgmtSubnetBits = forms.CharField(required=False)
- xosEndpoint = forms.CharField(required=False)
- xosUser = forms.CharField(required=False)
- xosPassword = forms.CharField(required=False)
-
- def __init__(self,*args,**kwargs):
- super (VTNServiceForm,self ).__init__(*args,**kwargs)
- if self.instance:
- self.fields['privateGatewayMac'].initial = self.instance.privateGatewayMac
- self.fields['localManagementIp'].initial = self.instance.localManagementIp
- self.fields['ovsdbPort'].initial = self.instance.ovsdbPort
- self.fields['sshPort'].initial = self.instance.sshPort
- self.fields['sshUser'].initial = self.instance.sshUser
- self.fields['sshKeyFile'].initial = self.instance.sshKeyFile
- self.fields['mgmtSubnetBits'].initial = self.instance.mgmtSubnetBits
- self.fields['xosEndpoint'].initial = self.instance.xosEndpoint
- self.fields['xosUser'].initial = self.instance.xosUser
- self.fields['xosPassword'].initial = self.instance.xosPassword
-
- def save(self, commit=True):
- self.instance.privateGatewayMac = self.cleaned_data.get("privateGatewayMac")
- self.instance.localManagementIp = self.cleaned_data.get("localManagementIp")
- self.instance.ovsdbPort = self.cleaned_data.get("ovsdbPort")
- self.instance.sshPort = self.cleaned_data.get("sshPort")
- self.instance.sshUser = self.cleaned_data.get("sshUser")
- self.instance.sshKeyFile = self.cleaned_data.get("sshKeyFile")
- self.instance.mgmtSubnetBits = self.cleaned_data.get("mgmtSubnetBits")
- self.instance.xosEndpoint = self.cleaned_data.get("xosEndpoint")
- self.instance.xosUser = self.cleaned_data.get("xosUser")
- self.instance.xosPassword = self.cleaned_data.get("xosPassword")
- return super(VTNServiceForm, self).save(commit=commit)
-
- class Meta:
- model = VTNService
-
-class VTNServiceAdmin(ReadOnlyAwareAdmin):
- model = VTNService
- form = VTNServiceForm
- verbose_name = "VTN Service"
- verbose_name_plural = "VTN Service"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber','description',"view_url","icon_url",
- 'privateGatewayMac', 'localManagementIp', 'ovsdbPort', 'sshPort', 'sshUser', 'sshKeyFile', 'mgmtSubnetBits', 'xosEndpoint', 'xosUser', 'xosPassword' ], 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'VTN Service Details'),
-# ('administration', 'Administration'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = ( # ('vtnadmin.html', 'top', 'administration'),
- ) #('hpctools.html', 'top', 'tools') )
-
- def queryset(self, request):
- return VTNService.get_service_objects_by_user(request.user)
-
-admin.site.register(VTNService, VTNServiceAdmin)
diff --git a/xos/onboard/vtn-old/api/service/vtn.py b/xos/onboard/vtn-old/api/service/vtn.py
deleted file mode 100644
index 6b02616..0000000
--- a/xos/onboard/vtn-old/api/service/vtn.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import viewsets
-from rest_framework.decorators import detail_route, list_route
-from rest_framework.views import APIView
-from core.models import *
-from services.vtn.models import VTNService
-from django.forms import widgets
-from django.conf.urls import patterns, url
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-from django.shortcuts import get_object_or_404
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from xos.exceptions import *
-import json
-import subprocess
-
-class VTNServiceSerializer(PlusModelSerializer):
- id = ReadOnlyField()
-
- privateGatewayMac = serializers.CharField(required=False)
- localManagementIp = serializers.CharField(required=False)
- ovsdbPort = serializers.IntegerField(required=False)
- sshPort = serializers.IntegerField(required=False)
- sshUser = serializers.CharField(required=False)
- sshKeyFile = serializers.CharField(required=False)
- mgmtSubnetBits = serializers.IntegerField(required=False)
- xosEndpoint = serializers.CharField(required=False)
- xosUser = serializers.CharField(required=False)
- xosPassword = serializers.CharField(required=False)
-
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- class Meta:
- model = VTNService
- fields = ('humanReadableName', 'id', 'privateGatewayMac', 'localManagementIp', 'ovsdbPort', 'sshPort', 'sshUser', 'sshKeyFile',
- 'mgmtSubnetBits', 'xosEndpoint', 'xosUser', 'xosPassword')
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
-class VTNViewSet(XOSViewSet):
- base_name = "vtn"
- method_name = "vtn"
- method_kind = "viewset"
-
- # these are just because ViewSet needs some queryset and model, even if we don't use the
- # default endpoints
- queryset = VTNService.get_service_objects().all()
- model = VTNService
- serializer_class = VTNServiceSerializer
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = []
-
- patterns.append( self.detail_url("services/$", {"get": "get_services"}, "services") )
- patterns.append( self.detail_url("services_names/$", {"get": "get_services_names"}, "services") )
- patterns.append( self.detail_url("services/(?P<service>[a-zA-Z0-9\-_]+)/$", {"get": "get_service"}, "get_service") )
-
-        # Not as RESTful as it could be, but maintain these endpoints for compatibility
- patterns.append( self.list_url("services/$", {"get": "get_services"}, "rootvtn_services") )
- patterns.append( self.list_url("services_names/$", {"get": "get_services_names"}, "rootvtn_services") )
- patterns.append( self.list_url("services/(?P<service>[a-zA-Z0-9\-_]+)/$", {"get": "get_service"}, "rootvtn_get_service") )
-
- patterns = patterns + super(VTNViewSet,self).get_urlpatterns(api_path)
-
- return patterns
-
- def get_services_names(self, request, pk=None):
- result = {}
- for service in Service.objects.all():
- for id in service.get_vtn_src_names():
- dependencies = service.get_vtn_dependencies_names()
- if dependencies:
- result[id] = dependencies
- return Response(result)
-
- def get_services(self, request, pk=None):
- result = {}
- for service in Service.objects.all():
- for id in service.get_vtn_src_ids():
- dependencies = service.get_vtn_dependencies_ids()
- if dependencies:
- result[id] = dependencies
- return Response(result)
-
- def get_service(self, request, pk=None, service=None):
- for xos_service in Service.objects.all():
- if service in xos_service.get_vtn_src_ids():
- return Response(xos_service.get_vtn_dependencies_ids())
- return Response([])
-
-
diff --git a/xos/onboard/vtn-old/models.py b/xos/onboard/vtn-old/models.py
deleted file mode 100644
index c805f24..0000000
--- a/xos/onboard/vtn-old/models.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port, AddressPool
-from core.models.plcorebase import StrippedCharField
-import os
-from django.db import models, transaction
-from django.forms.models import model_to_dict
-from django.db.models import Q
-from operator import itemgetter, attrgetter, methodcaller
-from core.models import Tag
-from core.models.service import LeastLoadedNodeScheduler
-import traceback
-from xos.exceptions import *
-from xos.config import Config
-
-class ConfigurationError(Exception):
- pass
-
-VTN_KIND = "VTN"
-
-# -------------------------------------------
-# VTN
-# -------------------------------------------
-
-class VTNService(Service):
- KIND = VTN_KIND
-
- class Meta:
- app_label = "vtn"
- verbose_name = "VTN Service"
- proxy = True
-
- simple_attributes = ( ("privateGatewayMac", "00:00:00:00:00:01"),
- ("localManagementIp", "172.27.0.1/24"),
- ("ovsdbPort", "6641"),
- ("sshPort", "22"),
- ("sshUser", "root"),
- ("sshKeyFile", "/root/node_key") ,
- ("mgmtSubnetBits", "24"),
- ("xosEndpoint", "http://xos/"),
- ("xosUser", "padmin@vicci.org"),
- ("xosPassword", "letmein"),
-
- )
-
-VTNService.setup_simple_attributes()
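
VTNService declares its settings as (name, default) pairs and then calls setup_simple_attributes(). A hedged illustration of the kind of property generation such a helper could perform; the real XOS helper persists values as service attributes, whereas this sketch backs them with a plain dict:

def setup_simple_attributes(cls, simple_attributes):
    """Attach default-backed properties to cls for each (name, default) pair."""
    def make_property(name, default):
        def getter(self):
            return getattr(self, "_attrs", {}).get(name, default)
        def setter(self, value):
            if not hasattr(self, "_attrs"):
                self._attrs = {}
            self._attrs[name] = value
        return property(getter, setter)
    for name, default in simple_attributes:
        setattr(cls, name, make_property(name, default))
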
diff --git a/xos/onboard/vtn-old/synchronizer/manifest b/xos/onboard/vtn-old/synchronizer/manifest
deleted file mode 100644
index dccfcdc..0000000
--- a/xos/onboard/vtn-old/synchronizer/manifest
+++ /dev/null
@@ -1,10 +0,0 @@
-manifest
-vtn-synchronizer.py
-steps/sync_tenant.py
-steps/sync_port_addresses.py
-start.sh
-stop.sh
-model-deps
-supervisor/vtn-observer.conf
-run.sh
-vtn_synchronizer_config
diff --git a/xos/onboard/vtn-old/synchronizer/model-deps b/xos/onboard/vtn-old/synchronizer/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/onboard/vtn-old/synchronizer/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/onboard/vtn-old/synchronizer/run.sh b/xos/onboard/vtn-old/synchronizer/run.sh
deleted file mode 100755
index 000a563..0000000
--- a/xos/onboard/vtn-old/synchronizer/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python vtn-synchronizer.py -C $XOS_DIR/synchronizers/vtn/vtn_synchronizer_config
diff --git a/xos/onboard/vtn-old/synchronizer/start.sh b/xos/onboard/vtn-old/synchronizer/start.sh
deleted file mode 100755
index 2c43440..0000000
--- a/xos/onboard/vtn-old/synchronizer/start.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-nohup python vtn-synchronizer.py -C $XOS_DIR/synchronizers/vtn/vtn_synchronizer_config > /dev/null 2>&1 &
diff --git a/xos/onboard/vtn-old/synchronizer/steps/sync_port_addresses.py b/xos/onboard/vtn-old/synchronizer/steps/sync_port_addresses.py
deleted file mode 100644
index 553df6f..0000000
--- a/xos/onboard/vtn-old/synchronizer/steps/sync_port_addresses.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import os
-import requests
-import socket
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service, Port, Controller, Tag, Tenant
-from core.models.service import COARSE_KIND
-from services.vsg.models import VSGTenant
-from xos.logger import Logger, logging
-from requests.auth import HTTPBasicAuth
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-# XXX should save and load this
-glo_saved_vtn_maps = []
-
-class SyncPortAddresses(SyncStep):
- requested_interval = 0 # 3600
- provides=[Port]
- observes=Port
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
-
- def call(self, **args):
- global glo_saved_vtn_maps
-
- logger.info("sync'ing vsg tenant to port addresses")
-
- # build up a dictionary of port-->[wan_addrs] mappings
- port_addrs = {}
- for vsg in VSGTenant.get_tenant_objects().all():
- if not vsg.instance:
- logger.info("skipping vsg %s because it has no instance" % vsg)
-
- wan_ip = vsg.wan_container_ip
- if not wan_ip:
- logger.info("skipping vsg %s because it has no wan_container_ip" % vsg)
-
- wan_mac = vsg.wan_container_mac
- if not wan_mac:
- logger.info("skipping vsg %s because it has no wan_container_mac" % vsg)
-
- lan_network = vsg.get_lan_network(vsg.instance)
- if not lan_network:
- logger.info("skipping vsg %s because it has no lan_network" % vsg)
-
- lan_port = Port.objects.filter(instance = vsg.instance, network=lan_network)
- if not lan_port:
- logger.info("skipping vsg %s because it has no lan_port" % vsg)
- lan_port = lan_port[0]
-
- if not lan_port.port_id:
- logger.info("skipping vsg %s because its lan_port has no port_id" % vsg)
-
- if not (lan_port.pk in port_addrs):
- port_addrs[lan_port.pk] = []
- entry = {"mac_address": wan_mac, "ip_address": wan_ip}
- addr_pairs = port_addrs[lan_port.pk]
- if not entry in addr_pairs:
- addr_pairs.append(entry)
-
- # now do the VM_WAN_IP from the instance
- if vsg.instance:
- wan_vm_ip = vsg.wan_vm_ip
- wan_vm_mac = vsg.wan_vm_mac
- entry = {"mac_address": wan_vm_mac, "ip_address": wan_vm_ip}
- if not entry in addr_pairs:
- addr_pairs.append(entry)
-
- # Get all ports in all controllers
- ports_by_id = {}
- for controller in Controller.objects.all():
- if not controller.admin_tenant:
- logger.info("controller %s has no admin_tenant" % controller)
- continue
- try:
- driver = self.driver.admin_driver(controller = controller)
- ports = driver.shell.quantum.list_ports()["ports"]
- except:
- logger.log_exc("failed to get ports from controller %s" % controller)
- continue
-
- for port in ports:
- ports_by_id[port["id"]] = port
-
- for port_pk in port_addrs.keys():
- port = Port.objects.get(pk=port_pk)
- addr_pairs = port_addrs[port_pk]
- neutron_port = ports_by_id.get(port.port_id,None)
- if not neutron_port:
- logger.info("failed to get neutron port for port %s" % port)
- continue
-
- ips = [x["ip_address"] for x in addr_pairs]
-
- changed = False
-
- # delete addresses in neutron that don't exist in XOS
- aaps = neutron_port.get("allowed_address_pairs", [])
- for aap in aaps[:]:
- if not aap["ip_address"] in ips:
- logger.info("removing address %s from port %s" % (aap["ip_address"], port))
- aaps.remove(aap)
- changed = True
-
- aaps_ips = [x["ip_address"] for x in aaps]
-
- # add addresses in XOS that don't exist in neutron
- for addr in addr_pairs:
- if not addr["ip_address"] in aaps_ips:
- logger.info("adding address %s to port %s" % (addr, port))
- aaps.append( addr )
- aaps_ips.append(addr["ip_address"])
- changed = True
-
- if changed:
- logger.info("updating port %s" % port)
- driver.shell.quantum.update_port(port.port_id, {"port": {"allowed_address_pairs": aaps}})
-
-
-
-
-
-
-
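
sync_port_addresses reconciles each Neutron port's allowed_address_pairs against the WAN addresses XOS knows about: stale entries are dropped and missing ones added before the port is updated. The core reconcile step, extracted into a standalone sketch (names are illustrative):

def reconcile_address_pairs(current_pairs, desired_pairs):
    """Return (new_pairs, changed): drop stale entries, add missing ones."""
    desired_ips = set(p["ip_address"] for p in desired_pairs)
    new_pairs = [p for p in current_pairs if p["ip_address"] in desired_ips]
    changed = len(new_pairs) != len(current_pairs)
    present_ips = set(p["ip_address"] for p in new_pairs)
    for pair in desired_pairs:
        if pair["ip_address"] not in present_ips:
            new_pairs.append(pair)
            present_ips.add(pair["ip_address"])
            changed = True
    return new_pairs, changed

# reconcile_address_pairs(
#     [{"ip_address": "10.0.0.5", "mac_address": "aa:bb:cc:dd:ee:01"}],
#     [{"ip_address": "10.0.0.6", "mac_address": "aa:bb:cc:dd:ee:02"}])
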
diff --git a/xos/onboard/vtn-old/synchronizer/steps/sync_tenant.py b/xos/onboard/vtn-old/synchronizer/steps/sync_tenant.py
deleted file mode 100644
index a0e6cdb..0000000
--- a/xos/onboard/vtn-old/synchronizer/steps/sync_tenant.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import os
-import requests
-import socket
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service, Tenant
-from core.models.service import COARSE_KIND
-from xos.logger import Logger, logging
-from requests.auth import HTTPBasicAuth
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-# XXX should save and load this
-glo_saved_vtn_maps = []
-
-class SyncTenant(SyncStep):
- provides=[Tenant]
- observes=Tenant
- requested_interval=0
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
-
- def get_vtn_onos_service(self):
-# vtn_tenant = Tenant.objects.filter(name="VTN_ONOS_app") # XXX fixme - hardcoded
-# if not vtn_tenant:
-# raise "No VTN Onos App found"
-# vtn_tenant = vtn_tenant[0]
-#
-# vtn_service = vtn_tenant.provider_service
-        vtn_service = Service.objects.filter(name="service_ONOS_VTN") # XXX fixme - hardcoded
- if not vtn_service:
-            raise Exception("No VTN Onos Service")
-
- return vtn_service[0]
-
- def get_vtn_addr(self):
- vtn_service = self.get_vtn_onos_service()
-
- if not vtn_service.slices.exists():
-            raise Exception("VTN Service has no slices")
-
- vtn_slice = vtn_service.slices.all()[0]
-
- if not vtn_slice.instances.exists():
-            raise Exception("VTN Slice has no instances")
-
- vtn_instance = vtn_slice.instances.all()[0]
-
- return vtn_instance.node.name
-
- def call(self, **args):
- global glo_saved_vtn_maps
-
- logger.info("sync'ing vtn services")
-
- vtn_maps = []
- for service in Service.objects.all():
- for id in service.get_vtn_src_ids():
- dependencies = service.get_vtn_dependencies_ids()
- if dependencies:
- for dependency in dependencies:
- vtn_maps.append( (id, dependency) )
-
- for vtn_map in vtn_maps:
- if not (vtn_map in glo_saved_vtn_maps):
- # call vtn rest api to add map
- url = "http://" + self.get_vtn_addr() + ":8181/onos/cordvtn/service-dependency/%s/%s" % (vtn_map[0], vtn_map[1])
-
- print "POST %s" % url
- r = requests.post(url, auth=HTTPBasicAuth('karaf', 'karaf') ) # XXX fixme - hardcoded auth
- if (r.status_code != 200):
- raise Exception("Received error from vtn service (%d)" % r.status_code)
-
- for vtn_map in glo_saved_vtn_maps:
- if not vtn_map in vtn_maps:
- # call vtn rest api to delete map
- url = "http://" + self.get_vtn_addr() + ":8181/onos/cordvtn/service-dependency/%s/%s" % (vtn_map[0],vtn_map[1])
-
- print "DELETE %s" % url
- r = requests.delete(url, auth=HTTPBasicAuth('karaf', 'karaf') ) # XXX fixme - hardcoded auth
- if (r.status_code != 200):
- raise Exception("Received error from vtn service (%d)" % r.status_code)
-
- glo_saved_vtn_maps = vtn_maps
- # TODO: save this
-
diff --git a/xos/onboard/vtn-old/synchronizer/stop.sh b/xos/onboard/vtn-old/synchronizer/stop.sh
deleted file mode 100755
index 7ff2b06..0000000
--- a/xos/onboard/vtn-old/synchronizer/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f vtn-synchronizer.py
diff --git a/xos/onboard/vtn-old/synchronizer/supervisor/vtn-observer.conf b/xos/onboard/vtn-old/synchronizer/supervisor/vtn-observer.conf
deleted file mode 100644
index 714afa7..0000000
--- a/xos/onboard/vtn-old/synchronizer/supervisor/vtn-observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:vtn-observer]
-command=python /opt/xos/observers/vbng/vtn-observer.py -C /opt/xos/observers/vbng/vtn_observer_config
diff --git a/xos/onboard/vtn-old/synchronizer/vtn-synchronizer.py b/xos/onboard/vtn-old/synchronizer/vtn-synchronizer.py
deleted file mode 100755
index 84bec4f..0000000
--- a/xos/onboard/vtn-old/synchronizer/vtn-synchronizer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/onboard/vtn-old/synchronizer/vtn_synchronizer_config b/xos/onboard/vtn-old/synchronizer/vtn_synchronizer_config
deleted file mode 100644
index d931839..0000000
--- a/xos/onboard/vtn-old/synchronizer/vtn_synchronizer_config
+++ /dev/null
@@ -1,44 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vtn
-dependency_graph=/opt/xos/synchronizers/vtn/model-deps
-steps_dir=/opt/xos/synchronizers/vtn/steps
-sys_dir=/opt/xos/synchronizers/vtn/sys
-deleters_dir=/opt/xos/synchronizers/vtn/deleters
-log_file=console
-#/var/log/hpc.log
-driver=openstack
-pretend=False
-backoff_disabled=True
-
-[nova]
-ca_ssl_cert=/etc/ssl/certs/ca-certificates.crt
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
-
-[networking]
-use_vtn=True
diff --git a/xos/onboard/vtn-old/templates/vtnadmin.html b/xos/onboard/vtn-old/templates/vtnadmin.html
deleted file mode 100644
index a3a2a52..0000000
--- a/xos/onboard/vtn-old/templates/vtnadmin.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<div class = "row text-center">
- <div class="col-xs-12">
- <a href="/admin/vtn/vnrtenant/">vTN Tenants</a>
- </div>
-</div>
diff --git a/xos/onboard/vtn-old/tosca/resources/vtnservice.py b/xos/onboard/vtn-old/tosca/resources/vtnservice.py
deleted file mode 100644
index 2a5738f..0000000
--- a/xos/onboard/vtn-old/tosca/resources/vtnservice.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import pdb
-import sys
-import tempfile
-sys.path.append("/opt/tosca")
-from translator.toscalib.tosca_template import ToscaTemplate
-
-from services.vtn.models import VTNService
-
-from service import XOSService
-
-class XOSVTNService(XOSService):
- provides = "tosca.nodes.VTNService"
- xos_model = VTNService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", 'privateGatewayMac', 'localManagementIp', 'ovsdbPort', 'sshPort', 'sshUser', 'sshKeyFile', 'mgmtSubnetBits', 'xosEndpoint', 'xosUser', 'xosPassword']
diff --git a/xos/onboard/vtn-old/vtn-onboard.yaml b/xos/onboard/vtn-old/vtn-onboard.yaml
deleted file mode 100644
index 460af5a..0000000
--- a/xos/onboard/vtn-old/vtn-onboard.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the VTN service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#vtn:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/vtn/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/vtnadmin.html
- synchronizer: synchronizer/manifest
- synchronizer_run: vtn-synchronizer.py
- tosca_resource: tosca/resources/vtnservice.py
- rest_service: api/service/vtn.py
- #private_key: file:///opt/xos/key_import/vsg_rsa
- #public_key: file:///opt/xos/key_import/vsg_rsa.pub
-
diff --git a/xos/onboard/vtr-old/admin.py b/xos/onboard/vtr-old/admin.py
deleted file mode 100644
index 0120e66..0000000
--- a/xos/onboard/vtr-old/admin.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from django.contrib import admin
-
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline
-from core.middleware import get_request
-
-from services.vtr.models import *
-from services.volt.models import CordSubscriberRoot
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-class VTRServiceAdmin(ReadOnlyAwareAdmin):
- model = VTRService
- verbose_name = "vTR Service"
- verbose_name_plural = "vTR Service"
- list_display = ("backend_status_icon", "name", "enabled")
- list_display_links = ('backend_status_icon', 'name', )
- fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description',"view_url","icon_url" ], 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', )
- inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
-
- extracontext_registered_admins = True
-
- user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
- suit_form_tabs =(('general', 'vTR Service Details'),
- ('administration', 'Administration'),
- ('slices','Slices'),
- ('serviceattrs','Additional Attributes'),
- ('serviceprivileges','Privileges'),
- )
-
- suit_form_includes = (('vtradmin.html', 'top', 'administration'),
- ) #('hpctools.html', 'top', 'tools') )
-
- def queryset(self, request):
- return VTRService.get_service_objects_by_user(request.user)
-
-class VTRTenantForm(forms.ModelForm):
- test = forms.ChoiceField(choices=VTRTenant.TEST_CHOICES, required=True)
- scope = forms.ChoiceField(choices=VTRTenant.SCOPE_CHOICES, required=True)
- argument = forms.CharField(required=False)
- result_code = forms.CharField(required=False)
- result = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 10, 'cols': 80, 'class': 'input-xxlarge'}))
- target = forms.ModelChoiceField(queryset=CordSubscriberRoot.objects.all())
-
- def __init__(self,*args,**kwargs):
- super (VTRTenantForm,self ).__init__(*args,**kwargs)
- self.fields['provider_service'].queryset = VTRService.get_service_objects().all()
- if self.instance:
- # fields for the attributes
- self.fields['test'].initial = self.instance.test
- self.fields['argument'].initial = self.instance.argument
- self.fields['target'].initial = self.instance.target
- self.fields['scope'].initial = self.instance.scope
- if (self.instance.enacted is not None) and (self.instance.enacted >= self.instance.updated):
- self.fields['result'].initial = self.instance.result
- self.fields['result_code'].initial = self.instance.result_code
- else:
- self.fields['result'].initial = ""
- self.fields['result_code'].initial= ""
- if (not self.instance) or (not self.instance.pk):
- # default fields for an 'add' form
- self.fields['kind'].initial = VTR_KIND
- self.fields["scope"].initial = VTRTenant.get_default_attribute("scope")
- if VTRService.get_service_objects().exists():
- self.fields["provider_service"].initial = VTRService.get_service_objects().all()[0]
-
- def save(self, commit=True):
- self.instance.test = self.cleaned_data.get("test")
- self.instance.argument = self.cleaned_data.get("argument")
- self.instance.target = self.cleaned_data.get("target")
- self.instance.result = self.cleaned_data.get("result")
- self.instance.result_code = self.cleaned_data.get("result_code")
- self.instance.scope = self.cleaned_data.get("scope")
- return super(VTRTenantForm, self).save(commit=commit)
-
- class Meta:
- model = VTRTenant
-
-class VTRTenantAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', 'target', 'test', 'argument' )
- list_display_links = ('backend_status_icon', 'id')
- fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', # 'subscriber_root', 'service_specific_id', 'service_specific_attribute',
- 'target', 'scope', 'test', 'argument', 'is_synced', 'result_code', 'result'],
- 'classes':['suit-tab suit-tab-general']})]
- readonly_fields = ('backend_status_text', 'service_specific_attribute', 'is_synced')
- form = VTRTenantForm
-
- suit_form_tabs = (('general','Details'),)
-
- def is_synced(self, obj):
- return (obj.enacted is not None) and (obj.enacted >= obj.updated)
-
- def queryset(self, request):
- return VTRTenant.get_tenant_objects_by_user(request.user)
-
-admin.site.register(VTRService, VTRServiceAdmin)
-admin.site.register(VTRTenant, VTRTenantAdmin)
-
diff --git a/xos/onboard/vtr-old/api/tenant/truckroll.py b/xos/onboard/vtr-old/api/tenant/truckroll.py
deleted file mode 100644
index cc8d62b..0000000
--- a/xos/onboard/vtr-old/api/tenant/truckroll.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from rest_framework.decorators import api_view
-from rest_framework.response import Response
-from rest_framework.reverse import reverse
-from rest_framework import serializers
-from rest_framework import generics
-from rest_framework import status
-from core.models import *
-from django.forms import widgets
-from services.vtr.models import VTRTenant, VTRService
-from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
-from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
-
-def get_default_vtr_service():
- vtr_services = VTRService.get_service_objects().all()
- if vtr_services:
- return vtr_services[0]
- return None
-
-class VTRTenantForAPI(VTRTenant):
- class Meta:
- proxy = True
- app_label = "cord"
-
-class VTRTenantSerializer(PlusModelSerializer):
- id = ReadOnlyField()
- target_id = serializers.IntegerField()
- test = serializers.CharField()
- scope = serializers.CharField()
- argument = serializers.CharField(required=False)
- provider_service = serializers.PrimaryKeyRelatedField(queryset=VTRService.get_service_objects().all(), default=get_default_vtr_service)
- result = serializers.CharField(required=False)
- result_code = serializers.CharField(required=False)
- backend_status = ReadOnlyField()
-
- humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
- is_synced = serializers.SerializerMethodField("isSynced")
-
- class Meta:
- model = VTRTenantForAPI
- fields = ('humanReadableName', 'id', 'provider_service', 'target_id', 'scope', 'test', 'argument', 'result', 'result_code', 'is_synced', 'backend_status' )
-
- def getHumanReadableName(self, obj):
- return obj.__unicode__()
-
- def isSynced(self, obj):
- return (obj.enacted is not None) and (obj.enacted >= obj.updated)
-
-class TruckRollViewSet(XOSViewSet):
- base_name = "truckroll"
- method_name = "truckroll"
- method_kind = "viewset"
- queryset = VTRTenantForAPI.get_tenant_objects().all() # select_related().all()
- serializer_class = VTRTenantSerializer
-
- @classmethod
- def get_urlpatterns(self, api_path="^"):
- patterns = super(TruckRollViewSet, self).get_urlpatterns(api_path=api_path)
-
- return patterns
-
- def list(self, request):
- queryset = self.filter_queryset(self.get_queryset())
-
- serializer = self.get_serializer(queryset, many=True)
-
- return Response(serializer.data)
-
diff --git a/xos/onboard/vtr-old/models.py b/xos/onboard/vtr-old/models.py
deleted file mode 100644
index ce2e345..0000000
--- a/xos/onboard/vtr-old/models.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port, AddressPool
-from core.models.plcorebase import StrippedCharField
-import os
-from django.db import models, transaction
-from django.forms.models import model_to_dict
-from django.db.models import Q
-from operator import itemgetter, attrgetter, methodcaller
-from core.models import Tag
-from core.models.service import LeastLoadedNodeScheduler
-from services.volt.models import CordSubscriberRoot
-import traceback
-from xos.exceptions import *
-from xos.config import Config
-
-class ConfigurationError(Exception):
- pass
-
-VTR_KIND = "vTR"
-
-CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
-
-# -------------------------------------------
-# VOLT
-# -------------------------------------------
-
-class VTRService(Service):
- KIND = VTR_KIND
-
- class Meta:
- app_label = "vtr"
- verbose_name = "vTR Service"
- proxy = True
-
-class VTRTenant(Tenant):
- class Meta:
- proxy = True
-
- KIND = VTR_KIND
-
- TEST_CHOICES = ( ("ping", "Ping"), ("traceroute", "Trace Route"), ("tcpdump", "Tcp Dump") )
- SCOPE_CHOICES = ( ("container", "Container"), ("vm", "VM") )
-
- simple_attributes = ( ("test", None),
- ("argument", None),
- ("result", None),
- ("result_code", None),
- ("target_id", None),
- ("scope", "container") )
-
- sync_attributes = ( 'test', 'argument', "scope" )
-
- def __init__(self, *args, **kwargs):
- vtr_services = VTRService.get_service_objects().all()
- if vtr_services:
- self._meta.get_field("provider_service").default = vtr_services[0].id
- super(VTRTenant, self).__init__(*args, **kwargs)
-
- @property
- def target(self):
- if getattr(self, "cached_target", None):
- return self.cached_target
- target_id=self.target_id
- if not target_id:
- return None
- users=CordSubscriberRoot.objects.filter(id=target_id)
- if not users:
- return None
- user=users[0]
- self.cached_target = users[0]
- return user
-
- @target.setter
- def target(self, value):
- if value:
- value = value.id
- if (value != self.get_attribute("target_id", None)):
- self.cached_target=None
- self.target_id = value
-
- def save(self, *args, **kwargs):
- super(VTRTenant, self).save(*args, **kwargs)
-
- def delete(self, *args, **kwargs):
- super(VTRTenant, self).delete(*args, **kwargs)
-
-
-VTRTenant.setup_simple_attributes()
-
diff --git a/xos/onboard/vtr-old/synchronizer/files/run_tcpdump.sh b/xos/onboard/vtr-old/synchronizer/files/run_tcpdump.sh
deleted file mode 100644
index ed75bf0..0000000
--- a/xos/onboard/vtr-old/synchronizer/files/run_tcpdump.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/bash
-INTERFACE=$1
-tcpdump -n -e -i $INTERFACE -c 100 &
-PID_TCPDUMP=$!
-curl http://www.xosproject.org/ &> /dev/null &
-PID_CURL=$!
-sleep 30s
-kill $PID_TCPDUMP
-kill $PID_CURL
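
run_tcpdump.sh captures up to 100 frames on an interface while a curl request generates traffic, then tears both processes down. A rough Python equivalent, assuming tcpdump and curl are installed and the caller has capture privileges:

import subprocess
import time

def run_bounded_capture(interface, seconds=30):
    """Start a bounded tcpdump, trigger some HTTP traffic, then clean up."""
    tcpdump = subprocess.Popen(
        ["tcpdump", "-n", "-e", "-i", interface, "-c", "100"])
    curl = subprocess.Popen(
        ["curl", "-s", "http://www.xosproject.org/"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    time.sleep(seconds)
    for proc in (tcpdump, curl):
        if proc.poll() is None:
            proc.terminate()

# run_bounded_capture("eth1")
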
diff --git a/xos/onboard/vtr-old/synchronizer/manifest b/xos/onboard/vtr-old/synchronizer/manifest
deleted file mode 100644
index 61ffb39..0000000
--- a/xos/onboard/vtr-old/synchronizer/manifest
+++ /dev/null
@@ -1,10 +0,0 @@
-manifest
-vtr-synchronizer.py
-vtn_vtr_synchronizer_config
-steps/sync_vtrtenant.py
-steps/sync_vtrtenant.yaml
-vtr_synchronizer_config
-files/run_tcpdump.sh
-run-vtn.sh
-model-deps
-run.sh
diff --git a/xos/onboard/vtr-old/synchronizer/model-deps b/xos/onboard/vtr-old/synchronizer/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/onboard/vtr-old/synchronizer/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/onboard/vtr-old/synchronizer/run-vtn.sh b/xos/onboard/vtr-old/synchronizer/run-vtn.sh
deleted file mode 100755
index b2f9518..0000000
--- a/xos/onboard/vtr-old/synchronizer/run-vtn.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-export XOS_DIR=/opt/xos
-cp /root/setup/node_key $XOS_DIR/synchronizers/vtr/node_key
-chmod 0600 $XOS_DIR/synchronizers/vtr/node_key
-python vtr-synchronizer.py -C $XOS_DIR/synchronizers/vtr/vtn_vtr_synchronizer_config
diff --git a/xos/onboard/vtr-old/synchronizer/run.sh b/xos/onboard/vtr-old/synchronizer/run.sh
deleted file mode 100755
index 388fdf9..0000000
--- a/xos/onboard/vtr-old/synchronizer/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python vtr-synchronizer.py -C $XOS_DIR/synchronizers/vtr/vtr_synchronizer_config
diff --git a/xos/onboard/vtr-old/synchronizer/steps/sync_vtrtenant.py b/xos/onboard/vtr-old/synchronizer/steps/sync_vtrtenant.py
deleted file mode 100644
index f0f7ef3..0000000
--- a/xos/onboard/vtr-old/synchronizer/steps/sync_vtrtenant.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice, Tag
-from services.vsg.models import VSGService, VCPE_KIND
-from services.vtr.models import VTRService, VTRTenant
-from services.hpc.models import HpcService, CDNPrefix
-from xos.logger import Logger, logging
-
-# add the parent directory (steps/..) to the import path
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
-
-class SyncVTRTenant(SyncInstanceUsingAnsible):
- provides=[VTRTenant]
- observes=VTRTenant
- requested_interval=0
- template_name = "sync_vtrtenant.yaml"
- #service_key_name = "/opt/xos/services/vtr/vcpe_private_key"
-
- def __init__(self, *args, **kwargs):
- super(SyncVTRTenant, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deleted):
- if (not deleted):
- objs = VTRTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
- else:
- objs = VTRTenant.get_deleted_tenant_objects()
-
- return objs
-
- def get_vtr_service(self, o):
- if not o.provider_service:
- return None
-
- vtrs = VTRService.get_service_objects().filter(id=o.provider_service.id)
- if not vtrs:
- return None
-
- return vtrs[0]
-
- def get_vcpe_service(self, o):
- if o.target:
- # o.target is a CordSubscriberRoot
- if o.target.volt and o.target.volt.vcpe:
- vcpes = VSGService.get_service_objects().filter(id=o.target.volt.vcpe.provider_service.id)
- if not vcpes:
- return None
- return vcpes[0]
- return None
-
- def get_instance(self, o):
- if o.target and o.target.volt and o.target.volt.vcpe:
- return o.target.volt.vcpe.instance
- else:
- return None
-
- def get_key_name(self, instance):
- if instance.slice.service and (instance.slice.service.kind==VCPE_KIND):
- # We need to use the vsg service's private key. Onboarding won't
- # by default give us another service's private key, so let's assume
- # onboarding has been configured to add vsg_rsa to the vtr service.
- return "/opt/xos/services/vtr/keys/vsg_rsa"
- else:
- raise Exception("VTR doesn't know how to get the private key for this instance")
-
- def get_extra_attributes(self, o):
- vtr_service = self.get_vtr_service(o)
- vcpe_service = self.get_vcpe_service(o)
-
- if not vcpe_service:
- raise Exception("No vcpeservice")
-
- instance = self.get_instance(o)
-
- if not instance:
- raise Exception("No instance")
-
- s_tags = []
- c_tags = []
- if o.target and o.target.volt:
- s_tags.append(o.target.volt.s_tag)
- c_tags.append(o.target.volt.c_tag)
-
- fields = {"s_tags": s_tags,
- "c_tags": c_tags,
- "isolation": instance.isolation,
- "container_name": "vcpe-%s-%s" % (s_tags[0], c_tags[0]),
- "dns_servers": [x.strip() for x in vcpe_service.dns_servers.split(",")],
-
- "result_fn": "%s-vcpe-%s-%s" % (o.test, s_tags[0], c_tags[0]),
- "resultcode_fn": "code-%s-vcpe-%s-%s" % (o.test, s_tags[0], c_tags[0]) }
-
- # add in the sync_attributes that come from the vSG object
- # this will be wan_ip, wan_mac, wan_container_ip, wan_container_mac, ...
- if o.target and o.target.volt and o.target.volt.vcpe:
- for attribute_name in o.target.volt.vcpe.sync_attributes:
- fields[attribute_name] = getattr(o.target.volt.vcpe, attribute_name)
-
- # add in the sync_attributes that come from the SubscriberRoot object
- if o.target and hasattr(o.target, "sync_attributes"):
- for attribute_name in o.target.sync_attributes:
- fields[attribute_name] = getattr(o.target, attribute_name)
-
- for attribute_name in o.sync_attributes:
- fields[attribute_name] = getattr(o,attribute_name)
-
- return fields
-
- def sync_fields(self, o, fields):
- # the super causes the playbook to be run
-
- super(SyncVTRTenant, self).sync_fields(o, fields)
-
- def run_playbook(self, o, fields):
- o.result = ""
-
- result_fn = os.path.join("/opt/xos/synchronizers/vtr/result", fields["result_fn"])
- if os.path.exists(result_fn):
- os.remove(result_fn)
-
- resultcode_fn = os.path.join("/opt/xos/synchronizers/vtr/result", fields["resultcode_fn"])
- if os.path.exists(resultcode_fn):
- os.remove(resultcode_fn)
-
- super(SyncVTRTenant, self).run_playbook(o, fields)
-
- if os.path.exists(result_fn):
- o.result = open(result_fn).read()
-
- if os.path.exists(resultcode_fn):
- o.result_code = open(resultcode_fn).read()
-
-
- def delete_record(self, m):
- pass
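For context on the result plumbing above: get_extra_attributes() picks the result file names, the playbook writes them on the instance, Ansible fetches them back, and run_playbook() reads them into o.result and o.result_code. A minimal helper that mirrors the same paths and naming convention (it is illustrative only, not part of the synchronizer):

    import os

    RESULT_DIR = "/opt/xos/synchronizers/vtr/result"  # same directory run_playbook() uses

    def read_result(test, s_tag, c_tag):
        # Mirrors get_extra_attributes(): ("ping", 222, 111) maps to
        # "ping-vcpe-222-111" and "code-ping-vcpe-222-111".
        result_fn = os.path.join(RESULT_DIR, "%s-vcpe-%s-%s" % (test, s_tag, c_tag))
        code_fn = os.path.join(RESULT_DIR, "code-%s-vcpe-%s-%s" % (test, s_tag, c_tag))
        result = open(result_fn).read() if os.path.exists(result_fn) else ""
        code = open(code_fn).read() if os.path.exists(code_fn) else ""
        return result, code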
diff --git a/xos/onboard/vtr-old/synchronizer/steps/sync_vtrtenant.yaml b/xos/onboard/vtr-old/synchronizer/steps/sync_vtrtenant.yaml
deleted file mode 100644
index 35d9032..0000000
--- a/xos/onboard/vtr-old/synchronizer/steps/sync_vtrtenant.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
----
-- hosts: {{ instance_name }}
- #gather_facts: False
- connection: ssh
- user: ubuntu
- sudo: yes
- vars:
- container_name: {{ container_name }}
- wan_container_ip: {{ wan_container_ip }}
- wan_container_netbits: {{ wan_container_netbits }}
- wan_container_mac: {{ wan_container_mac }}
- wan_container_gateway_ip: {{ wan_container_gateway_ip }}
- wan_vm_ip: {{ wan_vm_ip }}
- wan_vm_mac: {{ wan_vm_mac }}
-
- scope: {{ scope }}
- test: {{ test }}
- argument: {{ argument }}
- result_fn: {{ result_fn }}
- resultcode_fn: {{ resultcode_fn }}
-
-
- tasks:
- - name: Remove any old result file
- shell: rm -f /tmp/{{ result_fn }}
-
- - name: Copy run_tcpdump.sh to VM
- copy: src=/opt/xos/synchronizers/vtr/files/run_tcpdump.sh dest=/root/run_tcpdump.sh mode=0755
- when: (test=="tcpdump")
-
-
-# -----------------
-# scope == VM
-# -----------------
-
- - name: Send the pings from VM
- shell: ping -c 10 {{ argument }} 2>&1 > /tmp/{{ result_fn }}
- ignore_errors: yes
- register: vm_ping_result
- when: (scope=="vm") and (test=="ping")
-
- - name: Store VM ping resultcode to file
- shell: echo "{{ '{{' }} vm_ping_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
- when: (scope=="vm") and (test=="ping")
-
- - name: Install traceroute
- apt: name=traceroute state=present
- when: (scope=="vm") and (test=="traceroute")
-
- - name: Send traceroute from VM
- shell: traceroute {{ argument }} 2>&1 > /tmp/{{ result_fn }}
- ignore_errors: yes
- register: vm_traceroute_result
- when: (scope=="vm") and (test=="traceroute")
-
- - name: Store VM traceroute resultcode to file
- shell: echo "{{ '{{' }} vm_traceroute_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
- when: (scope=="vm") and (test=="traceroute")
-
- - name: Run tcpdump for 30 seconds on VM
- shell: /root/run_tcpdump.sh {{ argument }} 2>&1 > /tmp/{{ result_fn }}
- ignore_errors: yes
- register: vm_tcpdump_result
- when: (scope=="vm") and (test=="tcpdump")
-
- - name: Store VM tcpdump resultcode to file
- shell: echo "{{ '{{' }} vm_tcpdump_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
- when: (scope=="vm") and (test=="tcpdump")
-
-# ------------------
-# scope == container
-# ------------------
-
- - name: Send the pings from Container
- shell: docker exec {{ container_name }} ping -c 10 {{ argument }} 2>&1 > /tmp/{{ result_fn }}
- ignore_errors: yes
- register: ctr_ping_result
- when: (scope=="container") and (test=="ping")
-
- - name: Store ctr ping resultcode to file
- shell: echo "{{ '{{' }} ctr_ping_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
- when: (scope=="container") and (test=="ping")
-
- - name: Install traceroute into Container
- shell: docker exec {{ container_name }} apt-get -y install traceroute
- when: (scope=="container") and (test=="traceroute")
-
- - name: Send traceroute from Container
- shell: docker exec {{ container_name }} traceroute {{ argument }} 2>&1 > /tmp/{{ result_fn }}
- ignore_errors: yes
- register: ctr_traceroute_result
- when: (scope=="container") and (test=="traceroute")
-
- - name: Store ctr traceroute resultcode to file
- shell: echo "{{ '{{' }} ctr_traceroute_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
- when: (scope=="container") and (test=="traceroute")
-
- - name: Copy run_tcpdump.sh to container
- command: docker cp /root/run_tcpdump.sh {{ container_name }}:/root/run_tcpdump.sh
- when: (scope=="container") and (test=="tcpdump")
-
- - name: Run tcpdump for 30 seconds from Container
- shell: docker exec {{ container_name }} /root/run_tcpdump.sh {{ argument }} 2>&1 > /tmp/{{ result_fn }}
- ignore_errors: yes
- register: ctr_tcpdump_result
- when: (scope=="container") and (test=="tcpdump")
-
- - name: Store ctr tcpdump resultcode to file
- shell: echo "{{ '{{' }} ctr_tcpdump_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
- when: (scope=="container") and (test=="tcpdump")
-
-# ------------------
-# scope == *
-# ------------------
- - name: Fetch the result
- fetch: src=/tmp/{{ result_fn }} dest=/opt/xos/synchronizers/vtr/result/{{ result_fn }} flat=yes
-
- - name: Fetch the resultcode
- fetch: src=/tmp/{{ resultcode_fn }} dest=/opt/xos/synchronizers/vtr/result/{{ resultcode_fn }} flat=yes
-
-
-
-
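A note on the escaped braces in the resultcode tasks above: the playbook is rendered twice, first by the XOS synchronizer (which substitutes fields such as resultcode_fn) and then by Ansible at run time (which knows the registered variables). The {{ '{{' }} ... {{ '}}' }} escape keeps the Ansible expression intact through the first pass. A small illustration using jinja2 directly (the synchronizer's own template machinery differs):

    from jinja2 import Template

    line = "echo \"{{ '{{' }} vm_ping_result.rc {{ '}}' }}\" > /tmp/{{ resultcode_fn }}"
    first_pass = Template(line).render(resultcode_fn="code-ping-vcpe-222-111")
    print(first_pass)
    # -> echo "{{ vm_ping_result.rc }}" > /tmp/code-ping-vcpe-222-111
    # The literal {{ vm_ping_result.rc }} is evaluated later by Ansible.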
diff --git a/xos/onboard/vtr-old/synchronizer/vtn_vtr_synchronizer_config b/xos/onboard/vtr-old/synchronizer/vtn_vtr_synchronizer_config
deleted file mode 100644
index 2c9140a..0000000
--- a/xos/onboard/vtr-old/synchronizer/vtn_vtr_synchronizer_config
+++ /dev/null
@@ -1,47 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vtr
-dependency_graph=/opt/xos/synchronizers/vtr/model-deps
-steps_dir=/opt/xos/synchronizers/vtr/steps
-sys_dir=/opt/xos/synchronizers/vtr/sys
-deleters_dir=/opt/xos/synchronizers/vtr/deleters
-log_file=console
-#/var/log/vtr.log
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-full_setup=True
-proxy_ssh=True
-proxy_ssh_key=/opt/xos/synchronizers/vtr/node_key
-proxy_ssh_user=root
-
-[networking]
-use_vtn=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/onboard/vtr-old/synchronizer/vtr-synchronizer.py b/xos/onboard/vtr-old/synchronizer/vtr-synchronizer.py
deleted file mode 100755
index 84bec4f..0000000
--- a/xos/onboard/vtr-old/synchronizer/vtr-synchronizer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../synchronizers/base/xos-synchronizer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/onboard/vtr-old/synchronizer/vtr_synchronizer_config b/xos/onboard/vtr-old/synchronizer/vtr_synchronizer_config
deleted file mode 100644
index 51bf25a..0000000
--- a/xos/onboard/vtr-old/synchronizer/vtr_synchronizer_config
+++ /dev/null
@@ -1,41 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vtr
-dependency_graph=/opt/xos/synchronizers/vtr/model-deps
-steps_dir=/opt/xos/synchronizers/vtr/steps
-sys_dir=/opt/xos/synchronizers/vtr/sys
-deleters_dir=/opt/xos/synchronizers/vtr/deleters
-log_file=console
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-proxy_ssh=False
-full_setup=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/onboard/vtr-old/templates/vtradmin.html b/xos/onboard/vtr-old/templates/vtradmin.html
deleted file mode 100644
index e8a33bc..0000000
--- a/xos/onboard/vtr-old/templates/vtradmin.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<div class = "row text-center">
- <div class="col-xs-12">
- <a href="/admin/vtr/vtrtenant/">vTR Tenants</a>
- </div>
-</div>
-
diff --git a/xos/onboard/vtr-old/vtr-onboard.yaml b/xos/onboard/vtr-old/vtr-onboard.yaml
deleted file mode 100644
index 38dddd1..0000000
--- a/xos/onboard/vtr-old/vtr-onboard.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-description: Onboard the vtr service
-
-imports:
- - custom_types/xos.yaml
-
-topology_template:
- node_templates:
- servicecontroller#vtr:
- type: tosca.nodes.ServiceController
- properties:
- base_url: file:///opt/xos/onboard/vtr/
- # The following will concatenate with base_url automatically, if
- # base_url is non-null.
- models: models.py
- admin: admin.py
- admin_template: templates/vtradmin.html
- synchronizer: synchronizer/manifest
- synchronizer_run: vtr-synchronizer.py
- rest_tenant: api/tenant/truckroll.py
- private_key: file:///opt/xos/key_import/vsg_rsa
- public_key: file:///opt/xos/key_import/vsg_rsa.pub
-
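On the base_url comment above: relative entries such as models.py are resolved against base_url by the onboarding machinery. A rough sketch of that behaviour, assuming plain string concatenation for relative names (resolve() is illustrative, not an XOS function):

    def resolve(base_url, resource):
        # Leave absolute URLs alone; join relative names onto base_url.
        if base_url and "://" not in resource:
            return base_url + resource
        return resource

    print(resolve("file:///opt/xos/onboard/vtr/", "models.py"))
    # -> file:///opt/xos/onboard/vtr/models.py
    print(resolve("file:///opt/xos/onboard/vtr/", "synchronizer/manifest"))
    # -> file:///opt/xos/onboard/vtr/synchronizer/manifest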