resolve merge conflicts
diff --git a/xos/cord/admin.py b/xos/cord/admin.py
index b84ee22..4c98750 100644
--- a/xos/cord/admin.py
+++ b/xos/cord/admin.py
@@ -152,7 +152,7 @@
class VCPETenantForm(forms.ModelForm):
bbs_account = forms.CharField(required=False)
creator = forms.ModelChoiceField(queryset=User.objects.all())
- sliver = forms.ModelChoiceField(queryset=Sliver.objects.all(),required=False)
+ instance = forms.ModelChoiceField(queryset=Instance.objects.all(),required=False)
last_ansible_hash = forms.CharField(required=False)
def __init__(self,*args,**kwargs):
@@ -164,12 +164,12 @@
# fields for the attributes
self.fields['bbs_account'].initial = self.instance.bbs_account
self.fields['creator'].initial = self.instance.creator
- self.fields['sliver'].initial = self.instance.sliver
+ self.fields['instance'].initial = self.instance.instance
self.fields['last_ansible_hash'].initial = self.instance.last_ansible_hash
def save(self, commit=True):
self.instance.creator = self.cleaned_data.get("creator")
- self.instance.sliver = self.cleaned_data.get("sliver")
+ self.instance.instance = self.cleaned_data.get("instance")
self.instance.last_ansible_hash = self.cleaned_data.get("last_ansible_hash")
return super(VCPETenantForm, self).save(commit=commit)
@@ -180,7 +180,7 @@
list_display = ('backend_status_icon', 'id', 'subscriber_tenant' )
list_display_links = ('backend_status_icon', 'id')
fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_tenant', 'service_specific_id', # 'service_specific_attribute',
- 'bbs_account', 'creator', 'sliver', 'last_ansible_hash'],
+ 'bbs_account', 'creator', 'instance', 'last_ansible_hash'],
'classes':['suit-tab suit-tab-general']})]
readonly_fields = ('backend_status_text', 'service_specific_attribute', 'bbs_account')
form = VCPETenantForm
diff --git a/xos/cord/models.py b/xos/cord/models.py
index 8befa17..f94befc 100644
--- a/xos/cord/models.py
+++ b/xos/cord/models.py
@@ -1,5 +1,5 @@
from django.db import models
-from core.models import Service, PlCoreBase, Slice, Sliver, Tenant, Node, Image, User, Flavor, Subscriber
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, Node, Image, User, Flavor, Subscriber
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
@@ -470,7 +470,7 @@
"hpc_client_ip",
"wan_mac")
- default_attributes = {"sliver_id": None,
+ default_attributes = {"instance_id": None,
"users": [],
"bbs_account": None,
"last_ansible_hash": None}
@@ -478,8 +478,8 @@
def __init__(self, *args, **kwargs):
super(VCPETenant, self).__init__(*args, **kwargs)
self.cached_vbng=None
- self.cached_sliver=None
- self.orig_sliver_id = self.get_initial_attribute("sliver_id")
+ self.cached_instance=None
+ self.orig_instance_id = self.get_initial_attribute("instance_id")
@property
def image(self):
@@ -495,27 +495,27 @@
raise XOSProgrammingError("No VPCE image (looked for %s)" % str(LOOK_FOR_IMAGES))
@property
- def sliver(self):
- if getattr(self, "cached_sliver", None):
- return self.cached_sliver
- sliver_id=self.get_attribute("sliver_id")
- if not sliver_id:
+ def instance(self):
+ if getattr(self, "cached_instance", None):
+ return self.cached_instance
+ instance_id=self.get_attribute("instance_id")
+ if not instance_id:
return None
- slivers=Sliver.objects.filter(id=sliver_id)
- if not slivers:
+ instances=Instance.objects.filter(id=instance_id)
+ if not instances:
return None
- sliver=slivers[0]
- sliver.caller = self.creator
- self.cached_sliver = sliver
- return sliver
+ instance=instances[0]
+ instance.caller = self.creator
+ self.cached_instance = instance
+ return instance
- @sliver.setter
- def sliver(self, value):
+ @instance.setter
+ def instance(self, value):
if value:
value = value.id
- if (value != self.get_attribute("sliver_id", None)):
- self.cached_sliver=None
- self.set_attribute("sliver_id", value)
+ if (value != self.get_attribute("instance_id", None)):
+ self.cached_instance=None
+ self.set_attribute("instance_id", value)
@property
def creator(self):
@@ -584,10 +584,10 @@
@property
def ssh_command(self):
- if self.sliver:
- return self.sliver.get_ssh_command()
+ if self.instance:
+ return self.instance.get_ssh_command()
else:
- return "no-sliver"
+ return "no-instance"
@ssh_command.setter
def ssh_command(self, value):
@@ -595,7 +595,7 @@
@property
def addresses(self):
- if not self.sliver:
+ if not self.instance:
return {}
addresses = {}
@@ -656,20 +656,20 @@
nodes = list(Node.objects.all())
# TODO: logic to filter nodes by which nodes are up, and which
# nodes the slice can instantiate on.
- nodes = sorted(nodes, key=lambda node: node.slivers.all().count())
+ nodes = sorted(nodes, key=lambda node: node.instances.all().count())
return nodes[0]
- def manage_sliver(self):
- # Each VCPE object owns exactly one sliver.
+ def manage_instance(self):
+ # Each VCPE object owns exactly one instance.
if self.deleted:
return
- if (self.sliver is not None) and (self.sliver.image != self.image):
- self.sliver.delete()
- self.sliver = None
+ if (self.instance is not None) and (self.instance.image != self.image):
+ self.instance.delete()
+ self.instance = None
- if self.sliver is None:
+ if self.instance is None:
if not self.provider_service.slices.count():
raise XOSConfigurationError("The VCPE service has no slices")
@@ -678,26 +678,26 @@
raise XOSConfigurationError("No m1.small flavor")
node =self.pick_node()
- sliver = Sliver(slice = self.provider_service.slices.all()[0],
+ instance = Instance(slice = self.provider_service.slices.all()[0],
node = node,
image = self.image,
creator = self.creator,
deployment = node.site_deployment.deployment,
flavor = flavors[0])
- sliver.save()
+ instance.save()
try:
- self.sliver = sliver
+ self.instance = instance
super(VCPETenant, self).save()
except:
- sliver.delete()
+ instance.delete()
raise
- def cleanup_sliver(self):
- if self.sliver:
- # print "XXX cleanup sliver", self.sliver
- self.sliver.delete()
- self.sliver = None
+ def cleanup_instance(self):
+ if self.instance:
+ # print "XXX cleanup instance", self.instance
+ self.instance.delete()
+ self.instance = None
def manage_vbng(self):
# Each vCPE object owns exactly one vBNG object
@@ -728,11 +728,11 @@
# print "XXX clean up orphaned vbng", vbng
vbng.delete()
- if self.orig_sliver_id and (self.orig_sliver_id != self.get_attribute("sliver_id")):
- slivers=Sliver.objects.filter(id=self.orig_sliver_id)
- if slivers:
- # print "XXX clean up orphaned sliver", slivers[0]
- slivers[0].delete()
+ if self.orig_instance_id and (self.orig_instance_id != self.get_attribute("instance_id")):
+ instances=Instance.objects.filter(id=self.orig_instance_id)
+ if instances:
+ # print "XXX clean up orphaned instance", instances[0]
+ instances[0].delete()
def manage_bbs_account(self):
if self.deleted:
@@ -760,14 +760,14 @@
super(VCPETenant, self).save(*args, **kwargs)
model_policy_vcpe(self.pk)
- #self.manage_sliver()
+ #self.manage_instance()
#self.manage_vbng()
#self.manage_bbs_account()
#self.cleanup_orphans()
def delete(self, *args, **kwargs):
self.cleanup_vbng()
- self.cleanup_sliver()
+ self.cleanup_instance()
super(VCPETenant, self).delete(*args, **kwargs)
def model_policy_vcpe(pk):
@@ -777,7 +777,7 @@
if not vcpe:
return
vcpe = vcpe[0]
- vcpe.manage_sliver()
+ vcpe.manage_instance()
vcpe.manage_vbng()
vcpe.manage_bbs_account()
vcpe.cleanup_orphans()
diff --git a/xos/core/admin.py b/xos/core/admin.py
index a5d4c2b..2dae51b 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -402,47 +402,80 @@
def queryset(self, request):
return Tag.select_by_user(request.user)
-class SliverInline(XOSTabularInline):
- model = Sliver
+class NetworkLookerUpper:
+    """ This is a callable that looks up a network name in an instance and returns
+ the ip address for that network.
+ """
+
+ byNetworkName = {} # class variable
+
+ def __init__(self, name):
+ self.short_description = name
+ self.__name__ = name
+ self.network_name = name
+
+ def __call__(self, obj):
+ if obj is not None:
+ for nbs in obj.networkinstance_set.all():
+ if (nbs.network.name == self.network_name):
+ return nbs.ip
+ return ""
+
+ def __str__(self):
+ return self.network_name
+
+ @staticmethod
+ def get(network_name):
+        """ We want to make sure we always return the same NetworkLookerUpper
+ because sometimes django will cause them to be instantiated multiple
+ times (and we don't want different ones in form.fields vs
+ InstanceInline.readonly_fields).
+ """
+ if network_name not in NetworkLookerUpper.byNetworkName:
+ NetworkLookerUpper.byNetworkName[network_name] = NetworkLookerUpper(network_name)
+ return NetworkLookerUpper.byNetworkName[network_name]
+
+class InstanceInline(XOSTabularInline):
+ model = Instance
fields = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'deployment', 'flavor', 'image', 'node', 'no_sync']
extra = 0
readonly_fields = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name']
- suit_classes = 'suit-tab suit-tab-slivers'
+ suit_classes = 'suit-tab suit-tab-instances'
def queryset(self, request):
- return Sliver.select_by_user(request.user)
+ return Instance.select_by_user(request.user)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if db_field.name == 'deployment':
kwargs['queryset'] = Deployment.select_by_acl(request.user).filter(sitedeployments__nodes__isnull=False).distinct()
- kwargs['widget'] = forms.Select(attrs={'onChange': "sliver_deployment_changed(this);"})
+ kwargs['widget'] = forms.Select(attrs={'onChange': "instance_deployment_changed(this);"})
if db_field.name == 'flavor':
- kwargs['widget'] = forms.Select(attrs={'onChange': "sliver_flavor_changed(this);"})
+ kwargs['widget'] = forms.Select(attrs={'onChange': "instance_flavor_changed(this);"})
- field = super(SliverInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
+ field = super(InstanceInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
return field
-class CordSliverInline(XOSTabularInline):
- model = Sliver
+class CordInstanceInline(XOSTabularInline):
+ model = Instance
fields = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'flavor', 'image', 'node']
extra = 0
readonly_fields = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name']
- suit_classes = 'suit-tab suit-tab-slivers'
+ suit_classes = 'suit-tab suit-tab-instances'
def queryset(self, request):
- return Sliver.select_by_user(request.user)
+ return Instance.select_by_user(request.user)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if db_field.name == 'deployment':
kwargs['queryset'] = Deployment.select_by_acl(request.user).filter(sitedeployments__nodes__isnull=False).distinct()
- kwargs['widget'] = forms.Select(attrs={'onChange': "sliver_deployment_changed(this);"})
+ kwargs['widget'] = forms.Select(attrs={'onChange': "instance_deployment_changed(this);"})
if db_field.name == 'flavor':
- kwargs['widget'] = forms.Select(attrs={'onChange': "sliver_flavor_changed(this);"})
+ kwargs['widget'] = forms.Select(attrs={'onChange': "instance_flavor_changed(this);"})
- field = super(CordSliverInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
+ field = super(CordInstanceInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
return field
@@ -932,7 +965,7 @@
# hide MyInline in the add view
if obj is None:
continue
- if isinstance(inline, SliverInline):
+ if isinstance(inline, InstanceInline):
inline.model.caller = request.user
yield inline.get_formset(request, obj)
@@ -1033,12 +1066,12 @@
class SliceAdmin(XOSBaseAdmin):
form = SliceForm
- fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_slivers']
+ fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_instances']
fieldsets = [('Slice Details', {'fields': fieldList, 'classes':['suit-tab suit-tab-general']}),]
readonly_fields = ('backend_status_text', )
- list_display = ('backend_status_icon', 'name', 'site','serviceClass', 'slice_url', 'max_slivers')
+ list_display = ('backend_status_icon', 'name', 'site','serviceClass', 'slice_url', 'max_instances')
list_display_links = ('backend_status_icon', 'name', )
- normal_inlines = [SlicePrivilegeInline, SliverInline, TagInline, ReservationInline, SliceNetworkInline]
+ normal_inlines = [SlicePrivilegeInline, InstanceInline, TagInline, ReservationInline, SliceNetworkInline]
inlines = normal_inlines
admin_inlines = [ControllerSliceInline]
@@ -1049,7 +1082,7 @@
tabs =[('general', 'Slice Details'),
('slicenetworks','Networks'),
('sliceprivileges','Privileges'),
- ('slivers','Slivers'),
+ ('instances','Instances'),
#('reservations','Reservations'),
('tags','Tags'),
]
@@ -1115,7 +1148,7 @@
# hide MyInline in the add view
if obj is None:
continue
- if isinstance(inline, SliverInline):
+ if isinstance(inline, InstanceInline):
inline.model.caller = request.user
yield inline.get_formset(request, obj)
@@ -1126,7 +1159,7 @@
# XXX this approach is better than clobbering self.inlines, so
# try to make this work post-demo.
if (obj is not None) and (obj.name == "mysite_vcpe"):
- cord_vcpe_inlines = [ SlicePrivilegeInline, CordSliverInline, TagInline, ReservationInline,SliceNetworkInline]
+ cord_vcpe_inlines = [ SlicePrivilegeInline, CordInstanceInline, TagInline, ReservationInline,SliceNetworkInline]
inlines=[]
for inline_class in cord_vcpe_inlines:
@@ -1183,9 +1216,9 @@
]
readonly_fields = ('backend_status_text', )
- suit_form_tabs =(('general','Image Details'),('slivers','Slivers'),('imagedeployments','Deployments'), ('controllerimages', 'Controllers'))
+ suit_form_tabs =(('general','Image Details'),('instances','Instances'),('imagedeployments','Deployments'), ('controllerimages', 'Controllers'))
- inlines = [SliverInline, ControllerImagesInline]
+ inlines = [InstanceInline, ControllerImagesInline]
user_readonly_fields = ['name', 'disk_format', 'container_format']
@@ -1205,22 +1238,22 @@
list_display_links = ('backend_status_icon', 'name', )
list_filter = ('site_deployment',)
- inlines = [TagInline,SliverInline]
+ inlines = [TagInline,InstanceInline]
fieldsets = [('Node Details', {'fields': ['backend_status_text', 'name','site_deployment'], 'classes':['suit-tab suit-tab-details']})]
readonly_fields = ('backend_status_text', )
user_readonly_fields = ['name','site_deployment']
- user_readonly_inlines = [TagInline,SliverInline]
+ user_readonly_inlines = [TagInline,InstanceInline]
- suit_form_tabs =(('details','Node Details'),('slivers','Slivers'))
+ suit_form_tabs =(('details','Node Details'),('instances','Instances'))
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'site':
kwargs['queryset'] = Site.select_by_user(request.user).filter(hosts_nodes=True)
-class SliverForm(forms.ModelForm):
+class InstanceForm(forms.ModelForm):
class Meta:
- model = Sliver
+ model = Instance
ip = forms.CharField(widget=PlainTextWidget)
instance_name = forms.CharField(widget=PlainTextWidget)
widgets = {
@@ -1239,8 +1272,8 @@
user_readonly_fields = ['service', 'name', 'value', 'content_type', 'content_object',]
user_readonly_inlines = []
-class SliverPortInline(XOSTabularInline):
- fields = ['backend_status_icon', 'network', 'sliver', 'ip']
+class InstancePortInline(XOSTabularInline):
+ fields = ['backend_status_icon', 'network', 'instance', 'ip']
readonly_fields = ("backend_status_icon", "ip", )
model = Port
selflink_fieldname = "network"
@@ -1249,16 +1282,16 @@
verbose_name = "Port"
suit_classes = 'suit-tab suit-tab-ports'
-class SliverAdmin(XOSBaseAdmin):
- form = SliverForm
+class InstanceAdmin(XOSBaseAdmin):
+ form = InstanceForm
fieldsets = [
- ('Sliver Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'node', 'all_ips_string', 'instance_id', 'instance_name', 'flavor', 'image', 'ssh_command', 'no_sync'], 'classes': ['suit-tab suit-tab-general'], })
+ ('Instance Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'node', 'all_ips_string', 'instance_id', 'instance_name', 'flavor', 'image', 'ssh_command', 'no_sync'], 'classes': ['suit-tab suit-tab-general'], })
]
readonly_fields = ('backend_status_text', 'ssh_command', 'all_ips_string')
list_display = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'flavor', 'image', 'node', 'deployment']
list_display_links = ('backend_status_icon', 'all_ips_string', 'instance_id', )
- suit_form_tabs =(('general', 'Sliver Details'), ('ports', 'Ports'))
+ suit_form_tabs =(('general', 'Instance Details'), ('ports', 'Ports'))
-    inlines = [TagInline, SliverPortInline]
+    inlines = [TagInline, InstancePortInline]
@@ -1275,12 +1308,12 @@
if db_field.name == 'slice':
kwargs['queryset'] = Slice.select_by_user(request.user)
- return super(SliverAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
+ return super(InstanceAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def queryset(self, request):
- # admins can see all slivers. Users can only see slivers of
+ # admins can see all instances. Users can only see instances of
# the slices they belong to.
- return Sliver.select_by_user(request.user)
+ return Instance.select_by_user(request.user)
def get_formsets(self, request, obj=None):
@@ -1294,7 +1327,7 @@
# hide MyInline in the add view
if obj is None:
continue
- if isinstance(inline, SliverInline):
+ if isinstance(inline, InstanceInline):
inline.model.caller = request.user
yield inline.get_formset(request, obj)
@@ -1515,8 +1548,8 @@
field.initial = field.queryset.all()[0]
else:
field.queryset = field.queryset.none()
- elif db_field.name == 'sliver':
- # restrict slivers to those that belong to the slice
+ elif db_field.name == 'instance':
+ # restrict instances to those that belong to the slice
if request._slice is not None:
field.queryset = field.queryset.filter(slice = request._slice)
else:
@@ -1668,10 +1701,10 @@
readonly_fields = ('backend_status_icon', )
class NetworkPortInline(XOSTabularInline):
- fields = ['backend_status_icon', 'network', 'sliver', 'ip']
+ fields = ['backend_status_icon', 'network', 'instance', 'ip']
readonly_fields = ("backend_status_icon", "ip", )
model = Port
- selflink_fieldname = "sliver"
+ selflink_fieldname = "instance"
extra = 0
verbose_name_plural = "Ports"
verbose_name = "Port"
@@ -1708,7 +1741,6 @@
list_display = ("backend_status_icon", "name", "subnet", "ports", "labels")
list_display_links = ('backend_status_icon', 'name', )
readonly_fields = ("subnet", )
-
inlines = [NetworkParameterInline, NetworkPortInline, NetworkSlicesInline, RouterInline]
admin_inlines = [ControllerNetworkInline]
@@ -1959,7 +1991,7 @@
admin.site.register(Node, NodeAdmin)
#admin.site.register(SlicePrivilege, SlicePrivilegeAdmin)
#admin.site.register(SitePrivilege, SitePrivilegeAdmin)
- admin.site.register(Sliver, SliverAdmin)
+ admin.site.register(Instance, InstanceAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(DashboardView, DashboardViewAdmin)
admin.site.register(Flavor, FlavorAdmin)
diff --git a/xos/core/api/slivers.py b/xos/core/api/instances.py
similarity index 64%
rename from xos/core/api/slivers.py
rename to xos/core/api/instances.py
index 2ce24c7..92e928c 100644
--- a/xos/core/api/slivers.py
+++ b/xos/core/api/instances.py
@@ -1,26 +1,26 @@
from types import StringTypes
from django.contrib.auth import authenticate
from openstack.manager import OpenStackManager
-from core.models import Sliver, Slice
+from core.models import Instance, Slice
from core.api.images import _get_images
from core.api.slices import _get_slices
from core.api.deployment_networks import _get_deployment_networks
from core.api.nodes import _get_nodes
-def _get_slivers(filter):
+def _get_instances(filter):
if isinstance(filter, StringTypes) and filter.isdigit():
filter = int(filter)
if isinstance(filter, int):
- slivers = Sliver.objects.filter(id=filter)
+ instances = Instance.objects.filter(id=filter)
elif isinstance(filter, StringTypes):
- slivers = Sliver.objects.filter(name=filter)
+ instances = Instance.objects.filter(name=filter)
elif isinstance(filter, dict):
- slivers = Sliver.objects.filter(**filter)
+ instances = Instance.objects.filter(**filter)
else:
- slivers = []
- return slivers
+ instances = []
+ return instances
-def add_sliver(auth, fields):
+def add_instance(auth, fields):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
@@ -33,33 +33,33 @@
if deployment_networks: fields['deploymentNetwork'] = deployment_networks[0]
if nodes: fields['node'] = nodes[0]
- sliver = Sliver(**fields)
- auth['tenant'] = sliver.slice.name
- sliver.os_manager = OpenStackManager(auth=auth, caller = user)
- sliver.save()
- return sliver
+ instance = Instance(**fields)
+ auth['tenant'] = instance.slice.name
+ instance.os_manager = OpenStackManager(auth=auth, caller = user)
+ instance.save()
+ return instance
-def update_sliver(auth, sliver, **fields):
+def update_instance(auth, instance, **fields):
return
-def delete_sliver(auth, filter={}):
+def delete_instance(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
- slivers = _get_slivers(filter)
- for sliver in slivers:
- auth['tenant'] = sliver.slice.name
+ instances = _get_instances(filter)
+ for instance in instances:
+ auth['tenant'] = instance.slice.name
slice.os_manager = OpenStackManager(auth=auth, caller = user)
- sliver.delete()
+ instance.delete()
return 1
-def get_slivers(auth, filter={}):
+def get_instances(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
if 'slice' in filter:
slices = _get_slices(filter.get('slice'))
if slices: filter['slice'] = slices[0]
- slivers = _get_slivers(filter)
- return slivers
+ instances = _get_instances(filter)
+ return instances
diff --git a/xos/core/api_root.py b/xos/core/api_root.py
index 4ac267a..c3d69f1 100644
--- a/xos/core/api_root.py
+++ b/xos/core/api_root.py
@@ -15,7 +15,7 @@
'serviceresources': reverse('serviceresource-list', request=request, format=format),
'sites': reverse('site-list', request=request, format=format),
'slices': reverse('slice-list', request=request, format=format),
- 'slivers': reverse('sliver-list', request=request, format=format),
+ 'instances': reverse('instance-list', request=request, format=format),
'tags': reverse('tag-list', request=request, format=format),
'users': reverse('user-list', request=request, format=format),
})
diff --git a/xos/core/dashboard/sites.py b/xos/core/dashboard/sites.py
index b77a096..de100af 100644
--- a/xos/core/dashboard/sites.py
+++ b/xos/core/dashboard/sites.py
@@ -14,7 +14,7 @@
from django.conf.urls import patterns, url
from views import DashboardCustomize, DashboardDynamicView, SimulatorView, LoggedInView, \
DashboardUserSiteView, \
- TenantViewData, TenantCreateSlice, TenantAddUser,TenantAddOrRemoveSliverView, TenantPickSitesView, TenantDeleteSliceView, \
+ TenantViewData, TenantCreateSlice, TenantAddUser,TenantAddOrRemoveInstanceView, TenantPickSitesView, TenantDeleteSliceView, \
TenantUpdateSlice, DashboardSliceInteractions, RequestAccessView
from views import view_urls
@@ -46,8 +46,8 @@
name="welcome"),
url(r'^simulator/', self.admin_view(SimulatorView.as_view()),
name="simulator"),
- url(r'^tenantaddorremsliver/$', self.admin_view(TenantAddOrRemoveSliverView.as_view()),
- name="tenantaddorremsliver"),
+ url(r'^tenantaddorreminstance/$', self.admin_view(TenantAddOrRemoveInstanceView.as_view()),
+ name="tenantaddorreminstance"),
url(r'^tenantview/$', self.admin_view(TenantViewData.as_view()),
name="tenantview"),
url(r'^createnewslice/$', self.admin_view(TenantCreateSlice.as_view()),
diff --git a/xos/core/dashboard/views/__init__.py b/xos/core/dashboard/views/__init__.py
index bbe403b..8c693c8 100644
--- a/xos/core/dashboard/views/__init__.py
+++ b/xos/core/dashboard/views/__init__.py
@@ -1,7 +1,7 @@
#from home import DashboardWelcomeView, DashboardDynamicView
-#from tenant import TenantCreateSlice, TenantUpdateSlice, TenantDeleteSliceView, TenantAddOrRemoveSliverView, TenantPickSitesView, TenantViewData
+#from tenant import TenantCreateSlice, TenantUpdateSlice, TenantDeleteSliceView, TenantAddOrRemoveInstanceView, TenantPickSitesView, TenantViewData
#from simulator import SimulatorView
-#from cdn import DashboardSummaryAjaxView, DashboardAddOrRemoveSliverView, DashboardAjaxView
+#from cdn import DashboardSummaryAjaxView, DashboardAddOrRemoveInstanceView, DashboardAjaxView
#from analytics import DashboardAnalyticsAjaxView
#from customize import DashboardCustomize
#from interactions import DashboardSliceInteractions
diff --git a/xos/core/dashboard/views/download_ssh_commands.py b/xos/core/dashboard/views/download_ssh_commands.py
index 96d1393..92bcac4 100644
--- a/xos/core/dashboard/views/download_ssh_commands.py
+++ b/xos/core/dashboard/views/download_ssh_commands.py
@@ -9,8 +9,8 @@
def get(self, request, sliceid=None, **kwargs):
#slice = Slices.objects.get(id=sliceid);
- #for sliver in slice.slivers.all():
- # if (sliver.instance_id && sliver.instance_name):
+ #for instance in slice.instances.all():
+ # if (instance.instance_id && instance.instance_name):
slice = SlicePlus.objects.get(id=sliceid)
diff --git a/xos/core/dashboard/views/interactions.py b/xos/core/dashboard/views/interactions.py
index 89bc766..f74d01e 100644
--- a/xos/core/dashboard/views/interactions.py
+++ b/xos/core/dashboard/views/interactions.py
@@ -28,11 +28,11 @@
elif name=="sites":
result["title"] = "Slice interactions by site ownership"
result["objectName"] = "sites"
- elif name=="sliver_sites":
- result["title"] = "Slice interactions by sliver sites"
+ elif name=="instance_sites":
+ result["title"] = "Slice interactions by instance sites"
result["objectName"] = "sites"
- elif name=="sliver_nodes":
- result["title"] = "Slice interactions by sliver nodes"
+ elif name=="instance_nodes":
+ result["title"] = "Slice interactions by instance nodes"
result["objectName"] = "nodes"
return HttpResponse(json.dumps(result), content_type='application/javascript')
@@ -83,12 +83,12 @@
ids.append(sp.network.id)
elif name=="sites":
ids = [slice.site.id]
- elif name=="sliver_sites":
- for sp in slice.slivers.all():
+ elif name=="instance_sites":
+ for sp in slice.instances.all():
if sp.node.site.id not in ids:
ids.append(sp.node.site.id)
- elif name=="sliver_nodes":
- for sp in slice.slivers.all():
+ elif name=="instance_nodes":
+ for sp in slice.instances.all():
if sp.node.id not in ids:
ids.append(sp.node.id)
return ids
diff --git a/xos/core/dashboard/views/shell.py b/xos/core/dashboard/views/shell.py
index e852b16..c49133b 100644
--- a/xos/core/dashboard/views/shell.py
+++ b/xos/core/dashboard/views/shell.py
@@ -20,20 +20,20 @@
d2[k] = v
return d2
-def sliver_to_dict(sliver):
- d = model_to_dict(sliver)
- d["slice_id"] = sliver.slice.id
- d["node_id"] = sliver.node.id
+def instance_to_dict(instance):
+ d = model_to_dict(instance)
+ d["slice_id"] = instance.slice.id
+ d["node_id"] = instance.node.id
return d
def slice_to_dict(slice):
d = model_to_dict(slice)
- d["slivers"] = [sliver_to_dict(x) for x in slice.slivers]
+ d["instances"] = [instance_to_dict(x) for x in slice.instances]
return d
def node_to_dict(node):
d = model_to_dict(node)
- d["slivers"] = []
+ d["instances"] = []
class OpenCloudData:
@@ -43,7 +43,7 @@
def loadAll(self):
self.allNodes = list(Node.objects.all())
self.allSlices = list(Slice.objects.all())
- self.allSlivers = list(Sliver.objects.all())
+ self.allInstances = list(Instance.objects.all())
self.allSites = list(Site.objects.all())
self.site_id = {}
@@ -56,29 +56,29 @@
self.node_id = {}
for node in self.allNodes:
d = model_to_dict(node)
- d["sliver_ids"] = []
+ d["instance_ids"] = []
self.node_id[node.id] = ensure_serializable(d)
self.site_id[node.site_id]["node_ids"].append(node.id)
self.slice_id = {}
for slice in self.allSlices:
d = model_to_dict(slice)
- d["sliver_ids"] = []
+ d["instance_ids"] = []
self.slice_id[slice.id] = ensure_serializable(d)
self.site_id[slice.site_id]["slice_ids"].append(site.id)
print self.slice_id.keys()
- self.sliver_id = {}
- for sliver in self.allSlivers:
- self.sliver_id[sliver.id] = model_to_dict(sliver)
+ self.instance_id = {}
+ for instance in self.allInstances:
+ self.instance_id[instance.id] = model_to_dict(instance)
- self.slice_id[sliver.slice_id]["sliver_ids"].append(sliver.id)
- self.node_id[sliver.node_id]["sliver_ids"].append(sliver.id)
+ self.slice_id[instance.slice_id]["instance_ids"].append(instance.id)
+ self.node_id[instance.node_id]["instance_ids"].append(instance.id)
def get_opencloud_data(self):
return {"slices": self.slice_id.values(),
- "slivers": self.sliver_id.values(),
+ "instances": self.instance_id.values(),
"nodes": self.node_id.values(),
"sites": self.site_id.values()}
diff --git a/xos/core/dashboard/views/tenant.py b/xos/core/dashboard/views/tenant.py
index 951efc2..6fbc4d6 100644
--- a/xos/core/dashboard/views/tenant.py
+++ b/xos/core/dashboard/views/tenant.py
@@ -201,21 +201,21 @@
preferredImage = entry.image_preference
#sliceDataSet = entry.mount_data_sets
sliceNetwork = {}
- numSliver = 0
+ numInstance = 0
sliceImage=""
sliceSite = {}
sliceNode = {}
sliceInstance= {}
#createPrivateVolume(user,sliceName)
available_sites = getAvailableSites()
- for sliver in slice.slivers.all():
- if sliver.node.site.name in available_sites:
- sliceSite[sliver.node.site.name] = sliceSite.get(sliver.node.site.name,0) + 1
- sliceImage = sliver.image.name
- sliceNode[str(sliver)] = sliver.node.name
- numSliver = sum(sliceSite.values())
+ for instance in slice.instances.all():
+ if instance.node.site.name in available_sites:
+ sliceSite[instance.node.site.name] = sliceSite.get(instance.node.site.name,0) + 1
+ sliceImage = instance.image.name
+ sliceNode[str(instance)] = instance.node.name
+ numInstance = sum(sliceSite.values())
numSites = len(sliceSite)
- userSliceInfo.append({'sliceName': sliceName,'sliceServiceClass': sliceServiceClass,'preferredImage':preferredImage,'numOfSites':numSites, 'sliceSite':sliceSite,'sliceImage':sliceImage,'numOfSlivers':numSliver,'instanceNodePair':sliceNode})
+ userSliceInfo.append({'sliceName': sliceName,'sliceServiceClass': sliceServiceClass,'preferredImage':preferredImage,'numOfSites':numSites, 'sliceSite':sliceSite,'sliceImage':sliceImage,'numOfInstances':numInstance,'instanceNodePair':sliceNode})
return userSliceInfo
def getTenantSitesInfo():
@@ -344,14 +344,14 @@
sliceToDel.delete()
return HttpResponse(json.dumps("Slice deleted"), content_type='application/javascript')
-class TenantAddOrRemoveSliverView(View):
- """ Add or remove slivers from a Slice
+class TenantAddOrRemoveInstanceView(View):
+ """ Add or remove instances from a Slice
Arguments:
siteName - name of site. If not specified, XOS will pick the
best site.,
actionToDo - [add | rem]
- count - number of slivers to add or remove
+ count - number of instances to add or remove
sliceName - name of slice
noAct - if set, no changes will be made to db, but result will still
show which sites would have been modified.
@@ -384,9 +384,9 @@
if (siteList is None):
siteList = tenant_pick_sites(user, user_ip, slice, count)
- sitesChanged = slice_increase_slivers(request.user, user_ip, siteList, slice, image, count, noAct)
+ sitesChanged = slice_increase_instances(request.user, user_ip, siteList, slice, image, count, noAct)
elif (actionToDo == "rem"):
- sitesChanged = slice_decrease_slivers(request.user, siteList, slice, count, noAct)
+ sitesChanged = slice_decrease_instances(request.user, siteList, slice, count, noAct)
else:
return HttpResponseServerError("Unknown actionToDo %s" % actionToDo)
@@ -411,16 +411,16 @@
def siteSortKey(site, slice=None, count=None, lat=None, lon=None):
# try to pick a site we're already using
- has_slivers_here=False
+ has_instances_here=False
if slice:
- for sliver in slice.slivers.all():
- if sliver.node.site.name == site.name:
- has_slivers_here=True
+ for instance in slice.instances.all():
+ if instance.node.site.name == site.name:
+ has_instances_here=True
# Haversine method
d = haversine(site.location.latitude, site.location.longitude, lat, lon)
- return (-has_slivers_here, d)
+ return (-has_instances_here, d)
def tenant_pick_sites(user, user_ip=None, slice=None, count=None):
""" Returns list of sites, sorted from most favorable to least favorable """
diff --git a/xos/core/dashboard/views/view_common.py b/xos/core/dashboard/views/view_common.py
index 37da7cf..0ef422e 100644
--- a/xos/core/dashboard/views/view_common.py
+++ b/xos/core/dashboard/views/view_common.py
@@ -66,67 +66,67 @@
continue
slice = slice[0]
slicename = slice.name
- sliverList=Sliver.objects.all()
+ instanceList=Instance.objects.all()
sites_used = {}
- for sliver in slice.slivers.all():
- #sites_used['deploymentSites'] = sliver.node.deployment.name
- # sites_used[sliver.image.name] = sliver.image.name
- sites_used[sliver.node.site_deployment.site] = 1 #sliver.numberCores
+ for instance in slice.instances.all():
+ #sites_used['deploymentSites'] = instance.node.deployment.name
+ # sites_used[instance.image.name] = instance.image.name
+ sites_used[instance.node.site_deployment.site] = 1 #instance.numberCores
sliceid = Slice.objects.get(id=entry.slice.id).id
try:
- sliverList = Sliver.objects.filter(slice=entry.slice.id)
+ instanceList = Instance.objects.filter(slice=entry.slice.id)
siteList = {}
- for x in sliverList:
+ for x in instanceList:
if x.node.site_deployment.site not in siteList:
siteList[x.node.site_deployment.site] = 1
- slivercount = len(sliverList)
+ instancecount = len(instanceList)
sitecount = len(siteList)
except:
traceback.print_exc()
- slivercount = 0
+ instancecount = 0
sitecount = 0
userSliceInfo.append({'slicename': slicename, 'sliceid':sliceid,
'sitesUsed':sites_used,
'role': SliceRole.objects.get(id=entry.role.id).role,
- 'slivercount': slivercount,
+ 'instancecount': instancecount,
'sitecount':sitecount})
return userSliceInfo
-def slice_increase_slivers(user, user_ip, siteList, slice, image, count, noAct=False):
+def slice_increase_instances(user, user_ip, siteList, slice, image, count, noAct=False):
sitesChanged = {}
- # let's compute how many slivers are in use in each node of each site
+ # let's compute how many instances are in use in each node of each site
for site in siteList:
site.nodeList = list(site.nodes.all())
for node in site.nodeList:
- node.sliverCount = 0
- for sliver in node.slivers.all():
- if sliver.slice.id == slice.id:
- node.sliverCount = node.sliverCount + 1
+ node.instanceCount = 0
+ for instance in node.instances.all():
+ if instance.slice.id == slice.id:
+ node.instanceCount = node.instanceCount + 1
- # Allocate slivers to nodes
- # for now, assume we want to allocate all slivers from the same site
+ # Allocate instances to nodes
+ # for now, assume we want to allocate all instances from the same site
nodes = siteList[0].nodeList
while (count>0):
- # Sort the node list by number of slivers per node, then pick the
- # node with the least number of slivers.
- nodes = sorted(nodes, key=attrgetter("sliverCount"))
+ # Sort the node list by number of instances per node, then pick the
+ # node with the least number of instances.
+ nodes = sorted(nodes, key=attrgetter("instanceCount"))
node = nodes[0]
- print "adding sliver at node", node.name, "of site", node.site.name
+ print "adding instance at node", node.name, "of site", node.site.name
if not noAct:
- sliver = Sliver(name=node.name,
+ instance = Instance(name=node.name,
slice=slice,
node=node,
image = image,
creator = User.objects.get(email=user),
deploymentNetwork=node.deployment)
- sliver.save()
+ instance.save()
- node.sliverCount = node.sliverCount + 1
+ node.instanceCount = node.instanceCount + 1
count = count - 1
@@ -134,20 +134,20 @@
return sitesChanged
-def slice_decrease_slivers(user, siteList, slice, count, noAct=False):
+def slice_decrease_instances(user, siteList, slice, count, noAct=False):
sitesChanged = {}
if siteList:
siteNames = [site.name for site in siteList]
else:
siteNames = None
- for sliver in list(slice.slivers.all()):
+ for instance in list(slice.instances.all()):
if count>0:
- if(not siteNames) or (sliver.node.site.name in siteNames):
- sliver.delete()
- print "deleting sliver",sliver.name,"at node",sliver.node.name
+ if(not siteNames) or (instance.node.site.name in siteNames):
+ instance.delete()
+ print "deleting instance",instance.name,"at node",instance.node.name
count=count-1
- sitesChanged[sliver.node.site.name] = sitesChanged.get(sliver.node.site.name,0) - 1
+ sitesChanged[instance.node.site.name] = sitesChanged.get(instance.node.site.name,0) - 1
return sitesChanged
diff --git a/xos/core/migrations/0001_initial.py b/xos/core/migrations/0001_initial.py
index fe34224..db7dad0 100644
--- a/xos/core/migrations/0001_initial.py
+++ b/xos/core/migrations/0001_initial.py
@@ -3,7 +3,7 @@
from django.db import models, migrations
import timezones.fields
-import core.models.sliver
+import core.models.instance
import core.models.network
import geoposition.fields
import encrypted_fields.fields
@@ -528,7 +528,7 @@
bases=(models.Model,),
),
migrations.CreateModel(
- name='NetworkSliver',
+ name='NetworkInstance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
@@ -537,9 +537,9 @@
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_status', models.CharField(default=b'Provisioning in progress', max_length=140)),
('deleted', models.BooleanField(default=False)),
- ('ip', models.GenericIPAddressField(help_text=b'Sliver ip address', null=True, blank=True)),
+ ('ip', models.GenericIPAddressField(help_text=b'Instance ip address', null=True, blank=True)),
('port_id', models.CharField(help_text=b'Quantum port id', max_length=256, null=True, blank=True)),
- ('network', models.ForeignKey(related_name=b'networkslivers', to='core.Network')),
+ ('network', models.ForeignKey(related_name=b'networkinstances', to='core.Network')),
],
options={
'abstract': False,
@@ -952,7 +952,7 @@
('omf_friendly', models.BooleanField(default=False)),
('description', models.TextField(help_text=b'High level description of the slice and expected activities', max_length=1024, blank=True)),
('slice_url', models.URLField(max_length=512, blank=True)),
- ('max_slivers', models.IntegerField(default=10)),
+ ('max_instances', models.IntegerField(default=10)),
('network', models.CharField(default=b'Private Only', max_length=256, null=True, blank=True)),
('mount_data_sets', models.CharField(default=b'GenBank', max_length=256, null=True, blank=True)),
('creator', models.ForeignKey(related_name=b'slices', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
@@ -1040,7 +1040,7 @@
bases=(models.Model,),
),
migrations.CreateModel(
- name='Sliver',
+ name='Instance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
@@ -1051,17 +1051,17 @@
('deleted', models.BooleanField(default=False)),
('instance_id', models.CharField(help_text=b'Nova instance id', max_length=200, null=True, blank=True)),
('instance_uuid', models.CharField(help_text=b'Nova instance uuid', max_length=200, null=True, blank=True)),
- ('name', models.CharField(help_text=b'Sliver name', max_length=200)),
+ ('name', models.CharField(help_text=b'Instance name', max_length=200)),
('instance_name', models.CharField(help_text=b'OpenStack generated name', max_length=200, null=True, blank=True)),
- ('ip', models.GenericIPAddressField(help_text=b'Sliver ip address', null=True, blank=True)),
- ('numberCores', models.IntegerField(default=0, help_text=b'Number of cores for sliver', verbose_name=b'Number of Cores')),
+ ('ip', models.GenericIPAddressField(help_text=b'Instance ip address', null=True, blank=True)),
+ ('numberCores', models.IntegerField(default=0, help_text=b'Number of cores for instance', verbose_name=b'Number of Cores')),
('userData', models.TextField(help_text=b'user_data passed to instance during creation', null=True, blank=True)),
- ('creator', models.ForeignKey(related_name=b'slivers', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
- ('deployment', models.ForeignKey(related_name=b'sliver_deployment', verbose_name=b'deployment', to='core.Deployment')),
- ('flavor', models.ForeignKey(default=core.models.sliver.get_default_flavor, to='core.Flavor', help_text=b'Flavor of this instance')),
- ('image', models.ForeignKey(related_name=b'slivers', to='core.Image')),
- ('node', models.ForeignKey(related_name=b'slivers', to='core.Node')),
- ('slice', models.ForeignKey(related_name=b'slivers', to='core.Slice')),
+ ('creator', models.ForeignKey(related_name=b'instances', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
+ ('deployment', models.ForeignKey(related_name=b'instance_deployment', verbose_name=b'deployment', to='core.Deployment')),
+ ('flavor', models.ForeignKey(default=core.models.instance.get_default_flavor, to='core.Flavor', help_text=b'Flavor of this instance')),
+ ('image', models.ForeignKey(related_name=b'instances', to='core.Image')),
+ ('node', models.ForeignKey(related_name=b'instances', to='core.Node')),
+ ('slice', models.ForeignKey(related_name=b'instances', to='core.Slice')),
],
options={
'abstract': False,
@@ -1207,8 +1207,8 @@
),
migrations.AddField(
model_name='reservedresource',
- name='sliver',
- field=models.ForeignKey(related_name=b'reservedresources', to='core.Sliver'),
+ name='instance',
+ field=models.ForeignKey(related_name=b'reservedresources', to='core.Instance'),
preserve_default=True,
),
migrations.AddField(
@@ -1242,9 +1242,9 @@
preserve_default=True,
),
migrations.AddField(
- model_name='networksliver',
- name='sliver',
- field=models.ForeignKey(related_name=b'networkslivers', to='core.Sliver'),
+ model_name='networkinstance',
+ name='instance',
+ field=models.ForeignKey(related_name=b'networkinstances', to='core.Instance'),
preserve_default=True,
),
migrations.AddField(
@@ -1279,8 +1279,8 @@
),
migrations.AddField(
model_name='network',
- name='slivers',
- field=models.ManyToManyField(related_name=b'networks', through='core.NetworkSliver', to='core.Sliver', blank=True),
+ name='instances',
+ field=models.ManyToManyField(related_name=b'networks', through='core.NetworkInstance', to='core.Instance', blank=True),
preserve_default=True,
),
migrations.AddField(
diff --git a/xos/core/migrations/0011_sliver_instance_uuid.py b/xos/core/migrations/0011_sliver_instance_uuid.py
index dd8d05c..3320158 100644
--- a/xos/core/migrations/0011_sliver_instance_uuid.py
+++ b/xos/core/migrations/0011_sliver_instance_uuid.py
@@ -12,7 +12,7 @@
operations = [
migrations.AddField(
- model_name='sliver',
+ model_name='instance',
name='instance_uuid',
field=models.CharField(help_text=b'Nova instance uuid', max_length=200, null=True, blank=True),
preserve_default=True,
diff --git a/xos/core/models/__init__.py b/xos/core/models/__init__.py
index d7bf036..4a2051f 100644
--- a/xos/core/models/__init__.py
+++ b/xos/core/models/__init__.py
@@ -23,10 +23,10 @@
from .site import SitePrivilege
from .node import Node
from .slicetag import SliceTag
-from .sliver import Sliver
+from .instance import Instance
from .reservation import ReservedResource
from .reservation import Reservation
-from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, Port, NetworkTemplate, Router, NetworkSlice, ControllerNetwork
+from .network import Network, NetworkParameterType, NetworkParameter, NetworkInstance, Port, NetworkTemplate, Router, NetworkSlice, ControllerNetwork
from .billing import Account, Invoice, Charge, UsableObject, Payment
from .program import Program
diff --git a/xos/core/models/billing.py b/xos/core/models/billing.py
index 765e42f..6e517b4 100644
--- a/xos/core/models/billing.py
+++ b/xos/core/models/billing.py
@@ -2,7 +2,7 @@
import os
import socket
from django.db import models
-from core.models import PlCoreBase, Site, Slice, Sliver, Deployment
+from core.models import PlCoreBase, Site, Slice, Instance, Deployment
from core.models.plcorebase import StrippedCharField
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
diff --git a/xos/core/models/sliver.py b/xos/core/models/instance.py
similarity index 80%
rename from xos/core/models/sliver.py
rename to xos/core/models/instance.py
index 4a00161..eb38ff9 100644
--- a/xos/core/models/sliver.py
+++ b/xos/core/models/instance.py
@@ -21,7 +21,7 @@
config = Config()
def get_default_flavor(controller = None):
- # Find a default flavor that can be used for a sliver. This is particularly
+ # Find a default flavor that can be used for an instance. This is particularly
# useful in evolution. It's also intended this helper function can be used
# for admin.py when users
@@ -39,9 +39,9 @@
return flavors[0]
-class SliverDeletionManager(PlCoreBaseDeletionManager):
+class InstanceDeletionManager(PlCoreBaseDeletionManager):
def get_queryset(self):
- parent=super(SliverDeletionManager, self)
+ parent=super(InstanceDeletionManager, self)
try:
backend_type = config.observer_backend_type
except AttributeError:
@@ -58,9 +58,9 @@
return self.get_queryset()
-class SliverManager(PlCoreBaseManager):
+class InstanceManager(PlCoreBaseManager):
def get_queryset(self):
- parent=super(SliverManager, self)
+ parent=super(InstanceManager, self)
try:
backend_type = config.observer_backend_type
@@ -79,21 +79,21 @@
return self.get_queryset()
# Create your models here.
-class Sliver(PlCoreBase):
- objects = SliverManager()
- deleted_objects = SliverDeletionManager()
+class Instance(PlCoreBase):
+ objects = InstanceManager()
+ deleted_objects = InstanceDeletionManager()
instance_id = StrippedCharField(null=True, blank=True, max_length=200, help_text="Nova instance id")
instance_uuid = StrippedCharField(null=True, blank=True, max_length=200, help_text="Nova instance uuid")
- name = StrippedCharField(max_length=200, help_text="Sliver name")
+ name = StrippedCharField(max_length=200, help_text="Instance name")
instance_name = StrippedCharField(blank=True, null=True, max_length=200, help_text="OpenStack generated name")
- ip = models.GenericIPAddressField(help_text="Sliver ip address", blank=True, null=True)
- image = models.ForeignKey(Image, related_name='slivers')
- #key = models.ForeignKey(Key, related_name='slivers')
- creator = models.ForeignKey(User, related_name='slivers', blank=True, null=True)
- slice = models.ForeignKey(Slice, related_name='slivers')
- deployment = models.ForeignKey(Deployment, verbose_name='deployment', related_name='sliver_deployment')
- node = models.ForeignKey(Node, related_name='slivers')
- numberCores = models.IntegerField(verbose_name="Number of Cores", help_text="Number of cores for sliver", default=0)
+ ip = models.GenericIPAddressField(help_text="Instance ip address", blank=True, null=True)
+ image = models.ForeignKey(Image, related_name='instances')
+ #key = models.ForeignKey(Key, related_name='instances')
+ creator = models.ForeignKey(User, related_name='instances', blank=True, null=True)
+ slice = models.ForeignKey(Slice, related_name='instances')
+ deployment = models.ForeignKey(Deployment, verbose_name='deployment', related_name='instance_deployment')
+ node = models.ForeignKey(Node, related_name='instances')
+ numberCores = models.IntegerField(verbose_name="Number of Cores", help_text="Number of cores for instance", default=0)
flavor = models.ForeignKey(Flavor, help_text="Flavor of this instance", default=get_default_flavor)
tags = generic.GenericRelation(Tag)
userData = models.TextField(blank=True, null=True, help_text="user_data passed to instance during creation")
@@ -106,16 +106,16 @@
def __unicode__(self):
if self.name and Slice.objects.filter(id=self.slice_id) and (self.name != self.slice.name):
# NOTE: The weird check on self.slice_id was due to a problem when
- # deleting the slice before the sliver.
+ # deleting the slice before the instance.
return u'%s' % self.name
elif self.instance_name:
return u'%s' % (self.instance_name)
elif self.id:
return u'uninstantiated-%s' % str(self.id)
elif self.slice:
- return u'unsaved-sliver on %s' % self.slice.name
+ return u'unsaved-instance on %s' % self.slice.name
else:
- return u'unsaved-sliver'
+ return u'unsaved-instance'
def save(self, *args, **kwds):
if not self.name:
@@ -123,20 +123,20 @@
if not self.creator and hasattr(self, 'caller'):
self.creator = self.caller
if not self.creator:
- raise ValidationError('sliver has no creator')
+ raise ValidationError('instance has no creator')
if (self.slice.creator != self.creator):
# Check to make sure there's a slice_privilege for the user. If there
# isn't, then keystone will throw an exception inside the observer.
slice_privs = SlicePrivilege.objects.filter(slice=self.slice, user=self.creator)
if not slice_privs:
- raise ValidationError('sliver creator has no privileges on slice')
+ raise ValidationError('instance creator has no privileges on slice')
# XXX smbaker - disabled for now, was causing fault in tenant view create slice
# if not self.controllerNetwork.test_acl(slice=self.slice):
# raise exceptions.ValidationError("Deployment %s's ACL does not allow any of this slice %s's users" % (self.controllerNetwork.name, self.slice.name))
- super(Sliver, self).save(*args, **kwds)
+ super(Instance, self).save(*args, **kwds)
def can_update(self, user):
return user.can_update_slice(self.slice)
@@ -166,18 +166,18 @@
@staticmethod
def select_by_user(user):
if user.is_admin:
- qs = Sliver.objects.all()
+ qs = Instance.objects.all()
else:
slices = Slice.select_by_user(user)
- qs = Sliver.objects.filter(slice__in=slices)
+ qs = Instance.objects.filter(slice__in=slices)
return qs
def get_cpu_stats(self):
- filter = 'instance_id=%s'%self.sliver_id
+ filter = 'instance_id=%s'%self.instance_id
return monitor.get_meter('cpu',filter,None)
def get_bw_stats(self):
- filter = 'instance_id=%s'%self.sliver_id
+ filter = 'instance_id=%s'%self.instance_id
return monitor.get_meter('network.outgoing.bytes',filter,None)
def get_node_stats(self):
@@ -211,4 +211,4 @@
except:
instance.controller = None
-models.signals.post_init.connect(controller_setter, Sliver)
+models.signals.post_init.connect(controller_setter, Instance)
diff --git a/xos/core/models/network.py b/xos/core/models/network.py
index b7f6f3d..f7032b0 100644
--- a/xos/core/models/network.py
+++ b/xos/core/models/network.py
@@ -2,7 +2,7 @@
import socket
import sys
from django.db import models
-from core.models import PlCoreBase, Site, Slice, Sliver, Controller
+from core.models import PlCoreBase, Site, Slice, Instance, Controller
from core.models import ControllerLinkManager,ControllerLinkDeletionManager
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
@@ -108,7 +108,7 @@
permit_all_slices = models.BooleanField(default=False)
permitted_slices = models.ManyToManyField(Slice, blank=True, related_name="availableNetworks")
slices = models.ManyToManyField(Slice, blank=True, related_name="networks", through="NetworkSlice")
- slivers = models.ManyToManyField(Sliver, blank=True, related_name="networks", through="NetworkSliver")
+ instances = models.ManyToManyField(Instance, blank=True, related_name="networks", through="NetworkInstance")
topology_parameters = models.TextField(null=True, blank=True)
controller_url = models.CharField(null=True, blank=True, max_length=1024)
@@ -189,9 +189,9 @@
def save(self, *args, **kwds):
slice = self.slice
if (slice not in self.network.permitted_slices.all()) and (slice != self.network.owner) and (not self.network.permit_all_slices):
- # to add a sliver to the network, then one of the following must be true:
- # 1) sliver's slice is in network's permittedSlices list,
- # 2) sliver's slice is network's owner, or
+ # to add an instance to the network, then one of the following must be true:
+ # 1) instance's slice is in network's permittedSlices list,
+ # 2) instance's slice is network's owner, or
# 3) network's permitAllSlices is true
raise ValueError("Slice %s is not allowed to connect to network %s" % (str(slice), str(self.network)))
@@ -211,43 +211,43 @@
qs = NetworkSlice.objects.filter(id__in=slice_ids)
return qs
-class NetworkSliver(PlCoreBase):
+class NetworkInstance(PlCoreBase):
# Please use "Port" instead of "NetworkSliver". NetworkSliver will soon be
# removed.
- network = models.ForeignKey(Network,related_name='networkslivers') # related_name='links'
- sliver = models.ForeignKey(Sliver, null=True, blank=True, related_name='networkslivers') # related_name='ports'
- ip = models.GenericIPAddressField(help_text="Sliver ip address", blank=True, null=True)
+ network = models.ForeignKey(Network,related_name='networkinstances')
+ instance = models.ForeignKey(Instance, null=True, blank=True, related_name='networkinstances')
+ ip = models.GenericIPAddressField(help_text="Instance ip address", blank=True, null=True)
port_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum port id")
class Meta:
- unique_together = ('network', 'sliver')
+ unique_together = ('network', 'instance')
def save(self, *args, **kwds):
- if self.sliver:
- slice = self.sliver.slice
+ if self.instance:
+ slice = self.instance.slice
if (slice not in self.network.permitted_slices.all()) and (slice != self.network.owner) and (not self.network.permit_all_slices):
- # to add a sliver to the network, then one of the following must be true:
- # 1) sliver's slice is in network's permittedSlices list,
- # 2) sliver's slice is network's owner, or
+ # to add an instance to the network, then one of the following must be true:
+ # 1) instance's slice is in network's permittedSlices list,
+ # 2) instance's slice is network's owner, or
# 3) network's permitAllSlices is true
raise ValueError("Slice %s is not allowed to connect to network %s" % (str(slice), str(self.network)))
if (not self.ip) and (NO_OBSERVER):
from util.network_subnet_allocator import find_unused_address
self.ip = find_unused_address(self.network.subnet,
- [x.ip for x in self.network.networksliver_set.all()])
- super(NetworkSliver, self).save(*args, **kwds)
+ [x.ip for x in self.network.networkinstance_set.all()])
+ super(NetworkInstance, self).save(*args, **kwds)
def __unicode__(self):
- if self.sliver:
- return u'%s-%s' % (self.network.name, self.sliver.instance_name)
+ if self.instance:
+ return u'%s-%s' % (self.network.name, self.instance.instance_name)
else:
return u'%s-unboundport-%s' % (self.network.name, self.id)
def can_update(self, user):
- if self.sliver:
- return user.can_update_slice(self.sliver.slice)
+ if self.instance:
+ return user.can_update_slice(self.instance.slice)
if self.network:
return user.can_update_slice(self.network.owner)
return False
@@ -255,13 +255,13 @@
@staticmethod
def select_by_user(user):
if user.is_admin:
- qs = NetworkSliver.objects.all()
+ qs = NetworkInstance.objects.all()
else:
- sliver_ids = [s.id for s in NetworkSliver.select_by_user(user)]
- qs = NetworkSliver.objects.filter(id__in=sliver_ids)
+ instance_ids = [s.id for s in NetworkInstance.select_by_user(user)]
+ qs = NetworkInstance.objects.filter(id__in=instance_ids)
return qs
-class Port(NetworkSliver):
+class Port(NetworkInstance):
# Rename in progress: NetworkSliver->Port
class Meta:
proxy = True
diff --git a/xos/core/models/reservation.py b/xos/core/models/reservation.py
index 1a838a2..ecf207c 100644
--- a/xos/core/models/reservation.py
+++ b/xos/core/models/reservation.py
@@ -2,7 +2,7 @@
import datetime
from django.db import models
from core.models import PlCoreBase
-from core.models import Sliver
+from core.models import Instance
from core.models import Slice
from core.models import ServiceResource
@@ -32,7 +32,7 @@
return qs
class ReservedResource(PlCoreBase):
- sliver = models.ForeignKey(Sliver, related_name="reservedresources")
+ instance = models.ForeignKey(Instance, related_name="reservedresources")
resource = models.ForeignKey(ServiceResource, related_name="reservedresources")
quantity = models.IntegerField(default=1)
reservationSet = models.ForeignKey(Reservation, related_name="reservedresources")
@@ -40,18 +40,18 @@
class Meta(PlCoreBase.Meta):
verbose_name_plural = "Reserved Resources"
- def __unicode__(self): return u'%d %s on %s' % (self.quantity, self.resource, self.sliver)
+ def __unicode__(self): return u'%d %s on %s' % (self.quantity, self.resource, self.instance)
def can_update(self, user):
- return user.can_update(self.sliver.slice)
+ return user.can_update(self.instance.slice)
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = ReservedResource.objects.all()
else:
- sliver_ids = [s.id for s in Sliver.select_by_user(user)]
- qs = ReservedResource.objects.filter(id__in=sliver_ids)
+ instance_ids = [s.id for s in Instance.select_by_user(user)]
+ qs = ReservedResource.objects.filter(id__in=instance_ids)
return qs
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index 0940176..6eed241 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -88,25 +88,25 @@
Get a list of nodes that can be used to scale up a slice.
slice - slice to scale up
- max_per_node - maximum numbers of slivers that 'slice' can have on a single node
+ max_per_node - maximum number of instances that 'slice' can have on a single node
exclusive_slices - list of slices that must have no nodes in common with 'slice'.
"""
- from core.models import Node, Sliver # late import to get around order-of-imports constraint in __init__.py
+ from core.models import Node, Instance # late import to get around order-of-imports constraint in __init__.py
nodes = list(Node.objects.all())
- conflicting_slivers = Sliver.objects.filter(slice__in = exclusive_slices)
- conflicting_nodes = Node.objects.filter(slivers__in = conflicting_slivers)
+ conflicting_instances = Instance.objects.filter(slice__in = exclusive_slices)
+ conflicting_nodes = Node.objects.filter(instances__in = conflicting_instances)
nodes = [x for x in nodes if x not in conflicting_nodes]
- # If max_per_node is set, then limit the number of slivers this slice
+ # If max_per_node is set, then limit the number of instances this slice
# can have on a single node.
if max_per_node:
acceptable_nodes = []
for node in nodes:
- existing_count = node.slivers.filter(slice=slice).count()
+ existing_count = node.instances.filter(slice=slice).count()
if existing_count < max_per_node:
acceptable_nodes.append(node)
nodes = acceptable_nodes
@@ -117,22 +117,22 @@
# Pick the best node to scale up a slice.
nodes = self.get_scalable_nodes(slice, max_per_node, exclusive_slices)
- nodes = sorted(nodes, key=lambda node: node.slivers.all().count())
+ nodes = sorted(nodes, key=lambda node: node.instances.all().count())
if not nodes:
return None
return nodes[0]
def adjust_scale(self, slice_hint, scale, max_per_node=None, exclusive_slices=[]):
- from core.models import Sliver # late import to get around order-of-imports constraint in __init__.py
+ from core.models import Instance # late import to get around order-of-imports constraint in __init__.py
slices = [x for x in self.slices.all() if slice_hint in x.name]
for slice in slices:
- while slice.slivers.all().count() > scale:
- s = slice.slivers.all()[0]
- # print "drop sliver", s
+ while slice.instances.all().count() > scale:
+ s = slice.instances.all()[0]
+ # print "drop instance", s
s.delete()
- while slice.slivers.all().count() < scale:
+ while slice.instances.all().count() < scale:
node = self.pick_node(slice, max_per_node, exclusive_slices)
if not node:
# no more available nodes
@@ -146,7 +146,7 @@
if not flavor:
raise XOSConfigurationError("No default_flavor for slice %s" % slice.name)
- s = Sliver(slice=slice,
+ s = Instance(slice=slice,
node=node,
creator=slice.creator,
image=image,
@@ -154,7 +154,7 @@
deployment=node.site_deployment.deployment)
s.save()
- # print "add sliver", s
+ # print "add instance", s
class ServiceAttribute(PlCoreBase):
name = models.SlugField(help_text="Attribute Name", max_length=128)
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index 76f5041..18d3cb6 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -25,7 +25,7 @@
description=models.TextField(blank=True,help_text="High level description of the slice and expected activities", max_length=1024)
slice_url = models.URLField(blank=True, max_length=512)
site = models.ForeignKey(Site, related_name='slices', help_text="The Site this Slice belongs to")
- max_slivers = models.IntegerField(default=10)
+ max_instances = models.IntegerField(default=10)
service = models.ForeignKey(Service, related_name='slices', null=True, blank=True)
network = StrippedCharField(default="Private Only",null=True, blank=True, max_length=256)
tags = generic.GenericRelation(Tag)
@@ -192,4 +192,4 @@
return monitor.get_meter('network.outgoing.bytes',filter,None)
def get_node_stats(self):
- return len(self.slice.slivers)
+ return len(self.slice.instances)
diff --git a/xos/core/models/user.py b/xos/core/models/user.py
index 2ea90da..cb90145 100644
--- a/xos/core/models/user.py
+++ b/xos/core/models/user.py
@@ -339,12 +339,12 @@
def get_readable_objects(self, filter_by=None):
""" Returns a list of objects that the user is allowed to read. """
- from core.models import Deployment, Flavor, Image, Network, NetworkTemplate, Node, PlModelMixIn, Site, Slice, SliceTag, Sliver, Tag, User, DeploymentPrivilege, SitePrivilege, SlicePrivilege
+ from core.models import Deployment, Flavor, Image, Network, NetworkTemplate, Node, PlModelMixIn, Site, Slice, SliceTag, Instance, Tag, User, DeploymentPrivilege, SitePrivilege, SlicePrivilege
models = []
if filter_by and isinstance(filter_by, list):
models = [m for m in filter_by if issubclass(m, PlModelMixIn)]
if not models:
- models = [Deployment, Network, Site, Slice, SliceTag, Sliver, Tag, User]
+ models = [Deployment, Network, Site, Slice, SliceTag, Instance, Tag, User]
readable_objects = []
for model in models:
readable_objects.extend(model.select_by_user(self))
@@ -362,7 +362,7 @@
list of dicts
"""
- from core.models import Deployment, Flavor, Image, Network, NetworkTemplate, Node, PlModelMixIn, Site, Slice, SliceTag, Sliver, Tag, User, DeploymentPrivilege, SitePrivilege, SlicePrivilege
+ from core.models import Deployment, Flavor, Image, Network, NetworkTemplate, Node, PlModelMixIn, Site, Slice, SliceTag, Instance, Tag, User, DeploymentPrivilege, SitePrivilege, SlicePrivilege
READ = 'r'
READWRITE = 'rw'
models = []
@@ -371,7 +371,7 @@
deployment_priv_objs = [Image, NetworkTemplate, Flavor]
site_priv_objs = [Node, Slice, User]
- slice_priv_objs = [Sliver, Network]
+ slice_priv_objs = [Instance, Network]
# maps the set of objects a paticular role has write access
write_map = {
diff --git a/xos/core/serializers.py b/xos/core/serializers.py
index 3fb726d..d84f111 100644
--- a/xos/core/serializers.py
+++ b/xos/core/serializers.py
@@ -109,7 +109,7 @@
# HyperlinkedModelSerializer doesn't include the id by default
id = serializers.Field()
site = serializers.HyperlinkedRelatedField(view_name='site-detail')
- slivers = serializers.HyperlinkedRelatedField(view_name='sliver-detail')
+ instances = serializers.HyperlinkedRelatedField(view_name='instance-detail')
class Meta:
model = Slice
fields = ('id',
@@ -128,7 +128,7 @@
'network',
'mountDataSets',
'site',
- 'slivers',
+ 'instances',
'updated',
'created')
@@ -185,7 +185,7 @@
'site',
'role')
-class SliverSerializer(serializers.HyperlinkedModelSerializer):
+class InstanceSerializer(serializers.HyperlinkedModelSerializer):
# HyperlinkedModelSerializer doesn't include the id by default
id = serializers.Field()
image = serializers.HyperlinkedRelatedField(view_name='image-detail')
@@ -196,7 +196,7 @@
#slice = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
- model = Sliver
+ model = Instance
fields = ('id',
'url',
'instance_id',
@@ -261,7 +261,7 @@
SitePrivilege: SitePrivilegeSerializer,
Slice: SliceSerializer,
SlicePrivilege: SlicePrivilegeSerializer,
- Sliver: SliverSerializer,
+ Instance: InstanceSerializer,
Tag: TagSerializer,
User: UserSerializer,
None: None,
diff --git a/xos/core/tests.py b/xos/core/tests.py
index a02497e..e3d7faa 100644
--- a/xos/core/tests.py
+++ b/xos/core/tests.py
@@ -7,7 +7,7 @@
from datetime import datetime
FIXTURES_FILE = 'core/fixtures/initial_data.json'
-MODELS = ['Deployment','Image','Node','Reservation','Slice','Sliver','User']
+MODELS = ['Deployment','Image','Node','Reservation','Slice','Instance','User']
def is_dynamic_type(x):
t = type(x)
diff --git a/xos/core/views/hpc_config.py b/xos/core/views/hpc_config.py
index af183bf..c83c01b 100644
--- a/xos/core/views/hpc_config.py
+++ b/xos/core/views/hpc_config.py
@@ -58,11 +58,11 @@
if not cmiSlice:
return HttpResponseServerError("Error: no CMI slice")
- if len(cmiSlice.slivers.all())==0:
- return HttpResponseServerError("Error: CMI slice has no slivers")
+ if len(cmiSlice.instances.all())==0:
+ return HttpResponseServerError("Error: CMI slice has no instances")
# for now, assuming using NAT
- cmi_hostname = cmiSlice.slivers.all()[0].node.name
+ cmi_hostname = cmiSlice.instances.all()[0].node.name
if not hpcSlice:
return HttpResponseServerError("Error: no HPC slice")
diff --git a/xos/core/views/instances.py b/xos/core/views/instances.py
new file mode 100644
index 0000000..e424538
--- /dev/null
+++ b/xos/core/views/instances.py
@@ -0,0 +1,13 @@
+from core.serializers import InstanceSerializer
+from rest_framework import generics
+from core.models import Instance
+
+class InstanceList(generics.ListCreateAPIView):
+ queryset = Instance.objects.all()
+ serializer_class = InstanceSerializer
+
+class InstanceDetail(generics.RetrieveUpdateDestroyAPIView):
+ queryset = Instance.objects.all()
+ serializer_class = InstanceSerializer
+
+
diff --git a/xos/core/views/legacyapi.py b/xos/core/views/legacyapi.py
index fe86871..4657116 100644
--- a/xos/core/views/legacyapi.py
+++ b/xos/core/views/legacyapi.py
@@ -6,7 +6,7 @@
import traceback
import xmlrpclib
-from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+from core.models import Slice, Instance, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
@@ -52,8 +52,8 @@
continue
node_ids=[]
- for ps_sliver in ps_slice.slivers.all():
- node_ids.append(ps_id_to_pl_id(ps_sliver.node.id))
+ for ps_instance in ps_slice.instances.all():
+ node_ids.append(ps_id_to_pl_id(ps_instance.node.id))
slice = {"instantiation": "plc-instantiated",
"description": "XOS slice",
@@ -81,8 +81,8 @@
nodes = []
for ps_node in ps_nodes:
slice_ids=[]
- for ps_sliver in ps_node.slivers.all():
- slice_ids.append(pl_slice_id(ps_sliver.slice, slice_remap))
+ for ps_instance in ps_node.instances.all():
+ slice_ids.append(pl_slice_id(ps_instance.slice, slice_remap))
node = {"node_id": ps_id_to_pl_id(ps_node.id),
"site_id": ps_id_to_pl_id(ps_node.site_id),
@@ -125,7 +125,7 @@
"node_ids": node_ids,
"pcu_ids": [],
"max_slices": 100,
- "max_slivers": 1000,
+ "max_instances": 1000,
"is_public": False,
"peer_site_id": None,
"abbrebiated_name": ps_site.abbreviated_name,
@@ -150,17 +150,17 @@
interfaces = []
ps_slices = Slice.objects.filter(name=slicename)
for ps_slice in ps_slices:
- for ps_sliver in ps_slice.slivers.all():
- node_id = ps_id_to_pl_id(ps_sliver.node_id)
+ for ps_instance in ps_slice.instances.all():
+ node_id = ps_id_to_pl_id(ps_instance.node_id)
if node_id in node_ids:
- ps_node = ps_sliver.node
+ ps_node = ps_instance.node
ip = socket.gethostbyname(ps_node.name.strip())
# If the slice has a network that's labeled for hpc_client, then
# return that network.
found_labeled_network = False
- for port in ps_sliver.ports.all():
+ for port in ps_instance.ports.all():
if (not port.ip):
continue
if (port.network.owner != ps_slice):
@@ -171,7 +171,7 @@
if not found_labeled_network:
# search for a dedicated public IP address
- for port in ps_sliver.ports.all():
+ for port in ps_instance.ports.all():
if (not port.ip):
continue
template = port.network.template
@@ -180,7 +180,7 @@
if return_nat:
ip = None
- for port in ps_sliver.ports.all():
+ for port in ps_instance.ports.all():
if (not port.ip):
continue
template = port.network.template
@@ -191,7 +191,7 @@
if return_private:
ip = None
- for port in ps_sliver.ports.all():
+ for port in ps_instance.ports.all():
if (not port.ip):
continue
template = port.network.template
@@ -226,7 +226,7 @@
else:
node_id = 0
- node_sliver_tags = GetTags(slicename, node_id)
+ node_instance_tags = GetTags(slicename, node_id)
slices = GetSlices({"name": slicename}, slice_remap=slice_remap)
perhost = {}
@@ -266,12 +266,12 @@
hostprivmap[nodemap[interface['node_id']]] = interface['ip']
for nid in node_ids:
- sliver_tags = GetTags(slicename,nid)
- perhost[nodemap[nid]] = sliver_tags
+ instance_tags = GetTags(slicename,nid)
+ perhost[nodemap[nid]] = instance_tags
- slivers = GetSlices(slice_remap=slice_remap)
+ instances = GetSlices(slice_remap=slice_remap)
if node_id != 0:
- slivers = [slice for slice in slivers if (node_id in slice.node_ids)]
+ instances = [slice for slice in instances if (node_id in slice.node_ids)]
sites = GetSites(slice_remap=slice_remap)
for site in sites:
@@ -280,12 +280,12 @@
timestamp = int(time.time())
return {'version': 3,
'timestamp': timestamp,
- 'configuration': node_sliver_tags,
+ 'configuration': node_instance_tags,
'allconfigurations':perhost,
'hostipmap':hostipmap,
'hostnatmap':hostnatmap,
'hostprivmap':hostprivmap,
- 'slivers': slivers,
+ 'instances': instances,
'interfaces': allinterfaces,
'sites': sites,
'nodes': nodes}
diff --git a/xos/core/views/slivers.py b/xos/core/views/slivers.py
deleted file mode 100644
index bb310da..0000000
--- a/xos/core/views/slivers.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from core.serializers import SliverSerializer
-from rest_framework import generics
-from core.models import Sliver
-
-class SliverList(generics.ListCreateAPIView):
- queryset = Sliver.objects.all()
- serializer_class = SliverSerializer
-
-class SliverDetail(generics.RetrieveUpdateDestroyAPIView):
- queryset = Sliver.objects.all()
- serializer_class = SliverSerializer
-
-
diff --git a/xos/core/xoslib/methods/cordsubscriber.py b/xos/core/xoslib/methods/cordsubscriber.py
index e504c71..ea8da80 100644
--- a/xos/core/xoslib/methods/cordsubscriber.py
+++ b/xos/core/xoslib/methods/cordsubscriber.py
@@ -30,7 +30,7 @@
service_specific_id = ReadOnlyField()
vlan_id = ReadOnlyField()
vcpe_id = ReadOnlyField()
- sliver = ReadOnlyField()
+ instance = ReadOnlyField()
image = ReadOnlyField()
vbng_id = ReadOnlyField()
firewall_enable = serializers.BooleanField()
@@ -39,7 +39,7 @@
url_filter_rules = serializers.CharField()
url_filter_level = serializers.CharField(required=False)
cdn_enable = serializers.BooleanField()
- sliver_name = ReadOnlyField()
+ instance_name = ReadOnlyField()
image_name = ReadOnlyField()
routeable_subnet = serializers.CharField(required=False)
ssh_command = ReadOnlyField()
@@ -60,7 +60,7 @@
model = CordSubscriber
fields = ('humanReadableName', 'id',
'service_specific_id', 'vlan_id',
- 'vcpe_id', 'sliver', 'sliver_name', 'image', 'image_name',
+ 'vcpe_id', 'instance', 'instance_name', 'image', 'image_name',
'firewall_enable', 'firewall_rules',
'url_filter_enable', 'url_filter_rules', 'url_filter_level',
'bbs_account',
diff --git a/xos/core/xoslib/methods/hpcview.py b/xos/core/xoslib/methods/hpcview.py
index 0645405..f14b398 100644
--- a/xos/core/xoslib/methods/hpcview.py
+++ b/xos/core/xoslib/methods/hpcview.py
@@ -24,16 +24,16 @@
except:
return service.service.all()
-def lookup_tag(service, sliver, name, default=None):
- sliver_type = ContentType.objects.get_for_model(sliver)
- t = Tag.objects.filter(service=service, name=name, content_type__pk=sliver_type.id, object_id=sliver.id)
+def lookup_tag(service, instance, name, default=None):
+ instance_type = ContentType.objects.get_for_model(instance)
+ t = Tag.objects.filter(service=service, name=name, content_type__pk=instance_type.id, object_id=instance.id)
if t:
return t[0].value
else:
return default
-def lookup_time(service, sliver, name):
- v = lookup_tag(service, sliver, name)
+def lookup_time(service, instance, name):
+ v = lookup_tag(service, instance, name)
if v:
return str(time.time() - float(v))
else:
@@ -69,22 +69,22 @@
return config_run
# from hpc_watcher.py
-def get_public_ip(service, sliver):
+def get_public_ip(service, instance):
network_name = None
- if "hpc" in sliver.slice.name:
+ if "hpc" in instance.slice.name:
network_name = getattr(service, "watcher_hpc_network", None)
- elif "demux" in sliver.slice.name:
+ elif "demux" in instance.slice.name:
network_name = getattr(service, "watcher_dnsdemux_network", None)
- elif "redir" in sliver.slice.name:
+ elif "redir" in instance.slice.name:
network_name = getattr(service, "watcher_dnsredir_network", None)
if network_name and network_name.lower()=="nat":
return None
if (network_name is None) or (network_name=="") or (network_name.lower()=="public"):
- return sliver.get_public_ip()
+ return instance.get_public_ip()
- for ns in sliver.networkslivers.all():
+ for ns in instance.networkinstances.all():
if (ns.ip) and (ns.network.name==network_name):
return ns.ip
@@ -143,50 +143,50 @@
nameservers[nameserver] = {"name": nameserver, "ip": "exception", "hit": False}
dnsdemux=[]
- for sliver in dnsdemux_slice.slivers.all():
+ for instance in dnsdemux_slice.instances.all():
ip=None
try:
- ip = get_public_ip(dnsdemux_service, sliver)
+ ip = get_public_ip(dnsdemux_service, instance)
except Exception, e:
ip = "Exception: " + str(e)
if not ip:
try:
- ip = socket.gethostbyname(sliver.node.name)
+ ip = socket.gethostbyname(instance.node.name)
except:
- ip = "??? " + sliver.node.name
+ ip = "??? " + instance.node.name
- sliver_nameservers = []
+ instance_nameservers = []
for ns in nameservers.values():
if ns["ip"]==ip:
- sliver_nameservers.append(ns["name"])
+ instance_nameservers.append(ns["name"])
ns["hit"]=True
- # now find the dnsredir sliver that is also on this node
- watcherd_dnsredir = "no-redir-sliver"
- for dnsredir_sliver in dnsredir_slice.slivers.all():
- if dnsredir_sliver.node == sliver.node:
- watcherd_dnsredir = lookup_tag(dnsredir_service, dnsredir_sliver, "watcher.watcher.msg")
+ # now find the dnsredir instance that is also on this node
+ watcherd_dnsredir = "no-redir-instance"
+ for dnsredir_instance in dnsredir_slice.instances.all():
+ if dnsredir_instance.node == instance.node:
+ watcherd_dnsredir = lookup_tag(dnsredir_service, dnsredir_instance, "watcher.watcher.msg")
- watcherd_dnsdemux = lookup_tag(dnsdemux_service, sliver, "watcher.watcher.msg")
+ watcherd_dnsdemux = lookup_tag(dnsdemux_service, instance, "watcher.watcher.msg")
- dnsdemux.append( {"name": sliver.node.name,
- "watcher.DNS.msg": lookup_tag(dnsdemux_service, sliver, "watcher.DNS.msg"),
- "watcher.DNS.time": lookup_time(dnsdemux_service, sliver, "watcher.DNS.time"),
+ dnsdemux.append( {"name": instance.node.name,
+ "watcher.DNS.msg": lookup_tag(dnsdemux_service, instance, "watcher.DNS.msg"),
+ "watcher.DNS.time": lookup_time(dnsdemux_service, instance, "watcher.DNS.time"),
"ip": ip,
- "nameservers": sliver_nameservers,
+ "nameservers": instance_nameservers,
"dnsdemux_config_age": compute_config_run(watcherd_dnsdemux),
"dnsredir_config_age": compute_config_run(watcherd_dnsredir) })
hpc=[]
- for sliver in hpc_slice.slivers.all():
- watcherd_hpc = lookup_tag(hpc_service, sliver, "watcher.watcher.msg")
+ for instance in hpc_slice.instances.all():
+ watcherd_hpc = lookup_tag(hpc_service, instance, "watcher.watcher.msg")
- hpc.append( {"name": sliver.node.name,
- "watcher.HPC-hb.msg": lookup_tag(hpc_service, sliver, "watcher.HPC-hb.msg"),
- "watcher.HPC-hb.time": lookup_time(hpc_service, sliver, "watcher.HPC-hb.time"),
- "watcher.HPC-fetch.msg": lookup_tag(hpc_service, sliver, "watcher.HPC-fetch.msg"),
- "watcher.HPC-fetch.time": lookup_time(hpc_service, sliver, "watcher.HPC-fetch.time"),
- "watcher.HPC-fetch.urls": json_default(lookup_tag(hpc_service, sliver, "watcher.HPC-fetch-urls.msg"), []),
+ hpc.append( {"name": instance.node.name,
+ "watcher.HPC-hb.msg": lookup_tag(hpc_service, instance, "watcher.HPC-hb.msg"),
+ "watcher.HPC-hb.time": lookup_time(hpc_service, instance, "watcher.HPC-hb.time"),
+ "watcher.HPC-fetch.msg": lookup_tag(hpc_service, instance, "watcher.HPC-fetch.msg"),
+ "watcher.HPC-fetch.time": lookup_time(hpc_service, instance, "watcher.HPC-fetch.time"),
+ "watcher.HPC-fetch.urls": json_default(lookup_tag(hpc_service, instance, "watcher.HPC-fetch-urls.msg"), []),
"config_age": compute_config_run(watcherd_hpc),
})
diff --git a/xos/core/xoslib/methods/sliceplus.py b/xos/core/xoslib/methods/sliceplus.py
index 12f7c1a..c339789 100644
--- a/xos/core/xoslib/methods/sliceplus.py
+++ b/xos/core/xoslib/methods/sliceplus.py
@@ -66,7 +66,7 @@
class Meta:
model = SlicePlus
- fields = ('humanReadableName', 'id','created','updated','enacted','name','enabled','omf_friendly','description','slice_url','site','max_slivers','service','network','mount_data_sets',
+ fields = ('humanReadableName', 'id','created','updated','enacted','name','enabled','omf_friendly','description','slice_url','site','max_instances','service','network','mount_data_sets',
'default_image', 'default_flavor',
'serviceClass','creator','networks','sliceInfo','network_ports','backendIcon','backendHtml','site_allocation','site_ready','users',"user_names","current_user_can_see")
diff --git a/xos/core/xoslib/methods/sshkeys.py b/xos/core/xoslib/methods/sshkeys.py
index a714212..8413e65 100644
--- a/xos/core/xoslib/methods/sshkeys.py
+++ b/xos/core/xoslib/methods/sshkeys.py
@@ -16,16 +16,16 @@
def get(self, request, format=None):
instances=[]
- for sliver in self.get_queryset().all():
- if sliver.instance_id:
- instances.append( {"id": sliver.instance_id,
- "public_keys": sliver.get_public_keys(),
- "node_name": sliver.node.name } )
+ for instance in self.get_queryset().all():
+ if instance.instance_id:
+ instances.append( {"id": instance.instance_id,
+ "public_keys": instance.get_public_keys(),
+ "node_name": instance.node.name } )
return Response(instances)
def get_queryset(self):
- queryset = queryset=Sliver.objects.all()
+ queryset = queryset=Instance.objects.all()
node_name = self.request.QUERY_PARAMS.get('node_name', None)
if node_name is not None:
@@ -38,15 +38,15 @@
method_name = "sshkeys"
def get(self, request, format=None, pk=0):
- slivers = self.get_queryset().filter(instance_id=pk)
- if not slivers:
- raise XOSNotFound("didn't find sliver for instance %s" % pk)
- return Response( [ {"id": slivers[0].instance_id,
- "public_keys": slivers[0].get_public_keys(),
- "node_name": slivers[0].node.name } ])
+ instances = self.get_queryset().filter(instance_id=pk)
+ if not instances:
+ raise XOSNotFound("didn't find instance for instance_id %s" % pk)
+ return Response( [ {"id": instances[0].instance_id,
+ "public_keys": instances[0].get_public_keys(),
+ "node_name": instances[0].node.name } ])
def get_queryset(self):
- queryset = queryset=Sliver.objects.all()
+ queryset = queryset=Instance.objects.all()
node_name = self.request.QUERY_PARAMS.get('node_name', None)
if node_name is not None:
diff --git a/xos/core/xoslib/methods/volttenant.py b/xos/core/xoslib/methods/volttenant.py
index ba0e909..e5998da 100644
--- a/xos/core/xoslib/methods/volttenant.py
+++ b/xos/core/xoslib/methods/volttenant.py
@@ -44,10 +44,10 @@
vcpe = obj.vcpe
if not vcpe:
return None
- sliver = vcpe.sliver
- if not sliver:
+ instance = vcpe.instance
+ if not instance:
return None
- return sliver.node.name
+ return instance.node.name
class VOLTTenantList(XOSListCreateAPIView):
serializer_class = VOLTTenantIdSerializer
diff --git a/xos/core/xoslib/objects/cordsubscriber.py b/xos/core/xoslib/objects/cordsubscriber.py
index 63dfec2..318d54c 100644
--- a/xos/core/xoslib/objects/cordsubscriber.py
+++ b/xos/core/xoslib/objects/cordsubscriber.py
@@ -1,4 +1,4 @@
-from core.models import Slice, SlicePrivilege, SliceRole, Sliver, Site, Node, User
+from core.models import Slice, SlicePrivilege, SliceRole, Instance, Site, Node, User
from cord.models import VOLTTenant, CordSubscriberRoot
from plus import PlusObjectMixin
from operator import itemgetter, attrgetter
@@ -40,8 +40,8 @@
("cdn_enable", "vcpe.cdn_enable"),
("image", "vcpe.image.id"),
("image_name", "vcpe.image.name"),
- ("sliver", "vcpe.sliver.id"),
- ("sliver_name", "vcpe.sliver.name"),
+ ("instance", "vcpe.instance.id"),
+ ("instance_name", "vcpe.instance.name"),
("routeable_subnet", "vcpe.vbng.routeable_subnet"),
("vcpe_id", "vcpe.id"),
("vbng_id", "vcpe.vbng.id"),
@@ -119,8 +119,8 @@
("ssh_command", "volt.vcpe.ssh_command"),
("image", "volt.vcpe.image.id"),
("image_name", "volt.vcpe.image.name"),
- ("sliver", "volt.vcpe.sliver.id"),
- ("sliver_name", "volt.vcpe.sliver.name"),
+ ("instance", "volt.vcpe.instance.id"),
+ ("instance_name", "volt.vcpe.instance.name"),
("routeable_subnet", "volt.vcpe.vbng.routeable_subnet"),
("vcpe_id", "volt.vcpe.id"),
("vbng_id", "volt.vcpe.vbng.id"),
diff --git a/xos/core/xoslib/objects/sliceplus.py b/xos/core/xoslib/objects/sliceplus.py
index 47c93d7..9d2868f 100644
--- a/xos/core/xoslib/objects/sliceplus.py
+++ b/xos/core/xoslib/objects/sliceplus.py
@@ -1,4 +1,4 @@
-from core.models import Slice, SlicePrivilege, SliceRole, Sliver, Site, Node, User
+from core.models import Slice, SlicePrivilege, SliceRole, Instance, Site, Node, User
from plus import PlusObjectMixin
from operator import itemgetter, attrgetter
from rest_framework.exceptions import APIException
@@ -22,16 +22,16 @@
used_sites = {}
ready_sites = {}
used_deployments = {}
- sliverCount = 0
+ instanceCount = 0
sshCommands = []
- for sliver in self.slivers.all():
- site = sliver.node.site_deployment.site
- deployment = sliver.node.site_deployment.deployment
+ for instance in self.instances.all():
+ site = instance.node.site_deployment.site
+ deployment = instance.node.site_deployment.deployment
used_sites[site.name] = used_sites.get(site.name, 0) + 1
used_deployments[deployment.name] = used_deployments.get(deployment.name, 0) + 1
- sliverCount = sliverCount + 1
+ instanceCount = instanceCount + 1
- sshCommand = sliver.get_ssh_command()
+ sshCommand = instance.get_ssh_command()
if sshCommand:
sshCommands.append(sshCommand)
@@ -57,7 +57,7 @@
self._sliceInfo= {"sitesUsed": used_sites,
"sitesReady": ready_sites,
"deploymentsUsed": used_deployments,
- "sliverCount": sliverCount,
+ "instanceCount": instanceCount,
"siteCount": len(used_sites.keys()),
"users": users,
"roles": [],
@@ -127,10 +127,10 @@
nodeList = []
for node in Node.objects.all():
if (node.site_deployment.site.id in siteIDList):
- node.sliverCount = 0
- for sliver in node.slivers.all():
- if sliver.slice.id == self.id:
- node.sliverCount = node.sliverCount + 1
+ node.instanceCount = 0
+ for instance in node.instances.all():
+ if instance.slice.id == self.id:
+ node.instanceCount = node.instanceCount + 1
nodeList.append(node)
return nodeList
@@ -170,58 +170,58 @@
print "save_site_allocation, reset=",reset
if (not self._site_allocation):
- # Must be a sliver that was just created, and has not site_allocation
+ # Must be an instance that was just created, and has no site_allocation
# field.
return
- all_slice_slivers = self.slivers.all()
+ all_slice_instances = self.instances.all()
for site_name in self._site_allocation.keys():
desired_allocation = self._site_allocation[site_name]
- # make a list of the slivers for this site
- slivers = []
- for sliver in all_slice_slivers:
- if sliver.node.site_deployment.site.name == site_name:
- slivers.append(sliver)
+ # make a list of the instances for this site
+ instances = []
+ for instance in all_slice_instances:
+ if instance.node.site_deployment.site.name == site_name:
+ instances.append(instance)
- # delete extra slivers
- while (reset and len(slivers)>0) or (len(slivers) > desired_allocation):
- sliver = slivers.pop()
+ # delete extra instances
+ while (reset and len(instances)>0) or (len(instances) > desired_allocation):
+ instance = instances.pop()
if (not noAct):
- print "deleting sliver", sliver
- sliver.delete()
+ print "deleting instance", instance
+ instance.delete()
else:
- print "would delete sliver", sliver
+ print "would delete instance", instance
- # add more slivers
- if (len(slivers) < desired_allocation):
+ # add more instances
+ if (len(instances) < desired_allocation):
site = Site.objects.get(name = site_name)
nodes = self.get_node_allocation([site])
if (not nodes):
raise APIException(detail="no nodes in site %s" % site_name)
- while (len(slivers) < desired_allocation):
+ while (len(instances) < desired_allocation):
# pick the least allocated node
- nodes = sorted(nodes, key=attrgetter("sliverCount"))
+ nodes = sorted(nodes, key=attrgetter("instanceCount"))
node = nodes[0]
- sliver = Sliver(name=node.name,
+ instance = Instance(name=node.name,
slice=self,
node=node,
image = self.default_image,
flavor = self.default_flavor,
creator = self.creator,
deployment = node.site_deployment.deployment)
- sliver.caller = self.caller
- slivers.append(sliver)
+ instance.caller = self.caller
+ instances.append(instance)
if (not noAct):
- print "added sliver", sliver
- sliver.save()
+ print "added instance", instance
+ instance.save()
else:
- print "would add sliver", sliver
+ print "would add instance", instance
- node.sliverCount = node.sliverCount + 1
+ node.instanceCount = node.instanceCount + 1
def save_users(self, noAct = False):
new_users = self._update_users
diff --git a/xos/ec2_observer/deleters/instance_deleter.py b/xos/ec2_observer/deleters/instance_deleter.py
new file mode 100644
index 0000000..60f9e47
--- /dev/null
+++ b/xos/ec2_observer/deleters/instance_deleter.py
@@ -0,0 +1,14 @@
+from core.models import Instance, SliceDeployments
+from observer.deleter import Deleter
+
+class InstanceDeleter(Deleter):
+ model='Instance'
+
+ def call(self, pk):
+ instance = Instance.objects.get(pk=pk)
+ if instance.instance_id:
+ driver = self.driver.client_driver(caller=instance.creator,
+ tenant=instance.slice.name,
+ deployment=instance.deploymentNetwork.name)
+ driver.destroy_instance(instance.instance_id)
+ instance.delete()
diff --git a/xos/ec2_observer/deleters/network_instance_deleter.py b/xos/ec2_observer/deleters/network_instance_deleter.py
new file mode 100644
index 0000000..8a8f516
--- /dev/null
+++ b/xos/ec2_observer/deleters/network_instance_deleter.py
@@ -0,0 +1,13 @@
+from core.models import NetworkInstance
+from observer.deleter import Deleter
+
+class NetworkInstanceDeleter(Deleter):
+ model='NetworkInstance'
+
+ def call(self, pk):
+ network_instance = NetworkInstance.objects.get(pk=pk)
+ # handle openstack delete
+
+ network_instance.delete()
+
+
diff --git a/xos/ec2_observer/deleters/network_sliver_deleter.py b/xos/ec2_observer/deleters/network_sliver_deleter.py
deleted file mode 100644
index 71ba040..0000000
--- a/xos/ec2_observer/deleters/network_sliver_deleter.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from core.models import NetworkSliver
-from observer.deleter import Deleter
-
-class NetworkSliverDeleter(Deleter):
- model='NetworkSliver'
-
- def call(self, pk):
- network_sliver = NetworkSlivers.objects.get(pk=pk)
- # handle openstack delete
-
- network_sliver.delete()
-
-
diff --git a/xos/ec2_observer/deleters/sliver_deleter.py b/xos/ec2_observer/deleters/sliver_deleter.py
deleted file mode 100644
index 097f0f7..0000000
--- a/xos/ec2_observer/deleters/sliver_deleter.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from core.models import Sliver, SliceDeployments
-from observer.deleter import Deleter
-
-class SliverDeleter(Deleter):
- model='Sliver'
-
- def call(self, pk):
- sliver = Sliver.objects.get(pk=pk)
- if sliver.instance_id:
- driver = self.driver.client_driver(caller=sliver.creator,
- tenant=sliver.slice.name,
- deployment=sliver.deploymentNetwork.name)
- driver.destroy_instance(sliver.instance_id)
- sliver.delete()
diff --git a/xos/ec2_observer/event_loop.py b/xos/ec2_observer/event_loop.py
index bdbbab0..fb91ee8 100644
--- a/xos/ec2_observer/event_loop.py
+++ b/xos/ec2_observer/event_loop.py
@@ -49,7 +49,7 @@
return ig
class XOSObserver:
- #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivilege,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
+ #sync_steps = [SyncNetworks,SyncNetworkInstances,SyncSites,SyncSitePrivilege,SyncSlices,SyncSliceMemberships,SyncInstances,SyncInstanceIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
sync_steps = []
diff --git a/xos/ec2_observer/steps/__init__.py b/xos/ec2_observer/steps/__init__.py
index ed44fdb..de7a1fd 100644
--- a/xos/ec2_observer/steps/__init__.py
+++ b/xos/ec2_observer/steps/__init__.py
@@ -1,12 +1,12 @@
#from .sync_external_routes import SyncExternalRoutes
-#from .sync_network_slivers import SyncNetworkSlivers
+#from .sync_network_instances import SyncNetworkInstances
#from .sync_networks import SyncNetworks
#from .sync_network_deployments import SyncNetworkDeployments
#from .sync_site_privileges import SyncSitePrivilege
#from .sync_slice_memberships import SyncSliceMemberships
#from .sync_slices import SyncSlices
-#from .sync_sliver_ips import SyncSliverIps
-#from .sync_slivers import SyncSlivers
+#from .sync_instance_ips import SyncInstanceIps
+#from .sync_instances import SyncInstances
#from .sync_users import SyncUsers
#from .sync_roles import SyncRoles
#from .sync_nodes import SyncNodes
diff --git a/xos/ec2_observer/steps/sync_slivers.py b/xos/ec2_observer/steps/sync_instances.py
similarity index 62%
rename from xos/ec2_observer/steps/sync_slivers.py
rename to xos/ec2_observer/steps/sync_instances.py
index a86b4c4..3dc5f0e 100644
--- a/xos/ec2_observer/steps/sync_slivers.py
+++ b/xos/ec2_observer/steps/sync_instances.py
@@ -4,7 +4,7 @@
from django.db.models import F, Q
from xos.config import Config
from ec2_observer.syncstep import SyncStep
-from core.models.sliver import Sliver
+from core.models.instance import Instance
from core.models.slice import SlicePrivilege, SliceDeployments
from core.models.network import Network, NetworkSlice, NetworkDeployments
from util.logger import Logger, logging
@@ -16,53 +16,53 @@
logger = Logger(level=logging.INFO)
-class SyncSlivers(SyncStep):
- provides=[Sliver]
+class SyncInstances(SyncStep):
+ provides=[Instance]
requested_interval=0
def fetch_pending(self, deletion):
if deletion:
- object_source = Sliver.deleted_objects
+ object_source = Instance.deleted_objects
else:
- object_source = Sliver.objects
+ object_source = Instance.objects
- all_slivers = object_source.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
- my_slivers = []
+ all_instances = object_source.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ my_instances = []
- for sliver in all_slivers:
- sd = SliceDeployments.objects.filter(Q(slice=sliver.slice))
+ for instance in all_instances:
+ sd = SliceDeployments.objects.filter(Q(slice=instance.slice))
if (sd):
if (sd.deployment.name=='Amazon EC2'):
- my_slivers.append(sliver)
- if (sliver.node.deployment.name=='Amazon EC2'):
- my_slivers.append(sliver)
- return my_slivers
+ my_instances.append(instance)
+ if (instance.node.deployment.name=='Amazon EC2'):
+ my_instances.append(instance)
+ return my_instances
- def delete_record(self, sliver):
- user = sliver.creator
+ def delete_record(self, instance):
+ user = instance.creator
e = get_creds(user=user, site=user.site)
- result = aws_run('ec2 terminate-instances --instance-ids=%s'%sliver.instance_id, env=e)
+ result = aws_run('ec2 terminate-instances --instance-ids=%s'%instance.instance_id, env=e)
- def sync_record(self, sliver):
- logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
+ def sync_record(self, instance):
+ logger.info("sync'ing instance:%s deployment:%s " % (instance, instance.node.deployment))
- if not sliver.instance_id:
+ if not instance.instance_id:
# public keys
- slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+ slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
- if sliver.creator.public_key:
- pubkeys.append(sliver.creator.public_key)
+ if instance.creator.public_key:
+ pubkeys.append(instance.creator.public_key)
- if sliver.slice.creator.public_key:
- pubkeys.append(sliver.slice.creator.public_key)
+ if instance.slice.creator.public_key:
+ pubkeys.append(instance.slice.creator.public_key)
# netowrks
# include all networks available to the slice and/or associated network templates
#nics = []
- #networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
+ #networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
#network_deployments = NetworkDeployments.objects.filter(network__in=networks,
- #deployment=sliver.node.deployment)
+ #deployment=instance.node.deployment)
# Gather private networks first. This includes networks with a template that has
# visibility = private and translation = none
#for network_deployment in network_deployments:
@@ -78,11 +78,11 @@
# nics.append({'net-id': net['id']})
# look up image id
- instance_type = sliver.node.name.rsplit('.',1)[0]
+ instance_type = instance.node.name.rsplit('.',1)[0]
# Bail out of we don't have a key
- key_name = sliver.creator.email.lower().replace('@', 'AT').replace('.', '')
- u = sliver.creator
+ key_name = instance.creator.email.lower().replace('@', 'AT').replace('.', '')
+ u = instance.creator
s = u.site
e = get_creds(user=u, site=s)
key_sig = aws_run('ec2 describe-key-pairs', env=e)
@@ -95,23 +95,23 @@
if (not key_found):
# set backend_status
- raise Exception('Will not sync sliver without key')
+ raise Exception('Will not sync instance without key')
- image_id = sliver.image.path
- instance_sig = aws_run('ec2 run-instances --image-id %s --instance-type %s --count 1 --key-name %s --placement AvailabilityZone=%s'%(image_id,instance_type,key_name,sliver.node.site.name), env=e)
- sliver.instance_id = instance_sig['Instances'][0]['InstanceId']
- sliver.save()
+ image_id = instance.image.path
+ instance_sig = aws_run('ec2 run-instances --image-id %s --instance-type %s --count 1 --key-name %s --placement AvailabilityZone=%s'%(image_id,instance_type,key_name,instance.node.site.name), env=e)
+ instance.instance_id = instance_sig['Instances'][0]['InstanceId']
+ instance.save()
state = instance_sig['Instances'][0]['State']['Code']
if (state==16):
- sliver.ip = instance_sig['Instances'][0]['PublicIpAddress']
- sliver.save()
+ instance.ip = instance_sig['Instances'][0]['PublicIpAddress']
+ instance.save()
else:
# This status message should go into backend_status
raise Exception('Waiting for instance to start')
else:
- ret = aws_run('ec2 describe-instances --instance-ids %s'%sliver.instance_id, env=e)
+ ret = aws_run('ec2 describe-instances --instance-ids %s'%instance.instance_id, env=e)
state = ret['Reservations'][0]['Instances'][0]['State']['Code']
if (state==16):
- sliver.ip = ret['Reservations'][0]['Instances'][0]['PublicIpAddress']
- sliver.save()
+ instance.ip = ret['Reservations'][0]['Instances'][0]['PublicIpAddress']
+ instance.save()
diff --git a/xos/ec2_observer/syncstep.py b/xos/ec2_observer/syncstep.py
index 72adb87..e15e719 100644
--- a/xos/ec2_observer/syncstep.py
+++ b/xos/ec2_observer/syncstep.py
@@ -57,7 +57,7 @@
objs = main_obj.deleted_objects.all()
return objs
- #return Sliver.objects.filter(ip=None)
+ #return Instance.objects.filter(ip=None)
def check_dependencies(self, obj, failed):
for dep in self.dependencies:
diff --git a/xos/hpc/models.py b/xos/hpc/models.py
index 48ae0f0..e49bf4e 100644
--- a/xos/hpc/models.py
+++ b/xos/hpc/models.py
@@ -18,16 +18,16 @@
cmi_hostname = StrippedCharField(max_length=254, null=True, blank=True)
hpc_port80 = models.BooleanField(default=True, help_text="Enable port 80 for HPC")
- watcher_hpc_network = StrippedCharField(max_length=254, null=True, blank=True, help_text="Network for hpc_watcher to contact hpc sliver")
- watcher_dnsdemux_network = StrippedCharField(max_length=254, null=True, blank=True, help_text="Network for hpc_watcher to contact dnsdemux sliver")
- watcher_dnsredir_network = StrippedCharField(max_length=254, null=True, blank=True, help_text="Network for hpc_watcher to contact dnsredir sliver")
+ watcher_hpc_network = StrippedCharField(max_length=254, null=True, blank=True, help_text="Network for hpc_watcher to contact hpc instance")
+ watcher_dnsdemux_network = StrippedCharField(max_length=254, null=True, blank=True, help_text="Network for hpc_watcher to contact dnsdemux instance")
+ watcher_dnsredir_network = StrippedCharField(max_length=254, null=True, blank=True, help_text="Network for hpc_watcher to contact dnsredir instance")
@property
def scale(self):
hpc_slices = [x for x in self.slices.all() if "hpc" in x.name]
if not hpc_slices:
return 0
- return hpc_slices[0].slivers.count()
+ return hpc_slices[0].instances.count()
@scale.setter
def scale(self, value):
diff --git a/xos/hpc_wizard/xos_analytics.py b/xos/hpc_wizard/xos_analytics.py
index a95d3ae..9502aa1 100644
--- a/xos/hpc_wizard/xos_analytics.py
+++ b/xos/hpc_wizard/xos_analytics.py
@@ -15,7 +15,7 @@
from django.conf import settings
from django import db
from django.db import connection
-from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice, Service
+from core.models import Slice, Instance, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice, Service
BLUE_LOAD=5000000
RED_LOAD=15000000
@@ -228,17 +228,17 @@
# we didn't find it in the data model
continue
- allocated_slivers = 0
+ allocated_instances = 0
if model_site and slice:
- for sliver in slice.slivers.all():
- if sliver.node.site == model_site:
- allocated_slivers = allocated_slivers + 1
+ for instance in slice.instances.all():
+ if instance.node.site == model_site:
+ allocated_instances = allocated_instances + 1
row["lat"] = float(model_site.location.latitude)
row["long"] = float(model_site.location.longitude)
row["url"] = model_site.site_url
row["numNodes"] = model_site.nodes.count()
- row["allocated_slivers"] = allocated_slivers
+ row["allocated_instances"] = allocated_instances
max_cpu = row.get("max_avg_cpu", row.get("max_cpu",0))
cpu=float(max_cpu)/100.0
diff --git a/xos/importer/plclassic/importer.py b/xos/importer/plclassic/importer.py
index 864f4b9..de8628b 100644
--- a/xos/importer/plclassic/importer.py
+++ b/xos/importer/plclassic/importer.py
@@ -7,7 +7,7 @@
from plclassic.site_importer import SiteImporter
from plclassic.user_importer import UserImporter
from plclassic.slice_importer import SliceImporter
-from plclassic.sliver_importer import SliverImporter
+from plclassic.instance_importer import InstanceImporter
class Call:
@@ -36,14 +36,14 @@
self.sites = SiteImporter(api)
self.slices = SliceImporter(api)
self.users = UserImporter(api)
- self.slivers = SliverImporter(api)
+ self.instances = InstanceImporter(api)
def run(self):
self.sites.run()
self.users.run()
self.slices.run(remote_sites=self.sites.remote_sites,
local_sites=self.sites.local_sites)
- self.slivers.run()
+ self.instances.run()
diff --git a/xos/importer/plclassic/sliver_importer.py b/xos/importer/plclassic/instance_importer.py
similarity index 82%
rename from xos/importer/plclassic/sliver_importer.py
rename to xos/importer/plclassic/instance_importer.py
index 3f7912f..0858572 100644
--- a/xos/importer/plclassic/sliver_importer.py
+++ b/xos/importer/plclassic/instance_importer.py
@@ -1,6 +1,6 @@
from PLC.Nodes import Nodes
-class SliverImporter:
+class InstanceImporter:
def __init__(self, api):
self.api = api
diff --git a/xos/model_autodeletion.py b/xos/model_autodeletion.py
index f5288a0..2bfc48c 100644
--- a/xos/model_autodeletion.py
+++ b/xos/model_autodeletion.py
@@ -1 +1 @@
-ephemeral_models = ['ReservedResource','Sliver','Image','Network','Port','Tag','SitePrivilege','SliceMembership','SliceTag','Reservation','Slice']
+ephemeral_models = ['ReservedResource','Instance','Image','Network','Port','Tag','SitePrivilege','SliceMembership','SliceTag','Reservation','Slice']
diff --git a/xos/model_policies/__init__.py b/xos/model_policies/__init__.py
index a74e7be..36c6e25 100644
--- a/xos/model_policies/__init__.py
+++ b/xos/model_policies/__init__.py
@@ -1,5 +1,5 @@
from .model_policy_Slice import *
-from .model_policy_Sliver import *
+from .model_policy_Instance import *
from .model_policy_User import *
from .model_policy_Network import *
from .model_policy_Site import *
diff --git a/xos/model_policies/model_policy_Instance.py b/xos/model_policies/model_policy_Instance.py
new file mode 100644
index 0000000..a13428d
--- /dev/null
+++ b/xos/model_policies/model_policy_Instance.py
@@ -0,0 +1,13 @@
+
+def handle(instance):
+ from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
+
+ networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
+ controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+ controller=instance.node.site_deployment.controller)
+
+ for cn in controller_networks:
+ if (cn.lazy_blocked):
+ cn.lazy_blocked=False
+ cn.backend_register = '{}'
+ cn.save()
diff --git a/xos/model_policies/model_policy_Sliver.py b/xos/model_policies/model_policy_Sliver.py
index 0004e33..a13428d 100644
--- a/xos/model_policies/model_policy_Sliver.py
+++ b/xos/model_policies/model_policy_Sliver.py
@@ -1,10 +1,10 @@
-def handle(sliver):
+def handle(instance):
from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
+ networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
controller_networks = ControllerNetwork.objects.filter(network__in=networks,
- controller=sliver.node.site_deployment.controller)
+ controller=instance.node.site_deployment.controller)
for cn in controller_networks:
if (cn.lazy_blocked):
diff --git a/xos/model_policy.py b/xos/model_policy.py
index 2813416..ce59a32 100644
--- a/xos/model_policy.py
+++ b/xos/model_policy.py
@@ -59,7 +59,7 @@
return
# These are the models whose children get deleted when they are
- delete_policy_models = ['Slice','Sliver','Network']
+ delete_policy_models = ['Slice','Instance','Network']
sender_name = instance.__class__.__name__
policy_name = 'model_policy_%s'%sender_name
noargs = False
@@ -97,10 +97,10 @@
pass
def run_policy():
- from core.models import Sliver,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
+ from core.models import Instance,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
while (True):
start = time.time()
- models = [Sliver,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
+ models = [Instance,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
objects = []
deleted_objects = []
diff --git a/xos/observers/base/SyncSliverUsingAnsible.py b/xos/observers/base/SyncInstanceUsingAnsible.py
similarity index 85%
rename from xos/observers/base/SyncSliverUsingAnsible.py
rename to xos/observers/base/SyncInstanceUsingAnsible.py
index c824595..9455780 100644
--- a/xos/observers/base/SyncSliverUsingAnsible.py
+++ b/xos/observers/base/SyncInstanceUsingAnsible.py
@@ -13,7 +13,7 @@
logger = Logger(level=logging.INFO)
-class SyncSliverUsingAnsible(SyncStep):
+class SyncInstanceUsingAnsible(SyncStep):
# All of the following should be defined for classes derived from this
# base class. Examples below use VCPETenant.
@@ -36,13 +36,13 @@
return {}
- def get_sliver(self, o):
- # We need to know what sliver is associated with the object. Let's
- # assume 'o' has a field called 'sliver'. If the field is called
+ def get_instance(self, o):
+ # We need to know what instance is associated with the object. Let's
+ # assume 'o' has a field called 'instance'. If the field is called
# something else, or if custom logic is needed, then override this
# method.
- return o.sliver
+ return o.instance
def run_playbook(self, o, fields):
tStart = time.time()
@@ -61,9 +61,9 @@
def sync_record(self, o):
logger.info("sync'ing object %s" % str(o))
- sliver = self.get_sliver(o)
- if not sliver:
- self.defer_sync(o, "waiting on sliver")
+ instance = self.get_instance(o)
+ if not instance:
+ self.defer_sync(o, "waiting on instance")
return
if not os.path.exists(self.service_key_name):
@@ -71,9 +71,9 @@
service_key = file(self.service_key_name).read()
- fields = { "sliver_name": sliver.name,
- "hostname": sliver.node.name,
- "instance_id": sliver.instance_id,
+ fields = { "instance_name": instance.name,
+ "hostname": instance.node.name,
+ "instance_id": instance.instance_id,
"private_key": service_key,
"ansible_tag": "vcpe_tenant_" + str(o.id)
}
diff --git a/xos/observers/hpc/hpc_watcher.py b/xos/observers/hpc/hpc_watcher.py
index d098257..7c49aae 100644
--- a/xos/observers/hpc/hpc_watcher.py
+++ b/xos/observers/hpc/hpc_watcher.py
@@ -1,7 +1,7 @@
"""
hpc_watcher.py
- Daemon to watch the health of HPC and RR slivers.
+ Daemon to watch the health of HPC and RR instances.
This deamon uses HpcHealthCheck objects in the Data Model to conduct
periodic tests of HPC and RR nodes. Two types of Health Checks are
@@ -25,9 +25,9 @@
In addition to the above, HPC heartbeat probes are conducted, similar to
the ones that dnsredir conducts.
- The results of health checks are stored in a tag attached to the Sliver
+ The results of health checks are stored in a tag attached to the Instance
the healthcheck was conducted against. If all healthchecks of a particular
- variety were successful for a sliver, then "success" will be stored in
+ variety were successful for an instance, then "success" will be stored in
the tag. Otherwise, the first healthcheck to fail will be stored in the
tag.
@@ -351,50 +351,50 @@
Thread.__init__(self)
self.daemon = True
- def get_public_ip(self, service, sliver):
+ def get_public_ip(self, service, instance):
network_name = None
- if "hpc" in sliver.slice.name:
+ if "hpc" in instance.slice.name:
network_name = getattr(service, "watcher_hpc_network", None)
- elif "demux" in sliver.slice.name:
+ elif "demux" in instance.slice.name:
network_name = getattr(service, "watcher_dnsdemux_network", None)
- elif "redir" in sliver.slice.name:
+ elif "redir" in instance.slice.name:
network_name = getattr(service, "watcher_dnsredir_network", None)
if network_name and network_name.lower()=="nat":
return None
if (network_name is None) or (network_name=="") or (network_name.lower()=="public"):
- return sliver.get_public_ip()
+ return instance.get_public_ip()
- for ns in sliver.ports.all():
+ for ns in instance.ports.all():
if (ns.ip) and (ns.network.name==network_name):
return ns.ip
raise ValueError("Couldn't find network %s" % str(network_name))
- def set_status(self, sliver, service, kind, msg, check_error=True):
- #print sliver.node.name, kind, msg
+ def set_status(self, instance, service, kind, msg, check_error=True):
+ #print instance.node.name, kind, msg
if check_error:
- sliver.has_error = (msg!="success")
+ instance.has_error = (msg!="success")
- sliver_type = ContentType.objects.get_for_model(sliver)
+ instance_type = ContentType.objects.get_for_model(instance)
- t = Tag.objects.filter(service=service, name=kind+".msg", content_type__pk=sliver_type.id, object_id=sliver.id)
+ t = Tag.objects.filter(service=service, name=kind+".msg", content_type__pk=instance_type.id, object_id=instance.id)
if t:
t=t[0]
if (t.value != msg):
t.value = msg
t.save()
else:
- Tag(service=service, name=kind+".msg", content_object = sliver, value=msg).save()
+ Tag(service=service, name=kind+".msg", content_object = instance, value=msg).save()
- t = Tag.objects.filter(service=service, name=kind+".time", content_type__pk=sliver_type.id, object_id=sliver.id)
+ t = Tag.objects.filter(service=service, name=kind+".time", content_type__pk=instance_type.id, object_id=instance.id)
if t:
t=t[0]
t.value = str(time.time())
t.save()
else:
- Tag(service=service, name=kind+".time", content_object = sliver, value=str(time.time())).save()
+ Tag(service=service, name=kind+".time", content_object = instance, value=str(time.time())).save()
def get_service_slices(self, service, kind=None):
try:
@@ -416,51 +416,51 @@
for i in range(0,10):
DnsResolver(queue = self.resolver_queue)
- def check_request_routers(self, service, slivers):
- for sliver in slivers:
- sliver.has_error = False
+ def check_request_routers(self, service, instances):
+ for instance in instances:
+ instance.has_error = False
try:
- ip = self.get_public_ip(service, sliver)
+ ip = self.get_public_ip(service, instance)
except Exception, e:
- self.set_status(sliver, service, "watcher.DNS", "exception: %s" % str(e))
+ self.set_status(instance, service, "watcher.DNS", "exception: %s" % str(e))
continue
if not ip:
try:
- ip = socket.gethostbyname(sliver.node.name)
+ ip = socket.gethostbyname(instance.node.name)
except:
- self.set_status(sliver, service, "watcher.DNS", "dns resolution failure")
+ self.set_status(instance, service, "watcher.DNS", "dns resolution failure")
continue
if not ip:
- self.set_status(sliver, service, "watcher.DNS", "no IP address")
+ self.set_status(instance, service, "watcher.DNS", "no IP address")
continue
checks = HpcHealthCheck.objects.filter(kind="dns")
if not checks:
- self.set_status(sliver, service, "watcher.DNS", "no DNS HealthCheck tests configured")
+ self.set_status(instance, service, "watcher.DNS", "no DNS HealthCheck tests configured")
for check in checks:
- self.resolver_queue.submit_job({"domain": check.resource_name, "server": ip, "port": 53, "sliver": sliver, "result_contains": check.result_contains})
+ self.resolver_queue.submit_job({"domain": check.resource_name, "server": ip, "port": 53, "instance": instance, "result_contains": check.result_contains})
while self.resolver_queue.outstanding > 0:
result = self.resolver_queue.get_result()
- sliver = result["sliver"]
- if (result["status"]!="success") and (not sliver.has_error):
- self.set_status(sliver, service, "watcher.DNS", result["status"])
+ instance = result["instance"]
+ if (result["status"]!="success") and (not instance.has_error):
+ self.set_status(instance, service, "watcher.DNS", result["status"])
- for sliver in slivers:
- if not sliver.has_error:
- self.set_status(sliver, service, "watcher.DNS", "success")
+ for instance in instances:
+ if not instance.has_error:
+ self.set_status(instance, service, "watcher.DNS", "success")
def run_once(self):
for hpcService in HpcService.objects.all():
for slice in self.get_service_slices(hpcService, "dnsdemux"):
- self.check_request_routers(hpcService, slice.slivers.all())
+ self.check_request_routers(hpcService, slice.instances.all())
for rrService in RequestRouterService.objects.all():
for slice in self.get_service_slices(rrService, "dnsdemux"):
- self.check_request_routers(rrService, slice.slivers.all())
+ self.check_request_routers(rrService, slice.instances.all())
def run(self):
while True:
@@ -477,26 +477,26 @@
for i in range(0, 10):
HpcHeartbeat(queue = self.heartbeat_queue)
- def probe_hpc(self, service, slivers):
- for sliver in slivers:
- sliver.has_error = False
+ def probe_hpc(self, service, instances):
+ for instance in instances:
+ instance.has_error = False
- self.heartbeat_queue.submit_job({"server": sliver.node.name, "port": 8009, "sliver": sliver})
+ self.heartbeat_queue.submit_job({"server": instance.node.name, "port": 8009, "instance": instance})
while self.heartbeat_queue.outstanding > 0:
result = self.heartbeat_queue.get_result()
- sliver = result["sliver"]
- if (result["status"]!="success") and (not sliver.has_error):
- self.set_status(sliver, service, "watcher.HPC-hb", result["status"])
+ instance = result["instance"]
+ if (result["status"]!="success") and (not instance.has_error):
+ self.set_status(instance, service, "watcher.HPC-hb", result["status"])
- for sliver in slivers:
- if not sliver.has_error:
- self.set_status(sliver, service, "watcher.HPC-hb", "success")
+ for instance in instances:
+ if not instance.has_error:
+ self.set_status(instance, service, "watcher.HPC-hb", "success")
def run_once(self):
for hpcService in HpcService.objects.all():
for slice in self.get_service_slices(hpcService, "hpc"):
- self.probe_hpc(hpcService, slice.slivers.all())
+ self.probe_hpc(hpcService, slice.instances.all())
def run(self):
while True:
@@ -513,42 +513,42 @@
for i in range(0, 10):
HpcFetchUrl(queue = self.fetch_queue)
- def fetch_hpc(self, service, slivers):
- for sliver in slivers:
- sliver.has_error = False
- sliver.url_status = []
+ def fetch_hpc(self, service, instances):
+ for instance in instances:
+ instance.has_error = False
+ instance.url_status = []
checks = HpcHealthCheck.objects.filter(kind="http")
if not checks:
- self.set_status(sliver, service, "watcher.HPC-fetch", "no HTTP HealthCheck tests configured")
+ self.set_status(instance, service, "watcher.HPC-fetch", "no HTTP HealthCheck tests configured")
for check in checks:
if (not check.resource_name) or (":" not in check.resource_name):
- self.set_status(sliver, service, "watcher.HPC-fetch", "malformed resource_name: " + str(check.resource_name))
+ self.set_status(instance, service, "watcher.HPC-fetch", "malformed resource_name: " + str(check.resource_name))
break
(domain, url) = check.resource_name.split(":",1)
- self.fetch_queue.submit_job({"server": sliver.node.name, "port": 80, "sliver": sliver, "domain": domain, "url": url})
+ self.fetch_queue.submit_job({"server": instance.node.name, "port": 80, "instance": instance, "domain": domain, "url": url})
while self.fetch_queue.outstanding > 0:
result = self.fetch_queue.get_result()
- sliver = result["sliver"]
+ instance = result["instance"]
if (result["status"] == "success"):
- sliver.url_status.append( (result["domain"] + result["url"], "success", result["bytes_downloaded"], result["total_time"]) )
- if (result["status"]!="success") and (not sliver.has_error):
- self.set_status(sliver, service, "watcher.HPC-fetch", result["status"])
+ instance.url_status.append( (result["domain"] + result["url"], "success", result["bytes_downloaded"], result["total_time"]) )
+ if (result["status"]!="success") and (not instance.has_error):
+ self.set_status(instance, service, "watcher.HPC-fetch", result["status"])
- for sliver in slivers:
- self.set_status(sliver, service, "watcher.HPC-fetch-urls", json.dumps(sliver.url_status), check_error=False)
- if not sliver.has_error:
- self.set_status(sliver, service, "watcher.HPC-fetch", "success")
+ for instance in instances:
+ self.set_status(instance, service, "watcher.HPC-fetch-urls", json.dumps(instance.url_status), check_error=False)
+ if not instance.has_error:
+ self.set_status(instance, service, "watcher.HPC-fetch", "success")
def run_once(self):
for hpcService in HpcService.objects.all():
for slice in self.get_service_slices(hpcService, "hpc"):
try:
- self.fetch_hpc(hpcService, slice.slivers.all())
+ self.fetch_hpc(hpcService, slice.instances.all())
except:
traceback.print_exc()
@@ -567,41 +567,41 @@
for i in range(0, 10):
WatcherWorker(queue = self.fetch_queue)
- def fetch_watcher(self, service, slivers):
- for sliver in slivers:
+ def fetch_watcher(self, service, instances):
+ for instance in instances:
try:
- ip = self.get_public_ip(service, sliver)
+ ip = self.get_public_ip(service, instance)
except Exception, e:
- self.set_status(sliver, service, "watcher.watcher", json.dumps({"status": "exception: %s" % str(e)}) )
+ self.set_status(instance, service, "watcher.watcher", json.dumps({"status": "exception: %s" % str(e)}) )
continue
if not ip:
try:
- ip = socket.gethostbyname(sliver.node.name)
+ ip = socket.gethostbyname(instance.node.name)
except:
- self.set_status(sliver, service, "watcher.watcher", json.dumps({"status": "dns resolution failure"}) )
+ self.set_status(instance, service, "watcher.watcher", json.dumps({"status": "dns resolution failure"}) )
continue
if not ip:
- self.set_status(sliver, service, "watcher.watcher", json.dumps({"status": "no IP address"}) )
+ self.set_status(instance, service, "watcher.watcher", json.dumps({"status": "no IP address"}) )
continue
port = 8015
- if ("redir" in sliver.slice.name):
+ if ("redir" in instance.slice.name):
port = 8016
- elif ("demux" in sliver.slice.name):
+ elif ("demux" in instance.slice.name):
port = 8017
- self.fetch_queue.submit_job({"server": ip, "port": port, "sliver": sliver})
+ self.fetch_queue.submit_job({"server": ip, "port": port, "instance": instance})
while self.fetch_queue.outstanding > 0:
result = self.fetch_queue.get_result()
- sliver = result["sliver"]
- self.set_status(sliver, service, "watcher.watcher", result["status"])
+ instance = result["instance"]
+ self.set_status(instance, service, "watcher.watcher", result["status"])
def run_once(self):
for hpcService in HpcService.objects.all():
for slice in self.get_service_slices(hpcService):
- self.fetch_watcher(hpcService, slice.slivers.all())
+ self.fetch_watcher(hpcService, slice.instances.all())
def run(self):
while True:
diff --git a/xos/observers/hpc/hpclib.py b/xos/observers/hpc/hpclib.py
index 30dbe87..51864a5 100644
--- a/xos/observers/hpc/hpclib.py
+++ b/xos/observers/hpc/hpclib.py
@@ -99,11 +99,11 @@
for slice in slices:
if slice.name.endswith("cmi"):
- for sliver in slice.slivers.all():
- if sliver.node:
- return sliver.node.name
+ for instance in slice.instances.all():
+ if instance.node:
+ return instance.node.name
- raise Exception("Failed to find a CMI sliver")
+ raise Exception("Failed to find a CMI instance")
@property
def client(self):
diff --git a/xos/observers/vbng/steps/sync_vbngtenant.py b/xos/observers/vbng/steps/sync_vbngtenant.py
index 18adfc8..8868d6f 100644
--- a/xos/observers/vbng/steps/sync_vbngtenant.py
+++ b/xos/observers/vbng/steps/sync_vbngtenant.py
@@ -63,10 +63,10 @@
raise Exception("More than one vCPE tenant is associated with vBNG %s" % str(o.id))
vcpe = vcpes[0]
- sliver = vcpe.sliver
+ instance = vcpe.instance
- if not sliver:
- raise Exception("No sliver associated with vBNG %s" % str(o.id))
+ if not instance:
+ raise Exception("No instance associated with vBNG %s" % str(o.id))
if not vcpe.wan_ip:
self.defer_sync(o, "does not have a WAN IP yet")
@@ -75,7 +75,7 @@
# this should never happen; WAN MAC is computed from WAN IP
self.defer_sync(o, "does not have a WAN MAC yet")
- return (vcpe.wan_ip, vcpe.wan_mac, vcpe.sliver.node.name)
+ return (vcpe.wan_ip, vcpe.wan_mac, vcpe.instance.node.name)
def sync_record(self, o):
logger.info("sync'ing VBNGTenant %s" % str(o))
diff --git a/xos/observers/vcpe/observer_ansible_test.py b/xos/observers/vcpe/observer_ansible_test.py
index 1b4358d..77715af 100644
--- a/xos/observers/vcpe/observer_ansible_test.py
+++ b/xos/observers/vcpe/observer_ansible_test.py
@@ -37,7 +37,7 @@
"""
observer.ansible.run_template_ssh("test.yaml",
- {"sliver_name": "onlab_test405-378",
+ {"instance_name": "onlab_test405-378",
"instance_id": "instance-0000004d",
"hostname": "node67.washington.vicci.org",
"private_key": private_key})
diff --git a/xos/observers/vcpe/steps/ansible_test/xos.py b/xos/observers/vcpe/steps/ansible_test/xos.py
index 3ef72ab..eb4f3eb 100755
--- a/xos/observers/vcpe/steps/ansible_test/xos.py
+++ b/xos/observers/vcpe/steps/ansible_test/xos.py
@@ -48,13 +48,13 @@
self.HASHED_KEY_MAGIC = "|1|"
self.has_pipelining = True
#self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
- #self.sliver_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "sliver_name", "SLIVER_NAME", None)
+ #self.instance_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "instance_name", "SLIVER_NAME", None)
inject={}
inject= utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
self.instance_id = inject["instance_id"]
- self.sliver_name = inject["sliver_name"]
+ self.instance_name = inject["instance_name"]
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
@@ -282,7 +282,7 @@
if self.ipv6:
ssh_cmd += ['-6']
#ssh_cmd += [self.host]
- ssh_cmd += [self.sliver_name]
+ ssh_cmd += [self.instance_name]
if su and su_user:
sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.py b/xos/observers/vcpe/steps/sync_vcpetenant.py
index 94f6d53..bc08845 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.py
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.py
@@ -8,7 +8,7 @@
from xos.config import Config
from observer.syncstep import SyncStep
from observer.ansible import run_template_ssh
-from observers.base.SyncSliverUsingAnsible import SyncSliverUsingAnsible
+from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
from core.models import Service, Slice
from cord.models import VCPEService, VCPETenant, VOLTTenant
from hpc.models import HpcService, CDNPrefix
@@ -22,7 +22,7 @@
logger = Logger(level=logging.INFO)
-class SyncVCPETenant(SyncSliverUsingAnsible):
+class SyncVCPETenant(SyncInstanceUsingAnsible):
provides=[VCPETenant]
observes=VCPETenant
requested_interval=0
@@ -67,21 +67,21 @@
for service in HpcService.objects.all():
for slice in service.slices.all():
if "dnsdemux" in slice.name:
- for sliver in slice.slivers.all():
- for ns in sliver.ports.all():
+ for instance in slice.instances.all():
+ for ns in instance.ports.all():
if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
dnsdemux_ip = ns.ip
if not dnsdemux_ip:
logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label)
else:
- # Connect to dnsdemux using the sliver's public address
+ # Connect to dnsdemux using the instance's public address
for service in HpcService.objects.all():
for slice in service.slices.all():
if "dnsdemux" in slice.name:
- for sliver in slice.slivers.all():
+ for instance in slice.instances.all():
if dnsdemux_ip=="none":
try:
- dnsdemux_ip = socket.gethostbyname(sliver.node.name)
+ dnsdemux_ip = socket.gethostbyname(instance.node.name)
except:
pass
if not dnsdemux_ip:
@@ -99,8 +99,8 @@
bbs_addrs = []
if vcpe_service.bbs_slice:
if vcpe_service.backend_network_label:
- for bbs_sliver in vcpe_service.bbs_slice.slivers.all():
- for ns in bbs_sliver.ports.all():
+ for bbs_instance in vcpe_service.bbs_slice.instances.all():
+ for ns in bbs_instance.ports.all():
if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
bbs_addrs.append(ns.ip)
else:
@@ -141,11 +141,11 @@
# Ansible uses the service's keypair in order to SSH into the
# instance. It would be bad if the slice had no service.
- raise Exception("Slice %s is not associated with a service" % sliver.slice.name)
+ raise Exception("Slice %s is not associated with a service" % instance.slice.name)
# Make sure the slice is configured properly
- if (service != o.sliver.slice.service):
- raise Exception("Slice %s is associated with some service that is not %s" % (str(sliver.slice), str(service)))
+ if (service != o.instance.slice.service):
+ raise Exception("Slice %s is associated with some service that is not %s" % (str(instance.slice), str(service)))
# only enable filtering if we have a subscriber object (see below)
url_filter_enable = False
diff --git a/xos/openstack/sliveragent.py b/xos/openstack/instanceagent.py
similarity index 67%
rename from xos/openstack/sliveragent.py
rename to xos/openstack/instanceagent.py
index 4aea44d..8e50cf3 100644
--- a/xos/openstack/sliveragent.py
+++ b/xos/openstack/instanceagent.py
@@ -2,10 +2,10 @@
import sys
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
import time
-from core.models.sliver import Sliver
+from core.models.instance import Instance
from openstack.manager import OpenStackManager
-class SliverAgent:
+class InstanceAgent:
def run(self):
manager = OpenStackManager()
@@ -15,25 +15,25 @@
while True :
# fill in null ip addresses
- slivers = Sliver.objects.filter(ip=None)
- for sliver in slivers:
+ instances = Instance.objects.filter(ip=None)
+ for instance in instances:
# update connection
manager.client.connect(username=manager.client.keystone.username,
password=manager.client.keystone.password,
- tenant=sliver.slice.name)
- sliver.os_manager = manager
- servers = manager.client.nova.servers.findall(id=sliver.instance_id)
+ tenant=instance.slice.name)
+ instance.os_manager = manager
+ servers = manager.client.nova.servers.findall(id=instance.instance_id)
if not servers:
continue
server = servers[0]
- ips = server.addresses.get(sliver.slice.name, [])
+ ips = server.addresses.get(instance.slice.name, [])
if not ips:
continue
- sliver.ip = ips[0]['addr']
- sliver.save()
+ instance.ip = ips[0]['addr']
+ instance.save()
time.sleep(7)
if __name__ == '__main__':
- SliverAgent().run()
+ InstanceAgent().run()
diff --git a/xos/openstack/manager.py b/xos/openstack/manager.py
index 42b1ef1..45d49b4 100644
--- a/xos/openstack/manager.py
+++ b/xos/openstack/manager.py
@@ -314,47 +314,47 @@
return networks
@require_enabled
- def save_sliver(self, sliver):
+ def save_instance(self, instance):
metadata_update = {}
- if ("numberCores" in sliver.changed_fields):
- metadata_update["cpu_cores"] = str(sliver.numberCores)
+ if ("numberCores" in instance.changed_fields):
+ metadata_update["cpu_cores"] = str(instance.numberCores)
- for tag in sliver.slice.tags.all():
+ for tag in instance.slice.tags.all():
if tag.name.startswith("sysctl-"):
metadata_update[tag.name] = tag.value
- if not sliver.instance_id:
- nics = self.get_requested_networks(sliver.slice)
+ if not instance.instance_id:
+ nics = self.get_requested_networks(instance.slice)
for nic in nics:
# If a network hasn't been instantiated yet, then we'll fail
- # during slice creation. Defer saving the sliver for now.
+ # during slice creation. Defer saving the instance for now.
if not nic.get("net-id", None):
- sliver.save() # in case it hasn't been saved yet
+ instance.save() # in case it hasn't been saved yet
return
- slice_memberships = SliceMembership.objects.filter(slice=sliver.slice)
+ slice_memberships = SliceMembership.objects.filter(slice=instance.slice)
pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
- pubkeys.append(sliver.creator.public_key)
- instance = self.driver.spawn_instance(name=sliver.name,
- key_name = sliver.creator.keyname,
- image_id = sliver.image.image_id,
- hostname = sliver.node.name,
+ pubkeys.append(instance.creator.public_key)
+ nova_instance = self.driver.spawn_instance(name=instance.name,
+ key_name = instance.creator.keyname,
+ image_id = instance.image.image_id,
+ hostname = instance.node.name,
pubkeys = pubkeys,
nics = nics,
metadata = metadata_update )
- sliver.instance_id = instance.id
- sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+ instance.instance_id = nova_instance.id
+ instance.instance_name = getattr(nova_instance, 'OS-EXT-SRV-ATTR:instance_name')
else:
if metadata_update:
- self.driver.update_instance_metadata(sliver.instance_id, metadata_update)
+ self.driver.update_instance_metadata(instance.instance_id, metadata_update)
- sliver.save()
- sliver.enacted = datetime.now()
- sliver.save(update_fields=['enacted'])
+ instance.save()
+ instance.enacted = datetime.now()
+ instance.save(update_fields=['enacted'])
@require_enabled
- def delete_sliver(self, sliver):
- if sliver.instance_id:
- self.driver.destroy_instance(sliver.instance_id)
+ def delete_instance(self, instance):
+ if instance.instance_id:
+ self.driver.destroy_instance(instance.instance_id)
def refresh_nodes(self):
diff --git a/xos/openstack/reservationagent.py b/xos/openstack/reservationagent.py
index cee5269..5cb9c8e 100644
--- a/xos/openstack/reservationagent.py
+++ b/xos/openstack/reservationagent.py
@@ -4,14 +4,14 @@
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
import time
import django.utils
-from core.models.sliver import Sliver
+from core.models.instance import Instance
from core.models.reservation import Reservation, ReservedResource
class ReservationAgent:
def run(self):
while True :
- slivers = {}
+ instances = {}
tNow = django.utils.timezone.now()
print "Processing reservations, tNow is %s" % tNow
@@ -22,25 +22,25 @@
print " deleting expired reservation"
reservation.delete()
for reservedResource in reservation.reservedresources.all():
- sliver_resources = slivers.get(reservedResource.sliver.id, {})
- sliver_resources[reservedResource.resource.name] = reservedResource.quantity
- slivers[reservedResource.sliver.id] = sliver_resources
+ instance_resources = instances.get(reservedResource.instance.id, {})
+ instance_resources[reservedResource.resource.name] = reservedResource.quantity
+ instances[reservedResource.instance.id] = instance_resources
- print "Sliver reservation set"
- for (sliverid, sliver_resources) in slivers.items():
- print " sliver", sliverid,
- for (name, value) in sliver_resources.items():
+ print "Instance reservation set"
+ for (instanceid, instance_resources) in instances.items():
+ print " instance", instanceid,
+ for (name, value) in instance_resources.items():
print str(name)+":", value,
print
- print "Updating slivers"
- for sliver in Sliver.objects.all():
- sliver_resv = slivers.get(sliver.id, {})
- numberCores = sliver_resv.get("numberCores", 0)
- if numberCores != sliver.numberCores:
- print "sliver %s setting numberCores to %s" % (sliver.name, numberCores)
- sliver.numberCores = numberCores
- sliver.save()
+ print "Updating instances"
+ for instance in Instance.objects.all():
+ instance_resv = instances.get(instance.id, {})
+ numberCores = instance_resv.get("numberCores", 0)
+ if numberCores != instance.numberCores:
+ print "instance %s setting numberCores to %s" % (instance.name, numberCores)
+ instance.numberCores = numberCores
+ instance.save()
print "sleep"
time.sleep(7)
diff --git a/xos/openstack_observer/ansible.py b/xos/openstack_observer/ansible.py
index b53dd98..74af590 100755
--- a/xos/openstack_observer/ansible.py
+++ b/xos/openstack_observer/ansible.py
@@ -136,7 +136,7 @@
def run_template_ssh(name, opts, path='', expected_num=None):
instance_id = opts["instance_id"]
- sliver_name = opts["sliver_name"]
+ instance_name = opts["instance_name"]
hostname = opts["hostname"]
private_key = opts["private_key"]
@@ -159,7 +159,7 @@
f.close()
f = open(hosts_pathname, "w")
- f.write("[%s]\n" % sliver_name)
+ f.write("[%s]\n" % instance_name)
f.write("%s ansible_ssh_private_key_file=%s\n" % (hostname, private_key_pathname))
f.close()
diff --git a/xos/openstack_observer/ceilometer.py b/xos/openstack_observer/ceilometer.py
index 9944a9c..792515e 100644
--- a/xos/openstack_observer/ceilometer.py
+++ b/xos/openstack_observer/ceilometer.py
@@ -14,7 +14,7 @@
from core.models import *
filter_dict = {
'ControllerSlice':[ControllerSlice, 'tenant_id', 'project_id'],
- 'Sliver':[Sliver, 'instance_id', 'resource_id'],
+ 'Instance':[Instance, 'instance_id', 'resource_id'],
'ControllerSite':[ControllerSite, 'tenant_id', 'project_id']
}
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index b6eceb3..fc07e7d 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -192,9 +192,10 @@
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(step_graph)
self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
- #self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncSlivers']
+ #self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncInstances']
#self.ordered_steps = ['SyncControllerSites','SyncRoles','SyncControllerUsers','SyncControllerSlices','SyncControllerNetworks']
#self.ordered_steps = ['SyncControllerNetworks']
+ #self.ordered_steps = ['SyncInstances','SyncNetworkInstances']
print "Order of steps=",self.ordered_steps
diff --git a/xos/openstack_observer/steps/sync_controller_networks.py b/xos/openstack_observer/steps/sync_controller_networks.py
index eecba85..54f2466 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.py
+++ b/xos/openstack_observer/steps/sync_controller_networks.py
@@ -8,7 +8,7 @@
from observer.syncstep import *
from core.models.network import *
from core.models.slice import *
-from core.models.sliver import Sliver
+from core.models.instance import Instance
from util.logger import observer_logger as logger
from observer.ansible import *
from openstack.driver import OpenStackDriver
diff --git a/xos/openstack_observer/steps/sync_controller_slices.py b/xos/openstack_observer/steps/sync_controller_slices.py
index 1a6f517..0eceb95 100644
--- a/xos/openstack_observer/steps/sync_controller_slices.py
+++ b/xos/openstack_observer/steps/sync_controller_slices.py
@@ -42,7 +42,7 @@
controller_user = controller_users[0]
roles = ['admin']
- max_instances=int(controller_slice.slice.max_slivers)
+ max_instances=int(controller_slice.slice.max_instances)
tenant_fields = {'endpoint':controller_slice.controller.auth_url,
'admin_user': controller_slice.controller.admin_user,
'admin_password': controller_slice.controller.admin_password,
@@ -60,7 +60,7 @@
if (not controller_slice.tenant_id):
try:
driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
- driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_slivers))
+ driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_instances))
except:
logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
raise Exception('Could not update quota for %s'%controller_slice.slice.name)
diff --git a/xos/openstack_observer/steps/sync_instances.py b/xos/openstack_observer/steps/sync_instances.py
new file mode 100644
index 0000000..cbcdec6
--- /dev/null
+++ b/xos/openstack_observer/steps/sync_instances.py
@@ -0,0 +1,196 @@
+import os
+import base64
+import socket
+from django.db.models import F, Q
+from xos.config import Config
+from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.instance import Instance
+from core.models.slice import Slice, SlicePrivilege, ControllerSlice
+from core.models.network import Network, NetworkSlice, ControllerNetwork
+from observer.ansible import *
+from observer.syncstep import *
+from util.logger import observer_logger as logger
+
+def escape(s):
+ s = s.replace('\n',r'\n').replace('"',r'\"')
+ return s
+
+class SyncInstances(OpenStackSyncStep):
+ provides=[Instance]
+ requested_interval=0
+ observes=Instance
+
+ def get_userdata(self, instance, pubkeys):
+ userdata = '#cloud-config\n\nopencloud:\n slicename: "%s"\n hostname: "%s"\n restapi_hostname: "%s"\n restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
+ userdata += 'ssh_authorized_keys:\n'
+ for key in pubkeys:
+ userdata += ' - %s\n' % key
+ return userdata
+
+ def sync_record(self, instance):
+ logger.info("sync'ing instance:%s slice:%s controller:%s " % (instance, instance.slice.name, instance.node.site_deployment.controller))
+ controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
+
+ if (controller_register.get('disabled',False)):
+ raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
+
+ metadata_update = {}
+ if (instance.numberCores):
+ metadata_update["cpu_cores"] = str(instance.numberCores)
+
+ for tag in instance.slice.tags.all():
+ if tag.name.startswith("sysctl-"):
+ metadata_update[tag.name] = tag.value
+
+ # public keys
+ slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
+ pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+ if instance.creator.public_key:
+ pubkeys.add(instance.creator.public_key)
+
+ if instance.slice.creator.public_key:
+ pubkeys.add(instance.slice.creator.public_key)
+
+ if instance.slice.service and instance.slice.service.public_key:
+ pubkeys.add(instance.slice.service.public_key)
+
+ # Handle any ports that are already created and attached to the instance.
+ # If we do have a port for a network, then add that network to an
+ # exclude list so we won't try to auto-create ports on that network
+ # when instantiating.
+ ports = []
+ exclude_networks = set()
+ exclude_templates = set()
+ for ns in instance.ports.all():
+ if not ns.port_id:
+ raise DeferredException("Port %s on instance %s has no id; Try again later" % (str(ns), str(instance)) )
+ ports.append(ns.port_id)
+ exclude_networks.add(ns.network)
+ exclude_templates.add(ns.network.template)
+
+ nics = []
+ networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
+ networks = [n for n in networks if (n not in exclude_networks)]
+ controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+ controller=instance.node.site_deployment.controller)
+
+ for controller_network in controller_networks:
+ if controller_network.network.template.visibility == 'private' and \
+ controller_network.network.template.translation == 'none':
+ if not controller_network.net_id:
+ raise DeferredException("Private Network %s has no id; Try again later" % controller_network.network.name)
+ nics.append(controller_network.net_id)
+
+ # Now include network templates, for those networks that use a
+ # shared_network_name.
+ network_templates = [network.template.shared_network_name for network in networks \
+ if network.template.shared_network_name]
+ network_templates = [nt for nt in network_templates if (nt not in exclude_templates)]
+
+ #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
+ driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
+ nets = driver.shell.quantum.list_networks()['networks']
+ for net in nets:
+ if net['name'] in network_templates:
+ nics.append(net['id'])
+
+ # If the slice isn't connected to anything, then at least put it on
+ # the public network.
+ if (not nics) and (not ports):
+ for net in nets:
+ if net['name']=='public':
+ nics.append(net['id'])
+
+ image_name = None
+ controller_images = instance.image.controllerimages.filter(controller=instance.node.site_deployment.controller)
+ if controller_images:
+ image_name = controller_images[0].image.name
+ logger.info("using image from ControllerImage object: " + str(image_name))
+
+ if image_name is None:
+ controller_driver = self.driver.admin_driver(controller=instance.node.site_deployment.controller)
+ images = controller_driver.shell.glanceclient.images.list()
+ for image in images:
+ if image.name == instance.image.name or not image_name:
+ image_name = image.name
+ logger.info("using image from glance: " + str(image_name))
+
+ try:
+ legacy = Config().observer_legacy
+ except:
+ legacy = False
+
+ if (legacy):
+ host_filter = instance.node.name.split('.',1)[0]
+ else:
+ host_filter = instance.node.name.strip()
+
+ availability_zone_filter = 'nova:%s'%host_filter
+ instance_name = '%s-%d'%(instance.slice.name,instance.id)
+
+ userData = self.get_userdata(instance, pubkeys)
+ if instance.userData:
+ userData = instance.userData
+
+ controller = instance.node.site_deployment.controller
+ tenant_fields = {'endpoint':controller.auth_url,
+ 'admin_user': instance.creator.email,
+ 'admin_password': instance.creator.remote_password,
+ 'admin_tenant': instance.slice.name,
+ 'tenant': instance.slice.name,
+ 'tenant_description': instance.slice.description,
+ 'name':instance_name,
+ 'ansible_tag':instance_name,
+ 'availability_zone': availability_zone_filter,
+ 'image_name':image_name,
+ 'flavor_name':instance.flavor.name,
+ 'nics':nics,
+ 'ports':ports,
+ 'meta':metadata_update,
+ 'user_data':r'%s'%escape(userData)}
+
+ res = run_template('sync_instances.yaml', tenant_fields,path='instances', expected_num=1)
+ instance_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
+ instance_uuid = res[0]['id']
+
+ try:
+ hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
+ ip = socket.gethostbyname(hostname)
+ instance.ip = ip
+ except:
+ pass
+
+ instance.instance_id = instance_id
+ instance.instance_uuid = instance_uuid
+ instance.instance_name = instance_name
+ instance.save()
+
+ def delete_record(self, instance):
+ controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
+
+ if (controller_register.get('disabled',False)):
+ raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
+
+ instance_name = '%s-%d'%(instance.slice.name,instance.id)
+ controller = instance.node.site_deployment.controller
+ tenant_fields = {'endpoint':controller.auth_url,
+ 'admin_user': instance.creator.email,
+ 'admin_password': instance.creator.remote_password,
+ 'admin_tenant': instance.slice.name,
+ 'tenant': instance.slice.name,
+ 'tenant_description': instance.slice.description,
+ 'name':instance_name,
+ 'ansible_tag':instance_name,
+ 'delete': True}
+
+ try:
+ res = run_template('sync_instances.yaml', tenant_fields,path='instances', expected_num=1)
+ except Exception,e:
+ print "Could not sync %s"%instance_name
+ #import traceback
+ #traceback.print_exc()
+ raise e
+
+ if (len(res)!=1):
+ raise Exception('Could not delete instance %s'%instance.slice.name)
diff --git a/xos/openstack_observer/steps/sync_ports.py b/xos/openstack_observer/steps/sync_ports.py
index 259245a..da08e36 100644
--- a/xos/openstack_observer/steps/sync_ports.py
+++ b/xos/openstack_observer/steps/sync_ports.py
@@ -17,7 +17,7 @@
# which Network is associated from the port.
def call(self, **args):
- logger.info("sync'ing network slivers")
+ logger.info("sync'ing network instances")
ports = Port.objects.all()
ports_by_id = {}
@@ -36,10 +36,10 @@
#for (network_id, network) in networks_by_id.items():
# logger.info(" %s: %s" % (network_id, network.name))
- slivers = Sliver.objects.all()
- slivers_by_instance_uuid = {}
- for sliver in slivers:
- slivers_by_instance_uuid[sliver.instance_uuid] = sliver
+ instances = Instance.objects.all()
+ instances_by_instance_uuid = {}
+ for instance in instances:
+ instances_by_instance_uuid[instance.instance_uuid] = instance
# Get all ports in all controllers
@@ -84,25 +84,25 @@
#logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
continue
- sliver = slivers_by_instance_uuid.get(port['device_id'], None)
- if not sliver:
- logger.info("no sliver for port %s device_id %s" % (port["id"], port['device_id']))
+ instance = instances_by_instance_uuid.get(port['device_id'], None)
+ if not instance:
+ logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
continue
network = networks_by_id.get(port['network_id'], None)
if not network:
# maybe it's public-nat or public-dedicated. Search the templates for
- # the id, then see if the sliver's slice has some network that uses
+ # the id, then see if the instance's slice has some network that uses
# that template
template = templates_by_id.get(port['network_id'], None)
- if template and sliver.slice:
- for candidate_network in sliver.slice.networks.all():
+ if template and instance.slice:
+ for candidate_network in instance.slice.networks.all():
if candidate_network.template == template:
network=candidate_network
if not network:
logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
- # we know it's associated with a sliver, but we don't know
+ # we know it's associated with an instance, but we don't know
# which network it is part of.
continue
@@ -114,7 +114,7 @@
networks = network.template.network_set.all()
network = None
for candidate_network in networks:
- if (candidate_network.owner == sliver.slice):
+ if (candidate_network.owner == instance.slice):
print "found network", candidate_network
network = candidate_network
@@ -127,10 +127,10 @@
continue
ip=port["fixed_ips"][0]["ip_address"]
- logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(sliver), ip, str(port["id"])))
+ logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
ns = Port(network=network,
- sliver=sliver,
+ instance=instance,
ip=ip,
port_id=port["id"])
@@ -142,9 +142,9 @@
# For ports that were created by the user, find that ones
# that don't have neutron ports, and create them.
- for port in Port.objects.filter(port_id__isnull=True, sliver__isnull=False):
+ for port in Port.objects.filter(port_id__isnull=True, instance__isnull=False):
#logger.info("XXX working on port %s" % port)
- controller = port.sliver.node.site_deployment.controller
+ controller = port.instance.node.site_deployment.controller
if controller:
cn=port.network.controllernetworks.filter(controller=controller)
if not cn:
@@ -161,9 +161,9 @@
continue
try:
# We need to use a client driver that specifies the tenant
- # of the destination sliver. Nova-compute will not connect
- # ports to slivers if the port's tenant does not match
- # the sliver's tenant.
+ # of the destination instance. Nova-compute will not connect
+ # ports to instances if the port's tenant does not match
+ # the instance's tenant.
# A bunch of stuff to compensate for OpenStackDriver.client_driveR()
# not being in working condition.
@@ -172,7 +172,7 @@
caller = port.network.owner.creator
auth = {'username': caller.email,
'password': caller.remote_password,
- 'tenant': port.sliver.slice.name} # port.network.owner.name}
+ 'tenant': port.instance.slice.name} # port.network.owner.name}
client = OpenStackClient(controller=controller, **auth) # cacert=self.config.nova_ca_ssl_cert,
driver = OpenStackDriver(client=client)
@@ -210,18 +210,18 @@
neutron_nat_list = []
if (neutron_nat_list != nat_list):
- logger.info("Setting nat:forward_ports for port %s network %s sliver %s to %s" % (str(port.port_id), str(port.network.id), str(port.sliver), str(nat_list)))
+ logger.info("Setting nat:forward_ports for port %s network %s instance %s to %s" % (str(port.port_id), str(port.network.id), str(port.instance), str(nat_list)))
try:
- driver = self.driver.admin_driver(controller=port.sliver.node.site_deployment.controller,tenant='admin')
+ driver = self.driver.admin_driver(controller=port.instance.node.site_deployment.controller,tenant='admin')
driver.shell.quantum.update_port(port.port_id, {"port": {"nat:forward_ports": nat_list}})
except:
logger.log_exc("failed to update port with nat_list %s" % str(nat_list))
continue
else:
- #logger.info("port %s network %s sliver %s nat %s is already set" % (str(port.port_id), str(port.network.id), str(port.sliver), str(nat_list)))
+ #logger.info("port %s network %s instance %s nat %s is already set" % (str(port.port_id), str(port.network.id), str(port.instance), str(nat_list)))
pass
- def delete_record(self, network_sliver):
+ def delete_record(self, network_instance):
# Nothing to do, this is an OpenCloud object
pass
diff --git a/xos/openstack_observer/steps/sync_slivers.py b/xos/openstack_observer/steps/sync_slivers.py
deleted file mode 100644
index d98c2d2..0000000
--- a/xos/openstack_observer/steps/sync_slivers.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import os
-import base64
-import socket
-from django.db.models import F, Q
-from xos.config import Config
-from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
-from observer.openstacksyncstep import OpenStackSyncStep
-from core.models.sliver import Sliver
-from core.models.slice import Slice, SlicePrivilege, ControllerSlice
-from core.models.network import Network, NetworkSlice, ControllerNetwork
-from observer.ansible import *
-from observer.syncstep import *
-from util.logger import observer_logger as logger
-
-def escape(s):
- s = s.replace('\n',r'\n').replace('"',r'\"')
- return s
-
-class SyncSlivers(OpenStackSyncStep):
- provides=[Sliver]
- requested_interval=0
- observes=Sliver
-
- def get_userdata(self, sliver, pubkeys):
- userdata = '#cloud-config\n\nopencloud:\n slicename: "%s"\n hostname: "%s"\n restapi_hostname: "%s"\n restapi_port: "%s"\n' % (sliver.slice.name, sliver.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
- userdata += 'ssh_authorized_keys:\n'
- for key in pubkeys:
- userdata += ' - %s\n' % key
- return userdata
-
- def sync_record(self, sliver):
- logger.info("sync'ing sliver:%s slice:%s controller:%s " % (sliver, sliver.slice.name, sliver.node.site_deployment.controller))
- controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
-
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
-
- metadata_update = {}
- if (sliver.numberCores):
- metadata_update["cpu_cores"] = str(sliver.numberCores)
-
- for tag in sliver.slice.tags.all():
- if tag.name.startswith("sysctl-"):
- metadata_update[tag.name] = tag.value
-
- # public keys
- slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
- pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
- if sliver.creator.public_key:
- pubkeys.add(sliver.creator.public_key)
-
- if sliver.slice.creator.public_key:
- pubkeys.add(sliver.slice.creator.public_key)
-
- if sliver.slice.service and sliver.slice.service.public_key:
- pubkeys.add(sliver.slice.service.public_key)
-
- # Handle any ports that are already created and attached to the sliver.
- # If we do have a port for a network, then add that network to an
- # exclude list so we won't try to auto-create ports on that network
- # when instantiating.
- ports = []
- exclude_networks = set()
- exclude_templates = set()
- for ns in sliver.ports.all():
- if not ns.port_id:
- raise DeferredException("Port %s on sliver %s has no id; Try again later" % (str(ns), str(sliver)) )
- ports.append(ns.port_id)
- exclude_networks.add(ns.network)
- exclude_templates.add(ns.network.template)
-
- nics = []
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
- networks = [n for n in networks if (n not in exclude_networks)]
- controller_networks = ControllerNetwork.objects.filter(network__in=networks,
- controller=sliver.node.site_deployment.controller)
-
- for controller_network in controller_networks:
- if controller_network.network.template.visibility == 'private' and \
- controller_network.network.template.translation == 'none':
- if not controller_network.net_id:
- raise DeferredException("Private Network %s has no id; Try again later" % controller_network.network.name)
- nics.append(controller_network.net_id)
-
- # Now include network templates, for those networks that use a
- # shared_network_name.
- network_templates = [network.template.shared_network_name for network in networks \
- if network.template.shared_network_name]
- network_templates = [nt for nt in network_templates if (nt not in exclude_templates)]
-
- #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, controller=sliver.controllerNetwork)
- driver = self.driver.admin_driver(tenant='admin', controller=sliver.node.site_deployment.controller)
- nets = driver.shell.quantum.list_networks()['networks']
- for net in nets:
- if net['name'] in network_templates:
- nics.append(net['id'])
-
- # If the slice isn't connected to anything, then at least put it on
- # the public network.
- if (not nics) and (not ports):
- for net in nets:
- if net['name']=='public':
- nics.append(net['id'])
-
- image_name = None
- controller_images = sliver.image.controllerimages.filter(controller=sliver.node.site_deployment.controller)
- if controller_images:
- image_name = controller_images[0].image.name
- logger.info("using image from ControllerImage object: " + str(image_name))
-
- if image_name is None:
- controller_driver = self.driver.admin_driver(controller=sliver.node.site_deployment.controller)
- images = controller_driver.shell.glanceclient.images.list()
- for image in images:
- if image.name == sliver.image.name or not image_name:
- image_name = image.name
- logger.info("using image from glance: " + str(image_name))
-
- try:
- legacy = Config().observer_legacy
- except:
- legacy = False
-
- if (legacy):
- host_filter = sliver.node.name.split('.',1)[0]
- else:
- host_filter = sliver.node.name.strip()
-
- availability_zone_filter = 'nova:%s'%host_filter
- sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
-
- userData = self.get_userdata(sliver, pubkeys)
- if sliver.userData:
- userData = sliver.userData
-
- controller = sliver.node.site_deployment.controller
- tenant_fields = {'endpoint':controller.auth_url,
- 'admin_user': sliver.creator.email,
- 'admin_password': sliver.creator.remote_password,
- 'admin_tenant': sliver.slice.name,
- 'tenant': sliver.slice.name,
- 'tenant_description': sliver.slice.description,
- 'name':sliver_name,
- 'ansible_tag':sliver_name,
- 'availability_zone': availability_zone_filter,
- 'image_name':image_name,
- 'flavor_name':sliver.flavor.name,
- 'nics':nics,
- 'ports':ports,
- 'meta':metadata_update,
- 'user_data':r'%s'%escape(userData)}
-
- res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
- sliver_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
- sliver_uuid = res[0]['id']
-
- try:
- hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
- ip = socket.gethostbyname(hostname)
- sliver.ip = ip
- except:
- pass
-
- sliver.instance_id = sliver_id
- sliver.instance_uuid = sliver_uuid
- sliver.instance_name = sliver_name
- sliver.save()
-
- def delete_record(self, sliver):
- controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
-
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
-
- sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
- controller = sliver.node.site_deployment.controller
- tenant_fields = {'endpoint':controller.auth_url,
- 'admin_user': sliver.creator.email,
- 'admin_password': sliver.creator.remote_password,
- 'admin_tenant': sliver.slice.name,
- 'tenant': sliver.slice.name,
- 'tenant_description': sliver.slice.description,
- 'name':sliver_name,
- 'ansible_tag':sliver_name,
- 'delete': True}
-
- try:
- res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
- except Exception,e:
- print "Could not sync %s"%sliver_name
- #import traceback
- #traceback.print_exc()
- raise e
-
- if (len(res)!=1):
- raise Exception('Could not delete sliver %s'%sliver.slice.name)
diff --git a/xos/openstack_observer/syncstep.py b/xos/openstack_observer/syncstep.py
index 03f6d16..791a89a 100644
--- a/xos/openstack_observer/syncstep.py
+++ b/xos/openstack_observer/syncstep.py
@@ -80,7 +80,7 @@
objs = main_obj.deleted_objects.all()
return objs
- #return Sliver.objects.filter(ip=None)
+ #return Instance.objects.filter(ip=None)
def check_dependencies(self, obj, failed):
for dep in self.dependencies:
diff --git a/xos/rr_observer/rrlib.py b/xos/rr_observer/rrlib.py
index 20c2341..6968dcd 100644
--- a/xos/rr_observer/rrlib.py
+++ b/xos/rr_observer/rrlib.py
@@ -27,7 +27,7 @@
pass
def gen_slice_info(self, service=None):
- """generates sliver information from slice of request router
+ """generates instance information from slice of request router
"""
if (service is None ):
@@ -40,13 +40,13 @@
'''for slice in service.service.all():
name = slice.name
- for sliver in slice.slivers.all():
- mapping[sliver.name] = str(sliver.ip)
+ for instance in slice.instances.all():
+ mapping[instance.name] = str(instance.ip)
'''
return mapping
def gen_servicemap_slice_info(self, servicemap):
- """generates sliver information from slice of servicemap
+ """generates instance information from slice of servicemap
"""
wzone = Set(['arizona', 'stanford', 'on.lab', 'housten']) # zone=1 in cooden.conf
@@ -56,14 +56,14 @@
mapping_ip = {}
slice = servicemap.slice
name = slice.name
- for sliver in slice.slivers.all():
- mapping_ip[sliver.node.name] = socket.gethostbyname(sliver.node.name)
- #print "sliver name "+sliver.name+str(sliver.ip)+"\n"
- site = sliver.node.site.name
+ for instance in slice.instances.all():
+ mapping_ip[instance.node.name] = socket.gethostbyname(instance.node.name)
+ #print "instance name "+instance.name+str(instance.ip)+"\n"
+ site = instance.node.site.name
if(site.lower() in wzone):
- mapping_zone[sliver.node.name] = str(1)
+ mapping_zone[instance.node.name] = str(1)
else:
- mapping_zone[sliver.node.name] = str(2)
+ mapping_zone[instance.node.name] = str(2)
return mapping_ip, mapping_zone
diff --git a/xos/syndicate_observer/syndicatelib.py b/xos/syndicate_observer/syndicatelib.py
index 331e925..9044a48 100644
--- a/xos/syndicate_observer/syndicatelib.py
+++ b/xos/syndicate_observer/syndicatelib.py
@@ -61,7 +61,7 @@
import syndicate_storage.models as models
# get OpenCloud models
- from core.models import Slice,Sliver
+ from core.models import Slice,Instance
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
@@ -501,7 +501,7 @@
def put_principal_data( user_email, observer_secret, public_key_pem, private_key_pem ):
"""
Seal and store the principal's private key into the database, in a SyndicatePrincipal object,
- so the sliver-side Syndicate daemon syndicated.py can get them later.
+ so the instance-side Syndicate daemon syndicated.py can get them later.
Overwrite an existing principal if one exists.
"""
@@ -814,9 +814,9 @@
#-------------------------------
-def do_push( sliver_hosts, portnum, payload ):
+def do_push( instance_hosts, portnum, payload ):
"""
- Push a payload to a list of slivers.
+ Push a payload to a list of instances.
NOTE: this has to be done in one go, since we can't import grequests
into the global namespace (without wrecking havoc on the credential server),
but it has to stick around for the push to work.
@@ -837,7 +837,7 @@
# fan-out
requests = []
- for sh in sliver_hosts:
+ for sh in instance_hosts:
rs = grequests.post( "http://" + sh + ":" + str(portnum), data={"observer_message": payload}, timeout=getattr(CONFIG, "SYNDICATE_HTTP_PUSH_TIMEOUT", 60) )
requests.append( rs )
@@ -873,7 +873,7 @@
logger.error("No such slice '%s'" % slice_name)
return None
- hostnames = [s.node.name for s in openstack_slice.slivers.all()]
+ hostnames = [s.node.name for s in openstack_slice.instances.all()]
return hostnames
@@ -894,8 +894,8 @@
for volume state.
NOTE: this is a fall-back mechanism. The observer should push new
- volume state to the slices' slivers. However, if that fails, the
- slivers are configured to poll for volume state periodically. This
+ volume state to the slices' instances. However, if that fails, the
+ instances are configured to poll for volume state periodically. This
server allows them to do just that.
Responses:
@@ -906,12 +906,12 @@
GET /<slicename>/SYNDICATE_SLICE_SECRET -- Reply with the slice secret (TEMPORARY)
- NOTE: We want to limit who can learn which Volumes a slice can access, so we'll seal its slivers'
- credentials with the SliceSecret secret. The slivers (which have the slice-wide secret) can then decrypt it.
+ NOTE: We want to limit who can learn which Volumes a slice can access, so we'll seal its instances'
+ credentials with the SliceSecret secret. The instances (which have the slice-wide secret) can then decrypt it.
However, sealing the listing is a time-consuming process (on the order of 10s), so we only want
to do it when we have to. Since *anyone* can ask for the ciphertext of the volume list,
we will cache the list ciphertext for each slice for a long-ish amount of time, so we don't
- accidentally DDoS this server. This necessarily means that the sliver might see a stale
+ accidentally DDoS this server. This necessarily means that the instance might see a stale
volume listing, but that's okay, since the Observer is eventually consistent anyway.
"""
@@ -971,7 +971,7 @@
# block the cache.
# NOTE: don't release the lock until we've generated credentials.
- # Chances are, there's a thundering herd of slivers coming online.
+ # Chances are, there's a thundering herd of instances coming online.
# Block them all until we've generated their slice's credentials,
# and then serve them the cached one.
diff --git a/xos/syndicate_observer/syndicatelib_config/config-opencloud.py b/xos/syndicate_observer/syndicatelib_config/config-opencloud.py
index 463b84e..3cd2950 100644
--- a/xos/syndicate_observer/syndicatelib_config/config-opencloud.py
+++ b/xos/syndicate_observer/syndicatelib_config/config-opencloud.py
@@ -11,7 +11,7 @@
# URL to the Syndicate SMI. For example, https://syndicate-metadata.appspot.com
SYNDICATE_SMI_URL="http://localhost:8080"
-# If you are going to use OpenID to authenticate the Syndicate sliver daemon,
+# If you are going to use OpenID to authenticate the Syndicate instance daemon,
# this is the OpenID provider URL. It is currently used only to generate
# identity pages for users, so you can put whatever you want here for now.
SYNDICATE_OPENID_TRUSTROOT="http://localhost:8081"
@@ -39,7 +39,7 @@
SYNDICATE_PRIVATE_KEY="/opt/xos/syndicate_observer/syndicatelib_config/pollserver.pem"
# This is the master secret used to generate secrets to seal sensitive information sent to the
-# Syndicate sliver mount daemons. It is also used to seal sensitive information
+# Syndicate instance mount daemons. It is also used to seal sensitive information
# stored to the Django database.
# TODO: think of a way to not have to store this on disk. Maybe we feed into the
# observer when it starts up?
@@ -47,14 +47,14 @@
# This is the default port number on which a Syndicate Replica Gateway
# will be provisioned. It's a well-known port, and can be the same across
-# slivers, since in OpenCloud, an RG instance only listens to localhost.
+# instances, since in OpenCloud, an RG instance only listens to localhost.
SYNDICATE_RG_DEFAULT_PORT=38800
# This is the absolute path to the RG's storage driver (which will be automatically
-# pushed to slivers by Syndicate). See https://github.com/jcnelson/syndicate/wiki/Replica-Gateways
+# pushed to instances by Syndicate). See https://github.com/jcnelson/syndicate/wiki/Replica-Gateways
SYNDICATE_RG_CLOSURE=None
-# This is the port number the observer listens on for GETs from the Syndicate sliver mount
+# This is the port number the observer listens on for GETs from the Syndicate instance mount
# daemons. Normally, the oserver pushes (encrypted) commands to the daemons, but if the
# daemons are NAT'ed or temporarily partitioned, they will pull commands instead.
SYNDICATE_HTTP_PORT=65321
@@ -65,7 +65,7 @@
# This is the number of seconds to wait for pushing a slice credential before timing out.
SYNDICATE_HTTP_PUSH_TIMEOUT=60
-# This is the port number the Syndicate sliver mount daemons listen on. The observer will
+# This is the port number the Syndicate instance mount daemons listen on. The observer will
# push commands to them on this port.
SYNDICATE_SLIVER_PORT=65322
diff --git a/xos/syndicate_observer/syndicatelib_config/config.py b/xos/syndicate_observer/syndicatelib_config/config.py
index 463b84e..3cd2950 100644
--- a/xos/syndicate_observer/syndicatelib_config/config.py
+++ b/xos/syndicate_observer/syndicatelib_config/config.py
@@ -11,7 +11,7 @@
# URL to the Syndicate SMI. For example, https://syndicate-metadata.appspot.com
SYNDICATE_SMI_URL="http://localhost:8080"
-# If you are going to use OpenID to authenticate the Syndicate sliver daemon,
+# If you are going to use OpenID to authenticate the Syndicate instance daemon,
# this is the OpenID provider URL. It is currently used only to generate
# identity pages for users, so you can put whatever you want here for now.
SYNDICATE_OPENID_TRUSTROOT="http://localhost:8081"
@@ -39,7 +39,7 @@
SYNDICATE_PRIVATE_KEY="/opt/xos/syndicate_observer/syndicatelib_config/pollserver.pem"
# This is the master secret used to generate secrets to seal sensitive information sent to the
-# Syndicate sliver mount daemons. It is also used to seal sensitive information
+# Syndicate instance mount daemons. It is also used to seal sensitive information
# stored to the Django database.
# TODO: think of a way to not have to store this on disk. Maybe we feed into the
# observer when it starts up?
@@ -47,14 +47,14 @@
# This is the default port number on which a Syndicate Replica Gateway
# will be provisioned. It's a well-known port, and can be the same across
-# slivers, since in OpenCloud, an RG instance only listens to localhost.
+# instances, since in OpenCloud, an RG instance only listens to localhost.
SYNDICATE_RG_DEFAULT_PORT=38800
# This is the absolute path to the RG's storage driver (which will be automatically
-# pushed to slivers by Syndicate). See https://github.com/jcnelson/syndicate/wiki/Replica-Gateways
+# pushed to instances by Syndicate). See https://github.com/jcnelson/syndicate/wiki/Replica-Gateways
SYNDICATE_RG_CLOSURE=None
-# This is the port number the observer listens on for GETs from the Syndicate sliver mount
+# This is the port number the observer listens on for GETs from the Syndicate instance mount
# daemons. Normally, the oserver pushes (encrypted) commands to the daemons, but if the
# daemons are NAT'ed or temporarily partitioned, they will pull commands instead.
SYNDICATE_HTTP_PORT=65321
@@ -65,7 +65,7 @@
# This is the number of seconds to wait for pushing a slice credential before timing out.
SYNDICATE_HTTP_PUSH_TIMEOUT=60
-# This is the port number the Syndicate sliver mount daemons listen on. The observer will
+# This is the port number the Syndicate instance mount daemons listen on. The observer will
# push commands to them on this port.
SYNDICATE_SLIVER_PORT=65322
diff --git a/xos/tests/slivertest.py b/xos/tests/instancetest.py
similarity index 67%
rename from xos/tests/slivertest.py
rename to xos/tests/instancetest.py
index 60124fd..2813521 100644
--- a/xos/tests/slivertest.py
+++ b/xos/tests/instancetest.py
@@ -1,8 +1,8 @@
"""
- Basic Sliver Test
+ Basic Instance Test
1) Create a slice1
- 2) Create sliver1 on slice1
+ 2) Create instance1 on slice1
"""
import os
@@ -14,15 +14,15 @@
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
from openstack.manager import OpenStackManager
-from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+from core.models import Slice, Instance, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
from planetstacktest import PlanetStackTest, fail_unless
-class SliverTest(PlanetStackTest):
+class InstanceTest(PlanetStackTest):
def __init__(self):
PlanetStackTest.__init__(self)
- def run_sliver1(self):
+ def run_instance1(self):
slice1Name = self.make_slice_name()
slice1 = Slice(name = slice1Name,
omf_friendly=True,
@@ -30,22 +30,22 @@
creator=self.testUser)
slice1=self.save_and_wait_for_enacted(slice1, nonempty_fields=["tenant_id"])
- sliver1 = Sliver(image = self.testImage,
+ instance1 = Instance(image = self.testImage,
creator=self.testUser,
slice=slice1,
node=self.testNode,
deploymentNetwork=self.testDeployment)
- sliver1=self.save_and_wait_for_enacted(sliver1, nonempty_fields=["instance_id", "ip"])
+ instance1=self.save_and_wait_for_enacted(instance1, nonempty_fields=["instance_id", "ip"])
def run(self):
self.setup()
try:
- self.run_sliver1()
+ self.run_instance1()
finally:
self.cleanup()
def main():
- SliverTest().run()
+ InstanceTest().run()
if __name__=="__main__":
main()
diff --git a/xos/tests/networktest.py b/xos/tests/networktest.py
index 5390f0f..b4bcbd2 100644
--- a/xos/tests/networktest.py
+++ b/xos/tests/networktest.py
@@ -2,12 +2,12 @@
Network Data Model Test
1) Create a slice1
- 2) Create sliver1 on slice1
- 3) Verify one quantum network created for sliver1
+ 2) Create instance1 on slice1
+ 3) Verify one quantum network created for instance1
4) Create a private network, network1
5) Connect network1 to slice1
- 6) Create sliver1_2 on slice1
- 7) Verify two quantum networks created for sliver1_2
+ 6) Create instance1_2 on slice1
+ 7) Verify two quantum networks created for instance1_2
"""
import os
@@ -19,7 +19,7 @@
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
from openstack.manager import OpenStackManager
-from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+from core.models import Slice, Instance, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
from planetstacktest import PlanetStackTest, fail_unless, fail
@@ -27,10 +27,10 @@
def __init__(self):
PlanetStackTest.__init__(self)
- def wait_for_ports(self, sliver, count=1, max_time=120):
- print "waiting for %d ports on %s" % (count, str(sliver))
+ def wait_for_ports(self, instance, count=1, max_time=120):
+ print "waiting for %d ports on %s" % (count, str(instance))
while max_time>0:
- ports = self.manager.driver.shell.quantum.list_ports(device_id=sliver.instance_id)["ports"]
+ ports = self.manager.driver.shell.quantum.list_ports(device_id=instance.instance_id)["ports"]
if len(ports)==count:
return ports
@@ -70,15 +70,15 @@
creator=self.testUser)
slice1=self.save_and_wait_for_enacted(slice1, nonempty_fields=["tenant_id"])
- sliver1 = Sliver(image = self.testImage,
+ instance1 = Instance(image = self.testImage,
creator=self.testUser,
slice=slice1,
node=self.testNode,
deploymentNetwork=self.testDeployment)
- sliver1=self.save_and_wait_for_enacted(sliver1, nonempty_fields=["instance_id", "ip"])
+ instance1=self.save_and_wait_for_enacted(instance1, nonempty_fields=["instance_id", "ip"])
- # sliver1 should have only one port, its private network
- ports = self.wait_for_ports(sliver1, count=1)
+ # instance1 should have only one port, its private network
+ ports = self.wait_for_ports(instance1, count=1)
self.verify_network_names(ports, [slice1.name])
network1 = Network(name = slice1Name + "-pvt",
@@ -89,14 +89,14 @@
network1_slice1 = NetworkSlice(network=network1, slice=slice1)
network1_slice1.save() # does not need to be enacted
- sliver1_2 = Sliver(image = self.testImage,
+ instance1_2 = Instance(image = self.testImage,
creator=self.testUser,
slice=slice1,
node=self.testNode,
deploymentNetwork=self.testDeployment)
- sliver1_2=self.save_and_wait_for_enacted(sliver1_2, nonempty_fields=["instance_id", "ip"])
+ instance1_2=self.save_and_wait_for_enacted(instance1_2, nonempty_fields=["instance_id", "ip"])
- ports = self.wait_for_ports(sliver1_2, count=2)
+ ports = self.wait_for_ports(instance1_2, count=2)
self.verify_network_names(ports, [slice1.name, network1.name])
self.slice1 = slice1
@@ -118,14 +118,14 @@
network2_slice2 = NetworkSlice(network=network2, slice=slice2)
network2_slice2.save() # does not need to be enacted
- sliver2_1 = Sliver(image = self.testImage,
+ instance2_1 = Instance(image = self.testImage,
creator=self.testUser,
slice=slice2,
node=self.testNode,
deploymentNetwork=self.testDeployment)
- sliver2_1=self.save_and_wait_for_enacted(sliver2_1, nonempty_fields=["instance_id", "ip"])
+ instance2_1=self.save_and_wait_for_enacted(instance2_1, nonempty_fields=["instance_id", "ip"])
- ports = self.wait_for_ports(sliver2_1, count=2)
+ ports = self.wait_for_ports(instance2_1, count=2)
self.verify_network_names(ports, [slice2.name, network2.name])
self.slice2 = slice2
@@ -137,14 +137,14 @@
network2_slice1 = NetworkSlice(network=self.network2, slice=self.slice1)
network2_slice1.save()
- sliver1_3 = Sliver(image = self.testImage,
+ instance1_3 = Instance(image = self.testImage,
creator=self.testUser,
slice=self.slice1,
node=self.testNode,
deploymentNetwork=self.testDeployment)
- sliver1_3=self.save_and_wait_for_enacted(sliver1_3, nonempty_fields=["instance_id", "ip"])
+ instance1_3=self.save_and_wait_for_enacted(instance1_3, nonempty_fields=["instance_id", "ip"])
- ports = self.wait_for_ports(sliver1_3, count=3)
+ ports = self.wait_for_ports(instance1_3, count=3)
self.verify_network_names(ports, [self.slice1.name, self.network1.name, self.network2.name])
def test_nat_net(self):
@@ -164,14 +164,14 @@
network3_slice3 = NetworkSlice(network=network3, slice=slice3)
network3_slice3.save() # does not need to be enacted
- sliver3_1 = Sliver(image = self.testImage,
+ instance3_1 = Instance(image = self.testImage,
creator=self.testUser,
slice=slice3,
node=self.testNode,
deploymentNetwork=self.testDeployment)
- sliver3_1=self.save_and_wait_for_enacted(sliver3_1, nonempty_fields=["instance_id", "ip"])
+ instance3_1=self.save_and_wait_for_enacted(instance3_1, nonempty_fields=["instance_id", "ip"])
- ports = self.wait_for_ports(sliver3_1, count=2)
+ ports = self.wait_for_ports(instance3_1, count=2)
self.verify_network_names(ports, [slice3.name, "nat-net"])
def run(self):
diff --git a/xos/tests/planetstacktest.py b/xos/tests/planetstacktest.py
index 99bd730..baf7efe 100644
--- a/xos/tests/planetstacktest.py
+++ b/xos/tests/planetstacktest.py
@@ -7,7 +7,7 @@
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
from openstack.manager import OpenStackManager
-from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+from core.models import Slice, Instance, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
TEST_SITE_NAME = "Princeton University"
TEST_USER_EMAIL = "sbaker@planetstack.org"
diff --git a/xos/tools/ansible_hosts.py b/xos/tools/ansible_hosts.py
index 0cdb809..9ae3e52 100644
--- a/xos/tools/ansible_hosts.py
+++ b/xos/tools/ansible_hosts.py
@@ -14,7 +14,7 @@
NODES_API = REST_API + "nodes/"
SITES_API = REST_API + "sites/"
SLICES_API = REST_API + "slices/"
-SLIVERS_API = REST_API + "sliver/"
+SLIVERS_API = REST_API + "instances/"
def get_nodes_by_site():
r = requests.get(SITES_API + "?no_hyperlinks=1", auth=opencloud_auth)
@@ -51,16 +51,16 @@
nodes[str(nodes["id"])] = node
r = requests.get(SLIVERS_API + "?no_hyperlinks=1", auth=opencloud_auth)
- slivers = r.json()
- for slivers in nodes:
- if sliver["node"] not in nodes:
+ instances = r.json()
+ for instance in instances:
+ if instance["node"] not in nodes:
continue
- if sliver["slice"] not in slices:
+ if instance["slice"] not in slices:
continue
- hostname = nodes[sliver["node"]].name
+ hostname = nodes[instance["node"]].name
- slices[sliver["slice"]]["hostnames"].append(hostname)
+ slices[instance["slice"]]["hostnames"].append(hostname)
return slices
"""
diff --git a/xos/tools/cleanup_unique.py b/xos/tools/cleanup_unique.py
index 92ce599..a637df1 100644
--- a/xos/tools/cleanup_unique.py
+++ b/xos/tools/cleanup_unique.py
@@ -57,7 +57,7 @@
seen=[]
for obj in Port.objects.all():
seen.append(obj.id)
- conflicts = Port.objects.filter(network=obj.network, sliver=obj.sliver)
+ conflicts = Port.objects.filter(network=obj.network, instance=obj.instance)
for conflict in conflicts:
if conflict.id not in seen:
print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
diff --git a/xos/tools/get_instance_ip.py b/xos/tools/get_instance_ip.py
index 0e6a456..dd3c788 100644
--- a/xos/tools/get_instance_ip.py
+++ b/xos/tools/get_instance_ip.py
@@ -11,7 +11,7 @@
NODES_API = REST_API + "nodes/"
SLICES_API = REST_API + "slices/"
-SLIVERS_API = REST_API + "slivers/"
+INSTANCES_API = REST_API + "instances/"
PORTS_API = REST_API + "ports/"
opencloud_auth=("demo@onlab.us", "demo")
@@ -29,7 +29,7 @@
print >> sys.stderr, "Error: failed to find node %s" % host_name
sys.exit(-1)
-def get_slivers(slice_id=None, node_id=None):
+def get_instances(slice_id=None, node_id=None):
queries = []
if slice_id:
queries.append("slice=%s" % str(slice_id))
@@ -41,7 +41,7 @@
else:
query_string = ""
- r = requests.get(SLIVERS_API + query_string, auth=opencloud_auth)
+ r = requests.get(INSTANCES_API + query_string, auth=opencloud_auth)
return r.json()
def main():
@@ -60,19 +60,18 @@
slice_id = get_slice_id(slice_name)
node_id = get_node_id(hostname)
- slivers = get_slivers(slice_id, node_id)
+ instances = get_instances(slice_id, node_id)
# get (instance_name, ip) pairs for instances with names and ips
- slivers = [x for x in slivers if x["instance_name"]]
- slivers = sorted(slivers, key = lambda sliver: sliver["instance_name"])
+ instances = [x for x in instances if x["instance_name"]]
+ instances = sorted(instances, key = lambda instance: instance["instance_name"])
# return the last one in the list (i.e. the newest one)
- sliver_id = slivers[-1]["id"]
+ instance_id = instances[-1]["id"]
- r = requests.get(PORTS_API + "?sliver=%s" % sliver_id, auth=opencloud_auth)
-
+ r = requests.get(PORTS_API + "?instance=%s" % instance_id, auth=opencloud_auth)
ports = r.json()
ips = [x["ip"] for x in ports]
diff --git a/xos/tools/get_instance_name.py b/xos/tools/get_instance_name.py
index 3450df5..844ba5a 100644
--- a/xos/tools/get_instance_name.py
+++ b/xos/tools/get_instance_name.py
@@ -9,7 +9,7 @@
NODES_API = REST_API + "nodes/"
SLICES_API = REST_API + "slices/"
-SLIVERS_API = REST_API + "slivers/"
+SLIVERS_API = REST_API + "instances/"
opencloud_auth=("demo@onlab.us", "demo")
@@ -26,7 +26,7 @@
print >> sys.stderr, "Error: failed to find node %s" % host_name
sys.exit(-1)
-def get_slivers(slice_id=None, node_id=None):
+def get_instances(slice_id=None, node_id=None):
queries = []
if slice_id:
queries.append("slice=%s" % str(slice_id))
@@ -57,9 +57,9 @@
slice_id = get_slice_id(slice_name)
node_id = get_node_id(hostname)
- slivers = get_slivers(slice_id, node_id)
+ instances = get_instances(slice_id, node_id)
- instance_names = [x["instance_name"] for x in slivers if x["instance_name"]]
+ instance_names = [x["instance_name"] for x in instances if x["instance_name"]]
# return the last one in the list (i.e. the newest one)
diff --git a/xos/tools/init-xos.py b/xos/tools/init-xos.py
index 581eb46..433d444 100644
--- a/xos/tools/init-xos.py
+++ b/xos/tools/init-xos.py
@@ -59,6 +59,6 @@
ServiceClass(name="Best Effort", description="Best Effort").save()
NetworkTemplate(name="Private", description="A private virtual network", visibility="private", translation="none", topology_kind="bigswitch", controller_kind="none").save()
-NetworkTemplate(name="Public shared IPv4", description="Connect a sliver to the public network", visibility="private", translation="NAT", shared_network_name="nat-net", topology_kind="bigswitch", controller_kind="none").save()
-NetworkTemplate(name="Public dedicated IPv4", description="Connect a sliver to the public network", visibility="public", translation="none", shared_network_name="ext-net", topology_kind="bigswitch", controller_kind="none").save()
+NetworkTemplate(name="Public shared IPv4", description="Connect an instance to the public network", visibility="private", translation="NAT", shared_network_name="nat-net", topology_kind="bigswitch", controller_kind="none").save()
+NetworkTemplate(name="Public dedicated IPv4", description="Connect an instance to the public network", visibility="public", translation="none", shared_network_name="ext-net", topology_kind="bigswitch", controller_kind="none").save()
diff --git a/xos/tosca/engine.py b/xos/tosca/engine.py
index fc1f10f..3efb5ef 100644
--- a/xos/tosca/engine.py
+++ b/xos/tosca/engine.py
@@ -7,7 +7,7 @@
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
-from core.models import Slice,Sliver,User,Flavor,Node,Image
+from core.models import Slice,Instance,User,Flavor,Node,Image
from nodeselect import XOSNodeSelector
from imageselect import XOSImageSelector
diff --git a/xos/tosca/flavorselect.py b/xos/tosca/flavorselect.py
index 36024f9..a79f8a5 100644
--- a/xos/tosca/flavorselect.py
+++ b/xos/tosca/flavorselect.py
@@ -1,7 +1,7 @@
import os
import sys
-from core.models import Slice,Sliver,User,Flavor,Node,Image
+from core.models import Slice,Instance,User,Flavor,Node,Image
class XOSFlavorSelector(object):
def __init__(self, user, mem_size=None, num_cpus=None, disk_size=None):
diff --git a/xos/tosca/nodeselect.py b/xos/tosca/nodeselect.py
index 11c3c78..24c5b85 100644
--- a/xos/tosca/nodeselect.py
+++ b/xos/tosca/nodeselect.py
@@ -1,7 +1,7 @@
import os
import sys
-from core.models import Slice,Sliver,User,Flavor,Node,Image
+from core.models import Slice,Instance,User,Flavor,Node,Image
class XOSNodeSelector(object):
def __init__(self, user, mem_size=None, num_cpus=None, disk_size=None, hostname = None):
diff --git a/xos/tosca/resources/compute.py b/xos/tosca/resources/compute.py
index 2f33ce5..f01a401 100644
--- a/xos/tosca/resources/compute.py
+++ b/xos/tosca/resources/compute.py
@@ -5,7 +5,7 @@
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
-from core.models import Slice,Sliver,User,Flavor,Node,Image
+from core.models import Slice,Instance,User,Flavor,Node,Image
from nodeselect import XOSNodeSelector
from imageselect import XOSImageSelector
from flavorselect import XOSFlavorSelector
@@ -14,7 +14,7 @@
class XOSCompute(XOSResource):
provides = "tosca.nodes.Compute"
- xos_model = Sliver
+ xos_model = Instance
def select_compute_node(self, user, v, hostname=None):
mem_size = v.get_property_value("mem_size")
@@ -50,14 +50,14 @@
sliceName = self.get_requirement("tosca.relationships.MemberOfSlice", throw_exception=True)
slice = self.get_xos_object(Slice, name=sliceName)
- # locate it one the same host as some other sliver
+ # locate it on the same host as some other instance
colocate_host = None
- colocate_sliver_name = self.get_requirement("tosca.relationships.SameHost")
+ colocate_instance_name = self.get_requirement("tosca.relationships.SameHost")
if index is not None:
- colocate_sliver_name = "%s-%d" % (colocate_sliver_name, index)
- colocate_slivers = Sliver.objects.filter(name=colocate_sliver_name)
- if colocate_slivers:
- colocate_host = colocate_slivers[0].node.name
+ colocate_instance_name = "%s-%d" % (colocate_instance_name, index)
+ colocate_instances = Instance.objects.filter(name=colocate_instance_name)
+ if colocate_instances:
+ colocate_host = colocate_instances[0].node.name
self.info("colocating on %s" % colocate_host)
capabilities = nodetemplate.get_capabilities()
@@ -84,15 +84,14 @@
def create(self, name = None, index = None):
xos_args = self.get_xos_args(name=name, index=index)
- sliver = Sliver(**xos_args)
- sliver.caller = self.user
- sliver.no_sync = True
- sliver.save()
+ instance = Instance(**xos_args)
+ instance.caller = self.user
+ instance.no_sync = True
+ instance.save()
+ self.deferred_sync.append(instance)
- self.deferred_sync.append(sliver)
-
- self.info("Created Sliver '%s' on node '%s' using flavor '%s' and image '%s'" %
- (str(sliver), str(sliver.node), str(sliver.flavor), str(sliver.image)))
+ self.info("Created Instance '%s' on node '%s' using flavor '%s' and image '%s'" %
+ (str(instance), str(instance.node), str(instance.flavor), str(instance.image)))
def create_or_update(self):
scalable = self.get_scalable()
@@ -100,10 +99,10 @@
default_instances = scalable.get("default_instances",1)
for i in range(0, default_instances):
name = "%s-%d" % (self.nodetemplate.name, i)
- existing_slivers = Sliver.objects.filter(name=name)
- if existing_slivers:
+ existing_instances = Instance.objects.filter(name=name)
+ if existing_instances:
self.info("%s %s already exists" % (self.xos_model.__name__, name))
- self.update(existing_slivers[0])
+ self.update(existing_instances[0])
else:
self.create(name, index=i)
else:
@@ -112,12 +111,12 @@
def get_existing_objs(self):
scalable = self.get_scalable()
if scalable:
- existing_slivers = []
+ existing_instances = []
max_instances = scalable.get("max_instances",1)
for i in range(0, max_instances):
name = "%s-%d" % (self.nodetemplate.name, i)
- existing_slivers = existing_slivers + list(Sliver.objects.filter(name=name))
- return existing_slivers
+ existing_instances = existing_instances + list(Instance.objects.filter(name=name))
+ return existing_instances
else:
return super(XOSCompute,self).get_existing_objs()
diff --git a/xos/tosca/resources/slice.py b/xos/tosca/resources/slice.py
index 5c576be..d9684f2 100644
--- a/xos/tosca/resources/slice.py
+++ b/xos/tosca/resources/slice.py
@@ -60,8 +60,8 @@
self.info("Created Slice '%s' on Site '%s'" % (str(slice), str(slice.site)))
def delete(self, obj):
- if obj.slivers.exists():
- self.info("Slice %s has active slivers; skipping delete" % obj.name)
+ if obj.instances.exists():
+ self.info("Slice %s has active instances; skipping delete" % obj.name)
return
super(XOSSlice, self).delete(obj)
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index dc8b781..ab454e2 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -191,7 +191,7 @@
'auth.group',
'auth',
'core.network',
- 'core.sliver',
+ 'core.instance',
'core.node',
'core.image',
'core.deploymentrole',
diff --git a/xos/xos/xosapi.py b/xos/xos/xosapi.py
index 5dd1ce0..f7be27a 100644
--- a/xos/xos/xosapi.py
+++ b/xos/xos/xosapi.py
@@ -75,8 +75,8 @@
url(r'xos/slice_privileges/$', SlicePrivilegeList.as_view(), name='sliceprivilege-list'),
url(r'xos/slice_privileges/(?P<pk>[a-zA-Z0-9\-]+)/$', SlicePrivilegeDetail.as_view(), name ='sliceprivilege-detail'),
- url(r'xos/networkslivers/$', NetworkSliverList.as_view(), name='networksliver-list'),
- url(r'xos/networkslivers/(?P<pk>[a-zA-Z0-9\-]+)/$', NetworkSliverDetail.as_view(), name ='networksliver-detail'),
+ url(r'xos/networkinstances/$', NetworkInstanceList.as_view(), name='networkinstance-list'),
+ url(r'xos/networkinstances/(?P<pk>[a-zA-Z0-9\-]+)/$', NetworkInstanceDetail.as_view(), name ='networkinstance-detail'),
url(r'xos/flavors/$', FlavorList.as_view(), name='flavor-list'),
url(r'xos/flavors/(?P<pk>[a-zA-Z0-9\-]+)/$', FlavorDetail.as_view(), name ='flavor-detail'),
@@ -123,8 +123,8 @@
url(r'xos/slicecredentials/$', SliceCredentialList.as_view(), name='slicecredential-list'),
url(r'xos/slicecredentials/(?P<pk>[a-zA-Z0-9\-]+)/$', SliceCredentialDetail.as_view(), name ='slicecredential-detail'),
- url(r'xos/slivers/$', SliverList.as_view(), name='sliver-list'),
- url(r'xos/slivers/(?P<pk>[a-zA-Z0-9\-]+)/$', SliverDetail.as_view(), name ='sliver-detail'),
+ url(r'xos/instances/$', InstanceList.as_view(), name='instance-list'),
+ url(r'xos/instances/(?P<pk>[a-zA-Z0-9\-]+)/$', InstanceDetail.as_view(), name ='instance-detail'),
url(r'xos/nodes/$', NodeList.as_view(), name='node-list'),
url(r'xos/nodes/(?P<pk>[a-zA-Z0-9\-]+)/$', NodeDetail.as_view(), name ='node-detail'),
@@ -245,7 +245,7 @@
'tags': reverse('tag-list', request=request, format=format),
'invoices': reverse('invoice-list', request=request, format=format),
'sliceprivileges': reverse('sliceprivilege-list', request=request, format=format),
- 'networkslivers': reverse('networksliver-list', request=request, format=format),
+ 'networkinstances': reverse('networkinstance-list', request=request, format=format),
'flavors': reverse('flavor-list', request=request, format=format),
'controllersites': reverse('controllersite-list', request=request, format=format),
'projects': reverse('project-list', request=request, format=format),
@@ -261,7 +261,7 @@
'usableobjects': reverse('usableobject-list', request=request, format=format),
'providers': reverse('provider-list', request=request, format=format),
'slicecredentials': reverse('slicecredential-list', request=request, format=format),
- 'slivers': reverse('sliver-list', request=request, format=format),
+ 'instances': reverse('instance-list', request=request, format=format),
'nodes': reverse('node-list', request=request, format=format),
'dashboardviews': reverse('dashboardview-list', request=request, format=format),
'controllernetworks': reverse('controllernetwork-list', request=request, format=format),
@@ -791,7 +791,7 @@
-class NetworkSliverSerializer(serializers.HyperlinkedModelSerializer):
+class NetworkInstanceSerializer(serializers.HyperlinkedModelSerializer):
id = IdField()
humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
@@ -804,10 +804,10 @@
except:
return None
class Meta:
- model = NetworkSliver
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','sliver','ip','port_id',)
+ model = NetworkInstance
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','instance','ip','port_id',)
-class NetworkSliverIdSerializer(XOSModelSerializer):
+class NetworkInstanceIdSerializer(XOSModelSerializer):
id = IdField()
humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
@@ -820,8 +820,8 @@
except:
return None
class Meta:
- model = NetworkSliver
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','sliver','ip','port_id',)
+ model = NetworkInstance
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','instance','ip','port_id',)
@@ -961,7 +961,7 @@
return None
class Meta:
model = Slice
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','enabled','omf_friendly','description','slice_url','site','max_slivers','service','network','serviceClass','creator','default_flavor','default_image','mount_data_sets','networks','networks',)
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','enabled','omf_friendly','description','slice_url','site','max_instances','service','network','serviceClass','creator','default_flavor','default_image','mount_data_sets','networks','networks',)
class SliceIdSerializer(XOSModelSerializer):
id = IdField()
@@ -985,8 +985,7 @@
return None
class Meta:
model = Slice
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','enabled','omf_friendly','description','slice_url','site','max_slivers','service','network','serviceClass','creator','default_flavor','default_image','mount_data_sets','networks','networks',)
-
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','enabled','omf_friendly','description','slice_url','site','max_instances','service','network','serviceClass','creator','default_flavor','default_image','mount_data_sets','networks','networks',)
@@ -1002,7 +1001,7 @@
- slivers = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name='sliver-detail')
+ instances = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name='instance-detail')
@@ -1024,7 +1023,7 @@
return None
class Meta:
model = Network
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','template','subnet','ports','labels','owner','guaranteed_bandwidth','permit_all_slices','topology_parameters','controller_url','controller_parameters','network_id','router_id','subnet_id','autoconnect','slices','slices','slivers','routers','routers',)
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','template','subnet','ports','labels','owner','guaranteed_bandwidth','permit_all_slices','topology_parameters','controller_url','controller_parameters','network_id','router_id','subnet_id','autoconnect','slices','slices','instances','routers','routers',)
class NetworkIdSerializer(XOSModelSerializer):
id = IdField()
@@ -1038,7 +1037,7 @@
- slivers = serializers.PrimaryKeyRelatedField(many=True, queryset = Sliver.objects.all())
+ instances = serializers.PrimaryKeyRelatedField(many=True, queryset = Instance.objects.all())
@@ -1060,7 +1059,7 @@
return None
class Meta:
model = Network
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','template','subnet','ports','labels','owner','guaranteed_bandwidth','permit_all_slices','topology_parameters','controller_url','controller_parameters','network_id','router_id','subnet_id','autoconnect','slices','slices','slivers','routers','routers',)
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','template','subnet','ports','labels','owner','guaranteed_bandwidth','permit_all_slices','topology_parameters','controller_url','controller_parameters','network_id','router_id','subnet_id','autoconnect','slices','slices','instances','routers','routers',)
@@ -1415,7 +1414,7 @@
-class SliverSerializer(serializers.HyperlinkedModelSerializer):
+class InstanceSerializer(serializers.HyperlinkedModelSerializer):
id = IdField()
@@ -1432,10 +1431,15 @@
except:
return None
class Meta:
        model = Instance
        fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','instance_id','instance_uuid','name','instance_name','ip','image','creator','slice','deployment','node','numberCores','flavor','userData','networks',)
-class SliverIdSerializer(XOSModelSerializer):
+class InstanceIdSerializer(XOSModelSerializer):
id = IdField()
@@ -1452,8 +1456,13 @@
except:
return None
class Meta:
        model = Instance
        fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','instance_id','instance_uuid','name','instance_name','ip','image','creator','slice','deployment','node','numberCores','flavor','userData','networks',)
@@ -1663,7 +1672,7 @@
return None
class Meta:
model = ReservedResource
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','sliver','resource','quantity','reservationSet',)
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','instance','resource','quantity','reservationSet',)
class ReservedResourceIdSerializer(XOSModelSerializer):
id = IdField()
@@ -1679,7 +1688,7 @@
return None
class Meta:
model = ReservedResource
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','sliver','resource','quantity','reservationSet',)
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','instance','resource','quantity','reservationSet',)
@@ -1715,6 +1724,7 @@
class Meta:
model = NetworkTemplate
fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','description','guaranteed_bandwidth','visibility','translation','shared_network_name','shared_network_id','topology_kind','controller_kind',)
@@ -1846,7 +1856,7 @@
return None
class Meta:
model = Port
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','sliver','ip','port_id',)
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','instance','ip','port_id',)
class PortIdSerializer(XOSModelSerializer):
id = IdField()
@@ -1862,7 +1872,7 @@
return None
class Meta:
model = Port
- fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','sliver','ip','port_id',)
+ fields = ('humanReadableName', 'validators', 'id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','instance','ip','port_id',)
@@ -2746,7 +2756,7 @@
SlicePrivilege: SlicePrivilegeSerializer,
- NetworkSliver: NetworkSliverSerializer,
+ NetworkInstance: NetworkInstanceSerializer,
Flavor: FlavorSerializer,
@@ -2778,7 +2788,7 @@
SliceCredential: SliceCredentialSerializer,
- Sliver: SliverSerializer,
+ Instance: InstanceSerializer,
Node: NodeSerializer,
@@ -3418,12 +3428,12 @@
-class NetworkSliverList(XOSListCreateAPIView):
- queryset = NetworkSliver.objects.select_related().all()
- serializer_class = NetworkSliverSerializer
- id_serializer_class = NetworkSliverIdSerializer
+class NetworkInstanceList(XOSListCreateAPIView):
+ queryset = NetworkInstance.objects.select_related().all()
+ serializer_class = NetworkInstanceSerializer
+ id_serializer_class = NetworkInstanceIdSerializer
filter_backends = (filters.DjangoFilterBackend,)
- filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','sliver','ip','port_id',)
+ filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','instance','ip','port_id',)
def get_serializer_class(self):
no_hyperlinks=False
@@ -3437,13 +3447,13 @@
def get_queryset(self):
if (not self.request.user.is_authenticated()):
raise XOSNotAuthenticated()
- return NetworkSliver.select_by_user(self.request.user)
+ return NetworkInstance.select_by_user(self.request.user)
-class NetworkSliverDetail(XOSRetrieveUpdateDestroyAPIView):
- queryset = NetworkSliver.objects.select_related().all()
- serializer_class = NetworkSliverSerializer
- id_serializer_class = NetworkSliverIdSerializer
+class NetworkInstanceDetail(XOSRetrieveUpdateDestroyAPIView):
+ queryset = NetworkInstance.objects.select_related().all()
+ serializer_class = NetworkInstanceSerializer
+ id_serializer_class = NetworkInstanceIdSerializer
def get_serializer_class(self):
no_hyperlinks=False
@@ -3457,7 +3467,7 @@
def get_queryset(self):
if (not self.request.user.is_authenticated()):
raise XOSNotAuthenticated()
- return NetworkSliver.select_by_user(self.request.user)
+ return NetworkInstance.select_by_user(self.request.user)
# update() is handled by XOSRetrieveUpdateDestroyAPIView
@@ -3611,7 +3621,7 @@
serializer_class = SliceSerializer
id_serializer_class = SliceIdSerializer
filter_backends = (filters.DjangoFilterBackend,)
- filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','enabled','omf_friendly','description','slice_url','site','max_slivers','service','network','serviceClass','creator','default_flavor','default_image','mount_data_sets','networks','networks',)
+ filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','enabled','omf_friendly','description','slice_url','site','max_instances','service','network','serviceClass','creator','default_flavor','default_image','mount_data_sets','networks','networks',)
def get_serializer_class(self):
no_hyperlinks=False
@@ -3658,7 +3668,7 @@
serializer_class = NetworkSerializer
id_serializer_class = NetworkIdSerializer
filter_backends = (filters.DjangoFilterBackend,)
- filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','template','subnet','ports','labels','owner','guaranteed_bandwidth','permit_all_slices','topology_parameters','controller_url','controller_parameters','network_id','router_id','subnet_id','autoconnect','slices','slices','slivers','routers','routers',)
+ filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','name','template','subnet','ports','labels','owner','guaranteed_bandwidth','permit_all_slices','topology_parameters','controller_url','controller_parameters','network_id','router_id','subnet_id','autoconnect','slices','slices','instances','routers','routers',)
def get_serializer_class(self):
no_hyperlinks=False
@@ -4170,10 +4180,10 @@
-class SliverList(XOSListCreateAPIView):
- queryset = Sliver.objects.select_related().all()
- serializer_class = SliverSerializer
- id_serializer_class = SliverIdSerializer
+class InstanceList(XOSListCreateAPIView):
+ queryset = Instance.objects.select_related().all()
+ serializer_class = InstanceSerializer
+ id_serializer_class = InstanceIdSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','instance_id','instance_uuid','name','instance_name','ip','image','creator','slice','deployment','node','numberCores','flavor','userData','networks',)
@@ -4189,13 +4199,13 @@
def get_queryset(self):
if (not self.request.user.is_authenticated()):
raise XOSNotAuthenticated()
- return Sliver.select_by_user(self.request.user)
+ return Instance.select_by_user(self.request.user)
-class SliverDetail(XOSRetrieveUpdateDestroyAPIView):
- queryset = Sliver.objects.select_related().all()
- serializer_class = SliverSerializer
- id_serializer_class = SliverIdSerializer
+class InstanceDetail(XOSRetrieveUpdateDestroyAPIView):
+ queryset = Instance.objects.select_related().all()
+ serializer_class = InstanceSerializer
+ id_serializer_class = InstanceIdSerializer
def get_serializer_class(self):
no_hyperlinks=False
@@ -4209,7 +4219,7 @@
def get_queryset(self):
if (not self.request.user.is_authenticated()):
raise XOSNotAuthenticated()
- return Sliver.select_by_user(self.request.user)
+ return Instance.select_by_user(self.request.user)
# update() is handled by XOSRetrieveUpdateDestroyAPIView
@@ -4457,7 +4467,7 @@
serializer_class = ReservedResourceSerializer
id_serializer_class = ReservedResourceIdSerializer
filter_backends = (filters.DjangoFilterBackend,)
- filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','sliver','resource','quantity','reservationSet',)
+ filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','instance','resource','quantity','reservationSet',)
def get_serializer_class(self):
no_hyperlinks=False
@@ -4692,7 +4702,7 @@
serializer_class = PortSerializer
id_serializer_class = PortIdSerializer
filter_backends = (filters.DjangoFilterBackend,)
- filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','sliver','ip','port_id',)
+ filter_fields = ('id','created','updated','enacted','policed','backend_register','backend_status','deleted','write_protect','lazy_blocked','no_sync','network','instance','ip','port_id',)
def get_serializer_class(self):
no_hyperlinks=False