replace all occurrences of {Ss}liver{s,} with {Ii}nstance{s,}
diff --git a/xos/openstack_observer/ansible.py b/xos/openstack_observer/ansible.py
index b53dd98..74af590 100755
--- a/xos/openstack_observer/ansible.py
+++ b/xos/openstack_observer/ansible.py
@@ -136,7 +136,7 @@
def run_template_ssh(name, opts, path='', expected_num=None):
instance_id = opts["instance_id"]
- sliver_name = opts["sliver_name"]
+ instance_name = opts["instance_name"]
hostname = opts["hostname"]
private_key = opts["private_key"]
@@ -159,7 +159,7 @@
f.close()
f = open(hosts_pathname, "w")
- f.write("[%s]\n" % sliver_name)
+ f.write("[%s]\n" % instance_name)
f.write("%s ansible_ssh_private_key_file=%s\n" % (hostname, private_key_pathname))
f.close()
diff --git a/xos/openstack_observer/ceilometer.py b/xos/openstack_observer/ceilometer.py
index 9944a9c..792515e 100644
--- a/xos/openstack_observer/ceilometer.py
+++ b/xos/openstack_observer/ceilometer.py
@@ -14,7 +14,7 @@
from core.models import *
filter_dict = {
'ControllerSlice':[ControllerSlice, 'tenant_id', 'project_id'],
- 'Sliver':[Sliver, 'instance_id', 'resource_id'],
+ 'Instance':[Instance, 'instance_id', 'resource_id'],
'ControllerSite':[ControllerSite, 'tenant_id', 'project_id']
}
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index 57d6a31..46bd23a 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -66,7 +66,7 @@
return ig
class XOSObserver:
- #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivilege,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
+ #sync_steps = [SyncNetworks,SyncNetworkInstances,SyncSites,SyncSitePrivilege,SyncSlices,SyncSliceMemberships,SyncInstances,SyncInstanceIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
sync_steps = []
@@ -193,10 +193,10 @@
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(step_graph)
self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
- #self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncSlivers']
+ #self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncInstances']
#self.ordered_steps = ['SyncControllerSites','SyncRoles','SyncControllerUsers','SyncControllerSlices','SyncControllerNetworks']
#self.ordered_steps = ['SyncControllerNetworks']
- #self.ordered_steps = ['SyncSlivers','SyncNetworkSlivers']
+ #self.ordered_steps = ['SyncInstances','SyncNetworkInstances']
print "Order of steps=",self.ordered_steps
diff --git a/xos/openstack_observer/steps/sync_controller_networks.py b/xos/openstack_observer/steps/sync_controller_networks.py
index f740771..4c8e934 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.py
+++ b/xos/openstack_observer/steps/sync_controller_networks.py
@@ -8,7 +8,7 @@
from observer.syncstep import *
from core.models.network import *
from core.models.slice import *
-from core.models.sliver import Sliver
+from core.models.instance import Instance
from util.logger import observer_logger as logger
from observer.ansible import *
from openstack.driver import OpenStackDriver
diff --git a/xos/openstack_observer/steps/sync_controller_slices.py b/xos/openstack_observer/steps/sync_controller_slices.py
index 1a6f517..0eceb95 100644
--- a/xos/openstack_observer/steps/sync_controller_slices.py
+++ b/xos/openstack_observer/steps/sync_controller_slices.py
@@ -42,7 +42,7 @@
controller_user = controller_users[0]
roles = ['admin']
- max_instances=int(controller_slice.slice.max_slivers)
+ max_instances=int(controller_slice.slice.max_instances)
tenant_fields = {'endpoint':controller_slice.controller.auth_url,
'admin_user': controller_slice.controller.admin_user,
'admin_password': controller_slice.controller.admin_password,
@@ -60,7 +60,7 @@
if (not controller_slice.tenant_id):
try:
driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
- driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_slivers))
+ driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_instances))
except:
logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
raise Exception('Could not update quota for %s'%controller_slice.slice.name)
diff --git a/xos/openstack_observer/steps/sync_network_slivers.py b/xos/openstack_observer/steps/sync_network_slivers.py
index 4a658fb..b3b11e7 100644
--- a/xos/openstack_observer/steps/sync_network_slivers.py
+++ b/xos/openstack_observer/steps/sync_network_slivers.py
@@ -7,24 +7,24 @@
from core.models.network import *
from util.logger import observer_logger as logger
-class SyncNetworkSlivers(OpenStackSyncStep):
+class SyncNetworkInstances(OpenStackSyncStep):
requested_interval = 0 # 3600
- provides=[NetworkSliver]
- observes=NetworkSliver
+ provides=[NetworkInstance]
+ observes=NetworkInstance
# The way it works is to enumerate the all of the ports that quantum
# has, and then work backward from each port's network-id to determine
# which Network is associated from the port.
def call(self, **args):
- logger.info("sync'ing network slivers")
+ logger.info("sync'ing network instances")
- networkSlivers = NetworkSliver.objects.all()
- networkSlivers_by_id = {}
- networkSlivers_by_port = {}
- for networkSliver in networkSlivers:
- networkSlivers_by_id[networkSliver.id] = networkSliver
- networkSlivers_by_port[networkSliver.port_id] = networkSliver
+ networkInstances = NetworkInstance.objects.all()
+ networkInstances_by_id = {}
+ networkInstances_by_port = {}
+ for networkInstance in networkInstances:
+ networkInstances_by_id[networkInstance.id] = networkInstance
+ networkInstances_by_port[networkInstance.port_id] = networkInstance
networks = Network.objects.all()
networks_by_id = {}
@@ -36,10 +36,10 @@
#for (network_id, network) in networks_by_id.items():
# logger.info(" %s: %s" % (network_id, network.name))
- slivers = Sliver.objects.all()
- slivers_by_instance_uuid = {}
- for sliver in slivers:
- slivers_by_instance_uuid[sliver.instance_uuid] = sliver
+ instances = Instance.objects.all()
+ instances_by_instance_uuid = {}
+ for instance in instances:
+ instances_by_instance_uuid[instance.instance_uuid] = instance
# Get all ports in all controllers
@@ -74,7 +74,7 @@
for port in ports_by_id.values():
#logger.info("port %s" % str(port))
- if port["id"] in networkSlivers_by_port:
+ if port["id"] in networkInstances_by_port:
# we already have it
#logger.info("already accounted for port %s" % port["id"])
continue
@@ -84,25 +84,25 @@
#logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
continue
- sliver = slivers_by_instance_uuid.get(port['device_id'], None)
- if not sliver:
- logger.info("no sliver for port %s device_id %s" % (port["id"], port['device_id']))
+ instance = instances_by_instance_uuid.get(port['device_id'], None)
+ if not instance:
+ logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
continue
network = networks_by_id.get(port['network_id'], None)
if not network:
# maybe it's public-nat or public-dedicated. Search the templates for
- # the id, then see if the sliver's slice has some network that uses
+ # the id, then see if the instance's slice has some network that uses
# that template
template = templates_by_id.get(port['network_id'], None)
- if template and sliver.slice:
- for candidate_network in sliver.slice.networks.all():
+ if template and instance.slice:
+ for candidate_network in instance.slice.networks.all():
if candidate_network.template == template:
network=candidate_network
if not network:
logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
- # we know it's associated with a sliver, but we don't know
+ # we know it's associated with an instance, but we don't know
# which network it is part of.
continue
@@ -114,7 +114,7 @@
networks = network.template.network_set.all()
network = None
for candidate_network in networks:
- if (candidate_network.owner == sliver.slice):
+ if (candidate_network.owner == instance.slice):
print "found network", candidate_network
network = candidate_network
@@ -127,57 +127,57 @@
continue
ip=port["fixed_ips"][0]["ip_address"]
- logger.info("creating NetworkSliver (%s, %s, %s, %s)" % (str(network), str(sliver), ip, str(port["id"])))
+ logger.info("creating NetworkInstance (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
- ns = NetworkSliver(network=network,
- sliver=sliver,
+ ns = NetworkInstance(network=network,
+ instance=instance,
ip=ip,
port_id=port["id"])
try:
ns.save()
except:
- logger.log_exc("failed to save networksliver %s" % str(ns))
+ logger.log_exc("failed to save networkinstance %s" % str(ns))
continue
- # For networkSlivers that were created by the user, find that ones
+ # For networkInstances that were created by the user, find the ones
# that don't have neutron ports, and create them.
- for networkSliver in NetworkSliver.objects.filter(port_id__isnull=True, sliver__isnull=False):
- #logger.info("working on networksliver %s" % networkSliver)
- controller = sliver.node.site_deployment.controller
+ for networkInstance in NetworkInstance.objects.filter(port_id__isnull=True, instance__isnull=False):
+ #logger.info("working on networkinstance %s" % networkInstance)
+ controller = instance.node.site_deployment.controller  # FIXME(review): 'instance' looks stale here (bound by the earlier loop); presumably this should be networkInstance.instance — pre-existing before the rename, confirm and fix separately
if controller:
- cn=networkSliver.network.controllernetworks.filter(controller=controller)
+ cn=networkInstance.network.controllernetworks.filter(controller=controller)
if not cn:
- logger.log_exc("no controllernetwork for %s" % networkSliver)
+ logger.log_exc("no controllernetwork for %s" % networkInstance)
continue
cn=cn[0]
try:
driver = self.driver.admin_driver(controller = controller,tenant='admin')
port = driver.shell.quantum.create_port({"port": {"network_id": cn.net_id}})["port"]
- networkSliver.port_id = port["id"]
+ networkInstance.port_id = port["id"]
if port["fixed_ips"]:
- networkSliver.ip = port["fixed_ips"][0]["ip_address"]
+ networkInstance.ip = port["fixed_ips"][0]["ip_address"]
except:
- logger.log_exc("failed to create neutron port for %s" % networkSliver)
+ logger.log_exc("failed to create neutron port for %s" % networkInstance)
continue
- networkSliver.save()
+ networkInstance.save()
# Now, handle port forwarding
- # We get the list of NetworkSlivers again, since we might have just
+ # We get the list of NetworkInstances again, since we might have just
# added a few. Then, for each one of them we find it's quantum port and
# make sure quantum's nat:forward_ports argument is the same.
- for networkSliver in NetworkSliver.objects.all():
+ for networkInstance in NetworkInstance.objects.all():
try:
- nat_list = networkSliver.network.nat_list
+ nat_list = networkInstance.network.nat_list
except (TypeError, ValueError), e:
logger.info("Failed to decode nat_list: %s" % str(e))
continue
- if not networkSliver.port_id:
+ if not networkInstance.port_id:
continue
- neutron_port = ports_by_id.get(networkSliver.port_id, None)
+ neutron_port = ports_by_id.get(networkInstance.port_id, None)
if not neutron_port:
continue
@@ -187,18 +187,18 @@
neutron_nat_list = []
if (neutron_nat_list != nat_list):
- logger.info("Setting nat:forward_ports for port %s network %s sliver %s to %s" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
+ logger.info("Setting nat:forward_ports for port %s network %s instance %s to %s" % (str(networkInstance.port_id), str(networkInstance.network.id), str(networkInstance.instance), str(nat_list)))
try:
- driver = self.driver.admin_driver(controller=networkSliver.sliver.node.site_deployment.controller,tenant='admin')
- driver.shell.quantum.update_port(networkSliver.port_id, {"port": {"nat:forward_ports": nat_list}})
+ driver = self.driver.admin_driver(controller=networkInstance.instance.node.site_deployment.controller,tenant='admin')
+ driver.shell.quantum.update_port(networkInstance.port_id, {"port": {"nat:forward_ports": nat_list}})
except:
logger.log_exc("failed to update port with nat_list %s" % str(nat_list))
continue
else:
- #logger.info("port %s network %s sliver %s nat %s is already set" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
+ #logger.info("port %s network %s instance %s nat %s is already set" % (str(networkInstance.port_id), str(networkInstance.network.id), str(networkInstance.instance), str(nat_list)))
pass
- def delete_record(self, network_sliver):
+ def delete_record(self, network_instance):
# Nothing to do, this is an OpenCloud object
pass
diff --git a/xos/openstack_observer/steps/sync_slivers.py b/xos/openstack_observer/steps/sync_slivers.py
index 48f5c25..b42b092 100644
--- a/xos/openstack_observer/steps/sync_slivers.py
+++ b/xos/openstack_observer/steps/sync_slivers.py
@@ -5,7 +5,7 @@
from xos.config import Config
from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
from observer.openstacksyncstep import OpenStackSyncStep
-from core.models.sliver import Sliver
+from core.models.instance import Instance
from core.models.slice import Slice, SlicePrivilege, ControllerSlice
from core.models.network import Network, NetworkSlice, ControllerNetwork
from observer.ansible import *
@@ -16,49 +16,49 @@
s = s.replace('\n',r'\n').replace('"',r'\"')
return s
-class SyncSlivers(OpenStackSyncStep):
- provides=[Sliver]
+class SyncInstances(OpenStackSyncStep):
+ provides=[Instance]
requested_interval=0
- observes=Sliver
+ observes=Instance
- def get_userdata(self, sliver, pubkeys):
- userdata = '#cloud-config\n\nopencloud:\n slicename: "%s"\n hostname: "%s"\n restapi_hostname: "%s"\n restapi_port: "%s"\n' % (sliver.slice.name, sliver.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
+ def get_userdata(self, instance, pubkeys):
+ userdata = '#cloud-config\n\nopencloud:\n slicename: "%s"\n hostname: "%s"\n restapi_hostname: "%s"\n restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
userdata += 'ssh_authorized_keys:\n'
for key in pubkeys:
userdata += ' - %s\n' % key
return userdata
- def sync_record(self, sliver):
- logger.info("sync'ing sliver:%s slice:%s controller:%s " % (sliver, sliver.slice.name, sliver.node.site_deployment.controller))
- controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+ def sync_record(self, instance):
+ logger.info("sync'ing instance:%s slice:%s controller:%s " % (instance, instance.slice.name, instance.node.site_deployment.controller))
+ controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
+ raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
metadata_update = {}
- if (sliver.numberCores):
- metadata_update["cpu_cores"] = str(sliver.numberCores)
+ if (instance.numberCores):
+ metadata_update["cpu_cores"] = str(instance.numberCores)
- for tag in sliver.slice.tags.all():
+ for tag in instance.slice.tags.all():
if tag.name.startswith("sysctl-"):
metadata_update[tag.name] = tag.value
# public keys
- slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+ slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
- if sliver.creator.public_key:
- pubkeys.add(sliver.creator.public_key)
+ if instance.creator.public_key:
+ pubkeys.add(instance.creator.public_key)
- if sliver.slice.creator.public_key:
- pubkeys.add(sliver.slice.creator.public_key)
+ if instance.slice.creator.public_key:
+ pubkeys.add(instance.slice.creator.public_key)
- if sliver.slice.service and sliver.slice.service.public_key:
- pubkeys.add(sliver.slice.service.public_key)
+ if instance.slice.service and instance.slice.service.public_key:
+ pubkeys.add(instance.slice.service.public_key)
nics = []
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
+ networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
controller_networks = ControllerNetwork.objects.filter(network__in=networks,
- controller=sliver.node.site_deployment.controller)
+ controller=instance.node.site_deployment.controller)
for controller_network in controller_networks:
if controller_network.network.template.visibility == 'private' and \
@@ -71,8 +71,8 @@
network_templates = [network.template.shared_network_name for network in networks \
if network.template.shared_network_name]
- #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, controller=sliver.controllerNetwork)
- driver = self.driver.admin_driver(tenant='admin', controller=sliver.node.site_deployment.controller)
+ #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
+ driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
nets = driver.shell.quantum.list_networks()['networks']
for net in nets:
if net['name'] in network_templates:
@@ -84,16 +84,16 @@
nics.append(net['id'])
image_name = None
- controller_images = sliver.image.controllerimages.filter(controller=sliver.node.site_deployment.controller)
+ controller_images = instance.image.controllerimages.filter(controller=instance.node.site_deployment.controller)
if controller_images:
image_name = controller_images[0].image.name
logger.info("using image from ControllerImage object: " + str(image_name))
if image_name is None:
- controller_driver = self.driver.admin_driver(controller=sliver.node.site_deployment.controller)
+ controller_driver = self.driver.admin_driver(controller=instance.node.site_deployment.controller)
images = controller_driver.shell.glanceclient.images.list()
for image in images:
- if image.name == sliver.image.name or not image_name:
+ if image.name == instance.image.name or not image_name:
image_name = image.name
logger.info("using image from glance: " + str(image_name))
@@ -103,74 +103,74 @@
legacy = False
if (legacy):
- host_filter = sliver.node.name.split('.',1)[0]
+ host_filter = instance.node.name.split('.',1)[0]
else:
- host_filter = sliver.node.name.strip()
+ host_filter = instance.node.name.strip()
availability_zone_filter = 'nova:%s'%host_filter
- sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
+ instance_name = '%s-%d'%(instance.slice.name,instance.id)
- userData = self.get_userdata(sliver, pubkeys)
- if sliver.userData:
- userData = sliver.userData
+ userData = self.get_userdata(instance, pubkeys)
+ if instance.userData:
+ userData = instance.userData
- controller = sliver.node.site_deployment.controller
+ controller = instance.node.site_deployment.controller
tenant_fields = {'endpoint':controller.auth_url,
- 'admin_user': sliver.creator.email,
- 'admin_password': sliver.creator.remote_password,
- 'admin_tenant': sliver.slice.name,
- 'tenant': sliver.slice.name,
- 'tenant_description': sliver.slice.description,
- 'name':sliver_name,
- 'ansible_tag':sliver_name,
+ 'admin_user': instance.creator.email,
+ 'admin_password': instance.creator.remote_password,
+ 'admin_tenant': instance.slice.name,
+ 'tenant': instance.slice.name,
+ 'tenant_description': instance.slice.description,
+ 'name':instance_name,
+ 'ansible_tag':instance_name,
'availability_zone': availability_zone_filter,
'image_name':image_name,
- 'flavor_name':sliver.flavor.name,
+ 'flavor_name':instance.flavor.name,
'nics':nics,
'meta':metadata_update,
'user_data':r'%s'%escape(userData)}
- res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
- sliver_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
- sliver_uuid = res[0]['id']
+ res = run_template('sync_instances.yaml', tenant_fields,path='instances', expected_num=1)
+ instance_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
+ instance_uuid = res[0]['id']
try:
hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
ip = socket.gethostbyname(hostname)
- sliver.ip = ip
+ instance.ip = ip
except:
pass
- sliver.instance_id = sliver_id
- sliver.instance_uuid = sliver_uuid
- sliver.instance_name = sliver_name
- sliver.save()
+ instance.instance_id = instance_id
+ instance.instance_uuid = instance_uuid
+ instance.instance_name = instance_name
+ instance.save()
- def delete_record(self, sliver):
- controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+ def delete_record(self, instance):
+ controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
+ raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
- sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
- controller = sliver.node.site_deployment.controller
+ instance_name = '%s-%d'%(instance.slice.name,instance.id)
+ controller = instance.node.site_deployment.controller
tenant_fields = {'endpoint':controller.auth_url,
- 'admin_user': sliver.creator.email,
- 'admin_password': sliver.creator.remote_password,
- 'admin_tenant': sliver.slice.name,
- 'tenant': sliver.slice.name,
- 'tenant_description': sliver.slice.description,
- 'name':sliver_name,
- 'ansible_tag':sliver_name,
+ 'admin_user': instance.creator.email,
+ 'admin_password': instance.creator.remote_password,
+ 'admin_tenant': instance.slice.name,
+ 'tenant': instance.slice.name,
+ 'tenant_description': instance.slice.description,
+ 'name':instance_name,
+ 'ansible_tag':instance_name,
'delete': True}
try:
- res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
+ res = run_template('sync_instances.yaml', tenant_fields,path='instances', expected_num=1)
except Exception,e:
- print "Could not sync %s"%sliver_name
+ print "Could not sync %s"%instance_name
#import traceback
#traceback.print_exc()
raise e
if (len(res)!=1):
- raise Exception('Could not delete sliver %s'%sliver.slice.name)
+ raise Exception('Could not delete instance %s'%instance.slice.name)
diff --git a/xos/openstack_observer/syncstep.py b/xos/openstack_observer/syncstep.py
index 9ec79cc..f5703f2 100644
--- a/xos/openstack_observer/syncstep.py
+++ b/xos/openstack_observer/syncstep.py
@@ -77,7 +77,7 @@
objs = main_obj.deleted_objects.all()
return objs
- #return Sliver.objects.filter(ip=None)
+ #return Instance.objects.filter(ip=None)
def check_dependencies(self, obj, failed):
for dep in self.dependencies: