Refactor to /opt/planetstack, final tweaks to make sure planetstack can run in non-openstack mode, adjustments to GUI for model focus changes
diff --git a/planetstack/openstack/__init__.py b/planetstack/openstack/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/planetstack/openstack/__init__.py
diff --git a/planetstack/openstack/client.py b/planetstack/openstack/client.py
new file mode 100644
index 0000000..f3abbb2
--- /dev/null
+++ b/planetstack/openstack/client.py
@@ -0,0 +1,148 @@
+try:
+ from keystoneclient.v2_0 import client as keystone_client
+ from glance import client as glance_client
+ from novaclient.v1_1 import client as nova_client
+ from quantumclient.v2_0 import client as quantum_client
+ has_openstack = True
+except:
+ has_openstack = False
+
+from planetstack.config import Config
+
+def require_enabled(callable):
+ def wrapper(*args, **kwds):
+ if has_openstack:
+ return callable(*args, **kwds)
+ else:
+ return None
+ return wrapper
+
+def parse_novarc(filename):
+ opts = {}
+ f = open(filename, 'r')
+ for line in f:
+ try:
+ line = line.replace('export', '').strip()
+ parts = line.split('=')
+ if len(parts) > 1:
+ value = parts[1].replace("\'", "")
+ value = value.replace('\"', '')
+ opts[parts[0]] = value
+ except:
+ pass
+ f.close()
+ return opts
+
+class Client:
+ def __init__(self, username=None, password=None, tenant=None, url=None, config=None, *args, **kwds):
+ if config:
+ config = Config(config)
+ else:
+ config = Config()
+ self.has_openstack = has_openstack
+ self.username = config.nova_admin_user
+ self.password = config.nova_admin_password
+ self.tenant = config.nova_admin_tenant
+ self.url = config.nova_url
+
+ if username:
+ self.username = username
+ if password:
+ self.password = password
+ if tenant:
+ self.tenant = tenant
+ if url:
+ self.url = url
+
+ if '@' in self.username:
+ self.username = self.username[:self.username.index('@')]
+
+class KeystoneClient(Client):
+ def __init__(self, *args, **kwds):
+ Client.__init__(self, *args, **kwds)
+ if has_openstack:
+ self.client = keystone_client.Client(username=self.username,
+ password=self.password,
+ tenant_name=self.tenant,
+ auth_url=self.url)
+
+ @require_enabled
+ def connect(self, *args, **kwds):
+ self.__init__(*args, **kwds)
+
+ @require_enabled
+ def __getattr__(self, name):
+ return getattr(self.client, name)
+
+
+class GlanceClient(Client):
+ def __init__(self, *args, **kwds):
+ Client.__init__(self, *args, **kwds)
+ if has_openstack:
+ self.client = glance_client.get_client(host='0.0.0.0',
+ username=self.username,
+ password=self.password,
+ tenant=self.tenant,
+ auth_url=self.url)
+ @require_enabled
+ def __getattr__(self, name):
+ return getattr(self.client, name)
+
+class NovaClient(Client):
+ def __init__(self, *args, **kwds):
+ Client.__init__(self, *args, **kwds)
+ if has_openstack:
+ self.client = nova_client.Client(username=self.username,
+ api_key=self.password,
+ project_id=self.tenant,
+ auth_url=self.url,
+ region_name='',
+ extensions=[],
+ service_type='compute',
+ service_name='',
+ )
+
+ @require_enabled
+ def connect(self, *args, **kwds):
+ self.__init__(*args, **kwds)
+
+ @require_enabled
+ def __getattr__(self, name):
+ return getattr(self.client, name)
+
+class QuantumClient(Client):
+ def __init__(self, *args, **kwds):
+ Client.__init__(self, *args, **kwds)
+ if has_openstack:
+ self.client = quantum_client.Client(username=self.username,
+ password=self.password,
+ tenant_name=self.tenant,
+ auth_url=self.url)
+ @require_enabled
+ def connect(self, *args, **kwds):
+ self.__init__(*args, **kwds)
+
+ @require_enabled
+ def __getattr__(self, name):
+ return getattr(self.client, name)
+
+class OpenStackClient:
+ """
+ A simple native shell to the openstack backend services.
+ This class can receive all nova calls to the underlying testbed
+ """
+
+ def __init__ ( self, *args, **kwds) :
+ # instantiate managers
+ self.keystone = KeystoneClient(*args, **kwds)
+ self.glance = GlanceClient(*args, **kwds)
+ self.nova = NovaClient(*args, **kwds)
+ self.quantum = QuantumClient(*args, **kwds)
+
+ @require_enabled
+ def connect(self, *args, **kwds):
+ self.__init__(*args, **kwds)
+
+ @require_enabled
+ def authenticate(self):
+ return self.keystone.authenticate()
diff --git a/planetstack/openstack/driver.py b/planetstack/openstack/driver.py
new file mode 100644
index 0000000..6b04b5d
--- /dev/null
+++ b/planetstack/openstack/driver.py
@@ -0,0 +1,285 @@
+from planetstack.config import Config
+from openstack.client import OpenStackClient
+
+has_openstack = False
+class OpenStackDriver:
+
+ def __init__(self, config = None, client=None):
+ if config:
+ self.config = Config(config)
+ else:
+ self.config = Config()
+
+ self.admin_client = OpenStackClient()
+ if has_openstack:
+ self.admin_user = self.admin_client.keystone.users.find(name=self.admin_client.keystone.username)
+ else:
+ self.admin_user = None
+
+ if client:
+ self.shell = client
+ else:
+ self.shell = OpenStackClient()
+
+ def create_role(self, name):
+ roles = self.shell.keystone.roles.findall(name=name)
+ if not roles:
+ role = self.shell.keystone.roles.create(name)
+ else:
+ role = roles[0]
+ return role
+
+ def delete_role(self, filter):
+ roles = self.shell.keystone.roles.findall(**filter)
+ for role in roles:
+ self.shell.keystone.roles.delete(role)
+ return 1
+
+ def create_tenant(self, tenant_name, enabled, description):
+ """Create keystone tenant. Suggested fields: name, description, enabled"""
+ tenants = self.shell.keystone.tenants.findall(name=tenant_name)
+ if not tenants:
+ fields = {'tenant_name': tenant_name, 'enabled': enabled,
+ 'description': description}
+ tenant = self.shell.keystone.tenants.create(**fields)
+ else:
+ tenant = tenants[0]
+
+ # always give the admin user the admin role to any tenant created
+ # by the driver.
+ self.add_user_role(self.admin_user.id, tenant.id, 'admin')
+ return tenant
+
+ def update_tenant(self, id, **kwds):
+ return self.shell.keystone.tenants.update(id, **kwds)
+
+ def delete_tenant(self, id):
+ tenants = self.shell.keystone.tenants.findall(id=id)
+ for tenant in tenants:
+ self.shell.keystone.tenants.delete(tenant)
+ return 1
+
+ def create_user(self, name, email, password, enabled):
+ users = self.shell.keystone.users.findall(email=email)
+ if not users:
+ fields = {'name': name, 'email': email, 'password': password,
+ 'enabled': enabled}
+ user = self.shell.keystone.users.create(**fields)
+ else:
+ user = users[0]
+ return user
+
+ def delete_user(self, id):
+ users = self.shell.keystone.users.findall(id=id)
+ for user in users:
+ self.shell.keystone.users.delete(user)
+ return 1
+
+ def add_user_role(self, kuser_id, tenant_id, role_name):
+ user = self.shell.keystone.users.find(id=kuser_id)
+ tenant = self.shell.keystone.tenants.find(id=tenant_id)
+ role = self.shell.keystone.roles.find(name=role_name)
+
+ role_found = False
+ user_roles = user.list_roles(tenant.id)
+ for user_role in user_roles:
+ if user_role.name == role.name:
+ role_found = True
+ if not role_found:
+ tenant.add_user(user, role)
+
+ return 1
+
+ def delete_user_role(self, kuser_id, tenant_id, role_name):
+ user = self.shell.keystone.users.find(id=kuser_id)
+ tenant = self.shell.keystone.tenants.find(id=tenant_id)
+ role = self.shell.keystone.roles.find(name=role_name)
+
+ role_found = False
+ user_roles = user.list_roles(tenant.id)
+ for user_role in user_roles:
+ if user_role.name == role.name:
+ role_found = True
+ if role_found:
+ tenant.remove_user(user, role)
+
+ return 1
+
+ def update_user(self, id, **kwds):
+ return self.shell.keystone.users.update(id, **kwds)
+
+ def create_router(self, name, set_gateway=True):
+ routers = self.shell.quantum.list_routers(name=name)['routers']
+ if routers:
+ router = routers[0]
+ else:
+ router = self.shell.quantum.create_router({'router': {'name': name}})['router']
+ # add router to external network
+ if set_gateway:
+ nets = self.shell.quantum.list_networks()['networks']
+ for net in nets:
+ if net['router:external'] == True:
+ self.shell.quantum.add_gateway_router(router['id'],
+ {'network_id': net['id']})
+
+ return router
+
+ def delete_router(self, id):
+ routers = self.shell.quantum.list_routers(id=id)['routers']
+ for router in routers:
+ self.shell.quantum.delete_router(router['id'])
+        # remove router from external network
+ #nets = self.shell.quantum.list_networks()['networks']
+ #for net in nets:
+ # if net['router:external'] == True:
+ # self.shell.quantum.remove_gateway_router(router['id'])
+
+ def add_router_interface(self, router_id, subnet_id):
+ router = self.shell.quantum.show_router(router_id)['router']
+ subnet = self.shell.quantum.show_subnet(subnet_id)['subnet']
+ if router and subnet:
+ self.shell.quantum.add_interface_router(router_id, {'subnet_id': subnet_id})
+
+ def delete_router_interface(self, router_id, subnet_id):
+ router = self.shell.quantum.show_router(router_id)
+ subnet = self.shell.quantum.show_subnet(subnet_id)
+ if router and subnet:
+ self.shell.quantum.remove_interface_router(router_id, {'subnet_id': subnet_id})
+
+ def create_network(self, name):
+ nets = self.shell.quantum.list_networks(name=name)['networks']
+ if nets:
+ net = nets[0]
+ else:
+ net = self.shell.quantum.create_network({'network': {'name': name}})['network']
+ return net
+
+ def delete_network(self, id):
+ nets = self.shell.quantum.list_networks()['networks']
+ for net in nets:
+ if net['id'] == id:
+                # delete all ports
+ self.delete_network_ports(net['id'])
+ # delete all subnets:
+ for subnet_id in net['subnets']:
+ self.delete_subnet(subnet_id)
+ self.shell.quantum.delete_network(net['id'])
+ return 1
+
+ def delete_network_ports(self, network_id):
+ ports = self.shell.quantum.list_ports()['ports']
+ for port in ports:
+ if port['network_id'] == network_id:
+ self.shell.quantum.delete_port(port['id'])
+ return 1
+
+ def delete_subnet_ports(self, subnet_id):
+ ports = self.shell.quantum.list_ports()['ports']
+ for port in ports:
+ delete = False
+ for fixed_ip in port['fixed_ips']:
+ if fixed_ip['subnet_id'] == subnet_id:
+ delete=True
+ break
+ if delete:
+ self.shell.quantum.delete_port(port['id'])
+ return 1
+
+ def create_subnet(self, name, network_id, cidr_ip, ip_version, start, end):
+ #nets = self.shell.quantum.list_networks(name=network_name)['networks']
+ #if not nets:
+ # raise Exception, "No such network: %s" % network_name
+ #net = nets[0]
+
+ subnet = None
+ subnets = self.shell.quantum.list_subnets()['subnets']
+ for snet in subnets:
+ if snet['cidr'] == cidr_ip and snet['network_id'] == network_id:
+ subnet = snet
+
+ if not subnet:
+ allocation_pools = [{'start': start, 'end': end}]
+ subnet = {'subnet': {'name': name,
+ 'network_id': network_id,
+ 'ip_version': ip_version,
+ 'cidr': cidr_ip,
+ 'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
+ 'allocation_pools': allocation_pools}}
+ subnet = self.shell.quantum.create_subnet(subnet)['subnet']
+
+ # TODO: Add route to external network
+ # e.g. # route add -net 10.0.3.0/24 dev br-ex gw 10.100.0.5
+ return subnet
+
+ def update_subnet(self, id, fields):
+ return self.shell.quantum.update_subnet(id, fields)
+
+ def delete_subnet(self, id):
+ #return self.shell.quantum.delete_subnet(id=id)
+ # inefficient but fault tolerant
+ subnets = self.shell.quantum.list_subnets()['subnets']
+ for subnet in subnets:
+ if subnet['id'] == id:
+ self.delete_subnet_ports(subnet['id'])
+ self.shell.quantum.delete_subnet(id)
+ return
+
+ def create_keypair(self, name, key):
+ keys = self.shell.nova.keypairs.findall(name=name)
+ if keys:
+ key = keys[0]
+ else:
+ key = self.shell.nova.keypairs.create(name=name, public_key=key)
+ return key
+
+ def delete_keypair(self, id):
+ keys = self.shell.nova.keypairs.findall(id=id)
+ for key in keys:
+ self.shell.nova.keypairs.delete(key)
+ return 1
+
+ def spawn_instance(self, name, key_name=None, hostname=None, image_id=None, security_group=None, pubkeys=[]):
+ flavor_name = self.config.nova_default_flavor
+ flavor = self.shell.nova.flavors.find(name=flavor_name)
+ #if not image:
+        #    image = self.config.nova_default_image
+ if not security_group:
+ security_group = self.config.nova_default_security_group
+
+ #authorized_keys = "\n".join(pubkeys)
+ #files = {'/root/.ssh/authorized_keys': authorized_keys}
+ files = {}
+
+ hints = {}
+ availability_zone = None
+ if hostname:
+ availability_zone = 'nova:%s' % hostname
+ server = self.shell.nova.servers.create(
+ name=name,
+ key_name = key_name,
+ flavor=flavor.id,
+ image=image_id,
+ security_group = security_group,
+ files=files,
+ scheduler_hints=hints,
+ availability_zone=availability_zone)
+ return server
+
+ def destroy_instance(self, id):
+ servers = self.shell.nova.servers.findall(id=id)
+ for server in servers:
+ self.shell.nova.servers.delete(server)
+
+ def update_instance_metadata(self, id, metadata):
+ servers = self.shell.nova.servers.findall(id=id)
+ for server in servers:
+ self.shell.nova.servers.set_meta(server, metadata)
+ # note: set_meta() returns a broken Server() object. Don't try to
+ # print it in the shell or it will fail in __repr__.
+
+ def delete_instance_metadata(self, id, metadata):
+ # note: metadata is a dict. Only the keys matter, not the values.
+ servers = self.shell.nova.servers.findall(id=id)
+ for server in servers:
+ self.shell.nova.servers.delete_meta(server, metadata)
+
diff --git a/planetstack/openstack/manager.py b/planetstack/openstack/manager.py
new file mode 100644
index 0000000..788a621
--- /dev/null
+++ b/planetstack/openstack/manager.py
@@ -0,0 +1,245 @@
+from planetstack import settings
+#from django.core import management
+#management.setup_environ(settings)
+import os
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+
+try:
+ from openstack.client import OpenStackClient
+ from openstack.driver import OpenStackDriver
+ from planetstack.config import Config
+ from core.models import *
+ has_openstack = True
+except:
+ has_openstack = False
+
+#manager_enabled = Config().api_nova_enabled
+manager_enabled = False
+
+def require_enabled(callable):
+ def wrapper(*args, **kwds):
+ if manager_enabled and has_openstack:
+ return callable(*args, **kwds)
+ else:
+ return None
+ return wrapper
+
+
+class OpenStackManager:
+
+ def __init__(self, auth={}, caller=None):
+ if auth:
+ self.client = OpenStackClient(**auth)
+ else:
+ self.client = OpenStackClient()
+ self.has_openstack = has_openstack
+ self.enabled = manager_enabled
+ self.driver = OpenStackDriver(client=self.client)
+ self.caller=caller
+ if not self.caller:
+ self.caller = self.driver.admin_user
+ self.caller.kuser_id = self.caller.id
+
+ @require_enabled
+ def save_role(self, role):
+ if not role.role_id:
+ keystone_role = self.driver.create_role(role.role_type)
+ role.role_id = keystone_role.id
+
+ @require_enabled
+ def delete_role(self, role):
+ if role.role_id:
+ self.driver.delete_role({'id': role.role_id})
+
+ @require_enabled
+ def save_key(self, key):
+ if not key.key_id:
+ key_fields = {'name': key.name,
+ 'key': key.key}
+ nova_key = self.driver.create_keypair(**key_fields)
+ key.key_id = nova_key.id
+
+ @require_enabled
+ def delete_key(self, key):
+ if key.key_id:
+ self.driver.delete_keypair(key.key_id)
+
+ @require_enabled
+ def save_user(self, user):
+ if not user.kuser_id:
+ name = user.email[:user.email.find('@')]
+ user_fields = {'name': name,
+ 'email': user.email,
+ 'password': user.password,
+ 'enabled': True}
+ keystone_user = self.driver.create_user(**user_fields)
+ user.kuser_id = keystone_user.id
+
+ @require_enabled
+ def delete_user(self, user):
+ if user.kuser_id:
+ self.driver.delete_user(user.kuser_id)
+
+
+
+ @require_enabled
+ def save_site(self, site, add_role=True):
+ if not site.tenant_id:
+ tenant = self.driver.create_tenant(tenant_name=site.login_base,
+ description=site.name,
+ enabled=site.enabled)
+ site.tenant_id = tenant.id
+ # give caller an admin role at the tenant they've created
+ self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+
+ # update the record
+ if site.id and site.tenant_id:
+ self.driver.update_tenant(site.tenant_id,
+ description=site.name,
+ enabled=site.enabled)
+
+ @require_enabled
+ def delete_site(self, site):
+ if site.tenant_id:
+ self.driver.delete_tenant(site.tenant_id)
+
+ @require_enabled
+ def save_slice(self, slice):
+ if not slice.tenant_id:
+ nova_fields = {'tenant_name': slice.name,
+ 'description': slice.description,
+ 'enabled': slice.enabled}
+ tenant = self.driver.create_tenant(**nova_fields)
+ slice.tenant_id = tenant.id
+
+ # give caller an admin role at the tenant they've created
+ self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+
+ # refresh credentials using this tenant
+ self.driver.shell.connect(username=self.driver.shell.keystone.username,
+ password=self.driver.shell.keystone.password,
+ tenant=tenant.name)
+
+ # create network
+ network = self.driver.create_network(slice.name)
+ slice.network_id = network['id']
+
+ # create router
+ router = self.driver.create_router(slice.name)
+ slice.router_id = router['id']
+
+ if slice.id and slice.tenant_id:
+ self.driver.update_tenant(slice.tenant_id,
+ description=slice.description,
+ enabled=slice.enabled)
+
+ @require_enabled
+ def delete_slice(self, slice):
+ if slice.tenant_id:
+ self.driver.delete_router(slice.router_id)
+ self.driver.delete_network(slice.network_id)
+ self.driver.delete_tenant(slice.tenant_id)
+
+ @require_enabled
+ def save_subnet(self, subnet):
+ if not subnet.subnet_id:
+ quantum_subnet = self.driver.create_subnet(name= subnet.slice.name,
+ network_id=subnet.slice.network_id,
+ cidr_ip = subnet.cidr,
+ ip_version=subnet.ip_version,
+ start = subnet.start,
+ end = subnet.end)
+ subnet.subnet_id = quantum_subnet['id']
+ # add subnet as interface to slice's router
+ self.driver.add_router_interface(subnet.slice.router_id, subnet.subnet_id)
+ #add_route = 'route add -net %s dev br-ex gw 10.100.0.5' % self.cidr
+ #commands.getstatusoutput(add_route)
+
+
+ @require_enabled
+ def delete_subnet(self, subnet):
+ if subnet.subnet_id:
+ self.driver.delete_router_interface(subnet.slice.router_id, subnet.subnet_id)
+ self.driver.delete_subnet(subnet.subnet_id)
+ #del_route = 'route del -net %s' % self.cidr
+ #commands.getstatusoutput(del_route)
+
+ @require_enabled
+ def save_sliver(self, sliver):
+ if not sliver.instance_id:
+ instance = self.driver.spawn_instance(name=sliver.name,
+ key_name = sliver.key.name,
+ image_id = sliver.image.image_id,
+ hostname = sliver.node.name )
+ sliver.instance_id = instance.id
+ sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+
+ @require_enabled
+ def delete_sliver(self, sliver):
+ if sliver.instance_id:
+ self.driver.destroy_instance(sliver.instance_id)
+
+
+ def refresh_nodes(self):
+ # collect local nodes
+ nodes = Node.objects.all()
+ nodes_dict = {}
+ for node in nodes:
+ if 'viccidev10' not in node.name:
+ nodes_dict[node.name] = node
+
+ deployment = DeploymentNetwork.objects.filter(name='VICCI')[0]
+ login_bases = ['princeton', 'stanford', 'gt', 'uw', 'mpisws']
+ sites = Site.objects.filter(login_base__in=login_bases)
+ # collect nova nodes:
+ compute_nodes = self.client.nova.hypervisors.list()
+
+ compute_nodes_dict = {}
+ for compute_node in compute_nodes:
+ compute_nodes_dict[compute_node.hypervisor_hostname] = compute_node
+
+ # add new nodes:
+ new_node_names = set(compute_nodes_dict.keys()).difference(nodes_dict.keys())
+ i = 0
+ max = len(sites)
+ for name in new_node_names:
+ if i == max:
+ i = 0
+ site = sites[i]
+ node = Node(name=compute_nodes_dict[name].hypervisor_hostname,
+ site=site,
+ deploymentNetwork=deployment)
+ node.save()
+ i+=1
+
+ # remove old nodes
+ old_node_names = set(nodes_dict.keys()).difference(compute_nodes_dict.keys())
+ Node.objects.filter(name__in=old_node_names).delete()
+
+ def refresh_images(self):
+ # collect local images
+ images = Image.objects.all()
+ images_dict = {}
+ for image in images:
+ images_dict[image.name] = image
+
+ # collect glance images
+ glance_images = self.client.glance.get_images()
+ glance_images_dict = {}
+ for glance_image in glance_images:
+ glance_images_dict[glance_image['name']] = glance_image
+
+ # add new images
+ new_image_names = set(glance_images_dict.keys()).difference(images_dict.keys())
+ for name in new_image_names:
+ image = Image(image_id=glance_images_dict[name]['id'],
+ name=glance_images_dict[name]['name'],
+ disk_format=glance_images_dict[name]['disk_format'],
+ container_format=glance_images_dict[name]['container_format'])
+ image.save()
+
+ # remove old images
+ old_image_names = set(images_dict.keys()).difference(glance_images_dict.keys())
+ Image.objects.filter(name__in=old_image_names).delete()
+
+
diff --git a/planetstack/openstack/siteagent.py b/planetstack/openstack/siteagent.py
new file mode 100644
index 0000000..a57fa0b
--- /dev/null
+++ b/planetstack/openstack/siteagent.py
@@ -0,0 +1,22 @@
+import os
+import sys
+#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+import time
+from core.models.site import Site
+from openstack.manager import OpenStackManager
+
+class SiteAgent:
+ def run(self):
+ manager = OpenStackManager()
+        # only proceed if openstack is enabled and available
+ if manager.enabled and manager.has_openstack:
+ # fill in null tenant ids
+ sites = Site.objects.filter(tenant_id__in=[None, ''])
+ for site in sites:
+ # calling save() on the model should force the tenant_id to be set
+ site.os_manager = manager
+ site.save()
+
+if __name__ == '__main__':
+ SiteAgent().run()
+
diff --git a/planetstack/openstack/sliveragent.py b/planetstack/openstack/sliveragent.py
new file mode 100644
index 0000000..b2f29cf
--- /dev/null
+++ b/planetstack/openstack/sliveragent.py
@@ -0,0 +1,39 @@
+import os
+import sys
+#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+import time
+from core.models.sliver import Sliver
+from openstack.manager import OpenStackManager
+
+class SliverAgent:
+
+ def run(self):
+ manager = OpenStackManager()
+        # exit if openstack is disabled or unavailable
+ if not manager.enabled or not manager.has_openstack:
+ sys.exit()
+
+ while True :
+ # fill in null ip addresses
+ slivers = Sliver.objects.filter(ip=None)
+ for sliver in slivers:
+ # update connection
+ manager.client.connect(username=manager.client.keystone.username,
+ password=manager.client.keystone.password,
+ tenant=sliver.slice.name)
+ sliver.os_manager = manager
+ servers = manager.client.nova.servers.findall(id=sliver.instance_id)
+ if not servers:
+ continue
+ server = servers[0]
+ ips = server.addresses.get(sliver.slice.name, [])
+ if not ips:
+ continue
+ sliver.ip = ips[0]['addr']
+ sliver.save()
+ time.sleep(7)
+
+
+if __name__ == '__main__':
+ SliverAgent().run()
+