Merged plcorebase
diff --git a/planetstack/core/models/deployment.py b/planetstack/core/models/deployment.py
index 8bf9c06..9a4cbe1 100644
--- a/planetstack/core/models/deployment.py
+++ b/planetstack/core/models/deployment.py
@@ -15,6 +15,7 @@
ROLE_CHOICES = (('admin','Admin'),)
role = models.CharField(choices=ROLE_CHOICES, unique=True, max_length=30)
+ krole_id = models.CharField(max_length=80, verbose_name="Keystone role id", null=True, blank=True)
def __unicode__(self): return u'%s' % (self.role)
diff --git a/planetstack/core/models/plcorebase.py b/planetstack/core/models/plcorebase.py
index 4a64ce5..dcc3c39 100644
--- a/planetstack/core/models/plcorebase.py
+++ b/planetstack/core/models/plcorebase.py
@@ -8,6 +8,7 @@
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
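+ # set by the observer once a record has been successfully synced to the backend;
+ # records with enacted == None or enacted < updated are treated as pending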
+ enacted = models.DateTimeField(null=True, default=None)
class Meta:
abstract = True
@@ -38,7 +39,11 @@
def delete(self, *args, **kwds):
super(PlCoreBase, self).delete(*args, **kwds)
-# EventSender().fire({'delete_flag':True,'model':self.__name__})
+ try:
+ EventSender().fire(deletion_flag=True, model=self.__class__.__name__, pk=self.pk)
+ except:
+ # Investigate later.
+ pass
def save(self, *args, **kwargs):
super(PlCoreBase, self).save(*args, **kwargs)
diff --git a/planetstack/core/models/role.py b/planetstack/core/models/role.py
index 234868e..bd97f52 100644
--- a/planetstack/core/models/role.py
+++ b/planetstack/core/models/role.py
@@ -8,6 +8,7 @@
class Role(PlCoreBase):
role_type = models.CharField(max_length=80, verbose_name="Name")
+ role = models.CharField(max_length=80, verbose_name="Keystone role id", null=True, blank=True)
description = models.CharField(max_length=120, verbose_name="Description")
content_type = models.ForeignKey(ContentType, verbose_name="Role Scope")
@@ -15,16 +16,8 @@
def save(self, *args, **kwds):
- if not hasattr(self, 'os_manager'):
- from openstack.manager import OpenStackManager
- setattr(self, 'os_manager', OpenStackManager())
- self.os_manager.save_role(self)
super(Role, self).save(*args, **kwds)
def delete(self, *args, **kwds):
- if not hasattr(self, 'os_manager'):
- from openstack.manager import OpenStackManager
- setattr(self, 'os_manager', OpenStackManager())
- self.os_manager.delete_role(self)
super(Role, self).delete(*args, **kwds)
diff --git a/planetstack/core/models/site.py b/planetstack/core/models/site.py
index aee3843..56f9bd0 100644
--- a/planetstack/core/models/site.py
+++ b/planetstack/core/models/site.py
@@ -26,8 +26,9 @@
class SiteRole(PlCoreBase):
- ROLE_CHOICES = (('admin','Admin'),('pi','PI'),('tech','Tech'),('billing','Billing'))
+ ROLE_CHOICES = (('admin','Admin'),('pi','PI'),('tech','Tech'),('billing','Billing'), ('user', 'User'))
role = models.CharField(choices=ROLE_CHOICES, unique=True, max_length=30)
+ krole_id = models.CharField(max_length=80, verbose_name="Keystone role id", null=True, blank=True)
def __unicode__(self): return u'%s' % (self.role)
diff --git a/planetstack/core/models/slice.py b/planetstack/core/models/slice.py
index 51e05f5..b7ca15d 100644
--- a/planetstack/core/models/slice.py
+++ b/planetstack/core/models/slice.py
@@ -43,9 +43,10 @@
super(Slice, self).save(*args, **kwds)
class SliceRole(PlCoreBase):
- ROLE_CHOICES = (('admin','Admin'),('default','Default'))
+ ROLE_CHOICES = (('admin','Admin'),('default','Default'), ('user', 'User'), ('pi', 'PI'))
role = models.CharField(choices=ROLE_CHOICES, unique=True, max_length=30)
+ krole_id = models.CharField(max_length=80, verbose_name="Keystone role id", null=True, blank=True)
def __unicode__(self): return u'%s' % (self.role)
diff --git a/planetstack/observer/deleters/network_deleter.py b/planetstack/observer/deleters/network_deleter.py
new file mode 100644
index 0000000..51f9fcb
--- /dev/null
+++ b/planetstack/observer/deleters/network_deleter.py
@@ -0,0 +1,17 @@
+from core.models import Network
+from observer.deleter import Deleter
+
+class NetworkDeleter(Deleter):
+ model='Network'
+
+ def call(self, pk):
+ network = Network.objects.get(pk=pk)
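+ # tear down in dependency order: detach the router interface first, then remove
+ # the subnet, router, and network before deleting the model itself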
+ if (network.router_id) and (network.subnet_id):
+ self.driver.delete_router_interface(network.router_id, network.subnet_id)
+ if network.subnet_id:
+ self.driver.delete_subnet(network.subnet_id)
+ if network.router_id:
+ self.driver.delete_router(network.router_id)
+ if network.network_id:
+ self.driver.delete_network(network.network_id)
+ network.delete()
diff --git a/planetstack/observer/deleters/network_sliver_deleter.py b/planetstack/observer/deleters/network_sliver_deleter.py
new file mode 100644
index 0000000..71ba040
--- /dev/null
+++ b/planetstack/observer/deleters/network_sliver_deleter.py
@@ -0,0 +1,13 @@
+from core.models import NetworkSliver
+from observer.deleter import Deleter
+
+class NetworkSliverDeleter(Deleter):
+ model='NetworkSliver'
+
+ def call(self, pk):
+ network_sliver = NetworkSliver.objects.get(pk=pk)
+ # handle openstack delete
+
+ network_sliver.delete()
+
+
diff --git a/planetstack/observer/deleters/site_deleter.py b/planetstack/observer/deleters/site_deleter.py
new file mode 100644
index 0000000..bb29c94
--- /dev/null
+++ b/planetstack/observer/deleters/site_deleter.py
@@ -0,0 +1,11 @@
+from core.models import Site
+from observer.deleter import Deleter
+
+class SiteDeleter(Deleter):
+ model='Site'
+
+ def call(self, pk):
+ site = Site.objects.get(pk=pk)
+ if site.tenant_id:
+ self.driver.delete_tenant(site.tenant_id)
+ site.delete()
diff --git a/planetstack/observer/deleters/slice_deleter.py b/planetstack/observer/deleters/slice_deleter.py
index 4cb0a72..6796d7a 100644
--- a/planetstack/observer/deleters/slice_deleter.py
+++ b/planetstack/observer/deleters/slice_deleter.py
@@ -1,9 +1,22 @@
-#from code.models import Slice
+from core.models import Slice
+from observer.deleter import Deleter
-class SliceDeleter:
+class SliceDeleter(Deleter):
model='Slice'
def call(self, pk):
- s = Slice.objects.get(pk=pk)
-
- # Proceed with delete
+ slice = Slice.objects.get(pk=pk)
+ self.driver.delete_router_interface(slice.router_id, slice.subnet_id)
+ self.driver.delete_subnet(slice.subnet_id)
+ self.driver.delete_router(slice.router_id)
+ self.driver.delete_network(slice.network_id)
+ self.driver.delete_tenant(slice.tenant_id)
+ # delete external route
+ subnet = None
+ subnets = self.driver.shell.quantum.list_subnets()['subnets']
+ for snet in subnets:
+ if snet['id'] == slice.subnet_id:
+ subnet = snet
+ if subnet:
+ self.driver.delete_external_route(subnet)
+ slice.delete()
diff --git a/planetstack/observer/deleters/sliver_deleter.py b/planetstack/observer/deleters/sliver_deleter.py
new file mode 100644
index 0000000..d76b533
--- /dev/null
+++ b/planetstack/observer/deleters/sliver_deleter.py
@@ -0,0 +1,11 @@
+from core.models import Sliver
+from observer.deleter import Deleter
+
+class SliverDeleter(Deleter):
+ model='Sliver'
+
+ def call(self, pk):
+ sliver = Sliver.objects.get(pk=pk)
+ if sliver.instance_id:
+ self.driver.destroy_instance(sliver.instance_id)
+ sliver.delete()
diff --git a/planetstack/observer/deleters/user_deleter.py b/planetstack/observer/deleters/user_deleter.py
new file mode 100644
index 0000000..f250993
--- /dev/null
+++ b/planetstack/observer/deleters/user_deleter.py
@@ -0,0 +1,11 @@
+from core.models import User
+from observer.deleter import Deleter
+
+class UserDeleter(Deleter):
+ model='User'
+
+ def call(self, pk):
+ user = User.objects.get(pk=pk)
+ if user.kuser_id:
+ self.driver.delete_user(user.kuser_id)
+ user.delete()
diff --git a/planetstack/observer/event_loop.py b/planetstack/observer/event_loop.py
index 671bdc3..7903ce4 100644
--- a/planetstack/observer/event_loop.py
+++ b/planetstack/observer/event_loop.py
@@ -8,7 +8,8 @@
from collections import defaultdict
from core.models import *
from django.db.models import F, Q
-from openstack.manager import OpenStackManager
+#from openstack.manager import OpenStackManager
+from openstack.driver import OpenStackDriver
from util.logger import Logger, logging, logger
#from timeout import timeout
from planetstack.config import Config
@@ -21,7 +22,14 @@
class StepNotReady(Exception):
pass
-def toposort(g, steps):
+def toposort(g, steps=None):
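+ # when no explicit step list is given, derive it from every node mentioned in
+ # the graph (both keys and the values they depend on)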
+ if (not steps):
+ keys = set(g.keys())
+ values = set({})
+ for v in g.values():
+ values=values | set(v)
+
+ steps=list(keys|values)
reverse = {}
for k,v in g.items():
@@ -53,16 +61,22 @@
marked.append(m)
except KeyError:
pass
- order.append(n)
+ if (n in steps):
+ order.append(n)
+
+ order.reverse()
+ order.extend(set(steps)-set(order))
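+ # steps that never appear in the dependency graph have no ordering constraints;
+ # append them at the end so they are still scheduled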
return order
class PlanetStackObserver:
- sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps]
+ sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,GarbageCollector]
def __init__(self):
# The Condition object that gets signalled by Feefie events
+ self.step_lookup = {}
self.load_sync_steps()
self.event_cond = threading.Condition()
+ self.driver = OpenStackDriver()
def wait_for_event(self, timeout):
self.event_cond.acquire()
@@ -76,15 +90,15 @@
self.event_cond.release()
def load_sync_steps(self):
- dep_path = Config().observer_dependency_path
+ dep_path = Config().observer_backend_dependency_graph
try:
# This contains dependencies between records, not sync steps
self.model_dependency_graph = json.loads(open(dep_path).read())
except Exception,e:
raise e
- backend_path = Config().observer_backend_dependency_path
try:
+ backend_path = Config().observer_pl_dependency_graph
# This contains dependencies between backend records
self.backend_dependency_graph = json.loads(open(backend_path).read())
except Exception,e:
@@ -93,6 +107,7 @@
provides_dict = {}
for s in self.sync_steps:
+ self.step_lookup[s.__name__] = s
for m in s.provides:
try:
provides_dict[m.__name__].append(s.__name__)
@@ -119,11 +134,11 @@
pass
# no dependencies, pass
- import pdb
- pdb.set_trace()
+ #import pdb
+ #pdb.set_trace()
if (self.backend_dependency_graph):
backend_dict = {}
- for s in sync_steps:
+ for s in self.sync_steps:
for m in s.serves:
backend_dict[m]=s.__name__
@@ -144,29 +159,29 @@
dependency_graph = step_graph
- self.ordered_steps = toposort(dependency_graph, self.sync_steps)
+ self.ordered_steps = toposort(dependency_graph, map(lambda s:s.__name__,self.sync_steps))
print "Order of steps=",self.ordered_steps
self.load_run_times()
- def check_duration(self):
+ def check_duration(self, step, duration):
try:
- if (duration > S.deadline):
- logger.info('Sync step %s missed deadline, took %.2f seconds'%(S.name,duration))
+ if (duration > step.deadline):
+ logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.__name__,duration))
except AttributeError:
# S doesn't have a deadline
pass
def update_run_time(self, step):
- self.last_run_times[step.name]=time.time()
+ self.last_run_times[step.__name__]=time.time()
def check_schedule(self, step):
- time_since_last_run = time.time() - self.last_run_times[step.name]
+ time_since_last_run = time.time() - self.last_run_times.get(step.__name__, 0)
try:
if (time_since_last_run < step.requested_interval):
raise StepNotReady
except AttributeError:
- logger.info('Step %s does not have requested_interval set'%step.name)
+ logger.info('Step %s does not have requested_interval set'%step.__name__)
raise StepNotReady
def load_run_times(self):
@@ -176,8 +191,7 @@
except:
self.last_run_times={}
for e in self.ordered_steps:
- self.last_run_times[e.name]=0
-
+ self.last_run_times[e]=0
def save_run_times(self):
@@ -185,19 +199,21 @@
open('/tmp/observer_run_times','w').write(run_times)
def check_class_dependency(self, step, failed_steps):
- for failed_step in failed_steps:
- if (failed_step in self.dependency_graph[step.name]):
- raise StepNotReady
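+ # a step depends on the model-level dependencies of every model it provides;
+ # if any of those matches a failed step, hold this step back for this pass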
+ step.dependencies = []
+ for obj in step.provides:
+ step.dependencies.extend(self.model_dependency_graph.get(obj.__name__, []))
+ for failed_step in failed_steps:
+ if (failed_step in step.dependencies):
+ raise StepNotReady
def run(self):
- if not self.manager.enabled or not self.manager.has_openstack:
+ if not self.driver.enabled or not self.driver.has_openstack:
return
-
while True:
try:
logger.info('Waiting for event')
tBeforeWait = time.time()
- self.wait_for_event(timeout=300)
+ self.wait_for_event(timeout=30)
logger.info('Observer woke up')
# Set of whole steps that failed
@@ -207,10 +223,19 @@
failed_step_objects = []
for S in self.ordered_steps:
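+ # ordered_steps now holds step names (from toposort); map each back to its class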
+ step = self.step_lookup[S]
start_time=time.time()
- sync_step = S()
- sync_step.dependencies = self.dependencies[sync_step.name]
+ sync_step = step(driver=self.driver)
+ sync_step.__name__ = step.__name__
+ sync_step.dependencies = []
+ try:
+ mlist = sync_step.provides
+
+ for m in mlist:
+ sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
+ except KeyError:
+ pass
sync_step.debug_mode = debug_mode
should_run = False
@@ -221,24 +246,28 @@
self.check_schedule(sync_step) # dont run sync_network_routes if time since last run < 1 hour
should_run = True
except StepNotReady:
- logging.info('Step not ready: %s'%sync_step.name)
- failed_steps.add(sync_step)
+ logging.info('Step not ready: %s'%sync_step.__name__)
+ failed_steps.append(sync_step)
except:
- failed_steps.add(sync_step)
+ failed_steps.append(sync_step)
if (should_run):
try:
duration=time.time() - start_time
# ********* This is the actual sync step
+ #import pdb
+ #pdb.set_trace()
failed_objects = sync_step(failed=failed_step_objects)
- check_deadline(sync_step, duration)
- failed_step_objects.extend(failed_objects)
+ self.check_duration(sync_step, duration)
+ if failed_objects:
+ failed_step_objects.extend(failed_objects)
self.update_run_time(sync_step)
except:
- failed_steps.add(S)
+ failed_steps.append(S)
self.save_run_times()
except:
logger.log_exc("Exception in observer run loop")
diff --git a/planetstack/observer/event_manager.py b/planetstack/observer/event_manager.py
index c4215ac..60b615a 100644
--- a/planetstack/observer/event_manager.py
+++ b/planetstack/observer/event_manager.py
@@ -1,8 +1,6 @@
import threading
import requests, json
-from core.models import *
-#from openstack.manager import OpenStackManager
from planetstack.config import Config
from observer.deleters import deleters
@@ -11,80 +9,80 @@
from fofum import Fofum
import json
-# decorator that marks dispatachable event methods
+# decorator that marks dispatchable event methods
def event(func):
- setattr(func, 'event', func.__name__)
- return func
+ setattr(func, 'event', func.__name__)
+ return func
class EventHandler:
- # This code is currently not in use.
- def __init__(self):
- pass #self.manager = OpenStackManager()
+ # This code is currently not in use.
+ def __init__(self):
+ pass
- @staticmethod
- def get_events():
- events = []
- for name in dir(EventHandler):
- attribute = getattr(EventHandler, name)
- if hasattr(attribute, 'event'):
- events.append(getattr(attribute, 'event'))
- return events
+ @staticmethod
+ def get_events():
+ events = []
+ for name in dir(EventHandler):
+ attribute = getattr(EventHandler, name)
+ if hasattr(attribute, 'event'):
+ events.append(getattr(attribute, 'event'))
+ return events
- def dispatch(self, event, *args, **kwds):
- if hasattr(self, event):
- return getattr(self, event)(*args, **kwds)
-
-
+ def dispatch(self, event, *args, **kwds):
+ if hasattr(self, event):
+ return getattr(self, event)(*args, **kwds)
+
+
class EventSender:
- def __init__(self,user=None,clientid=None):
- try:
- clid = Config().feefie_client_id
- user = Config().feefie_client_user
- except:
- clid = 'planetstack_core_team'
- user = 'pl'
+ def __init__(self,user=None,clientid=None):
+ try:
+ clid = Config().feefie_client_id
+ user = Config().feefie_client_user
+ except:
+ clid = 'planetstack_core_team'
+ user = 'pl'
- self.fofum = Fofum(user=user)
- self.fofum.make(clid)
+ self.fofum = Fofum(user=user)
+ self.fofum.make(clid)
- def fire(self,**args):
- self.fofum.fire(json.dumps(args))
+ def fire(self,**args):
+ self.fofum.fire(json.dumps(args))
class EventListener:
- def __init__(self,wake_up=None):
- self.handler = EventHandler()
- self.wake_up = wake_up
+ def __init__(self,wake_up=None):
+ self.handler = EventHandler()
+ self.wake_up = wake_up
- def handle_event(self, payload):
- payload_dict = json.loads(payload)
+ def handle_event(self, payload):
+ payload_dict = json.loads(payload)
- try:
- deletion = payload_dict['deletion_flag']
- if (deletion):
- model = payload_dict['model']
- pk = payload_dict['pk']
+ try:
+ deletion = payload_dict['deletion_flag']
+ if (deletion):
+ model = payload_dict['model']
+ pk = payload_dict['pk']
- for deleter in deleters[model]:
- deleter(pk)
- except:
- deletion = False
+ for deleter in deleters[model]:
+ deleter(pk)
+ except:
+ deletion = False
- if (not deletion and self.wake_up):
- self.wake_up()
-
+ if (not deletion and self.wake_up):
+ self.wake_up()
+
- def run(self):
- # This is our unique client id, to be used when firing and receiving events
- # It needs to be generated once and placed in the config file
+ def run(self):
+ # This is our unique client id, to be used when firing and receiving events
+ # It needs to be generated once and placed in the config file
- try:
- clid = Config().feefie_client_id
- user = Config().feefie_client_user
- except:
- clid = 'planetstack_core_team'
- user = 'pl'
+ try:
+ clid = Config().feefie_client_id
+ user = Config().feefie_client_user
+ except:
+ clid = 'planetstack_core_team'
+ user = 'pl'
- f = Fofum(user=user)
-
- listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
- listener_thread.start()
+ f = Fofum(user=user)
+
+ listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
+ listener_thread.start()
diff --git a/planetstack/observer/openstacksyncstep.py b/planetstack/observer/openstacksyncstep.py
index 3ce3c68..51b510f 100644
--- a/planetstack/observer/openstacksyncstep.py
+++ b/planetstack/observer/openstacksyncstep.py
@@ -2,16 +2,13 @@
import base64
from syncstep import SyncStep
-class OpenStackSyncStep:
- """ PlanetStack Sync step for copying data to OpenStack
- """
-
- def __init__(self, **args):
- super(SyncStep,self).__init__(**args)
- return
+class OpenStackSyncStep(SyncStep):
+ """ PlanetStack Sync step for copying data to OpenStack
+ """
+
+ def __init__(self, **args):
+ SyncStep.__init__(self, **args)
+ return
-
-
-
- def __call__(self):
- return self.call()
+ def __call__(self, **args):
+ return self.call(**args)
diff --git a/planetstack/observer/steps/__init__.py b/planetstack/observer/steps/__init__.py
index 6d7adb8..7954426 100644
--- a/planetstack/observer/steps/__init__.py
+++ b/planetstack/observer/steps/__init__.py
@@ -8,3 +8,5 @@
from .sync_sliver_ips import SyncSliverIps
from .sync_slivers import SyncSlivers
from .sync_users import SyncUsers
+from .sync_roles import SyncRoles
+from .garbage_collector import GarbageCollector
diff --git a/planetstack/observer/steps/garbage_collector.py b/planetstack/observer/steps/garbage_collector.py
index f03577c..5d434a0 100644
--- a/planetstack/observer/steps/garbage_collector.py
+++ b/planetstack/observer/steps/garbage_collector.py
@@ -1,11 +1,210 @@
import os
import base64
+import traceback
+from collections import defaultdict
+from django.db.models import F, Q
from planetstack.config import Config
+from util.logger import Logger, logging
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models import *
-class GarbageCollector(SyncStep):
- requested_interval = 86400
- provides=[]
+logger = Logger(logfile='observer.log', level=logging.INFO)
- def call(self):
- pass
-
+class GarbageCollector(OpenStackSyncStep):
+ requested_interval = 86400
+ provides=[]
+
+ def call(self, **args):
+ try:
+ #self.sync_roles()
+ self.gc_tenants()
+ self.gc_users()
+ self.gc_user_tenant_roles()
+ self.gc_slivers()
+ self.gc_sliver_ips()
+ self.gc_external_routes()
+ except:
+ traceback.print_exc()
+
+ def gc_roles(self):
+ """
+ Sync all roles that don't already exist in keystone, and remove keystone roles
+ that don't exist in planetstack.
+ """
+ # sync all roles that don't already exist in keystone
+ keystone_roles = self.driver.shell.keystone.roles.findall()
+ keystone_role_names = [kr.name for kr in keystone_roles]
+ pending_roles = Role.objects.all()
+ pending_role_names = [r.role_type for r in pending_roles]
+ # don't delete roles for now
+ """
+ # delete keystone roles that don't exist in planetstack
+ for keystone_role in keystone_roles:
+ if keystone_role.name == 'admin':
+ continue
+ if keystone_role.name not in pending_role_names:
+ try:
+ self.driver.delete_role({id: keystone_role.id})
+ except:
+ traceback.print_exc()
+ """
+
+ def gc_tenants(self):
+ """
+ Remove keystone tenants that don't have a corresponding Site or Slice record.
+ Only records that have been enacted (enacted != None) are considered.
+ """
+ # get all sites where enacted != null. We can assume these sites
+ # have previously been synced and need to be checked for deletion.
+ sites = Site.objects.filter(enacted__isnull=False)
+ site_dict = {}
+ for site in sites:
+ site_dict[site.login_base] = site
+
+ # get all slices where enacted != null. We can assume these slices
+ # have previously been synced and need to be checked for deletion.
+ slices = Slice.objects.filter(enacted__isnull=False)
+ slice_dict = {}
+ for slice in slices:
+ slice_dict[slice.name] = slice
+
+ # delete keystone tenants that don't have a site record
+ tenants = self.driver.shell.keystone.tenants.findall()
+ system_tenants = ['admin','service', 'invisible_to_admin']
+ for tenant in tenants:
+ if tenant.name in system_tenants:
+ continue
+ if tenant.name not in site_dict and tenant.name not in slice_dict:
+ try:
+ self.driver.delete_tenant(tenant.id)
+ logger.info("deleted tenant: %s" % (tenant))
+ except:
+ logger.log_exc("delete tenant failed: %s" % tenant)
+
+
+ def gc_users(self):
+ """
+ Remove keystone users that don't have a corresponding User record.
+ Only records that have been enacted (enacted != None) are considered.
+ """
+ # get all users where enacted != null. We can assume these users
+ # have previously been synced and need to be checked for deletion.
+ users = User.objects.filter(enacted__isnull=False)
+ user_dict = {}
+ for user in users:
+ user_dict[user.kuser_id] = user
+
+ # delete keystone users that don't have a user record
+ system_users = ['admin', 'nova', 'quantum', 'glance', 'cinder', 'swift', 'service', 'demo']
+ users = self.driver.shell.keystone.users.findall()
+ for user in users:
+ if user.name in system_users:
+ continue
+ if user.id not in user_dict:
+ try:
+ self.driver.delete_user(user.id)
+ logger.info("deleted user: %s" % user)
+ except:
+ logger.log_exc("delete user failed: %s" % user)
+
+
+ def gc_user_tenant_roles(self):
+ """
+ Remove keystone user-tenant role assignments that are no longer backed by a
+ SitePrivilege or SlicePrivilege record. Only enacted records are considered.
+ """
+ # get all site privileges and slice memberships that have been enacted
+ user_tenant_roles = defaultdict(list)
+ for site_priv in SitePrivilege.objects.filter(enacted__isnull=False):
+ user_tenant_roles[(site_priv.user.kuser_id, site_priv.site.tenant_id)].append(site_priv.role.role)
+ for slice_memb in SlicePrivilege.objects.filter(enacted__isnull=False):
+ user_tenant_roles[(slice_memb.user.kuser_id, slice_memb.slice.tenant_id)].append(slice_memb.role.role)
+
+ # Some user tenant roles aren't stored in planetstack but must be preserved.
+ # Roles that fall in this category are:
+ # 1. a user's role at their home site
+ # 2. a user's role at a slice they've created
+ # Keep track of all roles that must be preserved.
+ users = User.objects.all()
+ preserved_roles = {}
+ for user in users:
+ tenant_ids = [s['tenant_id'] for s in user.slices.values()]
+ if user.site:
+ tenant_ids.append(user.site.tenant_id)
+ preserved_roles[user.kuser_id] = tenant_ids
+
+
+ # begin removing user tenant roles from keystone. This is stored in the
+ # Metadata table.
+ for metadata in self.driver.shell.keystone_db.get_metadata():
+ # skip admin roles
+ if metadata.user_id == self.driver.admin_user.id:
+ continue
+ # skip preserved tenant ids
+ if metadata.user_id in preserved_roles and \
+ metadata.tenant_id in preserved_roles[metadata.user_id]:
+ continue
+ # get roles for user at this tenant
+ user_tenant_role_ids = user_tenant_roles.get((metadata.user_id, metadata.tenant_id), [])
+
+ if user_tenant_role_ids:
+ # The user has roles at the tenant. Check if roles need to
+ # be updated.
+ user_keystone_role_ids = metadata.data.get('roles', [])
+ for role_id in user_keystone_role_ids:
+ if role_id not in user_tenant_role_ids:
+ user_keystone_role_ids.pop(user_keystone_role_ids.index(role_id))
+ else:
+ # The user has no roles at this tenant.
+ metadata.data['roles'] = []
+ #session.add(metadata)
+ logger.info("pruning metadata for %s at %s" % (metadata.user_id, metadata.tenant_id))
+
+ def gc_slivers(self):
+ """
+ Remove nova instances that don't have a corresponding Sliver record.
+ Only records that have been enacted (enacted != None) are considered.
+ """
+ # get all slivers where enacted != null. We can assume these slivers
+ # have previously been synced and need to be checked for deletion.
+ slivers = Sliver.objects.filter(enacted__isnull=False)
+ sliver_dict = {}
+ for sliver in slivers:
+ sliver_dict[sliver.instance_id] = sliver
+
+ # delete instances that don't have a corresponding sliver record
+ ctx = self.driver.shell.nova_db.ctx
+ instances = self.driver.shell.nova_db.instance_get_all(ctx)
+ for instance in instances:
+ if instance.uuid not in sliver_dict:
+ try:
+ # lookup tenant and update context
+ tenant = self.driver.shell.keystone.tenants.find(id=instance.project_id)
+ driver = self.driver.client_driver(tenant=tenant.name)
+ driver.destroy_instance(instance.uuid)
+ logger.info("destroyed sliver: %s" % (instance))
+ except:
+ logger.log_exc("destroy sliver failed: %s" % instance)
+
+
+ def gc_sliver_ips(self):
+ """
+ Update ips that have changed.
+ """
+ # fill in null ip addresses
+ slivers = Sliver.objects.filter(ip=None)
+ for sliver in slivers:
+ # update connection
+ driver = self.driver.client_driver(tenant=sliver.slice.name)
+ servers = driver.shell.nova.servers.findall(id=sliver.instance_id)
+ if not servers:
+ continue
+ server = servers[0]
+ ips = server.addresses.get(sliver.slice.name, [])
+ if ips and sliver.ip != ips[0]['addr']:
+ sliver.ip = ips[0]['addr']
+ sliver.save()
+ logger.info("updated sliver ip: %s %s" % (sliver, ips[0]))
+
+ def gc_external_routes(self):
+ pass
diff --git a/planetstack/observer/steps/sync_external_routes.py b/planetstack/observer/steps/sync_external_routes.py
index 6c22c8b..334d19d 100644
--- a/planetstack/observer/steps/sync_external_routes.py
+++ b/planetstack/observer/steps/sync_external_routes.py
@@ -1,20 +1,18 @@
import os
import base64
from planetstack.config import Config
-from observer.syncstep import SyncStep
+from observer.openstacksyncstep import OpenStackSyncStep
-class SyncExternalRoutes(SyncStep):
- # XXX what does this provide?
- requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
+class SyncExternalRoutes(OpenStackSyncStep):
+ # XXX what does this provide?
+ provides=[]
+ requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
- def __init__(self):
- pass
-
- def call(self):
- routes = self.manager.driver.get_external_routes()
- subnets = self.manager.driver.shell.quantum.list_subnets()['subnets']
- for subnet in subnets:
- try:
- self.manager.driver.add_external_route(subnet, routes)
- except:
- logger.log_exc("failed to add external route for subnet %s" % subnet)
+ def call(self, **args):
+ routes = self.driver.get_external_routes()
+ subnets = self.driver.shell.quantum.list_subnets()['subnets']
+ for subnet in subnets:
+ try:
+ self.driver.add_external_route(subnet, routes)
+ except:
+ logger.log_exc("failed to add external route for subnet %s" % subnet)
diff --git a/planetstack/observer/steps/sync_network_slivers.py b/planetstack/observer/steps/sync_network_slivers.py
index 9e24fae..09dc7ed 100644
--- a/planetstack/observer/steps/sync_network_slivers.py
+++ b/planetstack/observer/steps/sync_network_slivers.py
@@ -1,75 +1,79 @@
import os
import base64
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.network import *
class SyncNetworkSlivers(OpenStackSyncStep):
- requested_interval = 3600
- provides=[NetworkSliver]
+ requested_interval = 3600
+ provides=[NetworkSliver]
- def call(self):
- networkSlivers = NetworkSliver.objects.all()
- networkSlivers_by_id = {}
- networkSlivers_by_port = {}
- for networkSliver in networkSlivers:
- networkSlivers_by_id[networkSliver.id] = networkSliver
- networkSlivers_by_port[networkSliver.port_id] = networkSliver
+ def fetch_pending(self):
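+ # pending = records that have never been enacted, or were updated after they
+ # were last enacted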
+ return NetworkSliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
- networks = Network.objects.all()
- networks_by_id = {}
- for network in networks:
- networks_by_id[network.network_id] = network
+ def call(self, failed=[]):
+ networkSlivers = NetworkSliver.objects.all()
+ networkSlivers_by_id = {}
+ networkSlivers_by_port = {}
+ for networkSliver in networkSlivers:
+ networkSlivers_by_id[networkSliver.id] = networkSliver
+ networkSlivers_by_port[networkSliver.port_id] = networkSliver
- slivers = Sliver.objects.all()
- slivers_by_instance_id = {}
- for sliver in slivers:
- slivers_by_instance_id[sliver.instance_id] = sliver
+ networks = Network.objects.all()
+ networks_by_id = {}
+ for network in networks:
+ networks_by_id[network.network_id] = network
- ports = self.manager.driver.shell.quantum.list_ports()["ports"]
- for port in ports:
- if port["id"] in networkSlivers_by_port:
- # we already have it
- print "already accounted for port", port["id"]
- continue
+ slivers = Sliver.objects.all()
+ slivers_by_instance_id = {}
+ for sliver in slivers:
+ slivers_by_instance_id[sliver.instance_id] = sliver
- if port["device_owner"] != "compute:nova":
- # we only want the ports that connect to instances
- continue
+ ports = self.driver.shell.quantum.list_ports()["ports"]
+ for port in ports:
+ if port["id"] in networkSlivers_by_port:
+ # we already have it
+ print "already accounted for port", port["id"]
+ continue
- network = networks_by_id.get(port['network_id'], None)
- if not network:
- #print "no network for port", port["id"], "network", port["network_id"]
- continue
+ if port["device_owner"] != "compute:nova":
+ # we only want the ports that connect to instances
+ continue
- sliver = slivers_by_instance_id.get(port['device_id'], None)
- if not sliver:
- print "no sliver for port", port["id"], "device_id", port['device_id']
- continue
+ network = networks_by_id.get(port['network_id'], None)
+ if not network:
+ #print "no network for port", port["id"], "network", port["network_id"]
+ continue
- if network.template.sharedNetworkId is not None:
- # If it's a shared network template, then more than one network
- # object maps to the quantum network. We have to do a whole bunch
- # of extra work to find the right one.
- networks = network.template.network_set.all()
- network = None
- for candidate_network in networks:
- if (candidate_network.owner == sliver.slice):
- print "found network", candidate_network
- network = candidate_network
+ sliver = slivers_by_instance_id.get(port['device_id'], None)
+ if not sliver:
+ print "no sliver for port", port["id"], "device_id", port['device_id']
+ continue
- if not network:
- print "failed to find the correct network for a shared template for port", port["id"], "network", port["network_id"]
- continue
+ if network.template.sharedNetworkId is not None:
+ # If it's a shared network template, then more than one network
+ # object maps to the quantum network. We have to do a whole bunch
+ # of extra work to find the right one.
+ networks = network.template.network_set.all()
+ network = None
+ for candidate_network in networks:
+ if (candidate_network.owner == sliver.slice):
+ print "found network", candidate_network
+ network = candidate_network
- if not port["fixed_ips"]:
- print "port", port["id"], "has no fixed_ips"
- continue
+ if not network:
+ print "failed to find the correct network for a shared template for port", port["id"], "network", port["network_id"]
+ continue
-# print "XXX", port
+ if not port["fixed_ips"]:
+ print "port", port["id"], "has no fixed_ips"
+ continue
- ns = NetworkSliver(network=network,
- sliver=sliver,
- ip=port["fixed_ips"][0]["ip_address"],
- port_id=port["id"])
- ns.save()
+# print "XXX", port
+
+ ns = NetworkSliver(network=network,
+ sliver=sliver,
+ ip=port["fixed_ips"][0]["ip_address"],
+ port_id=port["id"])
+ ns.save()
diff --git a/planetstack/observer/steps/sync_networks.py b/planetstack/observer/steps/sync_networks.py
index e64f0a4..82d6bc7 100644
--- a/planetstack/observer/steps/sync_networks.py
+++ b/planetstack/observer/steps/sync_networks.py
@@ -1,52 +1,62 @@
import os
import base64
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.network import *
class SyncNetworks(OpenStackSyncStep):
- provides=[Network]
- requested_interval = 0
+ provides=[Network]
+ requested_interval = 0
- def save_network(self, network):
- if not network.network_id:
- if network.template.sharedNetworkName:
- network.network_id = network.template.sharedNetworkId
- (network.subnet_id, network.subnet) = self.driver.get_network_subnet(network.network_id)
- else:
- network_name = network.name
+ def fetch_pending(self):
+ return Network.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
- # create network
- os_network = self.driver.create_network(network_name, shared=True)
- network.network_id = os_network['id']
+ def save_network(self, network):
+ if not network.network_id:
+ if network.template.sharedNetworkName:
+ network.network_id = network.template.sharedNetworkId
+ (network.subnet_id, network.subnet) = self.driver.get_network_subnet(network.network_id)
+ else:
+ network_name = network.name
- # create router
- router = self.driver.create_router(network_name)
- network.router_id = router['id']
+ # create network
+ os_network = self.driver.create_network(network_name, shared=True)
+ network.network_id = os_network['id']
- # create subnet
- next_subnet = self.get_next_subnet()
- cidr = str(next_subnet.cidr)
- ip_version = next_subnet.version
- start = str(next_subnet[2])
- end = str(next_subnet[-2])
- subnet = self.driver.create_subnet(name=network_name,
- network_id = network.network_id,
- cidr_ip = cidr,
- ip_version = ip_version,
- start = start,
- end = end)
- network.subnet = cidr
- network.subnet_id = subnet['id']
+ # create router
+ router = self.driver.create_router(network_name)
+ network.router_id = router['id']
- def sync_record(self, site):
- if network.owner and network.owner.creator:
- try:
- # update manager context
- self.driver.init_caller(network.owner.creator, network.owner.name)
- self.save_network(network)
- logger.info("saved network: %s" % (network))
- except Exception,e:
- logger.log_exc("save network failed: %s" % network)
- raise e
+ # create subnet
+ next_subnet = self.get_next_subnet()
+ cidr = str(next_subnet.cidr)
+ ip_version = next_subnet.version
+ start = str(next_subnet[2])
+ end = str(next_subnet[-2])
+ subnet = self.driver.create_subnet(name=network_name,
+ network_id = network.network_id,
+ cidr_ip = cidr,
+ ip_version = ip_version,
+ start = start,
+ end = end)
+ network.subnet = cidr
+ network.subnet_id = subnet['id']
+ # add subnet as interface to slice's router
+ self.driver.add_router_interface(router['id'], subnet['id'])
+ # add external route
+ self.driver.add_external_route(subnet)
+
+ def sync_record(self, network):
+ if network.owner and network.owner.creator:
+ try:
+ # update manager context
+ real_driver = self.driver
+ self.driver = self.driver.client_driver(network.owner.creator, network.owner.name)
+ self.save_network(network)
+ self.driver = real_driver
+ logger.info("saved network: %s" % (network))
+ except Exception,e:
+ logger.log_exc("save network failed: %s" % network)
+ raise e
diff --git a/planetstack/observer/steps/sync_roles.py b/planetstack/observer/steps/sync_roles.py
new file mode 100644
index 0000000..6f7373a
--- /dev/null
+++ b/planetstack/observer/steps/sync_roles.py
@@ -0,0 +1,40 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.role import Role
+from core.models.site import SiteRole
+from core.models.slice import SliceRole
+from core.models.deployment import DeploymentRole
+
+class SyncRoles(OpenStackSyncStep):
+ provides=[Role]
+ requested_interval=0
+
+ def fetch_pending(self):
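+ # roles live in three tables (site, slice, deployment); merge the pending
+ # records from each so a single sync step handles them all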
+ site_roles = SiteRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ slice_roles = SliceRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ deployment_roles = DeploymentRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ roles = []
+ for site_role in site_roles:
+ roles.append(site_role)
+ for slice_role in slice_roles:
+ roles.append(slice_role)
+ for deployment_role in deployment_roles:
+ roles.append(deployment_role)
+
+ return roles
+
+
+ def sync_record(self, role):
+ save_role = False
+ if not role.krole_id:
+ krole = self.driver.create_role(role.role)
+ role.krole_id = krole.id
+ save_role = True
+
+ if (save_role):
+ role.save()
+
diff --git a/planetstack/observer/steps/sync_site_privileges.py b/planetstack/observer/steps/sync_site_privileges.py
index ac0dbac..e3dde26 100644
--- a/planetstack/observer/steps/sync_site_privileges.py
+++ b/planetstack/observer/steps/sync_site_privileges.py
@@ -1,15 +1,19 @@
import os
import base64
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.site import *
class SyncSitePrivileges(OpenStackSyncStep):
- requested_interval=0
+ requested_interval=0
+ provides=[SitePrivilege]
- provides=[SitePrivilege]
- def sync_record(self, user):
- if site_priv.user.kuser_id and site_priv.site.tenant_id:
- self.driver.add_user_role(site_priv.user.kuser_id,
- site_priv.site.tenant_id,
- site_priv.role.role_type)
+ def fetch_pending(self):
+ return SitePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, site_priv):
+ if site_priv.user.kuser_id and site_priv.site.tenant_id:
+ self.driver.add_user_role(site_priv.user.kuser_id,
+ site_priv.site.tenant_id,
+ site_priv.role.role)
diff --git a/planetstack/observer/steps/sync_sites.py b/planetstack/observer/steps/sync_sites.py
index 1f7a0f8..2013c6d 100644
--- a/planetstack/observer/steps/sync_sites.py
+++ b/planetstack/observer/steps/sync_sites.py
@@ -1,29 +1,34 @@
import os
import base64
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.site import Site
class SyncSites(OpenStackSyncStep):
- provides=[Site]
- requested_interval=0
- def sync_record(self, site):
- save_site = False
- if not site.tenant_id:
- tenant = self.driver.create_tenant(tenant_name=site.login_base,
- description=site.name,
- enabled=site.enabled)
- site.tenant_id = tenant.id
- save_site = True
- # XXX - What's caller?
- # self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+ provides=[Site]
+ requested_interval=0
- # update the record
- if site.id and site.tenant_id:
- self.driver.update_tenant(site.tenant_id,
- description=site.name,
- enabled=site.enabled)
+ def fetch_pending(self):
+ return Site.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
- if (save_site):
- site.save() #
+ def sync_record(self, site):
+ save_site = False
+ if not site.tenant_id:
+ tenant = self.driver.create_tenant(tenant_name=site.login_base,
+ description=site.name,
+ enabled=site.enabled)
+ site.tenant_id = tenant.id
+ save_site = True
+ # XXX - What's caller?
+ # self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+
+ # update the record
+ if site.id and site.tenant_id:
+ self.driver.update_tenant(site.tenant_id,
+ description=site.name,
+ enabled=site.enabled)
+
+ if (save_site):
+ site.save() #
diff --git a/planetstack/observer/steps/sync_slice_memberships.py b/planetstack/observer/steps/sync_slice_memberships.py
index 66953f1..d0936c4 100644
--- a/planetstack/observer/steps/sync_slice_memberships.py
+++ b/planetstack/observer/steps/sync_slice_memberships.py
@@ -1,14 +1,19 @@
import os
import base64
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.slice import *
class SyncSliceMemberships(OpenStackSyncStep):
- requested_interval=0
- provides=[SliceMembership]
- def sync_record(self, user):
- if slice_memb.user.kuser_id and slice_memb.slice.tenant_id:
- self.driver.add_user_role(slice_memb.user.kuser_id,
- slice_memb.slice.tenant_id,
- slice_memb.role.role_type)
+ requested_interval=0
+ provides=[SlicePrivilege]
+
+ def fetch_pending(self):
+ return SlicePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, slice_memb):
+ if slice_memb.user.kuser_id and slice_memb.slice.tenant_id:
+ self.driver.add_user_role(slice_memb.user.kuser_id,
+ slice_memb.slice.tenant_id,
+ slice_memb.role.role)
diff --git a/planetstack/observer/steps/sync_slices.py b/planetstack/observer/steps/sync_slices.py
index 81ed925..53dc06b 100644
--- a/planetstack/observer/steps/sync_slices.py
+++ b/planetstack/observer/steps/sync_slices.py
@@ -1,58 +1,73 @@
import os
import base64
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.slice import Slice
class SyncSlices(OpenStackSyncStep):
- provides=[Slice]
- requested_interval=0
- def sync_record(self, slice):
- if not slice.tenant_id:
- nova_fields = {'tenant_name': slice.name,
- 'description': slice.description,
- 'enabled': slice.enabled}
- tenant = self.driver.create_tenant(**nova_fields)
- slice.tenant_id = tenant.id
+ provides=[Slice]
+ requested_interval=0
- # XXX give caller an admin role at the tenant they've created
- self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+ def fetch_pending(self):
+ return Slice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
- # refresh credentials using this tenant
- self.driver.shell.connect(username=self.driver.shell.keystone.username,
- password=self.driver.shell.keystone.password,
- tenant=tenant.name)
+ def get_next_subnet(self):
+ # limit ourself to 10.0.x.x for now
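+ # find the highest 10.0.x.x/24 already allocated and pick the next /24 block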
+ valid_subnet = lambda net: net.startswith('10.0')
+ subnets = self.driver.shell.quantum.list_subnets()['subnets']
+ ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+ if valid_subnet(subnet['cidr'])]
+ ints.sort()
+ last_ip = IPAddress(ints[-1])
+ last_network = IPNetwork(str(last_ip) + "/24")
+ next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
+ return next_network
- # create network
- network = self.driver.create_network(slice.name)
- slice.network_id = network['id']
+ def sync_record(self, slice):
+ if not slice.tenant_id:
+ nova_fields = {'tenant_name': slice.name,
+ 'description': slice.description,
+ 'enabled': slice.enabled}
+ tenant = self.driver.create_tenant(**nova_fields)
+ slice.tenant_id = tenant.id
- # create router
- router = self.driver.create_router(slice.name)
- slice.router_id = router['id']
+ # XXX give caller an admin role at the tenant they've created
+ self.driver.add_user_role(slice.creator.kuser_id, tenant.id, 'admin')
- # create subnet
- next_subnet = self.get_next_subnet()
- cidr = str(next_subnet.cidr)
- ip_version = next_subnet.version
- start = str(next_subnet[2])
- end = str(next_subnet[-2])
- subnet = self.driver.create_subnet(name=slice.name,
- network_id = network['id'],
- cidr_ip = cidr,
- ip_version = ip_version,
- start = start,
- end = end)
- slice.subnet_id = subnet['id']
- # add subnet as interface to slice's router
- self.driver.add_router_interface(router['id'], subnet['id'])
- # add external route
- self.driver.add_external_route(subnet)
+ # refresh credentials using this tenant
+ client_driver = self.driver.client_driver(tenant=tenant.name)
+
+ # create network
+ network = client_driver.create_network(slice.name)
+ slice.network_id = network['id']
+
+ # create router
+ router = client_driver.create_router(slice.name)
+ slice.router_id = router['id']
+
+ # create subnet
+ next_subnet = self.get_next_subnet()
+ cidr = str(next_subnet.cidr)
+ ip_version = next_subnet.version
+ start = str(next_subnet[2])
+ end = str(next_subnet[-2])
+ subnet = client_driver.create_subnet(name=slice.name,
+ network_id = network['id'],
+ cidr_ip = cidr,
+ ip_version = ip_version,
+ start = start,
+ end = end)
+ slice.subnet_id = subnet['id']
+ # add subnet as interface to slice's router
+ client_driver.add_router_interface(router['id'], subnet['id'])
+ # add external route
+ client_driver.add_external_route(subnet)
- if slice.id and slice.tenant_id:
- self.driver.update_tenant(slice.tenant_id,
- description=slice.description,
- enabled=slice.enabled)
+ if slice.id and slice.tenant_id:
+ client_driver.update_tenant(slice.tenant_id,
+ description=slice.description,
+ enabled=slice.enabled)
- slice.save()
+ slice.save()
diff --git a/planetstack/observer/steps/sync_sliver_ips.py b/planetstack/observer/steps/sync_sliver_ips.py
index d69fd5d..2d7f1f8 100644
--- a/planetstack/observer/steps/sync_sliver_ips.py
+++ b/planetstack/observer/steps/sync_sliver_ips.py
@@ -1,25 +1,27 @@
import os
import base64
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.sliver import Sliver
class SyncSliverIps(OpenStackSyncStep):
- provides=[Sliver]
- requested_interval=0
- def fetch_pending(self):
- slivers = Sliver.objects.filter(ip=None)
- return slivers
+ provides=[Sliver]
+ requested_interval=0
- def sync_record(self, sliver):
- self.manager.init_admin(tenant=sliver.slice.name)
- servers = self.manager.driver.shell.nova.servers.findall(id=sliver.instance_id)
- if not servers:
- return
- server = servers[0]
- ips = server.addresses.get(sliver.slice.name, [])
- if not ips:
- return
- sliver.ip = ips[0]['addr']
- sliver.save()
- logger.info("saved sliver ip: %s %s" % (sliver, ips[0]))
+ def fetch_pending(self):
+ slivers = Sliver.objects.filter(ip=None)
+ return slivers
+
+ def sync_record(self, sliver):
+ driver = self.driver.client_driver(tenant=sliver.slice.name)
+ servers = driver.shell.nova.servers.findall(id=sliver.instance_id)
+ if not servers:
+ return
+ server = servers[0]
+ ips = server.addresses.get(sliver.slice.name, [])
+ if not ips:
+ return
+ sliver.ip = ips[0]['addr']
+ sliver.save()
+ logger.info("saved sliver ip: %s %s" % (sliver, ips[0]))
diff --git a/planetstack/observer/steps/sync_slivers.py b/planetstack/observer/steps/sync_slivers.py
index adab39d..900840d 100644
--- a/planetstack/observer/steps/sync_slivers.py
+++ b/planetstack/observer/steps/sync_slivers.py
@@ -1,29 +1,34 @@
import os
import base64
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.sliver import Sliver
class SyncSlivers(OpenStackSyncStep):
- provides=[Sliver]
- requested_interval=0
- def sync_record(self, slice):
- if not sliver.instance_id:
- nics = self.get_requested_networks(sliver.slice)
- file("/tmp/scott-manager","a").write("slice: %s\nreq: %s\n" % (str(sliver.slice.name), str(nics)))
- slice_memberships = SliceMembership.objects.filter(slice=sliver.slice)
- pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
- pubkeys.append(sliver.creator.public_key)
- instance = self.driver.spawn_instance(name=sliver.name,
- key_name = sliver.creator.keyname,
- image_id = sliver.image.image_id,
- hostname = sliver.node.name,
- pubkeys = pubkeys,
- nics = nics )
- sliver.instance_id = instance.id
- sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+ provides=[Sliver]
+ requested_interval=0
- if sliver.instance_id and ("numberCores" in sliver.changed_fields):
- self.driver.update_instance_metadata(sliver.instance_id, {"cpu_cores": str(sliver.numberCores)})
+ def fetch_pending(self):
+ return Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
- sliver.save()
+ def sync_record(self, sliver):
+ if not sliver.instance_id:
+ nics = self.get_requested_networks(sliver.slice)
+ file("/tmp/scott-manager","a").write("slice: %s\nreq: %s\n" % (str(sliver.slice.name), str(nics)))
+ slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+ pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
+ pubkeys.append(sliver.creator.public_key)
+ instance = self.driver.spawn_instance(name=sliver.name,
+ key_name = sliver.creator.keyname,
+ image_id = sliver.image.image_id,
+ hostname = sliver.node.name,
+ pubkeys = pubkeys,
+ nics = nics )
+ sliver.instance_id = instance.id
+ sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+
+ if sliver.instance_id and ("numberCores" in sliver.changed_fields):
+ self.driver.update_instance_metadata(sliver.instance_id, {"cpu_cores": str(sliver.numberCores)})
+
+ sliver.save()
diff --git a/planetstack/observer/steps/sync_users.py b/planetstack/observer/steps/sync_users.py
index 3f509ef..25f093e 100644
--- a/planetstack/observer/steps/sync_users.py
+++ b/planetstack/observer/steps/sync_users.py
@@ -1,35 +1,42 @@
import os
import base64
+import hashlib
+from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.user import User
class SyncUsers(OpenStackSyncStep):
- provides=[User]
- requested_interval=0
- def sync_record(self, user):
- name = user.email[:user.email.find('@')]
- user_fields = {'name': name,
- 'email': user.email,
- 'password': hashlib.md5(user.password).hexdigest()[:6],
- 'enabled': True}
- if not user.kuser_id:
- keystone_user = self.driver.create_user(**user_fields)
- user.kuser_id = keystone_user.id
- else:
- self.driver.update_user(user.kuser_id, user_fields)
+ provides=[User]
+ requested_interval=0
- if user.site:
- self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'user')
- if user.is_admin:
- self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'admin')
- else:
- # may have admin role so attempt to remove it
- self.driver.delete_user_role(user.kuser_id, user.site.tenant_id, 'admin')
+ def fetch_pending(self):
+ return User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
- if user.public_key:
- self.init_caller(user, user.site.login_base)
- self.save_key(user.public_key, user.keyname)
- self.init_admin()
+ def sync_record(self, user):
+ name = user.email[:user.email.find('@')]
+ user_fields = {'name': name,
+ 'email': user.email,
+ 'password': hashlib.md5(user.password).hexdigest()[:6],
+ 'enabled': True}
+ if not user.kuser_id:
+ keystone_user = self.driver.create_user(**user_fields)
+ user.kuser_id = keystone_user.id
+ else:
+ self.driver.update_user(user.kuser_id, user_fields)
- user.save()
+ if user.site:
+ self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'user')
+ if user.is_admin:
+ self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'admin')
+ else:
+ # may have admin role so attempt to remove it
+ self.driver.delete_user_role(user.kuser_id, user.site.tenant_id, 'admin')
+
+ if user.public_key:
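+ # register the user's public key as a nova keypair under their own credentials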
+ driver = self.driver.client_driver(caller=user, tenant=user.site.login_base)
+ key_fields = {'name': user.keyname,
+ 'public_key': user.public_key}
+ driver.create_keypair(**key_fields)
+
+ user.save()
diff --git a/planetstack/observer/syncstep.py b/planetstack/observer/syncstep.py
index 68e9f99..4116d65 100644
--- a/planetstack/observer/syncstep.py
+++ b/planetstack/observer/syncstep.py
@@ -1,61 +1,65 @@
import os
import base64
+from datetime import datetime
from planetstack.config import Config
class FailedDependency(Exception):
- pass
+ pass
class SyncStep:
- """ A PlanetStack Sync step.
+ """ A PlanetStack Sync step.
- Attributes:
- psmodel Model name the step synchronizes
- dependencies list of names of models that must be synchronized first if the current model depends on them
- """
- slow=False
- def get_prop(prop):
- try:
- sync_config_dir = Config().sync_config_dir
- except:
- sync_config_dir = '/etc/planetstack/sync'
- prop_config_path = '/'.join(sync_config_dir,self.name,prop)
- return open(prop_config_path).read().rstrip()
+ Attributes:
+ psmodel Model name the step synchronizes
+ dependencies list of names of models that must be synchronized first if the current model depends on them
+ """
+ slow=False
+ def get_prop(self, prop):
+ try:
+ sync_config_dir = Config().sync_config_dir
+ except:
+ sync_config_dir = '/etc/planetstack/sync'
+ prop_config_path = os.path.join(sync_config_dir, self.name, prop)
+ return open(prop_config_path).read().rstrip()
- def __init__(self, **args):
- """Initialize a sync step
- Keyword arguments:
- name -- Name of the step
- provides -- PlanetStack models sync'd by this step
- """
- dependencies = []
- try:
- self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
- except:
- self.soft_deadline = 5 # 5 seconds
+ def __init__(self, **args):
+ """Initialize a sync step
+ Keyword arguments:
+ name -- Name of the step
+ provides -- PlanetStack models sync'd by this step
+ """
+ dependencies = []
+ self.driver = args.get('driver')
+ try:
+ self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
+ except:
+ self.soft_deadline = 5 # 5 seconds
- return
+ return
- def fetch_pending(self):
- return Sliver.objects.filter(ip=None)
-
- def check_dependencies(self, obj):
- for dep in self.dependencies:
- peer_object = getattr(obj, dep.name.lowercase())
- if (peer_object.pk==dep.pk):
- raise DependencyFailed
+ def fetch_pending(self):
+ return []
+ #return Sliver.objects.filter(ip=None)
+
+ def check_dependencies(self, obj, failed):
+ for dep in self.dependencies:
+ peer_object = getattr(obj, dep.lower())
+ if (peer_object.pk==failed.pk):
+ raise FailedDependency
- def call(self, failed=[]):
- pending = self.fetch_pending()
- for o in pending:
- if (not self.depends_on(o, failed)):
- try:
- check_dependencies(o) # Raises exception if failed
- self.sync_record(o)
- o.enacted = datetime.now() # Is this the same timezone? XXX
- o.save(update_fields=['enacted'])
- except:
- failed.append(o)
- return failed
+ def call(self, failed=[]):
+ pending = self.fetch_pending()
+ for o in pending:
+ try:
+ for f in failed:
+ self.check_dependencies(o,f) # Raises exception if failed
+ self.sync_record(o)
+ o.enacted = datetime.now() # Is this the same timezone? XXX
+ o.save(update_fields=['enacted'])
+ except:
+ failed.append(o)
- def __call__(self):
- return self.call()
+ return failed
+
+ def __call__(self, **args):
+ return self.call(**args)
diff --git a/planetstack/observer/toposort.py b/planetstack/observer/toposort.py
index 34bf6f5..959cea3 100755
--- a/planetstack/observer/toposort.py
+++ b/planetstack/observer/toposort.py
@@ -9,7 +9,15 @@
from datetime import datetime
from collections import defaultdict
-def toposort(g, steps):
+def toposort(g, steps=None):
+ if (not steps):
+ keys = set(g.keys())
+ values = set({})
+ for v in g.values():
+ values=values | set(v)
+
+ steps=list(keys|values)
+
reverse = {}
for k,v in g.items():
@@ -24,15 +32,15 @@
if not reverse.has_key(k):
sources.append(k)
-
for k,v in reverse.iteritems():
if (not v):
sources.append(k)
order = []
marked = []
+
while sources:
- n = sources.pop()
+ n = sources.pop(0)
try:
for m in g[n]:
if m not in marked:
@@ -43,6 +51,12 @@
if (n in steps):
order.append(n)
+ order.reverse()
+
return order
-print toposort({'a':'b','b':'c','c':'d','d':'c'},['d','c','b','a'])
+graph_file=open('model-deps').read()
+g = json.loads(graph_file)
+print toposort(g)
+
+#print toposort({'a':'b','b':'c','c':'d','d':'c'},['d','c','b','a'])
diff --git a/planetstack/openstack/driver.py b/planetstack/openstack/driver.py
index 0e5fbf0..8224c17 100644
--- a/planetstack/openstack/driver.py
+++ b/planetstack/openstack/driver.py
@@ -1,6 +1,14 @@
import commands
+import hashlib
from planetstack.config import Config
-from openstack.client import OpenStackClient
+
+try:
+ from openstack.client import OpenStackClient
+ has_openstack = True
+except:
+ has_openstack = False
+
+manager_enabled = Config().api_nova_enabled
class OpenStackDriver:
@@ -18,6 +26,24 @@
else:
self.shell = OpenStackClient()
+ self.enabled = manager_enabled
+ self.has_openstack = has_openstack
+
+ def client_driver(self, caller=None, tenant=None):
+ if caller:
+ auth = {'username': caller.email,
+ 'password': hashlib.md5(caller.password).hexdigest()[:6],
+ 'tenant': tenant}
+ client = OpenStackClient(**auth)
+ else:
+ client = OpenStackClient(tenant=tenant)
+ driver = OpenStackDriver(client=client)
+ return driver
+
+ def admin_driver(self, tenant=None):
+ client = OpenStackClient(tenant=tenant)
+ driver = OpenStackDriver(client=client)
+ return driver
+
def create_role(self, name):
roles = self.shell.keystone.roles.findall(name=name)
if not roles:
@@ -358,18 +384,18 @@
return (subnet_id, subnet)
- def spawn_instance(self, name, key_name=None, hostname=None, image_id=None, security_group=None, pubkeys=[], nics=None):
+ def spawn_instance(self, name, key_name=None, hostname=None, image_id=None, security_group=None, pubkeys=[], nics=None, metadata=None):
flavor_name = self.config.nova_default_flavor
flavor = self.shell.nova.flavors.find(name=flavor_name)
#if not image:
# image = self.config.nova_default_imave
if not security_group:
- security_group = self.config.nova_default_security_group
+ security_group = self.config.nova_default_security_group
files = {}
- if pubkeys:
+ if pubkeys:
files['/root/.ssh/authorized_keys'] = "\n".join(pubkeys)
-
+
hints = {}
availability_zone = None
if hostname:
@@ -383,7 +409,8 @@
files=files,
scheduler_hints=hints,
availability_zone=availability_zone,
- nics=nics)
+ nics=nics,
+ meta=metadata)
return server
def destroy_instance(self, id):
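spawn_instance() now threads an arbitrary metadata dict through to nova's meta= argument. A hedged sketch of a call using the new parameter; the driver construction and all values are illustrative, only the keyword names come from the signature above:

    driver = OpenStackDriver()
    server = driver.spawn_instance(
        name="myslice-sliver",
        hostname="node1.example.org",
        image_id="<glance image id>",
        pubkeys=["ssh-rsa AAAA... user@host"],
        nics=[{"net-id": "<quantum network id>"}],
        metadata={"cpu_cores": "2",
                  "sysctl-net.core.somaxconn": "1024"})  # forwarded as meta=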
diff --git a/planetstack/openstack/manager.py b/planetstack/openstack/manager.py
index 2fb4ff8..9ede33f 100644
--- a/planetstack/openstack/manager.py
+++ b/planetstack/openstack/manager.py
@@ -315,9 +315,22 @@
@require_enabled
def save_sliver(self, sliver):
+ metadata_update = {}
+ if ("numberCores" in sliver.changed_fields):
+ metadata_update["cpu_cores"] = str(sliver.numberCores)
+
+ for tag in sliver.slice.tags.all():
+ if tag.name.startswith("sysctl-"):
+ metadata_update[tag.name] = tag.value
+
if not sliver.instance_id:
nics = self.get_requested_networks(sliver.slice)
- file("/tmp/scott-manager","a").write("slice: %s\nreq: %s\n" % (str(sliver.slice.name), str(nics)))
+ for nic in nics:
+ # If a network hasn't been instantiated yet, spawning this sliver
+ # would fail. Defer saving the sliver for now.
+ if not nic.get("net-id", None):
+ sliver.save() # in case it hasn't been saved yet
+ return
slice_memberships = SliceMembership.objects.filter(slice=sliver.slice)
pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
pubkeys.append(sliver.creator.public_key)
@@ -326,12 +339,13 @@
image_id = sliver.image.image_id,
hostname = sliver.node.name,
pubkeys = pubkeys,
- nics = nics )
+ nics = nics,
+ metadata = metadata_update )
sliver.instance_id = instance.id
sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
-
- if sliver.instance_id and ("numberCores" in sliver.changed_fields):
- self.driver.update_instance_metadata(sliver.instance_id, {"cpu_cores": str(sliver.numberCores)})
+ else:
+ if metadata_update:
+ self.driver.update_instance_metadata(sliver.instance_id, metadata_update)
sliver.save()
sliver.enacted = datetime.now()
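For clarity, a small worked example of what metadata_update ends up holding (all values hypothetical): only sysctl-* slice tags are copied through, and cpu_cores is included only when numberCores is among the sliver's changed fields.

    changed_fields = ["numberCores"]
    slice_tags = [("sysctl-net.ipv4.ip_forward", "1"), ("owner", "alice")]

    metadata_update = {}
    if "numberCores" in changed_fields:
        metadata_update["cpu_cores"] = "4"
    for name, value in slice_tags:
        if name.startswith("sysctl-"):
            metadata_update[name] = value

    # metadata_update == {'cpu_cores': '4', 'sysctl-net.ipv4.ip_forward': '1'}
    # -> passed to spawn_instance(metadata=...) for a new sliver, or to
    #    update_instance_metadata() for one that already has an instance_id.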
diff --git a/planetstack/plstackapi_config b/planetstack/plstackapi_config
index deaf2e3..6e0b26c 100644
--- a/planetstack/plstackapi_config
+++ b/planetstack/plstackapi_config
@@ -29,4 +29,4 @@
default_security_group=default
[observer]
-pl_dependency_graph='/opt/planetstack/model-deps'
+pl_dependency_graph=/opt/planetstack/model-deps
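The quotes are dropped because the value is read verbatim by the config parser; with them, the literal quote characters would become part of the path and opening the file would fail. A hedged sketch of how the observer presumably reads it, assuming Config exposes [section] key pairs as section_key attributes in the same way as Config().api_nova_enabled earlier in this diff:

    import json
    from planetstack.config import Config

    # observer_pl_dependency_graph is an assumed attribute name
    path = Config().observer_pl_dependency_graph   # /opt/planetstack/model-deps
    g = json.loads(open(path).read())              # same shape as toposort's input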
diff --git a/planetstack/openstack/openstack-db-cleanup.sh b/planetstack/tools/openstack-db-cleanup.sh
similarity index 100%
rename from planetstack/openstack/openstack-db-cleanup.sh
rename to planetstack/tools/openstack-db-cleanup.sh
diff --git a/planetstack/tools/openstack-healthcheck.py b/planetstack/tools/openstack-healthcheck.py
new file mode 100755
index 0000000..63534c8
--- /dev/null
+++ b/planetstack/tools/openstack-healthcheck.py
@@ -0,0 +1,57 @@
+#! /usr/bin/python
+
+"""
+ Check the status of libvirt, openstack-nova-compute, and
+ quantum-openvswitch-agent. If these services are enabled and have failed,
+ then restart them.
+"""
+
+import os
+import sys
+import subprocess
+import time
+
+def get_systemd_status(service):
+ p=subprocess.Popen(["/bin/systemctl", "is-active", service], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = p.communicate()
+ out = out.strip()
+ return out
+
+libvirt_enabled = os.system("systemctl -q is-enabled libvirtd.service")==0
+nova_compute_enabled = os.system("systemctl -q is-enabled openstack-nova-compute.service")==0
+openvswitch_agent_enabled = os.system("systemctl -q is-enabled quantum-openvswitch-agent.service")==0
+
+print "enabled:"
+print " libvirtd=", libvirt_enabled
+print " openstack-nova-compute=", nova_compute_enabled
+print " quantum-openvswitch-agent=", openvswitch_agent_enabled
+
+if (not libvirt_enabled) or (not nova_compute_enabled) or (not openvswitch_agent_enabled):
+ print "services are not enabled. exiting"
+ sys.exit(0)
+
+libvirt_status = get_systemd_status("libvirtd.service")
+nova_compute_status = get_systemd_status("openstack-nova-compute.service")
+openvswitch_agent_status = get_systemd_status("quantum-openvswitch-agent.service")
+
+print "status:"
+print " libvirtd=", libvirt_status
+print " openstack-nova-compute=", nova_compute_status
+print " quantum-openvswitch-agent=", openvswitch_agent_status
+
+if (libvirt_status=="failed") or (nova_compute_status=="failed") or (openvswitch_agent_status=="failed"):
+ print "services have failed. doing the big restart"
+ os.system("systemctl stop openstack-nova-compute.service")
+ os.system("systemctl stop quantum-openvswitch-agent.service")
+ os.system("systemctl stop libvirtd.service")
+ time.sleep(5)
+ os.system("systemctl start libvirtd.service")
+ time.sleep(5)
+ os.system("systemctl start quantum-openvswitch-agent.service")
+ time.sleep(5)
+ os.system("systemctl start openstack-nova-compute.service")
+ print "done"
+
+
+
+
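The script is meant to run periodically (e.g. from cron); the interesting part is the two-level check, sketched below for a single hypothetical service: systemctl is-enabled (exit status) decides whether the service should be running at all, and systemctl is-active (stdout) detects the failed state that triggers the restart.

    import os
    import subprocess

    service = "libvirtd.service"                    # illustrative
    enabled = os.system("systemctl -q is-enabled %s" % service) == 0
    state = subprocess.Popen(["/bin/systemctl", "is-active", service],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE).communicate()[0].strip()
    if enabled and state == "failed":
        # the real script stops and restarts all three services, in order
        os.system("systemctl restart %s" % service)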
diff --git a/planetstack/util/logger.py b/planetstack/util/logger.py
new file mode 100644
index 0000000..91e47d0
--- /dev/null
+++ b/planetstack/util/logger.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+
+#----------------------------------------------------------------------
+# Copyright (c) 2008 Board of Trustees, Princeton University
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and/or hardware specification (the "Work") to
+# deal in the Work without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Work, and to permit persons to whom the Work
+# is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Work.
+#
+# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
+# IN THE WORK.
+#----------------------------------------------------------------------
+
+import os, sys
+import traceback
+import logging, logging.handlers
+
+CRITICAL=logging.CRITICAL
+ERROR=logging.ERROR
+WARNING=logging.WARNING
+INFO=logging.INFO
+DEBUG=logging.DEBUG
+
+# a logger that can handle tracebacks
+class Logger:
+ def __init__ (self,logfile=None,loggername=None,level=logging.INFO):
+ # default is to locate loggername from the logfile if avail.
+ if not logfile:
+ #loggername='console'
+ #handler=logging.StreamHandler()
+ #handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
+ logfile = "/var/log/planetstack.log"
+
+ if not loggername:
+ loggername=os.path.basename(logfile)
+ try:
+ handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=5)
+ except IOError:
+ # This is usually a permissions error because the file is
+ # owned by root, but httpd is trying to access it.
+ tmplogfile=os.getenv("TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile)
+ # In unusual setups, two users on the same machine might run the same code,
+ # meaning they would clobber each other's log files
+ # We could (a) rename the tmplogfile, or (b)
+ # just log to the console in that case.
+ # Here we default to the console.
+ if os.path.exists(tmplogfile) and not os.access(tmplogfile,os.W_OK):
+ loggername = loggername + "-console"
+ handler = logging.StreamHandler()
+ else:
+ handler=logging.handlers.RotatingFileHandler(tmplogfile,maxBytes=1000000, backupCount=5)
+ handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
+ self.logger=logging.getLogger(loggername)
+ self.logger.setLevel(level)
+ # check if logger already has the handler we're about to add
+ handler_exists = False
+ for l_handler in self.logger.handlers:
+ if l_handler.baseFilename == handler.baseFilename and \
+ l_handler.level == handler.level:
+ handler_exists = True
+
+ if not handler_exists:
+ self.logger.addHandler(handler)
+
+ self.loggername=loggername
+
+ def setLevel(self,level):
+ self.logger.setLevel(level)
+
+ # shorthand to avoid having to import logging all over the place
+ def setLevelDebug(self):
+ self.logger.setLevel(logging.DEBUG)
+
+ def debugEnabled (self):
+ return self.logger.getEffectiveLevel() == logging.DEBUG
+
+ # define a verbose option with something like
+ # parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0)
+ # and pass the corresponding options.verbose to this method to adjust the level
+ def setLevelFromOptVerbose(self,verbose):
+ if verbose==0:
+ self.logger.setLevel(logging.WARNING)
+ elif verbose==1:
+ self.logger.setLevel(logging.INFO)
+ elif verbose>=2:
+ self.logger.setLevel(logging.DEBUG)
+ # in case some other code needs a boolean
+ def getBoolVerboseFromOpt(self,verbose):
+ return verbose>=1
+ def getBoolDebugFromOpt(self,verbose):
+ return verbose>=2
+
+ ####################
+ def info(self, msg):
+ self.logger.info(msg)
+
+ def debug(self, msg):
+ self.logger.debug(msg)
+
+ def warn(self, msg):
+ self.logger.warn(msg)
+
+ # some code is using logger.warn(), some is using logger.warning()
+ def warning(self, msg):
+ self.logger.warning(msg)
+
+ def error(self, msg):
+ self.logger.error(msg)
+
+ def critical(self, msg):
+ self.logger.critical(msg)
+
+ # logs an exception - use in an except statement
+ def log_exc(self,message):
+ self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
+ self.error("%s END TRACEBACK"%message)
+
+ def log_exc_critical(self,message):
+ self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
+ self.critical("%s END TRACEBACK"%message)
+
+ # for investigation purposes, can be placed anywhere
+ def log_stack(self,message):
+ to_log="".join(traceback.format_stack())
+ self.info("%s BEG STACK"%message+"\n"+to_log)
+ self.info("%s END STACK"%message)
+
+ def enable_console(self, stream=sys.stdout):
+ formatter = logging.Formatter("%(message)s")
+ handler = logging.StreamHandler(stream)
+ handler.setFormatter(formatter)
+ self.logger.addHandler(handler)
+
+
+info_logger = Logger(loggername='info', level=logging.INFO)
+debug_logger = Logger(loggername='debug', level=logging.DEBUG)
+warn_logger = Logger(loggername='warning', level=logging.WARNING)
+error_logger = Logger(loggername='error', level=logging.ERROR)
+critical_logger = Logger(loggername='critical', level=logging.CRITICAL)
+logger = info_logger
+########################################
+import time
+
+def profile(logger):
+ """
+ Logs (at INFO level) the runtime of the specified callable. Use as a decorator, e.g.,
+
+ @profile(logger)
+ def foo(...):
+ ...
+ """
+ def logger_profile(callable):
+ def wrapper(*args, **kwds):
+ start = time.time()
+ result = callable(*args, **kwds)
+ end = time.time()
+ args = map(str, args)
+ args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.iteritems()]
+ # should probably use debug, but then debug is not always enabled
+ logger.info("PROFILED %s (%s): %.02f s" % (callable.__name__, ", ".join(args), end - start))
+ return result
+ return wrapper
+ return logger_profile
+
+
+if __name__ == '__main__':
+ print 'testing logging into logger.log'
+ logger1=Logger('logger.log', loggername='std(info)')
+ logger2=Logger('logger.log', loggername='error', level=logging.ERROR)
+ logger3=Logger('logger.log', loggername='debug', level=logging.DEBUG)
+
+ for (logger,msg) in [ (logger1,"std(info)"),(logger2,"error"),(logger3,"debug")]:
+
+ print "====================",msg, logger.logger.handlers
+
+ logger.enable_console()
+ logger.critical("logger.critical")
+ logger.error("logger.error")
+ logger.warn("logger.warning")
+ logger.info("logger.info")
+ logger.debug("logger.debug")
+ logger.setLevel(logging.DEBUG)
+ logger.debug("logger.debug again")
+
+ @profile(logger)
+ def sleep(seconds = 1):
+ time.sleep(seconds)
+
+ logger.info('console.info')
+ sleep(0.5)
+ logger.setLevel(logging.DEBUG)
+ sleep(0.25)
+