initial checkin of event listener. refactor/cleanup
diff --git a/planetstack/core/models/site.py b/planetstack/core/models/site.py
index 81bf5f3..53a1a6c 100644
--- a/planetstack/core/models/site.py
+++ b/planetstack/core/models/site.py
@@ -28,17 +28,9 @@
def __unicode__(self): return u'%s %s %s' % (self.site, self.user, self.role)
def save(self, *args, **kwds):
- if not hasattr(self, 'os_manager'):
- from openstack.manager import OpenStackManager
- setattr(self, 'os_manager', OpenStackManager())
- self.os_manager.driver.add_user_role(self.user.kuser_id, self.site.tenant_id, self.role.role_type)
super(SitePrivilege, self).save(*args, **kwds)
def delete(self, *args, **kwds):
- if not hasattr(self, 'os_manager'):
- from openstack.manager import OpenStackManager
- setattr(self, 'os_manager', OpenStackManager())
- self.os_manager.driver.delete_user_role(self.user.kuser_id, self.site.tenant_id, self.role.role_type)
super(SitePrivilege, self).delete(*args, **kwds)
diff --git a/planetstack/core/models/slice.py b/planetstack/core/models/slice.py
index da9379d..63754e4 100644
--- a/planetstack/core/models/slice.py
+++ b/planetstack/core/models/slice.py
@@ -44,17 +44,7 @@
def __unicode__(self): return u'%s %s %s' % (self.slice, self.user, self.role)
def save(self, *args, **kwds):
- if not hasattr(self, 'os_manager'):
- from openstack.manager import OpenStackManager
- setattr(self, 'os_manager', OpenStackManager())
- if self.os_manager.driver:
- self.os_manager.driver.add_user_role(self.user.kuser_id, self.slice.tenant_id, self.role.role_type)
super(SliceMembership, self).save(*args, **kwds)
def delete(self, *args, **kwds):
- if not hasattr(self, 'os_manager'):
- from openstack.manager import OpenStackManager
- setattr(self, 'os_manager', OpenStackManager())
- if self.os_manager.driver:
- self.os_manager.driver.delete_user_role(self.user.kuser_id, self.slice.tenant_id, self.role.role_type)
super(SliceMembership, self).delete(*args, **kwds)
diff --git a/planetstack/openstack/backend.py b/planetstack/openstack/backend.py
new file mode 100644
index 0000000..340b29b
--- /dev/null
+++ b/planetstack/openstack/backend.py
@@ -0,0 +1,18 @@
+import threading
+from openstack.sliveragent import SliverAgent
+from openstack.observer import OpenStackObserver
+from openstack.event_listener import EventListener
+
+class Backend:
+
+ def run(self):
+ # start the openstack observer
+ observer = OpenStackObserver()
+ observer_thread = threading.Thread(target=observer.run)
+ observer_thread.start()
+
+        # start event listener
+ event_listener = EventListener()
+ event_listener_thread = threading.Thread(target=event_listener.run)
+ event_listener_thread.start()
+
diff --git a/planetstack/openstack/client.py b/planetstack/openstack/client.py
index 4563e46..1a6386d 100644
--- a/planetstack/openstack/client.py
+++ b/planetstack/openstack/client.py
@@ -5,6 +5,9 @@
from quantumclient.v2_0 import client as quantum_client
from nova.db.sqlalchemy import api as nova_db_api
from nova.context import get_admin_context
+ from keystone.common.sql import core
+ core.CONF(args=[], project='keystone', default_config_files=['/etc/keystone/keystone.conf'])
+ from keystone.identity.backends.sql import Metadata
has_openstack = True
except:
has_openstack = False
@@ -59,6 +62,17 @@
if '@' in self.username:
self.username = self.username[:self.username.index('@')]
+class KeystoneDB:
+ @require_enabled
+ def get_session(self):
+ return core.Base().get_session()
+
+ @require_enabled
+ def get_metadata(self):
+ session = self.get_session()
+ return session.query(Metadata).all()
+
+
class KeystoneClient(Client):
def __init__(self, *args, **kwds):
Client.__init__(self, *args, **kwds)
@@ -154,6 +168,7 @@
def __init__ ( self, *args, **kwds) :
# instantiate managers
self.keystone = KeystoneClient(*args, **kwds)
+ self.keystone_db = KeystoneDB()
self.glance = GlanceClient(*args, **kwds)
self.nova = NovaClient(*args, **kwds)
self.nova_db = NovaDB(*args, **kwds)
diff --git a/planetstack/openstack/event_listener.py b/planetstack/openstack/event_listener.py
new file mode 100644
index 0000000..d3f0abf
--- /dev/null
+++ b/planetstack/openstack/event_listener.py
@@ -0,0 +1,114 @@
+import threading
+import requests, json
+from core.models import *
+from openstack.manager import OpenStackManager
+
+# decorator that marks dispatchable event methods
+def event(func):
+ setattr(func, 'event', func.__name__)
+ return func
+
+class EventHandler:
+
+ def __init__(self):
+ self.manager = OpenStackManager()
+
+    def get_events(self):
+        events = []
+        for attrib in [getattr(self, name) for name in dir(self)]:
+            if hasattr(attrib, 'event'):
+                events.append(getattr(attrib, 'event'))
+        return events
+
+ def dispatch(self, event, *args, **kwds):
+ if hasattr(self, event):
+ return getattr(self, event)(*args, **kwds)
+
+
+ @event
+ def save_site(self, id):
+ sites = Site.objects.filter(id=id)
+ if sites:
+ self.manager.save_site(sites[0])
+
+ @event
+ def delete_site(self, tenant_id):
+ self.manager.driver.delete_tenant(tenant_id)
+
+ @event
+ def save_site_privilege(self, id):
+ site_privileges = SitePrivilege.objects.filter(id=id)
+ if site_privileges:
+ site_priv = self.manager.save_site_privilege(site_privileges[0])
+
+ @event
+ def delete_site_privilege(self, kuser_id, tenant_id, role_type):
+ self.manager.driver.delete_user_role(kuser_id, tenant_id, role_type)
+
+ @event
+ def save_slice(self, id):
+ slices = Slice.objects.filter(id=id)
+ if slices:
+ self.manager.save_slice(slices[0])
+
+ @event
+ def delete_slice(self, tenant_id, network_id, router_id, subnet_id):
+ self.manager._delete_slice(tenant_id, network_id, router_id, subnet_id)
+
+ @event
+ def save_user(self, id):
+ users = User.objects.filter(id=id)
+ if users:
+ self.manager.save_user(users[0])
+
+ @event
+ def delete_user(self, kuser_id):
+ self.manager.driver.delete_user(kuser_id)
+
+ @event
+ def save_sliver(self, id):
+ slivers = Sliver.objects.filter(id=id)
+ if slivers:
+ self.manager.save_sliver(slivers[0])
+
+ @event
+ def delete_sliver(self, instance_id):
+ self.manager.destroy_instance(instance_id)
+
+
+
+class EventListener:
+
+ def __init__(self):
+ self.handler = EventHandler()
+
+ def listen_for_event(self, event, hash):
+ url = 'http://www.feefie.com/command'
+ params = {'action': 'subscribe',
+ 'hash': hash,
+ 'htm': 1}
+ while True:
+ r = requests.get(url, params=params)
+            r_data = json.loads(r.text)
+ payload = r_data.get('payload')
+ self.handler.dispatch(event, **payload)
+
+
+ def run(self):
+ # register events
+ event_names = [{'title': name} for name in self.handler.get_events()]
+ url = 'http://www.feefie.com/command'
+ params = {'action': 'add',
+ 'u': 'pl',
+ 'events': event_names}
+ r = requests.get(url, params=params)
+ print dir(r)
+ print r
+        r_data = json.loads(r.text)
+ events = r_data.get('events', [])
+        # spawn a thread for each event
+ for event in events:
+ args = (event['title'], event['hash'])
+ listener_thread = threading.Thread(target=self.listen_for_event, args=args)
+            listener_thread.start()
+
diff --git a/planetstack/openstack/manager.py b/planetstack/openstack/manager.py
index a5d3f4a..8016888 100644
--- a/planetstack/openstack/manager.py
+++ b/planetstack/openstack/manager.py
@@ -3,6 +3,7 @@
import string
import random
import hashlib
+from datetime import datetime
from netaddr import IPAddress, IPNetwork
from planetstack import settings
@@ -118,6 +119,10 @@
self.init_caller(user, user.site.login_base)
self.save_key(user.public_key, user.keyname)
self.init_admin()
+
+ user.save()
+ user.enacted = datetime.now()
+ user.save(update_fields=['enacted'])
@require_enabled
def delete_user(self, user):
@@ -140,12 +145,34 @@
description=site.name,
enabled=site.enabled)
+ # commit the updated record
+ site.save()
+ site.enacted = datetime.now()
+        site.save(update_fields=['enacted']) # ensure enacted > updated
+
+
@require_enabled
def delete_site(self, site):
if site.tenant_id:
self.driver.delete_tenant(site.tenant_id)
@require_enabled
+ def save_site_privilege(self, site_priv):
+ if site_priv.user.kuser_id and site_priv.site.tenant_id:
+ self.driver.add_user_role(site_priv.user.kuser_id,
+ site_priv.site.tenant_id,
+ site_priv.role.role_type)
+ site_priv.enacted = datetime.now()
+ site_priv.save(update_fields=['enacted'])
+
+
+ @require_enabled
+ def delete_site_privilege(self, site_priv):
+ self.driver.delete_user_role(site_priv.user.kuser_id,
+ site_priv.site.tenant_id,
+ site_priv.role.role_type)
+
+ @require_enabled
def save_slice(self, slice):
if not slice.tenant_id:
nova_fields = {'tenant_name': slice.name,
@@ -186,30 +213,55 @@
# add subnet as interface to slice's router
self.driver.add_router_interface(router['id'], subnet['id'])
# add external route
- self.driver.add_external_route(subnet)
-
+ self.driver.add_external_route(subnet)
+
if slice.id and slice.tenant_id:
self.driver.update_tenant(slice.tenant_id,
description=slice.description,
- enabled=slice.enabled)
+ enabled=slice.enabled)
+
+ slice.save()
+ slice.enacted = datetime.now()
+ slice.save(update_fields=['enacted'])
@require_enabled
def delete_slice(self, slice):
if slice.tenant_id:
- self.driver.delete_router_interface(slice.router_id, slice.subnet_id)
- self.driver.delete_subnet(slice.subnet_id)
- self.driver.delete_router(slice.router_id)
- self.driver.delete_network(slice.network_id)
- self.driver.delete_tenant(slice.tenant_id)
- # delete external route
- subnet = None
- subnets = self.driver.shell.quantum.list_subnets()['subnets']
- for snet in subnets:
- if snet['id'] == slice.subnet_id:
- subnet = snet
- if subnet:
- self.driver.delete_external_route(subnet)
+ self._delete_slice(slice.tenant_id, slice.network_id,
+ slice.router_id, slice.subnet_id)
+ @require_enabled
+ def _delete_slice(self, tenant_id, network_id, router_id, subnet_id):
+        self.driver.delete_router_interface(router_id, subnet_id)
+        self.driver.delete_subnet(subnet_id)
+        self.driver.delete_router(router_id)
+        self.driver.delete_network(network_id)
+        self.driver.delete_tenant(tenant_id)
+        # delete external route
+        subnet = None
+        subnets = self.driver.shell.quantum.list_subnets()['subnets']
+        for snet in subnets:
+            if snet['id'] == subnet_id:
+ subnet = snet
+ if subnet:
+ self.driver.delete_external_route(subnet)
+
+
+ @require_enabled
+ def save_slice_membership(self, slice_memb):
+ if slice_memb.user.kuser_id and slice_memb.slice.tenant_id:
+ self.driver.add_user_role(slice_memb.user.kuser_id,
+ slice_memb.slice.tenant_id,
+ slice_memb.role.role_type)
+ slice_memb.enacted = datetime.now()
+ slice_memb.save(update_fields=['enacted'])
+
+
+ @require_enabled
+ def delete_slice_membership(self, slice_memb):
+ self.driver.delete_user_role(slice_memb.user.kuser_id,
+ slice_memb.slice.tenant_id,
+ slice_memb.role.role_type)
@require_enabled
@@ -266,6 +318,10 @@
if sliver.instance_id and ("numberCores" in sliver.changed_fields):
self.driver.update_instance_metadata(sliver.instance_id, {"cpu_cores": str(sliver.numberCores)})
+ sliver.save()
+ sliver.enacted = datetime.now()
+ sliver.save(update_fields=['enacted'])
+
@require_enabled
def delete_sliver(self, sliver):
if sliver.instance_id:
diff --git a/planetstack/openstack/observer.py b/planetstack/openstack/observer.py
index 872cf58..c14b2cb 100644
--- a/planetstack/openstack/observer.py
+++ b/planetstack/openstack/observer.py
@@ -1,11 +1,15 @@
import time
import traceback
from datetime import datetime
+from collections import defaultdict
from core.models import *
from django.db.models import F, Q
from openstack.manager import OpenStackManager
+from util.logger import Logger, logging
+logger = Logger(logfile='observer.log', level=logging.INFO)
+
class OpenStackObserver:
def __init__(self):
@@ -19,8 +23,9 @@
#self.sync_roles()
self.sync_tenants()
self.sync_users()
- #self.sync_user_tenant_roles()
+ self.sync_user_tenant_roles()
self.sync_slivers()
+ self.sync_sliver_ips()
time.sleep(7)
except:
traceback.print_exc()
@@ -37,16 +42,25 @@
pending_role_names = [r.role_type for r in pending_roles]
for role in pending_roles:
if role.role_type not in keystone_role_names:
- self.manager.save_role(role)
+ try:
+ self.manager.save_role(role)
+ logger.info("save role: %s" % (role))
+ except:
+ logger.log_exc("save role failed: %s" % role)
+ traceback.print_exc()
-
- # delete keystone roles that don't exist in planetstack
+ # don't delete roles for now
+ """
+ # delete keystone roles that don't exist in planetstack
for keystone_role in keystone_roles:
if keystone_role.name == 'admin':
continue
if keystone_role.name not in pending_role_names:
- pass
- #self.manager.driver.delete_role({id: keystone_role.id})
+ try:
+ self.manager.driver.delete_role({id: keystone_role.id})
+ except:
+ traceback.print_exc()
+ """
def sync_tenants(self):
"""
@@ -57,19 +71,21 @@
# get all sites that need to be synced (enacted < updated or enacted is None)
pending_sites = Site.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
for site in pending_sites:
- self.manager.save_site(site)
- site.save()
- site.enacted = datetime.now()
- site.save(update_fields=['enacted'])
+ try:
+ self.manager.save_site(site)
+ logger.info("saved site %s" % site)
+ except:
+ logger.log_exc("save site failed: %s" % site)
# get all slices that need to be synced (enacted < updated or enacted is None)
pending_slices = Slice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
for slice in pending_slices:
- self.manager.init_caller(slice.creator, slice.creator.site.login_base)
- self.manager.save_slice(slice)
- slice.save()
- slice.enacted = datetime.now()
- slice.save(update_fields=['enacted'])
+ try:
+ self.manager.init_caller(slice.creator, slice.creator.site.login_base)
+ self.manager.save_slice(slice)
+ logger.info("saved slice %s" % slice)
+ except:
+ logger.log_exc("save slice failed: %s" % slice)
# get all sites that where enacted != null. We can assume these sites
# have previously been synced and need to be checed for deletion.
@@ -87,13 +103,16 @@
# delete keystone tenants that don't have a site record
tenants = self.manager.driver.shell.keystone.tenants.findall()
+ system_tenants = ['admin','service']
for tenant in tenants:
if tenant.name == 'admin':
continue
if tenant.name not in site_dict and tenant.name not in slice_dict:
- #print "delete " + tenant.name
- pass
- #self.manager.driver.delete_tenant(tenant.id)
+ try:
+ self.manager.driver.delete_tenant(tenant.id)
+ logger.info("deleted tenant: %s" % (tenant))
+ except:
+ logger.log_exc("delete tenant failed: %s" % tenant)
def sync_users(self):
@@ -104,10 +123,11 @@
# get all users that need to be synced (enacted < updated or enacted is None)
pending_users = User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
for user in pending_users:
- self.manager.save_user(user)
- user.save()
- user.enacted = datetime.now()
- user.save(update_fields=['enacted'])
+ try:
+ self.manager.save_user(user)
+ logger.info("saved user: %s" % (user))
+ except:
+ logger.log_exc("save user failed: %s" %user)
# get all users that where enacted != null. We can assume these users
# have previously been synced and need to be checed for deletion.
@@ -117,15 +137,87 @@
user_dict[user.kuser_id] = user
# delete keystone users that don't have a user record
+ system_users = ['admin', 'nova', 'quantum', 'glance', 'cinder', 'swift', 'service']
users = self.manager.driver.shell.keystone.users.findall()
for user in users:
- if user.name == 'admin':
+ if user.name in system_users:
continue
if user.id not in user_dict:
- pass
- #self.manager.driver.delete_user(user.id)
-
-
+ try:
+ #self.manager.driver.delete_user(user.id)
+ logger.info("deleted user: %s" % user)
+ except:
+ logger.log_exc("delete user failed: %s" % user)
+
+
+ def sync_user_tenant_roles(self):
+ """
+        Save all site privileges and slice memberships where enacted < updated or
+ enacted == None. Remove ones that don't exist in openstack db if they have
+ an enacted time (enacted != None).
+ """
+ # sync site privileges
+ pending_site_privileges = SitePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ for site_priv in pending_site_privileges:
+ try:
+ self.manager.save_site_privilege(site_priv)
+ logger.info("saved site privilege: %s" % (site_priv))
+ except: logger.log_exc("save site privilege failed: %s " % site_priv)
+
+ # sync slice memberships
+ pending_slice_memberships = SliceMembership.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ for slice_memb in pending_slice_memberships:
+ try:
+ self.manager.save_slice_membership(slice_memb)
+ logger.info("saved slice membership: %s" % (slice_memb))
+ except: logger.log_exc("save slice membership failed: %s" % slice_memb)
+
+ # get all site privileges and slice memberships that have been enacted
+ user_tenant_roles = defaultdict(list)
+ for site_priv in SitePrivilege.objects.filter(enacted__isnull=False):
+ user_tenant_roles[(site_priv.user.kuser_id, site_priv.site.tenant_id)].append(site_priv.role.role)
+ for slice_memb in SliceMembership.objects.filter(enacted__isnull=False):
+ user_tenant_roles[(slice_memb.user.kuser_id, slice_memb.slice.tenant_id)].append(slice_memb.role.role)
+
+        # Some user tenant roles aren't stored in planetstack but they must be preserved.
+        # Roles that fall in this category are:
+        # 1. Never remove a user's role at their home site.
+        # 2. Never remove a user's role at a slice they've created.
+ # Keep track of all roles that must be preserved.
+ users = User.objects.all()
+ preserved_roles = {}
+ for user in users:
+ tenant_ids = [s['tenant_id'] for s in user.slices.values()]
+ tenant_ids.append(user.site.tenant_id)
+ preserved_roles[user.kuser_id] = tenant_ids
+
+
+ # begin removing user tenant roles from keystone. This is stored in the
+ # Metadata table.
+ for metadata in self.manager.driver.shell.keystone_db.get_metadata():
+ # skip admin roles
+ if metadata.user_id == self.manager.driver.admin_user.id:
+ continue
+ # skip preserved tenant ids
+ if metadata.user_id in preserved_roles and \
+ metadata.tenant_id in preserved_roles[metadata.user_id]:
+ continue
+ # get roles for user at this tenant
+ user_tenant_role_ids = user_tenant_roles.get((metadata.user_id, metadata.tenant_id), [])
+
+ if user_tenant_role_ids:
+ # The user has roles at the tenant. Check if roles need to
+ # be updated.
+ user_keystone_role_ids = metadata.data.get('roles', [])
+ for role_id in user_keystone_role_ids:
+ if role_id not in user_tenant_role_ids:
+ user_keystone_role_ids.pop(user_keystone_role_ids.index(role_id))
+ else:
+ # The user has no roles at this tenant.
+ metadata.data['roles'] = []
+ #session.add(metadata)
+ logger.info("pruning metadata for %s at %s" % (metadata.user_id, metadata.tenant_id))
+
def sync_slivers(self):
"""
save all slivers where enacted < updated or enacted == None. Remove slivers that
@@ -134,13 +226,14 @@
# get all users that need to be synced (enacted < updated or enacted is None)
pending_slivers = Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
for sliver in pending_slivers:
- if not sliver.instance_id and sliver.creator:
- # update manager context
- self.manager.init_caller(sliver.creator, sliver.slice.name)
- self.manager.save_sliver(sliver)
- sliver.save()
- sliver.enacted = datetime.now()
- sliver.save(update_fields=['enacted'])
+ if not sliver.instance_id and sliver.creator:
+ try:
+ # update manager context
+ self.manager.init_caller(sliver.creator, sliver.slice.name)
+ self.manager.save_sliver(sliver)
+                    logger.info("saved sliver: %s" % sliver)
+ except:
+ logger.log_exc("save sliver failed: %s" % sliver)
# get all slivers that where enacted != null. We can assume these users
# have previously been synced and need to be checed for deletion.
@@ -154,8 +247,29 @@
instances = self.manager.driver.shell.nova_db.instance_get_all(ctx)
for instance in instances:
if instance.id not in sliver_dict:
- # lookup tenant and update context
- #tenant = self.manager.driver.shell.keystone.tenants.findall(id=instance.tenant_id)
- #self.manager.init_admin(tenant=tenant.name)
- #self.manager.driver.destroy_instance(instance.id)
- pass
+ try:
+ # lookup tenant and update context
+ tenant = self.manager.driver.shell.keystone.tenants.find(id=instance.project_id)
+ self.manager.init_admin(tenant=tenant.name)
+ self.manager.driver.destroy_instance(instance.id)
+ logger.info("destroyed sliver: %s" % (instance))
+ except:
+ logger.log_exc("destroy sliver failed: %s" % instance)
+
+
+ def sync_sliver_ips(self):
+ # fill in null ip addresses
+ slivers = Sliver.objects.filter(ip=None)
+ for sliver in slivers:
+ # update connection
+ self.manager.init_admin(tenant=sliver.slice.name)
+ servers = self.manager.client.nova.servers.findall(id=sliver.instance_id)
+ if not servers:
+ continue
+ server = servers[0]
+ ips = server.addresses.get(sliver.slice.name, [])
+ if not ips:
+ continue
+ sliver.ip = ips[0]['addr']
+ sliver.save()
+ logger.info("saved sliver ip: %s %s" % (sliver, ips[0]))
diff --git a/planetstack/plstackapi-debug-server.py b/planetstack/plstackapi-debug-server.py
index a78cfe7..e120d72 100644
--- a/planetstack/plstackapi-debug-server.py
+++ b/planetstack/plstackapi-debug-server.py
@@ -1,12 +1,10 @@
#!/usr/bin/env python
import os
import sys
-import threading
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
-from planetstack.config import Config
-from openstack.sliveragent import SliverAgent
-from openstack.observer import OpenStackObserver
+from planetstack.config import Config
+from openstack.backend import Backend
if __name__ == '__main__':
@@ -17,16 +15,9 @@
args = [__file__, 'runserver', url]
- # start the sliver agent thread
- sliver_agent = SliverAgent()
- sliver_agent_thread = threading.Thread(target=sliver_agent.run)
- sliver_agent_thread.start()
-
- # start the openstack observer
- observer = OpenStackObserver()
- observer_thread = threading.Thread(target=observer.run)
- observer_thread.start()
-
+ backend = Backend()
+ backend.run()
+
# start the server
server = ManagementUtility(args)
server.execute()
diff --git a/planetstack/util/logging.py b/planetstack/util/logging.py
deleted file mode 100644
index 91e47d0..0000000
--- a/planetstack/util/logging.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/python
-
-#----------------------------------------------------------------------
-# Copyright (c) 2008 Board of Trustees, Princeton University
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and/or hardware specification (the "Work") to
-# deal in the Work without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Work, and to permit persons to whom the Work
-# is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Work.
-#
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
-# IN THE WORK.
-#----------------------------------------------------------------------
-
-import os, sys
-import traceback
-import logging, logging.handlers
-
-CRITICAL=logging.CRITICAL
-ERROR=logging.ERROR
-WARNING=logging.WARNING
-INFO=logging.INFO
-DEBUG=logging.DEBUG
-
-# a logger that can handle tracebacks
-class Logger:
- def __init__ (self,logfile=None,loggername=None,level=logging.INFO):
- # default is to locate loggername from the logfile if avail.
- if not logfile:
- #loggername='console'
- #handler=logging.StreamHandler()
- #handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
- logfile = "/var/log/planetstack.log"
-
- if not loggername:
- loggername=os.path.basename(logfile)
- try:
- handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=5)
- except IOError:
- # This is usually a permissions error becaue the file is
- # owned by root, but httpd is trying to access it.
- tmplogfile=os.getenv("TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile)
- # In strange uses, 2 users on same machine might use same code,
- # meaning they would clobber each others files
- # We could (a) rename the tmplogfile, or (b)
- # just log to the console in that case.
- # Here we default to the console.
- if os.path.exists(tmplogfile) and not os.access(tmplogfile,os.W_OK):
- loggername = loggername + "-console"
- handler = logging.StreamHandler()
- else:
- handler=logging.handlers.RotatingFileHandler(tmplogfile,maxBytes=1000000, backupCount=5)
- handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
- self.logger=logging.getLogger(loggername)
- self.logger.setLevel(level)
- # check if logger already has the handler we're about to add
- handler_exists = False
- for l_handler in self.logger.handlers:
- if l_handler.baseFilename == handler.baseFilename and \
- l_handler.level == handler.level:
- handler_exists = True
-
- if not handler_exists:
- self.logger.addHandler(handler)
-
- self.loggername=loggername
-
- def setLevel(self,level):
- self.logger.setLevel(level)
-
- # shorthand to avoid having to import logging all over the place
- def setLevelDebug(self):
- self.logger.setLevel(logging.DEBUG)
-
- def debugEnabled (self):
- return self.logger.getEffectiveLevel() == logging.DEBUG
-
- # define a verbose option with s/t like
- # parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0)
- # and pass the coresponding options.verbose to this method to adjust level
- def setLevelFromOptVerbose(self,verbose):
- if verbose==0:
- self.logger.setLevel(logging.WARNING)
- elif verbose==1:
- self.logger.setLevel(logging.INFO)
- elif verbose>=2:
- self.logger.setLevel(logging.DEBUG)
- # in case some other code needs a boolean
- def getBoolVerboseFromOpt(self,verbose):
- return verbose>=1
- def getBoolDebugFromOpt(self,verbose):
- return verbose>=2
-
- ####################
- def info(self, msg):
- self.logger.info(msg)
-
- def debug(self, msg):
- self.logger.debug(msg)
-
- def warn(self, msg):
- self.logger.warn(msg)
-
- # some code is using logger.warn(), some is using logger.warning()
- def warning(self, msg):
- self.logger.warning(msg)
-
- def error(self, msg):
- self.logger.error(msg)
-
- def critical(self, msg):
- self.logger.critical(msg)
-
- # logs an exception - use in an except statement
- def log_exc(self,message):
- self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
- self.error("%s END TRACEBACK"%message)
-
- def log_exc_critical(self,message):
- self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
- self.critical("%s END TRACEBACK"%message)
-
- # for investigation purposes, can be placed anywhere
- def log_stack(self,message):
- to_log="".join(traceback.format_stack())
- self.info("%s BEG STACK"%message+"\n"+to_log)
- self.info("%s END STACK"%message)
-
- def enable_console(self, stream=sys.stdout):
- formatter = logging.Formatter("%(message)s")
- handler = logging.StreamHandler(stream)
- handler.setFormatter(formatter)
- self.logger.addHandler(handler)
-
-
-info_logger = Logger(loggername='info', level=logging.INFO)
-debug_logger = Logger(loggername='debug', level=logging.DEBUG)
-warn_logger = Logger(loggername='warning', level=logging.WARNING)
-error_logger = Logger(loggername='error', level=logging.ERROR)
-critical_logger = Logger(loggername='critical', level=logging.CRITICAL)
-logger = info_logger
-########################################
-import time
-
-def profile(logger):
- """
- Prints the runtime of the specified callable. Use as a decorator, e.g.,
-
- @profile(logger)
- def foo(...):
- ...
- """
- def logger_profile(callable):
- def wrapper(*args, **kwds):
- start = time.time()
- result = callable(*args, **kwds)
- end = time.time()
- args = map(str, args)
- args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.iteritems()]
- # should probably use debug, but then debug is not always enabled
- logger.info("PROFILED %s (%s): %.02f s" % (callable.__name__, ", ".join(args), end - start))
- return result
- return wrapper
- return logger_profile
-
-
-if __name__ == '__main__':
- print 'testing logging into logger.log'
- logger1=Logger('logger.log', loggername='std(info)')
- logger2=Logger('logger.log', loggername='error', level=logging.ERROR)
- logger3=Logger('logger.log', loggername='debug', level=logging.DEBUG)
-
- for (logger,msg) in [ (logger1,"std(info)"),(logger2,"error"),(logger3,"debug")]:
-
- print "====================",msg, logger.logger.handlers
-
- logger.enable_console()
- logger.critical("logger.critical")
- logger.error("logger.error")
- logger.warn("logger.warning")
- logger.info("logger.info")
- logger.debug("logger.debug")
- logger.setLevel(logging.DEBUG)
- logger.debug("logger.debug again")
-
- @profile(logger)
- def sleep(seconds = 1):
- time.sleep(seconds)
-
- logger.info('console.info')
- sleep(0.5)
- logger.setLevel(logging.DEBUG)
- sleep(0.25)
-