CORD-1709: Update logger module in synchronizers and core
Change-Id: I4d9eafe3a30cae3de1b13b24b95924349dcc1ca0
diff --git a/lib/xos-config/xosconfig/default.py b/lib/xos-config/xosconfig/default.py
index f1a7adc..61829ba 100644
--- a/lib/xos-config/xosconfig/default.py
+++ b/lib/xos-config/xosconfig/default.py
@@ -16,6 +16,13 @@
DEFAULT_VALUES = {
'xos_dir': '/opt/xos',
+
+ # The configuration below inherits from the standard config of the Python logging module
+ # See: https://docs.python.org/2/library/logging.config.html
+ # multistructlog supports this config in all of its generality
+ # So for instance, you can add new handlers. Note that all handlers will
+ # receive logs simultaneously.
+
'logging': {
'version': 1,
'handlers': {
diff --git a/lib/xos-config/xosconfig/xos-config-schema.yaml b/lib/xos-config/xosconfig/xos-config-schema.yaml
index 216bbf1..d897e76 100644
--- a/lib/xos-config/xosconfig/xos-config-schema.yaml
+++ b/lib/xos-config/xosconfig/xos-config-schema.yaml
@@ -31,7 +31,12 @@
password:
type: str
required: True
- logging:
+ # The configuration below inherits from the standard config of the Python logging module
+ # See: https://docs.python.org/2/library/logging.config.html
+ # multistructlog supports this config in all of its generality
+ # So for instance, you can add new handlers. Note that all handlers will
+ # receive logs simultaneously.
+ logging:
type: any
xos_dir:
type: str
diff --git a/xos/coreapi/core_main.py b/xos/coreapi/core_main.py
index ad951f8..7318159 100644
--- a/xos/coreapi/core_main.py
+++ b/xos/coreapi/core_main.py
@@ -26,9 +26,10 @@
from reaper import ReaperThread
from grpc_server import XOSGrpcServer, restart_chameleon
-from xos.logger import Logger, logging
-logger = Logger(level=logging.DEBUG)
+from xosconfig import Config
+from multistructlog import create_logger
+log = create_logger(Config().get('logging'))
if __name__ == '__main__':
@@ -41,13 +42,14 @@
restart_chameleon()
- logger.info("Core_main entering wait loop")
+ log.info("XOS core entering wait loop")
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while 1:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
+ log.info("XOS core terminated by keyboard interrupt")
server.stop()
reaper.stop()
diff --git a/xos/coreapi/grpc_server.py b/xos/coreapi/grpc_server.py
index caeb9e6..69a50fc 100644
--- a/xos/coreapi/grpc_server.py
+++ b/xos/coreapi/grpc_server.py
@@ -40,6 +40,12 @@
from concurrent import futures
import zlib
+from xosconfig import Config
+
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
+
if __name__ == "__main__":
import django
sys.path.append('/opt/xos')
@@ -51,8 +57,7 @@
from xos_utility_api import UtilityService
from google.protobuf.empty_pb2 import Empty
-from xos.logger import Logger, logging
-logger = Logger(level=logging.INFO)
+
SERVER_KEY="/opt/cord_profile/core_api_key.pem"
SERVER_CERT="/opt/cord_profile/core_api_cert.pem"
@@ -115,7 +120,7 @@
def __init__(self, port=50055):
self.port = port
- logger.info('init-grpc-server port=%d' % self.port)
+ log.info('Initializing GRPC Server', port = port)
self.thread_pool = futures.ThreadPoolExecutor(max_workers=10)
self.server = grpc.server(self.thread_pool)
@@ -128,7 +133,7 @@
self.services = []
def start(self):
- logger.debug('starting')
+ log.info('Starting GRPC Server')
# add each service unit to the server and also to the list
for activator_func, service_class in (
@@ -148,15 +153,15 @@
# strat the server
self.server.start()
- logger.info('started')
+ log.info('GRPC Server Started')
return self
def stop(self, grace=0):
- logger.debug('stopping')
+ log.info('Stopping GRPC Server')
for service in self.services:
service.stop()
self.server.stop(grace)
- logger.debug('stopped')
+ log.info('GRPC Server Stopped')
def register(self, activator_func, service):
"""
diff --git a/xos/coreapi/reaper.py b/xos/coreapi/reaper.py
index 38ecfcf..3d6def0 100644
--- a/xos/coreapi/reaper.py
+++ b/xos/coreapi/reaper.py
@@ -43,13 +43,15 @@
from django.db import models as django_models
from core.models.xosbase import XOSCollector
from django.db import router
-from xos.logger import Logger, logging
import pdb
import time
import traceback
-logger = Logger(level=logging.DEBUG)
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
class ReaperThread(threading.Thread):
daemon = True
@@ -67,15 +69,15 @@
#diag = Diag.objects.filter(name="foo").first()
except Exception, e:
if "connection already closed" in traceback.format_exc():
- logger.error("XXX connection already closed")
+ log.exception("XXX connection already closed", e = e)
try:
# if db.connection:
# db.connection.close()
db.close_old_connections()
- except:
- logger.log_exc("XXX we failed to fix the failure")
+ except Exception,e:
+ log.exception("XXX we failed to fix the failure", e = e)
else:
- logger.log_exc("XXX some other error")
+ log.exception("XXX some other error", e = e)
def journal_object(self, o, operation, msg=None, timestamp=None):
# not implemented at this time
@@ -128,13 +130,13 @@
if (not getattr(d, "backend_need_reap", False)) and getattr(d, "backend_need_delete", False):
self.journal_object(d, "reaper.need_delete")
- logger.info("REAPER: skipping %r because it has need_delete set" % d)
+ log.info("skipping because it has need_delete set", object = d)
continue
cascade_set = self.get_cascade_set(d)
if cascade_set:
self.journal_object(d, "reaper.cascade_set", msg=",".join([str(m) for m in cascade_set]))
- logger.info('REAPER: cannot purge object %r because its cascade_set is nonempty: %s' % (d, ",".join([str(m) for m in cascade_set])))
+ log.info('REAPER: cannot purge object because its cascade_set is nonempty',object = d, cascade_set = ",".join([str(m) for m in cascade_set]))
continue
# XXX I don't think we need dependency_walker here anymore,
@@ -145,18 +147,18 @@
if (True):
self.journal_object(d, "reaper.purge")
- logger.info('REAPER: purging object %r'%d)
+ log.info('REAPER: purging object',object = d)
try:
d.delete(purge=True)
except:
self.journal_object(d, "reaper.purge.exception")
- logger.error('REAPER: exception purging object %r'%d)
+ log.error('REAPER: exception purging object', object = d)
traceback.print_exc()
try:
reset_queries()
except:
# this shouldn't happen, but in case it does, catch it...
- logger.log_exc("REAPER: exception in reset_queries")
+ log.exception("REAPER: exception in reset_queries")
# logger.debug("REAPER: finished run_reaper_once()")
@@ -166,7 +168,7 @@
try:
self.run_reaper_once()
except:
- logger.log_exc("REAPER: Exception in run loop")
+ log.exception("REAPER: Exception in run loop")
telap = time.time()-start
if telap<self.interval:
diff --git a/xos/generate/dependency_walker.py b/xos/generate/dependency_walker.py
index ab50039..e5684bb 100644
--- a/xos/generate/dependency_walker.py
+++ b/xos/generate/dependency_walker.py
@@ -29,96 +29,108 @@
import json
import pdb
from core.models import *
-from xos.logger import Logger, logging
-logger = Logger(level=logging.INFO)
-missing_links={}
+from xosconfig import Config
+from multistructlog import create_logger
+
+Config.init()
+log = create_logger(Config().get('logging'))
+
+missing_links = {}
try:
- dep_data = open(Config.get('dependency_graph')).read()
-except:
- raise Exception('[XOS-Dependency-Walker] File %s not found' % Config.get('dependency_graph'))
+ dep_data = open(Config.get('dependency_graph')).read()
+except BaseException:
+ raise Exception(
+ '[XOS-Dependency-Walker] File %s not found' %
+ Config.get('dependency_graph'))
dependencies = json.loads(dep_data)
inv_dependencies = {}
for k, lst in dependencies.items():
- for v in lst:
- try:
- inv_dependencies[v].append(k)
- except KeyError:
- inv_dependencies[v]=[k]
-
+ for v in lst:
+ try:
+ inv_dependencies[v].append(k)
+ except KeyError:
+ inv_dependencies[v] = [k]
+
def plural(name):
- if (name.endswith('s')):
- return name+'es'
- else:
- return name+'s'
+ if (name.endswith('s')):
+ return name + 'es'
+ else:
+ return name + 's'
def walk_deps(fn, object):
- model = object.__class__.__name__
- try:
- deps = dependencies[model]
- except:
- deps = []
- return __walk_deps(fn, object, deps)
+ model = object.__class__.__name__
+ try:
+ deps = dependencies[model]
+ except BaseException:
+ deps = []
+ return __walk_deps(fn, object, deps)
+
def walk_inv_deps(fn, object):
- model = object.__class__.__name__
- try:
- deps = inv_dependencies[model]
- except:
- deps = []
- return __walk_deps(fn, object, deps)
+ model = object.__class__.__name__
+ try:
+ deps = inv_dependencies[model]
+ except BaseException:
+ deps = []
+ return __walk_deps(fn, object, deps)
+
def __walk_deps(fn, object, deps):
- model = object.__class__.__name__
- ret = []
- for dep in deps:
- #print "Checking dep %s"%dep
- peer=None
- link = dep.lower()
- try:
- peer = getattr(object, link)
- except AttributeError:
- link = plural(link)
- try:
- peer = getattr(object, link)
- except AttributeError:
- if not missing_links.has_key(model+'.'+link):
- print "Model %s missing link for dependency %s"%(model, link)
- logger.log_exc("WARNING: Model %s missing link for dependency %s."%(model, link))
- missing_links[model+'.'+link]=True
+ model = object.__class__.__name__
+ ret = []
+ for dep in deps:
+ # print "Checking dep %s"%dep
+ peer = None
+ link = dep.lower()
+ try:
+ peer = getattr(object, link)
+ except AttributeError:
+ link = plural(link)
+ try:
+ peer = getattr(object, link)
+ except AttributeError:
+ if model + '.' + link not in missing_links:
+ log.exception(
+ "Model missing link for dependency.",
+ model=model,
+ link=link)
+ missing_links[model + '.' + link] = True
+
+ if (peer):
+ try:
+ peer_objects = peer.all()
+ except AttributeError:
+ peer_objects = [peer]
+ except BaseException:
+ peer_objects = []
+
+ for o in peer_objects:
+ # if (isinstance(o,XOSBase)):
+ if (hasattr(o, 'updated')):
+ fn(o, object)
+ ret.append(o)
+ # Uncomment the following line to enable recursion
+ # walk_inv_deps(fn, o)
+ return ret
- if (peer):
- try:
- peer_objects = peer.all()
- except AttributeError:
- peer_objects = [peer]
- except:
- peer_objects = []
+def p(x, source):
+ print x, x.__class__.__name__
+ return
- for o in peer_objects:
- #if (isinstance(o,XOSBase)):
- if (hasattr(o,'updated')):
- fn(o, object)
- ret.append(o)
- # Uncomment the following line to enable recursion
- # walk_inv_deps(fn, o)
- return ret
-
-def p(x,source):
- print x,x.__class__.__name__
- return
def main():
- #pdb.set_trace()
- s = Slice.objects.filter(name='princeton_sapan62')
- #pdb.set_trace()
- print walk_inv_deps(p,s[0])
-
-if __name__=='__main__':
- main()
+ # pdb.set_trace()
+ s = Slice.objects.filter(name='princeton_sapan62')
+ # pdb.set_trace()
+ print walk_inv_deps(p, s[0])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/xos/synchronizers/model_policy.py b/xos/synchronizers/model_policy.py
index c324340..aabbf83 100644
--- a/xos/synchronizers/model_policy.py
+++ b/xos/synchronizers/model_policy.py
@@ -24,7 +24,6 @@
from django.utils import timezone
from django.db import models as django_models
from generate.dependency_walker import *
-from xos.logger import Logger, logging
import pdb
import time
@@ -35,7 +34,11 @@
model_policies = {}
-logger = Logger(level=logging.DEBUG)
+from xosconfig import Config
+from multistructlog import create_logger
+
+Config.init()
+log = create_logger(Config().get('logging'))
def EnableModelPolicy(x):
global modelPolicyEnabled
@@ -62,10 +65,10 @@
if (save_fields):
d.save(update_fields=save_fields)
except AttributeError,e:
- logger.log_exc("AttributeError in update_dep")
+ log.exception("AttributeError in update_dep", e = e)
raise e
except Exception,e:
- logger.log_exc("Exception in update_dep")
+ log.exception("Exception in update_dep", e = e)
def delete_if_inactive(d, o):
try:
@@ -108,7 +111,7 @@
try:
policy_handler = model_policies.get(policy_name, None) # getattr(model_policies, policy_name, None)
- logger.debug("MODEL POLICY: handler %s %s" % (policy_name, policy_handler))
+ log.debug("MODEL POLICY: handler", policy_name = policy_name, policy_handler = policy_handler)
if policy_handler is not None:
if (deleted):
try:
@@ -117,15 +120,15 @@
pass
else:
policy_handler.handle(instance)
- logger.debug("MODEL POLICY: completed handler %s %s" % (policy_name, policy_handler))
- except:
- logger.log_exc("MODEL POLICY: Exception when running handler")
+ log.debug("MODEL POLICY: completed handler", policy_name = policy_name, policy_handler = policy_handler)
+ except Exception, e:
+ log.exception("MODEL POLICY: Exception when running handler", e = e)
try:
instance.policed=timezone.now()
instance.save(update_fields=['policed'])
except:
- logger.log_exc('MODEL POLICY: Object %r is defective'%instance)
+ log.exception('MODEL POLICY: Object is defective', object = instance)
bad_instances.append(instance)
def noop(o,p):
@@ -139,15 +142,15 @@
#diag = Diag.objects.filter(name="foo").first()
except Exception, e:
if "connection already closed" in traceback.format_exc():
- logger.error("XXX connection already closed")
+ log.error("XXX connection already closed")
try:
# if db.connection:
# db.connection.close()
db.close_old_connections()
- except:
- logger.log_exc("XXX we failed to fix the failure")
+ except Exception,f:
+ log.exception("XXX we failed to fix the failure", e = f)
else:
- logger.log_exc("XXX some other error")
+ log.exception("XXX some other error", e = e)
def run_policy():
load_model_policies()
@@ -156,8 +159,9 @@
start = time.time()
try:
run_policy_once()
- except:
- logger.log_exc("MODEL_POLICY: Exception in run_policy()")
+ except Exception,e:
+ log.exception("MODEL_POLICY: Exception in run_policy()", e = e)
+
if (time.time()-start<1):
time.sleep(1)
@@ -167,7 +171,7 @@
objects = []
deleted_objects = []
- logger.debug("MODEL POLICY: run_policy_once()")
+ log.debug("MODEL POLICY: run_policy_once()")
check_db_connection_okay()
@@ -185,8 +189,8 @@
try:
reset_queries()
- except:
+ except Exception,e:
# this shouldn't happen, but in case it does, catch it...
- logger.log_exc("MODEL POLICY: exception in reset_queries")
+ log.exception("MODEL POLICY: exception in reset_queries", e = e)
- logger.debug("MODEL POLICY: finished run_policy_once()")
+ log.debug("MODEL POLICY: finished run_policy_once()")
diff --git a/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py b/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py
index 71c3400..53f50a5 100644
--- a/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py
+++ b/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py
@@ -24,9 +24,10 @@
from synchronizers.new_base.syncstep import SyncStep
from synchronizers.new_base.ansible_helper import run_template_ssh
from synchronizers.new_base.modelaccessor import *
-from xos.logger import Logger, logging
+from xosconfig import Config
+from multistructlog import create_logger
-logger = Logger(level=logging.INFO)
+log = create_logger(Config().get('logging'))
class SyncInstanceUsingAnsible(SyncStep):
@@ -50,7 +51,7 @@
def defer_sync(self, o, reason):
# zdw, 2017-02-18 - is raising the exception here necessary? - seems like
# it's just logging the same thing twice
- logger.info("defer object %s due to %s" % (str(o), reason), extra=o.tologdict())
+ log.info("defer object", object = str(o), reason = reason, **o.tologdict())
raise Exception("defer object %s due to %s" % (str(o), reason))
def get_extra_attributes(self, o):
@@ -80,7 +81,7 @@
template_name = self.template_name
tStart = time.time()
run_template_ssh(template_name, fields, object=o)
- logger.info("playbook execution time %d" % int(time.time() - tStart), extra=o.tologdict())
+ log.info("playbook execution time", time = int(time.time() - tStart), **o.tologdict())
def pre_sync_hook(self, o, fields):
pass
@@ -187,7 +188,7 @@
return fields
def sync_record(self, o):
- logger.info("sync'ing object %s" % str(o), extra=o.tologdict())
+ log.info("sync'ing object", object = str(o), **o.tologdict())
self.prepare_record(o)
@@ -298,7 +299,7 @@
# in the derived class and override the below handle_watched_object() method to route the notifications
# accordingly
def handle_watched_object(self, o):
- logger.info("handle_watched_object is invoked for object %s" % (str(o)), extra=o.tologdict())
+ log.info("handle_watched_object is invoked for object",object = str(o), **o.tologdict())
if (model_accessor.is_type(o, "ServiceDependency")):
self.handle_service_composition_watch_notification(o)
elif (model_accessor.is_type(o, "ServiceMonitoringAgentInfo")):
@@ -309,7 +310,7 @@
cls_obj = self.observes
if (type(cls_obj) is list):
cls_obj = cls_obj[0]
- logger.info("handle_watched_object observed model %s" % (cls_obj))
+ log.info("handle_watched_object observed model", model = cls_obj)
objs = cls_obj.objects.filter(kind=cls_obj.KIND)
@@ -327,7 +328,7 @@
valid_instance = False
if not valid_instance:
- logger.warn("handle_watched_object: No valid instance found for object %s" % (str(obj)))
+ log.warn("handle_watched_object: No valid instance found for object", object =str(obj))
return
provider_service = coarse_tenant.provider_service
@@ -341,8 +342,8 @@
matched_service = subscriber_service
other_service = provider_service
else:
- logger.info("handle_watched_object: Service object %s does not match with any of composed services" % (
- str(obj)))
+ log.info("handle_watched_object: Service object does not match with any of composed services",
+ object = str(obj))
return
elif model_accessor.is_instance(obj, "Tenant"):
if obj.provider_service.id == provider_service.id:
@@ -352,11 +353,11 @@
matched_service = subscriber_service
other_service = provider_service
else:
- logger.info(
- "handle_watched_object: Tenant object %s does not match with any of composed services" % (str(obj)))
+ log.info(
+ "handle_watched_object: Tenant object does not match with any of composed services", object = str(obj))
return
else:
- logger.warn("handle_watched_object: Model object %s is of neither Service nor Tenant type" % (str(obj)))
+ log.warn("handle_watched_object: Model object is of neither Service nor Tenant type", object = str(obj))
src_networks = matched_service.get_composable_networks()
target_networks = other_service.get_composable_networks()
@@ -374,10 +375,9 @@
fields["target_subnet"] = target_subnet
# Template file is available under .../synchronizers/shared_templates
service_composition_template_name = "sync_service_composition.yaml"
- logger.info(
- "handle_watched_object: Updating routing tables in the instance associated with object %s: target_subnet:%s src_ip:%s" % (
- str(obj), target_subnet, src_ip))
+ log.info(
+ "handle_watched_object: Updating routing tables in the instance associated with object",
+ object = str(obj), subnet = target_subnet, ip = src_ip)
SyncInstanceUsingAnsible.run_playbook(self, obj, fields, service_composition_template_name)
else:
- logger.info("handle_watched_object: No intersection of composable networks between composed services %s" % (
- str(coarse_tenant)))
+ log.info("handle_watched_object: No intersection of composable networks between composed services", object = str(coarse_tenant))
diff --git a/xos/synchronizers/new_base/ansible_helper.py b/xos/synchronizers/new_base/ansible_helper.py
index 517c15e..0491048 100644
--- a/xos/synchronizers/new_base/ansible_helper.py
+++ b/xos/synchronizers/new_base/ansible_helper.py
@@ -28,11 +28,14 @@
import traceback
import subprocess
import threading
-from xosconfig import Config
-from xos.logger import observer_logger as logger
+
from multiprocessing import Process, Queue
from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
+
step_dir = Config.get("steps_dir")
sys_dir = Config.get("sys_dir")
@@ -80,7 +83,7 @@
args_fn = None
result_fn = None
try:
- logger.info("creating args file in %s" % dir)
+ log.info("creating args file",dir = dir)
args_fn = os.path.join(dir, "args")
result_fn = os.path.join(dir, "result")
@@ -94,12 +97,12 @@
result = pickle.loads(open(result_fn).read())
if hasattr(result, "exception"):
- logger.log_error("Exception in playbook: %s" % result["exception"])
+ log.error("Exception in playbook",exception = result["exception"])
stats = result.get("stats", None)
aresults = result.get("aresults", None)
- except:
- logger.log_exc("Exception running ansible_main")
+ except Exception,e:
+ log.exception("Exception running ansible_main", e = e)
stats = None
aresults = None
finally:
@@ -176,7 +179,7 @@
else:
oprops['ansible_status']='FAILED'
- logger.info(x._task, extra=oprops)
+ log.info(x._task, **oprops)
ofile.close()
diff --git a/xos/synchronizers/new_base/ansible_runner.py b/xos/synchronizers/new_base/ansible_runner.py
index f9cf70c..b7595f2 100644
--- a/xos/synchronizers/new_base/ansible_runner.py
+++ b/xos/synchronizers/new_base/ansible_runner.py
@@ -32,7 +32,11 @@
from ansible.executor import playbook_executor
from ansible.utils.display import Display
from ansible.plugins.callback import CallbackBase
-from xos.logger import observer_logger as logger
+
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
class ResultCallback(CallbackBase):
@@ -56,7 +60,7 @@
'ansible_status': "OK",
'ansible_playbook': self.playbook
}
- logger.info("PLAYBOOK START [%s]" % self.playbook, extra=log_extra)
+ log.info("PLAYBOOK START", playbook = self.playbook, **log_extra)
def v2_playbook_on_stats(self, stats):
host_stats = {}
@@ -73,9 +77,9 @@
}
if self.playbook_status == 'OK':
- logger.info("PLAYBOOK END [%s]" % self.playbook, extra=log_extra)
+ log.info("PLAYBOOK END", playbook = self.playbook, **log_extra)
else:
- logger.error("PLAYBOOK END [%s]" % self.playbook, extra=log_extra)
+ log.error("PLAYBOOK END", playbook = self.playbook, **log_extra)
def v2_playbook_on_play_start(self, play):
log_extra = {
@@ -85,7 +89,7 @@
'ansible_status': self.playbook_status,
'ansible_playbook': self.playbook
}
- logger.debug("PLAY START [%s]" % play.name, extra=log_extra)
+ log.debug("PLAY START",play_name = play.name, **log_extra)
def v2_runner_on_ok(self, result, **kwargs):
log_extra = {
@@ -98,7 +102,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.debug("OK [%s]" % str(result._task), extra=log_extra)
+ log.debug("OK", task = str(result._task), **log_extra)
self.results.append(result)
def v2_runner_on_failed(self, result, **kwargs):
@@ -113,7 +117,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.error("FAILED [%s]" % str(result._task), extra=log_extra)
+ log.error("FAILED", task = str(result._task), **log_extra)
self.results.append(result)
def v2_runner_on_async_failed(self, result, **kwargs):
@@ -128,7 +132,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.error("ASYNC FAILED [%s]" % str(result._task), extra=log_extra)
+ log.error("ASYNC FAILED", task = str(result._task), **log_extra)
def v2_runner_on_skipped(self, result, **kwargs):
log_extra = {
@@ -141,7 +145,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.debug("SKIPPED [%s]" % str(result._task), extra=log_extra)
+ log.debug("SKIPPED", task = str(result._task), **log_extra)
self.results.append(result)
def v2_runner_on_unreachable(self, result, **kwargs):
@@ -155,7 +159,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.error("UNREACHABLE [%s]" % str(result._task), extra=log_extra)
+ log.error("UNREACHABLE", task = str(result._task), **log_extra)
self.results.append(result)
def v2_runner_retry(self, result, **kwargs):
@@ -169,7 +173,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.warning("RETRYING [%s] - attempt %d" % (str(result._task), result._result['attempts']), extra=log_extra)
+ log.warning("RETRYING - attempt", task =str(result._task), attempt = result._result['attempts'], **log_extra)
self.results.append(result)
def v2_playbook_on_handler_task_start(self, task, **kwargs):
@@ -180,9 +184,9 @@
'ansible_status': "HANDLER",
'ansible_task': task.get_name().strip(),
'ansible_playbook': self.playbook,
- 'ansible_host': result._host.get_name()
+ # 'ansible_host': result._host.get_name()
}
- logger.debug("HANDLER [%s]" % task.get_name().strip(), extra=log_extra)
+ log.debug("HANDLER", task = task.get_name().strip(), **log_extra)
def v2_playbook_on_import_for_host(self, result, imported_file):
log_extra = {
@@ -194,7 +198,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.debug("IMPORT [%s]" % imported_file, extra=log_extra)
+ log.debug("IMPORT", imported_file =imported_file, **log_extra)
self.results.append(result)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
@@ -207,7 +211,7 @@
'ansible_playbook': self.playbook,
'ansible_host': result._host.get_name()
}
- logger.debug("MISSING IMPORT [%s]" % missing_file, extra=log_extra)
+ log.debug("MISSING IMPORT", missing = missing_file, **log_extra)
self.results.append(result)
class Options(object):
diff --git a/xos/synchronizers/new_base/backend.py b/xos/synchronizers/new_base/backend.py
index 3d3255a..656c3cd 100644
--- a/xos/synchronizers/new_base/backend.py
+++ b/xos/synchronizers/new_base/backend.py
@@ -24,15 +24,17 @@
from synchronizers.new_base.event_loop import XOSObserver
from synchronizers.new_base.model_policy_loop import XOSPolicyEngine
from synchronizers.new_base.modelaccessor import *
-from xos.logger import Logger, logging
+
from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
watchers_enabled = Config.get("enable_watchers")
if (watchers_enabled):
from synchronizers.new_base.watchers import XOSWatcher
-logger = Logger(level=logging.INFO)
class Backend:
@@ -42,7 +44,7 @@
def load_sync_step_modules(self, step_dir):
sync_steps = []
- logger.info("Loading sync steps from %s" % step_dir)
+ log.info("Loading sync steps", step_dir = step_dir)
for fn in os.listdir(step_dir):
pathname = os.path.join(step_dir,fn)
@@ -61,7 +63,7 @@
if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in sync_steps):
sync_steps.append(c)
- logger.info("Loaded %s sync steps" % len(sync_steps))
+ log.info("Loaded sync steps", count = len(sync_steps))
return sync_steps
@@ -87,7 +89,7 @@
watcher_thread = threading.Thread(target=watcher.run,name='watcher')
watcher_thread.start()
else:
- logger.info("Skipping observer and watcher threads due to no steps dir.")
+ log.info("Skipping observer and watcher threads due to no steps dir.")
# start model policies thread
policies_dir = Config.get("model_policies_dir")
@@ -96,7 +98,7 @@
model_policy_thread = threading.Thread(target=policy_engine.run, name="policy_engine")
model_policy_thread.start()
else:
- logger.info("Skipping model policies thread due to no model_policies dir.")
+ log.info("Skipping model policies thread due to no model_policies dir.")
while True:
try:
diff --git a/xos/synchronizers/new_base/backend_modelpolicy.py b/xos/synchronizers/new_base/backend_modelpolicy.py
index df6e183..58ef32f 100644
--- a/xos/synchronizers/new_base/backend_modelpolicy.py
+++ b/xos/synchronizers/new_base/backend_modelpolicy.py
@@ -22,8 +22,11 @@
import time
from syncstep import SyncStep
from synchronizers.new_base.event_loop import XOSObserver
-from xos.logger import Logger, logging
+
from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
watchers_enabled = Config.get("enable_watchers")
@@ -31,8 +34,6 @@
if (watchers_enabled):
from synchronizers.new_base.watchers import XOSWatcher
-logger = Logger(level=logging.INFO)
-
class Backend:
def run(self):
# start model policies thread
@@ -43,7 +44,7 @@
model_policy_thread.start()
else:
model_policy_thread = None
- logger.info("Skipping model policies thread due to no model_policies dir.")
+ log.info("Skipping model policies thread due to no model_policies dir.")
while True:
try:
diff --git a/xos/synchronizers/new_base/dependency_walker_new.py b/xos/synchronizers/new_base/dependency_walker_new.py
index 11c6ce1..fa7e5ed 100644
--- a/xos/synchronizers/new_base/dependency_walker_new.py
+++ b/xos/synchronizers/new_base/dependency_walker_new.py
@@ -30,9 +30,10 @@
from xosconfig import Config
import json
-from xos.logger import Logger, logging
+from xosconfig import Config
+from multistructlog import create_logger
-logger = Logger(level=logging.INFO)
+log = create_logger(Config().get('logging'))
missing_links = {}
@@ -90,7 +91,7 @@
except AttributeError:
if not missing_links.has_key(model + '.' + link):
print "Model %s missing link for dependency %s" % (model, link)
- logger.log_exc("WARNING: Model %s missing link for dependency %s." % (model, link))
+ log.exception("WARNING: Model missing link for dependency.", model = model, link = link)
missing_links[model + '.' + link] = True
if (peer):
diff --git a/xos/synchronizers/new_base/diag.py b/xos/synchronizers/new_base/diag.py
index 900af73..18b54ad 100644
--- a/xos/synchronizers/new_base/diag.py
+++ b/xos/synchronizers/new_base/diag.py
@@ -18,9 +18,9 @@
import json
from xosconfig import Config
-from xos.logger import Logger, logging, logger
+from multistructlog import create_logger
-logger = Logger(level=logging.INFO)
+log = create_logger(Config().get('logging'))
def update_diag(diag_class, loop_end=None, loop_start=None, syncrecord_start=None, sync_start=None,
@@ -54,5 +54,5 @@
diag.backend_register = json.dumps(br)
diag.save()
except:
- logger.log_exc("Exception in update_diag")
+ log.exception("Exception in update_diag")
traceback.print_exc()
diff --git a/xos/synchronizers/new_base/djangoaccessor.py b/xos/synchronizers/new_base/djangoaccessor.py
index f833777..cb0de92 100644
--- a/xos/synchronizers/new_base/djangoaccessor.py
+++ b/xos/synchronizers/new_base/djangoaccessor.py
@@ -25,8 +25,10 @@
from django import setup as django_setup # django 1.7
from django.contrib.contenttypes.models import ContentType
-from xos.logger import Logger, logging
-logger = Logger(level=logging.INFO)
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
class DjangoModelAccessor(ModelAccessor):
def __init__(self):
@@ -82,15 +84,15 @@
except Exception as e:
from django import db
if "connection already closed" in traceback.format_exc():
- logger.error("XXX connection already closed")
+ log.error("XXX connection already closed")
try:
# if db.connection:
# db.connection.close()
db.close_old_connections()
- except:
- logger.log_exc("XXX we failed to fix the failure")
+ except Exception, e:
+ log.exception("XXX we failed to fix the failure", e = e)
else:
- logger.log_exc("XXX some other error")
+ log.exception("XXX some other error")
def obj_exists(self, o):
return (o.pk is not None)
diff --git a/xos/synchronizers/new_base/error_mapper.py b/xos/synchronizers/new_base/error_mapper.py
index 3678629..5617d20 100644
--- a/xos/synchronizers/new_base/error_mapper.py
+++ b/xos/synchronizers/new_base/error_mapper.py
@@ -14,10 +14,10 @@
# limitations under the License.
-from xos.logger import Logger, logging, logger
+from xosconfig import Config
+from multistructlog import create_logger
-logger = Logger(level=logging.INFO)
-
+log = create_logger(Config().get('logging'))
class ErrorMapper:
def __init__(self, error_map_file):
@@ -30,7 +30,7 @@
k, v = map(lambda i: i.rstrip(), splits)
self.error_map[k] = v
except:
- logger.info('Could not read error map')
+ log.info('Could not read error map')
def map(self, error):
return self.error_map[error]
diff --git a/xos/synchronizers/new_base/event_loop.py b/xos/synchronizers/new_base/event_loop.py
index 11ebd7d..91d82f7 100644
--- a/xos/synchronizers/new_base/event_loop.py
+++ b/xos/synchronizers/new_base/event_loop.py
@@ -27,8 +27,12 @@
import pprint
import traceback
from datetime import datetime
-from xos.logger import Logger, logging, logger
+
from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
+
from synchronizers.new_base.steps import *
from syncstep import SyncStep, NullSyncStep
from toposort import toposort
@@ -50,8 +54,6 @@
UNDERLINE = '\033[4m'
-logger = Logger(level=logging.INFO)
-
class StepNotReady(Exception):
pass
@@ -109,14 +111,14 @@
self.event_cond.release()
def wake_up(self):
- logger.info('Wake up routine called. Event cond %r' % self.event_cond)
+ log.info('Wake up routine called. Event cond %r' % self.event_cond)
self.event_cond.acquire()
self.event_cond.notify()
self.event_cond.release()
def load_sync_steps(self):
dep_path = Config.get("dependency_graph")
- logger.info('Loading model dependency graph from %s' % dep_path)
+ log.info('Loading model dependency graph from %s' % dep_path)
try:
# This contains dependencies between records, not sync steps
self.model_dependency_graph = json.loads(open(dep_path).read())
@@ -138,7 +140,7 @@
# FIXME `pl_dependency_graph` is never defined, this will always fail
# NOTE can we remove it?
backend_path = Config.get("pl_dependency_graph")
- logger.info(
+ log.info(
'Loading backend dependency graph from %s' %
backend_path)
# This contains dependencies between backend records
@@ -151,7 +153,7 @@
self.model_dependency_graphp[k] = v
except Exception as e:
- logger.info('Backend dependency graph not loaded')
+ log.info('Backend dependency graph not loaded')
# We can work without a backend graph
self.backend_dependency_graph = {}
@@ -199,22 +201,20 @@
self.deletion_dependency_graph = invert_graph(step_graph)
pp = pprint.PrettyPrinter(indent=4)
- logger.debug(pp.pformat(step_graph))
+ log.debug(pp.pformat(step_graph))
self.ordered_steps = toposort(
self.dependency_graph, phantom_steps + map(lambda s: s.__name__, self.sync_steps))
self.ordered_steps = [
i for i in self.ordered_steps if i != 'SyncObject']
- logger.info("Order of steps=%s" % self.ordered_steps)
-
self.load_run_times()
def check_duration(self, step, duration):
try:
if (duration > step.deadline):
- logger.info(
- 'Sync step %s missed deadline, took %.2f seconds' %
- (step.name, duration))
+ log.info(
+ 'Sync step missed deadline',
+ step_name = step.name, duration = duration)
except AttributeError:
# S doesn't have a deadline
pass
@@ -233,9 +233,9 @@
if (time_since_last_run < step.requested_interval):
raise StepNotReady
except AttributeError:
- logger.info(
- 'Step %s does not have requested_interval set' %
- step.__name__)
+ log.info(
+ 'Step does not have requested_interval set',
+ step_name = step.__name__)
raise StepNotReady
def load_run_times(self):
@@ -306,9 +306,9 @@
step = self.lookup_step_class(S)
start_time = time.time()
- logger.debug(
- "Starting to work on step %s, deletion=%s" %
- (step.__name__, str(deletion)))
+ log.debug(
+ "Starting to work on steps",
+ step_name = step.__name__, deletion = str(deletion))
dependency_graph = self.dependency_graph if not deletion else self.deletion_dependency_graph
# if not deletion else self.deletion_step_conditions
@@ -328,28 +328,16 @@
if (has_deps):
for d in deps:
if d == step.__name__:
- logger.debug(
- " step %s self-wait skipped" %
- step.__name__)
go = True
continue
cond = step_conditions[d]
cond.acquire()
if (step_status[d] is STEP_STATUS_WORKING):
- logger.debug(
- " step %s wait on dep %s" %
- (step.__name__, d))
cond.wait()
- logger.debug(
- " step %s wait on dep %s cond returned" %
- (step.__name__, d))
elif step_status[d] == STEP_STATUS_OK:
go = True
else:
- logger.debug(
- " step %s has failed dep %s" %
- (step.__name__, d))
go = False
failed_dep = d
cond.release()
@@ -359,7 +347,6 @@
go = True
if (not go):
- logger.debug("Step %s skipped" % step.__name__)
self.failed_steps.append(step)
my_status = STEP_STATUS_KO
else:
@@ -392,14 +379,10 @@
self.check_schedule(sync_step, deletion)
should_run = True
except StepNotReady:
- logger.info('Step not ready: %s' % sync_step.__name__)
self.failed_steps.append(sync_step)
my_status = STEP_STATUS_KO
except Exception as e:
- logger.error('%r' % e)
- logger.log_exc(
- "sync step failed: %r. Deletion: %r" %
- (sync_step, deletion))
+ log.error('%r' % e)
self.failed_steps.append(sync_step)
my_status = STEP_STATUS_KO
@@ -407,10 +390,6 @@
try:
duration = time.time() - start_time
- logger.debug(
- 'Executing step %s, deletion=%s' %
- (sync_step.__name__, deletion))
-
failed_objects = sync_step(
failed=list(
self.failed_step_objects),
@@ -421,20 +400,12 @@
if failed_objects:
self.failed_step_objects.update(failed_objects)
- logger.debug(
- "Step %r succeeded, deletion=%s" %
- (sync_step.__name__, deletion))
my_status = STEP_STATUS_OK
self.update_run_time(sync_step, deletion)
except Exception as e:
- logger.error(
- 'Model step %r failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' %
- (sync_step.__name__, e))
- logger.log_exc("Exception in sync step")
self.failed_steps.append(S)
my_status = STEP_STATUS_KO
else:
- logger.info("Step %r succeeded due to non-run" % step)
my_status = STEP_STATUS_OK
try:
@@ -444,14 +415,13 @@
my_cond.notify_all()
my_cond.release()
except KeyError as e:
- logger.debug('Step %r is a leaf' % step)
pass
finally:
try:
model_accessor.reset_queries()
except:
# this shouldn't happen, but in case it does, catch it...
- logger.log_exc("exception in reset_queries")
+ log.exception("exception in reset_queries")
model_accessor.connection_close()
@@ -460,9 +430,9 @@
return
while True:
- logger.debug('Waiting for event')
+ log.debug('Waiting for event')
self.wait_for_event(timeout=5)
- logger.debug('Observer woke up')
+ log.debug('Observer woke up')
self.run_once()
@@ -501,7 +471,7 @@
self.failed_steps = []
threads = []
- logger.debug('Deletion=%r...' % deletion)
+ log.debug('Deletion', deletion =deletion)
schedule = self.ordered_steps if not deletion else reversed(
self.ordered_steps)
@@ -510,7 +480,7 @@
target=self.sync, name='synchronizer', args=(
S, deletion))
- logger.debug('Deletion=%r...' % deletion)
+ log.debug('Deletion', deletion =deletion)
threads.append(thread)
# Start threads
@@ -522,7 +492,7 @@
model_accessor.reset_queries()
except:
# this shouldn't happen, but in case it does, catch it...
- logger.log_exc("exception in reset_queries")
+ log.exception("exception in reset_queries")
# Wait for all threads to finish before continuing with the run
# loop
@@ -539,9 +509,5 @@
backend_status="1 - Bottom Of Loop")
except Exception as e:
- logger.error(
- 'Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' %
- e)
- logger.log_exc("Exception in observer run loop")
traceback.print_exc()
model_accessor.update_diag(backend_status="2 - Exception in Event Loop")
diff --git a/xos/synchronizers/new_base/model_policy_loop.py b/xos/synchronizers/new_base/model_policy_loop.py
index d03400d..6feb65f 100644
--- a/xos/synchronizers/new_base/model_policy_loop.py
+++ b/xos/synchronizers/new_base/model_policy_loop.py
@@ -17,14 +17,16 @@
from synchronizers.new_base.modelaccessor import *
from synchronizers.new_base.dependency_walker_new import *
from synchronizers.new_base.policy import Policy
-from xos.logger import Logger, logging
import imp
import pdb
import time
import traceback
-logger = Logger(level=logging.DEBUG)
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
class XOSPolicyEngine(object):
def __init__(self, policies_dir):
@@ -62,10 +64,10 @@
if (save_fields):
d.save(update_fields=save_fields)
except AttributeError,e:
- logger.log_exc("AttributeError in update_dep")
+ log.exception("AttributeError in update_dep", e = e)
raise e
except Exception,e:
- logger.log_exc("Exception in update_dep")
+ log.exception("Exception in update_dep", e = e)
def delete_if_inactive(self, d, o):
try:
@@ -94,14 +96,14 @@
if inspect.isclass(c) and issubclass(c, Policy) and hasattr(c, "model_name") and (
c not in policies):
if not c.model_name:
- logger.info("load_model_policies: skipping model policy %s" % classname)
+ log.info("load_model_policies: skipping model policy", classname =classname)
continue
if not model_accessor.has_model_class(c.model_name):
- logger.error("load_model_policies: unable to find policy %s model %s" % (classname, c.model_name))
+ log.error("load_model_policies: unable to find model policy", classname = classname, model = c.model_name)
c.model = model_accessor.get_model_class(c.model_name)
policies.append(c)
- logger.info("Loaded %s model policies" % len(policies))
+ log.info("Loaded model policies", count = len(policies))
return policies
def execute_model_policy(self, instance, action):
@@ -121,18 +123,18 @@
method_name= "handle_%s" % action
if hasattr(policy, method_name):
try:
- logger.debug("MODEL POLICY: calling handler %s %s %s %s" % (sender_name, instance, policy.__name__, method_name))
+ log.debug("MODEL POLICY: calling handler",sender_name = sender_name, instance = instance, policy = policy.__name__, method = method_name)
getattr(policy(), method_name)(instance)
- logger.debug("MODEL POLICY: completed handler %s %s %s %s" % (sender_name, instance, policy.__name__, method_name))
- except:
- logger.log_exc("MODEL POLICY: Exception when running handler")
+ log.debug("MODEL POLICY: completed handler",sender_name = sender_name, instance = instance, policy_name = policy.__name__, method = method_name)
+ except Exception,e:
+ log.exception("MODEL POLICY: Exception when running handler", e = e)
policies_failed = True
try:
instance.policy_status = "2 - %s" % traceback.format_exc(limit=1)
instance.save(update_fields=["policy_status"])
- except:
- logger.log_exc("MODEL_POLICY: Exception when storing policy_status")
+ except Exception,e:
+ log.exception("MODEL_POLICY: Exception when storing policy_status", e = e)
if not policies_failed:
try:
@@ -140,7 +142,7 @@
instance.policy_status = "1 - done"
instance.save(update_fields=['policed', 'policy_status'])
except:
- logger.log_exc('MODEL POLICY: Object %r failed to update policed timestamp' % instance)
+ log.exception('MODEL POLICY: Object failed to update policed timestamp', instance =instance)
def noop(self, o,p):
pass
@@ -150,8 +152,8 @@
start = time.time()
try:
self.run_policy_once()
- except:
- logger.log_exc("MODEL_POLICY: Exception in run()")
+ except Exception,e:
+ log.exception("MODEL_POLICY: Exception in run()", e = e)
if (time.time() - start < 5):
time.sleep(5)
@@ -161,7 +163,7 @@
def run_policy_once(self):
models = self.policies_by_class.keys()
- logger.debug("MODEL POLICY: run_policy_once()")
+ log.debug("MODEL POLICY: run_policy_once()")
model_accessor.check_db_connection_okay()
@@ -182,8 +184,8 @@
try:
model_accessor.reset_queries()
- except:
+ except Exception,e:
# this shouldn't happen, but in case it does, catch it...
- logger.log_exc("MODEL POLICY: exception in reset_queries")
+ log.exception("MODEL POLICY: exception in reset_queries", e = e)
- logger.debug("MODEL POLICY: finished run_policy_once()")
+ log.debug("MODEL POLICY: finished run_policy_once()")
diff --git a/xos/synchronizers/new_base/modelaccessor.py b/xos/synchronizers/new_base/modelaccessor.py
index b53d9d8..7e4dcd4 100644
--- a/xos/synchronizers/new_base/modelaccessor.py
+++ b/xos/synchronizers/new_base/modelaccessor.py
@@ -30,9 +30,10 @@
from xosconfig import Config
from diag import update_diag
-from xos.logger import Logger, logging
+from xosconfig import Config
+from multistructlog import create_logger
-logger = Logger(level=logging.INFO)
+log = create_logger(Config().get('logging'))
orig_sigint = None
model_accessor = None
@@ -140,11 +141,11 @@
try:
client.utility.NoOp(Empty())
- except:
+ except Exception,e:
# If we caught an exception, then the API has become unavailable.
# So reconnect.
- logger.log_exc("exception in NoOp")
+ log.exception("exception in NoOp", e = e)
client.connected = False
client.connect()
return
@@ -177,9 +178,9 @@
else:
missing.append(model)
- logger.info("required_models: found %s" % ", ".join(found))
+ log.info("required_models, found:", models = ", ".join(found))
if missing:
- logger.warning("required_models: missing %s" % ", ".join(missing))
+ log.warning("required_models: missing",models = ", ".join(missing))
# We're missing a required model. Give up and wait for the connection
# to reconnect, and hope our missing model has shown up.
reactor.callLater(1, functools.partial(keep_trying, client, reactor))
diff --git a/xos/synchronizers/new_base/policy.py b/xos/synchronizers/new_base/policy.py
index 5877d94..fa01902 100644
--- a/xos/synchronizers/new_base/policy.py
+++ b/xos/synchronizers/new_base/policy.py
@@ -19,7 +19,10 @@
Base Classes for Model Policies
"""
-from xos.logger import Logger, logging
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
class Policy(object):
""" An XOS Model Policy
@@ -34,5 +37,5 @@
"""
def __init__(self):
- self.logger = Logger(level=logging.DEBUG)
+ self.logger = log
diff --git a/xos/synchronizers/new_base/syncstep.py b/xos/synchronizers/new_base/syncstep.py
index a67186a..00f103e 100644
--- a/xos/synchronizers/new_base/syncstep.py
+++ b/xos/synchronizers/new_base/syncstep.py
@@ -17,7 +17,6 @@
import os
import base64
from xosconfig import Config
-from xos.logger import Logger, logging
from synchronizers.new_base.modelaccessor import *
from synchronizers.new_base.ansible_helper import run_template
@@ -25,7 +24,10 @@
import time
import pdb
-logger = Logger(level=logging.DEBUG)
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
def f7(seq):
@@ -147,7 +149,7 @@
str(getattr(peer_object, "pk", "no_pk")), obj_class_name(failed), str(getattr(failed, "pk", "no_pk"))))
def sync_record(self, o):
- logger.debug("Sync_record called for %s %s" % (obj_class_name(o), str(o)))
+ log.debug("Sync_record called for", class_name = obj_class_name(o), object = str(o))
# try:
# controller = o.get_controller()
@@ -208,9 +210,9 @@
# another spot to clean up debug state
try:
model_accessor.reset_queries()
- except:
+ except Exception,e:
# this shouldn't happen, but in case it does, catch it...
- logger.log_exc("exception in reset_queries", extra=o.tologdict())
+ log.exception("exception in reset_queries", e = e,**o.tologdict())
sync_failed = False
@@ -222,8 +224,8 @@
next_run = scratchpad['next_run']
if (not backoff_disabled and next_run > time.time()):
sync_failed = True
- except:
- logger.log_exc("Exception while loading scratchpad", extra=o.tologdict())
+ except Exception,e:
+ log.exception("Exception while loading scratchpad", e = e, **o.tologdict())
pass
if (not sync_failed):
@@ -264,9 +266,9 @@
o.backend_status = "1 - OK"
model_accessor.journal_object(o, "syncstep.call.save_update")
o.save(update_fields=['enacted', 'backend_status', 'backend_register'])
- logger.info("save sync object, new enacted = %s" % str(new_enacted))
+ log.info("save sync object, new enacted registered", enacted = str(new_enacted))
except (InnocuousException, Exception, DeferredException) as e:
- logger.log_exc("sync step failed!", extra=o.tologdict())
+ log.exception("sync step failed!", **o.tologdict())
try:
if (o.backend_status.startswith('2 - ')):
str_e = '%s // %r' % (o.backend_status[4:], e)
@@ -289,8 +291,8 @@
try:
scratchpad = json.loads(o.backend_register)
scratchpad['exponent']
- except:
- logger.log_exc("Exception while updating scratchpad", extra=o.tologdict())
+ except Exception,e:
+ log.exception("Exception while updating scratchpad", e = e, **o.tologdict())
scratchpad = {'next_run': 0, 'exponent': 0, 'last_success': time.time(), 'failures': 0}
# Second failure
diff --git a/xos/synchronizers/new_base/watchers.py b/xos/synchronizers/new_base/watchers.py
index 5cda613..377ac3b 100644
--- a/xos/synchronizers/new_base/watchers.py
+++ b/xos/synchronizers/new_base/watchers.py
@@ -28,13 +28,15 @@
from datetime import datetime
from collections import defaultdict
-from xos.logger import Logger, logging, logger
-from xosconfig import Config
+
from syncstep import SyncStep
from synchronizers.new_base.error_mapper import *
import redis
-logger = Logger(level=logging.INFO)
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
class XOSWatcher:
@@ -77,7 +79,7 @@
self.redis = r
self.pubsub = self.redis.pubsub()
self.pubsub.subscribe(channels)
- logger.info("XOS watcher initialized")
+ log.info("XOS watcher initialized")
def run(self):
for item in self.pubsub.listen():
@@ -94,5 +96,5 @@
step = w.source()
step.handle_watched_object(o)
except Exception as e:
- logger.log_exc("XOS watcher: exception %s while processing object: %s" % (type(e), e))
+ log.exception("XOS watcher: exception while processing object", e = e)
pass
diff --git a/xos/synchronizers/new_base/xos-policy.py b/xos/synchronizers/new_base/xos-policy.py
index b24e30a..1452101 100644
--- a/xos/synchronizers/new_base/xos-policy.py
+++ b/xos/synchronizers/new_base/xos-policy.py
@@ -31,12 +31,15 @@
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
from xosconfig import Config
-from xos.logger import Logger, logging, logger
+
import time
from synchronizers.new_base.model_policy_loop import XOSPolicyEngine
from synchronizers.new_base.modelaccessor import *
-logger = Logger(level=logging.INFO)
+from xosconfig import Config
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
def main():
@@ -48,8 +51,8 @@
_ = NetworkTemplate.objects.first()
models_active = True
except Exception,e:
- logger.info(str(e))
- logger.info('Waiting for data model to come up before starting...')
+ log.exception("Exception", e = e)
+ log.info('Waiting for data model to come up before starting...')
time.sleep(10)
wait = True
diff --git a/xos/synchronizers/new_base/xos-synchronizer.py b/xos/synchronizers/new_base/xos-synchronizer.py
index f374d10..fd824e1 100644
--- a/xos/synchronizers/new_base/xos-synchronizer.py
+++ b/xos/synchronizers/new_base/xos-synchronizer.py
@@ -23,14 +23,14 @@
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
from xosconfig import Config
-from xos.logger import Logger, logging, logger
+from multistructlog import create_logger
+
+log = create_logger(Config().get('logging'))
import time
from synchronizers.new_base.modelaccessor import *
from synchronizers.new_base.backend import Backend
-logger = Logger(level=logging.INFO)
-
def main():
models_active = False
@@ -41,8 +41,8 @@
_ = NetworkTemplate.objects.first()
models_active = True
except Exception,e:
- logger.info(str(e))
- logger.info('Waiting for data model to come up before starting...')
+ log.exception("Exception", e = e)
+ log.info('Waiting for data model to come up before starting...')
time.sleep(10)
wait = True
diff --git a/xos/xos_client/xosapi/xos_grpc_client.py b/xos/xos_client/xosapi/xos_grpc_client.py
index f8815ed..38f91c2 100644
--- a/xos/xos_client/xosapi/xos_grpc_client.py
+++ b/xos/xos_client/xosapi/xos_grpc_client.py
@@ -189,20 +189,6 @@
return args
-def setup_logging(args):
- import logging
- import structlog
-
- verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
- logging.basicConfig()
- logger = logging.getLogger()
- logger.setLevel(logging.DEBUG - 10*verbosity_adjust)
-
- def logger_factory():
- return logger
-
- structlog.configure(logger_factory=logger_factory)
-
def coreclient_reconnect(client, reconnect_callback, *args, **kwargs):
global coreapi
@@ -233,8 +219,6 @@
args = parse_args()
- setup_logging(args)
-
if args.username:
start_api(reconnect_callback, endpoint=args.grpc_secure_endpoint, username=args.username, password=args.password)
else: