[SEBA-366], [SEBA-378]
Fix issues with XOS logging
- Pass the logging object as a parameter in various places, adding structured
  context.
- Update multistructlog to print the level and timestamp on messages, and
  fix an issue with unprinted messages in the synchronizer event loop.
Change-Id: I92c67b4f1266d80bdd86d45c8c99585b286ac059
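
A minimal sketch of the logger-passing pattern this change introduces, using
plain structlog (which multistructlog wraps). The class and step names below
are illustrative only, not the actual XOS synchronizer classes:

    import structlog

    class Observer(object):
        def __init__(self, sync_steps, log):
            # The logger is handed in by the caller instead of being created
            # at module import time, so all components share one configuration.
            self.log = log
            self.sync_steps = sync_steps

        def run(self):
            for step in self.sync_steps:
                # new() drops any previously bound context and binds fresh
                # per-step context, mirroring the step.log = sr_log.new(step=step)
                # calls in event_loop.py.
                step_log = self.log.new(step=step)
                step_log.info("running step")

    if __name__ == "__main__":
        # The entry point creates one logger and binds global context once,
        # as xos-synchronizer.py does with log.bind(synchronizer_name=...).
        log = structlog.get_logger().bind(synchronizer_name="example")
        Observer(["step_a", "step_b"], log).run()
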
diff --git a/VERSION b/VERSION
index 26412a8..a0b09d3 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.1.24
+2.1.25
diff --git a/containers/chameleon/Dockerfile.chameleon b/containers/chameleon/Dockerfile.chameleon
index c93226d..8f27a20 100644
--- a/containers/chameleon/Dockerfile.chameleon
+++ b/containers/chameleon/Dockerfile.chameleon
@@ -13,7 +13,7 @@
# limitations under the License.
# xosproject/chameleon
-FROM xosproject/xos-base:2.1.24
+FROM xosproject/xos-base:2.1.25
# xos-base already has protoc and dependencies installed
diff --git a/containers/xos/Dockerfile.client b/containers/xos/Dockerfile.client
index 98a0061..9b446f3 100644
--- a/containers/xos/Dockerfile.client
+++ b/containers/xos/Dockerfile.client
@@ -13,7 +13,7 @@
# limitations under the License.
# xosproject/xos-client
-FROM xosproject/xos-libraries:2.1.24
+FROM xosproject/xos-libraries:2.1.25
# Install XOS client
COPY xos/xos_client /tmp/xos_client
diff --git a/containers/xos/Dockerfile.libraries b/containers/xos/Dockerfile.libraries
index 7fc933e..766cca7 100644
--- a/containers/xos/Dockerfile.libraries
+++ b/containers/xos/Dockerfile.libraries
@@ -13,7 +13,7 @@
# limitations under the License.
# xosproject/xos-libraries
-FROM xosproject/xos-base:2.1.24
+FROM xosproject/xos-base:2.1.25
# Add libraries
COPY lib /opt/xos/lib
diff --git a/containers/xos/Dockerfile.synchronizer-base b/containers/xos/Dockerfile.synchronizer-base
index b3ead3b..e00018f 100644
--- a/containers/xos/Dockerfile.synchronizer-base
+++ b/containers/xos/Dockerfile.synchronizer-base
@@ -13,7 +13,7 @@
# limitations under the License.
# xosproject/xos-synchronizer-base
-FROM xosproject/xos-client:2.1.24
+FROM xosproject/xos-client:2.1.25
COPY xos/synchronizers/new_base /opt/xos/synchronizers/new_base
COPY xos/xos/logger.py /opt/xos/xos/logger.py
diff --git a/containers/xos/Dockerfile.xos-core b/containers/xos/Dockerfile.xos-core
index 3dc2e7f..bb1a7e7 100644
--- a/containers/xos/Dockerfile.xos-core
+++ b/containers/xos/Dockerfile.xos-core
@@ -13,7 +13,7 @@
# limitations under the License.
# xosproject/xos-core
-FROM xosproject/xos-libraries:2.1.24
+FROM xosproject/xos-libraries:2.1.25
# Install XOS
ADD xos /opt/xos
diff --git a/containers/xos/pip_requirements.txt b/containers/xos/pip_requirements.txt
index 8b0c289..af3f2ae 100644
--- a/containers/xos/pip_requirements.txt
+++ b/containers/xos/pip_requirements.txt
@@ -56,7 +56,7 @@
mock==2.0.0
monotonic==1.5
msgpack==0.5.6
-multistructlog==2.0.0
+multistructlog==2.1.0
munch==2.3.2
netaddr==0.7.19
netifaces==0.10.7
diff --git a/scripts/xos_dev_reqs.txt b/scripts/xos_dev_reqs.txt
index 1b83b3a..86d84a0 100644
--- a/scripts/xos_dev_reqs.txt
+++ b/scripts/xos_dev_reqs.txt
@@ -8,7 +8,7 @@
grpcio-tools==1.12.0
grpcio==1.12.0
ipaddress==1.0.19
-multistructlog==2.0.0
+multistructlog==2.1.0
netaddr==0.7.19
networkx==1.11
nose2==0.7.4
diff --git a/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py b/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py
index 2120e0e..7b52a8d 100644
--- a/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py
+++ b/xos/synchronizers/new_base/SyncInstanceUsingAnsible.py
@@ -21,13 +21,10 @@
import base64
import time
from xosconfig import Config
+
from synchronizers.new_base.syncstep import SyncStep, DeferredException
from synchronizers.new_base.ansible_helper import run_template_ssh
from synchronizers.new_base.modelaccessor import *
-from xosconfig import Config
-from multistructlog import create_logger
-
-log = create_logger(Config().get('logging'))
class SyncInstanceUsingAnsible(SyncStep):
@@ -51,7 +48,7 @@
def defer_sync(self, o, reason):
# zdw, 2017-02-18 - is raising the exception here necessary? - seems like
# it's just logging the same thing twice
- log.info("defer object", object = str(o), reason = reason, **o.tologdict())
+ self.log.info("defer object", object = str(o), reason = reason, **o.tologdict())
raise DeferredException("defer object %s due to %s" % (str(o), reason))
def get_extra_attributes(self, o):
@@ -81,7 +78,7 @@
template_name = self.template_name
tStart = time.time()
run_template_ssh(template_name, fields, object=o)
- log.info("playbook execution time", time = int(time.time() - tStart), **o.tologdict())
+ self.log.info("playbook execution time", time = int(time.time() - tStart), **o.tologdict())
def pre_sync_hook(self, o, fields):
pass
@@ -188,7 +185,7 @@
return fields
def sync_record(self, o):
- log.info("sync'ing object", object = str(o), **o.tologdict())
+ self.log.info("sync'ing object", object = str(o), **o.tologdict())
self.prepare_record(o)
diff --git a/xos/synchronizers/new_base/backend.py b/xos/synchronizers/new_base/backend.py
index 1d8df0f..00e43f2 100644
--- a/xos/synchronizers/new_base/backend.py
+++ b/xos/synchronizers/new_base/backend.py
@@ -50,6 +50,9 @@
sys_path_save = sys.path
sys.path.append(step_dir)
module = imp.load_source(fn[:-3],pathname)
+
+ self.log.debug("Loaded file: %s", pathname)
+
# reset the original path
sys.path = sys_path_save
@@ -90,7 +93,7 @@
if len(sync_steps) > 0:
# start the observer
self.log.info("Starting XOSObserver", sync_steps=sync_steps)
- observer = XOSObserver(sync_steps, log = self.log)
+ observer = XOSObserver(sync_steps, self.log)
observer_thread = threading.Thread(target=observer.run,name='synchronizer')
observer_thread.start()
@@ -110,7 +113,7 @@
event_steps_dir = Config.get("event_steps_dir")
if event_steps_dir:
self.log.info("Starting XOSEventEngine", event_steps_dir=event_steps_dir)
- event_engine = XOSEventEngine()
+ event_engine = XOSEventEngine(self.log)
event_engine.load_event_step_modules(event_steps_dir)
event_engine.start()
else:
diff --git a/xos/synchronizers/new_base/event_engine.py b/xos/synchronizers/new_base/event_engine.py
index 8f43e2f..acbc57d 100644
--- a/xos/synchronizers/new_base/event_engine.py
+++ b/xos/synchronizers/new_base/event_engine.py
@@ -20,9 +20,6 @@
import threading
import time
from xosconfig import Config
-from multistructlog import create_logger
-
-log = create_logger(Config().get('logging'))
class XOSKafkaMessage():
@@ -48,11 +45,12 @@
function is called for each event.
"""
- def __init__(self, step, bootstrap_servers, *args, **kwargs):
+ def __init__(self, step, bootstrap_servers, log, *args, **kwargs):
super(XOSKafkaThread, self).__init__(*args, **kwargs)
self.consumer = None
self.step = step
self.bootstrap_servers = bootstrap_servers
+ self.log = log
self.daemon = True
def create_kafka_consumer(self):
@@ -73,10 +71,10 @@
raise Exception("Both topics and pattern are defined for step %s. Choose one." %
self.step.__name__)
- log.info("Waiting for events",
- topic=self.step.topics,
- pattern=self.step.pattern,
- step=self.step.__name__)
+ self.log.info("Waiting for events",
+ topic=self.step.topics,
+ pattern=self.step.pattern,
+ step=self.step.__name__)
while True:
try:
@@ -91,13 +89,13 @@
self.consumer.subscribe(self.step.pattern)
except confluent_kafka.KafkaError._ALL_BROKERS_DOWN, e:
- log.warning("No brokers available on %s, %s" % (self.bootstrap_servers, e))
+ self.log.warning("No brokers available on %s, %s" % (self.bootstrap_servers, e))
time.sleep(20)
continue
except confluent_kafka.KafkaError, e:
# Maybe Kafka has not started yet. Log the exception and try again in a second.
- log.exception("Exception in kafka loop: %s" % e)
+ self.log.exception("Exception in kafka loop: %s" % e)
time.sleep(1)
continue
@@ -109,25 +107,25 @@
if msg.error():
if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:
- log.debug("Reached end of kafka topic %s, partition: %s, offset: %d" %
+ self.log.debug("Reached end of kafka topic %s, partition: %s, offset: %d" %
(msg.topic(), msg.partition(), msg.offset()))
else:
- log.exception("Error in kafka message: %s" % msg.error())
+ self.log.exception("Error in kafka message: %s" % msg.error())
else:
# wrap parsing the event in a class
event_msg = XOSKafkaMessage(msg)
- log.info("Processing event", event_msg=event_msg, step=self.step.__name__)
+ self.log.info("Processing event", event_msg=event_msg, step=self.step.__name__)
try:
- self.step(log=log).process_event(event_msg)
+ self.step(log=self.log).process_event(event_msg)
except:
- log.exception("Exception in event step", event_msg=event_msg, step=self.step.__name__)
+ self.log.exception("Exception in event step", event_msg=event_msg, step=self.step.__name__)
-class XOSEventEngine:
+class XOSEventEngine(object):
""" XOSEventEngine
Subscribe to and handle processing of events. Two methods are defined:
@@ -139,13 +137,14 @@
will be called before start().
"""
- def __init__(self):
+ def __init__(self, log):
self.event_steps = []
self.threads = []
+ self.log = log
def load_event_step_modules(self, event_step_dir):
self.event_steps = []
- log.info("Loading event steps", event_step_dir=event_step_dir)
+ self.log.info("Loading event steps", event_step_dir=event_step_dir)
# NOTE we'll load all the classes that inherit from EventStep
for fn in os.listdir(event_step_dir):
@@ -160,28 +159,28 @@
base_names = [b.__name__ for b in c.__bases__]
if 'EventStep' in base_names:
self.event_steps.append(c)
- log.info("Loaded event steps", steps=self.event_steps)
+ self.log.info("Loaded event steps", steps=self.event_steps)
def start(self):
eventbus_kind = Config.get("event_bus.kind")
eventbus_endpoint = Config.get("event_bus.endpoint")
if not eventbus_kind:
- log.error("Eventbus kind is not configured in synchronizer config file.")
+ self.log.error("Eventbus kind is not configured in synchronizer config file.")
return
if eventbus_kind not in ["kafka"]:
- log.error("Eventbus kind is set to a technology we do not implement.", eventbus_kind=eventbus_kind)
+ self.log.error("Eventbus kind is set to a technology we do not implement.", eventbus_kind=eventbus_kind)
return
if not eventbus_endpoint:
- log.error("Eventbus endpoint is not configured in synchronizer config file.")
+ self.log.error("Eventbus endpoint is not configured in synchronizer config file.")
return
for step in self.event_steps:
if step.technology == "kafka":
- thread = XOSKafkaThread(step=step, bootstrap_servers=[eventbus_endpoint])
+ thread = XOSKafkaThread(step, [eventbus_endpoint], self.log)
thread.start()
self.threads.append(thread)
else:
- log.error("Unknown technology. Skipping step", technology=step.technology, step=step.__name__)
+ self.log.error("Unknown technology. Skipping step", technology=step.technology, step=step.__name__)
diff --git a/xos/synchronizers/new_base/event_loop.py b/xos/synchronizers/new_base/event_loop.py
index 5605648..9c2479a 100644
--- a/xos/synchronizers/new_base/event_loop.py
+++ b/xos/synchronizers/new_base/event_loop.py
@@ -17,18 +17,13 @@
# - 2 sets of Instance, ControllerSlice, ControllerNetworks - delete and create case
import time
-import sys
import threading
import json
-import pprint
-import traceback
from collections import defaultdict
-from networkx import DiGraph, dfs_edges, weakly_connected_component_subgraphs, all_shortest_paths, NetworkXNoPath
+from networkx import DiGraph, weakly_connected_component_subgraphs, all_shortest_paths, NetworkXNoPath
from networkx.algorithms.dag import topological_sort
-from datetime import datetime
-from xosconfig import Config
from synchronizers.new_base.steps import *
from syncstep import InnocuousException, DeferredException, SyncStep
from synchronizers.new_base.modelaccessor import *
@@ -38,7 +33,6 @@
log = create_logger(Config().get('logging'))
-
class StepNotReady(Exception):
pass
@@ -68,12 +62,13 @@
DRIVER = x
-class XOSObserver:
+class XOSObserver(object):
sync_steps = []
def __init__(self, sync_steps, log=log):
# The Condition object via which events are received
self.log = log
+
self.step_lookup = {}
self.sync_steps = sync_steps
self.load_sync_steps()
@@ -103,7 +98,7 @@
self.log.trace('Loading model dependency graph', path=Config.get("dependency_graph"))
dep_graph_str = open(Config.get("dependency_graph")).read()
else:
- self.log.trace('Using defualt model dependency graph', graph={})
+ self.log.trace('Using default model dependency graph', graph={})
dep_graph_str = '{}'
# joint_dependencies is of the form { Model1 -> [(Model2, src_port, dst_port), ...] }
@@ -176,9 +171,13 @@
else:
logdict = {}
- log.error("exception in reset_queries", **logdict)
+ self.log.error("exception in reset_queries", **logdict)
- def delete_record(self, o, log):
+ def delete_record(self, o, dr_log = None):
+
+ if dr_log is None:
+ dr_log = self.log
+
if getattr(o, "backend_need_reap", False):
# the object has already been deleted and marked for reaping
model_accessor.journal_object(
@@ -189,19 +188,20 @@
raise ExternalDependencyFailed
model_accessor.journal_object(o, "syncstep.call.delete_record")
- log.debug("Deleting object", **o.tologdict())
- step.log = log.bind(step=step)
+ dr_log.debug("Deleting object", **o.tologdict())
+
+ step.log = dr_log.new(step=step)
step.delete_record(o)
- step.log = self.log
+ step.log = dr_log
- log.debug("Deleted object", **o.tologdict())
+ dr_log.debug("Deleted object", **o.tologdict())
model_accessor.journal_object(o, "syncstep.call.delete_set_reap")
o.backend_need_reap = True
o.save(update_fields=['backend_need_reap'])
- def sync_record(self, o, log):
+ def sync_record(self, o, sr_log=None):
try:
step = o.synchronizer_step
except AttributeError:
@@ -210,6 +210,9 @@
if step is None:
raise ExternalDependencyFailed
+ if sr_log is None:
+ sr_log = self.log
+
# Mark this as an object that will require delete. Do
# this now rather than after the syncstep,
if not (o.backend_need_delete):
@@ -218,13 +221,13 @@
model_accessor.journal_object(o, "syncstep.call.sync_record")
- log.debug("Syncing object", **o.tologdict())
+ sr_log.debug("Syncing object", **o.tologdict())
- step.log = log.bind(step=step)
+ step.log = sr_log.new(step=step)
step.sync_record(o)
- step.log = self.log
+ step.log = sr_log
- log.debug("Synced object", **o.tologdict())
+ sr_log.debug("Synced object", **o.tologdict())
o.enacted = max(o.updated, o.changed_by_policy)
scratchpad = {'next_run': 0, 'exponent': 0,
@@ -237,11 +240,11 @@
'backend_register', 'backend_code'])
if hasattr(step, "after_sync_save"):
- step.log = log.bind(step=step)
+ step.log = sr_log.new(step=step)
step.after_sync_save(o)
- step.log = self.log
+ step.log = sr_log
- log.info("Saved sync object", o=o)
+ sr_log.info("Saved sync object", o=o)
""" This function needs a cleanup. FIXME: Rethink backend_status, backend_register """
@@ -328,10 +331,12 @@
def sync_cohort(self, cohort, deletion):
threading.current_thread().is_sync_thread=True
- log = self.log.bind(thread_id=threading.current_thread().ident)
+
+ sc_log = self.log.new(thread_id=threading.current_thread().ident)
+
try:
start_time = time.time()
- log.debug(
+ sc_log.debug(
"Starting to work on cohort",
cohort=cohort,
deletion=deletion)
@@ -354,9 +359,9 @@
try:
if (deletion):
- self.delete_record(o, log)
+ self.delete_record(o, sc_log)
else:
- self.sync_record(o, log)
+ self.sync_record(o, sc_log)
except ExternalDependencyFailed:
dependency_error = 'External dependency on object %s id %d not met' % (
o.leaf_model_name, o.id)
@@ -366,7 +371,7 @@
o, e)
except StopIteration:
- log.debug(
+ sc_log.debug(
"Cohort completed",
cohort=cohort,
deletion=deletion)
@@ -458,13 +463,13 @@
for step_class in step_list:
step = step_class(driver=self.driver)
- step.log = self.log.bind(step=step)
+ step.log = self.log.new(step=step)
if not hasattr(step, 'call'):
pending = step.fetch_pending(deletion)
for obj in pending:
step = step_class(driver=self.driver)
- step.log = self.log.bind(step=step)
+ step.log = self.log.new(step=step)
obj.synchronizer_step = step
pending_service_dependencies = self.compute_service_instance_dependencies(
@@ -592,7 +597,7 @@
except AttributeError as e:
if sa!='fake_accessor':
self.log.debug(
- 'Could not check object dependencies, making conservative choice', src_object=src_object, sa=sa, o1=o1, o2=o2)
+ 'Could not check object dependencies, making conservative choice %s', e, src_object=src_object, sa=sa, o1=o1, o2=o2)
return True, edge_type
src_object = dst_object
diff --git a/xos/synchronizers/new_base/model_policy_loop.py b/xos/synchronizers/new_base/model_policy_loop.py
index 14ca39c..25aaabf 100644
--- a/xos/synchronizers/new_base/model_policy_loop.py
+++ b/xos/synchronizers/new_base/model_policy_loop.py
@@ -23,13 +23,9 @@
import time
import traceback
-from xosconfig import Config
-from multistructlog import create_logger
-
-log = create_logger(Config().get('logging'))
class XOSPolicyEngine(object):
- def __init__(self, policies_dir, log = log):
+ def __init__(self, policies_dir, log):
self.model_policies = self.load_model_policies(policies_dir)
self.policies_by_name = {}
self.policies_by_class = {}
diff --git a/xos/synchronizers/new_base/syncstep.py b/xos/synchronizers/new_base/syncstep.py
index 4ca697e..fcb1cb3 100644
--- a/xos/synchronizers/new_base/syncstep.py
+++ b/xos/synchronizers/new_base/syncstep.py
@@ -96,6 +96,9 @@
except:
self.soft_deadline = 5 # 5 seconds
+ if 'log' in args:
+ self.log = args.get('log')
+
return
def fetch_pending(self, deletion=False):
diff --git a/xos/synchronizers/new_base/tests/test_config.yaml b/xos/synchronizers/new_base/tests/test_config.yaml
index 2a01b6e..0a4fece 100644
--- a/xos/synchronizers/new_base/tests/test_config.yaml
+++ b/xos/synchronizers/new_base/tests/test_config.yaml
@@ -1,3 +1,4 @@
+---
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
name: test-synchronizer
accessor:
username: xosadmin@opencord.org
diff --git a/xos/synchronizers/new_base/tests/test_event_engine.py b/xos/synchronizers/new_base/tests/test_event_engine.py
index 5919721..94ac2d7 100644
--- a/xos/synchronizers/new_base/tests/test_event_engine.py
+++ b/xos/synchronizers/new_base/tests/test_event_engine.py
@@ -21,6 +21,8 @@
import os, sys
import time
+log = None
+
test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
xos_dir = os.path.join(test_path, '..', '..', '..')
@@ -82,8 +84,24 @@
class TestEventEngine(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+
+ global log
+
+ config = os.path.join(test_path, "test_config.yaml")
+ from xosconfig import Config
+ Config.clear()
+ Config.init(config, 'synchronizer-config-schema.yaml')
+
+ if not log:
+ from multistructlog import create_logger
+ log = create_logger(Config().get('logging'))
+
+
def setUp(self):
- global XOSKafkaThread, Config, event_engine_log
+ global XOSKafkaThread, Config, log
self.sys_path_save = sys.path
self.cwd_save = os.getcwd()
@@ -102,10 +120,9 @@
os.chdir(os.path.join(test_path, '..')) # config references tests/model-deps
from event_engine import XOSKafkaThread, XOSEventEngine
- from event_engine import log as event_engine_log
self.event_steps_dir = Config.get("event_steps_dir")
- self.event_engine = XOSEventEngine()
+ self.event_engine = XOSEventEngine(log)
def tearDown(self):
sys.path = self.sys_path_save
@@ -172,7 +189,7 @@
self.event_engine.load_event_step_modules(self.event_steps_dir)
with patch.object(XOSKafkaThread, "create_kafka_consumer") as create_kafka_consumer, \
- patch.object(event_engine_log, "error") as log_error, \
+ patch.object(log, "error") as log_error, \
patch.object(self.event_engine.event_steps[0], "technology") as technology:
technology.return_value = "not_kafka"
create_kafka_consumer.return_value = FakeKafkaConsumer()
@@ -234,7 +251,7 @@
config_get_orig = Config.get
with patch.object(XOSKafkaThread, "create_kafka_consumer") as create_kafka_consumer, \
- patch.object(event_engine_log, "error") as log_error, \
+ patch.object(log, "error") as log_error, \
patch.object(Config, "get", new=functools.partial(config_get_mock, config_get_orig, {"event_bus.kind": None})):
create_kafka_consumer.return_value = FakeKafkaConsumer()
@@ -253,7 +270,7 @@
config_get_orig = Config.get
with patch.object(XOSKafkaThread, "create_kafka_consumer") as create_kafka_consumer, \
- patch.object(event_engine_log, "error") as log_error, \
+ patch.object(log, "error") as log_error, \
patch.object(Config, "get",
new=functools.partial(config_get_mock, config_get_orig, {"event_bus.kind": "not_kafka"})):
create_kafka_consumer.return_value = FakeKafkaConsumer()
diff --git a/xos/synchronizers/new_base/tests/test_payload.py b/xos/synchronizers/new_base/tests/test_payload.py
index a72a5da..d398612 100644
--- a/xos/synchronizers/new_base/tests/test_payload.py
+++ b/xos/synchronizers/new_base/tests/test_payload.py
@@ -27,6 +27,8 @@
ANSIBLE_FILE='/tmp/payload_test'
+log = None
+
def run_fake_ansible_template(*args,**kwargs):
opts = args[1]
open(ANSIBLE_FILE,'w').write(json.dumps(opts))
@@ -42,7 +44,24 @@
return json.loads(ansible_str)
class TestPayload(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+
+ global log
+
+ config = os.path.join(test_path, "test_config.yaml")
+ from xosconfig import Config
+ Config.clear()
+ Config.init(config, 'synchronizer-config-schema.yaml')
+
+ if not log:
+ from multistructlog import create_logger
+ log = create_logger(Config().get('logging'))
+
+
def setUp(self):
+
global log, steps, event_loop
self.sys_path_save = sys.path
@@ -72,10 +91,6 @@
# import all class names to globals
for (k, v) in model_accessor.all_model_classes.items():
globals()[k] = v
-
- from multistructlog import create_logger
- log = create_logger()
-
b = backend.Backend()
steps_dir = Config.get("steps_dir")
self.steps = b.load_sync_step_modules(steps_dir)
diff --git a/xos/synchronizers/new_base/tests/test_run.py b/xos/synchronizers/new_base/tests/test_run.py
index 60eb2c0..86db27d 100644
--- a/xos/synchronizers/new_base/tests/test_run.py
+++ b/xos/synchronizers/new_base/tests/test_run.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
import unittest
from mock import patch
import mock
diff --git a/xos/synchronizers/new_base/xos-policy.py b/xos/synchronizers/new_base/xos-policy.py
index 1452101..33935e8 100644
--- a/xos/synchronizers/new_base/xos-policy.py
+++ b/xos/synchronizers/new_base/xos-policy.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
# Copyright 2017-present Open Networking Foundation
#
@@ -13,9 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-#!/usr/bin/env python
-
""" xos-policy.py
Standalone interface to model_policy engine.
@@ -39,10 +37,11 @@
from xosconfig import Config
from multistructlog import create_logger
-log = create_logger(Config().get('logging'))
def main():
+ log = create_logger(Config().get('logging'))
+
models_active = False
wait = False
while not models_active:
@@ -62,7 +61,8 @@
# start model policies thread
policies_dir = Config.get("model_policies_dir")
- XOSPolicyEngine(policies_dir=policies_dir).run()
+
+ XOSPolicyEngine(policies_dir=policies_dir, log=log).run()
if __name__ == '__main__':
main()
diff --git a/xos/synchronizers/new_base/xos-synchronizer.py b/xos/synchronizers/new_base/xos-synchronizer.py
index d8ee593..22426a3 100644
--- a/xos/synchronizers/new_base/xos-synchronizer.py
+++ b/xos/synchronizers/new_base/xos-synchronizer.py
@@ -46,7 +46,7 @@
if (wait):
time.sleep(60) # Safety factor, seeing that we stumbled waiting for the data model to come up.
-
+
log_closure = log.bind(synchronizer_name = Config().get('name'))
backend = Backend(log = log_closure)
backend.run()