Merge branch 'master' of github.com:open-cloud/xos
diff --git a/containers/postgresql/Makefile b/containers/postgresql/Makefile
index c50923e..8f483f8 100644
--- a/containers/postgresql/Makefile
+++ b/containers/postgresql/Makefile
@@ -14,7 +14,7 @@
 .PHONY: rm
 rm: ; docker rm ${CONTAINER_NAME}
 
-.PHONE: rmi
+.PHONY: rmi
 rmi: ; docker rmi ${IMAGE_NAME}
 
 .PHONY: backup
diff --git a/containers/xos/Dockerfile b/containers/xos/Dockerfile
index f65eb37..a558018 100644
--- a/containers/xos/Dockerfile
+++ b/containers/xos/Dockerfile
@@ -58,6 +58,7 @@
     python-ceilometerclient \
     python-dateutil \
     python-keyczar \
+    python-logstash \
     pygraphviz \
     pytz \
     pyyaml \
diff --git a/containers/xos/Dockerfile.devel b/containers/xos/Dockerfile.devel
index a8a9710..818b463 100644
--- a/containers/xos/Dockerfile.devel
+++ b/containers/xos/Dockerfile.devel
@@ -58,6 +58,7 @@
     python-ceilometerclient \
     python-dateutil \
     python-keyczar \
+    python-logstash \
     pygraphviz \
     pytz \
     pyyaml \
diff --git a/containers/xos/Dockerfile.templ b/containers/xos/Dockerfile.templ
index 25270a6..c9a577a 100644
--- a/containers/xos/Dockerfile.templ
+++ b/containers/xos/Dockerfile.templ
@@ -60,6 +60,7 @@
     python-dateutil \
     python_gflags \
     python-keyczar \
+    python-logstash \
     pygraphviz \
     pytz \
     pyyaml \
diff --git a/views/style/README.md b/views/style/README.md
index 51f314e..7086fd9 100644
--- a/views/style/README.md
+++ b/views/style/README.md
@@ -1,9 +1,28 @@
 # XOS Styles
 
-Collection of commands to work on xos styling.
+This folder holds the style definitions for XOS and a collection of tools useful for working with them.
 
-> _Require to install dependencies with `npm install` before usage_
+## Setup
 
-`npm start` run BrowserSync and `node-sass` in watch mode.
+The best way to work with XOS styling and appearance is to have the `frontend` configuration running locally on your machine. In this way most of the GUI files are shared (see below). To use the provided tools as they are, XOS should be available at `http://xos.dev:9999`.
 
->NOTE _Require XOS to respond on `http:xos.dev:999`_
\ No newline at end of file
+Before starting to work on the UI you should also install the dependencies, so enter `xos/views/style/` and execute `npm install` (NodeJs is required).
+
+## Developing
+
+When your environment is ready you can start it with `npm start`; this command will:
+  - Watch styles in `xos/views/style/sass` and compile them on change
+  - Reload the browser on file changes (for more details see `xos/views/style/bs-config.js`)
+
+## Shared files:
+Shared files are defined in `xos/configurations/frontend/docker-compose.yml`, for the `frontend` configuration they are:
+```
+  - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config
+  - ../../core/xoslib:/opt/xos/core/xoslib
+  - ../../core/static:/opt/xos/core/static
+  - ../../core/dashboard:/opt/xos/core/dashboard
+  - ../../core/templatetags:/opt/xos/core/templatetags
+  - ../../templates/admin:/opt/xos/templates/admin
+  - ../../configurations:/opt/xos/configurations
+  - ../../xos:/opt/xos/xos
+```
diff --git a/xos/configurations/common/Dockerfile.common b/xos/configurations/common/Dockerfile.common
index fd27593..aedd245 100644
--- a/xos/configurations/common/Dockerfile.common
+++ b/xos/configurations/common/Dockerfile.common
@@ -40,6 +40,7 @@
 RUN pip install pytz
 RUN pip install django-timezones
 RUN pip install requests
+RUN pip install python-logstash
 RUN pip install django-crispy-forms
 RUN pip install django-geoposition
 RUN pip install django-extensions
diff --git a/xos/configurations/common/devstack/local.conf b/xos/configurations/common/devstack/local.conf
index ea70675..15a95fb 100644
--- a/xos/configurations/common/devstack/local.conf
+++ b/xos/configurations/common/devstack/local.conf
@@ -6,7 +6,7 @@
 #IMAGE_URLS+="http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2"
 LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
 # Append the git branch name if you wish to download ceilometer from a specific branch
-enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer 
+#enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer 
 
 disable_service n-net
 enable_service q-svc
@@ -26,7 +26,7 @@
 #enable_service ceilometer-alarm-evaluator
 #enable_service ceilometer-api
 #enable_service ceilometer-acompute
-CEILOMETER_BACKEND=mongodb
+#CEILOMETER_BACKEND=mongodb
 
 ## Neutron options
 Q_USE_SECGROUP=False
diff --git a/xos/configurations/common/devstack/setup-devstack.sh b/xos/configurations/common/devstack/setup-devstack.sh
new file mode 100644
index 0000000..bfbb8f8
--- /dev/null
+++ b/xos/configurations/common/devstack/setup-devstack.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# If running on a CloudLab node, set up extra disk space
+if [ -e /usr/testbed/bin/mkextrafs ]
+then
+    sudo mkdir -p /opt/stack
+    sudo /usr/testbed/bin/mkextrafs -f /opt/stack
+fi
+
+cd ~
+git clone https://github.com/open-cloud/xos.git
+git clone https://git.openstack.org/openstack-dev/devstack
+cd ~/devstack
+git checkout stable/kilo
+cp ~/xos/xos/configurations/common/devstack/local.conf .
+./stack.sh
diff --git a/xos/configurations/devel/README.md b/xos/configurations/devel/README.md
index 84bf6fc..df9f999 100644
--- a/xos/configurations/devel/README.md
+++ b/xos/configurations/devel/README.md
@@ -25,29 +25,25 @@
 
 ### DevStack
 
-The following instructions can be used to install DevStack and XOS together
-on a single node.  This setup has been run successfully in a VirtualBox VM
-with 2 CPUs and 4096 GB RAM.
-
-First, if you happen to be installing DevStack on a CloudLab node, you can
-configure about 1TB of unallocated disk space for DevStack as follows:
+On a server with a fresh Ubuntu 14.04 install, 
+[this script](https://raw.githubusercontent.com/open-cloud/xos/master/xos/configurations/common/devstack/setup-devstack.sh)
+can be used to bootstrap a single-node DevStack environment that can be used
+for basic XOS development.
+The script installs DevStack and checks out the XOS repository.  Run the script
+and then invoke the XOS configuration for DevStack as follows:
 ```
-~$ sudo mkdir -p /opt/stack
-~$ sudo /usr/testbed/bin/mkextrafs /opt/stack
-```
-
-To install DevStack and XOS:
-
-```
-~$ git clone https://github.com/open-cloud/xos.git
-~$ git clone https://git.openstack.org/openstack-dev/devstack
-~$ cd devstack
-~/devstack$ cp ../xos/xos/configurations/common/devstack/local.conf .
-~/devstack$ ./stack.sh
-~/devstack$ cd ../xos/xos/configurations/devel/
+~$ wget https://raw.githubusercontent.com/open-cloud/xos/master/xos/configurations/common/devstack/setup-devstack.sh
+~$ bash ./setup-devstack.sh
+~$ cd ../xos/xos/configurations/devel/
 ~/xos/xos/configurations/devel$ make devstack
 ```
 
+This setup has been run successfully in a VirtualBox VM with 2 CPUs and 4096 MB RAM.
+However it is recommended to use a dedicated server with more resources.
+
+**NOTE: If your goal is to create a development environment for [CORD](http://opencord.org/), 
+DevStack is not what you want.  Look at the [cord-pod](../cord-pod) configuration instead!**
+
 ## What you get
 
 XOS will be set up with a single Deployment and Site.  It should be in a state where
diff --git a/xos/core/models/instance.py b/xos/core/models/instance.py
index 7f13eb8..6ba7cbf 100644
--- a/xos/core/models/instance.py
+++ b/xos/core/models/instance.py
@@ -104,6 +104,15 @@
     def get_controller (self):
         return self.node.site_deployment.controller
 
+    def tologdict(self):
+        d=super(Instance,self).tologdict()
+        try:
+            d['slice_name']=self.slice.name
+            d['controller_name']=self.get_controller().name
+        except:
+            pass
+        return d
+
     def __unicode__(self):
         if self.name and Slice.objects.filter(id=self.slice_id) and (self.name != self.slice.name):
             # NOTE: The weird check on self.slice_id was due to a problem when
diff --git a/xos/core/models/network.py b/xos/core/models/network.py
index 80ee9ba..6af72bf 100644
--- a/xos/core/models/network.py
+++ b/xos/core/models/network.py
@@ -207,6 +207,15 @@
     class Meta:
         unique_together = ('network', 'controller')
 
+    def tologdict(self):
+        d=super(ControllerNetwork,self).tologdict()
+        try:
+            d['network_name']=self.network.name
+            d['controller_name']=self.controller.name
+        except:
+            pass
+        return d
+ 
     @staticmethod
     def select_by_user(user):
         if user.is_admin:
diff --git a/xos/core/models/plcorebase.py b/xos/core/models/plcorebase.py
index 99acc15..4170697 100644
--- a/xos/core/models/plcorebase.py
+++ b/xos/core/models/plcorebase.py
@@ -300,3 +300,11 @@
     @classmethod
     def is_ephemeral(cls):
         return cls in ephemeral_models
+
+    def tologdict(self):
+        try:
+            d = {'model_name':self.__class__.__name__, 'pk': self.pk}
+        except:
+            d = {}
+
+        return d
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index 12c1ea2..a449691 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -184,6 +184,15 @@
     class Meta:
         unique_together = ('controller', 'slice')
      
+    def tologdict(self):
+        d=super(ControllerSlice,self).tologdict()
+        try:
+            d['slice_name']=self.slice.name
+            d['controller_name']=self.controller.name
+        except:
+            pass
+        return d
+
     def __unicode__(self):  return u'%s %s'  % (self.slice, self.controller)
 
     @staticmethod
diff --git a/xos/synchronizers/base/SyncInstanceUsingAnsible.py b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
index 04b98df..fef8f86 100644
--- a/xos/synchronizers/base/SyncInstanceUsingAnsible.py
+++ b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
@@ -33,7 +33,7 @@
         return False
 
     def defer_sync(self, o, reason):
-        logger.info("defer object %s due to %s" % (str(o), reason))
+        logger.info("defer object %s due to %s" % (str(o), reason),extra=o.tologdict())
         raise Exception("defer object %s due to %s" % (str(o), reason))
 
     def get_extra_attributes(self, o):
@@ -63,7 +63,7 @@
             template_name = self.template_name
         tStart = time.time()
         run_template_ssh(template_name, fields)
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
 
     def pre_sync_hook(self, o, fields):
         pass
@@ -154,7 +154,7 @@
         return fields
 
     def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
 
         self.prepare_record(o)
 
diff --git a/xos/synchronizers/base/steps/sync_container.py b/xos/synchronizers/base/steps/sync_container.py
index d647aef..b944495 100644
--- a/xos/synchronizers/base/steps/sync_container.py
+++ b/xos/synchronizers/base/steps/sync_container.py
@@ -119,7 +119,7 @@
         return fields
 
     def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -139,7 +139,7 @@
         o.save()
 
     def delete_record(self, o):
-        logger.info("delete'ing object %s" % str(o))
+        logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -158,6 +158,6 @@
             template_name = self.template_name
         tStart = time.time()
         run_template_ssh(template_name, fields, path="container")
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
 
 
diff --git a/xos/synchronizers/base/syncstep-portal.py b/xos/synchronizers/base/syncstep-portal.py
index 66ec1af..dfb810e 100644
--- a/xos/synchronizers/base/syncstep-portal.py
+++ b/xos/synchronizers/base/syncstep-portal.py
@@ -114,7 +114,7 @@
                 reset_queries()
             except:
                 # this shouldn't happen, but in case it does, catch it...
-                logger.log_exc("exception in reset_queries")
+                logger.log_exc("exception in reset_queries",extra=o.tologdict())
 
             sync_failed = False
             try:
@@ -129,7 +129,7 @@
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
             except:
-                logger.log_exc("Exception while loading scratchpad")
+                logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
                 pass
 
             if (not sync_failed):
@@ -147,7 +147,7 @@
                         o.backend_status = "1 - OK"
                         o.save(update_fields=['enacted','backend_status','backend_register'])
 		except (InnocuousException,Exception) as e:
-                    logger.log_exc("Syncstep caught exception")
+                    logger.log_exc("Syncstep caught exception",extra=o.tologdict())
 
                     force_error = False
                     try:
@@ -180,7 +180,7 @@
                         scratchpad = json.loads(o.backend_register)
                         scratchpad['exponent']
                     except:
-                        logger.log_exc("Exception while updating scratchpad")
+                        logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
                         scratchpad = {'next_run':0, 'exponent':0}
 
                     # Second failure
@@ -218,4 +218,4 @@
         return
 
     def __call__(self, **args):
-        return self.call(**args)
\ No newline at end of file
+        return self.call(**args)
diff --git a/xos/synchronizers/base/syncstep.py b/xos/synchronizers/base/syncstep.py
index e6b8d55..0e34010 100644
--- a/xos/synchronizers/base/syncstep.py
+++ b/xos/synchronizers/base/syncstep.py
@@ -201,7 +201,7 @@
                 reset_queries()
             except:
                 # this shouldn't happen, but in case it does, catch it...
-                logger.log_exc("exception in reset_queries")
+                logger.log_exc("exception in reset_queries",extra=o.tologdict())
 
             sync_failed = False
             try:
@@ -216,7 +216,7 @@
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
             except:
-                logger.log_exc("Exception while loading scratchpad")
+                logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
                 pass
 
             if (not sync_failed):
@@ -235,7 +235,7 @@
                         o.backend_status = "1 - OK"
                         o.save(update_fields=['enacted','backend_status','backend_register'])
                 except (InnocuousException,Exception,DeferredException) as e:
-                    logger.log_exc("sync step failed!")
+                    logger.log_exc("sync step failed!",extra=o.tologdict())
                     try:
                         if (o.backend_status.startswith('2 - ')):
                             str_e = '%s // %r'%(o.backend_status[4:],e)
@@ -259,7 +259,7 @@
                         scratchpad = json.loads(o.backend_register)
                         scratchpad['exponent']
                     except:
-                        logger.log_exc("Exception while updating scratchpad")
+                        logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
                         scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time(),'failures':0}
 
                     # Second failure
diff --git a/xos/synchronizers/ec2/deleters/network_deleter.py b/xos/synchronizers/ec2/deleters/network_deleter.py
index aa9ef59..ba9cd09 100644
--- a/xos/synchronizers/ec2/deleters/network_deleter.py
+++ b/xos/synchronizers/ec2/deleters/network_deleter.py
@@ -15,5 +15,5 @@
             try:
                 network_deployment_deleter(network_deployment.id)    
             except:
-                logger.log_exc("Failed to delte network deployment %s" % network_deployment)
+                logger.log_exc("Failed to delete network deployment %s" % network_deployment,extra=network.tologdict())
         network.delete()
diff --git a/xos/synchronizers/ec2/deleters/slice_deleter.py b/xos/synchronizers/ec2/deleters/slice_deleter.py
index 49bf692..6b800ac 100644
--- a/xos/synchronizers/ec2/deleters/slice_deleter.py
+++ b/xos/synchronizers/ec2/deleters/slice_deleter.py
@@ -15,5 +15,5 @@
             try:
                 slice_deployment_deleter(slice_deployment.id)
             except:
-                logger.log_exc("Failed to delete slice_deployment %s" % slice_deployment) 
+                logger.log_exc("Failed to delete slice_deployment %s" % slice_deployment,extra=slice.tologdict()) 
         slice.delete()
diff --git a/xos/synchronizers/ec2/steps/sync_instances.py b/xos/synchronizers/ec2/steps/sync_instances.py
index fc11e05..efab74d 100644
--- a/xos/synchronizers/ec2/steps/sync_instances.py
+++ b/xos/synchronizers/ec2/steps/sync_instances.py
@@ -44,7 +44,7 @@
         result = aws_run('ec2 terminate-instances --instance-ids=%s'%instance.instance_id, env=e)
 
     def sync_record(self, instance):
-        logger.info("sync'ing instance:%s deployment:%s " % (instance, instance.node.deployment))
+        logger.info("sync'ing instance:%s deployment:%s " % (instance, instance.node.deployment),extra=instance.tologdict())
 
         if not instance.instance_id:
             # public keys
diff --git a/xos/synchronizers/ec2/syncstep.py b/xos/synchronizers/ec2/syncstep.py
index 3cba48b..3a31cb6 100644
--- a/xos/synchronizers/ec2/syncstep.py
+++ b/xos/synchronizers/ec2/syncstep.py
@@ -92,7 +92,7 @@
                 if (o.pk):
                     o.save(update_fields=['backend_status'])
 
-                logger.log_exc("sync step failed!")
+                logger.log_exc("sync step failed!",extra=o.tologdict())
                 failed.append(o)
 
         return failed
diff --git a/xos/synchronizers/hpc/steps/sync_cdnprefix.py b/xos/synchronizers/hpc/steps/sync_cdnprefix.py
index 7439633..eff3b5d 100644
--- a/xos/synchronizers/hpc/steps/sync_cdnprefix.py
+++ b/xos/synchronizers/hpc/steps/sync_cdnprefix.py
@@ -67,7 +67,7 @@
         return result
 
     def sync_record(self, cp):
-        logger.info("sync'ing cdn prefix %s" % str(cp))
+        logger.info("sync'ing cdn prefix %s" % str(cp),extra=cp.tologdict())
 
         if (not cp.contentProvider) or (not cp.contentProvider.content_provider_id):
             raise Exception("CDN Prefix %s is linked to a contentProvider without an id" % str(cp))
diff --git a/xos/synchronizers/hpc/steps/sync_contentprovider.py b/xos/synchronizers/hpc/steps/sync_contentprovider.py
index c58cb5e..3e30ed3 100644
--- a/xos/synchronizers/hpc/steps/sync_contentprovider.py
+++ b/xos/synchronizers/hpc/steps/sync_contentprovider.py
@@ -51,7 +51,7 @@
         return result
 
     def sync_record(self, cp):
-        logger.info("sync'ing content provider %s" % str(cp))
+        logger.info("sync'ing content provider %s" % str(cp), extra=cp.tologdict())
         account_name = self.make_account_name(cp.name)
 
         if (not cp.serviceProvider) or (not cp.serviceProvider.service_provider_id):
diff --git a/xos/synchronizers/hpc/steps/sync_hpcservices.py b/xos/synchronizers/hpc/steps/sync_hpcservices.py
index e49f93f..63bf19b 100644
--- a/xos/synchronizers/hpc/steps/sync_hpcservices.py
+++ b/xos/synchronizers/hpc/steps/sync_hpcservices.py
@@ -39,5 +39,5 @@
             return self.filter_hpc_service(HpcService.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)))
 
     def sync_record(self, hpc_service):
-        logger.info("sync'ing hpc_service %s" % str(hpc_service))
+        logger.info("sync'ing hpc_service %s" % str(hpc_service),extra=hpc_service.tologdict())
         hpc_service.save()
diff --git a/xos/synchronizers/hpc/steps/sync_originserver.py b/xos/synchronizers/hpc/steps/sync_originserver.py
index 0a675e1..bd5b227 100644
--- a/xos/synchronizers/hpc/steps/sync_originserver.py
+++ b/xos/synchronizers/hpc/steps/sync_originserver.py
@@ -55,7 +55,7 @@
         return result
 
     def sync_record(self, ors):
-        logger.info("sync'ing origin server %s" % str(ors))
+        logger.info("sync'ing origin server %s" % str(ors),extra=ors.tologdict())
 
         if (not ors.contentProvider) or (not ors.contentProvider.content_provider_id):
             raise Exception("Origin Server %s is linked to a contentProvider with no id" % str(ors))
diff --git a/xos/synchronizers/hpc/steps/sync_serviceprovider.py b/xos/synchronizers/hpc/steps/sync_serviceprovider.py
index 0cf145f..af6d685 100644
--- a/xos/synchronizers/hpc/steps/sync_serviceprovider.py
+++ b/xos/synchronizers/hpc/steps/sync_serviceprovider.py
@@ -51,7 +51,7 @@
         return result
 
     def sync_record(self, sp):
-        logger.info("sync'ing service provider %s" % str(sp))
+        logger.info("sync'ing service provider %s" % str(sp),extra=sp.tologdict())
         account_name = self.make_account_name(sp.name)
         sp_dict = {"account": account_name, "name": sp.name, "enabled": sp.enabled}
         if not sp.service_provider_id:
diff --git a/xos/synchronizers/hpc/steps/sync_sitemap.py b/xos/synchronizers/hpc/steps/sync_sitemap.py
index 885c616..a1d177b 100644
--- a/xos/synchronizers/hpc/steps/sync_sitemap.py
+++ b/xos/synchronizers/hpc/steps/sync_sitemap.py
@@ -49,7 +49,7 @@
         all_map_ids = [x["map_id"] for x in self.client.onev.ListAll("Map")]
         for map in SiteMap.objects.all():
             if (map.map_id is not None) and (map.map_id not in all_map_ids):
-                logger.info("Map %s was not found on CMI" % map.map_id)
+                logger.info("Map %s was not found on CMI" % map.map_id,extra=map.tologdict())
                 map.map_id=None
                 map.save()
                 result = True
@@ -68,7 +68,7 @@
                 self.client.onev.UnBind("map", map.map_id, to_name, id)
 
     def sync_record(self, map):
-        logger.info("sync'ing SiteMap %s" % str(map))
+        logger.info("sync'ing SiteMap %s" % str(map),extra=map.tologdict())
 
         if not map.map:
             # no contents
diff --git a/xos/synchronizers/model_policy.py b/xos/synchronizers/model_policy.py
index d0bbbb1..e2121ec 100644
--- a/xos/synchronizers/model_policy.py
+++ b/xos/synchronizers/model_policy.py
@@ -41,7 +41,7 @@
     except AttributeError,e:
         raise e
     except Exception,e:
-            logger.info('Could not save %r. Exception: %r'%(d,e))
+            logger.info('Could not save %r. Exception: %r'%(d,e), extra=d.tologdict())
 
 def delete_if_inactive(d, o):
     try:
diff --git a/xos/synchronizers/onos/steps/sync_onosservice.py b/xos/synchronizers/onos/steps/sync_onosservice.py
index 944a05c..2e6acd9 100644
--- a/xos/synchronizers/onos/steps/sync_onosservice.py
+++ b/xos/synchronizers/onos/steps/sync_onosservice.py
@@ -59,7 +59,7 @@
 
     def sync_record(self, o):
         if o.no_container:
-            logger.info("no work to do for onos service, because o.no_container is set")
+            logger.info("no work to do for onos service, because o.no_container is set",extra=o.tologdict())
             o.save()
         else:
             super(SyncONOSService, self).sync_record(o)
diff --git a/xos/synchronizers/openstack/steps/sync_container.py b/xos/synchronizers/openstack/steps/sync_container.py
index d647aef..84a2c61 100644
--- a/xos/synchronizers/openstack/steps/sync_container.py
+++ b/xos/synchronizers/openstack/steps/sync_container.py
@@ -119,7 +119,7 @@
         return fields
 
     def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -139,7 +139,7 @@
         o.save()
 
     def delete_record(self, o):
-        logger.info("delete'ing object %s" % str(o))
+        logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -158,6 +158,6 @@
             template_name = self.template_name
         tStart = time.time()
         run_template_ssh(template_name, fields, path="container")
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
 
 
diff --git a/xos/synchronizers/openstack/syncstep.py b/xos/synchronizers/openstack/syncstep.py
index d1639b4..0a01356 100644
--- a/xos/synchronizers/openstack/syncstep.py
+++ b/xos/synchronizers/openstack/syncstep.py
@@ -201,7 +201,7 @@
                 reset_queries()
             except:
                 # this shouldn't happen, but in case it does, catch it...
-                logger.log_exc("exception in reset_queries")
+                logger.log_exc("exception in reset_queries",extra=o.tologdict())
 
             sync_failed = False
             try:
@@ -216,7 +216,7 @@
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
             except:
-                logger.log_exc("Exception while loading scratchpad")
+                logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
                 pass
 
             if (not sync_failed):
@@ -234,7 +234,7 @@
                         o.backend_status = "1 - OK"
                         o.save(update_fields=['enacted','backend_status','backend_register'])
                 except (InnocuousException,Exception,DeferredException) as e:
-                    logger.log_exc("sync step failed!")
+                    logger.log_exc("sync step failed!",extra=o.tologdict())
                     try:
                         if (o.backend_status.startswith('2 - ')):
                             str_e = '%s // %r'%(o.backend_status[4:],e)
@@ -258,7 +258,7 @@
                         scratchpad = json.loads(o.backend_register)
                         scratchpad['exponent']
                     except:
-                        logger.log_exc("Exception while updating scratchpad")
+                        logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
                         scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time(),'failures':0}
 
                     # Second failure
diff --git a/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py b/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py
index c9648ff..15a9b91 100644
--- a/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py
+++ b/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py
@@ -35,7 +35,7 @@
     def sync_record(self, rr_service):
 	try:
         	print "syncing service!"
-        	logger.info("sync'ing rr_service %s" % str(rr_service))
+        	logger.info("sync'ing rr_service %s" % str(rr_service),extra=rr_service.tologdict())
         	self.gen_slice_file(rr_service)
         	rr_service.save()
 		return True
diff --git a/xos/synchronizers/syndicate/steps/sync_volume.py b/xos/synchronizers/syndicate/steps/sync_volume.py
index e6dc90b..8773542 100644
--- a/xos/synchronizers/syndicate/steps/sync_volume.py
+++ b/xos/synchronizers/syndicate/steps/sync_volume.py
@@ -25,7 +25,7 @@
 from logging import Logger
 logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
 logger = logging.getLogger()
-logger.setLevel( logging.INFO )
+logger.setLevel( logging.INFO )
 
 # point to planetstack
 if __name__ != "__main__": 
@@ -53,7 +53,7 @@
         Synchronize a Volume record with Syndicate.
         """
         
-        logger.info( "Sync Volume = %s\n\n" % volume.name )
+        logger.info( "Sync Volume = %s\n\n" % volume.name ,extra=volume.tologdict())
     
         user_email = volume.owner_id.email
         config = syndicatelib.get_config()
@@ -65,7 +65,7 @@
             observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
         except Exception, e:
             traceback.print_exc()
-            logger.error("config is missing SYNDICATE_OPENCLOUD_SECRET")
+            logger.error("config is missing SYNDICATE_OPENCLOUD_SECRET",extra=volume.tologdict())
             raise e
 
         # volume owner must exist as a Syndicate user...
@@ -74,7 +74,7 @@
             assert rc == True, "Failed to create or read volume principal '%s'" % volume_principal_id
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to ensure principal '%s' exists" % volume_principal_id )
+            logger.error("Failed to ensure principal '%s' exists" % volume_principal_id ,extra=volume.tologdict())
             raise e
 
         # volume must exist 
@@ -84,7 +84,7 @@
             new_volume = syndicatelib.ensure_volume_exists( volume_principal_id, volume, user=user )
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to ensure volume '%s' exists" % volume.name )
+            logger.error("Failed to ensure volume '%s' exists" % volume.name ,extra=volume.tologdict())
             raise e
            
         # did we create the Volume?
@@ -98,7 +98,7 @@
                 rc = syndicatelib.update_volume( volume )
             except Exception, e:
                 traceback.print_exc()
-                logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message))
+                logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message),extra=volume.tologdict())
                 raise e
                     
         return True
@@ -109,7 +109,7 @@
             syndicatelib.ensure_volume_absent( volume_name )
         except Exception, e:
             traceback.print_exc()
-            logger.exception("Failed to erase volume '%s'" % volume_name)
+            logger.exception("Failed to erase volume '%s'" % volume_name,extra=volume.tologdict())
             raise e
 
 
diff --git a/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py b/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py
index 2889502..9fca2a4 100644
--- a/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py
+++ b/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py
@@ -23,7 +23,7 @@
 from logging import Logger
 logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
 logger = logging.getLogger()
-logger.setLevel( logging.INFO )
+logger.setLevel( logging.INFO )
 
 # point to planetstack 
 if __name__ != "__main__":
@@ -57,7 +57,7 @@
         volume_name = vac.volume.name
         syndicate_caps = syndicatelib.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) 
         
-        logger.info( "Sync VolumeAccessRight for (%s, %s)" % (user_email, volume_name) )
+        logger.info( "Sync VolumeAccessRight for (%s, %s)" % (user_email, volume_name) ,extra=vac.tologdict())
         
         # validate config
         try:
@@ -65,7 +65,7 @@
            observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
         except Exception, e:
            traceback.print_exc()
-           logger.error("syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OPENCLOUD_SECRET")
+           logger.error("syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OPENCLOUD_SECRET",extra=vac.tologdict())
            raise e
             
         # ensure the user exists and has credentials
@@ -74,7 +74,7 @@
             assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (user_email, rc, user)
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to ensure user '%s' exists" % user_email )
+            logger.error("Failed to ensure user '%s' exists" % user_email ,extra=vac.tologdict())
             raise e
  
         # make the access right for the user to create their own UGs, and provision an RG for this user that will listen on localhost.
@@ -85,7 +85,7 @@
 
         except Exception, e:
             traceback.print_exc()
 -            logger.error("Faoed to ensure user %s can access Volume %s with rights %s" % (user_email, volume_name, syndicate_caps))
 +            logger.error("Failed to ensure user %s can access Volume %s with rights %s" % (user_email, volume_name, syndicate_caps),extra=vac.tologdict())
             raise e
 
         return True
diff --git a/xos/synchronizers/syndicate/steps/sync_volumeslice.py b/xos/synchronizers/syndicate/steps/sync_volumeslice.py
index 1be61b9..9af97f3 100644
--- a/xos/synchronizers/syndicate/steps/sync_volumeslice.py
+++ b/xos/synchronizers/syndicate/steps/sync_volumeslice.py
@@ -23,7 +23,7 @@
 from logging import Logger
 logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
 logger = logging.getLogger()
 logger.setLevel( logging.INFO )
 
 # point to planetstack 
 if __name__ != "__main__":
@@ -50,7 +50,7 @@
 
     def sync_record(self, vs):
         
-        logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name))
+        logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name),extra=vs.tologdict())
         
         # extract arguments...
         user_email = vs.slice_id.creator.email
@@ -70,7 +70,7 @@
            
         except Exception, e:
            traceback.print_exc()
-           logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OPENCLOUD_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_PRIVATE_KEY, SYNDICATE_SMI_URL")
+           logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OPENCLOUD_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_PRIVATE_KEY, SYNDICATE_SMI_URL",extra=vs.tologdict())
            raise e
             
         # get secrets...
@@ -84,7 +84,7 @@
            
         except Exception, e:
            traceback.print_exc()
-           logger.error("Failed to load secret credentials")
+           logger.error("Failed to load secret credentials",extra=vs.tologdict())
            raise e
         
         # make sure there's a slice-controlled Syndicate user account for the slice owner
@@ -95,7 +95,7 @@
             assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (slice_principal_id, rc, user)
         except Exception, e:
             traceback.print_exc()
-            logger.error('Failed to ensure slice user %s exists' % slice_principal_id)
+            logger.error('Failed to ensure slice user %s exists' % slice_principal_id,extra=vs.tologdict())
             raise e
             
         # grant the slice-owning user the ability to provision UGs in this Volume, and also provision for the user the (single) RG the slice will instantiate in each VM.
@@ -105,7 +105,7 @@
             
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name))
+            logger.error("Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name),extra=vs.tologdict())
             raise e
             
         # generate and save slice credentials....
@@ -115,7 +115,7 @@
                 
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name))
+            logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name),extra=vs.tologdict())
             raise e
              
         # ... and push them all out.
@@ -125,7 +125,7 @@
                
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name))
+            logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name),extra=vs.tologdict())
             raise e
         
         return True
diff --git a/xos/synchronizers/vbng/steps/sync_vbngtenant.py b/xos/synchronizers/vbng/steps/sync_vbngtenant.py
index 4fa351e..89e7bc0 100644
--- a/xos/synchronizers/vbng/steps/sync_vbngtenant.py
+++ b/xos/synchronizers/vbng/steps/sync_vbngtenant.py
@@ -37,7 +37,7 @@
         return objs
 
     def defer_sync(self, o, reason):
-        logger.info("defer object %s due to %s" % (str(o), reason))
+        logger.info("defer object %s due to %s" % (str(o), reason),extra=o.tologdict())
         raise Exception("defer object %s due to %s" % (str(o), reason))
 
     def get_vbng_service(self, o):
@@ -77,7 +77,7 @@
                 if not ip:
                     raise Exception("vBNG service is linked to an ONOSApp, but the App's Service's Slice's first instance does not have an ip")
 
-                logger.info("Using ip %s from ONOS Instance %s" % (ip, instance))
+                logger.info("Using ip %s from ONOS Instance %s" % (ip, instance),extra=o.tologdict())
 
                 return "http://%s:8181/onos/virtualbng/" % ip
 
@@ -107,18 +107,18 @@
         return (vcpe.wan_ip, vcpe.wan_container_mac, vcpe.instance.node.name)
 
     def sync_record(self, o):
-        logger.info("sync'ing VBNGTenant %s" % str(o))
+        logger.info("sync'ing VBNGTenant %s" % str(o),extra=o.tologdict())
 
         if not o.routeable_subnet:
             (private_ip, private_mac, private_hostname) = self.get_private_interface(o)
-            logger.info("contacting vBNG service to request mapping for private ip %s mac %s host %s" % (private_ip, private_mac, private_hostname) )
+            logger.info("contacting vBNG service to request mapping for private ip %s mac %s host %s" % (private_ip, private_mac, private_hostname) ,extra=o.tologdict())
 
             url = self.get_vbng_url(o) + "privateip/%s/%s/%s" % (private_ip, private_mac, private_hostname)
-            logger.info( "vbng url: %s" % url )
+            logger.info( "vbng url: %s" % url ,extra=o.tologdict())
             r = requests.post(url )
             if (r.status_code != 200):
                 raise Exception("Received error from bng service (%d)" % r.status_code)
-            logger.info("received public IP %s from private IP %s" % (r.text, private_ip))
+            logger.info("received public IP %s from private IP %s" % (r.text, private_ip),extra=o.tologdict())
 
             if r.text == "0":
                 raise Exception("VBNG service failed to return a routeable_subnet (probably ran out)")
@@ -131,11 +131,11 @@
         o.save()
 
     def delete_record(self, o):
-        logger.info("deleting VBNGTenant %s" % str(o))
+        logger.info("deleting VBNGTenant %s" % str(o),extra=o.tologdict())
 
         if o.mapped_ip:
             private_ip = o.mapped_ip
-            logger.info("contacting vBNG service to delete private ip %s" % private_ip)
+            logger.info("contacting vBNG service to delete private ip %s" % private_ip,extra=o.tologdict())
             r = requests.delete(self.get_vbng_url(o) + "privateip/%s" % private_ip, )
             if (r.status_code != 200):
                 raise Exception("Received error from bng service (%d)" % r.status_code)
diff --git a/xos/synchronizers/vcpe/steps/sync_vcpetenant.py b/xos/synchronizers/vcpe/steps/sync_vcpetenant.py
index 2f2147b..d52f075 100644
--- a/xos/synchronizers/vcpe/steps/sync_vcpetenant.py
+++ b/xos/synchronizers/vcpe/steps/sync_vcpetenant.py
@@ -91,7 +91,7 @@
                                     if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
                                         dnsdemux_ip = ns.ip
                 if not dnsdemux_ip:
-                    logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label)
+                    logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label,extra=o.tologdict())
             else:
                 # Connect to dnsdemux using the instance's public address
                 for service in HpcService.objects.all():
@@ -104,7 +104,7 @@
                                     except:
                                         pass
                 if not dnsdemux_ip:
-                    logger.info("failed to find a dnsdemux with a public address")
+                    logger.info("failed to find a dnsdemux with a public address",extra=o.tologdict())
 
             for prefix in CDNPrefix.objects.all():
                 cdn_prefixes.append(prefix.prefix)
@@ -122,13 +122,13 @@
                         if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
                             bbs_addrs.append(ns.ip)
             else:
-                logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not")
+                logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not",extra=o.tologdict())
             if not bbs_addrs:
-                logger.info("failed to find any usable addresses on bbs_slice")
+                logger.info("failed to find any usable addresses on bbs_slice",extra=o.tologdict())
         elif vcpe_service.bbs_server:
             bbs_addrs.append(vcpe_service.bbs_server)
         else:
-            logger.info("neither bbs_slice nor bbs_server is configured in the vCPE")
+            logger.info("neither bbs_slice nor bbs_server is configured in the vCPE",extra=o.tologdict())
 
         vlan_ids = []
         s_tags = []
@@ -222,7 +222,7 @@
         if service.url_filter_kind == "broadbandshield":
             # disable url_filter if there are no bbs_addrs
             if url_filter_enable and (not fields.get("bbs_addrs",[])):
-                logger.info("disabling url_filter because there are no bbs_addrs")
+                logger.info("disabling url_filter because there are no bbs_addrs",extra=o.tologdict())
                 url_filter_enable = False
 
             if url_filter_enable:
@@ -239,19 +239,19 @@
                     bbs_port = 8018
 
                 if not bbs_hostname:
-                    logger.info("broadbandshield is not configured")
+                    logger.info("broadbandshield is not configured",extra=o.tologdict())
                 else:
                     tStart = time.time()
                     bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
                     bbs.sync(url_filter_level, url_filter_users)
 
                     if o.hpc_client_ip:
-                        logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip))
+                        logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip),extra=o.tologdict())
                         bbs.associate(o.hpc_client_ip)
                     else:
-                        logger.info("no hpc_client_ip to associate")
+                        logger.info("no hpc_client_ip to associate",extra=o.tologdict())
 
-                    logger.info("bbs update time %d" % int(time.time()-tStart))
+                    logger.info("bbs update time %d" % int(time.time()-tStart),extra=o.tologdict())
 
 
     def run_playbook(self, o, fields):
@@ -259,7 +259,7 @@
         quick_update = (o.last_ansible_hash == ansible_hash)
 
         if ENABLE_QUICK_UPDATE and quick_update:
-            logger.info("quick_update triggered; skipping ansible recipe")
+            logger.info("quick_update triggered; skipping ansible recipe",extra=o.tologdict())
         else:
             if o.instance.isolation in ["container", "container_vm"]:
                 super(SyncVSGTenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")
diff --git a/xos/tools/README.md b/xos/tools/README.md
new file mode 100644
index 0000000..ef13848
--- /dev/null
+++ b/xos/tools/README.md
@@ -0,0 +1,9 @@
+## Overview of XOS tools
+
+### modelgen
+
+Modelgen reads the XOS models and applies those models to a template to generate output. 
+
+Examples:
+  * `./modelgen -a core api.template.py > ../../xos/xosapi.py`
+  * `./modelgen -a services.hpc -b Service -b User hpc-api.template.py > ../../xos/hpcapi.py`
diff --git a/xos/xos/logger.py b/xos/xos/logger.py
index 7a0d401..7a358a5 100644
--- a/xos/xos/logger.py
+++ b/xos/xos/logger.py
@@ -26,6 +26,8 @@
 import os, sys
 import traceback
 import logging, logging.handlers
+import logstash
+from xos.config import Config
 
 CRITICAL=logging.CRITICAL
 ERROR=logging.ERROR
@@ -36,10 +38,16 @@
 # a logger that can handle tracebacks 
 class Logger:
     def __init__ (self,logfile=None,loggername=None,level=logging.INFO):
+        # Logstash config
+        try:
+            logstash_host,logstash_port = Config().observer_logstash_hostport.split(':')
+            logstash_handler = logstash.LogstashHandler(logstash_host, int(logstash_port), version=1)
+        except Exception:
+            logstash_handler = None
+
         # default is to locate loggername from the logfile if avail.
         if not logfile:
             try:
-                from xos.config import Config
                 logfile = Config().observer_log_file
             except:
                 logfile = "/var/log/xos.log"
@@ -72,14 +80,14 @@
         self.logger.setLevel(level)
         # check if logger already has the handler we're about to add
         handler_exists = False
-        for l_handler in self.logger.handlers:
-            if ((not hasattr(l_handler,"baseFilename")) or (l_handler.baseFilename == handler.baseFilename)) and \
-               l_handler.level == handler.level:
-                handler_exists = True 
+        # handlers (file + optional logstash) are only added once, when the logger has none yet
 
-        if not handler_exists:
+        if not self.logger.handlers:
             self.logger.addHandler(handler)
 
+            if (logstash_handler):
+                self.logger.addHandler(logstash_handler)
+
         self.loggername=loggername
 
     def setLevel(self,level):
@@ -109,39 +117,58 @@
         return verbose>=2
 
     ####################
-    def info(self, msg):
-        self.logger.info(msg)
 
-    def debug(self, msg):
-        self.logger.debug(msg)
+    def extract_context(self,cur):
+        cur = dict(cur)  # copy: never mutate the caller's dict or a shared default
+        try:
+            cur['synchronizer_name'] = Config().observer_name
+        except Exception:
+            pass
+
+        return cur
+
+    def info(self, msg, extra={}):
+        extra = self.extract_context(extra) 
+        self.logger.info(msg, extra=extra)
+
+    def debug(self, msg, extra={}):
+        extra = self.extract_context(extra) 
+        self.logger.debug(msg, extra=extra)
         
-    def warn(self, msg):
-        self.logger.warn(msg)
+    def warn(self, msg, extra={}):
+        extra = self.extract_context(extra) 
+        self.logger.warn(msg, extra=extra)
 
     # some code is using logger.warn(), some is using logger.warning()
-    def warning(self, msg):
-        self.logger.warning(msg)
+    def warning(self, msg, extra={}):
+        extra = self.extract_context(extra) 
+        self.logger.warning(msg,extra=extra)
    
-    def error(self, msg):
-        self.logger.error(msg)    
+    def error(self, msg, extra={}):
+        extra = self.extract_context(extra) 
+        self.logger.error(msg, extra=extra)    
  
-    def critical(self, msg):
-        self.logger.critical(msg)
+    def critical(self, msg, extra={}):
+        extra = self.extract_context(extra) 
+        self.logger.critical(msg, extra=extra)
 
     # logs an exception - use in an except statement
-    def log_exc(self,message):
-        self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
-        self.error("%s END TRACEBACK"%message)
+    def log_exc(self,message, extra={}):
+        extra = self.extract_context(extra) 
+        self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"), extra=extra)
+        self.error("%s END TRACEBACK"%message, extra=extra)
     
-    def log_exc_critical(self,message):
-        self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
-        self.critical("%s END TRACEBACK"%message)
+    def log_exc_critical(self,message, extra={}):
+        extra = self.extract_context(extra) 
+        self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"), extra=extra)
+        self.critical("%s END TRACEBACK"%message, extra=extra)
     
     # for investigation purposes, can be placed anywhere
-    def log_stack(self,message):
+    def log_stack(self,message, extra={}):
+        extra = self.extract_context(extra) 
         to_log="".join(traceback.format_stack())
-        self.info("%s BEG STACK"%message+"\n"+to_log)
-        self.info("%s END STACK"%message)
+        self.info("%s BEG STACK"%message+"\n"+to_log,extra=extra)
+        self.info("%s END STACK"%message,extra=extra)
 
     def enable_console(self, stream=sys.stdout):
         formatter = logging.Formatter("%(message)s")