Updated various synchronizers to take advantage of logstash capability
diff --git a/xos/synchronizers/base/SyncInstanceUsingAnsible.py b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
index 04b98df..fef8f86 100644
--- a/xos/synchronizers/base/SyncInstanceUsingAnsible.py
+++ b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
@@ -33,7 +33,7 @@
         return False
 
     def defer_sync(self, o, reason):
-        logger.info("defer object %s due to %s" % (str(o), reason))
+        logger.info("defer object %s due to %s" % (str(o), reason),extra=o.tologdict())
         raise Exception("defer object %s due to %s" % (str(o), reason))
 
     def get_extra_attributes(self, o):
@@ -63,7 +63,7 @@
             template_name = self.template_name
         tStart = time.time()
         run_template_ssh(template_name, fields)
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
 
     def pre_sync_hook(self, o, fields):
         pass
@@ -154,7 +154,7 @@
         return fields
 
     def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
 
         self.prepare_record(o)
 
diff --git a/xos/synchronizers/base/steps/sync_container.py b/xos/synchronizers/base/steps/sync_container.py
index d647aef..b944495 100644
--- a/xos/synchronizers/base/steps/sync_container.py
+++ b/xos/synchronizers/base/steps/sync_container.py
@@ -119,7 +119,7 @@
         return fields
 
     def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -139,7 +139,7 @@
         o.save()
 
     def delete_record(self, o):
-        logger.info("delete'ing object %s" % str(o))
+        logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -158,6 +158,6 @@
             template_name = self.template_name
         tStart = time.time()
         run_template_ssh(template_name, fields, path="container")
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
 
 
diff --git a/xos/synchronizers/base/syncstep-portal.py b/xos/synchronizers/base/syncstep-portal.py
index 66ec1af..dfb810e 100644
--- a/xos/synchronizers/base/syncstep-portal.py
+++ b/xos/synchronizers/base/syncstep-portal.py
@@ -114,7 +114,7 @@
                 reset_queries()
             except:
                 # this shouldn't happen, but in case it does, catch it...
-                logger.log_exc("exception in reset_queries")
+                logger.log_exc("exception in reset_queries",extra=o.tologdict())
 
             sync_failed = False
             try:
@@ -129,7 +129,7 @@
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
             except:
-                logger.log_exc("Exception while loading scratchpad")
+                logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
                 pass
 
             if (not sync_failed):
@@ -147,7 +147,7 @@
                         o.backend_status = "1 - OK"
                         o.save(update_fields=['enacted','backend_status','backend_register'])
 		except (InnocuousException,Exception) as e:
-                    logger.log_exc("Syncstep caught exception")
+                    logger.log_exc("Syncstep caught exception",extra=o.tologdict())
 
                     force_error = False
                     try:
@@ -180,7 +180,7 @@
                         scratchpad = json.loads(o.backend_register)
                         scratchpad['exponent']
                     except:
-                        logger.log_exc("Exception while updating scratchpad")
+                        logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
                         scratchpad = {'next_run':0, 'exponent':0}
 
                     # Second failure
@@ -218,4 +218,4 @@
         return
 
     def __call__(self, **args):
-        return self.call(**args)
\ No newline at end of file
+        return self.call(**args)
diff --git a/xos/synchronizers/base/syncstep.py b/xos/synchronizers/base/syncstep.py
index e6b8d55..0e34010 100644
--- a/xos/synchronizers/base/syncstep.py
+++ b/xos/synchronizers/base/syncstep.py
@@ -201,7 +201,7 @@
                 reset_queries()
             except:
                 # this shouldn't happen, but in case it does, catch it...
-                logger.log_exc("exception in reset_queries")
+                logger.log_exc("exception in reset_queries",extra=o.tologdict())
 
             sync_failed = False
             try:
@@ -216,7 +216,7 @@
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
             except:
-                logger.log_exc("Exception while loading scratchpad")
+                logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
                 pass
 
             if (not sync_failed):
@@ -235,7 +235,7 @@
                         o.backend_status = "1 - OK"
                         o.save(update_fields=['enacted','backend_status','backend_register'])
                 except (InnocuousException,Exception,DeferredException) as e:
-                    logger.log_exc("sync step failed!")
+                    logger.log_exc("sync step failed!",extra=o.tologdict())
                     try:
                         if (o.backend_status.startswith('2 - ')):
                             str_e = '%s // %r'%(o.backend_status[4:],e)
@@ -259,7 +259,7 @@
                         scratchpad = json.loads(o.backend_register)
                         scratchpad['exponent']
                     except:
-                        logger.log_exc("Exception while updating scratchpad")
+                        logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
                         scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time(),'failures':0}
 
                     # Second failure
diff --git a/xos/synchronizers/ec2/deleters/network_deleter.py b/xos/synchronizers/ec2/deleters/network_deleter.py
index aa9ef59..ba9cd09 100644
--- a/xos/synchronizers/ec2/deleters/network_deleter.py
+++ b/xos/synchronizers/ec2/deleters/network_deleter.py
@@ -15,5 +15,5 @@
             try:
                 network_deployment_deleter(network_deployment.id)    
             except:
-                logger.log_exc("Failed to delte network deployment %s" % network_deployment)
+                logger.log_exc("Failed to delete network deployment %s" % network_deployment,extra=network.tologdict())
         network.delete()
diff --git a/xos/synchronizers/ec2/deleters/slice_deleter.py b/xos/synchronizers/ec2/deleters/slice_deleter.py
index 49bf692..6b800ac 100644
--- a/xos/synchronizers/ec2/deleters/slice_deleter.py
+++ b/xos/synchronizers/ec2/deleters/slice_deleter.py
@@ -15,5 +15,5 @@
             try:
                 slice_deployment_deleter(slice_deployment.id)
             except:
-                logger.log_exc("Failed to delete slice_deployment %s" % slice_deployment) 
+                logger.log_exc("Failed to delete slice_deployment %s" % slice_deployment,extra=slice.tologdict()) 
         slice.delete()
diff --git a/xos/synchronizers/ec2/steps/sync_instances.py b/xos/synchronizers/ec2/steps/sync_instances.py
index fc11e05..efab74d 100644
--- a/xos/synchronizers/ec2/steps/sync_instances.py
+++ b/xos/synchronizers/ec2/steps/sync_instances.py
@@ -44,7 +44,7 @@
         result = aws_run('ec2 terminate-instances --instance-ids=%s'%instance.instance_id, env=e)
 
     def sync_record(self, instance):
-        logger.info("sync'ing instance:%s deployment:%s " % (instance, instance.node.deployment))
+        logger.info("sync'ing instance:%s deployment:%s " % (instance, instance.node.deployment),extra=instance.tologdict())
 
         if not instance.instance_id:
             # public keys
diff --git a/xos/synchronizers/ec2/syncstep.py b/xos/synchronizers/ec2/syncstep.py
index 3cba48b..3a31cb6 100644
--- a/xos/synchronizers/ec2/syncstep.py
+++ b/xos/synchronizers/ec2/syncstep.py
@@ -92,7 +92,7 @@
                 if (o.pk):
                     o.save(update_fields=['backend_status'])
 
-                logger.log_exc("sync step failed!")
+                logger.log_exc("sync step failed!",extra=o.tologdict())
                 failed.append(o)
 
         return failed
diff --git a/xos/synchronizers/hpc/steps/sync_cdnprefix.py b/xos/synchronizers/hpc/steps/sync_cdnprefix.py
index 7439633..eff3b5d 100644
--- a/xos/synchronizers/hpc/steps/sync_cdnprefix.py
+++ b/xos/synchronizers/hpc/steps/sync_cdnprefix.py
@@ -67,7 +67,7 @@
         return result
 
     def sync_record(self, cp):
-        logger.info("sync'ing cdn prefix %s" % str(cp))
+        logger.info("sync'ing cdn prefix %s" % str(cp),extra=cp.tologdict())
 
         if (not cp.contentProvider) or (not cp.contentProvider.content_provider_id):
             raise Exception("CDN Prefix %s is linked to a contentProvider without an id" % str(cp))
diff --git a/xos/synchronizers/hpc/steps/sync_contentprovider.py b/xos/synchronizers/hpc/steps/sync_contentprovider.py
index c58cb5e..3e30ed3 100644
--- a/xos/synchronizers/hpc/steps/sync_contentprovider.py
+++ b/xos/synchronizers/hpc/steps/sync_contentprovider.py
@@ -51,7 +51,7 @@
         return result
 
     def sync_record(self, cp):
-        logger.info("sync'ing content provider %s" % str(cp))
+        logger.info("sync'ing content provider %s" % str(cp), extra=cp.tologdict())
         account_name = self.make_account_name(cp.name)
 
         if (not cp.serviceProvider) or (not cp.serviceProvider.service_provider_id):
diff --git a/xos/synchronizers/hpc/steps/sync_hpcservices.py b/xos/synchronizers/hpc/steps/sync_hpcservices.py
index e49f93f..63bf19b 100644
--- a/xos/synchronizers/hpc/steps/sync_hpcservices.py
+++ b/xos/synchronizers/hpc/steps/sync_hpcservices.py
@@ -39,5 +39,5 @@
             return self.filter_hpc_service(HpcService.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)))
 
     def sync_record(self, hpc_service):
-        logger.info("sync'ing hpc_service %s" % str(hpc_service))
+        logger.info("sync'ing hpc_service %s" % str(hpc_service),extra=hpc_service.tologdict())
         hpc_service.save()
diff --git a/xos/synchronizers/hpc/steps/sync_originserver.py b/xos/synchronizers/hpc/steps/sync_originserver.py
index 0a675e1..bd5b227 100644
--- a/xos/synchronizers/hpc/steps/sync_originserver.py
+++ b/xos/synchronizers/hpc/steps/sync_originserver.py
@@ -55,7 +55,7 @@
         return result
 
     def sync_record(self, ors):
-        logger.info("sync'ing origin server %s" % str(ors))
+        logger.info("sync'ing origin server %s" % str(ors),extra=ors.tologdict())
 
         if (not ors.contentProvider) or (not ors.contentProvider.content_provider_id):
             raise Exception("Origin Server %s is linked to a contentProvider with no id" % str(ors))
diff --git a/xos/synchronizers/hpc/steps/sync_serviceprovider.py b/xos/synchronizers/hpc/steps/sync_serviceprovider.py
index 0cf145f..af6d685 100644
--- a/xos/synchronizers/hpc/steps/sync_serviceprovider.py
+++ b/xos/synchronizers/hpc/steps/sync_serviceprovider.py
@@ -51,7 +51,7 @@
         return result
 
     def sync_record(self, sp):
-        logger.info("sync'ing service provider %s" % str(sp))
+        logger.info("sync'ing service provider %s" % str(sp),extra=sp.tologdict())
         account_name = self.make_account_name(sp.name)
         sp_dict = {"account": account_name, "name": sp.name, "enabled": sp.enabled}
         if not sp.service_provider_id:
diff --git a/xos/synchronizers/hpc/steps/sync_sitemap.py b/xos/synchronizers/hpc/steps/sync_sitemap.py
index 885c616..a1d177b 100644
--- a/xos/synchronizers/hpc/steps/sync_sitemap.py
+++ b/xos/synchronizers/hpc/steps/sync_sitemap.py
@@ -49,7 +49,7 @@
         all_map_ids = [x["map_id"] for x in self.client.onev.ListAll("Map")]
         for map in SiteMap.objects.all():
             if (map.map_id is not None) and (map.map_id not in all_map_ids):
-                logger.info("Map %s was not found on CMI" % map.map_id)
+                logger.info("Map %s was not found on CMI" % map.map_id,extra=map.tologdict())
                 map.map_id=None
                 map.save()
                 result = True
@@ -68,7 +68,7 @@
                 self.client.onev.UnBind("map", map.map_id, to_name, id)
 
     def sync_record(self, map):
-        logger.info("sync'ing SiteMap %s" % str(map))
+        logger.info("sync'ing SiteMap %s" % str(map),extra=map.tologdict())
 
         if not map.map:
             # no contents
diff --git a/xos/synchronizers/model_policy.py b/xos/synchronizers/model_policy.py
index d0bbbb1..e2121ec 100644
--- a/xos/synchronizers/model_policy.py
+++ b/xos/synchronizers/model_policy.py
@@ -41,7 +41,7 @@
     except AttributeError,e:
         raise e
     except Exception,e:
-            logger.info('Could not save %r. Exception: %r'%(d,e))
+            logger.info('Could not save %r. Exception: %r'%(d,e), extra=d.tologdict())
 
 def delete_if_inactive(d, o):
     try:
diff --git a/xos/synchronizers/onos/steps/sync_onosservice.py b/xos/synchronizers/onos/steps/sync_onosservice.py
index 944a05c..2e6acd9 100644
--- a/xos/synchronizers/onos/steps/sync_onosservice.py
+++ b/xos/synchronizers/onos/steps/sync_onosservice.py
@@ -59,7 +59,7 @@
 
     def sync_record(self, o):
         if o.no_container:
-            logger.info("no work to do for onos service, because o.no_container is set")
+            logger.info("no work to do for onos service, because o.no_container is set",extra=o.tologdict())
             o.save()
         else:
             super(SyncONOSService, self).sync_record(o)
diff --git a/xos/synchronizers/openstack/steps/sync_container.py b/xos/synchronizers/openstack/steps/sync_container.py
index d647aef..84a2c61 100644
--- a/xos/synchronizers/openstack/steps/sync_container.py
+++ b/xos/synchronizers/openstack/steps/sync_container.py
@@ -119,7 +119,7 @@
         return fields
 
     def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -139,7 +139,7 @@
         o.save()
 
     def delete_record(self, o):
-        logger.info("delete'ing object %s" % str(o))
+        logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
 
         fields = self.get_ansible_fields(o)
 
@@ -158,6 +158,6 @@
             template_name = self.template_name
         tStart = time.time()
         run_template_ssh(template_name, fields, path="container")
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
 
 
diff --git a/xos/synchronizers/openstack/syncstep.py b/xos/synchronizers/openstack/syncstep.py
index d1639b4..0a01356 100644
--- a/xos/synchronizers/openstack/syncstep.py
+++ b/xos/synchronizers/openstack/syncstep.py
@@ -201,7 +201,7 @@
                 reset_queries()
             except:
                 # this shouldn't happen, but in case it does, catch it...
-                logger.log_exc("exception in reset_queries")
+                logger.log_exc("exception in reset_queries",extra=o.tologdict())
 
             sync_failed = False
             try:
@@ -216,7 +216,7 @@
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
             except:
-                logger.log_exc("Exception while loading scratchpad")
+                logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
                 pass
 
             if (not sync_failed):
@@ -234,7 +234,7 @@
                         o.backend_status = "1 - OK"
                         o.save(update_fields=['enacted','backend_status','backend_register'])
                 except (InnocuousException,Exception,DeferredException) as e:
-                    logger.log_exc("sync step failed!")
+                    logger.log_exc("sync step failed!",extra=o.tologdict())
                     try:
                         if (o.backend_status.startswith('2 - ')):
                             str_e = '%s // %r'%(o.backend_status[4:],e)
@@ -258,7 +258,7 @@
                         scratchpad = json.loads(o.backend_register)
                         scratchpad['exponent']
                     except:
-                        logger.log_exc("Exception while updating scratchpad")
+                        logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
                         scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time(),'failures':0}
 
                     # Second failure
diff --git a/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py b/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py
index c9648ff..15a9b91 100644
--- a/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py
+++ b/xos/synchronizers/requestrouter/steps/sync_requestrouterservices.py
@@ -35,7 +35,7 @@
     def sync_record(self, rr_service):
 	try:
         	print "syncing service!"
-        	logger.info("sync'ing rr_service %s" % str(rr_service))
+        	logger.info("sync'ing rr_service %s" % str(rr_service),extra=rr_service.tologdict())
         	self.gen_slice_file(rr_service)
         	rr_service.save()
 		return True
diff --git a/xos/synchronizers/syndicate/steps/sync_volume.py b/xos/synchronizers/syndicate/steps/sync_volume.py
index e6dc90b..8773542 100644
--- a/xos/synchronizers/syndicate/steps/sync_volume.py
+++ b/xos/synchronizers/syndicate/steps/sync_volume.py
@@ -25,7 +25,7 @@
 from logging import Logger
 logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
 logger = logging.getLogger()
-logger.setLevel( logging.INFO )
+logger.setLevel( logging.INFO )
 
 # point to planetstack
 if __name__ != "__main__": 
@@ -53,7 +53,7 @@
         Synchronize a Volume record with Syndicate.
         """
         
-        logger.info( "Sync Volume = %s\n\n" % volume.name )
+        logger.info( "Sync Volume = %s\n\n" % volume.name ,extra=volume.tologdict())
     
         user_email = volume.owner_id.email
         config = syndicatelib.get_config()
@@ -65,7 +65,7 @@
             observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
         except Exception, e:
             traceback.print_exc()
-            logger.error("config is missing SYNDICATE_OPENCLOUD_SECRET")
+            logger.error("config is missing SYNDICATE_OPENCLOUD_SECRET",extra=volume.tologdict())
             raise e
 
         # volume owner must exist as a Syndicate user...
@@ -74,7 +74,7 @@
             assert rc == True, "Failed to create or read volume principal '%s'" % volume_principal_id
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to ensure principal '%s' exists" % volume_principal_id )
+            logger.error("Failed to ensure principal '%s' exists" % volume_principal_id ,extra=volume.tologdict())
             raise e
 
         # volume must exist 
@@ -84,7 +84,7 @@
             new_volume = syndicatelib.ensure_volume_exists( volume_principal_id, volume, user=user )
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to ensure volume '%s' exists" % volume.name )
+            logger.error("Failed to ensure volume '%s' exists" % volume.name ,extra=volume.tologdict())
             raise e
            
         # did we create the Volume?
@@ -98,7 +98,7 @@
                 rc = syndicatelib.update_volume( volume )
             except Exception, e:
                 traceback.print_exc()
-                logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message))
+                logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message),extra=volume.tologdict())
                 raise e
                     
         return True
@@ -109,7 +109,7 @@
             syndicatelib.ensure_volume_absent( volume_name )
         except Exception, e:
             traceback.print_exc()
-            logger.exception("Failed to erase volume '%s'" % volume_name)
+            logger.exception("Failed to erase volume '%s'" % volume_name,extra=volume.tologdict())
             raise e
 
 
diff --git a/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py b/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py
index 2889502..9fca2a4 100644
--- a/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py
+++ b/xos/synchronizers/syndicate/steps/sync_volumeaccessright.py
@@ -23,7 +23,7 @@
 from logging import Logger
 logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
 logger = logging.getLogger()
-logger.setLevel( logging.INFO )
+logger.setLevel( logging.INFO )
 
 # point to planetstack 
 if __name__ != "__main__":
@@ -57,7 +57,7 @@
         volume_name = vac.volume.name
         syndicate_caps = syndicatelib.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) 
         
-        logger.info( "Sync VolumeAccessRight for (%s, %s)" % (user_email, volume_name) )
+        logger.info( "Sync VolumeAccessRight for (%s, %s)" % (user_email, volume_name) ,extra=vac.tologdict())
         
         # validate config
         try:
@@ -65,7 +65,7 @@
            observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
         except Exception, e:
            traceback.print_exc()
-           logger.error("syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OPENCLOUD_SECRET")
+           logger.error("syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OPENCLOUD_SECRET",extra=vac.tologdict())
            raise e
             
         # ensure the user exists and has credentials
@@ -74,7 +74,7 @@
             assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (user_email, rc, user)
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to ensure user '%s' exists" % user_email )
+            logger.error("Failed to ensure user '%s' exists" % user_email ,extra=vac.tologdict())
             raise e
  
         # make the access right for the user to create their own UGs, and provision an RG for this user that will listen on localhost.
@@ -85,7 +85,7 @@
 
         except Exception, e:
             traceback.print_exc()
-            logger.error("Faoed to ensure user %s can access Volume %s with rights %s" % (user_email, volume_name, syndicate_caps))
+            logger.error("Failed to ensure user %s can access Volume %s with rights %s" % (user_email, volume_name, syndicate_caps),extra=vac.tologdict())
             raise e
 
         return True
diff --git a/xos/synchronizers/syndicate/steps/sync_volumeslice.py b/xos/synchronizers/syndicate/steps/sync_volumeslice.py
index 1be61b9..9af97f3 100644
--- a/xos/synchronizers/syndicate/steps/sync_volumeslice.py
+++ b/xos/synchronizers/syndicate/steps/sync_volumeslice.py
@@ -23,7 +23,7 @@
 from logging import Logger
 logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
 logger = logging.getLogger()
-logger.setLevel( logging.INFO )
+logger.setLevel( logging.INFO )
 
 # point to planetstack 
 if __name__ != "__main__":
@@ -50,7 +50,7 @@
 
     def sync_record(self, vs):
         
-        logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name))
+        logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name),extra=vs.tologdict())
         
         # extract arguments...
         user_email = vs.slice_id.creator.email
@@ -70,7 +70,7 @@
            
         except Exception, e:
            traceback.print_exc()
-           logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OPENCLOUD_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_PRIVATE_KEY, SYNDICATE_SMI_URL")
+           logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OPENCLOUD_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_PRIVATE_KEY, SYNDICATE_SMI_URL",extra=vs.tologdict())
            raise e
             
         # get secrets...
@@ -84,7 +84,7 @@
            
         except Exception, e:
            traceback.print_exc()
-           logger.error("Failed to load secret credentials")
+           logger.error("Failed to load secret credentials",extra=vs.tologdict())
            raise e
         
         # make sure there's a slice-controlled Syndicate user account for the slice owner
@@ -95,7 +95,7 @@
             assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (slice_principal_id, rc, user)
         except Exception, e:
             traceback.print_exc()
-            logger.error('Failed to ensure slice user %s exists' % slice_principal_id)
+            logger.error('Failed to ensure slice user %s exists' % slice_principal_id,extra=vs.tologdict())
             raise e
             
         # grant the slice-owning user the ability to provision UGs in this Volume, and also provision for the user the (single) RG the slice will instantiate in each VM.
@@ -105,7 +105,7 @@
             
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name))
+            logger.error("Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name),extra=vs.tologdict())
             raise e
             
         # generate and save slice credentials....
@@ -115,7 +115,7 @@
                 
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name))
+            logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name),extra=vs.tologdict())
             raise e
              
         # ... and push them all out.
@@ -125,7 +125,7 @@
                
         except Exception, e:
             traceback.print_exc()
-            logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name))
+            logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name),extra=vs.tologdict())
             raise e
         
         return True
diff --git a/xos/synchronizers/vbng/steps/sync_vbngtenant.py b/xos/synchronizers/vbng/steps/sync_vbngtenant.py
index 4fa351e..89e7bc0 100644
--- a/xos/synchronizers/vbng/steps/sync_vbngtenant.py
+++ b/xos/synchronizers/vbng/steps/sync_vbngtenant.py
@@ -37,7 +37,7 @@
         return objs
 
     def defer_sync(self, o, reason):
-        logger.info("defer object %s due to %s" % (str(o), reason))
+        logger.info("defer object %s due to %s" % (str(o), reason),extra=o.tologdict())
         raise Exception("defer object %s due to %s" % (str(o), reason))
 
     def get_vbng_service(self, o):
@@ -77,7 +77,7 @@
                 if not ip:
                     raise Exception("vBNG service is linked to an ONOSApp, but the App's Service's Slice's first instance does not have an ip")
 
-                logger.info("Using ip %s from ONOS Instance %s" % (ip, instance))
+                logger.info("Using ip %s from ONOS Instance %s" % (ip, instance),extra=o.tologdict())
 
                 return "http://%s:8181/onos/virtualbng/" % ip
 
@@ -107,18 +107,18 @@
         return (vcpe.wan_ip, vcpe.wan_container_mac, vcpe.instance.node.name)
 
     def sync_record(self, o):
-        logger.info("sync'ing VBNGTenant %s" % str(o))
+        logger.info("sync'ing VBNGTenant %s" % str(o),extra=o.tologdict())
 
         if not o.routeable_subnet:
             (private_ip, private_mac, private_hostname) = self.get_private_interface(o)
-            logger.info("contacting vBNG service to request mapping for private ip %s mac %s host %s" % (private_ip, private_mac, private_hostname) )
+            logger.info("contacting vBNG service to request mapping for private ip %s mac %s host %s" % (private_ip, private_mac, private_hostname) ,extra=o.tologdict())
 
             url = self.get_vbng_url(o) + "privateip/%s/%s/%s" % (private_ip, private_mac, private_hostname)
-            logger.info( "vbng url: %s" % url )
+            logger.info( "vbng url: %s" % url ,extra=o.tologdict())
             r = requests.post(url )
             if (r.status_code != 200):
                 raise Exception("Received error from bng service (%d)" % r.status_code)
-            logger.info("received public IP %s from private IP %s" % (r.text, private_ip))
+            logger.info("received public IP %s from private IP %s" % (r.text, private_ip),extra=o.tologdict())
 
             if r.text == "0":
                 raise Exception("VBNG service failed to return a routeable_subnet (probably ran out)")
@@ -131,11 +131,11 @@
         o.save()
 
     def delete_record(self, o):
-        logger.info("deleting VBNGTenant %s" % str(o))
+        logger.info("deleting VBNGTenant %s" % str(o),extra=o.tologdict())
 
         if o.mapped_ip:
             private_ip = o.mapped_ip
-            logger.info("contacting vBNG service to delete private ip %s" % private_ip)
+            logger.info("contacting vBNG service to delete private ip %s" % private_ip,extra=o.tologdict())
             r = requests.delete(self.get_vbng_url(o) + "privateip/%s" % private_ip, )
             if (r.status_code != 200):
                 raise Exception("Received error from bng service (%d)" % r.status_code)
diff --git a/xos/synchronizers/vcpe/steps/sync_vcpetenant.py b/xos/synchronizers/vcpe/steps/sync_vcpetenant.py
index 2f2147b..d52f075 100644
--- a/xos/synchronizers/vcpe/steps/sync_vcpetenant.py
+++ b/xos/synchronizers/vcpe/steps/sync_vcpetenant.py
@@ -91,7 +91,7 @@
                                     if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
                                         dnsdemux_ip = ns.ip
                 if not dnsdemux_ip:
-                    logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label)
+                    logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label,extra=o.tologdict())
             else:
                 # Connect to dnsdemux using the instance's public address
                 for service in HpcService.objects.all():
@@ -104,7 +104,7 @@
                                     except:
                                         pass
                 if not dnsdemux_ip:
-                    logger.info("failed to find a dnsdemux with a public address")
+                    logger.info("failed to find a dnsdemux with a public address",extra=o.tologdict())
 
             for prefix in CDNPrefix.objects.all():
                 cdn_prefixes.append(prefix.prefix)
@@ -122,13 +122,13 @@
                         if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
                             bbs_addrs.append(ns.ip)
             else:
-                logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not")
+                logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not",extra=o.tologdict())
             if not bbs_addrs:
-                logger.info("failed to find any usable addresses on bbs_slice")
+                logger.info("failed to find any usable addresses on bbs_slice",extra=o.tologdict())
         elif vcpe_service.bbs_server:
             bbs_addrs.append(vcpe_service.bbs_server)
         else:
-            logger.info("neither bbs_slice nor bbs_server is configured in the vCPE")
+            logger.info("neither bbs_slice nor bbs_server is configured in the vCPE",extra=o.tologdict())
 
         vlan_ids = []
         s_tags = []
@@ -222,7 +222,7 @@
         if service.url_filter_kind == "broadbandshield":
             # disable url_filter if there are no bbs_addrs
             if url_filter_enable and (not fields.get("bbs_addrs",[])):
-                logger.info("disabling url_filter because there are no bbs_addrs")
+                logger.info("disabling url_filter because there are no bbs_addrs",extra=o.tologdict())
                 url_filter_enable = False
 
             if url_filter_enable:
@@ -239,19 +239,19 @@
                     bbs_port = 8018
 
                 if not bbs_hostname:
-                    logger.info("broadbandshield is not configured")
+                    logger.info("broadbandshield is not configured",extra=o.tologdict())
                 else:
                     tStart = time.time()
                     bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
                     bbs.sync(url_filter_level, url_filter_users)
 
                     if o.hpc_client_ip:
-                        logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip))
+                        logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip),extra=o.tologdict())
                         bbs.associate(o.hpc_client_ip)
                     else:
-                        logger.info("no hpc_client_ip to associate")
+                        logger.info("no hpc_client_ip to associate",extra=o.tologdict())
 
-                    logger.info("bbs update time %d" % int(time.time()-tStart))
+                    logger.info("bbs update time %d" % int(time.time()-tStart),extra=o.tologdict())
 
 
     def run_playbook(self, o, fields):
@@ -259,7 +259,7 @@
         quick_update = (o.last_ansible_hash == ansible_hash)
 
         if ENABLE_QUICK_UPDATE and quick_update:
-            logger.info("quick_update triggered; skipping ansible recipe")
+            logger.info("quick_update triggered; skipping ansible recipe",extra=o.tologdict())
         else:
             if o.instance.isolation in ["container", "container_vm"]:
                 super(SyncVSGTenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")