Restage to pl/master
diff --git a/planetstack/syndicate_observer/steps/sync_volume.py b/planetstack/syndicate_observer/steps/sync_volume.py
new file mode 100644
index 0000000..767541c
--- /dev/null
+++ b/planetstack/syndicate_observer/steps/sync_volume.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+
+import os
+import sys
+import traceback
+import base64
+
+if __name__ == "__main__":
+    # for testing 
+    if os.getenv("OPENCLOUD_PYTHONPATH"):
+        sys.path.append( os.getenv("OPENCLOUD_PYTHONPATH") )
+    else:
+        print >> sys.stderr, "No OPENCLOUD_PYTHONPATH variable set.  Assuming that OpenCloud is in PYTHONPATH"
+ 
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+
+
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from core.models import Service
+from syndicate_storage.models import Volume
+
+import logging
+from logging import Logger
+logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
+logger = logging.getLogger()
+logger.setLevel( logging.INFO )
+
+# point to planetstack
+if __name__ != "__main__": 
+    if os.getenv("OPENCLOUD_PYTHONPATH") is not None:
+        sys.path.insert(0, os.getenv("OPENCLOUD_PYTHONPATH"))
+    else:
+        logger.warning("No OPENCLOUD_PYTHONPATH set; assuming your PYTHONPATH works")
+
+# syndicatelib will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+import syndicatelib
+
+
+class SyncVolume(SyncStep):
+    provides=[Volume]
+    requested_interval=0
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def fetch_pending(self):
+        try:
+            ret = Volume.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+            return ret
+        except Exception, e:
+            traceback.print_exc()
+            return []  # empty list (not None), so callers can always iterate the result
+
+    def sync_record(self, volume):
+        """
+        Synchronize a Volume record with Syndicate.
+        """
+        
+        logger.info( "Sync Volume = %s\n\n" % volume.name )
+    
+        user_email = volume.owner_id.email
+        config = syndicatelib.get_config()
+        
+        volume_principal_id = syndicatelib.make_volume_principal_id( user_email, volume.name )
+
+        # get the observer secret 
+        try:
+            observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("config is missing SYNDICATE_OPENCLOUD_SECRET")
+            raise e
+
+        # volume owner must exist as a Syndicate user...
+        try:
+            rc, user = syndicatelib.ensure_principal_exists( volume_principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1)
+            assert rc is True, "Failed to create or read volume principal '%s'" % volume_principal_id
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("Failed to ensure principal '%s' exists" % volume_principal_id )
+            raise e
+
+        # volume must exist 
+        
+        # create or update the Volume
+        try:
+            new_volume = syndicatelib.ensure_volume_exists( volume_principal_id, volume, user=user )
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("Failed to ensure volume '%s' exists" % volume.name )
+            raise e
+           
+        # did we create the Volume?
+        if new_volume is not None:
+            # we're good
+            pass 
+             
+        # otherwise, just update it 
+        else:
+            try:
+                rc = syndicatelib.update_volume( volume )
+            except Exception, e:
+                traceback.print_exc()
+                logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message))
+                raise e
+                    
+        return True
+
+
+
+
+if __name__ == "__main__":
+    sv = SyncVolume()
+
+
+    # first, set all volumes to not-enacted so we can test 
+    for v in Volume.objects.all():
+       v.enacted = None
+       v.save()
+    
+    # NOTE: for resetting only 
+    if len(sys.argv) > 1 and sys.argv[1] == "reset":
+       sys.exit(0)
+    # fetch_pending() may return None on a query error; guard before iterating
+    recs = sv.fetch_pending() or []
+
+    for rec in recs:
+        rc = sv.sync_record( rec )
+        if not rc:
+          print "\n\nFailed to sync %s\n\n" % (rec.name)
+
diff --git a/planetstack/syndicate_observer/steps/sync_volumeaccessright.py b/planetstack/syndicate_observer/steps/sync_volumeaccessright.py
new file mode 100644
index 0000000..360cb6f
--- /dev/null
+++ b/planetstack/syndicate_observer/steps/sync_volumeaccessright.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+
+import os
+import sys
+import base64
+import traceback
+
+if __name__ == "__main__":
+    # for testing 
+    if os.getenv("OPENCLOUD_PYTHONPATH"):
+        sys.path.append( os.getenv("OPENCLOUD_PYTHONPATH") )
+    else:
+        print >> sys.stderr, "No OPENCLOUD_PYTHONPATH variable set.  Assuming that OpenCloud is in PYTHONPATH"
+ 
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from core.models import Service
+
+import logging
+from logging import Logger
+logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
+logger = logging.getLogger()
+logger.setLevel( logging.INFO )
+
+# point to planetstack 
+if __name__ != "__main__":
+    if os.getenv("OPENCLOUD_PYTHONPATH") is not None:
+        sys.path.insert(0, os.getenv("OPENCLOUD_PYTHONPATH"))
+    else:
+        logger.warning("No OPENCLOUD_PYTHONPATH set; assuming your PYTHONPATH works")
+
+from syndicate_storage.models import VolumeAccessRight
+
+# syndicatelib will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+import syndicatelib
+
+class SyncVolumeAccessRight(SyncStep):
+    provides=[VolumeAccessRight]
+    requested_interval=0
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def fetch_pending(self):
+        return VolumeAccessRight.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, vac):
+        
+        syndicate_caps = "UNKNOWN"  # for exception handling
+        
+        # get arguments
+        config = syndicatelib.get_config()
+        user_email = vac.owner_id.email
+        volume_name = vac.volume.name
+        syndicate_caps = syndicatelib.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) 
+        
+        logger.info( "Sync VolumeAccessRight for (%s, %s)" % (user_email, volume_name) )
+        
+        # validate config
+        try:
+           RG_port = config.SYNDICATE_RG_DEFAULT_PORT
+           observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
+        except Exception, e:
+           traceback.print_exc()
+           logger.error("syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OPENCLOUD_SECRET")
+           raise e
+            
+        # ensure the user exists and has credentials
+        try:
+            rc, user = syndicatelib.ensure_principal_exists( user_email, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )
+            assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (user_email, rc, user)
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("Failed to ensure user '%s' exists" % user_email )
+            raise e
+ 
+        # make the access right for the user to create their own UGs, and provision an RG for this user that will listen on localhost.
+        # the user will have to supply their own RG closure.
+        try:
+            rc = syndicatelib.setup_volume_access( user_email, volume_name, syndicate_caps, RG_port, observer_secret )
+            assert rc is True, "Failed to setup volume access for %s in %s" % (user_email, volume_name)
+
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("Failed to ensure user %s can access Volume %s with rights %s" % (user_email, volume_name, syndicate_caps))
+            raise e
+
+        return True
+
+
+if __name__ == "__main__":
+
+    # For testing: mark every VolumeAccessRight as not-enacted so it gets re-synced 
+    for v in VolumeAccessRight.objects.all():
+       v.enacted = None
+       v.save()
+
+    # NOTE: a "reset" argument stops here, leaving records un-enacted (for resetting only) 
+    if len(sys.argv) > 1 and sys.argv[1] == "reset":
+       sys.exit(0)
+
+
+    sv = SyncVolumeAccessRight()
+    recs = sv.fetch_pending()
+
+    for rec in recs:
+        sv.sync_record( rec )
+
diff --git a/planetstack/syndicate_observer/steps/sync_volumeslice.py b/planetstack/syndicate_observer/steps/sync_volumeslice.py
new file mode 100644
index 0000000..8941b97
--- /dev/null
+++ b/planetstack/syndicate_observer/steps/sync_volumeslice.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+import os
+import sys
+import base64
+import traceback
+
+if __name__ == "__main__":
+    # for testing 
+    if os.getenv("OPENCLOUD_PYTHONPATH"):
+        sys.path.append( os.getenv("OPENCLOUD_PYTHONPATH") )
+    else:
+        print >> sys.stderr, "No OPENCLOUD_PYTHONPATH variable set.  Assuming that OpenCloud is in PYTHONPATH"
+ 
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from core.models import Service, Slice
+
+import logging
+from logging import Logger
+logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
+logger = logging.getLogger()
+logger.setLevel( logging.INFO )
+
+# point to planetstack 
+if __name__ != "__main__":
+    if os.getenv("OPENCLOUD_PYTHONPATH") is not None:
+        sys.path.insert(0, os.getenv("OPENCLOUD_PYTHONPATH"))
+    else:
+        logger.warning("No OPENCLOUD_PYTHONPATH set; assuming your PYTHONPATH works") 
+
+from syndicate_storage.models import VolumeSlice,VolumeAccessRight,Volume
+
+# syndicatelib will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+import syndicatelib
+
+
+class SyncVolumeSlice(SyncStep):
+    provides=[VolumeSlice]
+    requested_interval=0
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def fetch_pending(self):
+        return VolumeSlice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, vs):
+        
+        logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name))
+        
+        # Pull the slice creator, slice/volume names, capabilities, and gateway ports off the record.
+        user_email = vs.slice_id.creator.email
+        slice_name = vs.slice_id.name
+        volume_name = vs.volume_id.name
+        syndicate_caps = syndicatelib.opencloud_caps_to_syndicate_caps( vs.cap_read_data, vs.cap_write_data, vs.cap_host_data )
+        RG_port = vs.RG_portnum
+        UG_port = vs.UG_portnum
+        slice_secret = None  # obtained below via get_or_create_slice_secret()
+        
+        config = syndicatelib.get_config()
+        try:
+           observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
+           RG_closure = config.SYNDICATE_RG_CLOSURE
+           observer_pkey_path = config.SYNDICATE_PRIVATE_KEY
+           syndicate_url = config.SYNDICATE_SMI_URL
+           
+        except Exception, e:
+           traceback.print_exc()
+           logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OPENCLOUD_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_PRIVATE_KEY, SYNDICATE_SMI_URL")
+           raise e
+            
+        # Load the observer's private key and the per-slice shared secret.
+        try:
+           observer_pkey_pem = syndicatelib.get_private_key_pem( observer_pkey_path )
+           assert observer_pkey_pem is not None, "Failed to load Observer private key"
+           
+           # get/create the slice secret
+           slice_secret = syndicatelib.get_or_create_slice_secret( observer_pkey_pem, slice_name )    
+           assert slice_secret is not None, "Failed to get or create slice secret for %s" % slice_name
+           
+        except Exception, e:
+           traceback.print_exc()
+           logger.error("Failed to load secret credentials")
+           raise e
+        
+        # make sure there's a slice-controlled Syndicate user account for the slice owner
+        slice_principal_id = syndicatelib.make_slice_principal_id( user_email, slice_name )
+        
+        try:
+            rc, user = syndicatelib.ensure_principal_exists( slice_principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )
+            assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (slice_principal_id, rc, user)
+        except Exception, e:
+            traceback.print_exc()
+            logger.error('Failed to ensure slice user %s exists' % slice_principal_id)
+            raise e
+            
+        # grant the slice-owning user the ability to provision UGs in this Volume, and also provision for the user the (single) RG the slice will instantiate in each VM.
+        try:
+            rc = syndicatelib.setup_volume_access( slice_principal_id, volume_name, syndicate_caps, RG_port, observer_secret, RG_closure=RG_closure )
+            assert rc is True, "Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name)
+            
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name))
+            raise e
+            
+        # generate and save slice credentials....
+        try:
+            slice_cred = syndicatelib.save_slice_credentials( observer_pkey_pem, syndicate_url, slice_principal_id, volume_name, slice_name, observer_secret, slice_secret, UG_port, existing_user=user )
+            assert slice_cred is not None, "Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name )
+                
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name))
+            raise e
+             
+        # ... and push them all out.
+        try:
+            rc = syndicatelib.push_credentials_to_slice( slice_name, slice_cred )
+            assert rc is True, "Failed to push credentials to slice %s for volume %s" % (slice_name, volume_name)
+               
+        except Exception, e:
+            traceback.print_exc()
+            logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name))
+            raise e
+        
+        return True
+
+
+if __name__ == "__main__":
+    sv = SyncVolumeSlice()
+
+    # For testing: mark every VolumeSlice as not-enacted so it gets re-synced 
+    for v in VolumeSlice.objects.all():
+       v.enacted = None
+       v.save()
+
+    # NOTE: a "reset" argument stops here, leaving records un-enacted (for resetting only) 
+    if len(sys.argv) > 1 and sys.argv[1] == "reset":
+       sys.exit(0)
+
+    recs = sv.fetch_pending()
+
+    for rec in recs:
+        if rec.slice_id.creator is None:
+           print "Ignoring slice %s, since it has no creator" % (rec.slice_id)
+           continue
+
+        sv.sync_record( rec )
+