Merge branch 'master' of git.planet-lab.org:/git/plstackapi

Conflicts:
	planetstack/core/admin.py
	planetstack/planetstack/urls.py
diff --git a/planetstack.deps b/planetstack.deps
new file mode 100644
index 0000000..6eae1fc
--- /dev/null
+++ b/planetstack.deps
@@ -0,0 +1,47 @@
+{
+    "Node": [
+        "Site", 
+        "Deployment"
+    ], 
+    "Slice": [
+        "Site"
+    ], 
+    "ReservedResource": [
+        "Sliver"
+    ], 
+    "SliceMembership": [
+        "User", 
+        "Slice", 
+        "Role"
+    ], 
+    "NetworkSlice": [
+        "Network", 
+        "Slice"
+    ], 
+    "Tag": [
+        "Project"
+    ], 
+    "User": [
+        "Site"
+    ], 
+    "SliceTag": [
+        "Slice"
+    ], 
+    "Reservation": [
+        "Slice"
+    ], 
+    "NetworkSliver": [
+        "Network", 
+        "Sliver"
+    ], 
+    "SitePrivilege": [
+        "User", 
+        "Site", 
+        "Role"
+    ], 
+    "Sliver": [
+        "Image", 
+        "Slice", 
+        "Node"
+    ]
+}
diff --git a/planetstack/core/admin.py b/planetstack/core/admin.py
index 730937a..90a9700 100644
--- a/planetstack/core/admin.py
+++ b/planetstack/core/admin.py
@@ -41,35 +41,70 @@
     def has_add_permission(self, request):
         return False
 
-class UserMembershipInline(generic.GenericTabularInline):
-    model = Member
-    exclude = ['enacted']
-    extra = 1
-    suit_classes = 'suit-tab suit-tab-membership'
-
-    def queryset(self, request):
-        qs = super(UserMembershipInline, self).queryset(request)
-        return qs.filter(user=request.user)
-        
-class MemberInline(generic.GenericTabularInline):
-    model = Member
-    exclude = ['enacted']
-    extra = 1
-    suit_classes = 'suit-tab suit-tab-members'
-
 class TagInline(generic.GenericTabularInline):
     model = Tag
     exclude = ['enacted']
     extra = 0
     suit_classes = 'suit-tab suit-tab-tags'
 
+class NetworkLookerUpper:
+    """ This is a callable that looks up a network name in a sliver and returns
+        the ip address for that network.
+    """
+
+    def __init__(self, name):
+        self.short_description = name
+        self.__name__ = name
+        self.network_name = name
+
+    def __call__(self, obj):
+        if obj is not None:
+            for nbs in obj.networksliver_set.all():
+                if (nbs.network.name == self.network_name):
+                    return nbs.ip
+        return ""
+
+    def __str__(self):
+        return self.network_name
+
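+# Usage sketch for NetworkLookerUpper (illustrative; the network name and the
+# wiring are hypothetical, not part of the inline below):
+#
+#   nat_ip = NetworkLookerUpper("nat-net")
+#   readonly_fields = ['ip', 'instance_name', nat_ip]   # one extra column per network
+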
 class SliverInline(PlStackTabularInline):
     model = Sliver
     fields = ['ip', 'instance_name', 'slice', 'numberCores', 'image', 'node', 'deploymentNetwork']
     extra = 0
-    #readonly_fields = ['ip', 'instance_name', 'image']
     readonly_fields = ['ip', 'instance_name']
     suit_classes = 'suit-tab suit-tab-slivers'
+
+# Note: this breaks admin.py when using an inline to add a node/image
+#    def _declared_fieldsets(self):
+#        # Return None so django will call get_fieldsets and we can insert our
+#        # dynamic fields
+#        return None
+#
+#    def get_readonly_fields(self, request, obj=None):
+#        readonly_fields = super(SliverInline, self).get_readonly_fields(request, obj)
+#
+#        # Lookup the networks that are bound to the slivers, and add those
+#        # network names to the list of readonly fields.
+#
+#        for sliver in obj.slivers.all():
+#            for nbs in sliver.networksliver_set.all():
+#                if nbs.ip:
+#                    network_name = nbs.network.name
+#                    if network_name not in [str(x) for x in readonly_fields]:
+#                        readonly_fields.append(NetworkLookerUpper(network_name))
+#
+#        return readonly_fields
+#
+#    def get_fieldsets(self, request, obj=None):
+#        form = self.get_formset(request, obj).form
+#        # fields = the read/write fields + the read-only fields
+#        fields = self.fields
+#        for fieldName in self.get_readonly_fields(request,obj):
+#            if not fieldName in fields:
+#                fields.append(fieldName)
+#
+#        return [(None, {'fields': fields})]
+
     
 
 class SiteInline(PlStackTabularInline):
@@ -171,6 +206,13 @@
 
         return super(SlicePrivilegeInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
 
+class SliceNetworkInline(PlStackTabularInline):
+    model = Network.slices.through
+    extra = 0
+    verbose_name = "Network Connection"
+    verbose_name_plural = "Network Connections"
+    suit_classes = 'suit-tab suit-tab-slicenetworks'
+
 class SliceTagInline(PlStackTabularInline):
     model = SliceTag
     extra = 0
@@ -187,23 +229,6 @@
     save_on_top = False
     exclude = ['enacted']
 
-#class RoleMemberForm(forms.ModelForm):
-#    request=None
-#    member=forms.ModelChoiceField(queryset=Member.objects.all()) #first get all
-#
-#    def __init__(self,fata=None,files=None,auto_id='id_%s',prefix=None,initial=None,error_class=ErrorList,label_suffix=':',empty_permitted=False,instance=None):
-#        super(RoleMemberForm,self).__init__data,files,auto_id,prefix,initial,error_class,label_suffix,empty_permitted,instance)
-#
-#        self.fields["member"].queryset = member.objects.filter(
-
-class RoleMemberInline (admin.StackedInline):
-    model = Member
-#    form = RoleMemberForm
-    
-    def get_formset(self,request,obj=None, **kwargs):
-        self.form.request=request
-        return super(RoleMemberInline, self).get_formset(request, obj, **kwargs)
-
 class SliceRoleAdmin(PlanetStackBaseAdmin):
     model = SliceRole
     pass
@@ -212,15 +237,6 @@
     model = SiteRole
     pass
 
-class RoleAdmin(PlanetStackBaseAdmin):
-    fieldsets = [
-        ('Role', {'fields': ['role_type', 'description','content_type'],
-                  'classes':['collapse']})
-    ]
-    inlines = [ MemberInline,]
-    list_display = ('role_type','description','content_type')
-
-
 class DeploymentAdminForm(forms.ModelForm):
     sites = forms.ModelMultipleChoiceField(
         queryset=Site.objects.all(),
@@ -235,10 +251,10 @@
 
 class DeploymentAdmin(PlanetStackBaseAdmin):
     form = DeploymentAdminForm
-    inlines = [MemberInline,NodeInline,SliverInline,TagInline]
+    inlines = [DeploymentPrivilegeInline,NodeInline,TagInline]
     fieldsets = [
         (None, {'fields': ['sites'], 'classes':['suit-tab suit-tab-sites']}),]
-    suit_form_tabs =(('sites', 'Sites'),('nodes','Nodes'),('members','Members'),('tags','Tags'))
+    suit_form_tabs =(('sites', 'Sites'),('nodes','Nodes'),('deploymentprivileges','Privileges'),('tags','Tags'))
 
 class SiteAdmin(PlanetStackBaseAdmin):
     fieldsets = [
@@ -247,7 +263,7 @@
     ]
     suit_form_tabs =(('general', 'Site Details'),
         ('users','Users'),
-        ('members','Privileges'),
+        ('siteprivileges','Privileges'),
         ('deployments','Deployments'),
         ('slices','Slices'),
         ('nodes','Nodes'), 
@@ -255,7 +271,7 @@
     )
     list_display = ('name', 'login_base','site_url', 'enabled')
     filter_horizontal = ('deployments',)
-    inlines = [SliceInline,UserInline,TagInline, NodeInline, MemberInline]
+    inlines = [SliceInline,UserInline,TagInline, NodeInline, SitePrivilegeInline]
     search_fields = ['name']
 
     def queryset(self, request):
@@ -331,10 +347,12 @@
 class SliceAdmin(PlanetStackBaseAdmin):
     fieldsets = [('Slice Details', {'fields': ['name', 'site', 'serviceClass', 'description', 'slice_url'], 'classes':['suit-tab suit-tab-general']}),]
     list_display = ('name', 'site','serviceClass', 'slice_url')
-    inlines = [SlicePrivilegeInline,SliverInline, TagInline, ReservationInline]
+    inlines = [SlicePrivilegeInline,SliverInline, TagInline, ReservationInline,SliceNetworkInline]
 
 
+    #inlines = [SliverInline, SliceMembershipInline, TagInline, SliceTagInline, SliceNetworkInline]
     suit_form_tabs =(('general', 'Slice Details'),
+        ('slicenetworks','Networks'),
         ('sliceprivileges','Privileges'),
         ('slivers','Slivers'),
         ('tags','Tags'),
@@ -796,6 +814,68 @@
         else:
             return []
 
+class NetworkParameterTypeAdmin(admin.ModelAdmin):
+    exclude = ['enacted']
+    list_display = ("name", )
+
+class RouterAdmin(admin.ModelAdmin):
+    exclude = ['enacted']
+    list_display = ("name", )
+
+class RouterInline(admin.TabularInline):
+    # exclude = ['enacted']
+    model = Router.networks.through
+    extra = 0
+    verbose_name_plural = "Routers"
+    verbose_name = "Router"
+    suit_classes = 'suit-tab suit-tab-routers'
+
+class NetworkParameterInline(generic.GenericTabularInline):
+    exclude = ['enacted']
+    model = NetworkParameter
+    extra = 1
+    verbose_name_plural = "Parameters"
+    verbose_name = "Parameter"
+    suit_classes = 'suit-tab suit-tab-netparams'
+
+class NetworkSliversInline(admin.TabularInline):
+    exclude = ['enacted']
+    readonly_fields = ("ip", )
+    model = NetworkSliver
+    extra = 0
+    verbose_name_plural = "Slivers"
+    verbose_name = "Sliver"
+    suit_classes = 'suit-tab suit-tab-networkslivers'
+
+class NetworkSlicesInline(admin.TabularInline):
+    exclude = ['enacted']
+    model = NetworkSlice
+    extra = 0
+    verbose_name_plural = "Slices"
+    verbose_name = "Slice"
+    suit_classes = 'suit-tab suit-tab-networkslices'
+
+class NetworkAdmin(admin.ModelAdmin):
+    exclude = ['enacted']
+    list_display = ("name", "subnet", "ports", "labels")
+    readonly_fields = ("subnet", )
+
+    inlines = [NetworkParameterInline, NetworkSliversInline, NetworkSlicesInline, RouterInline]
+
+    fieldsets = [
+        (None, {'fields': ['name','template','ports','labels','owner','guaranteedBandwidth', 'permitAllSlices','permittedSlices','network_id','router_id','subnet_id','subnet'], 'classes':['suit-tab suit-tab-general']}),]
+
+    suit_form_tabs =(
+        ('general','Network Details'),
+        ('netparams', 'Parameters'),
+        ('networkslivers','Slivers'),
+        ('networkslices','Slices'),
+        ('routers','Routers'),
+    )
+
+class NetworkTemplateAdmin(admin.ModelAdmin):
+    exclude = ['enacted']
+    list_display = ("name", "guaranteedBandwidth", "visibility")
+
 # register a signal that caches the user's credentials when they log in
 def cache_credentials(sender, user, request, **kwds):
     auth = {'username': request.POST['username'],
@@ -825,10 +905,10 @@
 admin.site.register(Project, ProjectAdmin)
 admin.site.register(ServiceClass, ServiceClassAdmin)
 admin.site.register(Reservation, ReservationAdmin)
-#admin.site.register(SliceRole, SliceRoleAdmin)
-#admin.site.register(SiteRole, SiteRoleAdmin)
-#admin.site.register(PlanetStackRole)
-#admin.site.register(DeploymentRole)
+admin.site.register(Network, NetworkAdmin)
+admin.site.register(Router, RouterAdmin)
+admin.site.register(NetworkParameterType, NetworkParameterTypeAdmin)
+admin.site.register(NetworkTemplate, NetworkTemplateAdmin)
 
 if showAll:
     #admin.site.register(PlanetStack)
@@ -836,7 +916,6 @@
     admin.site.register(Node, NodeAdmin)
     #admin.site.register(SlicePrivilege, SlicePrivilegeAdmin)
     #admin.site.register(SitePrivilege, SitePrivilegeAdmin)
-    admin.site.register(Role, RoleAdmin)
     admin.site.register(Member, MemberAdmin)
     admin.site.register(Sliver, SliverAdmin)
     admin.site.register(Image, ImageAdmin)
diff --git a/planetstack/core/models/__init__.py b/planetstack/core/models/__init__.py
index 1cc4d07..b4c7cf6 100644
--- a/planetstack/core/models/__init__.py
+++ b/planetstack/core/models/__init__.py
@@ -24,3 +24,4 @@
 from .sliver import Sliver
 from .reservation import ReservedResource
 from .reservation import Reservation
+from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice
diff --git a/planetstack/core/models/network.py b/planetstack/core/models/network.py
new file mode 100644
index 0000000..55711a4
--- /dev/null
+++ b/planetstack/core/models/network.py
@@ -0,0 +1,122 @@
+import os
+import socket
+from django.db import models
+from core.models import PlCoreBase, Site, Slice, Sliver
+from django.contrib.contenttypes.models import ContentType
+from django.contrib.contenttypes import generic
+
+# If true, then IP addresses will be allocated by the model. If false, then
+# we will assume the observer handles it.
+NO_OBSERVER=False
+
+class NetworkTemplate(PlCoreBase):
+    VISIBILITY_CHOICES = (('public', 'public'), ('private', 'private'))
+    TRANSLATION_CHOICES = (('none', 'none'), ('NAT', 'NAT'))
+
+    name = models.CharField(max_length=32)
+    description = models.CharField(max_length=1024, blank=True, null=True)
+    guaranteedBandwidth = models.IntegerField(default=0)
+    visibility = models.CharField(max_length=30, choices=VISIBILITY_CHOICES, default="private")
+    translation = models.CharField(max_length=30, choices=TRANSLATION_CHOICES, default="none")
+    sharedNetworkName = models.CharField(max_length=30, blank=True, null=True)
+    sharedNetworkId = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum network")
+
+    def __unicode__(self):  return u'%s' % (self.name)
+
+class Network(PlCoreBase):
+    name = models.CharField(max_length=32)
+    template = models.ForeignKey(NetworkTemplate)
+    subnet = models.CharField(max_length=32, blank=True)
+    ports = models.CharField(max_length=1024, blank=True, null=True)
+    labels = models.CharField(max_length=1024, blank=True, null=True)
+    owner = models.ForeignKey(Slice, related_name="ownedNetworks")
+
+    guaranteedBandwidth = models.IntegerField(default=0)
+    permitAllSlices = models.BooleanField(default=False)
+    permittedSlices = models.ManyToManyField(Slice, blank=True, related_name="availableNetworks")
+    slices = models.ManyToManyField(Slice, blank=True, related_name="networks", through="NetworkSlice")
+    slivers = models.ManyToManyField(Sliver, blank=True, related_name="networks", through="NetworkSliver")
+
+    # for observer/manager
+    network_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum network")
+    router_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum router id")
+    subnet_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum subnet id")
+
+    def __unicode__(self):  return u'%s' % (self.name)
+
+    def save(self, *args, **kwds):
+        if (not self.subnet) and (NO_OBSERVER):
+            from util.network_subnet_allocator import find_unused_subnet
+            self.subnet = find_unused_subnet(existing_subnets=[x.subnet for x in Network.objects.all()])
+        super(Network, self).save(*args, **kwds)
+
+class NetworkSlice(PlCoreBase):
+    # This object exists solely so we can implement the permission check when
+    # adding slices to networks. It adds no additional fields to the relation.
+
+    network = models.ForeignKey(Network)
+    slice = models.ForeignKey(Slice)
+
+    def save(self, *args, **kwds):
+        slice = self.slice
+        if (slice not in self.network.permittedSlices.all()) and (slice != self.network.owner) and (not self.network.permitAllSlices):
+            # to add a slice to the network, one of the following must be true:
+            #   1) the slice is in the network's permittedSlices list,
+            #   2) the slice is the network's owner, or
+            #   3) the network's permitAllSlices is true
+            raise ValueError("Slice %s is not allowed to connect to network %s" % (str(slice), str(self.network)))
+
+        super(NetworkSlice, self).save(*args, **kwds)
+
+    def __unicode__(self):  return u'%s-%s' % (self.network.name, self.slice.name)
+
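+# Connection-policy sketch (illustrative; `net` and `some_slice` are
+# hypothetical): NetworkSlice.save() enforces the rule above, so
+#
+#   NetworkSlice(network=net, slice=some_slice).save()
+#
+# raises ValueError unless some_slice is permitted on net, owns net, or
+# net.permitAllSlices is set.
+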
+class NetworkSliver(PlCoreBase):
+    network = models.ForeignKey(Network)
+    sliver = models.ForeignKey(Sliver)
+    ip = models.GenericIPAddressField(help_text="Sliver ip address", blank=True, null=True)
+    port_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum port id")
+
+    def save(self, *args, **kwds):
+        slice = self.sliver.slice
+        if (slice not in self.network.permittedSlices.all()) and (slice != self.network.owner) and (not self.network.permitAllSlices):
+            # to add a sliver to the network, one of the following must be true:
+            #   1) the sliver's slice is in the network's permittedSlices list,
+            #   2) the sliver's slice is the network's owner, or
+            #   3) the network's permitAllSlices is true
+            raise ValueError("Slice %s is not allowed to connect to network %s" % (str(slice), str(self.network)))
+
+        if (not self.ip) and (NO_OBSERVER):
+            from util.network_subnet_allocator import find_unused_address
+            self.ip = find_unused_address(self.network.subnet,
+                                          [x.ip for x in self.network.networksliver_set.all()])
+        super(NetworkSliver, self).save(*args, **kwds)
+
+    def __unicode__(self):  return u'%s-%s' % (self.network.name, self.sliver.instance_name)
+
+class Router(PlCoreBase):
+    name = models.CharField(max_length=32)
+    owner = models.ForeignKey(Slice, related_name="routers")
+    permittedNetworks = models.ManyToManyField(Network, blank=True, related_name="availableRouters")
+    networks = models.ManyToManyField(Network, blank=True, related_name="routers")
+
+    def __unicode__(self):  return u'%s' % (self.name)
+
+class NetworkParameterType(PlCoreBase):
+    name = models.SlugField(help_text="The name of this parameter", max_length=128)
+    description = models.CharField(max_length=1024)
+
+    def __unicode__(self):  return u'%s' % (self.name)
+
+class NetworkParameter(PlCoreBase):
+    parameter = models.ForeignKey(NetworkParameterType, related_name="parameters", help_text="The type of the parameter")
+    value = models.CharField(help_text="The value of this parameter", max_length=1024)
+
+    # The required fields to do a ObjectType lookup, and object_id assignment
+    content_type = models.ForeignKey(ContentType)
+    object_id = models.PositiveIntegerField()
+    content_object = generic.GenericForeignKey('content_type', 'object_id')
+
+    def __unicode__(self):
+        return self.parameter.name
+
+
diff --git a/planetstack/core/models/plcorebase.py b/planetstack/core/models/plcorebase.py
index 30d4df3..62bcb75 100644
--- a/planetstack/core/models/plcorebase.py
+++ b/planetstack/core/models/plcorebase.py
@@ -1,53 +1,57 @@
 import os
 from django.db import models
 from django.forms.models import model_to_dict
-from openstack.event_manager import EventSender
+from observer.event_manager import EventSender
 
 
 class PlCoreBase(models.Model):
 
-    created = models.DateTimeField(auto_now_add=True)
-    updated = models.DateTimeField(auto_now=True)
-    enacted = models.DateTimeField(null=True, default=None)
+	created = models.DateTimeField(auto_now_add=True)
+	updated = models.DateTimeField(auto_now=True)
 
-    class Meta:
-        abstract = True
-        app_label = "core"
+	class Meta:
+		abstract = True
+		app_label = "core"
 
-    def __init__(self, *args, **kwargs):
-        super(PlCoreBase, self).__init__(*args, **kwargs)
-        self.__initial = self._dict
+	def __init__(self, *args, **kwargs):
+		super(PlCoreBase, self).__init__(*args, **kwargs)
+		self.__initial = self._dict
 
-    @property
-    def diff(self):
-        d1 = self.__initial
-        d2 = self._dict
-        diffs = [(k, (v, d2[k])) for k, v in d1.items() if v != d2[k]]
-        return dict(diffs)
+	@property
+	def diff(self):
+		d1 = self.__initial
+		d2 = self._dict
+		diffs = [(k, (v, d2[k])) for k, v in d1.items() if v != d2[k]]
+		return dict(diffs)
 
-    @property
-    def has_changed(self):
-        return bool(self.diff)
+	@property
+	def has_changed(self):
+		return bool(self.diff)
 
-    @property
-    def changed_fields(self):
-        return self.diff.keys()
+	@property
+	def changed_fields(self):
+		return self.diff.keys()
 
-    def get_field_diff(self, field_name):
-        return self.diff.get(field_name, None)
+	def get_field_diff(self, field_name):
+		return self.diff.get(field_name, None)
 
-    def save(self, *args, **kwargs):
-        super(PlCoreBase, self).save(*args, **kwargs)
-        
-        # Tell the observer that the source database has been updated
-        EventSender().fire()
+	def delete(self, *args, **kwds):
+		super(PlCoreBase, self).delete(*args, **kwds)
 
-        self.__initial = self._dict
+		# fire() takes keyword arguments, and the listener expects the
+		# 'deletion_flag', 'model' and 'pk' keys
+		EventSender().fire(deletion_flag=True, model=self.__class__.__name__, pk=self.pk)
 
-    @property
-    def _dict(self):
-        return model_to_dict(self, fields=[field.name for field in
-                             self._meta.fields])
+	def save(self, *args, **kwargs):
+		super(PlCoreBase, self).save(*args, **kwargs)
+		
+		# Tell the observer that the source database has been updated
+		EventSender().fire()
+
+		self.__initial = self._dict
+
+	@property
+	def _dict(self):
+		return model_to_dict(self, fields=[field.name for field in
+							 self._meta.fields])
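+
+	# Change-tracking usage sketch (illustrative):
+	#   obj.name = 'new-name'
+	#   obj.has_changed        -> True
+	#   obj.diff               -> {'name': ('old-name', 'new-name')}
+	#   obj.changed_fields     -> ['name']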
 
 
 
diff --git a/planetstack/core/models/sliver.py b/planetstack/core/models/sliver.py
index 44a6af1..9c00cee 100644
--- a/planetstack/core/models/sliver.py
+++ b/planetstack/core/models/sliver.py
@@ -26,7 +26,16 @@
     numberCores = models.IntegerField(verbose_name="Number of Cores", help_text="Number of cores for sliver", default=0)
     tags = generic.GenericRelation(Tag)
 
-    def __unicode__(self):  return u'%s' % (self.instance_name)
+    def __unicode__(self):
+        if self.instance_name:
+            return u'%s' % (self.instance_name)
+        elif self.id:
+            return u'uninstantiated-%s' % str(self.id)
+        elif self.slice:
+            return u'unsaved-sliver on %s' % self.slice.name
+        else:
+            return u'unsaved-sliver'
+
 
     def save(self, *args, **kwds):
         if not self.name:
diff --git a/planetstack/core/views/deployment.py b/planetstack/core/views/deployments.py
similarity index 100%
rename from planetstack/core/views/deployment.py
rename to planetstack/core/views/deployments.py
diff --git a/planetstack/dmdot b/planetstack/dmdot
new file mode 100755
index 0000000..2d95e9d
--- /dev/null
+++ b/planetstack/dmdot
@@ -0,0 +1,49 @@
+#!/usr/bin/python
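+#
+# Usage: dmdot [-json|-dot]
+# e.g. (assuming Graphviz is installed): ./dmdot -dot | dot -Tpdf > models.pdf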
+
+import os
+import sys
+import json
+
+sys.path.append('.')
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+
+from django.db.models.fields.related import ForeignKey
+from core.models import *
+
+try:
+	output = sys.argv[1]
+except IndexError:
+	output = '-json'
+
+g = globals()
+model_classes = []
+class_names = []
+for c in g.values():
+	if type(c)==type(PlCoreBase):
+		model_classes.append(c)
+		class_names.append(c.__name__)
+
+
+if (output=='-dot'):
+	print "digraph plstack {";
+	for c in model_classes:
+		fields = c._meta.fields
+		for f in fields:
+			if type(f)==ForeignKey and f.name.title() in class_names:
+				print '\t"%s"->"%s";'%(c.__name__,f.name.title())
+	print "}\n";
+elif (output=='-json'):
+	d = {}
+	for c in model_classes:
+		fields = c._meta.fields
+		for f in fields:
+			if type(f)==ForeignKey and f.name.title() in class_names:
+				try:
+					d[c.__name__].append(f.name.title())
+				except KeyError:
+					d[c.__name__]=[f.name.title()]
+	print json.dumps(d,indent=4)
diff --git a/planetstack/observer/__init__.py b/planetstack/observer/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/planetstack/observer/__init__.py
diff --git a/planetstack/openstack/backend.py b/planetstack/observer/backend.py
similarity index 74%
rename from planetstack/openstack/backend.py
rename to planetstack/observer/backend.py
index 2f4aa71..d8ae306 100644
--- a/planetstack/openstack/backend.py
+++ b/planetstack/observer/backend.py
@@ -1,12 +1,12 @@
 import threading
-from openstack.observer import OpenStackObserver
-from openstack.event_manager import EventListener
+from observer.event_loop import PlanetStackObserver
+from observer.event_manager import EventListener
 
 class Backend:
     
     def run(self):
         # start the openstack observer
-        observer = OpenStackObserver()
+        observer = PlanetStackObserver()
         observer_thread = threading.Thread(target=observer.run)
         observer_thread.start()
 
diff --git a/planetstack/observer/deleter.py b/planetstack/observer/deleter.py
new file mode 100644
index 0000000..9a62ccd
--- /dev/null
+++ b/planetstack/observer/deleter.py
@@ -0,0 +1,13 @@
+import os
+import base64
+from planetstack.config import Config
+
+class Deleter:
+	model=None # Must be overridden
+
+	def call(self,pk):
+		# Fetch object from PlanetStack db and delete it
+		pass
+
+	def __call__(self):
+		return self.call()
diff --git a/planetstack/observer/deleters/__init__.py b/planetstack/observer/deleters/__init__.py
new file mode 100755
index 0000000..9cfd951
--- /dev/null
+++ b/planetstack/observer/deleters/__init__.py
@@ -0,0 +1,18 @@
+import os
+
+deleters = {}
+_path = os.path.join('.',os.path.dirname(__file__))
+
+_files = os.listdir(_path)
+_files = filter(lambda x:x.endswith('deleter.py'),_files)
+_files = map(lambda x:x[:-len('.py')],_files)  # rstrip('.py') would also eat trailing 'p'/'y' characters
+
+"""
+for f in _files:
+	m = __import__(f)
+	deleter = getattr(m,f.title().replace('_',''))
+	try:
+		deleters[deleter.model].append(deleter)
+	except KeyError:
+		deleters[deleter.model]=[deleter]
+"""
diff --git a/planetstack/observer/deleters/slice_deleter.py b/planetstack/observer/deleters/slice_deleter.py
new file mode 100644
index 0000000..4cb0a72
--- /dev/null
+++ b/planetstack/observer/deleters/slice_deleter.py
@@ -0,0 +1,9 @@
+from core.models import Slice
+
+class SliceDeleter:
+	model='Slice'
+
+	def call(self, pk):
+		s = Slice.objects.get(pk=pk)
+
+		# Proceed with delete
diff --git a/planetstack/observer/deleters/slice_deleter.pyc b/planetstack/observer/deleters/slice_deleter.pyc
new file mode 100644
index 0000000..9fc8022
--- /dev/null
+++ b/planetstack/observer/deleters/slice_deleter.pyc
Binary files differ
diff --git a/planetstack/observer/event_loop.py b/planetstack/observer/event_loop.py
new file mode 100644
index 0000000..671bdc3
--- /dev/null
+++ b/planetstack/observer/event_loop.py
@@ -0,0 +1,245 @@
+import time
+import traceback
+import commands
+import threading
+import json
+
+from datetime import datetime
+from collections import defaultdict
+from core.models import *
+from django.db.models import F, Q
+from openstack.manager import OpenStackManager
+from util.logger import Logger, logging, logger
+#from timeout import timeout
+from planetstack.config import Config
+from observer.steps import *
+
+debug_mode = False
+
+logger = Logger(logfile='observer.log', level=logging.INFO)
+
+class StepNotReady(Exception):
+	pass
+
+def toposort(g, steps):
+	reverse = {}
+
+	for k,v in g.items():
+		for rk in v:
+			try:
+				reverse[rk].append(k)
+			except:
+				reverse[rk]=[k]
+
+	sources = []
+	for k,v in g.items():
+		if not reverse.has_key(k):
+			sources.append(k)
+
+
+	for k,v in reverse.iteritems():
+		if (not v):
+			sources.append(k)
+
+	order = []
+	marked = []
+
+	while sources:
+		n = sources.pop()
+		try:
+			for m in g[n]:
+				if m not in marked:
+					sources.append(m)
+					marked.append(m)
+		except KeyError:
+			pass
+		order.append(n)
+	return order
+
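+# A small example: toposort({'a': ['b']}, ['a', 'b']) returns ['a', 'b'];
+# a step is emitted before the steps it points at.
+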
+class PlanetStackObserver:
+	sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps]
+
+	def __init__(self):
+		# assumes OpenStackManager still exposes .enabled / .has_openstack,
+		# as the old event handler did
+		self.manager = OpenStackManager()
+		self.load_sync_steps()
+		# The Condition object that gets signalled by Feefie events
+		self.event_cond = threading.Condition()
+
+	def wait_for_event(self, timeout):
+		self.event_cond.acquire()
+		self.event_cond.wait(timeout)
+		self.event_cond.release()
+		
+	def wake_up(self):
+		logger.info('Wake up routine called. Event cond %r'%self.event_cond)
+		self.event_cond.acquire()
+		self.event_cond.notify()
+		self.event_cond.release()
+
+	def load_sync_steps(self):
+		dep_path = Config().observer_dependency_path
+		try:
+			# This contains dependencies between records, not sync steps
+			self.model_dependency_graph = json.loads(open(dep_path).read())
+		except Exception,e:
+			raise e
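+		# dep_path points at a JSON map of model dependencies like the
+		# planetstack.deps file added in this commit, e.g.
+		#   {"Slice": ["Site"], "Sliver": ["Image", "Slice", "Node"], ...}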
+
+		backend_path = Config().observer_backend_dependency_path
+		try:
+			# This contains dependencies between backend records
+			self.backend_dependency_graph = json.loads(open(backend_path).read())
+		except Exception,e:
+			# We can work without a backend graph
+			self.backend_dependency_graph = {}
+
+		provides_dict = {}
+		for s in self.sync_steps:
+			for m in s.provides:
+				try:
+					provides_dict[m.__name__].append(s.__name__)
+				except KeyError:
+					provides_dict[m.__name__]=[s.__name__]
+
+				
+		step_graph = {}
+		for k,v in self.model_dependency_graph.iteritems():
+			try:
+				for source in provides_dict[k]:
+					for m in v:
+						try:
+							for dest in provides_dict[m]:
+								# no deps, pass
+								try:
+									step_graph[source].append(dest)
+								except:
+									step_graph[source]=[dest]
+						except KeyError:
+							pass
+					
+			except KeyError:
+				pass
+				# no dependencies, pass
+		
+		if (self.backend_dependency_graph):
+			backend_dict = {}
+			for s in self.sync_steps:
+				for m in s.serves:
+					backend_dict[m]=s.__name__
+
+			for k,v in self.backend_dependency_graph.iteritems():
+				try:
+					source = backend_dict[k]
+					for m in v:
+						try:
+							dest = backend_dict[m]
+						except KeyError:
+							# no backend step serves this model
+							continue
+						try:
+							step_graph[source].append(dest)
+						except KeyError:
+							step_graph[source]=[dest]
+				except KeyError:
+					pass
+					# no dependencies, pass
+
+		self.dependency_graph = step_graph
+
+		# toposort orders step names; map them back to the step classes and
+		# keep any steps that never appear in the dependency graph
+		step_dict = dict([(s.__name__,s) for s in self.sync_steps])
+		ordered_names = toposort(self.dependency_graph, self.sync_steps)
+		self.ordered_steps = [step_dict[n] for n in ordered_names if n in step_dict]
+		self.ordered_steps += [s for s in self.sync_steps if s not in self.ordered_steps]
+		print "Order of steps=", [s.__name__ for s in self.ordered_steps]
+		self.load_run_times()
+		
+
+	def check_duration(self, step, duration):
+		try:
+			if (duration > step.deadline):
+				logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
+		except AttributeError:
+			# step doesn't have a deadline
+			pass
+
+	def update_run_time(self, step):
+		self.last_run_times[step.name]=time.time()
+
+	def check_schedule(self, step):
+		time_since_last_run = time.time() - self.last_run_times[step.name]
+		try:
+			if (time_since_last_run < step.requested_interval):
+				raise StepNotReady
+		except AttributeError:
+			logger.info('Step %s does not have requested_interval set'%step.name)
+			raise StepNotReady
+	
+	def load_run_times(self):
+		try:
+			jrun_times = open('/tmp/observer_run_times').read()
+			self.last_run_times = json.loads(jrun_times)
+		except:
+			self.last_run_times={}
+			for e in self.ordered_steps:
+				self.last_run_times[e.__name__]=0
+
+
+
+	def save_run_times(self):
+		run_times = json.dumps(self.last_run_times)
+		open('/tmp/observer_run_times','w').write(run_times)
+
+	def check_class_dependency(self, step, failed_steps):
+		for failed_step in failed_steps:
+			if (failed_step.name in self.dependency_graph.get(step.name, [])):
+				raise StepNotReady
+
+	def run(self):
+		if not self.manager.enabled or not self.manager.has_openstack:
+			return
+
+		while True:
+			try:
+				logger.info('Waiting for event')
+				tBeforeWait = time.time()
+				self.wait_for_event(timeout=300)
+				logger.info('Observer woke up')
+
+				# Set of whole steps that failed
+				failed_steps = []
+
+				# Set of individual objects within steps that failed
+				failed_step_objects = []
+
+				for S in self.ordered_steps:
+					start_time=time.time()
+
+					sync_step = S()
+					sync_step.dependencies = self.dependency_graph.get(sync_step.name, [])
+					sync_step.debug_mode = debug_mode
+
+					should_run = False
+					try:
+						# Various checks that decide whether
+						# this step runs or not
+						self.check_class_dependency(sync_step, failed_steps) # dont run Slices if Sites failed
+						self.check_schedule(sync_step) # dont run sync_network_routes if time since last run < 1 hour
+						should_run = True
+					except StepNotReady:
+						logger.info('Step not ready: %s'%sync_step.name)
+						failed_steps.append(sync_step)
+					except:
+						failed_steps.append(sync_step)
+
+					if (should_run):
+						try:
+							# ********* This is the actual sync step
+							failed_objects = sync_step(failed=failed_step_objects)
+
+							duration=time.time() - start_time
+							self.check_duration(sync_step, duration)
+							failed_step_objects.extend(failed_objects)
+							self.update_run_time(sync_step)
+						except:
+							failed_steps.append(sync_step)
+				self.save_run_times()
+			except:
+				logger.log_exc("Exception in observer run loop")
+				traceback.print_exc()
diff --git a/planetstack/observer/event_manager.py b/planetstack/observer/event_manager.py
new file mode 100644
index 0000000..c4215ac
--- /dev/null
+++ b/planetstack/observer/event_manager.py
@@ -0,0 +1,90 @@
+import threading
+import requests, json
+
+from core.models import *
+#from openstack.manager import OpenStackManager
+from planetstack.config import Config
+from observer.deleters import deleters
+
+import os
+import base64
+from fofum import Fofum
+import json
+
+# decorator that marks dispatchable event methods
+def event(func):
+	setattr(func, 'event', func.__name__)
+	return func		 
+
+class EventHandler:
+	# This code is currently not in use.
+	def __init__(self):
+		pass #self.manager = OpenStackManager()
+
+	@staticmethod
+	def get_events():
+		events = []
+		for name in dir(EventHandler):
+			attribute = getattr(EventHandler, name)
+			if hasattr(attribute, 'event'):
+				events.append(getattr(attribute, 'event'))
+		return events
+
+	def dispatch(self, event, *args, **kwds):
+		if hasattr(self, event):
+			return getattr(self, event)(*args, **kwds)
+			
+		
+class EventSender:
+	def __init__(self,user=None,clientid=None):
+		try:
+			clid = Config().feefie_client_id
+			user = Config().feefie_client_user
+		except:
+			clid = 'planetstack_core_team'
+			user = 'pl'
+
+		self.fofum = Fofum(user=user)
+		self.fofum.make(clid)
+
+	def fire(self,**args):
+		self.fofum.fire(json.dumps(args))
+
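+# Example (illustrative): announce a deletion so a listener can invoke the
+# registered deleters; handle_event() below expects these keys.
+#
+#   EventSender().fire(deletion_flag=True, model='Slice', pk=some_pk)
+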
+class EventListener:
+	def __init__(self,wake_up=None):
+		self.handler = EventHandler()
+		self.wake_up = wake_up
+
+	def handle_event(self, payload):
+		payload_dict = json.loads(payload)
+
+		try:
+			deletion = payload_dict['deletion_flag']
+			if (deletion):
+				model = payload_dict['model']
+				pk = payload_dict['pk']
+
+				for deleter in deleters[model]:
+					deleter(pk)
+		except:
+			deletion = False
+
+		if (not deletion and self.wake_up):
+			self.wake_up()
+		
+
+	def run(self):
+		# This is our unique client id, to be used when firing and receiving events
+		# It needs to be generated once and placed in the config file
+
+		try:
+			clid = Config().feefie_client_id
+			user = Config().feefie_client_user
+		except:
+			clid = 'planetstack_core_team'
+			user = 'pl'
+
+		f = Fofum(user=user)
+		
+		listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
+		listener_thread.start()
diff --git a/planetstack/observer/openstacksyncstep.py b/planetstack/observer/openstacksyncstep.py
new file mode 100644
index 0000000..3ce3c68
--- /dev/null
+++ b/planetstack/observer/openstacksyncstep.py
@@ -0,0 +1,17 @@
+import os
+import base64
+from syncstep import SyncStep
+
+class OpenStackSyncStep(SyncStep):
+	""" PlanetStack Sync step for copying data to OpenStack 
+	""" 
+	
+	def __init__(self, **args):
+		super(OpenStackSyncStep,self).__init__(**args)
+		return
+
+	def __call__(self, **args):
+		return self.call(**args)
diff --git a/planetstack/observer/steps/__init__.py b/planetstack/observer/steps/__init__.py
new file mode 100644
index 0000000..6d7adb8
--- /dev/null
+++ b/planetstack/observer/steps/__init__.py
@@ -0,0 +1,10 @@
+from .sync_external_routes import SyncExternalRoutes
+from .sync_network_slivers import SyncNetworkSlivers
+from .sync_networks import SyncNetworks
+from .sync_site_privileges import SyncSitePrivileges
+from .sync_sites import SyncSites
+from .sync_slice_memberships import SyncSliceMemberships
+from .sync_slices import SyncSlices
+from .sync_sliver_ips import SyncSliverIps
+from .sync_slivers import SyncSlivers
+from .sync_users import SyncUsers
diff --git a/planetstack/observer/steps/garbage_collector.py b/planetstack/observer/steps/garbage_collector.py
new file mode 100644
index 0000000..f03577c
--- /dev/null
+++ b/planetstack/observer/steps/garbage_collector.py
@@ -0,0 +1,11 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+
+class GarbageCollector(SyncStep):
+	requested_interval = 86400
+	provides=[]
+
+	def call(self):
+		pass
diff --git a/planetstack/observer/steps/sync_external_routes.py b/planetstack/observer/steps/sync_external_routes.py
new file mode 100644
index 0000000..6c22c8b
--- /dev/null
+++ b/planetstack/observer/steps/sync_external_routes.py
@@ -0,0 +1,20 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from util.logger import Logger, logging
+
+logger = Logger(logfile='observer.log', level=logging.INFO)
+
+class SyncExternalRoutes(SyncStep):
+	# XXX what does this provide?
+	requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
+
+	def __init__(self):
+		pass
+
+	def call(self):
+		routes = self.manager.driver.get_external_routes()
+		subnets = self.manager.driver.shell.quantum.list_subnets()['subnets']
+		for subnet in subnets:
+			try:
+				self.manager.driver.add_external_route(subnet, routes)
+			except:
+				logger.log_exc("failed to add external route for subnet %s" % subnet)
diff --git a/planetstack/observer/steps/sync_network_slivers.py b/planetstack/observer/steps/sync_network_slivers.py
new file mode 100644
index 0000000..9e24fae
--- /dev/null
+++ b/planetstack/observer/steps/sync_network_slivers.py
@@ -0,0 +1,75 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+
+class SyncNetworkSlivers(OpenStackSyncStep):
+	requested_interval = 3600
+	provides=[NetworkSliver]
+
+	def call(self):
+		networkSlivers = NetworkSliver.objects.all()
+		networkSlivers_by_id = {}
+		networkSlivers_by_port = {}
+		for networkSliver in networkSlivers:
+			networkSlivers_by_id[networkSliver.id] = networkSliver
+			networkSlivers_by_port[networkSliver.port_id] = networkSliver
+
+		networks = Network.objects.all()
+		networks_by_id = {}
+		for network in networks:
+			networks_by_id[network.network_id] = network
+
+		slivers = Sliver.objects.all()
+		slivers_by_instance_id = {}
+		for sliver in slivers:
+			slivers_by_instance_id[sliver.instance_id] = sliver
+
+		ports = self.manager.driver.shell.quantum.list_ports()["ports"]
+		for port in ports:
+			if port["id"] in networkSlivers_by_port:
+				# we already have it
+				print "already accounted for port", port["id"]
+				continue
+
+			if port["device_owner"] != "compute:nova":
+				# we only want the ports that connect to instances
+				continue
+
+			network = networks_by_id.get(port['network_id'], None)
+			if not network:
+				#print "no network for port", port["id"], "network", port["network_id"]
+				continue
+
+			sliver = slivers_by_instance_id.get(port['device_id'], None)
+			if not sliver:
+				print "no sliver for port", port["id"], "device_id", port['device_id']
+				continue
+
+			if network.template.sharedNetworkId is not None:
+				# If it's a shared network template, then more than one network
+				# object maps to the quantum network. We have to do a whole bunch
+				# of extra work to find the right one.
+				networks = network.template.network_set.all()
+				network = None
+				for candidate_network in networks:
+					if (candidate_network.owner == sliver.slice):
+						print "found network", candidate_network
+						network = candidate_network
+
+				if not network:
+					print "failed to find the correct network for a shared template for port", port["id"], "network", port["network_id"]
+					continue
+
+			if not port["fixed_ips"]:
+				print "port", port["id"], "has no fixed_ips"
+				continue
+
+#			 print "XXX", port
+
+			ns = NetworkSliver(network=network,
+							   sliver=sliver,
+							   ip=port["fixed_ips"][0]["ip_address"],
+							   port_id=port["id"])
+			ns.save()
diff --git a/planetstack/observer/steps/sync_networks.py b/planetstack/observer/steps/sync_networks.py
new file mode 100644
index 0000000..e64f0a4
--- /dev/null
+++ b/planetstack/observer/steps/sync_networks.py
@@ -0,0 +1,52 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+from util.logger import Logger, logging
+
+logger = Logger(logfile='observer.log', level=logging.INFO)
+
+class SyncNetworks(OpenStackSyncStep):
+	provides=[Network]
+	requested_interval = 0
+
+	def save_network(self, network):
+		if not network.network_id:
+			if network.template.sharedNetworkName:
+				network.network_id = network.template.sharedNetworkId
+				(network.subnet_id, network.subnet) = self.driver.get_network_subnet(network.network_id)
+			else:
+				network_name = network.name
+
+				# create network
+				os_network = self.driver.create_network(network_name, shared=True)
+				network.network_id = os_network['id']
+
+				# create router
+				router = self.driver.create_router(network_name)
+				network.router_id = router['id']
+
+				# create subnet
+				next_subnet = self.get_next_subnet()
+				cidr = str(next_subnet.cidr)
+				ip_version = next_subnet.version
+				start = str(next_subnet[2])
+				end = str(next_subnet[-2])
+				subnet = self.driver.create_subnet(name=network_name,
+												   network_id = network.network_id,
+												   cidr_ip = cidr,
+												   ip_version = ip_version,
+												   start = start,
+												   end = end)
+				network.subnet = cidr
+				network.subnet_id = subnet['id']
+
+	def sync_record(self, network):
+		if network.owner and network.owner.creator:
+			try:
+				# update manager context
+				self.driver.init_caller(network.owner.creator, network.owner.name)
+				self.save_network(network)
+				logger.info("saved network: %s" % (network))
+			except Exception,e:
+				logger.log_exc("save network failed: %s" % network)
+				raise e
+
diff --git a/planetstack/observer/steps/sync_site_privileges.py b/planetstack/observer/steps/sync_site_privileges.py
new file mode 100644
index 0000000..ac0dbac
--- /dev/null
+++ b/planetstack/observer/steps/sync_site_privileges.py
@@ -0,0 +1,15 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+
+class SyncSitePrivileges(OpenStackSyncStep):
+	requested_interval=0
+
+	provides=[SitePrivilege]
+	def sync_record(self, site_priv):
+		if site_priv.user.kuser_id and site_priv.site.tenant_id:
+			self.driver.add_user_role(site_priv.user.kuser_id,
+									  site_priv.site.tenant_id,
+									  site_priv.role.role_type) 
diff --git a/planetstack/observer/steps/sync_sites.py b/planetstack/observer/steps/sync_sites.py
new file mode 100644
index 0000000..1f7a0f8
--- /dev/null
+++ b/planetstack/observer/steps/sync_sites.py
@@ -0,0 +1,29 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import Site
+
+class SyncSites(OpenStackSyncStep):
+	provides=[Site]
+	requested_interval=0
+	def sync_record(self, site):
+		save_site = False
+		if not site.tenant_id:
+			tenant = self.driver.create_tenant(tenant_name=site.login_base,
+											   description=site.name,
+											   enabled=site.enabled)
+			site.tenant_id = tenant.id
+			save_site = True
+			# XXX - What's caller?
+			# self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+
+		# update the record
+		if site.id and site.tenant_id:
+			self.driver.update_tenant(site.tenant_id,
+									  description=site.name,
+									  enabled=site.enabled)
+
+		if (save_site):
+			site.save()
+
diff --git a/planetstack/observer/steps/sync_slice_memberships.py b/planetstack/observer/steps/sync_slice_memberships.py
new file mode 100644
index 0000000..66953f1
--- /dev/null
+++ b/planetstack/observer/steps/sync_slice_memberships.py
@@ -0,0 +1,14 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.slice import *
+
+class SyncSliceMemberships(OpenStackSyncStep):
+	requested_interval=0
+	provides=[SliceMembership]
+	def sync_record(self, slice_memb):
+		if slice_memb.user.kuser_id and slice_memb.slice.tenant_id:
+			self.driver.add_user_role(slice_memb.user.kuser_id,
+									  slice_memb.slice.tenant_id,
+									  slice_memb.role.role_type)
diff --git a/planetstack/observer/steps/sync_slices.py b/planetstack/observer/steps/sync_slices.py
new file mode 100644
index 0000000..81ed925
--- /dev/null
+++ b/planetstack/observer/steps/sync_slices.py
@@ -0,0 +1,58 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.slice import Slice
+
+class SyncSlices(OpenStackSyncStep):
+	provides=[Slice]
+	requested_interval=0
+	def sync_record(self, slice):
+		if not slice.tenant_id:
+			nova_fields = {'tenant_name': slice.name,
+				   'description': slice.description,
+				   'enabled': slice.enabled}
+			tenant = self.driver.create_tenant(**nova_fields)
+			slice.tenant_id = tenant.id
+
+			# XXX give caller an admin role at the tenant they've created
+			self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+
+			# refresh credentials using this tenant
+			self.driver.shell.connect(username=self.driver.shell.keystone.username,
+									  password=self.driver.shell.keystone.password,
+									  tenant=tenant.name)
+
+			# create network
+			network = self.driver.create_network(slice.name)
+			slice.network_id = network['id']
+
+			# create router
+			router = self.driver.create_router(slice.name)
+			slice.router_id = router['id']
+
+			# create subnet
+			next_subnet = self.get_next_subnet()
+			cidr = str(next_subnet.cidr)
+			ip_version = next_subnet.version
+			start = str(next_subnet[2])
+			end = str(next_subnet[-2]) 
+			subnet = self.driver.create_subnet(name=slice.name,
+											   network_id = network['id'],
+											   cidr_ip = cidr,
+											   ip_version = ip_version,
+											   start = start,
+											   end = end)
+			slice.subnet_id = subnet['id']
+			# add subnet as interface to slice's router
+			self.driver.add_router_interface(router['id'], subnet['id'])
+			# add external route
+			self.driver.add_external_route(subnet)
+
+
+		if slice.id and slice.tenant_id:
+			self.driver.update_tenant(slice.tenant_id,
+									  description=slice.description,
+									  enabled=slice.enabled)   
+
+		slice.save()
diff --git a/planetstack/observer/steps/sync_sliver_ips.py b/planetstack/observer/steps/sync_sliver_ips.py
new file mode 100644
index 0000000..d69fd5d
--- /dev/null
+++ b/planetstack/observer/steps/sync_sliver_ips.py
@@ -0,0 +1,25 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from util.logger import Logger, logging
+
+logger = Logger(logfile='observer.log', level=logging.INFO)
+
+class SyncSliverIps(OpenStackSyncStep):
+	provides=[Sliver]
+	requested_interval=0
+	def fetch_pending(self):
+		slivers = Sliver.objects.filter(ip=None)
+		return slivers
+
+	def sync_record(self, sliver):
+		self.manager.init_admin(tenant=sliver.slice.name)
+		servers = self.manager.driver.shell.nova.servers.findall(id=sliver.instance_id)
+		if not servers:
+			return
+		server = servers[0]
+		ips = server.addresses.get(sliver.slice.name, [])
+		if not ips:
+			return
+		sliver.ip = ips[0]['addr']
+		sliver.save()
+		logger.info("saved sliver ip: %s %s" % (sliver, ips[0]))
diff --git a/planetstack/observer/steps/sync_slivers.py b/planetstack/observer/steps/sync_slivers.py
new file mode 100644
index 0000000..adab39d
--- /dev/null
+++ b/planetstack/observer/steps/sync_slivers.py
@@ -0,0 +1,29 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from core.models.slice import SliceMembership
+
+class SyncSlivers(OpenStackSyncStep):
+	provides=[Sliver]
+	requested_interval=0
+	def sync_record(self, sliver):
+		if not sliver.instance_id:
+			nics = self.get_requested_networks(sliver.slice)
+			slice_memberships = SliceMembership.objects.filter(slice=sliver.slice)
+			pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
+			pubkeys.append(sliver.creator.public_key)
+			instance = self.driver.spawn_instance(name=sliver.name,
+								key_name = sliver.creator.keyname,
+								image_id = sliver.image.image_id,
+								hostname = sliver.node.name,
+								pubkeys = pubkeys,
+								nics = nics )
+			sliver.instance_id = instance.id
+			sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+
+		if sliver.instance_id and ("numberCores" in sliver.changed_fields):
+			self.driver.update_instance_metadata(sliver.instance_id, {"cpu_cores": str(sliver.numberCores)})
+
+		sliver.save()	
diff --git a/planetstack/observer/steps/sync_users.py b/planetstack/observer/steps/sync_users.py
new file mode 100644
index 0000000..3f509ef
--- /dev/null
+++ b/planetstack/observer/steps/sync_users.py
@@ -0,0 +1,35 @@
+import os
+import base64
+import hashlib
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.user import User
+
+class SyncUsers(OpenStackSyncStep):
+	provides=[User]
+	requested_interval=0
+	def sync_record(self, user):
+		name = user.email[:user.email.find('@')]
+		user_fields = {'name': name,
+					   'email': user.email,
+					   'password': hashlib.md5(user.password).hexdigest()[:6],
+					   'enabled': True}
+		if not user.kuser_id:
+			keystone_user = self.driver.create_user(**user_fields)
+			user.kuser_id = keystone_user.id
+		else:
+			self.driver.update_user(user.kuser_id, user_fields)		
+
+		if user.site:
+			self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'user')
+			if user.is_admin:
+				self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'admin')
+			else:
+				# may have admin role so attempt to remove it
+				self.driver.delete_user_role(user.kuser_id, user.site.tenant_id, 'admin')
+
+		if user.public_key:
+			self.init_caller(user, user.site.login_base)
+			self.save_key(user.public_key, user.keyname)
+			self.init_admin()
+
+		user.save()
diff --git a/planetstack/observer/syncstep.py b/planetstack/observer/syncstep.py
new file mode 100644
index 0000000..68e9f99
--- /dev/null
+++ b/planetstack/observer/syncstep.py
@@ -0,0 +1,61 @@
+import os
+import base64
+from datetime import datetime
+from planetstack.config import Config
+from core.models import *
+
+class FailedDependency(Exception):
+	pass
+
+class SyncStep:
+	""" A PlanetStack Sync step. 
+
+	Attributes:
+		psmodel		Model name the step synchronizes 
+		dependencies	list of names of models that must be synchronized first if the current model depends on them
+	""" 
+	slow=False
+	def get_prop(self, prop):
+		try:
+			sync_config_dir = Config().sync_config_dir
+		except:
+			sync_config_dir = '/etc/planetstack/sync'
+		prop_config_path = os.path.join(sync_config_dir,self.name,prop)
+		return open(prop_config_path).read().rstrip()
+
+	def __init__(self, **args):
+		"""Initialize a sync step
+		   Keyword arguments:
+		   		name -- Name of the step
+				provides -- PlanetStack models sync'd by this step
+		"""
+		self.name = self.__class__.__name__
+		self.dependencies = []
+		try:
+			self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
+		except:
+			self.soft_deadline = 5 # 5 seconds
+
+		return
+
+	def fetch_pending(self):
+		return Sliver.objects.filter(ip=None)
+
+	def check_dependencies(self, obj, failed):
+		# If an object that obj depends on failed to sync, fail obj as well
+		for dep in self.dependencies:
+			peer_object = getattr(obj, dep.lower(), None)
+			for f in failed:
+				if (peer_object and peer_object.pk==f.pk):
+					raise FailedDependency
+
+	def call(self, failed=[]):
+		pending = self.fetch_pending()
+		for o in pending:
+			try:
+				self.check_dependencies(o, failed) # Raises exception if a dependency failed
+				self.sync_record(o)
+				o.enacted = datetime.now() # Is this the same timezone? XXX
+				o.save(update_fields=['enacted'])
+			except:
+				failed.append(o)
+		return failed
+
+	def __call__(self, **args):
+		return self.call(**args)
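+
+# A minimal step sketch (illustrative; the Widget model and the driver call are
+# hypothetical, see observer/steps/ for real steps):
+#
+#   class SyncWidgets(SyncStep):
+#       provides = [Widget]
+#       requested_interval = 0
+#
+#       def fetch_pending(self):
+#           return Widget.objects.filter(enacted=None)
+#
+#       def sync_record(self, widget):
+#           pass # push widget state to the backend here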
diff --git a/planetstack/observer/toposort.py b/planetstack/observer/toposort.py
new file mode 100755
index 0000000..34bf6f5
--- /dev/null
+++ b/planetstack/observer/toposort.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+
+import time
+import traceback
+import commands
+import threading
+import json
+
+from datetime import datetime
+from collections import defaultdict
+
+def toposort(g, steps):
+	reverse = {}
+
+	for k,v in g.items():
+		for rk in v:
+			try:
+				reverse[rk].append(k)
+				reverse[rk]=[k]
+				reverse[rk]=k
+
+	sources = []
+	for k,v in g.items():
+		if not reverse.has_key(k):
+			sources.append(k)
+
+
+	for k,v in reverse.iteritems():
+		if (not v):
+			sources.append(k)
+
+	order = []
+	marked = []
+	while sources:
+		n = sources.pop()
+		try:
+			for m in g[n]:
+				if m not in marked:
+					sources.append(m)
+					marked.append(m)
+		except KeyError:
+			pass
+		if (n in steps):
+			order.append(n)
+
+	return order
+
+print toposort({'a':'b','b':'c','c':'d','d':'c'},['d','c','b','a'])
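+# The call above prints ['a', 'b', 'c', 'd']: only 'a' has no inbound edge, so
+# it seeds the traversal, and the marked list keeps the 'c'/'d' cycle from
+# looping forever.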
diff --git a/planetstack/openstack/driver.py b/planetstack/openstack/driver.py
index b6b6d48..0e5fbf0 100644
--- a/planetstack/openstack/driver.py
+++ b/planetstack/openstack/driver.py
@@ -18,12 +18,12 @@
         else:
             self.shell = OpenStackClient()
 
-    def create_role(self, name): 
+    def create_role(self, name):
         roles = self.shell.keystone.roles.findall(name=name)
         if not roles:
             role = self.shell.keystone.roles.create(name)
         else:
-            role = roles[0] 
+            role = roles[0]
         return role
 
     def delete_role(self, filter):
@@ -56,7 +56,7 @@
         for tenant in tenants:
             # nova does not automatically delete the tenant's instances
             # so we manually delete instances before deleteing the tenant   
-            instances = self.shell.nova_db.instance_get_all_by_filters(ctx, 
+            instances = self.shell.nova_db.instance_get_all_by_filters(ctx,
                        {'project_id': tenant.id}, 'id', 'asc')
             client = OpenStackClient(tenant=tenant.name)
             driver = OpenStackDriver(client=client)
@@ -160,12 +160,12 @@
         if router and subnet:
             self.shell.quantum.remove_interface_router(router_id, {'subnet_id': subnet_id})
  
-    def create_network(self, name):
+    def create_network(self, name, shared=False):
         nets = self.shell.quantum.list_networks(name=name)['networks']
         if nets: 
             net = nets[0]
         else:
-            net = self.shell.quantum.create_network({'network': {'name': name}})['network']
+            net = self.shell.quantum.create_network({'network': {'name': name, 'shared': shared}})['network']
         return net
  
     def delete_network(self, id):
@@ -210,7 +210,7 @@
         for snet in subnets:
             if snet['cidr'] == cidr_ip and snet['network_id'] == network_id:
                 subnet = snet
- 
+
         if not subnet:
             allocation_pools = [{'start': start, 'end': end}]
             subnet = {'subnet': {'name': name,
@@ -218,7 +218,7 @@
                                  'ip_version': ip_version,
                                  'cidr': cidr_ip,
                                  'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
-                                 'allocation_pools': allocation_pools}}          
+                                 'allocation_pools': allocation_pools}}
             subnet = self.shell.quantum.create_subnet(subnet)['subnet']
             self.add_external_route(subnet)
         # TODO: Add route to external network
@@ -328,9 +328,37 @@
         keys = self.shell.nova.keypairs.findall(id=id)
         for key in keys:
             self.shell.nova.keypairs.delete(key) 
-        return 1 
+        return 1
 
-    def spawn_instance(self, name, key_name=None, hostname=None, image_id=None, security_group=None, pubkeys=[]):
+    def get_private_networks(self, tenant=None):
+        if not tenant:
+            tenant = self.shell.nova.tenant
+        tenant = self.shell.keystone.tenants.find(name=tenant)
+        search_opts = {"tenant_id": tenant.id, "shared": False}
+        private_networks = self.shell.quantum.list_networks(**search_opts)
+        return private_networks
+
+    def get_shared_networks(self):
+        search_opts = {"shared": True}
+        shared_networks = self.shell.quantum.list_networks(**search_opts)
+        return shared_networks
+
+    def get_network_subnet(self, network_id):
+        subnet_id = None
+        subnet = None
+        if network_id:
+            os_networks = self.shell.quantum.list_networks(id=network_id)["networks"]
+            if os_networks:
+                os_network = os_networks[0]
+                if os_network['subnets']:
+                    subnet_id = os_network['subnets'][0]
+                    os_subnets = self.shell.quantum.list_subnets(id=subnet_id)['subnets']
+                    if os_subnets:
+                        subnet = os_subnets[0]['cidr']
+
+        return (subnet_id, subnet)
+
+    def spawn_instance(self, name, key_name=None, hostname=None, image_id=None, security_group=None, pubkeys=[], nics=None):
         flavor_name = self.config.nova_default_flavor
         flavor = self.shell.nova.flavors.find(name=flavor_name)
         #if not image:
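The three lookup helpers above return raw Quantum structures rather than model
objects. A hedged sketch of the shapes involved (field names follow Quantum's
list_networks/list_subnets responses; the UUIDs are made up):

    driver = OpenStackDriver()
    shared = driver.get_shared_networks()
    # => {'networks': [{'id': '...', 'name': 'sharednet1', 'shared': True, ...}]}
    (subnet_id, cidr) = driver.get_network_subnet("3f1c...")
    # => ('9ab2...', '10.0.9.0/24'), or (None, None) if the network has no subnet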
@@ -354,13 +382,21 @@
                                             security_group = security_group,
                                             files=files,
                                             scheduler_hints=hints,
-                                            availability_zone=availability_zone)
+                                            availability_zone=availability_zone,
+                                            nics=nics)
         return server
-          
+
     def destroy_instance(self, id):
-        servers = self.shell.nova.servers.findall(id=id)
+        if (self.shell.nova.tenant=="admin"):
+            # findall() is implemented as a list() followed by a python search of the
+            # list. Since findall() doesn't accept "all_tenants", we do this using
+            # list() ourselves. This allows us to delete an instance as admin.
+            servers = self.shell.nova.servers.list(search_opts={"all_tenants": True})
+        else:
+            servers = self.shell.nova.servers.list()
         for server in servers:
-            self.shell.nova.servers.delete(server)
+            if server.id == id:
+                self.shell.nova.servers.delete(server)
 
     def update_instance_metadata(self, id, metadata):
         servers = self.shell.nova.servers.findall(id=id)
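The all_tenants branch matters when the observer reaps instances belonging to
other tenants. A sketch of the admin-side call, assuming a nova client handle
like self.shell.nova (python-novaclient's list() accepts search_opts; the
doomed_uuid name is a placeholder):

    servers = nova.servers.list(search_opts={"all_tenants": True})
    for server in servers:
        if server.id == doomed_uuid:
            nova.servers.delete(server)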
diff --git a/planetstack/openstack/event_manager.py b/planetstack/openstack/event_manager.py
deleted file mode 100644
index a849f09..0000000
--- a/planetstack/openstack/event_manager.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import threading
-import requests, json
-
-from core.models import *
-from openstack.manager import OpenStackManager
-from planetstack.config import Config
-
-import os
-import base64
-from fofum import Fofum
-
-# decorator that marks dispatchable event methods
-def event(func):
-    setattr(func, 'event', func.__name__)
-    return func      
-
-class EventHandler:
-    # This code is currently not in use.
-    def __init__(self):
-        self.manager = OpenStackManager()
-
-    @staticmethod
-    def get_events():
-        events = []
-        for name in dir(EventHandler):
-            attribute = getattr(EventHandler, name)
-            if hasattr(attribute, 'event'):
-                events.append(getattr(attribute, 'event'))
-        return events
-
-    def dispatch(self, event, *args, **kwds):
-        if hasattr(self, event):
-            return getattr(self, event)(*args, **kwds)
-            
-        
-    @event
-    def save_site(self, id):
-        sites = Site.objects.filter(id=id)
-        if sites:
-            self.manager.save_site(sites[0])
-    
-    @event
-    def delete_site(self, tenant_id):
-        self.manager.driver.delete_tenant(tenant_id)
-
-    @event
-    def save_site_privilege(self, id):
-        site_privileges = SitePrivilege.objects.filter(id=id)
-        if site_privileges:
-            site_priv = self.manager.save_site_privilege(site_privileges[0])
-
-    @event
-    def delete_site_privilege(self, kuser_id, tenant_id, role_type):
-        self.manager.driver.delete_user_role(kuser_id, tenant_id, role_type)
-
-    @event
-    def save_slice(self, id):
-        slices = Slice.objects.filter(id=id)
-        if slices:
-            self.manager.save_slice(slices[0])
-    
-    @event
-    def delete_slice(self, tenant_id, network_id, router_id, subnet_id):
-        self.manager._delete_slice(tenant_id, network_id, router_id, subnet_id)
-
-    @event
-    def save_user(self, id):
-        users = User.objects.filter(id=id)
-        if users:
-            self.manager.save_user(users[0])
-        
-    @event
-    def delete_user(self, kuser_id):
-        self.manager.driver.delete_user(kuser_id)
-    
-    @event
-    def save_sliver(self, id):
-        slivers = Sliver.objects.filter(id=id)
-        if slivers:
-            self.manager.save_sliver(slivers[0])
-
-    @event
-    def delete_sliver(self, instance_id):
-        self.manager.destroy_instance(instance_id)                            
-
-    
-class EventSender:
-    def __init__(self,user=None,clientid=None):
-        try:
-            clid = Config().feefie_client_id
-            user = Config().feefie_client_user
-        except:
-            clid = 'planetstack_core_team'
-            user = 'pl'
-
-        self.fofum = Fofum(user=user)
-        self.fofum.make(clid)
-
-    def fire(self):
-        self.fofum.fire()
-
-class EventListener:
-    def __init__(self,wake_up=None):
-        self.handler = EventHandler()
-        self.wake_up = wake_up
-
-    def handle_event(self, payload):
-        payload_dict = json.loads(payload)
-
-	# The code below will come back when we optimize the observer syncs
-	# into 'small' and 'big' syncs.
-
-        #event = payload_dict['event']
-        #ctx = payload_dict['ctx']
-        #self.handler.dispatch(event,**ctx)   
-
-        if (self.wake_up):
-            self.wake_up()
-        
-
-    def run(self):
-        # This is our unique client id, to be used when firing and receiving events
-        # It needs to be generated once and placed in the config file
-
-        try:
-            clid = Config().feefie_client_id
-            user = Config().feefie_client_user
-        except:
-            clid = 'planetstack_core_team'
-            user = 'pl'
-
-        f = Fofum(user=user)
-        
-        listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
-        listener_thread.start()
diff --git a/planetstack/openstack/manager.py b/planetstack/openstack/manager.py
index 3ae7dea..2fb4ff8 100644
--- a/planetstack/openstack/manager.py
+++ b/planetstack/openstack/manager.py
@@ -301,17 +301,32 @@
             #del_route = 'route del -net %s' % self.cidr
             #commands.getstatusoutput(del_route)
 
+    def get_requested_networks(self, slice):
+        network_ids = [x.network_id for x in slice.networks.all()]
+
+        if slice.network_id is not None:
+            network_ids.append(slice.network_id)
+
+        networks = []
+        for network_id in network_ids:
+            networks.append({"net-id": network_id})
+
+        return networks
+
     @require_enabled
     def save_sliver(self, sliver):
         if not sliver.instance_id:
+            nics = self.get_requested_networks(sliver.slice)
+            file("/tmp/scott-manager","a").write("slice: %s\nreq: %s\n" % (str(sliver.slice.name), str(nics)))
             slice_memberships = SliceMembership.objects.filter(slice=sliver.slice)
             pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
-            pubkeys.append(sliver.creator.public_key) 
+            pubkeys.append(sliver.creator.public_key)
             instance = self.driver.spawn_instance(name=sliver.name,
                                    key_name = sliver.creator.keyname,
                                    image_id = sliver.image.image_id,
                                    hostname = sliver.node.name,
-                                   pubkeys = pubkeys )
+                                   pubkeys = pubkeys,
+                                   nics = nics )
             sliver.instance_id = instance.id
             sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
 
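For reference, the nics list built by get_requested_networks uses the format
python-novaclient expects: one dict per network keyed by "net-id" (a
"v4-fixed-ip" key can also be supplied, though this code does not use it).
A sketch with made-up UUIDs:

    nics = [{"net-id": "3d6c7a31-..."}, {"net-id": "9f2be4c0-..."}]
    server = nova.servers.create(name, image, flavor, nics=nics)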
@@ -368,7 +383,7 @@
         from core.models.image import Image
         # collect local images
         images = Image.objects.all()
-        images_dict = {}    
+        images_dict = {}
         for image in images:
             images_dict[image.name] = image
 
@@ -391,4 +406,149 @@
         old_image_names = set(images_dict.keys()).difference(glance_images_dict.keys())
         Image.objects.filter(name__in=old_image_names).delete()
 
+    @require_enabled
+    def save_network(self, network):
+        if not network.network_id:
+            if network.template.sharedNetworkName:
+                network.network_id = network.template.sharedNetworkId
+                (network.subnet_id, network.subnet) = self.driver.get_network_subnet(network.network_id)
+            else:
+                network_name = network.name
+
+                # create network
+                os_network = self.driver.create_network(network_name, shared=True)
+                network.network_id = os_network['id']
+
+                # create router
+                router = self.driver.create_router(network_name)
+                network.router_id = router['id']
+
+                # create subnet
+                next_subnet = self.get_next_subnet()
+                cidr = str(next_subnet.cidr)
+                ip_version = next_subnet.version
+                start = str(next_subnet[2])
+                end = str(next_subnet[-2])
+                subnet = self.driver.create_subnet(name=network_name,
+                                                   network_id = network.network_id,
+                                                   cidr_ip = cidr,
+                                                   ip_version = ip_version,
+                                                   start = start,
+                                                   end = end)
+                network.subnet = cidr
+                network.subnet_id = subnet['id']
+                # add subnet as interface to slice's router
+                self.driver.add_router_interface(router['id'], subnet['id'])
+                # add external route
+                self.driver.add_external_route(subnet)
+
+        network.save()
+        network.enacted = datetime.now()
+        network.save(update_fields=['enacted'])
+
+    def delete_network(self, network):
+        if (network.router_id) and (network.subnet_id):
+            self.driver.delete_router_interface(network.router_id, network.subnet_id)
+        if network.subnet_id:
+            self.driver.delete_subnet(network.subnet_id)
+        if network.router_id:
+            self.driver.delete_router(network.router_id)
+        if network.network_id:
+            self.driver.delete_network(network.network_id)
+
+    def save_network_template(self, template):
+        if (template.sharedNetworkName) and (not template.sharedNetworkId):
+            os_networks = self.driver.shell.quantum.list_networks(name=template.sharedNetworkName)['networks']
+            if os_networks:
+                template.sharedNetworkId = os_networks[0]["id"]
+
+        template.save()
+        template.enacted = datetime.now()
+        template.save(update_fields=['enacted'])
+
+    def find_or_make_template_for_network(self, name):
+        """ Given a network name, try to guess the right template for it """
+
+        # templates for networks we may encounter
+        if name=='nat-net':
+            template_dict = None # {"name": "private-nat", "visibility": "private", "translation": "nat"}
+        elif name=='sharednet1':
+            template_dict = {"name": "dedicated-public", "visibility": "public", "translation": "none"}
+        else:
+            template_dict = {"name": "private", "visibility": "private", "translation": "none"}
+
+        if template_dict is None:
+            # None means we don't want to auto-instantiate this network type
+            return None
+
+        # if we have an existing template return it
+        templates = NetworkTemplate.objects.filter(name=template_dict["name"])
+        if templates:
+            return templates[0]
+
+        template = NetworkTemplate(**template_dict)
+        template.save()
+        return template
+
+    def refresh_network_templates(self):
+        for template in NetworkTemplate.objects.all():
+            if (template.sharedNetworkName) and (not template.sharedNetworkId):
+                # this will cause us to try to fill in the sharedNetworkId
+                self.save_network_template(template)
+
+    def refresh_networks(self):
+        # get a list of all networks in the model
+
+        networks = Network.objects.all()
+        networks_by_name = {}
+        networks_by_id = {}
+        for network in networks:
+            networks_by_name[network.name] = network
+            networks_by_id[network.network_id] = network
+
+        # Get a list of all shared networks in OS
+
+        os_networks = self.driver.shell.quantum.list_networks()['networks']
+        os_networks_by_name = {}
+        os_networks_by_id = {}
+        for os_network in os_networks:
+            os_networks_by_name[os_network['name']] = os_network
+            os_networks_by_id[os_network['id']] = os_network
+
+        for (uuid, os_network) in os_networks_by_id.items():
+            #print "checking OS network", os_network['name']
+            if (os_network['shared']) and (uuid not in networks_by_id):
+                # Only automatically create shared networks. This is for Andy's
+                # nat-net and sharednet1.
+
+                # get() would raise DoesNotExist for an unknown tenant; filter so
+                # the owner_slice check below can actually fall through
+                owner_slices = Slice.objects.filter(tenant_id = os_network['tenant_id'])
+                owner_slice = owner_slices[0] if owner_slices else None
+                template = self.find_or_make_template_for_network(os_network['name'])
+
+                if (template is None):
+                    # This is our way of saying we don't want to auto-instantiate
+                    # this network type.
+                    continue
+
+                (subnet_id, subnet) = self.driver.get_network_subnet(os_network['id'])
+
+                if owner_slice:
+                    #print "creating model object for OS network", os_network['name']
+                    new_network = Network(name = os_network['name'],
+                                          template = template,
+                                          owner = owner_slice,
+                                          network_id = uuid,
+                                          subnet_id = subnet_id)
+                    new_network.save()
+
+        for (network_id, network) in networks_by_id.items():
+            # If the network disappeared from OS, then reset its network_id to None
+            if (network.network_id is not None) and (network.network_id not in os_networks_by_id):
+                network.network_id = None
+
+            # If no OS object exists, then saving the network will create one
+            if (network.network_id is None):
+                #print "creating OS network for", network.name
+                self.save_network(network)
+            else:
+                pass #print "network", network.name, "has its OS object"
+
 
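Two details of save_network worth pinning down. First, the start/end slicing of
next_subnet relies on netaddr-style indexing (assuming get_next_subnet returns
a netaddr.IPNetwork, consistent with the next_subnet.cidr usage above):

    from netaddr import IPNetwork
    net = IPNetwork("10.0.9.0/24")
    str(net[2])    # '10.0.9.2'   - first allocatable address, skipping .0 and .1
    str(net[-2])   # '10.0.9.254' - last one, leaving .255 for broadcast

Second, refresh_networks only auto-adopts networks that OpenStack marks shared;
unknown private networks are left alone, and model networks whose OS object has
vanished are rebuilt by clearing network_id and calling save_network again.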
diff --git a/planetstack/openstack/observer.py b/planetstack/openstack/observer.py
deleted file mode 100644
index fbacb67..0000000
--- a/planetstack/openstack/observer.py
+++ /dev/null
@@ -1,307 +0,0 @@
-import time
-import traceback
-import commands
-import threading
-
-from datetime import datetime
-from collections import defaultdict
-from core.models import *
-from django.db.models import F, Q
-from openstack.manager import OpenStackManager
-from util.logger import Logger, logging, logger
-#from timeout import timeout
-
-
-logger = Logger(logfile='observer.log', level=logging.INFO)
-
-class OpenStackObserver:
-    
-    def __init__(self):
-        self.manager = OpenStackManager()
-        # The Condition object that gets signalled by Feefie events
-        self.event_cond = threading.Condition()
-
-    def wait_for_event(self, timeout):
-        self.event_cond.acquire()
-        self.event_cond.wait(timeout)
-        self.event_cond.release()
-        
-    def wake_up(self):
-        logger.info('Wake up routine called. Event cond %r'%self.event_cond)
-        self.event_cond.acquire()
-        self.event_cond.notify()
-        self.event_cond.release()
-
-    def run(self):
-        if not self.manager.enabled or not self.manager.has_openstack:
-            return
-        while True:
-            try:
-                logger.info('Observer run loop')
-                #self.sync_roles()
-                logger.info('Calling sync tenants')
-                self.sync_tenants()
-                self.sync_users()
-                self.sync_user_tenant_roles()
-                self.sync_slivers()
-                self.sync_sliver_ips()
-                logger.info('Calling sync external routes')
-                self.sync_external_routes()
-                self.wait_for_event(timeout=300)
-                logger.info('Observer woken up')
-            except:
-                traceback.print_exc() 
-
-    def sync_roles(self):
-        """
-        save all roles that don't already exist in keystone. Remove keystone roles
-        that don't exist in planetstack
-        """
-        # sync all roles that don't already exist in keystone
-        keystone_roles = self.manager.driver.shell.keystone.roles.findall()
-        keystone_role_names = [kr.name for kr in keystone_roles]
-        pending_roles = Role.objects.all()
-        pending_role_names = [r.role_type for r in pending_roles] 
-        for role in pending_roles:
-            if role.role_type not in keystone_role_names:
-                try:
-                    self.manager.save_role(role)
-                    logger.info("save role: %s" % (role))
-                except:
-                    logger.log_exc("save role failed: %s" % role)  
-                    traceback.print_exc()
-
-        # don't delete roles for now 
-        """ 
-        # delete keystone roles that don't exist in planetstack
-        for keystone_role in keystone_roles:
-            if keystone_role.name == 'admin':
-                continue
-            if keystone_role.name not in pending_role_names:
-                try:
-                    self.manager.driver.delete_role({id: keystone_role.id})
-                except:
-                    traceback.print_exc()
-        """
-
-    def sync_tenants(self):
-        """
-        Save all sites and slices where enacted < updated or enacted == None.
-        Remove sites and slices that don't exist in the openstack db if they
-        have an enacted time (enacted != None).
-        """ 
-        # get all sites that need to be synced (enacted < updated or enacted is None)
-        pending_sites = Site.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        for site in pending_sites:
-            try:
-                self.manager.save_site(site)
-                logger.info("saved site %s" % site)
-            except:
-                logger.log_exc("save site failed: %s" % site)
-
-        # get all slices that need to be synced (enacted < updated or enacted is None)
-        pending_slices = Slice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        for slice in pending_slices:
-            try:
-                self.manager.init_caller(slice.creator, slice.creator.site.login_base)
-                self.manager.save_slice(slice)
-                logger.info("saved slice %s" % slice)
-            except:
-                logger.log_exc("save slice failed: %s" % slice)
-
-        # get all sites where enacted != null. We can assume these sites
-        # have previously been synced and need to be checked for deletion.
-        sites = Site.objects.filter(enacted__isnull=False)
-        site_dict = {}
-        for site in sites:
-            site_dict[site.login_base] = site
-
-        # get all slices where enacted != null. We can assume these slices
-        # have previously been synced and need to be checked for deletion.
-        slices = Slice.objects.filter(enacted__isnull=False)
-        slice_dict = {}
-        for slice in slices:
-            slice_dict[slice.name] = slice
-
-        # delete keystone tenants that don't have a site record
-        tenants = self.manager.driver.shell.keystone.tenants.findall()
-        system_tenants = ['admin','service']
-        for tenant in tenants:
-            if tenant.name in system_tenants: 
-                continue
-            if tenant.name not in site_dict and tenant.name not in slice_dict:
-                try:
-                    self.manager.driver.delete_tenant(tenant.id)
-                    logger.info("deleted tenant: %s" % (tenant))
-                except:
-                    logger.log_exc("delete tenant failed: %s" % tenant)
-
-
-    def sync_users(self):
-        """
-        save all users where enacted < updated or enacted == None. Remove users
-        that don't exist in the openstack db if they have an enacted time (enacted != None).
-        """ 
-        # get all users that need to be synced (enacted < updated or enacted is None)
-        pending_users = User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        for user in pending_users:
-            try:
-                self.manager.save_user(user)
-                logger.info("saved user: %s" % (user))
-            except:
-                logger.log_exc("save user failed: %s" %user)
-
-        # get all users where enacted != null. We can assume these users
-        # have previously been synced and need to be checked for deletion.
-        users = User.objects.filter(enacted__isnull=False)
-        user_dict = {}
-        for user in users:
-            user_dict[user.kuser_id] = user
-
-        # delete keystone users that don't have a user record
-        system_users = ['admin', 'nova', 'quantum', 'glance', 'cinder', 'swift', 'service']
-        users = self.manager.driver.shell.keystone.users.findall()
-        for user in users:
-            if user.name in system_users:
-                continue
-            if user.id not in user_dict:
-                try:
-                    #self.manager.driver.delete_user(user.id)
-                    logger.info("deleted user: %s" % user)
-                except:
-                    logger.log_exc("delete user failed: %s" % user)
-                    
-
-    def sync_user_tenant_roles(self):
-        """
-        Save all site privileges and slice memberships where enacted < updated or
-        enacted == None. Remove ones that don't exist in openstack db if they have 
-        an enacted time (enacted != None).
-        """
-        # sync site privileges
-        pending_site_privileges = SitePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        for site_priv in pending_site_privileges:
-            try:
-                self.manager.save_site_privilege(site_priv)  
-                logger.info("saved site privilege: %s" % (site_priv))
-            except: logger.log_exc("save site privilege failed: %s " % site_priv)
-
-        # sync slice memberships
-        pending_slice_memberships = SliceMembership.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        for slice_memb in pending_slice_memberships:
-            try:
-                self.manager.save_slice_membership(slice_memb)
-                logger.info("saved slice membership: %s" % (slice_memb))
-            except: logger.log_exc("save slice membership failed: %s" % slice_memb)
-
-        # get all site privileges and slice memberships that have been enacted 
-        user_tenant_roles = defaultdict(list)
-        for site_priv in SitePrivilege.objects.filter(enacted__isnull=False):
-            user_tenant_roles[(site_priv.user.kuser_id, site_priv.site.tenant_id)].append(site_priv.role.role)
-        for slice_memb in SliceMembership.objects.filter(enacted__isnull=False):
-            user_tenant_roles[(slice_memb.user.kuser_id, slice_memb.slice.tenant_id)].append(slice_memb.role.role)  
- 
-        # Some user tenant roles aren't stored in planetstack but must be preserved.
-        # Roles that fall in this category:
-        # 1. Never remove a user's role at their home site.
-        # 2. Never remove a user's role at a slice they've created.
-        # Keep track of all roles that must be preserved.
-        users = User.objects.all()
-        preserved_roles = {}
-        for user in users:
-            tenant_ids = [s['tenant_id'] for s in user.slices.values()]
-            tenant_ids.append(user.site.tenant_id) 
-            preserved_roles[user.kuser_id] = tenant_ids
-
- 
-        # begin removing user tenant roles from keystone. This is stored in the 
-        # Metadata table.
-        for metadata in self.manager.driver.shell.keystone_db.get_metadata():
-            # skip admin roles
-            if metadata.user_id == self.manager.driver.admin_user.id:
-                continue
-            # skip preserved tenant ids
-            if metadata.user_id in preserved_roles and \
-               metadata.tenant_id in preserved_roles[metadata.user_id]: 
-                continue           
-            # get roles for user at this tenant
-            user_tenant_role_ids = user_tenant_roles.get((metadata.user_id, metadata.tenant_id), [])
-
-            if user_tenant_role_ids:
-                # The user has roles at the tenant. Check if roles need to 
-                # be updated.
-                user_keystone_role_ids = metadata.data.get('roles', [])
-                for role_id in user_keystone_role_ids:
-                    if role_id not in user_tenant_role_ids: 
-                        user_keystone_role_ids.pop(user_keystone_role_ids.index(role_id))
-            else:
-                # The user has no roles at this tenant. 
-                metadata.data['roles'] = [] 
-            #session.add(metadata)
-            logger.info("pruning metadata for %s at %s" % (metadata.user_id, metadata.tenant_id))
- 
-    def sync_slivers(self):
-        """
-        save all slivers where enacted < updated or enacted == None. Remove slivers
-        that don't exist in the openstack db if they have an enacted time (enacted != None).
-        """
-        # get all slivers that need to be synced (enacted < updated or enacted is None)
-        pending_slivers = Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        for sliver in pending_slivers:
-            if sliver.creator: 
-                try: 
-                    # update manager context
-                    self.manager.init_caller(sliver.creator, sliver.slice.name)
-                    self.manager.save_sliver(sliver)
-                    logger.info("saved sliver: %s" % (sliver))
-                except:
-                    logger.log_exc("save sliver failed: %s" % sliver) 
-
-        # get all slivers where enacted != null. We can assume these slivers
-        # have previously been synced and need to be checked for deletion.
-        slivers = Sliver.objects.filter(enacted__isnull=False)
-        sliver_dict = {}
-        for sliver in slivers:
-            sliver_dict[sliver.instance_id] = sliver
-
-        # delete instances that don't have a corresponding sliver record
-        ctx = self.manager.driver.shell.nova_db.ctx 
-        instances = self.manager.driver.shell.nova_db.instance_get_all(ctx)
-        for instance in instances:
-            if instance.uuid not in sliver_dict:
-                try:
-                    # lookup tenant and update context  
-                    tenant = self.manager.driver.shell.keystone.tenants.find(id=instance.project_id) 
-                    self.manager.init_admin(tenant=tenant.name)  
-                    self.manager.driver.destroy_instance(instance.uuid)
-                    logger.info("destroyed sliver: %s" % (instance))
-                except:
-                    logger.log_exc("destroy sliver failed: %s" % instance) 
-                
-
-    def sync_sliver_ips(self):
-        # fill in null ip addresses
-        slivers = Sliver.objects.filter(ip=None)
-        for sliver in slivers:
-            # update connection
-            self.manager.init_admin(tenant=sliver.slice.name)
-            servers = self.manager.driver.shell.nova.servers.findall(id=sliver.instance_id)
-            if not servers:
-                continue
-            server = servers[0]
-            ips = server.addresses.get(sliver.slice.name, [])
-            if not ips:
-                continue
-            sliver.ip = ips[0]['addr']
-            sliver.save()
-            logger.info("saved sliver ip: %s %s" % (sliver, ips[0]))
-
-    def sync_external_routes(self):
-        routes = self.manager.driver.get_external_routes() 
-        subnets = self.manager.driver.shell.quantum.list_subnets()['subnets']
-        for subnet in subnets:
-            try: 
-                self.manager.driver.add_external_route(subnet, routes)         
-            except: 
-                logger.log_exc("failed to add external route for subnet %s" % subnet)
- 
diff --git a/planetstack/openstack/openstack-db-cleanup.sh b/planetstack/openstack/openstack-db-cleanup.sh
new file mode 100755
index 0000000..9baca6e
--- /dev/null
+++ b/planetstack/openstack/openstack-db-cleanup.sh
@@ -0,0 +1,16 @@
+#! /bin/bash
+
+# to install
+#    chmod 0755 /opt/planetstack/openstack/openstack-db-cleanup.sh
+#    ln -s /opt/planetstack/openstack/openstack-db-cleanup.sh /etc/cron.daily/openstack-db-cleanup.cron
+
+mkdir -p /opt/planetstack/ovs-backups
+BACKUP_NAME=/opt/planetstack/ovs-backups/backup-`date "+%Y-%m-%d"`.sql
+mysqldump --create-options --routines --triggers --databases keystone ovs_quantum nova glance cinder > $BACKUP_NAME
+gzip $BACKUP_NAME
+
+mysql keystone -e "DELETE FROM token WHERE NOT DATE_SUB(CURDATE(),INTERVAL 2 DAY) <= expires;"
+mysqlcheck --optimize --databases keystone ovs_quantum nova glance cinder
+
+date >> /var/log/openstack-db-cleanup.log
+mysql keystone -e "select count(*) from token;" >> /var/log/openstack-db-cleanup.log
diff --git a/planetstack/planetstack-backend.py b/planetstack/planetstack-backend.py
old mode 100644
new mode 100755
index 0270264..7d94b67
--- a/planetstack/planetstack-backend.py
+++ b/planetstack/planetstack-backend.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 import os
 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
-from openstack.backend import Backend 
+from observer.backend import Backend 
 
 if __name__ == '__main__':
 
diff --git a/planetstack/planetstack/config.py b/planetstack/planetstack/config.py
index 7927803..b9abd3a 100644
--- a/planetstack/planetstack/config.py
+++ b/planetstack/planetstack/config.py
@@ -13,228 +13,228 @@
 """
 
 def isbool(v):
-    return v.lower() in ("true", "false")
+	return v.lower() in ("true", "false")
 
 def str2bool(v):
-    return v.lower() in ("true", "1")
+	return v.lower() in ("true", "1")
 
 class Config:
 
-    def __init__(self, config_file='/opt/planetstack/plstackapi_config'):
-        self._files = []
-        self.config_path = os.path.dirname(config_file)
-        self.config = ConfigParser.ConfigParser()
-        self.filename = config_file
-        if not os.path.isfile(self.filename):
-            self.create(self.filename)
-        self.load(self.filename)
+	def __init__(self, config_file='/opt/planetstack/plstackapi_config'):
+		self._files = []
+		self.config_path = os.path.dirname(config_file)
+		self.config = ConfigParser.ConfigParser()
+		self.filename = config_file
+		if not os.path.isfile(self.filename):
+			self.create(self.filename)
+		self.load(self.filename)
 
 
-    def _header(self):
-        header = """
+	def _header(self):
+		header = """
 DO NOT EDIT. This file was automatically generated at
 %s from:
 
 %s
 """ % (time.asctime(), os.linesep.join(self._files))
 
-        # Get rid of the surrounding newlines
-        return header.strip().split(os.linesep)
+		# Get rid of the surrounding newlines
+		return header.strip().split(os.linesep)
 
-    def create(self, filename):
-        if not os.path.exists(os.path.dirname(filename)):
-            os.makedirs(os.path.dirname(filename))
-        configfile = open(filename, 'w')
-        configfile.write(default_config)
-        configfile.close()
+	def create(self, filename):
+		if not os.path.exists(os.path.dirname(filename)):
+			os.makedirs(os.path.dirname(filename))
+		configfile = open(filename, 'w')
+		configfile.write(default_config)
+		configfile.close()
 
 
-    def load(self, filename):
-        if filename:
-            try:
-                self.config.read(filename)
-            except ConfigParser.MissingSectionHeaderError:
-                if filename.endswith('.xml'):
-                    self.load_xml(filename)
-                else:
-                    self.load_shell(filename)
-            self._files.append(filename)
-            self.set_attributes()
+	def load(self, filename):
+		if filename:
+			try:
+				self.config.read(filename)
+			except ConfigParser.MissingSectionHeaderError:
+				if filename.endswith('.xml'):
+					self.load_xml(filename)
+				else:
+					self.load_shell(filename)
+			self._files.append(filename)
+			self.set_attributes()
 
-    def load_xml(self, filename):
-        xml = XML(filename)
-        categories = xml.xpath('//configuration/variables/category')
-        for category in categories:
-            section_name = category.get('id')
-            if not self.config.has_section(section_name):
-                self.config.add_section(section_name)
-            options = category.xpath('./variablelist/variable')
-            for option in options:
-                option_name = option.get('id')
-                value = option.xpath('./value')[0].text
-                if not value:
-                    value = ""
-                self.config.set(section_name, option_name, value)
+	def load_xml(self, filename):
+		xml = XML(filename)
+		categories = xml.xpath('//configuration/variables/category')
+		for category in categories:
+			section_name = category.get('id')
+			if not self.config.has_section(section_name):
+				self.config.add_section(section_name)
+			options = category.xpath('./variablelist/variable')
+			for option in options:
+				option_name = option.get('id')
+				value = option.xpath('./value')[0].text
+				if not value:
+					value = ""
+				self.config.set(section_name, option_name, value)
 
-    def load_shell(self, filename):
-        f = open(filename, 'r')
-        for line in f:
-            try:
-                if line.startswith('#'):
-                    continue
-                parts = line.strip().split("=")
-                if len(parts) < 2:
-                    continue
-                option = parts[0]
-                value = parts[1].replace('"', '').replace("'","")
-                section, var = self.locate_varname(option, strict=False)
-                if section and var:
-                    self.set(section, var, value)
-            except:
-                pass
-        f.close()
+	def load_shell(self, filename):
+		f = open(filename, 'r')
+		for line in f:
+			try:
+				if line.startswith('#'):
+					continue
+				parts = line.strip().split("=")
+				if len(parts) < 2:
+					continue
+				option = parts[0]
+				value = parts[1].replace('"', '').replace("'","")
+				section, var = self.locate_varname(option, strict=False)
+				if section and var:
+					self.set(section, var, value)
+			except:
+				pass
+		f.close()
 
-    def locate_varname(self, varname, strict=True):
-        varname = varname.lower()
-        sections = self.config.sections()
-        section_name = ""
-        var_name = ""
-        for section in sections:
-            if varname.startswith(section.lower()) and len(section) > len(section_name):
-                section_name = section.lower()
-                var_name = varname.replace(section_name, "")[1:]
-        if strict and not self.config.has_option(section_name, var_name):
-            raise ConfigParser.NoOptionError(var_name, section_name)
-        return (section_name, var_name)
+	def locate_varname(self, varname, strict=True):
+		varname = varname.lower()
+		sections = self.config.sections()
+		section_name = ""
+		var_name = ""
+		for section in sections:
+			if varname.startswith(section.lower()) and len(section) > len(section_name):
+				section_name = section.lower()
+				var_name = varname.replace(section_name, "")[1:]
+		if strict and not self.config.has_option(section_name, var_name):
+			raise ConfigParser.NoOptionError(var_name, section_name)
+		return (section_name, var_name)
 
-    def set_attributes(self):
-        sections = self.config.sections()
-        for section in sections:
-            for item in self.config.items(section):
-                name = "%s_%s" % (section, item[0])
-                value = item[1]
-                if isbool(value):
-                    value = str2bool(value)
-                elif value.isdigit():
-                    value = int(value)
-                setattr(self, name, value)
-                setattr(self, name.upper(), value)
+	def set_attributes(self):
+		sections = self.config.sections()
+		for section in sections:
+			for item in self.config.items(section):
+				name = "%s_%s" % (section, item[0])
+				value = item[1]
+				if isbool(value):
+					value = str2bool(value)
+				elif value.isdigit():
+					value = int(value)
+				setattr(self, name, value)
+				setattr(self, name.upper(), value)
 
 
-    def verify(self, config1, config2, validate_method):
-        return True
+	def verify(self, config1, config2, validate_method):
+		return True
 
-    def validate_type(self, var_type, value):
-        return True
+	def validate_type(self, var_type, value):
+		return True
 
-    @staticmethod
-    def is_xml(config_file):
-        try:
-            x = Xml(config_file)
-            return True
-        except:
-            return False
+	@staticmethod
+	def is_xml(config_file):
+		try:
+			x = Xml(config_file)
+			return True
+		except:
+			return False
 
-    @staticmethod
-    def is_ini(config_file):
-        try:
-            c = ConfigParser.ConfigParser()
-            c.read(config_file)
-            return True
-        except ConfigParser.MissingSectionHeaderError:
-            return False
+	@staticmethod
+	def is_ini(config_file):
+		try:
+			c = ConfigParser.ConfigParser()
+			c.read(config_file)
+			return True
+		except ConfigParser.MissingSectionHeaderError:
+			return False
 
 
-    def dump(self, sections = []):
-        sys.stdout.write(output_python())
+	def dump(self, sections = []):
+		sys.stdout.write(output_python())
 
-    def output_python(self, encoding = "utf-8"):
-        buf = codecs.lookup(encoding)[3](StringIO())
-        buf.writelines(["# " + line + os.linesep for line in self._header()])
+	def output_python(self, encoding = "utf-8"):
+		buf = codecs.lookup(encoding)[3](StringIO())
+		buf.writelines(["# " + line + os.linesep for line in self._header()])
 
-        for section in self.sections():
-            buf.write("[%s]%s" % (section, os.linesep))
-            for (name,value) in self.items(section):
-                buf.write("%s=%s%s" % (name,value,os.linesep))
-            buf.write(os.linesep)
-        return buf.getvalue()
+		for section in self.sections():
+			buf.write("[%s]%s" % (section, os.linesep))
+			for (name,value) in self.items(section):
+				buf.write("%s=%s%s" % (name,value,os.linesep))
+			buf.write(os.linesep)
+		return buf.getvalue()
 
-    def output_shell(self, show_comments = True, encoding = "utf-8"):
-        """
-        Return variables as a shell script.
-        """
+	def output_shell(self, show_comments = True, encoding = "utf-8"):
+		"""
+		Return variables as a shell script.
+		"""
 
-        buf = codecs.lookup(encoding)[3](StringIO())
-        buf.writelines(["# " + line + os.linesep for line in self._header()])
+		buf = codecs.lookup(encoding)[3](StringIO())
+		buf.writelines(["# " + line + os.linesep for line in self._header()])
 
-        for section in self.sections():
-            for (name,value) in self.items(section):
-                # bash does not have the concept of NULL
-                if value:
-                    option = "%s_%s" % (section.upper(), name.upper())
-                    if isbool(value):
-                        value = str(str2bool(value))
-                    elif not value.isdigit():
-                        value = '"%s"' % value
-                    buf.write(option + "=" + value + os.linesep)
-        return buf.getvalue()
+		for section in self.sections():
+			for (name,value) in self.items(section):
+				# bash does not have the concept of NULL
+				if value:
+					option = "%s_%s" % (section.upper(), name.upper())
+					if isbool(value):
+						value = str(str2bool(value))
+					elif not value.isdigit():
+						value = '"%s"' % value
+					buf.write(option + "=" + value + os.linesep)
+		return buf.getvalue()
 
-    def output_php(self, encoding = "utf-8"):
-        """
-        Return variables as a PHP script.
-        """
+	def output_php(self, encoding = "utf-8"):
+		"""
+		Return variables as a PHP script.
+		"""
 
-        buf = codecs.lookup(encoding)[3](StringIO())
-        buf.write("<?php" + os.linesep)
-        buf.writelines(["// " + line + os.linesep for line in self._header()])
+		buf = codecs.lookup(encoding)[3](StringIO())
+		buf.write("<?php" + os.linesep)
+		buf.writelines(["// " + line + os.linesep for line in self._header()])
 
-        for section in self.sections():
-            for (name,value) in self.items(section):
-                option = "%s_%s" % (section, name)
-                buf.write(os.linesep)
-                buf.write("// " + option + os.linesep)
-                if value is None:
-                    value = 'NULL'
-                buf.write("define('%s', %s);" % (option, value) + os.linesep)
+		for section in self.sections():
+			for (name,value) in self.items(section):
+				option = "%s_%s" % (section, name)
+				buf.write(os.linesep)
+				buf.write("// " + option + os.linesep)
+				if value is None:
+					value = 'NULL'
+				buf.write("define('%s', %s);" % (option, value) + os.linesep)
 
-        buf.write("?>" + os.linesep)
+		buf.write("?>" + os.linesep)
 
-        return buf.getvalue()
+		return buf.getvalue()
 
-    def output_xml(self, encoding = "utf-8"):
-        pass
+	def output_xml(self, encoding = "utf-8"):
+		pass
 
-    def output_variables(self, encoding="utf-8"):
-        """
-        Return list of all variable names.
-        """
+	def output_variables(self, encoding="utf-8"):
+		"""
+		Return list of all variable names.
+		"""
 
-        buf = codecs.lookup(encoding)[3](StringIO())
-        for section in self.sections():
-            for (name,value) in self.items(section):
-                option = "%s_%s" % (section,name)
-                buf.write(option + os.linesep)
+		buf = codecs.lookup(encoding)[3](StringIO())
+		for section in self.sections():
+			for (name,value) in self.items(section):
+				option = "%s_%s" % (section,name)
+				buf.write(option + os.linesep)
 
-        return buf.getvalue()
-        pass
+		return buf.getvalue()
+		pass
 
-    def write(self, filename=None):
-        if not filename:
-            filename = self.filename
-        configfile = open(filename, 'w')
-        self.config.write(configfile)
+	def write(self, filename=None):
+		if not filename:
+			filename = self.filename
+		configfile = open(filename, 'w')
+		self.config.write(configfile)
 
-    def save(self, filename=None):
-        self.write(filename)
+	def save(self, filename=None):
+		self.write(filename)
 
-    def __getattr__(self, attr):
-        return getattr(self.config, attr)
+	def __getattr__(self, attr):
+		return getattr(self.config, attr)
 
 if __name__ == '__main__':
-    filename = None
-    if len(sys.argv) > 1:
-        filename = sys.argv[1]
-        config = Config(filename)
-    else:
-        config = Config()
-    config.dump()
+	filename = None
+	if len(sys.argv) > 1:
+		filename = sys.argv[1]
+		config = Config(filename)
+	else:
+		config = Config()
+	config.dump()
diff --git a/planetstack/plstackapi_config b/planetstack/plstackapi_config
index 1d7b761..deaf2e3 100644
--- a/planetstack/plstackapi_config
+++ b/planetstack/plstackapi_config
@@ -27,3 +27,6 @@
 default_image=None
 default_flavor=m1.small
 default_security_group=default
+
+[observer]
+pl_dependency_graph='/opt/planetstack/model-deps'
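One caution on this entry: Config parses the file with ConfigParser, which does
not strip quotes, so the resulting attribute keeps the literal single quotes.
A sketch of how the observer side sees it (attribute naming follows
Config.set_attributes):

    from planetstack.config import Config
    print Config().observer_pl_dependency_graph   # "'/opt/planetstack/model-deps'"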
diff --git a/planetstack/tests/GetConfiguration.py b/planetstack/tests/GetConfiguration.py
new file mode 100644
index 0000000..b3193f4
--- /dev/null
+++ b/planetstack/tests/GetConfiguration.py
@@ -0,0 +1,233 @@
+import os
+import json
+import socket
+import sys
+import time
+
+sys.path.append("/opt/planetstack")
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+from openstack.manager import OpenStackManager
+from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+
+def ps_id_to_pl_id(x):
+    # Since we don't want the PlanetStack object IDs to conflict with existing
+    # PlanetLab object IDs in the CMI, just add 100000 to the PlanetStack object
+    # IDs.
+    return 100000 + x
+
+def pl_id_to_ps_id(x):
+    return x - 100000
+
+def pl_slice_id(slice):
+    if slice.name == "princeton_vcoblitz":
+        # 70 is the slice id of princeton_vcoblitz on vicci
+        return 70
+    else:
+        return ps_id_to_pl_id(slice.id)
+
+def filter_fields(src, fields):
+    dest = {}
+    for (key,value) in src.items():
+        if (not fields) or (key in fields):
+            dest[key] = value
+    return dest
+
+def GetSlices(filter={}):
+    ps_slices = Slice.objects.filter(**filter)
+    slices = []
+    for ps_slice in ps_slices:
+        node_ids=[]
+        for ps_sliver in ps_slice.slivers.all():
+            node_ids.append(ps_id_to_pl_id(ps_sliver.node.id))
+
+        slice = {"instantiation": "plc-instantiated",
+                 "description": "planetstack slice",
+                 "slice_id": pl_slice_id(ps_slice),
+                 "node_ids": node_ids,
+                 "url": "planetstack",
+                 "max_nodes": 1000,
+                 "peer_slice_id": None,
+                 "slice_tag_ids": [],
+                 "peer_id": None,
+                 "site_id": ps_id_to_pl_id(ps_slice.site_id),
+                 "name": ps_slice.name}
+
+                 # creator_person_id, person_ids, expires, created
+
+        slices.append(slice)
+    return slices
+
+def GetNodes(node_ids=None, fields=None):
+    if node_ids:
+        ps_nodes = Node.objects.filter(id__in=[pl_id_to_ps_id(nid) for nid in node_ids])
+    else:
+        ps_nodes = Node.objects.all()
+    nodes = []
+    for ps_node in ps_nodes:
+        slice_ids=[]
+        for ps_sliver in ps_node.slivers.all():
+            slice_ids.append(pl_slice_id(ps_sliver.slice))
+
+        node = {"node_id": ps_id_to_pl_id(ps_node.id),
+                "site_id": ps_id_to_pl_id(ps_node.site_id),
+                "node_type": "regular",
+                "peer_node_id": None,
+                "hostname": ps_node.name,
+                "conf_file_ids": [],
+                "slice_ids": slice_ids,
+                "model": "planetstack",
+                "peer_id": None,
+                "node_tag_ids": []}
+
+                # last_updated, key, boot_state, pcu_ids, node_type, session, last_boot,
+                # interface_ids, slice_ids_whitelist, run_level, ssh_rsa_key, last_pcu_reboot,
+                # nodegroup_ids, verified, last_contact, boot_nonce, version,
+                # last_pcu_configuration, last_download, date_created, ports
+
+        nodes.append(node)
+
+    nodes = [filter_fields(node, fields) for node in nodes]
+
+    return nodes
+
+def GetTags(slicename,node_id):
+    return {}
+
+def GetSites():
+    ps_sites = Site.objects.all()
+    sites = []
+    for ps_site in ps_sites:
+        slice_ids=[]
+        for ps_slice in ps_site.slices.all():
+            slice_ids.append(pl_slice_id(ps_slice))
+
+        node_ids=[]
+        for ps_node in ps_site.nodes.all():
+            node_ids.append(ps_id_to_pl_id(ps_node.id))
+
+
+        site = {"site_id": ps_id_to_pl_id(ps_site.id),
+                "node_ids": node_ids,
+                "pcu_ids": [],
+                "max_slices": 100,
+                "max_slivers": 1000,
+                "is_public": False,
+                "peer_site_id": None,
+                "abbrebiated_name": ps_site.abbreviated_name,
+                "address_ids": [],
+                "name": ps_site.name,
+                "url": None,
+                "site_tag_ids": [],
+                "enabled": True,
+                "longitude": float(ps_site.location.longitude),
+                "latitude": float(ps_site.location.latitude),
+                "slice_ids": slice_ids,
+                "login_base": ps_site.login_base,
+                "peer_id": None}
+
+                # last_updated, ext_consortium_id, person_ids, date_created
+
+        sites.append(site)
+
+    return sites
+
+def GetInterfaces(slicename, node_ids):
+    interfaces = []
+    ps_slices = Slice.objects.filter(name=slicename)
+    for ps_slice in ps_slices:
+        for ps_sliver in ps_slice.slivers.all():
+            node_id = ps_id_to_pl_id(ps_sliver.node_id)
+            if node_id in node_ids:
+                ps_node = ps_sliver.node
+                interface = {"node_id": node_id,
+                             "ip": socket.gethostbyname(ps_node.name),
+                             "broadcast": None,
+                             "mac": "11:22:33:44:55:66",
+                             "bwlimit": None,
+                             "network": None,
+                             "is_primary": True,
+                             "dns1": None,
+                             "hostname": None,
+                             "netmask": None,
+                             "interface_tag_ids": [],
+                             "interface_id": node_id,     # assume each node has only one interface
+                             "gateway": None,
+                             "dns2": None,
+                             "type": "ipv4",
+                             "method": "dhcp"}
+                interfaces.append(interface)
+    return interfaces
+
+def GetConfiguration(name):
+    slicename = name["name"]
+    if "node_id" in name:
+        node_id = name["node_id"]
+    else:
+        node_id = 0
+
+    node_sliver_tags = GetTags(slicename, node_id)
+
+    slices = GetSlices({"name": slicename})
+    perhost = {}
+    allinterfaces = {}
+    hostipmap = {}
+    nodes = []
+    if len(slices)==1:
+        slice = slices[0]
+        node_ids = slice['node_ids']
+        nodes = GetNodes(node_ids, ['hostname', 'node_id', 'site_id'])
+        nodemap = {}
+        for node in nodes:
+            nodemap[node['node_id']]=node['hostname']
+
+        interfaces = GetInterfaces(slicename, node_ids)
+        hostipmap = {}
+        for interface in interfaces:
+            if nodemap[interface['node_id']] not in allinterfaces:
+                allinterfaces[nodemap[interface['node_id']]] = []
+            interface['interface_tags'] = []
+            allinterfaces[nodemap[interface['node_id']]].append(interface)
+            if interface['is_primary']:
+                hostipmap[nodemap[interface['node_id']]] = interface['ip']
+
+        for nid in node_ids:
+            sliver_tags = GetTags(slicename,nid)
+            perhost[nodemap[nid]] = sliver_tags
+
+    slivers = GetSlices()
+    if node_id != 0:
+        slivers = [slice for slice in slivers if (node_id in slice["node_ids"])]
+
+    sites = GetSites()
+    for site in sites:
+        site["site_tags"] = []
+
+    timestamp = int(time.time())
+    return {'version': 3,
+            'timestamp': timestamp,
+            'configuration': node_sliver_tags,
+            'allconfigurations':perhost,
+            'hostipmap':hostipmap,
+            'slivers': slivers,
+            'interfaces': allinterfaces,
+            'sites': sites,
+            'nodes': nodes}
+
+if __name__ == '__main__':
+    slices = GetSlices()
+    nodes = GetNodes()
+
+    if ("-d" in sys.argv):
+        config = GetConfiguration({"name": "princeton_coblitz"})
+        print config
+        print slices
+        print nodes
+    else:
+        configs={}
+        for slicename in ["princeton_vcoblitz"]:
+            configs[slicename] = GetConfiguration({"name": slicename})
+
+        file("planetstack_config","w").write(json.dumps(configs))
+        file("planetstack_slices","w").write(json.dumps(slices))
+        file("planetstack_nodes","w").write(json.dumps(nodes))
diff --git a/planetstack/tests/networktest.py b/planetstack/tests/networktest.py
new file mode 100644
index 0000000..7f3cf70
--- /dev/null
+++ b/planetstack/tests/networktest.py
@@ -0,0 +1,195 @@
+"""
+    Network Data Model Test
+
+    1) Create a slice1
+    2) Create sliver1 on slice1
+    3) Verify one quantum network created for sliver1
+    4) Create a private network, network1
+    5) Connect network1 to slice1
+    6) Create sliver1_2 on slice1
+    7) Verify two quantum networks created for sliver1_2
+"""
+
+import os
+import json
+import sys
+import time
+
+sys.path.append("/opt/planetstack")
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+from openstack.manager import OpenStackManager
+from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+
+from planetstacktest import PlanetStackTest, fail_unless, fail
+
+class NetworkTest(PlanetStackTest):
+    def __init__(self):
+        PlanetStackTest.__init__(self)
+
+    def wait_for_ports(self, sliver, count=1, max_time=120):
+        print "waiting for %d ports on %s" % (count, str(sliver))
+        while max_time>0:
+            ports = self.manager.driver.shell.quantum.list_ports(device_id=sliver.instance_id)["ports"]
+            if len(ports)==count:
+                return ports
+
+            fail_unless(len(ports)<=count, "too many ports")
+
+            time.sleep(10)
+            max_time = max_time - 10
+
+        fail("timed out while waiting for port creation")
+
+    def ports_to_networks(self, ports):
+        networks = []
+        for port in ports:
+            port_networks = self.manager.driver.shell.quantum.list_networks(id=port["network_id"])["networks"]
+            for network in port_networks:
+                if not (network in networks):
+                    networks.append(network)
+        return networks
+
+    def ports_to_network_names(self, ports):
+        network_names = []
+        for network in self.ports_to_networks(ports):
+            network_names.append(network["name"])
+        return network_names
+
+    def verify_network_names(self, ports, network_names):
+        port_network_names = sorted(self.ports_to_network_names(ports))
+        network_names = sorted(network_names)
+        fail_unless(port_network_names == network_names, "mismatched network names: %s != %s" % (str(port_network_names), str(network_names)))
+        print "   verified network ports to", ",".join(port_network_names)
+
+    def test_slice1(self):
+        slice1Name = self.make_slice_name()
+        slice1 = Slice(name = slice1Name,
+                       omf_friendly=True,
+                       site=self.testSite,
+                       creator=self.testUser)
+        slice1=self.save_and_wait_for_enacted(slice1, nonempty_fields=["tenant_id"])
+
+        sliver1 = Sliver(image = self.testImage,
+                         creator=self.testUser,
+                         slice=slice1,
+                         node=self.testNode,
+                         deploymentNetwork=self.testDeployment)
+        sliver1=self.save_and_wait_for_enacted(sliver1, nonempty_fields=["instance_id", "ip"])
+
+        # sliver1 should have only one port, its private network
+        ports = self.wait_for_ports(sliver1, count=1)
+        self.verify_network_names(ports, [slice1.name])
+
+        network1 = Network(name = slice1Name + "-pvt",
+                           template = self.get_network_template("private"),
+                           owner = slice1)
+        network1=self.save_and_wait_for_enacted(network1, nonempty_fields=["network_id", "subnet_id", "router_id", "subnet"])
+
+        network1_slice1 = NetworkSlice(network=network1, slice=slice1)
+        network1_slice1.save() # does not need to be enacted
+
+        sliver1_2 = Sliver(image = self.testImage,
+                           creator=self.testUser,
+                           slice=slice1,
+                           node=self.testNode,
+                           deploymentNetwork=self.testDeployment)
+        sliver1_2=self.save_and_wait_for_enacted(sliver1_2, nonempty_fields=["instance_id", "ip"])
+
+        ports = self.wait_for_ports(sliver1_2, count=2)
+        self.verify_network_names(ports, [slice1.name, network1.name])
+
+        self.slice1 = slice1
+        self.network1 = network1
+
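+    # Same as test_slice1, except the private network is connected before
+    # the first sliver is created, so that sliver should come up with two
+    # ports immediately.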
+    def test_slice2(self):
+        slice2Name = self.make_slice_name()
+        slice2 = Slice(name = slice2Name,
+                       omf_friendly=True,
+                       site=self.testSite,
+                       creator=self.testUser)
+        slice2=self.save_and_wait_for_enacted(slice2, nonempty_fields=["tenant_id"])
+
+        network2 = Network(name = slice2Name + "-pvt",
+                           template = self.get_network_template("private"),
+                           owner = slice2)
+        network2=self.save_and_wait_for_enacted(network2, nonempty_fields=["network_id", "subnet_id", "router_id", "subnet"])
+
+        network2_slice2 = NetworkSlice(network=network2, slice=slice2)
+        network2_slice2.save() # does not need to be enacted
+
+        sliver2_1 = Sliver(image = self.testImage,
+                           creator=self.testUser,
+                           slice=slice2,
+                           node=self.testNode,
+                           deploymentNetwork=self.testDeployment)
+        sliver2_1=self.save_and_wait_for_enacted(sliver2_1, nonempty_fields=["instance_id", "ip"])
+
+        ports = self.wait_for_ports(sliver2_1, count=2)
+        self.verify_network_names(ports, [slice2.name, network2.name])
+
+        self.slice2 = slice2
+        self.network2 = network2
+
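+    # Share network2 with slice1; a new sliver on slice1 should then get
+    # ports on slice1's default network, network1, and network2.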
+    def test_shared_private_net(self):
+        # connect network2 to slice1
+        self.network2.permittedSlices.add(self.slice1)
+        network2_slice1 = NetworkSlice(network=self.network2, slice=self.slice1)
+        network2_slice1.save()
+
+        sliver1_3 = Sliver(image = self.testImage,
+                           creator=self.testUser,
+                           slice=self.slice1,
+                           node=self.testNode,
+                           deploymentNetwork=self.testDeployment)
+        sliver1_3=self.save_and_wait_for_enacted(sliver1_3, nonempty_fields=["instance_id", "ip"])
+
+        ports = self.wait_for_ports(sliver1_3, count=3)
+        self.verify_network_names(ports, [self.slice1.name, self.network1.name, self.network2.name])
+
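+    # The private-nat template attaches slivers to the shared "nat-net"
+    # rather than creating a per-network router, so router_id stays empty.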
+    def test_nat_net(self):
+        slice3Name = self.make_slice_name()
+        slice3 = Slice(name = slice3Name,
+                       omf_friendly=True,
+                       site=self.testSite,
+                       creator=self.testUser)
+        slice3=self.save_and_wait_for_enacted(slice3, nonempty_fields=["tenant_id"])
+
+        network3 = Network(name = slice3Name + "-nat",
+                           template = self.get_network_template("private-nat"),
+                           owner = slice3)
+        # note that router_id will not be filled in for nat-net, since nat-net has no routers
+        network3=self.save_and_wait_for_enacted(network3, nonempty_fields=["network_id", "subnet_id", "subnet"])
+
+        network3_slice3 = NetworkSlice(network=network3, slice=slice3)
+        network3_slice3.save() # does not need to be enacted
+
+        sliver3_1 = Sliver(image = self.testImage,
+                           creator=self.testUser,
+                           slice=slice3,
+                           node=self.testNode,
+                           deploymentNetwork=self.testDeployment)
+        sliver3_1=self.save_and_wait_for_enacted(sliver3_1, nonempty_fields=["instance_id", "ip"])
+
+        ports = self.wait_for_ports(sliver3_1, count=2)
+        self.verify_network_names(ports, [slice3.name, "nat-net"])
+
+    def run(self):
+        self.setup()
+        try:
+            self.test_slice1()
+            self.test_slice2()
+            self.test_shared_private_net()
+            self.test_nat_net()
+            print "SUCCESS"
+        finally:
+            self.cleanup()
+
+def main():
+    NetworkTest().run()
+
+if __name__ == "__main__":
+    main()
diff --git a/planetstack/tests/planetstacktest.py b/planetstack/tests/planetstacktest.py
new file mode 100644
index 0000000..77ed95f
--- /dev/null
+++ b/planetstack/tests/planetstacktest.py
@@ -0,0 +1,94 @@
+import os
+import json
+import sys
+import time
+
+sys.path.append("/opt/planetstack")
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+from openstack.manager import OpenStackManager
+from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+
+TEST_SITE_NAME = "Princeton University"
+TEST_USER_EMAIL = "sbaker@planetstack.org"
+TEST_IMAGE_NAME = "Fedora 16 LXC rev 1.3"
+TEST_NODE_NAME = "viccidev3.cs.princeton.edu"
+TEST_DEPLOYMENT_NAME = "VICCI"
+
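+# Minimal assertion helpers: fail() prints a message and exits nonzero;
+# fail_unless() is a simple assert that calls fail() on a false condition.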
+def fail(msg):
+    print msg
+    sys.exit(-1)
+
+def fail_unless(condition, msg):
+    if not condition:
+        fail(msg)
+
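+# Common scaffolding for the PlanetStack tests: looks up the fixed test
+# fixtures (site, user, image, node, deployment), saves objects and waits
+# for them to be enacted, and deletes everything it created during cleanup.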
+class PlanetStackTest:
+    def __init__(self):
+        self.objs_saved = []
+        self.counter = 0
+
+    def setup(self):
+        self.manager = OpenStackManager()
+
+        print "getting test site"
+        self.testSite = Site.objects.get(name=TEST_SITE_NAME)
+
+        print "getting test user"
+        self.testUser = User.objects.get(email=TEST_USER_EMAIL)
+
+        print "getting test image"
+        self.testImage = Image.objects.get(name=TEST_IMAGE_NAME)
+
+        print "getting test node"
+        self.testNode = Node.objects.get(name=TEST_NODE_NAME)
+
+        print "getting test deployment"
+        self.testDeployment = Deployment.objects.get(name=TEST_DEPLOYMENT_NAME)
+
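+    # Save a model object, then poll until its enacted timestamp is set and
+    # is at least as new as its updated timestamp; optionally also wait for
+    # the listed backend-populated fields to become nonempty.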
+    def save_and_wait_for_enacted(self, x, nonempty_fields=None):
+        # use None rather than a mutable list as the default argument
+        nonempty_fields = nonempty_fields or []
+        print "saving", x.__class__.__name__, str(x)
+        x.save()
+        self.objs_saved.append(x)
+        print "   waiting for", str(x), "to be enacted"
+        tStart = time.time()
+        while True:
+            new_x = x.__class__.objects.get(id=x.id)
+            if (new_x.enacted is not None) and (new_x.enacted >= new_x.updated):
+                print "  ", str(x), "has been enacted"
+                break
+            time.sleep(5)
+
+        if nonempty_fields:
+            print "   waiting for", ", ".join(nonempty_fields), "to be nonempty"
+            while True:
+                new_x = x.__class__.objects.get(id=x.id)
+                keep_waiting = False
+                for field in nonempty_fields:
+                    if not getattr(new_x, field, None):
+                        keep_waiting = True
+                if not keep_waiting:
+                    break
+                # sleep between polls rather than hammering the database
+                time.sleep(5)
+
+        print "   saved and enacted in %d seconds" % int(time.time() - tStart)
+
+        return new_x
+
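+    # Build a unique slice name from the current time plus a per-run counter.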
+    def make_slice_name(self):
+        self.counter = self.counter + 1
+        return "test-" + str(time.time()) + "." + str(self.counter)
+
+    def get_network_template(self,name):
+        template = NetworkTemplate.objects.get(name=name)
+        return template
+
+    def cleanup(self):
+        print "cleaning up"
+        print "press return to delete the objects created by this test"
+        sys.stdin.readline()
+        for obj in self.objs_saved:
+            try:
+                print "  deleting", str(obj)
+                obj.delete()
+            except Exception:
+                print "failed to delete", str(obj)
diff --git a/planetstack/tests/slivertest.py b/planetstack/tests/slivertest.py
new file mode 100644
index 0000000..271fe5b
--- /dev/null
+++ b/planetstack/tests/slivertest.py
@@ -0,0 +1,51 @@
+"""
+    Basic Sliver Test
+
+    1) Create a slice1
+    2) Create sliver1 on slice1
+"""
+
+import os
+import json
+import sys
+import time
+
+sys.path.append("/opt/planetstack")
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+from openstack.manager import OpenStackManager
+from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice
+
+from planetstacktest import PlanetStackTest, fail_unless
+
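+# Smoke test: a slice should be enacted with a tenant_id, and a sliver on
+# it should be enacted with an instance_id and ip.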
+class SliverTest(PlanetStackTest):
+    def __init__(self):
+        PlanetStackTest.__init__(self)
+
+    def run_sliver1(self):
+        slice1Name = self.make_slice_name()
+        slice1 = Slice(name = slice1Name,
+                       omf_friendly=True,
+                       site=self.testSite,
+                       creator=self.testUser)
+        slice1=self.save_and_wait_for_enacted(slice1, nonempty_fields=["tenant_id"])
+
+        sliver1 = Sliver(image = self.testImage,
+                         creator=self.testUser,
+                         slice=slice1,
+                         node=self.testNode,
+                         deploymentNetwork=self.testDeployment)
+        sliver1=self.save_and_wait_for_enacted(sliver1, nonempty_fields=["instance_id", "ip"])
+
+    def run(self):
+        self.setup()
+        try:
+            self.run_sliver1()
+        finally:
+            self.cleanup()
+
+def main():
+    SliverTest().run()
+
+if __name__ == "__main__":
+    main()