Add EC2 observer (WIP)
diff --git a/planetstack/ec2_observer/__init__.py b/planetstack/ec2_observer/__init__.py
new file mode 100644
index 0000000..d9a63be
--- /dev/null
+++ b/planetstack/ec2_observer/__init__.py
@@ -0,0 +1,32 @@
+from planetstack.config import Config
+
+try:
+ observer_disabled = Config().observer_disabled
+except:
+ observer_disabled = False
+
+print_once = True
+
+if (not observer_disabled):
+ from .event_manager import EventSender
+
+ def notify_observer(model=None, delete=False, pk=None, model_dict={}):
+ try:
+ if (model and delete):
+ if hasattr(model,"__name__"):
+ modelName = model.__name__
+ else:
+ modelName = model.__class__.__name__
+ EventSender().fire(delete_flag = delete, model = modelName, pk = pk, model_dict=model_dict)
+ else:
+ EventSender().fire()
+ except Exception,e:
+ print "Exception in Observer. This should not disrupt the front end. %s"%str(e)
+
+else:
+ def notify_observer(model=None, delete=False, pk=None, model_dict={}):
+ global print_once
+ if (print_once):
+ print "The observer is disabled"
+ print_once = False
+ return
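
For context, a minimal sketch of how notify_observer is expected to be wired up on the Django side (hypothetical wiring; the real signal hookup lives in the core app, not in this file):

    # hypothetical wiring sketch -- illustrates how notify_observer gets called
    from django.db.models.signals import post_save, post_delete
    from django.dispatch import receiver
    from django.forms.models import model_to_dict
    from ec2_observer import notify_observer
    from core.models import Slice

    @receiver(post_save, sender=Slice)
    def slice_saved(sender, instance, **kwargs):
        # an ordinary save just pokes the observer awake
        notify_observer()

    @receiver(post_delete, sender=Slice)
    def slice_deleted(sender, instance, **kwargs):
        # deletions carry enough state for a Deleter to act on later
        notify_observer(model=instance, delete=True, pk=instance.pk,
                        model_dict=model_to_dict(instance))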
diff --git a/planetstack/ec2_observer/aws_lib.py b/planetstack/ec2_observer/aws_lib.py
new file mode 100644
index 0000000..e116295
--- /dev/null
+++ b/planetstack/ec2_observer/aws_lib.py
@@ -0,0 +1,18 @@
+import os
+import json
+
+class AwsException(Exception):
+ pass
+
+def aws_run(cmd):
+ cmd = 'aws %s'%cmd
+ pipe = os.popen(cmd)
+ output_str = pipe.read()
+
+ if (not pipe.close()):
+ output = json.loads(output_str)
+ return output
+ else:
+ raise AwsException("Error running command: %s"%cmd)
+
+
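aws_run shells out to the AWS CLI and parses its JSON output. A usage sketch (assumes the aws CLI is installed and configured with credentials):

    # list EC2 availability-zone names via the CLI wrapper above
    from ec2_observer.aws_lib import aws_run, AwsException

    try:
        result = aws_run('ec2 describe-availability-zones')
        print "zones:", [z['ZoneName'] for z in result['AvailabilityZones']]
    except AwsException, e:
        print "CLI call failed:", e

Note that os.popen only captures stdout; switching to subprocess would also surface stderr when a command fails.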
diff --git a/planetstack/ec2_observer/backend.py b/planetstack/ec2_observer/backend.py
new file mode 100644
index 0000000..5a00e71
--- /dev/null
+++ b/planetstack/ec2_observer/backend.py
@@ -0,0 +1,24 @@
+import threading
+import time
+from ec2_observer.event_loop import PlanetStackObserver
+from ec2_observer.event_manager import EventListener
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class Backend:
+
+ def run(self):
+ try:
+ # start the openstack observer
+ observer = PlanetStackObserver()
+ observer_thread = threading.Thread(target=observer.run)
+ observer_thread.start()
+
+ # start event listener
+ event_manager = EventListener(wake_up=observer.wake_up)
+ event_manager_thread = threading.Thread(target=event_manager.run)
+ event_manager_thread.start()
+ except:
+ logger.log_exc("Exception in child thread")
+
diff --git a/planetstack/ec2_observer/deleter.py b/planetstack/ec2_observer/deleter.py
new file mode 100644
index 0000000..e088558
--- /dev/null
+++ b/planetstack/ec2_observer/deleter.py
@@ -0,0 +1,16 @@
+import os
+import base64
+from planetstack.config import Config
+
+class Deleter:
+ model=None # Must be overridden
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def call(self, pk, model_dict):
+ # Fetch object from PlanetStack db and delete it
+ pass
+
+ def __call__(self, *args, **kwargs):
+ return self.call(*args, **kwargs)
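
Concrete deleters subclass this and register a model name; a minimal example following the contract above (ImageDeleter is hypothetical, not part of this commit):

    # minimal concrete Deleter, matching the base-class contract
    from core.models import Image
    from ec2_observer.deleter import Deleter

    class ImageDeleter(Deleter):
        model = 'Image'    # must match the model class name sent by notify_observer

        def call(self, pk, model_dict):
            image = Image.objects.get(pk=pk)
            # back-end cleanup (e.g. deregistering an AMI) would go here
            image.delete()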
diff --git a/planetstack/ec2_observer/deleters/__init__.py b/planetstack/ec2_observer/deleters/__init__.py
new file mode 100755
index 0000000..9cfd951
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/__init__.py
@@ -0,0 +1,18 @@
+import os
+
+deleters = {}
+_path = os.path.join('.',os.path.dirname(__file__))
+
+_files = os.listdir(_path)
+_files = filter(lambda x:x.endswith('deleter.py'),_files)
+_files = map(lambda x:x[:-3],_files) # strip the ".py" suffix; rstrip('.py') would also eat trailing 'p'/'y' characters
+
+"""
+for f in _files:
+ m = __import__(f)
+ deleter = getattr(m,f.title().replace('_',''))
+ try:
+ deleters[deleter.model].append(deleter)
+ except KeyError:
+ deleters[deleter.model]=[deleter]
+"""
diff --git a/planetstack/ec2_observer/deleters/network_deleter.py b/planetstack/ec2_observer/deleters/network_deleter.py
new file mode 100644
index 0000000..0d21fda
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/network_deleter.py
@@ -0,0 +1,19 @@
+from core.models import Network, NetworkDeployments
+from observer.deleter import Deleter
+from observer.deleters.network_deployment_deleter import NetworkDeploymentDeleter
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class NetworkDeleter(Deleter):
+ model='Network'
+
+ def call(self, pk):
+ network = Network.objects.get(pk=pk)
+ network_deployment_deleter = NetworkDeploymentDeleter()
+ for network_deployment in NetworkDeployments.objects.filter(network=network):
+ try:
+ network_deployment_deleter(network_deployment.id)
+ except:
+ logger.log_exc("Failed to delte network deployment %s" % network_deployment)
+ network.delete()
diff --git a/planetstack/ec2_observer/deleters/network_deployment_deleter.py b/planetstack/ec2_observer/deleters/network_deployment_deleter.py
new file mode 100644
index 0000000..72b10b2
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/network_deployment_deleter.py
@@ -0,0 +1,21 @@
+from core.models import Network, NetworkDeployments
+from observer.deleter import Deleter
+from openstack.driver import OpenStackDriver
+
+class NetworkDeploymentDeleter(Deleter):
+ model='NetworkDeployment'
+
+ def call(self, pk):
+ network_deployment = NetworkDeployments.objects.get(pk=pk)
+ driver = OpenStackDriver().client_driver(caller=network_deployment.network.owner.creator,
+ tenant=network_deployment.network.owner.name,
+ deployment=network_deployment.deployment.name)
+ if (network_deployment.router_id) and (network_deployment.subnet_id):
+ driver.delete_router_interface(network_deployment.router_id, network_deployment.subnet_id)
+ if network_deployment.subnet_id:
+ driver.delete_subnet(network_deployment.subnet_id)
+ if network_deployment.router_id:
+ driver.delete_router(network_deployment.router_id)
+ if network_deployment.net_id:
+ driver.delete_network(network_deployment.net_id)
+ network_deployment.delete()
diff --git a/planetstack/ec2_observer/deleters/network_sliver_deleter.py b/planetstack/ec2_observer/deleters/network_sliver_deleter.py
new file mode 100644
index 0000000..71ba040
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/network_sliver_deleter.py
@@ -0,0 +1,13 @@
+from core.models import NetworkSliver
+from observer.deleter import Deleter
+
+class NetworkSliverDeleter(Deleter):
+ model='NetworkSliver'
+
+ def call(self, pk):
+ network_sliver = NetworkSliver.objects.get(pk=pk)
+ # handle openstack delete
+
+ network_sliver.delete()
+
+
diff --git a/planetstack/ec2_observer/deleters/site_deleter.py b/planetstack/ec2_observer/deleters/site_deleter.py
new file mode 100644
index 0000000..c97dee1
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/site_deleter.py
@@ -0,0 +1,14 @@
+from core.models import Site, SiteDeployments
+from observer.deleter import Deleter
+from observer.deleters.site_deployment_deleter import SiteDeploymentDeleter
+
+class SiteDeleter(Deleter):
+ model='Site'
+
+ def call(self, pk):
+ site = Site.objects.get(pk=pk)
+ site_deployments = SiteDeployments.objects.filter(site=site)
+ site_deployment_deleter = SiteDeploymentDeleter()
+ for site_deployment in site_deployments:
+ site_deployment_deleter(site_deployment.id)
+ site.delete()
diff --git a/planetstack/ec2_observer/deleters/site_deployment_deleter.py b/planetstack/ec2_observer/deleters/site_deployment_deleter.py
new file mode 100644
index 0000000..fa97be2
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/site_deployment_deleter.py
@@ -0,0 +1,12 @@
+from core.models import Site, SiteDeployments
+from observer.deleter import Deleter
+
+class SiteDeploymentDeleter(Deleter):
+ model='SiteDeployments'
+
+ def call(self, pk):
+ site_deployment = SiteDeployments.objects.get(pk=pk)
+ if site_deployment.tenant_id:
+ driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+ driver.delete_tenant(site_deployment.tenant_id)
+ site_deployment.delete()
diff --git a/planetstack/ec2_observer/deleters/slice_deleter.py b/planetstack/ec2_observer/deleters/slice_deleter.py
new file mode 100644
index 0000000..90b58c3
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/slice_deleter.py
@@ -0,0 +1,19 @@
+from core.models import Slice, SliceDeployments, User
+from observer.deleter import Deleter
+from observer.deleters.slice_deployment_deleter import SliceDeploymentDeleter
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SliceDeleter(Deleter):
+ model='Slice'
+
+ def call(self, pk):
+ slice = Slice.objects.get(pk=pk)
+ slice_deployment_deleter = SliceDeploymentDeleter()
+ for slice_deployment in SliceDeployments.objects.filter(slice=slice):
+ try:
+ slice_deployment_deleter(slice_deployment.id)
+ except:
+ logger.log_exc("Failed to delete slice_deployment %s" % slice_deployment)
+ slice.delete()
diff --git a/planetstack/ec2_observer/deleters/slice_deployment_deleter.py b/planetstack/ec2_observer/deleters/slice_deployment_deleter.py
new file mode 100644
index 0000000..33e0836
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/slice_deployment_deleter.py
@@ -0,0 +1,34 @@
+from core.models import Slice, SliceDeployments, User
+from observer.deleter import Deleter
+from openstack.driver import OpenStackDriver
+
+class SliceDeploymentDeleter(Deleter):
+ model='SliceDeployments'
+
+ def call(self, pk):
+ slice_deployment = SliceDeployments.objects.get(pk=pk)
+ user = User.objects.get(id=slice_deployment.slice.creator.id)
+ driver = OpenStackDriver().admin_driver(deployment=slice_deployment.deployment.name)
+ client_driver = driver.client_driver(caller=user,
+ tenant=slice_deployment.slice.name,
+ deployment=slice_deployment.deployment.name)
+
+ if slice_deployment.router_id and slice_deployment.subnet_id:
+ client_driver.delete_router_interface(slice_deployment.router_id, slice_deployment.subnet_id)
+ if slice_deployment.subnet_id:
+ client_driver.delete_subnet(slice_deployment.subnet_id)
+ if slice_deployment.router_id:
+ client_driver.delete_router(slice_deployment.router_id)
+ if slice_deployment.network_id:
+ client_driver.delete_network(slice_deployment.network_id)
+ if slice_deployment.tenant_id:
+ driver.delete_tenant(slice_deployment.tenant_id)
+ # delete external route
+ #subnet = None
+ #subnets = client_driver.shell.quantum.list_subnets()['subnets']
+ #for snet in subnets:
+ # if snet['id'] == slice_deployment.subnet_id:
+ # subnet = snet
+ #if subnet:
+ # driver.delete_external_route(subnet)
+ slice_deployment.delete()
diff --git a/planetstack/ec2_observer/deleters/sliver_deleter.py b/planetstack/ec2_observer/deleters/sliver_deleter.py
new file mode 100644
index 0000000..097f0f7
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/sliver_deleter.py
@@ -0,0 +1,14 @@
+from core.models import Sliver, SliceDeployments
+from observer.deleter import Deleter
+
+class SliverDeleter(Deleter):
+ model='Sliver'
+
+ def call(self, pk):
+ sliver = Sliver.objects.get(pk=pk)
+ if sliver.instance_id:
+ driver = self.driver.client_driver(caller=sliver.creator,
+ tenant=sliver.slice.name,
+ deployment=sliver.deploymentNetwork.name)
+ driver.destroy_instance(sliver.instance_id)
+ sliver.delete()
diff --git a/planetstack/ec2_observer/deleters/user_deleter.py b/planetstack/ec2_observer/deleters/user_deleter.py
new file mode 100644
index 0000000..3573f8d
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/user_deleter.py
@@ -0,0 +1,13 @@
+from core.models import User, UserDeployments
+from observer.deleter import Deleter
+from observer.deleters.user_deployment_deleter import UserDeploymentDeleter
+
+class UserDeleter(Deleter):
+ model='User'
+
+ def call(self, pk):
+ user = User.objects.get(pk=pk)
+ user_deployment_deleter = UserDeploymentDeleter()
+ for user_deployment in UserDeployments.objects.filter(user=user):
+ user_deployment_deleter(user_deployment.id)
+ user.delete()
diff --git a/planetstack/ec2_observer/deleters/user_deployment_deleter.py b/planetstack/ec2_observer/deleters/user_deployment_deleter.py
new file mode 100644
index 0000000..49d349b
--- /dev/null
+++ b/planetstack/ec2_observer/deleters/user_deployment_deleter.py
@@ -0,0 +1,12 @@
+from core.models import User, UserDeployments
+from observer.deleter import Deleter
+
+class UserDeploymentDeleter(Deleter):
+ model='UserDeployment'
+
+ def call(self, pk):
+ user_deployment = UserDeployments.objects.get(pk=pk)
+ if user_deployment.user.kuser_id:
+ driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
+ driver.delete_user(user_deployment.user.kuser_id)
+ user_deployment.delete()
diff --git a/planetstack/ec2_observer/dmdot b/planetstack/ec2_observer/dmdot
new file mode 100755
index 0000000..2d95e9d
--- /dev/null
+++ b/planetstack/ec2_observer/dmdot
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+
+import os
+import pdb
+import sys
+import json
+
+sys.path.append('.')
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
+
+from django.db.models.fields.related import ForeignKey
+from core.models import *
+
+try:
+ output = sys.argv[1]
+except:
+ output = '-json'
+
+g = globals()
+model_classes = []
+class_names = []
+for c in g.values():
+ if type(c)==type(PlCoreBase):
+ model_classes.append(c)
+ class_names.append(c.__name__)
+
+
+if (output=='-dot'):
+ print "digraph plstack {";
+ for c in model_classes:
+ fields = c._meta.fields
+ for f in fields:
+ if type(f)==ForeignKey and f.name.title() in class_names:
+ print '\t"%s"->"%s";'%(c.__name__,f.name.title())
+ print "}\n";
+elif (output=='-json'):
+ d = {}
+ for c in model_classes:
+ fields = c._meta.fields
+ for f in fields:
+ if type(f)==ForeignKey and f.name.title() in class_names:
+ try:
+ d[c.__name__].append(f.name.title())
+ except KeyError:
+ d[c.__name__]=[f.name.title()]
+ print json.dumps(d,indent=4)
+
+
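
Usage sketch: "./dmdot -dot | dot -Tpng > models.png" renders the model graph with Graphviz, while the default -json mode prints a dependency map in the same shape as planetstack.deps below, e.g.:

    {
        "Slice": [
            "Site"
        ],
        "Sliver": [
            "Image",
            "Slice",
            "Node"
        ]
    }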
diff --git a/planetstack/ec2_observer/ec2_backend.py b/planetstack/ec2_observer/ec2_backend.py
new file mode 100644
index 0000000..5a00e71
--- /dev/null
+++ b/planetstack/ec2_observer/ec2_backend.py
@@ -0,0 +1,24 @@
+import threading
+import time
+from ec2_observer.event_loop import PlanetStackObserver
+from ec2_observer.event_manager import EventListener
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class Backend:
+
+ def run(self):
+ try:
+ # start the openstack observer
+ observer = PlanetStackObserver()
+ observer_thread = threading.Thread(target=observer.run)
+ observer_thread.start()
+
+ # start event listener
+ event_manager = EventListener(wake_up=observer.wake_up)
+ event_manager_thread = threading.Thread(target=event_manager.run)
+ event_manager_thread.start()
+ except:
+ logger.log_exc("Exception in child thread")
+
diff --git a/planetstack/ec2_observer/error_mapper.py b/planetstack/ec2_observer/error_mapper.py
new file mode 100644
index 0000000..a7daa59
--- /dev/null
+++ b/planetstack/ec2_observer/error_mapper.py
@@ -0,0 +1,25 @@
+from planetstack.config import Config
+from util.logger import Logger, logging, logger
+
+class ErrorMapper:
+ def __init__(self, error_map_file):
+ self.error_map = {}
+ try:
+ error_map_lines = open(error_map_file).read().splitlines()
+ for l in error_map_lines:
+ if (not l.startswith('#')):
+ splits = l.split('->')
+ k,v = map(lambda i:i.strip(),splits) # strip both sides of the '->' separator
+ self.error_map[k]=v
+ except:
+ logging.info('Could not read error map')
+
+
+ def map(self, error):
+ return self.error_map.get(error, error) # fall back to the raw error if unmapped
+
+
+
+
+
+
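The expected map file holds one "backend error -> user-facing error" pair per line, with '#' marking comment lines. A usage sketch (the path matches the default used by the event loop below; the messages are illustrative):

    # /opt/planetstack/error_map.txt
    #   # comment lines are skipped
    #   Quota exceeded for instances -> Slice is out of instance quota
    mapper = ErrorMapper('/opt/planetstack/error_map.txt')
    print mapper.map('Quota exceeded for instances')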
diff --git a/planetstack/ec2_observer/event_loop.py b/planetstack/ec2_observer/event_loop.py
new file mode 100644
index 0000000..02725be
--- /dev/null
+++ b/planetstack/ec2_observer/event_loop.py
@@ -0,0 +1,310 @@
+import os
+import imp
+import inspect
+import time
+import traceback
+import commands
+import threading
+import json
+
+from datetime import datetime
+from collections import defaultdict
+from core.models import *
+from django.db.models import F, Q
+#from openstack.manager import OpenStackManager
+from openstack.driver import OpenStackDriver
+from util.logger import Logger, logging, logger
+#from timeout import timeout
+from planetstack.config import Config
+from ec2_observer.steps import *
+from syncstep import SyncStep
+from toposort import toposort
+from ec2_observer.error_mapper import ErrorMapper
+
+debug_mode = False
+
+logger = Logger(level=logging.INFO)
+
+class StepNotReady(Exception):
+ pass
+
+class NoOpDriver:
+ def __init__(self):
+ self.enabled = True
+
+class PlanetStackObserver:
+ #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
+ sync_steps = []
+
+ def __init__(self):
+ # The Condition object that gets signalled by Feefie events
+ self.step_lookup = {}
+ self.load_sync_step_modules()
+ self.load_sync_steps()
+ self.event_cond = threading.Condition()
+
+ self.driver_kind = getattr(Config(), "observer_driver", "openstack")
+ if self.driver_kind=="openstack":
+ self.driver = OpenStackDriver()
+ else:
+ self.driver = NoOpDriver()
+
+ def wait_for_event(self, timeout):
+ self.event_cond.acquire()
+ self.event_cond.wait(timeout)
+ self.event_cond.release()
+
+ def wake_up(self):
+ logger.info('Wake up routine called. Event cond %r'%self.event_cond)
+ self.event_cond.acquire()
+ self.event_cond.notify()
+ self.event_cond.release()
+
+ def load_sync_step_modules(self, step_dir=None):
+ if step_dir is None:
+ if hasattr(Config(), "observer_steps_dir"):
+ step_dir = Config().observer_steps_dir
+ else:
+ step_dir = "/opt/planetstack/observer/steps"
+
+ for fn in os.listdir(step_dir):
+ pathname = os.path.join(step_dir,fn)
+ if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
+ module = imp.load_source(fn[:-3],pathname)
+ for classname in dir(module):
+ c = getattr(module, classname, None)
+
+ # make sure 'c' is a descendent of SyncStep and has a
+ # provides field (this eliminates the abstract base classes
+ # since they don't have a provides)
+
+ if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
+ self.sync_steps.append(c)
+ logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
+ # print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
+
+ def load_sync_steps(self):
+ dep_path = Config().observer_dependency_graph
+ logger.info('Loading model dependency graph from %s' % dep_path)
+ # This contains dependencies between records, not sync steps
+ self.model_dependency_graph = json.loads(open(dep_path).read())
+
+ try:
+ backend_path = Config().observer_pl_dependency_graph
+ logger.info('Loading backend dependency graph from %s' % backend_path)
+ # This contains dependencies between backend records
+ self.backend_dependency_graph = json.loads(open(backend_path).read())
+ except Exception,e:
+ logger.info('Backend dependency graph not loaded')
+ # We can work without a backend graph
+ self.backend_dependency_graph = {}
+
+ provides_dict = {}
+ for s in self.sync_steps:
+ self.step_lookup[s.__name__] = s
+ for m in s.provides:
+ try:
+ provides_dict[m.__name__].append(s.__name__)
+ except KeyError:
+ provides_dict[m.__name__]=[s.__name__]
+
+
+ step_graph = {}
+ for k,v in self.model_dependency_graph.iteritems():
+ try:
+ for source in provides_dict[k]:
+ for m in v:
+ try:
+ for dest in provides_dict[m]:
+ # no deps, pass
+ try:
+ if (dest not in step_graph[source]):
+ step_graph[source].append(dest)
+ except:
+ step_graph[source]=[dest]
+ except KeyError:
+ pass
+
+ except KeyError:
+ pass
+ # no dependencies, pass
+
+ #import pdb
+ #pdb.set_trace()
+ if (self.backend_dependency_graph):
+ backend_dict = {}
+ for s in self.sync_steps:
+ for m in s.serves:
+ backend_dict[m]=s.__name__
+
+ for k,v in self.backend_dependency_graph.iteritems():
+ try:
+ source = backend_dict[k]
+ for m in v:
+ try:
+ dest = backend_dict[m]
+ except KeyError:
+ # this model has no backend step; skip it
+ continue
+ # keep step_graph values as lists, matching the model-graph pass above
+ if (dest not in step_graph.get(source,[])):
+ step_graph.setdefault(source,[]).append(dest)
+
+ except KeyError:
+ pass
+ # no dependencies, pass
+
+ dependency_graph = step_graph
+
+ self.ordered_steps = toposort(dependency_graph, map(lambda s:s.__name__,self.sync_steps))
+ print "Order of steps=",self.ordered_steps
+ self.load_run_times()
+
+
+ def check_duration(self, step, duration):
+ try:
+ if (duration > step.deadline):
+ logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.__name__,duration))
+ except AttributeError:
+ # S doesn't have a deadline
+ pass
+
+ def update_run_time(self, step, deletion):
+ if (not deletion):
+ self.last_run_times[step.__name__]=time.time()
+ else:
+ self.last_deletion_run_times[step.__name__]=time.time()
+
+
+ def check_schedule(self, step, deletion):
+ last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times
+
+ time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
+ try:
+ if (time_since_last_run < step.requested_interval):
+ raise StepNotReady
+ except AttributeError:
+ logger.info('Step %s does not have requested_interval set'%step.__name__)
+ raise StepNotReady
+
+ def load_run_times(self):
+ try:
+ jrun_times = open('/tmp/observer_run_times').read()
+ self.last_run_times = json.loads(jrun_times)
+ except:
+ self.last_run_times={}
+ for e in self.ordered_steps:
+ self.last_run_times[e]=0
+ try:
+ jrun_times = open('/tmp/observer_deletion_run_times').read()
+ self.last_deletion_run_times = json.loads(jrun_times)
+ except:
+ self.last_deletion_run_times={}
+ for e in self.ordered_steps:
+ self.last_deletion_run_times[e]=0
+
+
+
+ def save_run_times(self):
+ run_times = json.dumps(self.last_run_times)
+ open('/tmp/observer_run_times','w').write(run_times)
+
+ deletion_run_times = json.dumps(self.last_deletion_run_times)
+ open('/tmp/observer_deletion_run_times','w').write(deletion_run_times)
+
+ def check_class_dependency(self, step, failed_steps):
+ step.dependencies = []
+ for obj in step.provides:
+ step.dependencies.extend(self.model_dependency_graph.get(obj.__name__, []))
+ for failed_step in failed_steps:
+ if (failed_step in step.dependencies):
+ raise StepNotReady
+
+ def run(self):
+ if not self.driver.enabled:
+ return
+ if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
+ return
+
+ while True:
+ try:
+ error_map_file = getattr(Config(), "error_map_path", "/opt/planetstack/error_map.txt")
+ error_mapper = ErrorMapper(error_map_file)
+
+ logger.info('Waiting for event')
+ tBeforeWait = time.time()
+ self.wait_for_event(timeout=30)
+ logger.info('Observer woke up')
+
+ # Two passes. One for sync, the other for deletion.
+ for deletion in (False,True):
+ logger.info('Deletion pass...' if deletion else 'Creation pass...')
+ # Set of whole steps that failed
+ failed_steps = []
+
+ # Set of individual objects within steps that failed
+ failed_step_objects = set()
+
+ ordered_steps = self.ordered_steps if not deletion else reversed(self.ordered_steps)
+
+ for S in ordered_steps:
+ step = self.step_lookup[S]
+ start_time=time.time()
+
+ sync_step = step(driver=self.driver,error_map=error_mapper)
+ sync_step.__name__ = step.__name__
+ sync_step.dependencies = []
+ try:
+ mlist = sync_step.provides
+
+ for m in mlist:
+ sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
+ except KeyError:
+ pass
+ sync_step.debug_mode = debug_mode
+
+ should_run = False
+ try:
+ # Various checks that decide whether
+ # this step runs or not
+ self.check_class_dependency(sync_step, failed_steps) # dont run Slices if Sites failed
+ self.check_schedule(sync_step,deletion) # dont run sync_network_routes if time since last run < 1 hour
+ should_run = True
+ except StepNotReady:
+ logging.info('Step not ready: %s'%sync_step.__name__)
+ failed_steps.append(sync_step)
+ except Exception,e:
+ logging.error('%r',e)
+ logger.log_exc("sync step failed: %r. Deletion: %r"%(sync_step,deletion))
+ failed_steps.append(sync_step)
+
+ if (should_run):
+ try:
+ logger.info('Executing step %s' % sync_step.__name__)
+
+ # ********* This is the actual sync step
+ failed_objects = sync_step(failed=list(failed_step_objects), deletion=deletion)
+
+ # measure duration only after the step has actually run
+ duration=time.time() - start_time
+ self.check_duration(sync_step, duration)
+ if failed_objects:
+ failed_step_objects.update(failed_objects)
+
+ self.update_run_time(sync_step, deletion)
+ except Exception,e:
+ logging.error('Model step failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
+ logger.log_exc(e)
+ failed_steps.append(sync_step)
+ self.save_run_times()
+ except Exception, e:
+ logging.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
+ logger.log_exc("Exception in observer run loop")
+ traceback.print_exc()
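
To make load_sync_steps concrete, a worked example (step and model names are illustrative; toposort's contract is inferred from its use above):

    # model graph: a Slice record depends on its Site record
    model_deps = {"Slice": ["Site"]}
    # provides_dict, built from each step's `provides` list
    provides_dict = {"Site": ["SyncSites"], "Slice": ["SyncSlices"]}
    # the nested loops in load_sync_steps then produce
    #   step_graph = {"SyncSlices": ["SyncSites"]}
    # and toposort(step_graph, ["SyncSites", "SyncSlices"]) returns an
    # order that runs SyncSites before SyncSlices.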
diff --git a/planetstack/ec2_observer/event_manager.py b/planetstack/ec2_observer/event_manager.py
new file mode 100644
index 0000000..19d9e25
--- /dev/null
+++ b/planetstack/ec2_observer/event_manager.py
@@ -0,0 +1,94 @@
+import threading
+import requests, json
+
+from planetstack.config import Config
+
+import uuid
+import os
+import imp
+import inspect
+import base64
+from fofum import Fofum
+import json
+import traceback
+
+# decorator that marks dispatchable event methods
+def event(func):
+ setattr(func, 'event', func.__name__)
+ return func
+
+class EventHandler:
+ # This code is currently not in use.
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def get_events():
+ events = []
+ for name in dir(EventHandler):
+ attribute = getattr(EventHandler, name)
+ if hasattr(attribute, 'event'):
+ events.append(getattr(attribute, 'event'))
+ return events
+
+ def dispatch(self, event, *args, **kwds):
+ if hasattr(self, event):
+ return getattr(self, event)(*args, **kwds)
+
+
+class EventSender:
+ def __init__(self,user=None,clientid=None):
+ try:
+ user = Config().feefie_client_user
+ except:
+ user = 'pl'
+
+ try:
+ clid = Config().feefie_client_id
+ except:
+ # EventSender has no random_client_id helper; generate an id inline
+ clid = base64.urlsafe_b64encode(os.urandom(12))
+
+
+ self.fofum = Fofum(user=user)
+ self.fofum.make(clid)
+
+ def fire(self,**kwargs):
+ kwargs["uuid"] = str(uuid.uuid1())
+ self.fofum.fire(json.dumps(kwargs))
+
+class EventListener:
+ def __init__(self,wake_up=None):
+ self.handler = EventHandler()
+ self.wake_up = wake_up
+
+ def handle_event(self, payload):
+ payload_dict = json.loads(payload)
+
+ if (self.wake_up):
+ self.wake_up()
+
+ def random_client_id(self):
+ try:
+ return self.client_id
+ except AttributeError:
+ self.client_id = base64.urlsafe_b64encode(os.urandom(12))
+ return self.client_id
+
+ def run(self):
+ # This is our unique client id, to be used when firing and receiving events
+ # It needs to be generated once and placed in the config file
+
+ try:
+ user = Config().feefie_client_user
+ except:
+ user = 'pl'
+
+ try:
+ clid = Config().feefie_client_id
+ except:
+ clid = self.random_client_id()
+
+ f = Fofum(user=user)
+
+ listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
+ listener_thread.start()
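
End to end: a front-end save calls EventSender().fire(...), Feefie relays the payload, and handle_event wakes the observer loop. A sketch (the Fofum API is inferred from its use above):

    from ec2_observer.event_manager import EventSender, EventListener

    def wake_up():
        print "observer poked"

    EventListener(wake_up=wake_up).run()   # spawns the Fofum listener thread

    # elsewhere, typically from notify_observer():
    EventSender().fire(delete_flag=False, model='Slice', pk=1, model_dict={})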
diff --git a/planetstack/ec2_observer/openstacksyncstep.py b/planetstack/ec2_observer/openstacksyncstep.py
new file mode 100644
index 0000000..51b510f
--- /dev/null
+++ b/planetstack/ec2_observer/openstacksyncstep.py
@@ -0,0 +1,14 @@
+import os
+import base64
+from syncstep import SyncStep
+
+class OpenStackSyncStep(SyncStep):
+ """ PlanetStack Sync step for copying data to OpenStack
+ """
+
+ def __init__(self, **args):
+ SyncStep.__init__(self, **args)
+ return
+
+ def __call__(self, **args):
+ return self.call(**args)
diff --git a/planetstack/ec2_observer/planetstack.deps b/planetstack/ec2_observer/planetstack.deps
new file mode 100644
index 0000000..6eae1fc
--- /dev/null
+++ b/planetstack/ec2_observer/planetstack.deps
@@ -0,0 +1,47 @@
+{
+ "Node": [
+ "Site",
+ "Deployment"
+ ],
+ "Slice": [
+ "Site"
+ ],
+ "ReservedResource": [
+ "Sliver"
+ ],
+ "SliceMembership": [
+ "User",
+ "Slice",
+ "Role"
+ ],
+ "NetworkSlice": [
+ "Network",
+ "Slice"
+ ],
+ "Tag": [
+ "Project"
+ ],
+ "User": [
+ "Site"
+ ],
+ "SliceTag": [
+ "Slice"
+ ],
+ "Reservation": [
+ "Slice"
+ ],
+ "NetworkSliver": [
+ "Network",
+ "Sliver"
+ ],
+ "SitePrivilege": [
+ "User",
+ "Site",
+ "Role"
+ ],
+ "Sliver": [
+ "Image",
+ "Slice",
+ "Node"
+ ]
+}
diff --git a/planetstack/ec2_observer/start.sh b/planetstack/ec2_observer/start.sh
new file mode 100755
index 0000000..1e623cd
--- /dev/null
+++ b/planetstack/ec2_observer/start.sh
@@ -0,0 +1 @@
+nohup python ec2_backend.py -C /opt/planetstack/hpc_observer/hpc_observer_config > /dev/null 2>&1 &
diff --git a/planetstack/ec2_observer/steps/__init__.py b/planetstack/ec2_observer/steps/__init__.py
new file mode 100644
index 0000000..eabf46c
--- /dev/null
+++ b/planetstack/ec2_observer/steps/__init__.py
@@ -0,0 +1,15 @@
+#from .sync_external_routes import SyncExternalRoutes
+from .sync_network_slivers import SyncNetworkSlivers
+from .sync_networks import SyncNetworks
+from .sync_network_deployments import SyncNetworkDeployments
+from .sync_site_privileges import SyncSitePrivileges
+from .sync_sites import SyncSites
+from .sync_slice_memberships import SyncSliceMemberships
+from .sync_slices import SyncSlices
+#from .sync_sliver_ips import SyncSliverIps
+from .sync_slivers import SyncSlivers
+from .sync_users import SyncUsers
+from .sync_roles import SyncRoles
+from .sync_mock_nodes import SyncNodes
+from .sync_images import SyncImages
+from .garbage_collector import GarbageCollector
diff --git a/planetstack/ec2_observer/steps/sync_external_routes.py b/planetstack/ec2_observer/steps/sync_external_routes.py
new file mode 100644
index 0000000..1e1a347
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_external_routes.py
@@ -0,0 +1,18 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncExternalRoutes(SyncStep):
+ # XXX what does this provide?
+ provides=[]
+ requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
+
+ def call(self, **args):
+ routes = self.driver.get_external_routes()
+ subnets = self.driver.shell.quantum.list_subnets()['subnets']
+ for subnet in subnets:
+ try:
+ self.driver.add_external_route(subnet, routes)
+ except:
+ logger.log_exc("failed to add external route for subnet %s" % subnet)
diff --git a/planetstack/ec2_observer/steps/sync_images.py b/planetstack/ec2_observer/steps/sync_images.py
new file mode 100644
index 0000000..32b3363
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_images.py
@@ -0,0 +1,32 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from core.models.image import Image
+from ec2_observer.aws_lib import *
+
+class SyncImages(SyncStep):
+ provides=[Image]
+ requested_interval=3600
+
+ def fetch_pending(self):
+ images = Image.objects.all()
+ image_names = [image.name for image in images]
+
+ new_images = []
+
+ # the CLI wraps the list in an 'Images' key
+ aws_images = aws_run('ec2 describe-images')['Images']
+
+ for aws_image in aws_images:
+ if aws_image['Name'] not in image_names:
+ image = Image(image_id=aws_image['ImageId'],
+ name=aws_image['Name'],
+ disk_format='XXX',
+ container_format='XXX')
+ new_images.append(image)
+
+ return new_images
+
+ def sync_record(self, image):
+ image.save()
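
For reference, fetch_pending consumes output shaped like the AWS CLI's describe-images response (abridged):

    {
        "Images": [
            {
                "ImageId": "ami-0abcd1234",
                "Name": "my-image"
            }
        ]
    }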
diff --git a/planetstack/ec2_observer/steps/sync_mock_nodes.py b/planetstack/ec2_observer/steps/sync_mock_nodes.py
new file mode 100644
index 0000000..3cb3dd0
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_mock_nodes.py
@@ -0,0 +1,42 @@
+import os
+import base64
+import random
+from datetime import datetime
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.node import Node
+from core.models.deployment import Deployment
+from core.models.site import Site
+
+class SyncNodes(OpenStackSyncStep):
+ provides=[Node]
+ requested_interval=0
+
+ def fetch_pending(self):
+ # collect local nodes
+ sites = Site.objects.all()
+ # this mock step assumes exactly one Deployment exists
+ one_and_only_deployment = Deployment.objects.all()[0]
+
+ instance_types = 'm1.small | m1.medium | m1.large | m1.xlarge | m3.medium | m3.large | m3.xlarge | m3.2xlarge'.split(' | ')
+
+ all_new_nodes = []
+ for s in sites:
+ node_names = [n.name for n in s.nodes.all()]
+ new_node_names = list(set(instance_types) - set(node_names))
+ new_nodes = []
+ for node_name in new_node_names:
+ node = Node(name=node_name,
+ site=s, deployment=one_and_only_deployment)
+ new_nodes.append(node)
+
+ all_new_nodes.extend(new_nodes)
+
+ return all_new_nodes
+
+
+ def sync_record(self, node):
+ node.save()
+
diff --git a/planetstack/ec2_observer/steps/sync_network_deployments.py b/planetstack/ec2_observer/steps/sync_network_deployments.py
new file mode 100644
index 0000000..d1b51d5
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_network_deployments.py
@@ -0,0 +1,117 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+from core.models.slice import *
+from core.models.slice import Sliver
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncNetworkDeployments(OpenStackSyncStep):
+ requested_interval = 0
+ provides=[Network, NetworkDeployments, Sliver]
+
+ def fetch_pending(self):
+ # network deployments are not visible to users. We must ensure
+ # networks are deployed at all deployments available to their slices.
+ slice_deployments = SliceDeployments.objects.all()
+ slice_deploy_lookup = defaultdict(list)
+ for slice_deployment in slice_deployments:
+ slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
+
+ network_deployments = NetworkDeployments.objects.all()
+ network_deploy_lookup = defaultdict(list)
+ for network_deployment in network_deployments:
+ network_deploy_lookup[network_deployment.network].append(network_deployment.deployment)
+
+ for network in Network.objects.filter():
+ # skip private networks that use translation (e.g. NAT);
+ # private networks with translation 'none' are deployed below
+ if network.template.visibility == 'private' and not network.template.translation == 'none':
+ continue
+ expected_deployments = slice_deploy_lookup[network.owner]
+ for expected_deployment in expected_deployments:
+ if network not in network_deploy_lookup or \
+ expected_deployment not in network_deploy_lookup[network]:
+ nd = NetworkDeployments(network=network, deployment=expected_deployment)
+ nd.save()
+ return NetworkDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def get_next_subnet(self, deployment=None):
+ # limit ourself to 10.0.x.x for now
+ valid_subnet = lambda net: net.startswith('10.0')
+ driver = self.driver.admin_driver(deployment=deployment)
+ subnets = driver.shell.quantum.list_subnets()['subnets']
+ ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+ if valid_subnet(subnet['cidr'])]
+ ints.sort()
+ if ints:
+ last_ip = IPAddress(ints[-1])
+ else:
+ last_ip = IPAddress('10.0.0.0')
+ last_network = IPNetwork(str(last_ip) + "/24")
+ next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
+ return next_network
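+ # e.g. with 10.0.0.0/24 and 10.0.1.0/24 already allocated:
+ # last_ip = 10.0.1.0, last_network = 10.0.1.0/24, and
+ # next_network = 10.0.2.0/24 (base address + 256)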
+
+ def save_network_deployment(self, network_deployment):
+ if not network_deployment.net_id and network_deployment.network.template.sharedNetworkName:
+ network_deployment.net_id = network_deployment.network.template.sharedNetworkId
+
+ if not network_deployment.net_id:
+ network_name = network_deployment.network.name
+
+ # create network
+ os_network = self.driver.create_network(network_name, shared=True)
+ network_deployment.net_id = os_network['id']
+
+ # create router
+ #router = self.driver.create_router(network_name)
+ #network_deployment.router_id = router['id']
+
+ # create subnet
+ next_subnet = self.get_next_subnet(deployment=network_deployment.deployment.name)
+ cidr = str(next_subnet.cidr)
+ ip_version = next_subnet.version
+ start = str(next_subnet[2])
+ end = str(next_subnet[-2])
+ subnet = self.driver.create_subnet(name=network_name,
+ network_id = network_deployment.net_id,
+ cidr_ip = cidr,
+ ip_version = ip_version,
+ start = start,
+ end = end)
+ network_deployment.subnet = cidr
+ network_deployment.subnet_id = subnet['id']
+ # add subnet as interface to slice's router
+ #self.driver.add_router_interface(router['id'], subnet['id'])
+ # add external route
+ #self.driver.add_external_route(subnet)
+ logger.info("created private subnet (%s) for network: %s" % (cidr, network_deployment.network))
+ else:
+ (network_deployment.subnet_id, network_deployment.subnet) = self.driver.get_network_subnet(network_deployment.net_id)
+ logger.info("sync'ed subnet (%s) for network: %s" % (network_deployment.subnet, network_deployment.network))
+
+ network_deployment.save()
+
+ def sync_record(self, network_deployment):
+ if network_deployment.network.owner and network_deployment.network.owner.creator:
+ try:
+ # update manager context
+ real_driver = self.driver
+ self.driver = self.driver.client_driver(caller=network_deployment.network.owner.creator,
+ tenant=network_deployment.network.owner.name,
+ deployment=network_deployment.deployment.name)
+ self.save_network_deployment(network_deployment)
+ self.driver = real_driver
+ logger.info("saved network deployment: %s" % (network_deployment))
+ except Exception,e:
+ logger.log_exc("save network deployment failed: %s" % network_deployment)
+ raise e
+
+
+
diff --git a/planetstack/ec2_observer/steps/sync_network_slivers.py b/planetstack/ec2_observer/steps/sync_network_slivers.py
new file mode 100644
index 0000000..7e69330
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_network_slivers.py
@@ -0,0 +1,80 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+
+class SyncNetworkSlivers(OpenStackSyncStep):
+ requested_interval = 3600
+ provides=[NetworkSliver]
+
+ def fetch_pending(self):
+ return NetworkSliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def call(self, failed=[]):
+ networkSlivers = NetworkSliver.objects.all()
+ networkSlivers_by_id = {}
+ networkSlivers_by_port = {}
+ for networkSliver in networkSlivers:
+ networkSlivers_by_id[networkSliver.id] = networkSliver
+ networkSlivers_by_port[networkSliver.port_id] = networkSliver
+
+ networks = Network.objects.all()
+ networks_by_id = {}
+ for network in networks:
+ networks_by_id[network.network_id] = network
+
+ slivers = Sliver.objects.all()
+ slivers_by_instance_id = {}
+ for sliver in slivers:
+ slivers_by_instance_id[sliver.instance_id] = sliver
+
+ driver = self.driver.admin_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.node.deployment.name)
+ ports = driver.shell.quantum.list_ports()["ports"]
+ for port in ports:
+ if port["id"] in networkSlivers_by_port:
+ # we already have it
+ print "already accounted for port", port["id"]
+ continue
+
+ if port["device_owner"] != "compute:nova":
+ # we only want the ports that connect to instances
+ continue
+
+ network = networks_by_id.get(port['network_id'], None)
+ if not network:
+ #print "no network for port", port["id"], "network", port["network_id"]
+ continue
+
+ sliver = slivers_by_instance_id.get(port['device_id'], None)
+ if not sliver:
+ print "no sliver for port", port["id"], "device_id", port['device_id']
+ continue
+
+ if network.template.sharedNetworkId is not None:
+ # If it's a shared network template, then more than one network
+ # object maps to the quantum network. We have to do a whole bunch
+ # of extra work to find the right one.
+ networks = network.template.network_set.all()
+ network = None
+ for candidate_network in networks:
+ if (candidate_network.owner == sliver.slice):
+ print "found network", candidate_network
+ network = candidate_network
+
+ if not network:
+ print "failed to find the correct network for a shared template for port", port["id"], "network", port["network_id"]
+ continue
+
+ if not port["fixed_ips"]:
+ print "port", port["id"], "has no fixed_ips"
+ continue
+
+# print "XXX", port
+
+ ns = NetworkSliver(network=network,
+ sliver=sliver,
+ ip=port["fixed_ips"][0]["ip_address"],
+ port_id=port["id"])
+ ns.save()
diff --git a/planetstack/ec2_observer/steps/sync_networks.py b/planetstack/ec2_observer/steps/sync_networks.py
new file mode 100644
index 0000000..cc277c6
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_networks.py
@@ -0,0 +1,20 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncNetworks(OpenStackSyncStep):
+ provides=[Network]
+ requested_interval = 0
+
+ def fetch_pending(self):
+ return Network.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, network):
+ network.save()
+
diff --git a/planetstack/ec2_observer/steps/sync_roles.py b/planetstack/ec2_observer/steps/sync_roles.py
new file mode 100644
index 0000000..5dc30d9
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_roles.py
@@ -0,0 +1,37 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.role import Role
+from core.models.site import SiteRole
+from core.models.slice import SliceRole
+from core.models.deployment import Deployment, DeploymentRole
+
+class SyncRoles(OpenStackSyncStep):
+ provides=[Role]
+ requested_interval=0
+
+ def fetch_pending(self):
+ site_roles = SiteRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ slice_roles = SliceRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ deployment_roles = DeploymentRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ roles = []
+ for site_role in site_roles:
+ roles.append(site_role)
+ for slice_role in slice_roles:
+ roles.append(slice_role)
+ for deployment_role in deployment_roles:
+ roles.append(deployment_role)
+
+ return roles
+
+
+ def sync_record(self, role):
+ if not role.enacted:
+ deployments = Deployment.objects.all()
+ for deployment in deployments:
+ driver = self.driver.admin_driver(deployment=deployment.name)
+ driver.create_role(role.role)
+ role.save()
diff --git a/planetstack/ec2_observer/steps/sync_site_deployments.py b/planetstack/ec2_observer/steps/sync_site_deployments.py
new file mode 100644
index 0000000..a996c85
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_site_deployments.py
@@ -0,0 +1,28 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+
+class SyncSiteDeployments(OpenStackSyncStep):
+ requested_interval=0
+ provides=[Site, SiteDeployments]
+
+ def fetch_pending(self):
+ return SiteDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, site_deployment):
+ if not site_deployment.tenant_id:
+ driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+ tenant = driver.create_tenant(tenant_name=site_deployment.site.login_base,
+ description=site_deployment.site.name,
+ enabled=site_deployment.site.enabled)
+ site_deployment.tenant_id = tenant.id
+ site_deployment.save()
+ elif site_deployment.site.id and site_deployment.tenant_id:
+ driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+ driver.update_tenant(site_deployment.tenant_id,
+ description=site_deployment.site.name,
+ enabled=site_deployment.site.enabled)
+
diff --git a/planetstack/ec2_observer/steps/sync_site_privileges.py b/planetstack/ec2_observer/steps/sync_site_privileges.py
new file mode 100644
index 0000000..b57ae43
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_site_privileges.py
@@ -0,0 +1,31 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+from core.models.user import User, UserDeployments
+
+class SyncSitePrivileges(OpenStackSyncStep):
+ requested_interval=0
+ provides=[SitePrivilege]
+
+ def fetch_pending(self):
+ return SitePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, site_priv):
+ if site_priv.user.kuser_id and site_priv.site.tenant_id:
+ self.driver.add_user_role(site_priv.user.kuser_id,
+ site_priv.site.tenant_id,
+ site_priv.role.role)
+
+ # sync site privileges at all site deployments
+ site_deployments = SiteDeployments.objects.filter(site=site_priv.site)
+ for site_deployment in site_deployments:
+ user_deployments = UserDeployments.objects.filter(deployment=site_deployment.deployment)
+ if user_deployments:
+ kuser_id = user_deployments[0].kuser_id
+ driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+ driver.add_user_role(kuser_id,
+ site_deployment.tenant_id,
+ site_priv.role.role)
diff --git a/planetstack/ec2_observer/steps/sync_sites.py b/planetstack/ec2_observer/steps/sync_sites.py
new file mode 100644
index 0000000..5771aef
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_sites.py
@@ -0,0 +1,34 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from core.models.site import Site
+from ec2_observer.aws_lib import *
+
+class SyncSites(SyncStep):
+ provides=[Site]
+ requested_interval=3600
+
+ def fetch_pending(self):
+ current_site_names = [site.login_base for site in Site.objects.all()]
+ zones = aws_run('ec2 describe-availability-zones')['AvailabilityZones']
+ available_sites = [zone['ZoneName'] for zone in zones]
+
+ new_site_names = list(set(available_sites) - set(current_site_names))
+
+ new_sites = []
+ for s in new_site_names:
+ site = Site(name=s,
+ login_base=s,
+ site_url="www.amazon.com",
+ enabled=True,
+ is_public=True,
+ abbreviated_name=s)
+ new_sites.append(site)
+
+ return new_sites
+
+ def sync_record(self, site):
+ site.save()
+
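For reference, the zones call returns JSON shaped like (abridged):

    {
        "AvailabilityZones": [
            {"ZoneName": "us-east-1a", "State": "available"},
            {"ZoneName": "us-east-1b", "State": "available"}
        ]
    }

so each ZoneName becomes a Site whose login_base is the zone name.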
diff --git a/planetstack/ec2_observer/steps/sync_slice_deployments.py b/planetstack/ec2_observer/steps/sync_slice_deployments.py
new file mode 100644
index 0000000..580edd1
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slice_deployments.py
@@ -0,0 +1,107 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.deployment import Deployment
+from core.models.site import SiteDeployments
+from core.models.slice import Slice, SliceDeployments
+from core.models.user import UserDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSliceDeployments(OpenStackSyncStep):
+ provides=[SliceDeployments]
+ requested_interval=0
+
+ def fetch_pending(self):
+ # slice deployments are not visible to users. We must ensure
+ # slices are deployed at all deployments available to their site.
+ site_deployments = SiteDeployments.objects.all()
+ site_deploy_lookup = defaultdict(list)
+ for site_deployment in site_deployments:
+ site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
+
+ slice_deployments = SliceDeployments.objects.all()
+ slice_deploy_lookup = defaultdict(list)
+ for slice_deployment in slice_deployments:
+ slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
+
+ all_deployments = Deployment.objects.all()
+ for slice in Slice.objects.all():
+ # slices are added to all deployments for now
+ expected_deployments = all_deployments
+ #expected_deployments = site_deploy_lookup[slice.site]
+ for expected_deployment in expected_deployments:
+ if slice not in slice_deploy_lookup or \
+ expected_deployment not in slice_deploy_lookup[slice]:
+ sd = SliceDeployments(slice=slice, deployment=expected_deployment)
+ sd.save()
+
+ # now we can return all slice deployments that need to be enacted
+ return SliceDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def get_next_subnet(self, deployment=None):
+ # limit ourself to 10.0.x.x for now
+ valid_subnet = lambda net: net.startswith('10.0')
+ driver = self.driver.admin_driver(deployment=deployment)
+ subnets = driver.shell.quantum.list_subnets()['subnets']
+ ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+ if valid_subnet(subnet['cidr'])]
+ ints.sort()
+ if ints:
+ last_ip = IPAddress(ints[-1])
+ else:
+ last_ip = IPAddress('10.0.0.1')
+ last_network = IPNetwork(str(last_ip) + "/24")
+ next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
+ return next_network
+
+
+ def sync_record(self, slice_deployment):
+ logger.info("sync'ing slice deployment %s" % slice_deployment)
+ if not slice_deployment.tenant_id:
+ nova_fields = {'tenant_name': slice_deployment.slice.name,
+ 'description': slice_deployment.slice.description,
+ 'enabled': slice_deployment.slice.enabled}
+ driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+ tenant = driver.create_tenant(**nova_fields)
+ slice_deployment.tenant_id = tenant.id
+
+ # XXX give caller an admin role at the tenant they've created
+ deployment_users = UserDeployments.objects.filter(user=slice_deployment.slice.creator,
+ deployment=slice_deployment.deployment)
+ if not deployment_users:
+ logger.info("slice createor %s has not accout at deployment %s" % (slice_deployment.slice.creator, slice_deployment.deployment.name))
+ else:
+ deployment_user = deployment_users[0]
+ # lookup user id at this deployment
+ kuser= driver.shell.keystone.users.find(email=slice_deployment.slice.creator.email)
+
+ # add required roles at the slice's tenant
+ driver.add_user_role(kuser.id, tenant.id, 'admin')
+
+ # refresh credentials using this tenant
+ client_driver = self.driver.client_driver(caller=deployment_user.user,
+ tenant=tenant.name,
+ deployment=slice_deployment.deployment.name)
+
+
+ if slice_deployment.id and slice_deployment.tenant_id:
+ # update existing tenant
+ driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+ driver.update_tenant(slice_deployment.tenant_id,
+ description=slice_deployment.slice.description,
+ enabled=slice_deployment.slice.enabled)
+
+ if slice_deployment.tenant_id:
+ # update slice/tenant quota
+ driver = self.driver.client_driver(deployment=slice_deployment.deployment.name,
+ tenant=slice_deployment.slice.name)
+ driver.shell.nova.quotas.update(tenant_id=slice_deployment.tenant_id, instances=int(slice_deployment.slice.max_slivers))
+
+ slice_deployment.save()
diff --git a/planetstack/ec2_observer/steps/sync_slice_memberships.py b/planetstack/ec2_observer/steps/sync_slice_memberships.py
new file mode 100644
index 0000000..b6b1638
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slice_memberships.py
@@ -0,0 +1,29 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.slice import *
+from core.models.user import UserDeployments
+
+class SyncSliceMemberships(OpenStackSyncStep):
+ requested_interval=0
+ provides=[SlicePrivilege]
+
+ def fetch_pending(self):
+ return SlicePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, slice_memb):
+ # sync slice memberships at all slice deployments
+ slice_deployments = SliceDeployments.objects.filter(slice=slice_memb.slice)
+ for slice_deployment in slice_deployments:
+ if not slice_deployment.tenant_id:
+ continue
+ user_deployments = UserDeployments.objects.filter(deployment=slice_deployment.deployment,
+ user=slice_memb.user)
+ if user_deployments:
+ kuser_id = user_deployments[0].kuser_id
+ driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+ driver.add_user_role(kuser_id,
+ slice_deployment.tenant_id,
+ slice_memb.role.role)
diff --git a/planetstack/ec2_observer/steps/sync_slices.py b/planetstack/ec2_observer/steps/sync_slices.py
new file mode 100644
index 0000000..6cf0772
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slices.py
@@ -0,0 +1,23 @@
+import os
+import base64
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.slice import Slice, SliceDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSlices(OpenStackSyncStep):
+ provides=[Slice]
+ requested_interval=0
+
+ def fetch_pending(self):
+ return Slice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, slice):
+ for slice_deployment in SliceDeployments.objects.filter(slice=slice):
+ # bump the 'updated' timestamp and trigger observer to update
+ # slice across all deployments
+ slice_deployment.save()
diff --git a/planetstack/ec2_observer/steps/sync_sliver_ips.py b/planetstack/ec2_observer/steps/sync_sliver_ips.py
new file mode 100644
index 0000000..e2212d1
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_sliver_ips.py
@@ -0,0 +1,29 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSliverIps(OpenStackSyncStep):
+ provides=[Sliver]
+ requested_interval=0
+
+ def fetch_pending(self):
+ slivers = Sliver.objects.filter(ip=None)
+ return slivers
+
+ def sync_record(self, sliver):
+ driver = self.driver.client_driver(tenant=sliver.slice.name,
+ deployment=sliver.node.deployment.name)
+ servers = driver.shell.nova.servers.findall(id=sliver.instance_id)
+ if not servers:
+ return
+ server = servers[0]
+ ips = server.addresses.get(sliver.slice.name, [])
+ if not ips:
+ return
+ sliver.ip = ips[0]['addr']
+ if sliver.ip:
+ sliver.save()
+ logger.info("saved sliver ip: %s %s" % (sliver, ips[0]))
diff --git a/planetstack/ec2_observer/steps/sync_slivers.py b/planetstack/ec2_observer/steps/sync_slivers.py
new file mode 100644
index 0000000..b576bbc
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slivers.py
@@ -0,0 +1,91 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from core.models.slice import SlicePrivilege, SliceDeployments
+from core.models.network import Network, NetworkSlice, NetworkDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSlivers(OpenStackSyncStep):
+ provides=[Sliver]
+ requested_interval=0
+
+ def fetch_pending(self):
+ return Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, sliver):
+ logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
+ metadata_update = {}
+ if ("numberCores" in sliver.changed_fields):
+ metadata_update["cpu_cores"] = str(sliver.numberCores)
+
+ for tag in sliver.slice.tags.all():
+ if tag.name.startswith("sysctl-"):
+ metadata_update[tag.name] = tag.value
+
+ if not sliver.instance_id:
+ driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork.name)
+ # public keys
+ slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+ pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
+ if sliver.creator.public_key:
+ pubkeys.append(sliver.creator.public_key)
+ if sliver.slice.creator.public_key:
+ pubkeys.append(sliver.slice.creator.public_key)
+ # networks
+ # include all networks available to the slice and/or associated network templates
+ nics = []
+ networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
+ network_deployments = NetworkDeployments.objects.filter(network__in=networks,
+ deployment=sliver.node.deployment)
+ # Gather private networks first. This includes networks with a template that has
+ # visibility = private and translation = none
+ for network_deployment in network_deployments:
+ if network_deployment.network.template.visibility == 'private' and \
+ network_deployment.network.template.translation == 'none':
+ nics.append({'net-id': network_deployment.net_id})
+
+ # now include network template
+ network_templates = [network.template.sharedNetworkName for network in networks \
+ if network.template.sharedNetworkName]
+ for net in driver.shell.quantum.list_networks()['networks']:
+ if net['name'] in network_templates:
+ nics.append({'net-id': net['id']})
+
+ file("/tmp/scott-manager","a").write("slice: %s\nreq: %s\n" % (str(sliver.slice.name), str(nics)))
+
+ # look up image id
+ deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
+ image_id = None
+ images = deployment_driver.shell.glance.get_images()
+ for image in images:
+ if image['name'] == sliver.image.name:
+ image_id = image['id']
+
+ # look up key name at the deployment
+ # create/fetch keypair
+ keyname = None
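+ # derive a keypair name unique to this user and slice from the
+ # creator's email, e.g. bob@example.org -> bobATexampleorg<slicename>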
+ if sliver.creator.public_key:
+ keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
+ sliver.slice.name
+ key_fields = {'name': keyname,
+ 'public_key': sliver.creator.public_key}
+ driver.create_keypair(**key_fields)
+
+ instance = driver.spawn_instance(name=sliver.name,
+ key_name = keyname,
+ image_id = image_id,
+ hostname = sliver.node.name,
+ pubkeys = pubkeys,
+ nics = nics )
+ sliver.instance_id = instance.id
+ sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+ sliver.save()
+
+ if sliver.instance_id and metadata_update:
+ driver.update_instance_metadata(sliver.instance_id, metadata_update)
+
diff --git a/planetstack/ec2_observer/steps/sync_user_deployments.py b/planetstack/ec2_observer/steps/sync_user_deployments.py
new file mode 100644
index 0000000..39943f7
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_user_deployments.py
@@ -0,0 +1,98 @@
+import os
+import base64
+import hashlib
+from collections import defaultdict
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import SiteDeployments, Deployment
+from core.models.user import User, UserDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncUserDeployments(OpenStackSyncStep):
+ provides=[User, UserDeployments]
+ requested_interval=0
+
+ def fetch_pending(self):
+ # user deployments are not visible to users. We must ensure
+ # users are deployed at all deployments available to their sites.
+
+ deployments = Deployment.objects.all()
+ site_deployments = SiteDeployments.objects.all()
+ site_deploy_lookup = defaultdict(list)
+ for site_deployment in site_deployments:
+ site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
+
+ user_deploy_lookup = defaultdict(list)
+ for user_deployment in UserDeployments.objects.all():
+ user_deploy_lookup[user_deployment.user].append(user_deployment.deployment)
+
+ for user in User.objects.all():
+ if user.is_admin:
+ # admins should have an account at all deployments
+ expected_deployments = deployments
+ else:
+ # normal users should have an account at their site's deployments
+ #expected_deployments = site_deploy_lookup[user.site]
+ # users are added to all deployments for now
+ expected_deployments = deployments
+ for expected_deployment in expected_deployments:
+ if user not in user_deploy_lookup or \
+ expected_deployment not in user_deploy_lookup[user]:
+ # add new record
+ ud = UserDeployments(user=user, deployment=expected_deployment)
+ ud.save()
+
+ return UserDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, user_deployment):
+ logger.info("sync'ing user %s at deployment %s" % (user_deployment.user, user_deployment.deployment.name))
+ name = user_deployment.user.email[:user_deployment.user.email.find('@')]
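+ # the keystone account reuses the PlanetStack email; the keystone
+ # password is a short digest derived from the stored password hash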
+ user_fields = {'name': user_deployment.user.email,
+ 'email': user_deployment.user.email,
+ 'password': hashlib.md5(user_deployment.user.password).hexdigest()[:6],
+ 'enabled': True}
+ driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
+ if not user_deployment.kuser_id:
+ keystone_user = driver.create_user(**user_fields)
+ user_deployment.kuser_id = keystone_user.id
+ else:
+ driver.update_user(user_deployment.kuser_id, user_fields)
+
+ # setup user deployment home site roles
+ if user_deployment.user.site:
+ site_deployments = SiteDeployments.objects.filter(site=user_deployment.user.site,
+ deployment=user_deployment.deployment)
+ if site_deployments:
+ # need the correct tenant id for site at the deployment
+ tenant_id = site_deployments[0].tenant_id
+ driver.add_user_role(user_deployment.kuser_id,
+ tenant_id, 'user')
+ if user_deployment.user.is_admin:
+ driver.add_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+ else:
+ # may have admin role so attempt to remove it
+ driver.delete_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+
+ #if user_deployment.user.public_key:
+ # if not user_deployment.user.keyname:
+ # keyname = user_deployment.user.email.lower().replace('@', 'AT').replace('.', '')
+ # user_deployment.user.keyname = keyname
+ # user_deployment.user.save()
+ #
+ # user_driver = driver.client_driver(caller=user_deployment.user,
+ # tenant=user_deployment.user.site.login_base,
+ # deployment=user_deployment.deployment.name)
+ # key_fields = {'name': user_deployment.user.keyname,
+ # 'public_key': user_deployment.user.public_key}
+ # user_driver.create_keypair(**key_fields)
+
+ user_deployment.save()
diff --git a/planetstack/ec2_observer/steps/sync_users.py b/planetstack/ec2_observer/steps/sync_users.py
new file mode 100644
index 0000000..71f9c0f
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_users.py
@@ -0,0 +1,20 @@
+import os
+import base64
+import hashlib
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.user import User, UserDeployments
+
+class SyncUsers(OpenStackSyncStep):
+ provides=[User]
+ requested_interval=0
+
+ def fetch_pending(self):
+ return User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, user):
+ for user_deployment in UserDeployments.objects.filter(user=user):
+ # bump the 'updated' field so user accounts are updated across
+ # deployments.
+ user_deployment.save()
diff --git a/planetstack/ec2_observer/syncstep.py b/planetstack/ec2_observer/syncstep.py
new file mode 100644
index 0000000..187b318
--- /dev/null
+++ b/planetstack/ec2_observer/syncstep.py
@@ -0,0 +1,84 @@
+import os
+import base64
+from datetime import datetime
+from planetstack.config import Config
+from util.logger import Logger, logging
+from observer.steps import *
+
+logger = Logger(level=logging.INFO)
+
+class FailedDependency(Exception):
+ pass
+
+class SyncStep:
+ """ A PlanetStack Sync step.
+
+ Attributes:
+ psmodel Model name the step synchronizes
+ dependencies list of names of models that must be synchronized first if the current model depends on them
+ """
+ slow=False
+ def get_prop(self, prop):
+ try:
+ sync_config_dir = Config().sync_config_dir
+ except:
+ sync_config_dir = '/etc/planetstack/sync'
+ # per-step properties live at <sync_config_dir>/<step name>/<prop>
+ prop_config_path = os.path.join(sync_config_dir, self.name, prop)
+ return open(prop_config_path).read().rstrip()
+
+ def __init__(self, **args):
+ """Initialize a sync step
+ Keyword arguments:
+ name -- Name of the step
+ provides -- PlanetStack models sync'd by this step
+ """
+ # fall back to the class name when no explicit step name is given
+ self.name = args.get('name', self.__class__.__name__)
+ # default to no dependencies unless the subclass declares some
+ if not hasattr(self, 'dependencies'):
+ self.dependencies = []
+ self.driver = args.get('driver')
+ self.error_map = args.get('error_map')
+
+ try:
+ self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
+ except:
+ self.soft_deadline = 5 # 5 seconds
+
+ return
+
+ def fetch_pending(self, deletion=False):
+ return []
+
+ def check_dependencies(self, obj, failed):
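+ # raise FailedDependency if any object this one depends on is in
+ # the failed list for this pass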
+ for dep in self.dependencies:
+ peer_object = getattr(obj, dep.lower())
+ if (peer_object.pk==failed.pk):
+ raise FailedDependency
+
+ def call(self, failed=None, deletion=False):
+ # a mutable default argument would be shared across calls and
+ # accumulate failures; default to a fresh list instead
+ if failed is None:
+ failed = []
+ pending = self.fetch_pending(deletion)
+ for o in pending:
+ try:
+ for f in failed:
+ self.check_dependencies(o,f) # Raises exception if failed
+ if (deletion):
+ self.delete_record(o)
+ o.delete(purge=True)
+ else:
+ self.sync_record(o)
+ o.enacted = datetime.now() # Is this the same timezone? XXX
+ o.backend_status = "OK"
+ o.save(update_fields=['enacted','backend_status'])
+ except Exception,e:
+ try:
+ o.backend_status = self.error_map.map(str(e))
+ except:
+ o.backend_status = str(e)
+
+ o.save(update_fields=['backend_status'])
+
+ logger.log_exc("sync step failed!")
+ failed.append(o)
+
+ return failed
+
+ def __call__(self, **args):
+ return self.call(**args)
diff --git a/planetstack/ec2_observer/toposort.py b/planetstack/ec2_observer/toposort.py
new file mode 100644
index 0000000..a2c9389
--- /dev/null
+++ b/planetstack/ec2_observer/toposort.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+
+import time
+import traceback
+import commands
+import threading
+import json
+import pdb
+
+from datetime import datetime
+from collections import defaultdict
+
+# Topological sort
+# Notes:
+# - Uses a stack instead of recursion
+# - Forfeits optimization involving tracking currently visited nodes
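+# Example: g maps each node to the nodes it depends on, so
+# toposort({'a':['b'],'b':['c']}) yields ['c','b','a'] -- dependencies first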
+def toposort(g, steps=None):
+ # Get set of all nodes, including those without outgoing edges
+ keys = set(g.keys())
+ values = set()
+ for v in g.values():
+ values=values | set(v)
+
+ all_nodes=list(keys|values)
+ if (not steps):
+ steps = all_nodes
+
+ # Final order
+ order = []
+
+ # DFS stack, not using recursion
+ stack = []
+
+ # Unmarked set
+ unmarked = list(all_nodes) # copy, since nodes are removed as they are marked
+
+ # visiting = [] - skip, don't expect 1000s of nodes, |E|/|V| is small
+
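+ # restart the depth-first walk from some unmarked node until all
+ # nodes have been visited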
+ while unmarked:
+ stack.insert(0,unmarked[0]) # push first unmarked
+
+ while (stack):
+ n = stack[0]
+ add = True
+ try:
+ for m in g[n]:
+ if (m in unmarked):
+ if (m not in stack):
+ add = False
+ stack.insert(0,m)
+ else:
+ # Should not happen, if so there's a loop
+ print 'Loop at %s'%m
+ except KeyError:
+ pass
+ if (add):
+ if (n in steps):
+ order.append(n)
+ item = stack.pop(0)
+ unmarked.remove(item)
+
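+ # any requested steps that never appear in the graph are appended at the end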
+ noorder = list(set(steps) - set(order))
+ return order + noorder
+
+def main():
+ graph_file=open('planetstack.deps').read()
+ g = json.loads(graph_file)
+ print toposort(g)
+
+if (__name__=='__main__'):
+ main()
+
+#print toposort({'a':['b'],'b':['c'],'c':['d'],'d':['c']},['d','c','b','a'])