import os
import imp
import inspect
import time
import traceback
import commands
import threading
import json

from datetime import datetime
from collections import defaultdict
from core.models import *
from django.db.models import F, Q
#from openstack.manager import OpenStackManager
from openstack.driver import OpenStackDriver
from util.logger import Logger, logging, logger
#from timeout import timeout
from planetstack.config import Config
from ec2_observer.steps import *
from syncstep import SyncStep
from toposort import toposort
from ec2_observer.error_mapper import *

debug_mode = False

logger = Logger(level=logging.INFO)

class StepNotReady(Exception):
    pass

class NoOpDriver:
    def __init__(self):
        self.enabled = True

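# PlanetStackObserver drives the registered SyncStep classes: it discovers them
# on disk, orders them according to the model dependency graph, and runs them in
# a loop that is driven by events and timeouts.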
class PlanetStackObserver:
    #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
    sync_steps = []

    def __init__(self):
        self.step_lookup = {}
        self.load_sync_step_modules()
        self.load_sync_steps()
        # The Condition object that gets signalled by Feefie events
        self.event_cond = threading.Condition()

        self.driver_kind = getattr(Config(), "observer_driver", "openstack")
        if self.driver_kind == "openstack":
            self.driver = OpenStackDriver()
        else:
            self.driver = NoOpDriver()

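    # Block until wake_up() notifies the condition variable or the timeout expires.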
    def wait_for_event(self, timeout):
        self.event_cond.acquire()
        self.event_cond.wait(timeout)
        self.event_cond.release()

    def wake_up(self):
        logger.info('Wake up routine called. Event cond %r' % self.event_cond)
        self.event_cond.acquire()
        self.event_cond.notify()
        self.event_cond.release()

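    # Discover sync steps by importing every Python module in the steps directory.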
    def load_sync_step_modules(self, step_dir=None):
        if step_dir is None:
            if hasattr(Config(), "observer_steps_dir"):
                step_dir = Config().observer_steps_dir
            else:
                step_dir = "/opt/planetstack/observer/steps"

        for fn in os.listdir(step_dir):
            pathname = os.path.join(step_dir, fn)
            if os.path.isfile(pathname) and fn.endswith(".py") and (fn != "__init__.py"):
                module = imp.load_source(fn[:-3], pathname)
                for classname in dir(module):
                    c = getattr(module, classname, None)

                    # make sure 'c' is a descendant of SyncStep and has a
                    # provides field (this eliminates the abstract base classes,
                    # since they don't have a provides)

                    if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c, "provides") and (c not in self.sync_steps):
                        self.sync_steps.append(c)
        logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
        # print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])

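    # Translate the model dependency graph into a step dependency graph: every
    # edge between two models becomes an edge between the steps that provide
    # those models. The resulting graph is topologically sorted to decide the
    # order in which steps run.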
    def load_sync_steps(self):
        dep_path = Config().observer_dependency_graph
        logger.info('Loading model dependency graph from %s' % dep_path)
        try:
            # This contains dependencies between records, not sync steps
            self.model_dependency_graph = json.loads(open(dep_path).read())
        except Exception:
            # The model dependency graph is required; re-raise with the original traceback
            raise

        try:
            backend_path = Config().observer_pl_dependency_graph
            logger.info('Loading backend dependency graph from %s' % backend_path)
            # This contains dependencies between backend records
            self.backend_dependency_graph = json.loads(open(backend_path).read())
        except Exception:
            logger.info('Backend dependency graph not loaded')
            # We can work without a backend graph
            self.backend_dependency_graph = {}

        provides_dict = {}
        for s in self.sync_steps:
            self.step_lookup[s.__name__] = s
            for m in s.provides:
                try:
                    provides_dict[m.__name__].append(s.__name__)
                except KeyError:
                    provides_dict[m.__name__] = [s.__name__]

        step_graph = {}
        for k, v in self.model_dependency_graph.iteritems():
            try:
                for source in provides_dict[k]:
                    for m in v:
                        try:
                            for dest in provides_dict[m]:
                                try:
                                    if (dest not in step_graph[source]):
                                        step_graph[source].append(dest)
                                except KeyError:
                                    step_graph[source] = [dest]
                        except KeyError:
                            # no step provides this model; skip it
                            pass
            except KeyError:
                # no dependencies for this model; skip it
                pass

        #import pdb
        #pdb.set_trace()
        if (self.backend_dependency_graph):
            backend_dict = {}
            for s in self.sync_steps:
                for m in s.serves:
                    backend_dict[m] = s.__name__

            for k, v in self.backend_dependency_graph.iteritems():
                try:
                    source = backend_dict[k]
                    for m in v:
                        try:
                            dest = backend_dict[m]
                        except KeyError:
                            # no step serves this model; skip it
                            continue
                        if (source not in step_graph):
                            step_graph[source] = []
                        if (dest not in step_graph[source]):
                            step_graph[source].append(dest)
                except KeyError:
                    # no dependencies for this model; skip it
                    pass

        dependency_graph = step_graph

        self.ordered_steps = toposort(dependency_graph, map(lambda s: s.__name__, self.sync_steps))
        print "Order of steps=", self.ordered_steps
        self.load_run_times()

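    # Log a notice when a step runs longer than its declared deadline.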
    def check_duration(self, step, duration):
        try:
            if (duration > step.deadline):
                logger.info('Sync step %s missed deadline, took %.2f seconds' % (step.name, duration))
        except AttributeError:
            # step doesn't define a deadline
            pass

    def update_run_time(self, step, deletion):
        if (not deletion):
            self.last_run_times[step.__name__] = time.time()
        else:
            self.last_deletion_run_times[step.__name__] = time.time()

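    # Raise StepNotReady if the step ran more recently than its requested_interval;
    # steps that do not set requested_interval are never considered ready.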
    def check_schedule(self, step, deletion):
        last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times

        time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
        try:
            if (time_since_last_run < step.requested_interval):
                raise StepNotReady
        except AttributeError:
            logger.info('Step %s does not have requested_interval set' % step.__name__)
            raise StepNotReady

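    # Last-run timestamps are persisted under /tmp so that scheduling decisions
    # survive observer restarts.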
    def load_run_times(self):
        try:
            jrun_times = open('/tmp/observer_run_times').read()
            self.last_run_times = json.loads(jrun_times)
        except (IOError, ValueError):
            # missing or unparsable file; start from scratch
            self.last_run_times = {}
            for e in self.ordered_steps:
                self.last_run_times[e] = 0
        try:
            jrun_times = open('/tmp/observer_deletion_run_times').read()
            self.last_deletion_run_times = json.loads(jrun_times)
        except (IOError, ValueError):
            self.last_deletion_run_times = {}
            for e in self.ordered_steps:
                self.last_deletion_run_times[e] = 0

    def save_run_times(self):
        run_times = json.dumps(self.last_run_times)
        with open('/tmp/observer_run_times', 'w') as f:
            f.write(run_times)

        deletion_run_times = json.dumps(self.last_deletion_run_times)
        with open('/tmp/observer_deletion_run_times', 'w') as f:
            f.write(deletion_run_times)

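    # A step is not ready if anything it depends on has already failed during
    # this pass.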
    def check_class_dependency(self, step, failed_steps):
        step.dependencies = []
        for obj in step.provides:
            step.dependencies.extend(self.model_dependency_graph.get(obj.__name__, []))
        for failed_step in failed_steps:
            if (failed_step in step.dependencies):
                raise StepNotReady

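    # Main loop: wait for an event (or a timeout), then make two passes over the
    # ordered steps, one for synchronization and one, in reverse order, for
    # deletion. Steps whose dependencies failed or that are not yet due are skipped.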
    def run(self):
        if not self.driver.enabled:
            return
        if (self.driver_kind == "openstack") and (not self.driver.has_openstack):
            return

        while True:
            try:
                error_map_file = getattr(Config(), "error_map_path", "/opt/planetstack/error_map.txt")
                error_mapper = ErrorMapper(error_map_file)

                logger.info('Waiting for event')
                tBeforeWait = time.time()
                self.wait_for_event(timeout=5)
                logger.info('Observer woke up')

                # Two passes. One for sync, the other for deletion.
                for deletion in (False, True):
                    logger.info('Deletion=%r...' % deletion)
                    # Steps that failed in their entirety during this pass
                    failed_steps = []

                    # Set of individual objects within steps that failed
                    failed_step_objects = set()

                    ordered_steps = self.ordered_steps if not deletion else reversed(self.ordered_steps)

                    for S in ordered_steps:
                        step = self.step_lookup[S]
                        start_time = time.time()

                        sync_step = step(driver=self.driver, error_map=error_mapper)
                        sync_step.__name__ = step.__name__
                        sync_step.dependencies = []
                        try:
                            mlist = sync_step.provides

                            for m in mlist:
                                sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
                        except KeyError:
                            pass
                        sync_step.debug_mode = debug_mode

                        should_run = False
                        try:
                            # Various checks that decide whether
                            # this step runs or not
                            self.check_class_dependency(sync_step, failed_steps)  # don't run Slices if Sites failed
                            self.check_schedule(sync_step, deletion)  # don't run sync_network_routes if time since last run < 1 hour
                            should_run = True
                        except StepNotReady:
                            logging.info('Step not ready: %s' % sync_step.__name__)
                            failed_steps.append(sync_step)
                        except Exception, e:
                            logging.error('%r', e)
                            logger.log_exc("sync step failed: %r. Deletion: %r" % (sync_step, deletion))
                            failed_steps.append(sync_step)

                        if (should_run):
                            try:
                                logger.info('Executing step %s' % sync_step.__name__)

                                # ********* This is the actual sync step
                                #import pdb
                                #pdb.set_trace()
                                failed_objects = sync_step(failed=list(failed_step_objects), deletion=deletion)

                                # measure the step's runtime before checking its deadline
                                duration = time.time() - start_time
                                self.check_duration(sync_step, duration)
                                if failed_objects:
                                    failed_step_objects.update(failed_objects)

                                self.update_run_time(sync_step, deletion)
                            except Exception, e:
                                logging.error('Model step failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!', e)
                                logger.log_exc(e)
                                failed_steps.append(S)
                    self.save_run_times()
            except Exception, e:
                logging.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!', e)
                logger.log_exc("Exception in observer run loop")
                traceback.print_exc()