def main():
    """Entry point: parse command-line options, optionally daemonize, then
    block until the data model is reachable.

    Polls Instance/NetworkTemplate every 10 seconds until both queries
    succeed.
    """
    # Generate command line parser
    parser = argparse.ArgumentParser(usage='%(prog)s [options]')
    parser.add_argument('-d', '--daemon', dest='daemon',
                        action='store_true', default=False,
                        help='Run as daemon.')
    # smbaker: util/config.py parses sys.argv[] directly to get config file
    # name; include the option here to avoid throwing unrecognized argument
    # exceptions
    parser.add_argument('-C', '--config', dest='config_file',
                        action='store', default=DEFAULT_CONFIG_FN,
                        help='Name of config file.')
    args = parser.parse_args()

    if args.daemon:
        daemon()

    models_active = False
    wait = False
    while not models_active:
        try:
            # Touch both models; any failure means the data model is not
            # ready yet (e.g. the database is still coming up).
            _ = Instance.objects.first()
            _ = NetworkTemplate.objects.first()
            models_active = True
        except Exception as e:  # BUG FIX: was Python-2-only `except Exception, e`
            logger.info(str(e))
            logger.info('Waiting for data model to come up before starting...')
            time.sleep(10)
            wait = True
def main():
    """Entry point: parse command-line options, optionally daemonize,
    initialize Django, then block until the data model is reachable.

    Polls Instance/NetworkTemplate every 10 seconds until both queries
    succeed.
    """
    # Generate command line parser
    parser = argparse.ArgumentParser(usage='%(prog)s [options]')
    parser.add_argument('-d', '--daemon', dest='daemon',
                        action='store_true', default=False,
                        help='Run as daemon.')
    # smbaker: util/config.py parses sys.argv[] directly to get config file
    # name; include the option here to avoid throwing unrecognized argument
    # exceptions
    parser.add_argument('-C', '--config', dest='config_file',
                        action='store', default=DEFAULT_CONFIG_FN,
                        help='Name of config file.')
    args = parser.parse_args()

    if args.daemon:
        daemon()

    if django_setup:  # 1.7
        django_setup()

    models_active = False
    wait = False
    while not models_active:
        try:
            # Touch both models; any failure means the data model is not
            # ready yet (e.g. the database is still coming up).
            _ = Instance.objects.first()
            _ = NetworkTemplate.objects.first()
            models_active = True
        except Exception as e:  # BUG FIX: was Python-2-only `except Exception, e`
            logger.info(str(e))
            logger.info('Waiting for data model to come up before starting...')
            time.sleep(10)
            wait = True
def check_duration(self, step, duration):
    """Log (at info level) when a sync step overran its deadline.

    Steps that define no ``deadline`` attribute are skipped silently.
    """
    try:
        overran = duration > step.deadline
        if overran:
            logger.info('Sync step %s missed deadline, took %.2f seconds'
                        % (step.name, duration))
    except AttributeError:
        # This step has no deadline configured -- nothing to report.
        pass
def __init__(self, error_map_file):
    """Load the error-translation map from *error_map_file*.

    Each non-comment line has the form ``<key> -> <value>``.  Loading is
    best-effort: on any failure the map is left (possibly partially)
    populated and a message is logged.
    """
    self.error_map = {}
    try:
        # BUG FIX: use a context manager -- the original `open(...).read()`
        # never closed the file handle.
        with open(error_map_file) as f:
            error_map_lines = f.read().splitlines()
        for l in error_map_lines:
            # BUG FIX: blank lines previously caused an unpack ValueError
            # that aborted the entire load; skip them like comments.
            if l.startswith('#') or not l.strip():
                continue
            splits = l.split('->')
            k, v = [part.rstrip() for part in splits]
            self.error_map[k] = v
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.  Keep the deliberate best-effort
        # behavior otherwise.
        logger.info('Could not read error map')
def __init__(self, sync_steps):
    """Initialize the watcher: register sync steps, then subscribe to
    their redis channels (one channel per entry in watch_map)."""
    self.watch_map = {}
    self.sync_steps = sync_steps
    self.load_sync_steps()

    connection = redis.Redis("redis")
    self.redis = connection
    self.pubsub = connection.pubsub()
    # load_sync_steps() populated watch_map; subscribe to every channel.
    self.pubsub.subscribe(self.watch_map.keys())
    logger.info("XOS watcher initialized")
def check_schedule(self, step, deletion):
    """Raise StepNotReady unless *step*'s requested interval has elapsed
    since its last (deletion) run.

    Also raises StepNotReady (after logging) when the step does not define
    ``requested_interval`` at all.
    """
    if deletion:
        run_times = self.last_deletion_run_times
    else:
        run_times = self.last_run_times
    # Steps that never ran are treated as having run at epoch 0.
    elapsed = time.time() - run_times.get(step.__name__, 0)

    try:
        interval = step.requested_interval
    except AttributeError:
        logger.info('Step %s does not have requested_interval set'
                    % step.__name__)
        raise StepNotReady

    if elapsed < interval:
        raise StepNotReady
def main():
    """Block until the data model is reachable, polling every 10 seconds.

    Succeeds once both Instance and NetworkTemplate respond to a query.
    """
    models_active = False
    wait = False
    while not models_active:
        try:
            # Touch both models; any failure means the data model is not
            # ready yet (e.g. the database is still coming up).
            _ = Instance.objects.first()
            _ = NetworkTemplate.objects.first()
            models_active = True
        except Exception as e:  # BUG FIX: was Python-2-only `except Exception, e`
            logger.info(str(e))
            logger.info('Waiting for data model to come up before starting...')
            time.sleep(10)
            wait = True
def load_sync_step_modules(self, step_dir=None):
    """Scan *step_dir* (default: the configured observer_steps_dir) for
    ``.py`` files and append every concrete SyncStep subclass found in
    them to self.sync_steps."""
    if step_dir is None:
        step_dir = Config().observer_steps_dir

    for fn in os.listdir(step_dir):
        pathname = os.path.join(step_dir, fn)
        is_step_file = (os.path.isfile(pathname)
                        and fn.endswith(".py")
                        and fn != "__init__.py")
        if not is_step_file:
            continue
        # NOTE: `imp` is deprecated in Python 3; kept for parity with the
        # rest of the file.
        module = imp.load_source(fn[:-3], pathname)
        for classname in dir(module):
            c = getattr(module, classname, None)
            # make sure 'c' is a descendent of SyncStep and has a
            # provides field (this eliminates the abstract base classes
            # since they don't have a provides)
            if (inspect.isclass(c) and issubclass(c, SyncStep)
                    and hasattr(c, "provides")
                    and c not in self.sync_steps):
                self.sync_steps.append(c)

    logger.info('loaded sync steps: %s'
                % ",".join([x.__name__ for x in self.sync_steps]))
except AttributeError,e: raise e def update_dep(d, o): try: print 'Trying to update %s'%d save_fields = [] if (d.updated < o.updated): save_fields = ['updated'] if (save_fields): d.save(update_fields=save_fields) except AttributeError,e: raise e except Exception,e: logger.info('Could not save %r. Exception: %r'%(d,e), extra=d.tologdict()) def delete_if_inactive(d, o): try: d.delete() print "Deleted %s (%s)"%(d,d.__class__.__name__) except: pass return #@atomic def execute_model_policy(instance, deleted): # Automatic dirtying if (instance in bad_instances): return
except AttributeError,e: raise e def update_dep(d, o): try: print 'Trying to update %s'%d save_fields = [] if (d.updated < o.updated): save_fields = ['updated'] if (save_fields): d.save(update_fields=save_fields) except AttributeError,e: raise e except Exception,e: logger.info('Could not save %r. Exception: %r'%(d,e)) def delete_if_inactive(d, o): try: d.delete() print "Deleted %s (%s)"%(d,d.__class__.__name__) except: pass return #@atomic def execute_model_policy(instance, deleted): # Automatic dirtying if (instance in bad_instances): return
raise e def update_dep(d, o): try: print 'Trying to update %s' % d save_fields = [] if (d.updated < o.updated): save_fields = ['updated'] if (save_fields): d.save(update_fields=save_fields) except AttributeError, e: raise e except Exception, e: logger.info('Could not save %r. Exception: %r' % (d, e), extra=d.tologdict()) def delete_if_inactive(d, o): try: d.delete() print "Deleted %s (%s)" % (d, d.__class__.__name__) except: pass return #@atomic def execute_model_policy(instance, deleted): # Automatic dirtying if (instance in bad_instances):
def wake_up(self):
    """Wake the event loop by notifying one waiter on the event condition."""
    logger.info('Wake up routine called. Event cond %r' % self.event_cond)
    # Condition objects are context managers: acquire on entry,
    # release on exit.
    with self.event_cond:
        self.event_cond.notify()
def sync(self, S, deletion):
    """Run one sync step named *S*, honoring inter-step dependencies.

    Waits (on per-step condition variables) for every dependency of S to
    finish, skips S if a dependency failed, otherwise checks schedule/class
    preconditions and executes the step.  Publishes S's final status
    (OK/KO) through its own condition variable so dependent steps can
    proceed, then resets/clears DB connections.

    deletion: True selects the deletion dependency graph and deletion mode.
    """
    try:
        step = self.lookup_step_class(S)
        start_time = time.time()

        logger.debug("Starting to work on step %s, deletion=%s" % (step.__name__, str(deletion)))

        dependency_graph = self.dependency_graph if not deletion else self.deletion_dependency_graph
        step_conditions = self.step_conditions  # if not deletion else self.deletion_step_conditions
        step_status = self.step_status  # if not deletion else self.deletion_step_status

        # Wait for step dependencies to be met
        try:
            deps = dependency_graph[S]
            has_deps = True
        except KeyError:
            # S has no entry in the graph: nothing to wait for.
            has_deps = False

        go = True
        failed_dep = None
        if (has_deps):
            for d in deps:
                if d == step.__name__:
                    # A step never waits on itself.
                    logger.debug(" step %s self-wait skipped" % step.__name__)
                    go = True
                    continue

                cond = step_conditions[d]
                cond.acquire()
                if (step_status[d] is STEP_STATUS_WORKING):
                    # Dependency still running: block until it signals.
                    logger.debug(" step %s wait on dep %s" % (step.__name__, d))
                    cond.wait()
                    logger.debug(" step %s wait on dep %s cond returned" % (step.__name__, d))
                elif step_status[d] == STEP_STATUS_OK:
                    go = True
                else:
                    # Dependency finished KO: this step must not run.
                    logger.debug(" step %s has failed dep %s" % (step.__name__, d))
                    go = False
                    failed_dep = d
                cond.release()
                if (not go):
                    break
        else:
            go = True

        if (not go):
            # A dependency failed; record this step as failed too.
            logger.debug("Step %s skipped" % step.__name__)
            self.failed_steps.append(step)
            my_status = STEP_STATUS_KO
        else:
            sync_step = self.lookup_step(S)
            sync_step.__name__ = step.__name__
            sync_step.dependencies = []
            try:
                mlist = sync_step.provides
                try:
                    # Project each provided model's record-level deps onto
                    # this step (second tuple element is the dep name).
                    for m in mlist:
                        lst = self.model_dependency_graph[m.__name__]
                        nlst = map(lambda a_b: a_b[1], lst)
                        sync_step.dependencies.extend(nlst)
                except Exception as e:
                    raise e
            except KeyError:
                pass
            sync_step.debug_mode = debug_mode

            should_run = False
            try:
                # Various checks that decide whether
                # this step runs or not
                self.check_class_dependency(sync_step, self.failed_steps)  # dont run Slices if Sites failed
                self.check_schedule(sync_step, deletion)  # dont run sync_network_routes if time since last run < 1 hour
                should_run = True
            except StepNotReady:
                logger.info('Step not ready: %s' % sync_step.__name__)
                self.failed_steps.append(sync_step)
                my_status = STEP_STATUS_KO
            except Exception as e:
                logger.error('%r' % e)
                logger.log_exc("sync step failed: %r. Deletion: %r" % (sync_step, deletion))
                self.failed_steps.append(sync_step)
                my_status = STEP_STATUS_KO

            if (should_run):
                try:
                    # NOTE(review): duration is measured up to this point
                    # (dependency waits + precondition checks) and does NOT
                    # include the step execution below -- so check_duration
                    # compares the wait time against the deadline. Confirm
                    # this is intended.
                    duration = time.time() - start_time

                    logger.debug('Executing step %s, deletion=%s' % (sync_step.__name__, deletion))

                    # ** Everything is executed in a single thread. Global
                    # state is only mutated via the structures above.
                    failed_objects = sync_step(failed=list(self.failed_step_objects), deletion=deletion)

                    self.check_duration(sync_step, duration)

                    if failed_objects:
                        self.failed_step_objects.update(failed_objects)

                    logger.debug("Step %r succeeded, deletion=%s" % (sync_step.__name__, deletion))
                    my_status = STEP_STATUS_OK
                    self.update_run_time(sync_step, deletion)
                except Exception as e:
                    logger.error('Model step %r failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % (sync_step.__name__, e))
                    logger.log_exc("Exception in sync step")
                    # NOTE(review): appends the step *name* S here, whereas
                    # the other failure paths append the step object --
                    # confirm check_class_dependency tolerates both.
                    self.failed_steps.append(S)
                    my_status = STEP_STATUS_KO
            else:
                # NOTE(review): this overwrites the KO status set by the
                # StepNotReady/Exception handlers above with OK, so
                # dependents of a not-ready step still run -- confirm
                # intended.
                logger.info("Step %r succeeded due to non-run" % step)
                my_status = STEP_STATUS_OK

        try:
            # Publish our final status so steps waiting on us can proceed.
            my_cond = step_conditions[S]
            my_cond.acquire()
            step_status[S] = my_status
            my_cond.notify_all()
            my_cond.release()
        except KeyError as e:
            # No condition registered for S: nothing depends on this step.
            logger.debug('Step %r is a leaf' % step)
            pass
    finally:
        try:
            model_accessor.reset_queries()
        except:
            # this shouldn't happen, but in case it does, catch it...
            logger.log_exc("exception in reset_queries")
        model_accessor.connection_close()
def load_sync_steps(self):
    """Build the sync-step dependency graphs.

    Reads the record-level dependency graph from config, optionally merges
    the backend dependency graph, projects the result onto sync steps via
    each step's ``provides`` list, then computes the forward graph, the
    inverted deletion graph, and a topological execution order.
    """
    dep_path = Config.get("dependency_graph")
    logger.info('Loading model dependency graph from %s' % dep_path)

    try:
        # This contains dependencies between records, not sync steps.
        # BUG FIX: close the file (the original open().read() leaked it).
        with open(dep_path) as f:
            self.model_dependency_graph = json.loads(f.read())
        # BUG FIX: iterate over a snapshot -- the loop inserts missing keys
        # into the dict, and mutating a dict during .items() iteration
        # raises RuntimeError on Python 3.
        for left, lst in list(self.model_dependency_graph.items()):
            new_lst = []
            for k in lst:
                try:
                    tup = (k, k.lower())
                    new_lst.append(tup)
                    deps = self.model_dependency_graph[k]
                except KeyError:
                    # BUG FIX: narrowed from bare `except:`. Referenced
                    # model has no entry of its own; give it an empty one.
                    self.model_dependency_graph[k] = []
            self.model_dependency_graph[left] = new_lst
    except Exception:
        # BUG FIX: `raise` instead of `raise e` preserves the traceback.
        raise

    try:
        # FIXME `pl_dependency_graph` is never defined, this will always fail
        # NOTE can we remove it?
        backend_path = Config.get("pl_dependency_graph")
        logger.info('Loading backend dependency graph from %s' % backend_path)
        # This contains dependencies between backend records
        with open(backend_path) as f:
            self.backend_dependency_graph = json.loads(f.read())
        for k, v in self.backend_dependency_graph.items():
            try:
                self.model_dependency_graph[k].extend(v)
            except KeyError:
                # BUG FIX: was `self.model_dependency_graphp[k] = v` (typo),
                # which raised AttributeError and made the enclosing except
                # silently discard the whole backend graph.
                self.model_dependency_graph[k] = v
    except Exception:
        logger.info('Backend dependency graph not loaded')
        # We can work without a backend graph
        self.backend_dependency_graph = {}

    # Map each provided model name to the steps that provide it.
    provides_dict = {}
    for s in self.sync_steps:
        self.step_lookup[s.__name__] = s
        for m in s.provides:
            try:
                provides_dict[m.__name__].append(s.__name__)
            except KeyError:
                provides_dict[m.__name__] = [s.__name__]

    # Project the model-level graph onto steps. Dependencies on models no
    # step provides become phantom '#<model>' nodes.
    step_graph = {}
    phantom_steps = []
    for k, v in self.model_dependency_graph.items():
        try:
            for source in provides_dict[k]:
                if not v:
                    step_graph[source] = []
                for m, _ in v:
                    try:
                        for dest in provides_dict[m]:
                            try:
                                if dest not in step_graph[source]:
                                    step_graph[source].append(dest)
                            except KeyError:
                                # BUG FIX: narrowed from bare `except:`.
                                step_graph[source] = [dest]
                    except KeyError:
                        if m not in provides_dict:
                            try:
                                step_graph[source] += ['#%s' % m]
                            except KeyError:
                                # BUG FIX: narrowed from bare `except:`.
                                step_graph[source] = ['#%s' % m]
                            phantom_steps += ['#%s' % m]
        except KeyError:
            # no dependencies, pass
            pass

    self.dependency_graph = step_graph
    self.deletion_dependency_graph = invert_graph(step_graph)

    pp = pprint.PrettyPrinter(indent=4)
    logger.debug(pp.pformat(step_graph))
    # BUG FIX: `phantom_steps + map(...)` is a TypeError on Python 3
    # (list + map object); a list comprehension works on both 2 and 3.
    self.ordered_steps = toposort(
        self.dependency_graph,
        phantom_steps + [s.__name__ for s in self.sync_steps])
    self.ordered_steps = [i for i in self.ordered_steps if i != 'SyncObject']

    logger.info("Order of steps=%s" % self.ordered_steps)

    self.load_run_times()
def info(msg):
    """Log *msg* prefixed with the caller's file name, line number and
    function name."""
    # PERF FIX: the original called inspect.stack() four times per log
    # line; each call walks and resolves the entire call stack. Grab the
    # caller's frame record (index 1 = our caller) once and index into it.
    caller = inspect.stack()[1]
    logger.info(
        basename(str(caller[1])) + ':' + str(caller[2]) + ' ' +
        str(caller[3]) + '() ' + str(msg))