def remake(non_empty_job_list):
    '''Remake the selected targets (equivalent to clean and make). '''
    non_empty_job_list = list(non_empty_job_list)

    from compmake.ui.console import ask_question

    if get_compmake_status() == compmake_status_interactive:
        question = ("Should I clean and remake %d jobs? [y/n] " %
                    len(non_empty_job_list))
        answer = ask_question(question)
        if not answer:
            info('Not cleaned.')
            return

    for job in non_empty_job_list:
        mark_remake(job)

    manager = ManagerLocal()
    manager.add_targets(non_empty_job_list)
    manager.process()

    if manager.failed:
        return RET_CODE_JOB_FAILED
    else:
        return 0
def reload(module):
    '''Reloads a module.

    Usage::

        reload module=my_module
    '''
    if module.startswith('compmake'):
        try:
            dave = pwd.getpwuid(os.getuid())[0]
        except:
            dave = 'Dave'
        user_error("I'm sorry, %s. I'm afraid I can't do that." % dave)
        return

    try:
        # otherwise import("A.B") returns A instead of A.B
        m = __import__(module, fromlist=['dummy'])
    except Exception as e:
        raise UserError('Cannot find module "%s": %s.' % (module, e))

    try:
        imp.reload(m)
    except Exception as e:
        raise UserError('Obtained this exception while reloading the module:'
                        ' %s' % e)

    info('Reloaded module "%s".' % module)
def display_stats(job_list):
    states_order = [Cache.NOT_STARTED, Cache.IN_PROGRESS,
                    Cache.MORE_REQUESTED, Cache.FAILED, Cache.DONE]

    # initialize counters to 0
    states2count = dict((x, 0) for x in states_order)
    function2state2count = {}
    total = 0

    for job_id in job_list:
        cache = get_job_cache(job_id)
        states2count[cache.state] += 1
        total += 1

        function_id = get_job(job_id).command_desc
        # initialize record if not present
        if function_id not in function2state2count:
            function2state2count[function_id] = \
                dict([(x, 0) for x in states_order] + [('all', 0)])
        # update
        function2state2count[function_id][cache.state] += 1
        function2state2count[function_id]['all'] += 1

        if total == 100:
            # notify the user once, after the first 100 jobs have been loaded
            # XXX: use standard method
            info("Loading a large number of jobs...")

    print("Found %s jobs in total. Summary by state:" % total)

    for state in states_order:
        desc = "%30s" % Cache.state2desc[state]
        # colorize output
        desc = colored(desc, **state2color[state])
        num = states2count[state]
        if num > 0:
            print("%s: %5d" % (desc, num))

    print("Summary by function:")
    for function_id, function_stats in function2state2count.items():
        ndone = function_stats[Cache.DONE]
        nfailed = function_stats[Cache.FAILED]
        nrest = function_stats['all'] - ndone - nfailed
        failed_s = "%5d failed" % nfailed
        if nfailed > 0:
            failed_s = colored(failed_s, color='red')
        s = "%5d done, %s, %5d to do." % (ndone, failed_s, nrest)
        print(" %30s(): %s" % (function_id, s))
def process(self):
    ''' Start processing jobs. '''
    # precompute job priorities
    self.priorities = compute_priorities(self.all_targets)

    if not self.todo:
        info('Nothing to do.')
        return True

    self.process_init()

    try:
        while self.todo:
            assert self.ready_todo or self.processing
            assert not self.failed.intersection(self.todo)

            self.publish_progress()
            self.instance_some_jobs()
            self.publish_progress()

            if self.ready_todo and not self.processing:
                # jobs are ready, but no computing resource can take them
                publish('manager-failed', reason='No resources.',
                        targets=self.targets, done=self.done,
                        todo=self.todo, failed=self.failed,
                        ready=self.ready_todo,
                        processing=self.processing,
                        all_targets=self.all_targets)
                raise CompmakeException(
                    'Cannot find computing resources, giving up.')

            self.publish_progress()
            self.loop_until_something_finishes()

        self.process_finished()

        publish('manager-succeeded',
                targets=self.targets, done=self.done,
                all_targets=self.all_targets,
                todo=self.todo, failed=self.failed,
                ready=self.ready_todo, processing=self.processing)

        return True

    except JobInterrupted:
        # XXX I'm getting confused
        raise KeyboardInterrupt
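# Usage sketch (illustrative, not part of the original source): console
# commands such as remake and more above drive a Manager by adding targets
# and then calling process(), inspecting 'failed' afterwards. 'job_list' is
# a hypothetical list of job ids.
#
#   manager = ManagerLocal()
#   manager.add_targets(job_list)
#   manager.process()
#   if manager.failed:
#       return RET_CODE_JOB_FAILED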
def more(non_empty_job_list, loop=1):
    '''Makes more of the selected targets. '''
    non_empty_job_list = list(non_empty_job_list)
    loop = int(loop)

    for x in range(loop):
        if loop > 1:
            info("------- more: iteration %d --- " % x)

        for job in non_empty_job_list:
            mark_more(job)

        manager = ManagerLocal()
        manager.add_targets(non_empty_job_list, more=True)
        manager.process()

        if manager.failed:
            return RET_CODE_JOB_FAILED

    return 0
def config(args):
    '''Get/set configuration parameters.

    Call like:

        @> config <switch> <value>

    Without arguments, shows all configuration switches.
    '''
    if not args:
        # show
        show_config(sys.stdout)
        return

    name = args.pop(0)

    if not args:
        if name not in config_switches:
            raise UserError("I don't know the switch '%s'." % name)
        info('config %s %s' % (name, compmake_config.__dict__[name]))
        return

    set_config_from_strings(name, args)
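# Illustrative console usage of 'config' (not part of the original source);
# 'cluster_conf' is a switch referenced by clustmore below, and
# 'cluster.yaml' is a hypothetical value:
#
#   @> config                            # show all switches
#   @> config cluster_conf               # show the current value
#   @> config cluster_conf cluster.yaml  # set a new value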
def clean(job_list):
    '''Cleans the result of the selected computation
    (or everything if nothing is specified). '''
    job_list = list(job_list)

    if not job_list:
        job_list = list(all_jobs())

    if not job_list:
        return

    from compmake.ui.console import ask_question

    if get_compmake_status() == compmake_status_interactive:
        question = "Should I clean %d jobs? [y/n] " % len(job_list)
        answer = ask_question(question)
        if not answer:
            info('Not cleaned.')
            return

    for job_id in job_list:
        clean_target(job_id)
def parmore(non_empty_job_list, loop=1):
    '''Parallel equivalent of "more". '''
    non_empty_job_list = list(non_empty_job_list)
    loop = int(loop)

    for x in range(loop):
        if loop > 1:
            info("------- parmore: iteration %d --- " % x)

        for job in non_empty_job_list:
            mark_more(job)

        manager = MultiprocessingManager()
        manager.add_targets(non_empty_job_list, more=True)
        manager.process()

        if manager.failed:
            return RET_CODE_JOB_FAILED

    return 0
def clustmore(non_empty_job_list, loop=1):
    '''Cluster equivalent of "more".

    Note: you should use the Redis backend to use multiprocessing.
    '''
    cluster_conf = compmake_config.cluster_conf  # @UndefinedVariable
    hosts = parse_yaml_configuration(open(cluster_conf))
    loop = int(loop)

    for x in range(loop):
        if loop > 1:
            info("------- clustmore: iteration %d --- " % x)

        for job in non_empty_job_list:
            mark_more(job)

        manager = ClusterManager(hosts)
        manager.add_targets(non_empty_job_list, more=True)
        manager.process()

        if manager.failed:
            return RET_CODE_JOB_FAILED

    return 0
def my_host_failed(self, host):
    self.failed_hosts.add(host)
    # remove every occurrence of the host from the ready queue
    while host in self.hosts_ready:
        self.hosts_ready.remove(host)
    info('Host %s failed; removing it from the ready queue (failed so far: %s)' %
         (host, self.failed_hosts))
def set_namespace(n):
    info('Using namespace %r.' % n)
    sys.modules["compmake.jobs.storage"].namespace = n
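# Illustrative usage (not part of the original source): selecting a namespace
# before defining jobs stores them under that namespace; 'experiment1' is a
# hypothetical name.
#
#   set_namespace('experiment1')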