def make_single(job_list, more=False):
    ''' Makes a single job -- not for users, but for slave mode. '''
    if len(job_list) > 1:
        raise UserError("I want only one job")

    from compmake import jobs
    try:
        job_id = job_list[0]
        if more:
            mark_more(job_id)
        jobs.make(job_id, more)
        return 0
    except JobFailed:
        return RET_CODE_JOB_FAILED
def more(non_empty_job_list, loop=1):
    '''Makes more of the selected targets. '''
    non_empty_job_list = list(non_empty_job_list)

    for x in range(int(loop)):
        if loop > 1:
            info("------- more: iteration %d --- " % x)

        for job in non_empty_job_list:
            mark_more(job)

        manager = ManagerLocal()
        manager.add_targets(non_empty_job_list, more=True)
        manager.process()

        if manager.failed:
            return RET_CODE_JOB_FAILED

    return 0
def parmore(non_empty_job_list, loop=1):
    '''Parallel equivalent of "more". '''
    non_empty_job_list = list(non_empty_job_list)

    for x in range(int(loop)):
        if loop > 1:
            info("------- parmore: iteration %d --- " % x)

        for job in non_empty_job_list:
            mark_more(job)

        manager = MultiprocessingManager()
        manager.add_targets(non_empty_job_list, more=True)
        manager.process()

        if manager.failed:
            return RET_CODE_JOB_FAILED

    return 0
def clustmore(non_empty_job_list, loop=1):
    '''Cluster equivalent of "more".

        Note: you should use the Redis backend to use multiprocessing. '''
    cluster_conf = compmake_config.cluster_conf  # @UndefinedVariable
    hosts = parse_yaml_configuration(open(cluster_conf))

    for x in range(int(loop)):
        if loop > 1:
            info("------- clustmore: iteration %d --- " % x)

        for job in non_empty_job_list:
            mark_more(job)

        manager = ClusterManager(hosts)
        manager.add_targets(non_empty_job_list, more=True)
        manager.process()

        if manager.failed:
            return RET_CODE_JOB_FAILED

    return 0
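
# Usage sketch (an assumption, not part of this module): these handlers back the
# corresponding console commands, so within a compmake session one would type
# e.g. "more" or "parmore loop=5" after selecting some targets. Called
# programmatically, the pattern looks like the following, where 'plot-all' is a
# hypothetical job id that must already exist in the current storage backend:
#
#     ret = more(['plot-all'], loop=2)
#     if ret == RET_CODE_JOB_FAILED:
#         info('Job plot-all failed.')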