def on_complete(self, pypy=None):
    """Report the final state of the monitored process.

    On a non-zero exit code, print an error line plus the command that
    failed; otherwise print the ``end_fmt`` summary when one is set.
    When progress output was disabled, also dump the captured output.
    """
    failed = self.pypy.returncode > 0
    if failed:
        Printer.err('Error! Command ({process.pid}) ended with {process.returncode}'.format(process=self.pypy.executor.process))
        Printer.err(Command.to_string(self.pypy.executor.command))
    elif self.end_fmt:
        Printer.out(self.end_fmt.format(**dict(self=self)))

    # with progress enabled the output was already shown live
    if self.pypy.progress:
        return

    Printer.separator()
    captured = self.pypy.executor.output.read()
    if captured:
        Printer.out(captured)
def on_complete(self, pypy=None):
    """On failure, print the configured message and the tail of the output.

    Does nothing when the process succeeded (returncode <= 0).
    """
    if not self.pypy.returncode > 0:
        return

    # header: optional message, then open an indented section either way
    if self.message:
        Printer.separator()
        Printer.open()
        Printer.out(self.message)
    else:
        Printer.open()

    # if file pointer exist try to read errors and outputs
    captured = self.pypy.executor.output.read()
    if captured:
        if self.pypy.full_output:
            Printer.out('Output (last {} lines, rest in {}): ', self.tail, Paths.abspath(self.pypy.full_output))
        else:
            Printer.out('Output (last {} lines): ', self.tail)
        Printer.err(format_n_lines(captured, -self.tail, indent=Printer.indent * ' '))
    Printer.close()
def run_local_mode(debug=False):
    """Run every configured process locally and aggregate the return code.

    :type debug: bool
    :return: GlobalResult.returncode, or the last PyPy instance when debug is True
    """
    global arg_options, arg_others, arg_rest
    proc, time_limit, memory_limit = get_args()
    total = len(proc)

    if total == 1:
        # single process: no progress counter needed
        pypy = run_local_mode_one(proc[0], time_limit, memory_limit)
        GlobalResult.returncode = pypy.returncode
    else:
        # optionally we use counter
        # NOTE(review): progress.next(locals()) formats using local variable
        # names — the format string references {total}, so renaming locals
        # here would silently break the progress line.
        progress = ProgressCounter('Running {:02d} of {total:02d}')
        for p in proc:
            Printer.separator()
            progress.next(locals())
            Printer.separator()

            Printer.open()
            pypy = run_local_mode_one(p, time_limit, memory_limit)
            Printer.close()

            # keep the worst (highest) return code seen so far
            GlobalResult.returncode = max(GlobalResult.returncode, pypy.returncode)

    # NOTE(review): if proc is empty and debug is True, pypy is unbound here
    # — presumably get_args() guarantees at least one process; confirm.
    return GlobalResult.returncode if not debug else pypy
def do_work(parser, args=None):
    """Parse CLI arguments, configure a limited process run and execute it.

    :type args: list
    :type parser: utils.argparser.ArgParser
    :return: return code of the executed command
    """
    options, others, rest = parser.parse(args)

    # a command to execute is mandatory
    if not rest:
        parser.exit_usage('No command specified!', exit_code=1)

    # at least one limit (time or memory) must be given
    if (options.time_limit, options.memory_limit) == (None, None):
        parser.exit_usage('No limits specified!', exit_code=2)

    # build the monitored executor; progress output only outside batch mode
    bin_executor = BinExecutor(rest)
    pypy = PyPy(bin_executor, progress=not options.batch)

    # apply the requested limits
    pypy.error_monitor.message = None
    pypy.limit_monitor.time_limit = options.time_limit
    pypy.limit_monitor.memory_limit = options.memory_limit

    # batch mode suppresses the stdout/stderr log file
    pypy.info_monitor.stdout_stderr = None if options.batch else Paths.temp_file('exec-limit.log')

    # start process and wait for it to finish
    Printer.separator()
    pypy.start()
    pypy.join()

    return pypy.returncode
def run_pbs_mode(configs, debug=False):
    """Submit one PBS job per yaml test case, wait for completion and exit
    with the worst job return code (2 when no job produced one).

    :type debug: bool
    :type configs: scripts.config.yaml_config.ConfigPool
    """
    global arg_options, arg_others, arg_rest
    pbs_module = get_pbs_module(arg_options.host)
    # dynamic (in-place updating) output only outside batch mode
    Printer.dynamic_output = not arg_options.batch
    Printer.dyn('Parsing yaml files')
    jobs = list()
    """ :type: list[(str, PBSModule)] """

    # generate a qsub script per case and remember the submit command
    for yaml_file, yaml_config in configs.files.items():
        for case in yaml_config.get_one(yaml_file):
            pbs_run = pbs_module.Module(case)
            pbs_run.queue = arg_options.get('queue', True)
            pbs_run.ppn = arg_options.get('ppn', 1)

            pbs_content = create_pbs_job_content(pbs_module, case)
            IO.write(case.fs.pbs_script, pbs_content)

            qsub_command = pbs_run.get_pbs_command(case.fs.pbs_script)
            jobs.append((qsub_command, pbs_run))

    # start jobs
    Printer.dyn('Starting jobs')

    total = len(jobs)
    job_id = 0
    multijob = MultiJob(pbs_module.ModuleJob)
    for qsub_command, pbs_run in jobs:
        job_id += 1
        Printer.dyn('Starting jobs {:02d} of {:02d}', job_id, total)

        # qsub prints the new job id; parse it into a job object
        output = subprocess.check_output(qsub_command)
        job = pbs_module.ModuleJob.create(output, pbs_run.case)
        job.full_name = "Case {}".format(pbs_run.case)
        multijob.add(job)

    Printer.out()
    Printer.out('{} job/s inserted into queue', total)

    # first update to get more info about multijob jobs
    Printer.out()
    Printer.separator()
    Printer.dyn('Updating job status')
    multijob.update()

    # print jobs statuses
    Printer.out()
    if not arg_options.batch:
        multijob.print_status()

    Printer.separator()
    Printer.dyn(multijob.get_status_line())
    returncodes = dict()

    # wait for finish
    while multijob.is_running():
        Printer.dyn('Updating job status')
        multijob.update()
        Printer.dyn(multijob.get_status_line())

        # if some jobs changed status add new line to dynamic output remains
        jobs_changed = multijob.get_all(status=JobState.COMPLETED)
        if jobs_changed:
            Printer.out()
            Printer.separator()

        # get all jobs where was status update to COMPLETE state
        for job in jobs_changed:
            returncodes[job] = finish_pbs_job(job, arg_options.batch)

        if jobs_changed:
            Printer.separator()
            Printer.out()

        # after printing update status lets sleep for a bit
        if multijob.is_running():
            time.sleep(5)

    Printer.out(multijob.get_status_line())
    Printer.out('All jobs finished')

    # get max return code or number 2 if there are no returncodes
    returncode = max(returncodes.values()) if returncodes else 2
    sys.exit(returncode)
def run_local_mode(configs, debug=False):
    """Run every yaml test case locally in parallel and print a summary.

    Fix: removed the dead local ``returncode = multithread.returncode``
    assignment in the summary loop — it was never read afterwards.

    :type debug: bool
    :type configs: scripts.config.yaml_config.ConfigPool
    :return: the runner itself when debug is True, otherwise its return code
    """
    global arg_options, arg_others, arg_rest
    runner = ParallelThreads(arg_options.parallel)
    runner.stop_on_error = not arg_options.keep_going

    for yaml_file, yaml_config in configs.files.items():
        for case in yaml_config.get_one(yaml_file):
            # create main process which first clean output dir
            # and then execute test following with comparisons
            multi_process = create_process_from_case(case)
            runner.add(multi_process)

    # run!
    runner.start()
    while runner.is_running():
        time.sleep(1)

    Printer.separator()
    Printer.out('Summary: ')
    Printer.open()

    for thread in runner.threads:
        multithread = thread
        """ :type: RuntestMultiThread """

        GlobalResult.add(multithread)

        # clean-up failed: nothing else ran for this case
        if multithread.clean.with_error():
            Printer.out("[{:^6}]:{:3} | Could not clean directory '{}': {}", 'ERROR',
                        multithread.clean.returncode, multithread.clean.dir, multithread.clean.error)
            continue

        # the test run itself failed
        if not multithread.pypy.with_success():
            Printer.out("[{:^6}]:{:3} | Run error, case: {}",
                        multithread.pypy.returncode_map.get(str(multithread.pypy.returncode), 'ERROR'),
                        multithread.pypy.returncode, multithread.pypy.case.to_string())
            continue

        # run succeeded but output comparison failed: list per-file details
        if multithread.comp.with_error():
            Printer.out("[{:^6}]:{:3} | Compare error, case: {}, Details: ", 'FAILED',
                        multithread.comp.returncode, multithread.pypy.case.to_string())
            Printer.open(2)
            for t in multithread.comp.threads:
                if t:
                    Printer.out('[{:^6}]: {}', 'OK', t.name)
                else:
                    Printer.out('[{:^6}]: {}', 'FAILED', t.name)
            Printer.close(2)
            continue

        Printer.out("[{:^6}]:{:3} | Test passed: {}", 'PASSED',
                    multithread.pypy.returncode, multithread.pypy.case.to_string())
    Printer.close()

    # exit with runner's exit code
    GlobalResult.returncode = runner.returncode
    return runner if debug else runner.returncode
def run_pbs_mode(debug=False):
    """Prepare PBS job files, submit them, wait for completion and return
    the worst job return code (2 when no job produced one).

    :type debug: bool
    :return: 0 in debug mode (jobs prepared but not submitted), otherwise
             the max of the collected per-job return codes, or 2 if none
    """
    pbs_module = get_pbs_module()
    jobs = prepare_pbs_files(pbs_module)
    # in debug mode only the scripts are generated, nothing is submitted
    if debug:
        return 0

    # start jobs
    Printer.dyn('Starting jobs')

    total = len(jobs)
    job_id = 0
    multijob = MultiJob(pbs_module.ModuleJob)
    for qsub_command, pbs_run in jobs:
        job_id += 1
        Printer.dyn('Starting jobs {:02d} of {:02d}', job_id, total)

        # qsub prints the new job id; parse it into a job object
        output = subprocess.check_output(qsub_command)
        job = pbs_module.ModuleJob.create(output, pbs_run.case)
        job.full_name = "Case {}".format(pbs_run.case)
        multijob.add(job)

    Printer.out()
    Printer.out('{} job/s inserted into queue', total)

    # first update to get more info about multijob jobs
    Printer.out()
    Printer.separator()
    Printer.dyn('Updating job status')
    multijob.update()

    # print jobs statuses
    Printer.out()
    if not arg_options.batch:
        multijob.print_status()

    Printer.separator()
    Printer.dyn(multijob.get_status_line())
    returncodes = dict()

    # wait for finish
    while multijob.is_running():
        Printer.dyn('Updating job status')
        multijob.update()
        Printer.dyn(multijob.get_status_line())

        # if some jobs changed status add new line to dynamic output remains
        jobs_changed = multijob.get_all(status=JobState.COMPLETED)
        if jobs_changed:
            Printer.out()
            Printer.separator()

        # get all jobs where was status update to COMPLETE state
        for job in jobs_changed:
            returncodes[job] = finish_pbs_job(job, arg_options.batch)

        if jobs_changed:
            Printer.separator()
            Printer.out()

        # after printing update status lets sleep for a bit
        if multijob.is_running():
            time.sleep(5)

    Printer.out(multijob.get_status_line())
    Printer.out('All jobs finished')

    # get max return code or number 2 if there are no returncodes
    return max(returncodes.values()) if returncodes else 2