def finish_pbs(multijob, finish_method):
    """
    Block until every job in *multijob* has left the PBS queue, running
    *finish_method* on each job as it reaches the COMPLETED state.

    :return: tuple (ResultHolder of finished runs, the multijob itself)
    """
    # initial refresh so the very first status report is meaningful
    Printer.all.sep()
    Printer.all.out('Determining job status')
    multijob.update()

    # indented per-job status listing
    with Printer.all.with_level():
        multijob.print_status()

    Printer.console.sep()
    Printer.console.dyn(multijob.get_status_line())

    result = ResultHolder()
    # back off between polls: 300 ms up to 5 s, in 5 steps
    sleeper = DynamicSleep(min=300, max=5000, steps=5)

    while multijob.is_running():
        Printer.console.dyn('Updating job status')
        multijob.update()
        Printer.console.dyn(multijob.get_status_line())

        # jobs that just reached COMPLETED get a permanent (non-dynamic) report
        completed = multijob.get_all(status=JobState.COMPLETED)
        if completed:
            Printer.console.newline()
            Printer.all.sep()
            for job in completed:
                runner = finish_method(job, not Printer.batched.is_muted())
                if runner:
                    result.add(runner)
            Printer.console.newline()

        # skip the sleep once everything has already finished
        if multijob.is_running():
            sleeper.sleep()

    Printer.all.sep()
    # final summary
    Printer.all.out(multijob.get_status_line())
    Printer.all.out('All jobs finished')
    return result, multijob
def __init__(self, executor, progress=False, period=.5):
    """Wrap *executor* in a monitored thread, polled every *period* seconds."""
    super(PyPy, self).__init__(name='pypy')
    self.executor = executor
    self.period = period
    self.case = None
    self._progress = None

    # lifecycle events fired while the wrapped process runs
    self.on_process_start, self.on_process_complete, self.on_process_update = \
        Event(), Event(), Event()

    # register the standard monitor set on this thread
    for attr, factory in (
            ('limit_monitor', monitors.LimitMonitor),
            ('start_monitor', monitors.StartInfoMonitor),
            ('end_monitor', monitors.EndInfoMonitor),
            ('progress_monitor', monitors.ProgressMonitor),
            ('output_monitor', monitors.OutputMonitor),
            ('error_monitor', monitors.ErrorMonitor)):
        setattr(self, attr, factory(self))
    # end-info reporting is off by default
    self.end_monitor.deactivate()

    self.log = False
    self.custom_error = None

    # goes through the `progress` property (different settings in batched mode)
    self.progress = progress

    # dynamic sleeper used between status polls
    self.sleeper = DynamicSleep()
    # path to full output, filled in later
    self.full_output = None
def __init__(self, executor, period=.5):
    """Wrap *executor* in a monitored thread, polled every *period* seconds."""
    super(PyPy, self).__init__(name='pypy')
    self.executor = executor
    self.period = period

    # filled in later by whoever configures this run
    self.case = None
    self.status_file = None
    self.extra = dict()
    self._short_sleep = False

    # lifecycle events fired while the wrapped process runs
    self.on_process_start = Event()
    self.on_process_complete = Event()
    self.on_process_update = Event()
    # write the status file (if configured) once the process is done
    self.on_process_complete += self.generate_status_file

    # single aggregated monitor
    self.monitor = monitors.MainMonitor(self)

    self.log = False
    self.custom_error = None

    # dynamic sleeper used between status polls
    self.sleeper = DynamicSleep()
    # path to full output, filled in later
    self.full_output = None
class PyPy(ExtendedThread):
    """
    Main execution wrapper: runs a command through a BinExecutor while a set
    of registered monitors watches the process (PyPy = BinExecutor + monitors).

    :type executor : scripts.core.execution.BinExecutor
    :type case : ConfigCase
    """

    # maps a process return code (as a string) to a human-readable status
    returncode_map = {
        '0': 'SUCCESS',
        '1': 'ERROR',
        '5': 'TERM',
        'None': 'SKIP',
        '-1': 'SKIP',
    }

    def __init__(self, executor, progress=False, period=.5):
        # :param executor: process wrapper that actually spawns the command
        # :param progress: initial value for the `progress` property
        # :param period: polling period (seconds) used by the monitors
        super(PyPy, self).__init__(name='pypy')
        self.executor = executor
        self.period = period
        self.case = None
        self._progress = None
        # lifecycle events fired from _run()
        self.on_process_start = Event()
        self.on_process_complete = Event()
        self.on_process_update = Event()

        # register monitors; end-info reporting is off by default
        self.limit_monitor = monitors.LimitMonitor(self)
        self.start_monitor = monitors.StartInfoMonitor(self)
        self.end_monitor = monitors.EndInfoMonitor(self)
        self.progress_monitor = monitors.ProgressMonitor(self)
        self.output_monitor = monitors.OutputMonitor(self)
        self.error_monitor = monitors.ErrorMonitor(self)
        self.end_monitor.deactivate()

        self.log = False
        self.custom_error = None

        # different settings in batched mode (goes through the property setter)
        self.progress = progress

        # dynamic sleeper used between status polls
        self.sleeper = DynamicSleep()

        # path to full output, filled in later
        self.full_output = None

    @property
    def escaped_command(self):
        # shell-safe rendering of the underlying command
        return self.executor.escaped_command

    @property
    def progress(self):
        return self._progress

    @progress.setter
    def progress(self, value):
        self._progress = value

    def _run(self):
        """Run the executor to completion, firing start/update/complete events."""
        # start executor and wait until the OS process actually exists
        self.executor.start()
        wait_for(self.executor, 'process')

        if self.executor.broken:
            Printer.all.err('Could not start command "{}": {}',
                            Command.to_string(self.executor.command),
                            self.executor.exception)
            self.returncode = self.executor.returncode

        # if process is not broken, propagate start event
        # NOTE(review): the start event fires even when the executor is broken —
        # the comment above suggests an early return may be missing here; confirm.
        self.on_process_start(self)

        # poll until the process exits, notifying monitors on each tick
        while self.executor.is_running():
            self.on_process_update(self)
            self.sleeper.sleep()

        # get return code; custom_error overrides any non-zero result
        rc = getattr(self.executor, 'returncode', None)
        self.returncode = rc if self.custom_error is None or str(rc) == "0" else self.custom_error

        # propagate on_complete event
        self.on_process_complete(self)

    def to_json(self):
        # serializable summary of this run; richer when a case is attached
        if self.case:
            return dict(
                returncode=self.returncode,
                name=self.case.as_string,
                case=self.case,
                log=self.full_output
            )
        json = super(PyPy, self).to_json()
        json['log'] = self.full_output
        json['type'] = 'exec'
        return json

    def dump(self):
        # snapshot of this run for later result aggregation
        return PyPyResult(self)
class PyPy(ExtendedThread):
    """
    Main execution wrapper: runs a command through a BinExecutor while a
    MainMonitor watches the process (PyPy = BinExecutor + monitors).

    :type executor : scripts.core.execution.BinExecutor
    :type case : ConfigCase
    """

    # maps a process return code (as a string) to a human-readable status
    returncode_map = {
        '0': 'SUCCESS',
        '1': 'ERROR',
        '5': 'TERM',
        'None': 'SKIP',
        '-1': 'SKIP',
    }

    def __init__(self, executor, period=.5):
        """
        :param executor: process wrapper that actually spawns the command
        :param period: polling period (seconds) used by the monitor
        """
        super(PyPy, self).__init__(name='pypy')
        self.executor = executor
        self.period = period
        self.case = None
        # when set, a JSON status file is written on completion
        self.status_file = None
        self.extra = dict()
        # set by wakeup() to switch to fast polling
        self._short_sleep = False
        # lifecycle events fired from _run()
        self.on_process_start = Event()
        self.on_process_complete = Event()
        self.on_process_update = Event()
        self.on_process_complete += self.generate_status_file

        self.monitor = monitors.MainMonitor(self)

        self.log = False
        self.custom_error = None
        # dynamic sleeper used between status polls
        self.sleeper = DynamicSleep()
        # path to full output, filled in later
        self.full_output = None

    def wakeup(self):
        """Interrupt the sleeper so _run() resumes polling almost immediately."""
        try:
            self._short_sleep = True
            self.sleeper.event.set()
        except Exception:
            # best-effort: narrowed from a bare `except:`, which would also
            # swallow KeyboardInterrupt/SystemExit
            pass

    @property
    def escaped_command(self):
        # shell-safe rendering of the underlying command
        return self.executor.escaped_command

    def _run(self):
        """Run the executor to completion, firing start/update/complete events."""
        # start executor and wait until the OS process actually exists
        self.executor.start()
        wait_for(self.executor, 'process')

        if self.executor.broken:
            printf.error('Could not start command "{}": {}',
                         Command.to_string(self.executor.command),
                         self.executor.exception)

        self.returncode = RC(self.executor.returncode)

        # if process is not broken, propagate start event
        self.on_process_start(self)

        # poll until the process exits; wakeup() requests short sleeps
        while self.executor.is_running():
            self.on_process_update(self)
            if self._short_sleep:
                time.sleep(0.01)
            else:
                self.sleeper.sleep()

        # get return code; custom_error overrides any non-zero result
        rc = getattr(self.executor, 'returncode', None)
        self.returncode = RC(rc if self.custom_error is None or str(rc) == "0" else self.custom_error)

        # reverse return code if death test is set
        if self.case and self.case.death_test is not None:
            self.returncode = RC(
                self.case.death_test.reverse_return_code(self.returncode))
            self.returncode.reversed = self.case.death_test.value != self.case.death_test.FALSE

        # propagate on_complete event
        self.on_process_complete(self)

    def status(self):
        """Collect run metadata: return code, duration, user/host and commit."""
        import getpass
        import platform

        result = dict(
            returncode=self.returncode(),
            duration=self.duration,
            username=getpass.getuser(),
            hostname=platform.node(),
            # node name without domain and trailing digits ('node12' -> 'node')
            nodename=platform.node().split('.')[0].strip('0123456789'),
            commit=self.get_commit(),
        )
        if self.case:
            result.update(self.case.info)
        return result

    def to_json(self):
        # serializable summary of this run; richer when a case is attached
        if self.case:
            return dict(
                returncode=self.returncode(),
                name=self.case.as_string,
                case=self.case,
                log=self.full_output
            )
        json = super(PyPy, self).to_json()
        json['log'] = self.full_output
        json['type'] = 'exec'
        return json

    def dump(self):
        # snapshot of this run for later result aggregation
        return PyPyResult(self)

    @classmethod
    def get_commit(cls):
        """
        Calls git show on git root to determine hash and unix timestamp
        of the current commit (HEAD).

        :return: dict(hash=..., date=...) or None when git info is unavailable
        """
        import subprocess
        try:
            root = Paths.flow123d_root()
            # get current hash (%H) and date (%ct) from the git repo
            result = subprocess.check_output(
                'git show -s --format=%H,%ct HEAD'.split(),
                cwd=root).decode()
            sha, date = str(result).strip().split(',')
            return dict(hash=sha, date=int(date))
        except Exception:
            # narrowed from a bare `except:` (which would also swallow
            # KeyboardInterrupt); git missing / not a repo -> None
            return None

    @classmethod
    def generate_status_file(cls, target):
        """
        Will generate status file if target has option turned on
        :type target: PyPy
        """
        if target.status_file:
            IO.write(
                target.status_file,
                json.dumps(target.status(), indent=4)
            )
            output_dir = Paths.dirname(target.status_file)
            files = Paths.browse(
                output_dir,
                [PathFilters.filter_wildcards('*/profiler_info_*.log.json')])
            # profiler json is missing? write a dummy so downstream tooling
            # always finds a profiler file
            if not files:
                IO.write(
                    Paths.join(output_dir, 'profiler_info_dummy.log.json'),
                    '{}')

    def __repr__(self):
        if self.case:
            return str(self.case)
        return super().__repr__()