def skipped_builder(*args, **kwargs):
    # Build a skip result; comments collected so far (if any) replace
    # the generic explanation.
    builder = JobResultBuilder(
        outcome=IJobResult.OUTCOME_SKIP,
        comments=_("Explicitly skipped before execution"))
    if self._current_comments != "":
        builder.comments = self._current_comments
    return builder
def _run_command(self, job, environ):
    """
    Run the command embedded in ``job`` and collect its IO.

    :param job:
        The job whose command is executed.
    :param environ:
        Environment passed through to :meth:`execute_job()`.
    :returns:
        A JobResultBuilder with the outcome, return code, the path of
        the compressed IO log and the measured execution duration.
    """
    start_time = time.time()
    slug = slugify(job.id)
    # Plain-text copies of stdout/stderr are written next to the
    # compressed record file, named after the job id.
    output_writer = CommandOutputWriter(
        stdout_path=os.path.join(
            self._jobs_io_log_dir, "{}.stdout".format(slug)),
        stderr_path=os.path.join(
            self._jobs_io_log_dir, "{}.stderr".format(slug)))
    io_log_gen = IOLogRecordGenerator()
    log = os.path.join(self._jobs_io_log_dir, "{}.record.gz".format(slug))
    # Stream IOLogRecord entries straight into a gzip-compressed file
    # while the command runs; the writer is connected only for the
    # duration of the execution.
    with gzip.open(log, mode='wb') as gzip_stream, io.TextIOWrapper(
            gzip_stream, encoding='UTF-8') as record_stream:
        writer = IOLogRecordWriter(record_stream)
        io_log_gen.on_new_record.connect(writer.write_record)
        # Chain of delegates that observe the command IO in real time:
        # UI feedback, record generation, application delegate and the
        # plain-text file writer.
        delegate = extcmd.Chain([
            self._job_runner_ui_delegate,
            io_log_gen,
            self._command_io_delegate,
            output_writer])
        ecmd = extcmd.ExternalCommandWithDelegate(delegate)
        return_code = self.execute_job(job, environ, ecmd, self._stdin)
        io_log_gen.on_new_record.disconnect(writer.write_record)
    # Map the process exit status onto a job outcome: 0 passes, a
    # negative value (killed by signal) is a crash, anything else fails.
    if return_code == 0:
        outcome = IJobResult.OUTCOME_PASS
    elif return_code < 0:
        outcome = IJobResult.OUTCOME_CRASH
    else:
        outcome = IJobResult.OUTCOME_FAIL
    return JobResultBuilder(
        outcome=outcome,
        return_code=return_code,
        io_log_filename=log,
        execution_duration=time.time() - start_time)
def _run_single_job_with_ui_loop(self, job, job_state, ui):
    """
    Run one job, interacting with the user as required by its plugin.

    Loops until a final result builder is produced; the loop restarts
    (``continue``) when the user adds a comment or asks to re-run the
    job during verification.
    """
    comments = ""
    while True:
        if job.plugin in ('user-interact', 'user-interact-verify',
                          'user-verify', 'manual'):
            ui.notify_about_purpose(job)
            if (self.is_interactive and
                    job.plugin in ('user-interact',
                                   'user-interact-verify',
                                   'manual')):
                ui.notify_about_steps(job)
                # Manual jobs have no command to wait for; go straight
                # to running. Otherwise ask the user what to do.
                if job.plugin == 'manual':
                    cmd = 'run'
                else:
                    cmd = ui.wait_for_interaction_prompt(job)
                if cmd == 'run' or cmd is None:
                    result_builder = self.runner.run_job(
                        job, job_state, self.config, ui).get_builder(
                        comments=comments)
                elif cmd == 'comment':
                    new_comment = input(self.C.BLUE(
                        _('Please enter your comments:') + '\n'))
                    if new_comment:
                        comments += new_comment + '\n'
                    # Re-enter the loop so the job can still be run
                    continue
                elif cmd == 'skip':
                    result_builder = JobResultBuilder(
                        outcome=IJobResult.OUTCOME_SKIP,
                        comments=_("Explicitly skipped before"
                                   " execution"))
                    # User-entered comments override the stock text
                    if comments != "":
                        result_builder.comments = comments
                    break
                elif cmd == 'quit':
                    raise SystemExit()
            else:
                # Non-interactive session: just run the job
                result_builder = self.runner.run_job(
                    job, job_state, self.config, ui).get_builder()
        else:
            if 'noreturn' in job.get_flag_set():
                ui.noreturn_job()
            result_builder = self.runner.run_job(
                job, job_state, self.config, ui).get_builder()
        if (self.is_interactive and
                result_builder.outcome == IJobResult.OUTCOME_UNDECIDED):
            # Undecided outcome needs manual verification by the user;
            # the callback may raise ReRunJob to repeat the whole cycle.
            try:
                if comments != "":
                    result_builder.comments = comments
                ui.notify_about_verification(job)
                self._interaction_callback(
                    self.runner, job, result_builder, self.config)
            except ReRunJob:
                continue
        break
    return result_builder
def _make_result_for(self, job):
    """Create a canned passing result appropriate for *job*."""
    builder = JobResultBuilder(outcome='pass')
    # 'local' and 'resource' jobs get no synthetic IO log;
    # every other plugin kind does.
    if job.plugin not in ('local', 'resource'):
        builder.io_log = [
            (0, 'stdout', b'IO-LOG-STDOUT\n'),
            (1, 'stderr', b'IO-LOG-STDERR\n'),
        ]
    return builder.get_result()
def _get_dry_run_result(self, job):
    """
    Internal method of JobRunner.

    Returns a result that is used when running in dry-run mode (where
    we don't really test anything).
    """
    builder = JobResultBuilder(
        outcome=IJobResult.OUTCOME_SKIP,
        comments=_("Job skipped in dry-run mode"))
    return builder.get_result()
def run_job(self, job, job_state, config=None, ui=None): """ Run the specified job an return the result. :param job: A JobDefinition to run :param job_state: The JobState associated to the job to execute. :param config: A PlainBoxConfig that may influence how this job is executed. This is only used for the environment variables (that should be specified in the environment but, for simplicity in certain setups, can be pulled from a special section of the configuration file. :param ui: A IJobRunnerUI object (optional) which will be used do relay external process interaction events during the execution of this job. :returns: A IJobResult subclass that describes the result :raises ValueError: In the future, this method will not run jobs that don't themselves validate correctly. Right now this is not enforced. This method is the entry point for running all kinds of jobs. Typically execution blocks while a command, embeded in many jobs, is running in another process. How a job is executed depends mostly on the value of the :attr:`plainbox.abc.IJobDefinition.plugin` field. The result of a job may in some cases be OUTCOME_UNDECIDED, in which case the application should ask the user what the outcome is (and present sufficient information to make that choice, typically this is the job description and the output of the command) """ # TRANSLATORS: %r is the name of the job logger.info(_("Running %r"), job) func_name = "run_{}_job".format(job.plugin.replace('-', '_')) try: runner = getattr(self, func_name) except AttributeError: return JobResultBuilder( outcome=IJobResult.OUTCOME_NOT_IMPLEMENTED, comments=_('This type of job is not supported') ).get_result() else: if self._dry_run and job.plugin not in self._DRY_RUN_PLUGINS: return self._get_dry_run_result(job) else: self._job_runner_ui_delegate.ui = ui try: return runner(job, job_state, config) finally: self._job_runner_ui_delegate.ui = None
def _make_cert_attachments(self):
    """Add fake certification attachment jobs and canned results."""
    state = self.manager.default_device_context.state
    for partial_id in ('dmi_attachment', 'sysfs_attachment',
                       'udev_attachment'):
        job = JobDefinition({
            'id': CERTIFICATION_NS + partial_id,
            'plugin': 'attachment',
        })
        io_log = [
            (0, 'stdout',
             'STDOUT-{}\n'.format(partial_id).encode('utf-8')),
            (1, 'stderr',
             'STDERR-{}\n'.format(partial_id).encode('utf-8')),
        ]
        result = JobResultBuilder(io_log=io_log).get_result()
        state.add_unit(job)
        state.update_job_result(job, result)
def test_smoke_disk(self):
    """Every disk-backed field round-trips through the result."""
    builder = JobResultBuilder()
    for field, value in [('comments', 'it works'),
                         ('execution_duration', 0.1),
                         ('io_log_filename', 'log'),
                         ('outcome', 'pass'),
                         ('return_code', 0)]:
        setattr(builder, field, value)
    result = builder.get_result()
    self.assertEqual(result.comments, "it works")
    self.assertEqual(result.execution_duration, 0.1)
    self.assertEqual(result.io_log_filename, 'log')
    self.assertEqual(result.outcome, "pass")
    self.assertEqual(result.return_code, 0)
    # Sanity check: the builder we can re-create is identical
    self.assertEqual(result.get_builder(), builder)
def run_job(self, job, job_state, environ=None, ui=None):
    """
    Produce a canned passing result for resource jobs.

    Only one resource object is created by this runner, except for the
    'graphics_card' resource job which creates two objects to simulate
    hybrid graphics.  Non-resource jobs are delegated to the parent
    runner.
    """
    if job.plugin != 'resource':
        return super().run_job(job, job_state, environ, ui)
    # Pick the canned output first, then assemble the builder.
    if job.partial_id == 'graphics_card':
        io_log = [(0, 'stdout', b'a: b\n'),
                  (1, 'stdout', b'\n'),
                  (2, 'stdout', b'a: c\n')]
    else:
        io_log = [(0, 'stdout', b'a: b\n')]
    builder = JobResultBuilder()
    builder.io_log = io_log
    builder.outcome = 'pass'
    builder.return_code = 0
    return builder.get_result()
def test_smoke_memory(self):
    """Every in-memory field round-trips through the result."""
    builder = JobResultBuilder()
    for field, value in [('comments', 'it works'),
                         ('execution_duration', 0.1),
                         ('io_log', [(0, 'stdout', b'ok\n')]),
                         ('outcome', 'pass'),
                         ('return_code', 0)]:
        setattr(builder, field, value)
    result = builder.get_result()
    self.assertEqual(result.comments, "it works")
    self.assertEqual(result.execution_duration, 0.1)
    self.assertEqual(
        result.io_log,
        (IOLogRecord(delay=0, stream_name='stdout', data=b'ok\n'), ))
    self.assertEqual(result.outcome, "pass")
    self.assertEqual(result.return_code, 0)
    # Sanity check: the builder we can re-create is identical
    self.assertEqual(result.get_builder(), builder)
def run_single_job_with_ui(self, job, ui):
    """
    Run one job (or synthesize a result if it cannot start), record the
    result in the session state and notify ``ui`` along the way.

    A checkpoint is taken before and after execution so that a crash
    mid-job can be detected on resume via ``running_job_name``.
    """
    job_start_time = time.time()
    job_state = self.state.job_state_map[job.id]
    ui.considering_job(job, job_state)
    if job_state.can_start():
        ui.about_to_start_running(job, job_state)
        # Remember which job is running across the checkpoint so a
        # resumed session knows what was interrupted.
        self.metadata.running_job_name = job.id
        self.manager.checkpoint()
        ui.started_running(job, job_state)
        result_builder = self._run_single_job_with_ui_loop(
            job, job_state, ui)
        assert result_builder is not None
        result_builder.execution_duration = time.time() - job_start_time
        job_result = result_builder.get_result()
        self.metadata.running_job_name = None
        self.manager.checkpoint()
        ui.finished_running(job, job_state, job_result)
    else:
        # Set the outcome of jobs that cannot start to
        # OUTCOME_NOT_SUPPORTED _except_ if any of the inhibitors point to
        # a job with an OUTCOME_SKIP outcome, if that is the case mirror
        # that outcome. This makes 'skip' stronger than 'not-supported'
        outcome = IJobResult.OUTCOME_NOT_SUPPORTED
        for inhibitor in job_state.readiness_inhibitor_list:
            # A failed resource fails the job outright when it carries
            # the 'fail-on-resource' flag.
            if (inhibitor.cause == InhibitionCause.FAILED_RESOURCE and
                    'fail-on-resource' in job.get_flag_set()):
                outcome = IJobResult.OUTCOME_FAIL
                break
            elif inhibitor.cause != InhibitionCause.FAILED_DEP:
                continue
            related_job_state = self.state.job_state_map[
                inhibitor.related_job.id]
            if related_job_state.result.outcome == IJobResult.OUTCOME_SKIP:
                outcome = IJobResult.OUTCOME_SKIP
        result_builder = JobResultBuilder(
            outcome=outcome,
            comments=job_state.get_readiness_description(),
            execution_duration=time.time() - job_start_time)
        job_result = result_builder.get_result()
        ui.job_cannot_start(job, job_state, job_result)
    self.state.update_job_result(job, job_result)
    ui.finished(job, job_state, job_result)
def register_test_result(self, test):
    """Registers outcome of a test."""
    _logger.info("Storing test result: %s", test)
    kwargs = {
        'outcome': test['outcome'],
        'comments': test.get('comments', pod.UNSET),
        'execution_duration': time.time() - test['start_time'],
    }
    try:
        prior = test['result']
    except KeyError:
        # A skipped test registered as part of resuming a session has
        # no 'result' field, so there is no real exit status to copy.
        kwargs['return_code'] = 0
    else:
        kwargs['return_code'] = prior.return_code
        kwargs['io_log_filename'] = prior.io_log_filename
        kwargs['io_log'] = prior.io_log
    self.assistant.use_job_result(
        test['id'], JobResultBuilder(**kwargs).get_result())
    self.index += 1
    self.assistant.update_app_blob(self._get_app_blob())
def cant_start_builder(*args, **kwargs):
    # The job never ran; report why it could not start.
    return JobResultBuilder(
        outcome=outcome,
        comments=job_state.get_readiness_description())
def test_get_builder_kwargs(self):
    """Keyword arguments to get_builder() override recorded values."""
    original = JobResultBuilder(outcome='pass').get_result()
    rebuilt = original.get_builder(outcome='fail')
    self.assertEqual(rebuilt.outcome, 'fail')
def test_add_comment(self):
    """add_comment() starts, then extends, the comment text."""
    builder = JobResultBuilder()
    for text, expected in [
            ('first comment', 'first comment'),
            ('second comment', 'first comment\nsecond comment')]:
        builder.add_comment(text)
        self.assertEqual(builder.comments, expected)
def test_io_log_clash(self):
    """get_result() rejects both io_log and io_log_filename being set."""
    builder = JobResultBuilder()
    builder.io_log_filename = 'log'
    builder.io_log = [(0, 'stout', b'hi')]
    with self.assertRaises(ValueError):
        builder.get_result()
def test_smoke_hollow(self):
    """A result made from a pristine builder is hollow."""
    result = JobResultBuilder().get_result()
    self.assertTrue(result.is_hollow)
def run_qml_job(self, job, job_state, config):
    """
    Method called to run a job with plugin field equal to 'qml'.

    The 'qml' job implements the following scenario:

    * Maybe display the description to the user
    * Run qmlscene with provided test and wait for it to finish
    * Decide on the outcome based on the result object returned by qml
      shell
    * The method ends here

    :raises ValueError:
        If the job's plugin field is not 'qml'.
    :returns:
        A IJobResult describing the outcome of the qml shell run.

    .. note::
        QML jobs are fully manual jobs with graphical user interface
        implemented in QML. They implement proposal described in CEP-5.
    """
    if job.plugin != "qml":
        # TRANSLATORS: please keep 'plugin' untranslated
        raise ValueError(_("bad job plugin value"))
    try:
        ctrl = self._get_ctrl_for_job(job)
    except LookupError:
        # BUGFIX: the message previously contained a stray ')' inside
        # the string literal ("...is available)"); the parenthesis was
        # meant to close the _() call, not appear in the message.
        return JobResultBuilder(
            outcome=IJobResult.OUTCOME_NOT_SUPPORTED,
            comments=_('No suitable execution controller is available')
        ).get_result()
    # Run the embedded command
    start_time = time.time()
    delegate, io_log_gen = self._prepare_io_handling(job, config)
    # Create a subprocess.Popen() like object that uses the delegate
    # system to observe all IO as it occurs in real time.
    delegate_cls = extcmd.ExternalCommandWithDelegate
    extcmd_popen = delegate_cls(delegate)
    # Stream all IOLogRecord entries to disk
    record_path = self.get_record_path_for_job(job)
    with gzip.open(record_path, mode='wb') as gzip_stream, \
            io.TextIOWrapper(
                gzip_stream, encoding='UTF-8') as record_stream:
        writer = IOLogRecordWriter(record_stream)
        io_log_gen.on_new_record.connect(writer.write_record)
        try:
            # Start the process and wait for it to finish getting the
            # result code. This will actually call a number of callbacks
            # while the process is running. It will also spawn a few
            # threads although all callbacks will be fired from a single
            # thread (which is _not_ the main thread)
            logger.debug(
                _("job[%s] starting qml shell: %s"), job.id, job.qml_file)
            # Run the job command using extcmd
            ctrl.on_leftover_files.connect(self.on_leftover_files)
            try:
                return_code, result = ctrl.execute_job_with_result(
                    job, job_state, config, self._session_dir,
                    extcmd_popen)
            finally:
                ctrl.on_leftover_files.disconnect(self.on_leftover_files)
            logger.debug(
                _("job[%s] shell return code: %r"), job.id, return_code)
        finally:
            # Always detach the record writer before the gzip stream
            # is closed by the surrounding 'with' block.
            io_log_gen.on_new_record.disconnect(writer.write_record)
    execution_duration = time.time() - start_time
    # A non-zero exit or a missing result object means failure;
    # otherwise trust the outcome reported by the qml shell.
    if return_code != 0 or result is None:
        outcome = IJobResult.OUTCOME_FAIL
    else:
        outcome = result['outcome']
    # Create a result object and return it
    return JobResultBuilder(
        outcome=outcome,
        return_code=return_code,
        io_log_filename=record_path,
        execution_duration=execution_duration
    ).get_result()
def undecided_builder(*args, **kwargs):
    # Leave the verdict to the user: the outcome stays undecided.
    builder = JobResultBuilder(outcome=IJobResult.OUTCOME_UNDECIDED)
    return builder
def _run_single_job_with_ui_loop(self, job, ui):
    """
    Print the job header and run it, interacting with the user as the
    job's plugin requires.

    Loops until a final result builder is produced; the loop restarts
    (``continue``) when the user adds a comment or asks to re-run the
    job during verification.
    """
    print(self.C.header(job.tr_summary(), fill='-'))
    print(_("ID: {0}").format(job.id))
    print(_("Category: {0}").format(
        self.sa.get_job_state(job.id).effective_category_id))
    comments = ""
    while True:
        if job.plugin in ('user-interact', 'user-interact-verify',
                          'user-verify', 'manual'):
            job_state = self.sa.get_job_state(job.id)
            # Interactive jobs cannot run in a silent session; skip
            # them with an explanatory comment.
            if (not self.is_interactive and job.plugin in (
                    'user-interact', 'user-interact-verify', 'manual')):
                result_builder = JobResultBuilder(
                    outcome=IJobResult.OUTCOME_SKIP,
                    comments=_("Trying to run interactive job in a silent"
                               " session"))
                return result_builder
            if job_state.can_start():
                ui.notify_about_purpose(job)
            if (self.is_interactive and job.plugin in (
                    'user-interact', 'user-interact-verify', 'manual')):
                if job_state.can_start():
                    ui.notify_about_steps(job)
                # Manual jobs have no command to wait for; go straight
                # to running. Otherwise ask the user what to do.
                if job.plugin == 'manual':
                    cmd = 'run'
                else:
                    if job_state.can_start():
                        cmd = ui.wait_for_interaction_prompt(job)
                    else:
                        # 'running' the job will make it marked as skipped
                        # because of the failed dependency
                        cmd = 'run'
                if cmd == 'run' or cmd is None:
                    result_builder = self.sa.run_job(job.id, ui, False)
                elif cmd == 'comment':
                    new_comment = input(self.C.BLUE(
                        _('Please enter your comments:') + '\n'))
                    if new_comment:
                        comments += new_comment + '\n'
                    # Re-enter the loop so the job can still be run
                    continue
                elif cmd == 'skip':
                    result_builder = JobResultBuilder(
                        outcome=IJobResult.OUTCOME_SKIP,
                        comments=_("Explicitly skipped before"
                                   " execution"))
                    # User-entered comments override the stock text
                    if comments != "":
                        result_builder.comments = comments
                    break
                elif cmd == 'quit':
                    raise SystemExit()
            else:
                # Non-interactive session: just run the job
                result_builder = self.sa.run_job(job.id, ui, False)
        else:
            if 'noreturn' in job.get_flag_set():
                ui.noreturn_job()
            result_builder = self.sa.run_job(job.id, ui, False)
        if (self.is_interactive and
                result_builder.outcome == IJobResult.OUTCOME_UNDECIDED):
            # Undecided outcome needs manual verification by the user;
            # the callback may raise ReRunJob to repeat the whole cycle.
            try:
                if comments != "":
                    result_builder.comments = comments
                ui.notify_about_verification(job)
                self._interaction_callback(job, result_builder)
            except ReRunJob:
                # Store the interim result before re-running the job
                self.sa.use_job_result(job.id,
                                       result_builder.get_result())
                continue
        break
    return result_builder