def _maybe_skip_last_job_after_resume(self, session):
    """
    Check whether the previous run stopped in the middle of a job and,
    if so, ask the user whether to skip it, fail it or run it again,
    recording the chosen outcome in the session.
    """
    last_job = session.metadata.running_job_name
    if last_job is None:
        return
    print("We have previously tried to execute {}".format(last_job))
    action = self.ask_for_resume_action()
    # Pre-set to None so an unexpected action value cannot raise
    # UnboundLocalError below; 'run' deliberately records no result so
    # the job executes again.
    result = None
    if action == 'skip':
        result = MemoryJobResult({
            'outcome': 'skip',
            # Bug fix: the key was 'comment' (singular) which the result
            # class ignores -- every other result in this file uses
            # 'comments'.
            'comments': "Skipped after resuming execution"
        })
    elif action == 'fail':
        result = MemoryJobResult({
            'outcome': 'fail',
            'comments': "Failed after resuming execution"
        })
    if result:
        session.update_job_result(
            session.job_state_map[last_job].job, result)
        # Clear the marker so the question is not asked again next time.
        session.metadata.running_job_name = None
        session.persistent_save()
def _maybe_skip_last_job_after_resume(self, manager):
    """
    Check whether the previous run stopped in the middle of a job and,
    if so, ask the user whether to skip it, fail it or run it again,
    recording the chosen outcome via the session manager.
    """
    last_job = manager.state.metadata.running_job_name
    if last_job is None:
        return
    print("We have previously tried to execute {}".format(last_job))
    action = self.ask_for_resume_action()
    # Pre-set to None so an unexpected action value cannot raise
    # UnboundLocalError below; 'run' deliberately records no result so
    # the job executes again.
    result = None
    if action == 'skip':
        result = MemoryJobResult({
            'outcome': 'skip',
            # Bug fix: the key was 'comment' (singular) which the result
            # class ignores -- every other result in this file uses
            # 'comments'.
            'comments': "Skipped after resuming execution"
        })
    elif action == 'fail':
        result = MemoryJobResult({
            'outcome': 'fail',
            'comments': "Failed after resuming execution"
        })
    if result:
        manager.state.update_job_result(
            manager.state.job_state_map[last_job].job, result)
        # Clear the marker so the question is not asked again next time.
        manager.state.metadata.running_job_name = None
        manager.checkpoint()
def make_realistic_test_session(self, session_dir):
    """
    Build a two-job session: shell job A that requires resource job B,
    with passing results recorded for both.

    NOTE(review): ``session_dir`` is not used by this helper.
    """
    job_a = JobDefinition({
        'plugin': 'shell',
        'name': 'job_a',
        'summary': 'This is job A',
        'command': 'echo testing && true',
        'requires': 'job_b.ready == "yes"',
    })
    job_b = JobDefinition({
        'plugin': 'resource',
        'name': 'job_b',
        'summary': 'This is job B',
        'command': 'echo ready: yes',
    })
    job_list = [job_a, job_b]
    session = SessionState(job_list)
    session.update_desired_job_list(job_list)
    # Record a passing result for each job (dict preserves insertion
    # order, so job_a is updated before job_b, as before).
    result_map = {
        job_a: MemoryJobResult({
            'outcome': IJobResult.OUTCOME_PASS,
            'return_code': 0,
            'io_log': [(0, 'stdout', b'testing\n')],
        }),
        job_b: MemoryJobResult({
            'outcome': IJobResult.OUTCOME_PASS,
            'return_code': 0,
            'comments': 'foo',
            'io_log': [(0, 'stdout', b'ready: yes\n')],
        }),
    }
    for job, result in result_map.items():
        session.update_job_result(job, result)
    return session
def test_get_certification_status_map(self):
    # Exercise get_certification_status_map() filtering on both the
    # result outcome and the effective certification status of jobs.
    result_A = MemoryJobResult({'outcome': IJobResult.OUTCOME_PASS})
    self.session.update_job_result(self.job_A, result_A)
    self.session.job_state_map[
        self.job_A.id].effective_certification_status = 'foo'
    # With the default filters nothing matches.
    self.assertEqual(self.session.get_certification_status_map(), {})
    # Widening both filters to cover job A makes it appear in the map.
    self.assertEqual(
        self.session.get_certification_status_map(
            outcome_filter=(IJobResult.OUTCOME_PASS, ),
            certification_status_filter=('foo', )),
        {self.job_A.id: self.session.job_state_map[self.job_A.id]})
    result_Y = MemoryJobResult({'outcome': IJobResult.OUTCOME_FAIL})
    self.session.job_state_map[
        self.job_Y.id].effective_certification_status = 'bar'
    self.assertEqual(self.session.get_certification_status_map(), {})
    # Job Y's certification status is set but its result has not been
    # recorded yet, so only A matches even with widened filters.
    self.assertEqual(
        self.session.get_certification_status_map(
            outcome_filter=(IJobResult.OUTCOME_PASS,
                            IJobResult.OUTCOME_FAIL),
            certification_status_filter=('foo', 'bar')),
        {self.job_A.id: self.session.job_state_map[self.job_A.id]})
    # Once Y's failing result is recorded both jobs are reported.
    self.session.update_job_result(self.job_Y, result_Y)
    self.assertEqual(
        self.session.get_certification_status_map(
            outcome_filter=(IJobResult.OUTCOME_PASS,
                            IJobResult.OUTCOME_FAIL),
            certification_status_filter=('foo', 'bar')),
        {
            self.job_A.id: self.session.job_state_map[self.job_A.id],
            self.job_Y.id: self.session.job_state_map[self.job_Y.id]
        })
def _handle_last_job_after_resume(self, last_job):
    """
    Decide what to do about the job that was running when the previous
    session got interrupted.

    In auto-resume mode (``--session-id`` given) the outcome is read
    from the session's ``__result`` file, defaulting to 'pass'.
    Otherwise the user is asked interactively whether to skip, pass,
    fail or re-run the job.
    """
    if last_job is None:
        return
    if self.ctx.args.session_id:
        # session_id is present only if auto-resume is used
        result_dict = {
            'outcome': IJobResult.OUTCOME_PASS,
            'comments': _("Automatically passed after resuming execution"),
        }
        result_path = os.path.join(
            self.ctx.sa.get_session_dir(), 'CHECKBOX_DATA', '__result')
        if os.path.exists(result_path):
            try:
                with open(result_path, 'rt') as f:
                    result_dict = json.load(f)
                # the only really important field in the result is
                # 'outcome' so let's make sure it doesn't contain
                # anything stupid
                if result_dict.get('outcome') not in [
                        'pass', 'fail', 'skip']:
                    result_dict['outcome'] = IJobResult.OUTCOME_PASS
            except json.JSONDecodeError:
                # Deliberate best-effort: a corrupt __result file falls
                # back to the default 'pass' outcome prepared above.
                pass
        # Bug fix: format the *translated* template rather than
        # translating an already-formatted string, which could never
        # match a catalog entry.
        print(_("Automatically resuming session. "
                "Outcome of the previous job: {}").format(
                    result_dict['outcome']))
        result = MemoryJobResult(result_dict)
        self.ctx.sa.use_job_result(last_job, result)
        return
    print(_("Previous session run tried to execute job: {}").format(
        last_job))
    cmd = self._pick_action_cmd([
        Action('s', _("skip that job"), 'skip'),
        Action('p', _("mark it as passed and continue"), 'pass'),
        Action('f', _("mark it as failed and continue"), 'fail'),
        Action('r', _("run it again"), 'run'),
    ], _("What do you want to do with that job?"))
    # 'run' (and any unexpected command) records no result so the job
    # will simply execute again.
    result = None
    if cmd == 'skip' or cmd is None:
        result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_SKIP,
            'comments': _("Skipped after resuming execution")
        })
    elif cmd == 'pass':
        result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_PASS,
            'comments': _("Passed after resuming execution")
        })
    elif cmd == 'fail':
        result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_FAIL,
            'comments': _("Failed after resuming execution")
        })
    if result:
        self.ctx.sa.use_job_result(last_job, result)
def test_get_outcome_stats(self):
    """get_outcome_stats() tallies one PASS and two FAIL results."""
    outcomes = {
        self.job_A: IJobResult.OUTCOME_PASS,
        self.job_R: IJobResult.OUTCOME_FAIL,
        self.job_Y: IJobResult.OUTCOME_FAIL,
    }
    for job, outcome in outcomes.items():
        self.session.update_job_result(
            job, MemoryJobResult({'outcome': outcome}))
    expected = {
        IJobResult.OUTCOME_PASS: 1,
        IJobResult.OUTCOME_FAIL: 2,
    }
    self.assertEqual(self.session.get_outcome_stats(), expected)
def setUp(self):
    # Fixture for the HTML exporter tests: a local session populated
    # with three ordinary jobs (fail / pass / skip), one attachment job
    # and two resource lists.
    self.exporter_unit = self._get_all_exporter_units()[
        '2013.com.canonical.plainbox::html']
    # Resources that the exporter renders (OS description and packages).
    self.resource_map = {
        '2013.com.canonical.certification::lsb': [
            Resource({'description': 'Ubuntu 14.04 LTS'})],
        '2013.com.canonical.certification::package': [
            Resource({'name': 'plainbox', 'version': '1.0'}),
            Resource({'name': 'fwts', 'version': '0.15.2'})],
    }
    self.job1 = JobDefinition({'id': 'job_id1', '_summary': 'job 1'})
    self.job2 = JobDefinition({'id': 'job_id2', '_summary': 'job 2'})
    self.job3 = JobDefinition({'id': 'job_id3', '_summary': 'job 3'})
    # One result of each interesting kind: failure with stderr output,
    # pass with stdout and a comment, and a plain skip.
    self.result_fail = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_FAIL,
        'return_code': 1,
        'io_log': [(0, 'stderr', b'FATAL ERROR\n')],
    })
    self.result_pass = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'return_code': 0,
        'io_log': [(0, 'stdout', b'foo\n')],
        'comments': 'blah blah'
    })
    self.result_skip = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_SKIP,
        'comments': 'No such device'
    })
    self.attachment = JobDefinition({
        'id': 'dmesg_attachment',
        'plugin': 'attachment'})
    self.attachment_result = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'io_log': [(0, 'stdout', b'bar\n')],
        'return_code': 0
    })
    self.session_manager = SessionManager.create()
    self.session_manager.add_local_device_context()
    self.session_state = self.session_manager.default_device_context.state
    session_state = self.session_state
    # Units must be added before their results can be recorded.
    session_state.add_unit(self.job1)
    session_state.add_unit(self.job2)
    session_state.add_unit(self.job3)
    session_state.add_unit(self.attachment)
    session_state.update_job_result(self.job1, self.result_fail)
    session_state.update_job_result(self.job2, self.result_pass)
    session_state.update_job_result(self.job3, self.result_skip)
    session_state.update_job_result(
        self.attachment, self.attachment_result)
    for resource_id, resource_list in self.resource_map.items():
        session_state.set_resource_list(resource_id, resource_list)
def run_job_if_possible(session, runner, config, job, update=True):
    """
    Coupling point for session, runner, config and job

    :returns: (job_state, job_result)
    """
    job_state = session.job_state_map[job.id]
    if not job_state.can_start():
        # The job cannot run. Report OUTCOME_NOT_SUPPORTED unless one
        # of the failed-dependency inhibitors points at a job that was
        # skipped, in which case the skip is propagated: 'skip' is
        # stronger than 'not-supported'.
        outcome = IJobResult.OUTCOME_NOT_SUPPORTED
        for inhibitor in job_state.readiness_inhibitor_list:
            if inhibitor.cause != inhibitor.FAILED_DEP:
                continue
            dep_state = session.job_state_map[inhibitor.related_job.id]
            if dep_state.result.outcome == IJobResult.OUTCOME_SKIP:
                outcome = IJobResult.OUTCOME_SKIP
        job_result = MemoryJobResult({
            'outcome': outcome,
            'comments': job_state.get_readiness_description()
        })
    else:
        job_result = runner.run_job(job, config)
    assert job_result is not None
    if update:
        session.update_job_result(job, job_result)
    return job_state, job_result
def test_resource_job_with_broken_output(self, mock_logger):
    # This function checks how SessionState parses partially broken
    # resource jobs. A JobResult with broken output is constructed below.
    # The output will describe one proper record, one broken record and
    # another proper record in that order.
    result_R = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'io_log': [(0, 'stdout', b"attr: value-1\n"),
                   (1, 'stdout', b"\n"),
                   (1, 'stdout', b"I-sound-like-a-broken-record\n"),
                   (1, 'stdout', b"\n"),
                   (1, 'stdout', b"attr: value-2\n")],
    })
    # Since we cannot control the output of scripts and people indeed make
    # mistakes a warning is issued but no exception is raised to the
    # caller.
    self.session.update_job_result(self.job_R, result_R)
    # The observation here is that the parser is not handling the exception
    # in away which would allow for recovery. Out of all the output only
    # the first record is created and stored properly. The third, proper
    # record is entirely ignored.
    expected = {'R': [Resource({'attr': 'value-1'})]}
    self.assertEqual(self.session._resource_map, expected)
    # Make sure the right warning was logged
    mock_logger.warning.assert_called_once_with(
        "local script %s returned invalid RFC822 data: %s",
        self.job_R.id, RFC822SyntaxError(
            None, 3, "Unexpected non-empty line: "
            "'I-sound-like-a-broken-record\\n'"))
def _populate_session_state(self, job, state):
    """
    Feed one parsed job record into *state*: store its result,
    regenerate resource lists for resource jobs and restore the per-job
    category and certification-status metadata.
    """
    # Rebuild the IO log as stdout records, one per original line.
    io_log = [
        IOLogRecord(seq, 'stdout', text.encode('utf-8'))
        for seq, text in enumerate(
            job.get_record_value('io_log').splitlines(keepends=True))
    ]
    result = MemoryJobResult({
        'outcome': job.get_record_value('outcome',
                                        job.get_record_value('status')),
        'comments': job.get_record_value('comments'),
        'execution_duration': job.get_record_value('duration'),
        'io_log': io_log,
    })
    state.update_job_result(job, result)
    if job.plugin == 'resource':
        # Parse the IO log back into resource records; an empty parse
        # still yields a single blank Resource, as before.
        resources = [
            Resource(record.data)
            for record in gen_rfc822_records_from_io_log(job, result)
        ] or [Resource({})]
        state.set_resource_list(job.id, resources)
    job_state = state.job_state_map[job.id]
    job_state.effective_category_id = job.get_record_value(
        'category_id', 'com.canonical.plainbox::uncategorised')
    job_state.effective_certification_status = job.get_record_value(
        'certification_status', 'unspecified')
def run_manual_job(self, job, config):
    """
    Method called to run a job with plugin field equal to 'manual'

    The 'manual' job implements the following scenario:

    * Display the description to the user
    * Ask the user to perform some operation
    * Ask the user to decide on the outcome

    .. note::
        Technically this method almost always returns a result with
        OUTCOME_UNDECIDED to indicate that it could not determine if
        the test passed or not.  Manual jobs are basically fully human
        driven and could totally ignore the job runner.  This method is
        provided for completeness.

    .. warning::
        Before the interaction callback is fully removed and deprecated
        it may also return other values through that callback.
    """
    # Guard clause: reject anything that is not a manual job.
    # TRANSLATORS: please keep 'plugin' untranslated
    if job.plugin != "manual":
        raise ValueError(_("bad job plugin value"))
    return MemoryJobResult({'outcome': IJobResult.OUTCOME_UNDECIDED})
def _run_single_job_with_session(self, ns, session, runner, job):
    """
    Run *job* inside *session* with *runner*, printing progress and
    recording the outcome.  The session is checkpointed around the
    actual run so an interrupted session can detect which job was
    executing on resume.
    """
    print("[ {} ]".format(job.name).center(80, '-'))
    # Bug fix: an empty (but non-None) description used to crash on
    # splitlines()[-1]; only print the banner when there is actual text.
    if job.description:
        print(job.description)
        print("^" * len(job.description.splitlines()[-1]))
        print()
    job_state = session.job_state_map[job.name]
    logger.debug("Job name: %s", job.name)
    logger.debug("Plugin: %s", job.plugin)
    logger.debug("Direct dependencies: %s", job.get_direct_dependencies())
    logger.debug("Resource dependencies: %s",
                 job.get_resource_dependencies())
    logger.debug("Resource program: %r", job.requires)
    logger.debug("Command: %r", job.command)
    logger.debug("Can start: %s", job_state.can_start())
    logger.debug("Readiness: %s", job_state.get_readiness_description())
    if job_state.can_start():
        print("Running... (output in {}.*)".format(
            join(session.jobs_io_log_dir, slugify(job.name))))
        # Remember which job is running so a crash can be detected and
        # handled when the session is resumed.
        session.metadata.running_job_name = job.name
        session.persistent_save()
        job_result = runner.run_job(job)
        session.metadata.running_job_name = None
        session.persistent_save()
        print("Outcome: {}".format(job_result.outcome))
        print("Comments: {}".format(job_result.comments))
    else:
        job_result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_NOT_SUPPORTED,
            'comments': job_state.get_readiness_description()
        })
    if job_result is not None:
        session.update_job_result(job, job_result)
def test_resource_job_result_updates_resource_and_job_states(self):
    # This function checks what happens when a JobResult for job R (which
    # is a resource job via the resource plugin) is presented to the
    # session.
    result_R = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'io_log': [(0, 'stdout', b"attr: value\n")],
    })
    self.session.update_job_result(self.job_R, result_R)
    # The most obvious thing that can happen, is that the result is simply
    # stored in the associated job state object.
    self.assertIs(self.job_state('R').result, result_R)
    # Initially the _resource_map was empty. SessionState parses the io_log
    # of results of resource jobs and creates appropriate resource objects.
    self.assertIn("R", self.session._resource_map)
    expected = {'R': [Resource({'attr': 'value'})]}
    self.assertEqual(self.session._resource_map, expected)
    # As job results are presented to the session the readiness of other
    # jobs is changed. Since A depends on R via a resource expression and
    # the particular resource that were produced by R in this test should
    # allow the expression to match the readiness inhibitor from A should
    # have been removed. Since this test does not use
    # update_desired_job_list() a will still have the UNDESIRED inhibitor
    # but it will no longer have the PENDING_RESOURCE inhibitor,
    self.assertEqual(
        self.job_inhibitor('A', 0).cause, JobReadinessInhibitor.UNDESIRED)
    # Now if we put A on the desired list this should clear the UNDESIRED
    # inhibitor and make A runnable.
    self.session.update_desired_job_list([self.job_A])
    self.assertTrue(self.job_state('A').can_start())
def test_smoke(self):
    """A MemoryJobResult built from an empty dict is all-None/empty."""
    result = MemoryJobResult({})
    self.assertEqual(str(result), "None")
    self.assertEqual(repr(result), "<MemoryJobResult outcome:None>")
    # Scalar attributes default to None, the io_log to an empty tuple.
    for attr in ('outcome', 'comments', 'return_code'):
        self.assertIsNone(getattr(result, attr))
    self.assertEqual(result.io_log, ())
def _dry_run_result(self, job):
    """
    Produce the result that is used when running in dry-run mode
    """
    skipped = {
        'outcome': IJobResult.OUTCOME_SKIP,
        'comments': "Job skipped in dry-run mode",
    }
    return MemoryJobResult(skipped)
def test_io_log_as_text_attachment(self):
    """io_log_as_text_attachment yields the decoded stdout payload."""
    data = {
        'outcome': IJobResult.OUTCOME_PASS,
        'comments': "it said foo",
        'io_log': [(0, 'stdout', b'foo')],
        'return_code': 0,
    }
    result = MemoryJobResult(data)
    self.assertEqual(result.io_log_as_text_attachment, 'foo')
def test_repr_JobResult_with_MemoryJobResult(self, mocked_helper):
    """
    verify that _repr_JobResult() called with MemoryJobResult calls
    _repr_MemoryJobResult
    """
    result = MemoryJobResult({})
    self.helper._repr_JobResult(result)
    # Bug fix: the previous assertion used the non-existent
    # ``assertCalledOnceWith``, which Mock happily resolves as a plain
    # attribute access -- the "assertion" was a silent no-op and the
    # test could never fail.  ``assert_called_once_with`` is the real
    # Mock API (matching the sibling test that patches via context
    # manager).
    mocked_helper._repr_MemoryJobResult.assert_called_once_with(result)
def test_repr_SessionState_typical_session(self):
    """
    verify the representation of a SessionState with some unused jobs

    Unused jobs should just have no representation. Their checksum should
    not be mentioned. Their results (empty results) should be ignored.
    """
    used_job = JobDefinition({
        "plugin": "shell",
        "id": "used",
        "command": "echo 'hello world'",
    })
    unused_job = JobDefinition({
        "plugin": "shell",
        "id": "unused",
        "command": "echo 'hello world'",
    })
    used_result = MemoryJobResult({
        "io_log": [
            (0.0, "stdout", b'hello world\n'),
        ],
        'outcome': IJobResult.OUTCOME_PASS
    })
    # Both jobs are known to the session, but only 'used' is desired and
    # has a result recorded.
    session_state = SessionState([used_job, unused_job])
    session_state.update_desired_job_list([used_job])
    session_state.update_job_result(used_job, used_result)
    data = self.helper._repr_SessionState(session_state, self.session_dir)
    self.assertEqual(
        data, {
            'jobs': {
                # presumably the checksum of used_job's definition --
                # the unused job must not appear here at all
                'used': ('8c393c19fdfde1b6afc5b79d0a1617ecf7531cd832a16450dc'
                         '2f3f50d329d373')
            },
            'results': {
                # io_log payloads are serialized as base64 text
                'used': [{
                    'comments': None,
                    'execution_duration': None,
                    'io_log': [[0.0, 'stdout', 'aGVsbG8gd29ybGQK']],
                    'outcome': 'pass',
                    'return_code': None
                }]
            },
            'desired_job_list': ['used'],
            'mandatory_job_list': [],
            'metadata': {
                'title': None,
                'flags': [],
                'running_job_name': None,
                'app_blob': '',
                'app_id': None,
                'custom_joblist': False,
                'rejected_jobs': []
            },
        })
def test_desired_job_X_can_run_with_passing_job_Y(self):
    # A variant of the test case above, simply Y passes this time, making X
    # runnable
    self.session.update_desired_job_list([self.job_X])
    self.session.update_job_result(
        self.job_Y, MemoryJobResult({'outcome': IJobResult.OUTCOME_PASS}))
    # With the dependency satisfied X has no inhibitors left.
    state_X = self.job_state('X')
    self.assertEqual(state_X.readiness_inhibitor_list, [])
    self.assertTrue(state_X.can_start())
def test_resource_job_result_overwrites_old_resources(self):
    # This function checks what happens when a JobResult for job R is
    # presented to a session that has some resources from that job already.
    def resource_result(payload):
        # Helper: wrap a single stdout line into a resource job result.
        return MemoryJobResult({'io_log': [(0, 'stdout', payload)]})
    self.session.update_job_result(
        self.job_R, resource_result(b"attr: old value\n"))
    # So here the old result is stored into a new 'R' resource
    self.assertEqual(
        self.session._resource_map,
        {'R': [Resource({'attr': 'old value'})]})
    # Now we present the second result for the same job.  The R resource
    # must be entirely replaced by the new data -- never merged or
    # appended.
    self.session.update_job_result(
        self.job_R, resource_result(b"attr: new value\n"))
    self.assertEqual(
        self.session._resource_map,
        {'R': [Resource({'attr': 'new value'})]})
def _build_JobResult(cls, result_repr, flags, location): """ Reconstruct a single job result. Convert the representation of MemoryJobResult or DiskJobResult back into an actual instance. """ # Load all common attributes... outcome = _validate( result_repr, key='outcome', value_type=str, value_choice=sorted( OUTCOME_METADATA_MAP.keys(), key=lambda outcome: outcome or "none" ), value_none=True) comments = _validate( result_repr, key='comments', value_type=str, value_none=True) return_code = _validate( result_repr, key='return_code', value_type=int, value_none=True) execution_duration = _validate( result_repr, key='execution_duration', value_type=float, value_none=True) # Construct either DiskJobResult or MemoryJobResult if 'io_log_filename' in result_repr: io_log_filename = cls._load_io_log_filename( result_repr, flags, location) if (flags & cls.FLAG_FILE_REFERENCE_CHECKS_F and not os.path.isfile(io_log_filename) and flags & cls.FLAG_REWRITE_LOG_PATHNAMES_F): io_log_filename2 = cls._rewrite_pathname(io_log_filename, location) logger.warning(_("Rewrote file name from %r to %r"), io_log_filename, io_log_filename2) io_log_filename = io_log_filename2 if (flags & cls.FLAG_FILE_REFERENCE_CHECKS_F and not os.path.isfile(io_log_filename)): raise BrokenReferenceToExternalFile( _("cannot access file: {!r}").format(io_log_filename)) return DiskJobResult({ 'outcome': outcome, 'comments': comments, 'execution_duration': execution_duration, 'io_log_filename': io_log_filename, 'return_code': return_code }) else: io_log = [ cls._build_IOLogRecord(record_repr) for record_repr in _validate( result_repr, key='io_log', value_type=list)] return MemoryJobResult({ 'outcome': outcome, 'comments': comments, 'execution_duration': execution_duration, 'io_log': io_log, 'return_code': return_code })
def __init__(self, job):
    """
    Initialize a new job state object.

    The job will be inhibited by a single UNDESIRED inhibitor and will
    have a result with OUTCOME_NONE that basically says it did not run yet.
    """
    self._job = job
    # Placeholder result: the job has not executed yet.
    self._result = MemoryJobResult({'outcome': IJobResult.OUTCOME_NONE})
    # Until it is put on the desired list, the job stays inhibited.
    self._readiness_inhibitor_list = [UndesiredJobReadinessInhibitor]
def _get_dry_run_result(self, job):
    """
    Internal method of JobRunner.

    Returns a result that is used when running in dry-run mode (where we
    don't really test anything)
    """
    skipped = {
        'outcome': IJobResult.OUTCOME_SKIP,
        'comments': _("Job skipped in dry-run mode"),
    }
    return MemoryJobResult(skipped)
def test_repr_JobResult_with_MemoryJobResult(self):
    """
    verify that _repr_JobResult() called with MemoryJobResult calls
    _repr_MemoryJobResult
    """
    with mock.patch.object(self.helper, '_repr_MemoryJobResult'):
        result = MemoryJobResult({})
        self.helper._repr_JobResult(result, self.session_dir)
        self.helper._repr_MemoryJobResult.assert_called_once_with(
            result, None)
def handle_last_job_after_resume(self):
    """
    Ask the user what to do about the job that was running when the
    previous session was interrupted (skip / pass / fail / re-run) and
    record the chosen outcome, checkpointing the session.
    """
    last_job = self.metadata.running_job_name
    if last_job is None:
        return
    print(
        _("Previous session run tried to execute job: {}").format(
            last_job))
    cmd = self._pick_action_cmd([
        Action('s', _("skip that job"), 'skip'),
        Action('p', _("mark it as passed and continue"), 'pass'),
        Action('f', _("mark it as failed and continue"), 'fail'),
        Action('r', _("run it again"), 'run'),
    ], _("What do you want to do with that job?"))
    # Pre-set to None so an unexpected command cannot raise
    # UnboundLocalError below; 'run' deliberately records no result so
    # the job executes again.
    result = None
    if cmd == 'skip' or cmd is None:
        result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_SKIP,
            'comments': _("Skipped after resuming execution")
        })
    elif cmd == 'pass':
        result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_PASS,
            'comments': _("Passed after resuming execution")
        })
    elif cmd == 'fail':
        result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_FAIL,
            'comments': _("Failed after resuming execution")
        })
    if result:
        self.state.update_job_result(
            self.state.job_state_map[last_job].job, result)
        # Clear the marker so the question is not asked again next time.
        self.metadata.running_job_name = None
        self.manager.checkpoint()
def _get_persistance_subset(self):
    # Don't save resource job results, fresh data are required
    # so we can't reuse the old ones
    # The inhibitor list needs to be recomputed as well, don't save it.
    # (NOTE(review): "persistance" is a typo but the name is public API.)
    if self._job.plugin == 'resource':
        result = MemoryJobResult({'outcome': IJobResult.OUTCOME_NONE})
    else:
        result = self._result
    return {'_job': self._job, '_result': result}
def test_everything(self):
    """A fully-populated MemoryJobResult exposes every field."""
    result = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'comments': "it said blah",
        'io_log': [(0, 'stdout', b'blah\n')],
        'return_code': 0,
    })
    # Attribute accessors reflect the construction data verbatim
    # (io_log is normalized to a tuple of tuples).
    self.assertEqual(result.outcome, IJobResult.OUTCOME_PASS)
    self.assertEqual(result.return_code, 0)
    self.assertEqual(result.comments, "it said blah")
    self.assertEqual(result.io_log, ((0, 'stdout', b'blah\n'),))
    # String conversions are based on the outcome.
    self.assertEqual(str(result), "pass")
    self.assertEqual(repr(result), "<MemoryJobResult outcome:'pass'>")
def test_normal_job_result_updates(self):
    """Presenting a result for a plain (non-resource) job."""
    blank_result = MemoryJobResult({})
    self.session.update_job_result(self.job_A, blank_result)
    # The result object is stored as-is in the job state.
    self.assertIs(self.job_state('A').result, blank_result)
    # Unlike resource jobs, this leaves the resource map untouched.
    self.assertEqual(self.session._resource_map, {})
    # Readiness inhibitors are unaffected by results beyond dependency
    # and resource relationships: A is still inhibited by UNDESIRED.
    self.assertEqual(
        self.job_inhibitor('A', 0).cause, JobReadinessInhibitor.UNDESIRED)
def test_desired_job_X_cannot_run_with_no_resource_R(self):
    # A variant of the two test cases above, using A-R jobs
    self.session.update_desired_job_list([self.job_A])
    self.session.update_job_result(self.job_R, MemoryJobResult({
        'io_log': [(0, 'stdout', b'attr: wrong value\n')],
    }))
    # The resource expression no longer matches, so A is inhibited by
    # FAILED_RESOURCE and cannot start.
    self.assertNotEqual(self.job_state('A').readiness_inhibitor_list, [])
    inhibitor = self.job_inhibitor('A', 0)
    self.assertEqual(inhibitor.cause, JobReadinessInhibitor.FAILED_RESOURCE)
    self.assertEqual(inhibitor.related_job, self.job_R)
    self.assertEqual(inhibitor.related_expression, self.job_A_expr)
    self.assertFalse(self.job_state('A').can_start())
def _run_single_job_with_session(self, ns, manager, runner, job):
    """
    Run *job* using *runner*, recording the result in the session held
    by *manager*.  The session is checkpointed around the run so an
    interrupted session can detect which job was executing on resume.
    """
    # 'local' and 'resource' jobs are internal machinery -- suppress the
    # user-facing chatter for them.  (Previously this membership test
    # was repeated four times.)
    verbose = job.plugin not in ('local', 'resource')
    if verbose:
        print("[ {} ]".format(job.tr_summary()).center(80, '-'))
    job_state = manager.state.job_state_map[job.id]
    logger.debug("Job id: %s", job.id)
    logger.debug("Plugin: %s", job.plugin)
    logger.debug("Direct dependencies: %s", job.get_direct_dependencies())
    logger.debug("Resource dependencies: %s",
                 job.get_resource_dependencies())
    logger.debug("Resource program: %r", job.requires)
    logger.debug("Command: %r", job.command)
    logger.debug("Can start: %s", job_state.can_start())
    logger.debug("Readiness: %s", job_state.get_readiness_description())
    if job_state.can_start():
        if verbose:
            # Bug fix: an empty (but non-None) description used to
            # crash on splitlines()[-1]; only print when there is text.
            if job.description:
                print(job.description)
                print("^" * len(job.description.splitlines()[-1]))
                print()
            print("Running... (output in {}.*)".format(
                join(manager.storage.location, slugify(job.id))))
        # Remember which job is running so a crash can be detected and
        # handled when the session is resumed.
        manager.state.metadata.running_job_name = job.id
        manager.checkpoint()
        # TODO: get a confirmation from the user for certain types of
        # job.plugin
        job_result = runner.run_job(job, self.config)
        if (job_result.outcome == IJobResult.OUTCOME_UNDECIDED
                and self.is_interactive):
            job_result = self._interaction_callback(
                runner, job, self.config)
        manager.state.metadata.running_job_name = None
        manager.checkpoint()
        if verbose:
            print("Outcome: {}".format(job_result.outcome))
            if job_result.comments is not None:
                print("Comments: {}".format(job_result.comments))
    else:
        job_result = MemoryJobResult({
            'outcome': IJobResult.OUTCOME_NOT_SUPPORTED,
            'comments': job_state.get_readiness_description()
        })
        if verbose:
            print("Outcome: {}".format(job_result.outcome))
    if job_result is not None:
        manager.state.update_job_result(job, job_result)