def make_realistic_test_session(self):
    # Create a more realistic session with two jobs but with a richer set
    # of data in the actual jobs and results.
    job_a = JobDefinition({
        'plugin': 'shell',
        'name': 'job_a',
        'command': 'echo testing && true',
        'requires': 'job_b.ready == "yes"'
    })
    job_b = JobDefinition({
        'plugin': 'resource',
        'name': 'job_b',
        'command': 'echo ready: yes'
    })
    session = SessionState([job_a, job_b])
    session.update_desired_job_list([job_a, job_b])
    result_a = JobResult({
        'job': job_a,
        'outcome': 'pass',
        'return_code': 0,
        'io_log': (
            IOLogRecord(0, 'stdout', 'testing\n'),
        )
    })
    result_b = JobResult({
        'job': job_b,
        'outcome': 'pass',
        'return_code': 0,
        'io_log': (
            IOLogRecord(0, 'stdout', 'ready: yes\n'),
        )
    })
    session.update_job_result(job_a, result_a)
    session.update_job_result(job_b, result_b)
    return session
class SessionStateSmokeTests(TestCase):

    def setUp(self):
        A = make_job('A', requires='R.attr == "value"')
        B = make_job('B', depends='C')
        C = make_job('C')
        self.job_list = [A, B, C]
        self.session_state = SessionState(self.job_list)

    def test_initial_job_list(self):
        expected = self.job_list
        observed = self.session_state.job_list
        self.assertEqual(expected, observed)

    def test_initial_desired_job_list(self):
        expected = []
        observed = self.session_state.desired_job_list
        self.assertEqual(expected, observed)

    def test_initial_run_list(self):
        expected = []
        observed = self.session_state.run_list
        self.assertEqual(expected, observed)

    def test_update_mandatory_job_list_updates(self):
        D = make_job('D')
        self.session_state.update_mandatory_job_list([D])
        expected = [D]
        observed = self.session_state.mandatory_job_list
        self.assertEqual(expected, observed)
def test_also_after_suspend_manual_flag(self):
    # Define a job
    job = make_job("A", summary="foo", flags='also-after-suspend-manual')
    # Define an empty session
    session = SessionState([])
    # Add the job to the session
    session.add_unit(job)
    # Both jobs got added to job list
    self.assertEqual(len(session.job_list), 2)
    self.assertIn(job, session.job_list)
    self.assertEqual(session.job_list[1].id, 'after-suspend-manual-A')
    self.assertEqual(session.job_list[1].summary, 'foo after suspend (S3)')
    self.assertEqual(
        session.job_list[1].depends,
        'A com.canonical.certification::suspend/suspend_advanced')
    sibling = session.job_list[1]
    self.assertNotIn('also-after-suspend-manual', sibling.get_flag_set())
    # Both jobs got added to job state map
    self.assertIs(session.job_state_map[job.id].job, job)
    self.assertIs(session.job_state_map[sibling.id].job, sibling)
    # Both jobs are not added to the desired job list
    self.assertNotIn(job, session.desired_job_list)
    self.assertNotIn(sibling, session.desired_job_list)
    # Both jobs are not in the run list
    self.assertNotIn(job, session.run_list)
    self.assertNotIn(sibling, session.run_list)
    # Both jobs are not selected to run
    self.assertEqual(
        session.job_state_map[job.id].readiness_inhibitor_list,
        [UndesiredJobReadinessInhibitor])
    self.assertEqual(
        session.job_state_map[sibling.id].readiness_inhibitor_list,
        [UndesiredJobReadinessInhibitor])
def make_realistic_test_session(self, session_dir):
    # Create a more realistic session with two jobs but with a richer set
    # of data in the actual jobs and results.
    job_a = JobDefinition({
        'plugin': 'shell',
        'name': 'job_a',
        'command': 'echo testing && true',
        'requires': 'job_b.ready == "yes"'
    })
    job_b = JobDefinition({
        'plugin': 'resource',
        'name': 'job_b',
        'command': 'echo ready: yes'
    })
    session = SessionState([job_a, job_b])
    session.update_desired_job_list([job_a, job_b])
    result_a = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'return_code': 0,
        'io_log': [(0, 'stdout', b'testing\n')],
    })
    result_b = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'return_code': 0,
        'comments': 'foo',
        'io_log': [(0, 'stdout', b'ready: yes\n')],
    })
    session.update_job_result(job_a, result_a)
    session.update_job_result(job_b, result_b)
    return session
def test_add_sibling_unit(self):
    # Define a job
    job = make_job("A", summary="foo", siblings='[{"id": "B"}]')
    # Define an empty session
    session = SessionState([])
    # Add the job to the session
    session.add_unit(job)
    # Both jobs got added to job list
    self.assertEqual(len(session.job_list), 2)
    self.assertIn(job, session.job_list)
    self.assertEqual(session.job_list[1].id, 'B')
    self.assertEqual(session.job_list[1].summary, 'foo')
    sibling = session.job_list[1]
    # Both jobs got added to job state map
    self.assertIs(session.job_state_map[job.id].job, job)
    self.assertIs(session.job_state_map[sibling.id].job, sibling)
    # Both jobs are not added to the desired job list
    self.assertNotIn(job, session.desired_job_list)
    self.assertNotIn(sibling, session.desired_job_list)
    # Both jobs are not in the run list
    self.assertNotIn(job, session.run_list)
    self.assertNotIn(sibling, session.run_list)
    # Both jobs are not selected to run
    self.assertEqual(
        session.job_state_map[job.id].readiness_inhibitor_list,
        [UndesiredJobReadinessInhibitor])
    self.assertEqual(
        session.job_state_map[sibling.id].readiness_inhibitor_list,
        [UndesiredJobReadinessInhibitor])
def test_crash_on_missing_job(self):
    """ http://pad.lv/1334296 """
    A = make_job("A")
    state = SessionState([])
    problems = state.update_desired_job_list([A])
    self.assertEqual(problems, [DependencyUnknownError(A)])
    self.assertEqual(state.desired_job_list, [])
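# Illustrative sketch (not from the original source; the helper name is
# hypothetical): as the test above shows, update_desired_job_list() reports
# problems as a return value instead of raising, so callers typically log
# them and carry on with whatever could actually be scheduled -- the pattern
# used by the invocation classes later in this section.
import logging

logger = logging.getLogger(__name__)


def apply_job_selection(session, desired_job_list):
    # Ask the session to schedule the desired jobs and collect any problems.
    problem_list = session.update_desired_job_list(desired_job_list)
    for problem in problem_list:
        # Each problem describes a job that could not be scheduled.
        logger.warning("- %s", problem)
    # Whatever could be scheduled ends up on the run list.
    return session.run_list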
def test_get_estimated_duration_manual(self):
    two_seconds = make_job("two_seconds", plugin="manual", command="farboo",
                           estimated_duration=2.0)
    shell_job = make_job("shell_job", plugin="shell", command="boofar",
                         estimated_duration=0.6)
    session = SessionState([two_seconds, shell_job])
    session.update_desired_job_list([two_seconds, shell_job])
    self.assertEqual(session.get_estimated_duration(), (0.6, 32.0))
def test_get_estimated_duration_automated_unknown(self):
    three_seconds = make_job("three_seconds", plugin="shell", command="frob",
                             estimated_duration=3.0)
    no_estimated_duration = make_job("no_estimated_duration", plugin="shell",
                                     command="borf")
    session = SessionState([three_seconds, no_estimated_duration])
    session.update_desired_job_list([three_seconds, no_estimated_duration])
    self.assertEqual(session.get_estimated_duration(), (None, 0.0))
def test_get_estimated_duration_manual_unknown(self):
    four_seconds = make_job("four_seconds", plugin="shell", command="fibble",
                            estimated_duration=4.0)
    no_estimated_duration = make_job("no_estimated_duration",
                                     plugin="user-verify", command="bibble")
    session = SessionState([four_seconds, no_estimated_duration])
    session.update_desired_job_list([four_seconds, no_estimated_duration])
    self.assertEqual(session.get_estimated_duration(), (4.0, None))
def __init__(self, provider_loader, config_loader, ns):
    super().__init__(provider_loader, config_loader)
    self.ns = ns
    self.unit_list = list(
        itertools.chain(*[p.unit_list for p in self.provider_list]))
    self.session = SessionState(self.unit_list)
    self.desired_job_list = self._get_matching_job_list(
        ns, self.session.job_list)
    self.problem_list = self.session.update_desired_job_list(
        self.desired_job_list)
def _add_test_plan_sheet(self, workbook, plan, job_list):
    """A sheet for a given test plan."""
    # Create a sheet for this test plan
    sheet = workbook.add_worksheet(_('{}').format(plan.tr_name()[0:30]))
    # Define cell formatting
    fmt_header = workbook.add_format({
        'bold': True,
        'font_color': '#ffffff',
        'bg_color': '#77216f',  # Light Aubergine
    })
    fmt_code = workbook.add_format({
        'font_name': 'courier',
    })
    fmt_info = workbook.add_format({
        'font_color': '#dd4814',  # Ubuntu Orange
        'font_name': 'Ubuntu',
        'font_size': 16,
    })
    # Create a section with static information
    sheet.write('A2', _("Test Plan Name"), fmt_info)
    sheet.write('B2', plan.tr_name())
    sheet.write('A3', _("Test Plan ID"), fmt_info)
    sheet.write('B3', plan.id, fmt_code)
    sheet.merge_range('A4:B4', 'TIP: plainbox run -T {}'.format(plan.id),
                      fmt_code)
    # We can add anything we want to all the rows in range(INFO_OFFSET)
    INFO_OFFSET = 5
    # Find what is the effective run list of this test plan
    state = SessionState(job_list)
    state.update_desired_job_list(
        select_jobs(job_list, [plan.get_qualifier()]))

    def max_of(callback):
        """Get the maximum of some function applied to each job."""
        return max((callback(job) for job in state.run_list), default=0)

    COL_ID, COL_SUMMARY = range(2)
    # Add columns: id
    sheet.write(INFO_OFFSET, COL_ID, _("Test Case ID"), fmt_header)
    sheet.write(INFO_OFFSET, COL_SUMMARY, _("Summary"), fmt_header)
    sheet.set_column(
        COL_ID, COL_ID,
        max_of(lambda job: nr_cols_of(job.partial_id))
        * WIDTH_FACTOR + WIDTH_PADDING)
    sheet.set_column(
        COL_SUMMARY, COL_SUMMARY,
        max_of(lambda job: nr_cols_of(job.tr_summary()))
        * WIDTH_FACTOR + WIDTH_PADDING)
    # Add the information about each job as a separate row
    for index, job in enumerate(state.run_list, INFO_OFFSET + 1):
        sheet.set_row(index, HEIGHT_FACTOR + HEIGHT_PADDING)
        sheet.write(index, COL_ID, job.partial_id, fmt_code)
        sheet.write(index, COL_SUMMARY, job.tr_summary())
    # Make sure the sheet is read only
    sheet.protect()
def make_test_session(self):
    # Create a small session with two jobs and two results
    job_a = make_job('job_a')
    job_b = make_job('job_b')
    session = SessionState([job_a, job_b])
    session.update_desired_job_list([job_a, job_b])
    result_a = make_job_result(job_a, 'pass')
    result_b = make_job_result(job_b, 'fail')
    session.update_job_result(job_a, result_a)
    session.update_job_result(job_b, result_b)
    return session
def test_get_estimated_duration_auto(self):
    # Define jobs with an estimated duration
    one_second = make_job("one_second", plugin="shell", command="foobar",
                          estimated_duration=1.0)
    half_second = make_job("half_second", plugin="shell", command="barfoo",
                           estimated_duration=0.5)
    session = SessionState([one_second, half_second])
    session.update_desired_job_list([one_second, half_second])
    self.assertEqual(session.get_estimated_duration(), (1.5, 0.0))
def make_test_session(self):
    # Create a small session with two jobs and two results
    job_a = make_job('job_a')
    job_b = make_job('job_b')
    session = SessionState([job_a, job_b])
    session.update_desired_job_list([job_a, job_b])
    result_a = make_job_result(outcome=IJobResult.OUTCOME_PASS)
    result_b = make_job_result(outcome=IJobResult.OUTCOME_FAIL)
    session.update_job_result(job_a, result_a)
    session.update_job_result(job_b, result_b)
    return session
def test_crash_in_update_desired_job_list(self):
    # This checks whether a DependencyError can crash
    # update_desired_job_list() with a ValueError, in certain conditions.
    A = make_job('A', depends='X')
    L = make_job('L', plugin='local')
    session = SessionState([A, L])
    problems = session.update_desired_job_list([A, L])
    # We should get exactly one DependencyMissingError related to job A and
    # the undefined job X (that is presumably defined by the local job L)
    self.assertEqual(len(problems), 1)
    self.assertIsInstance(problems[0], DependencyMissingError)
    self.assertIs(problems[0].affected_job, A)
def test_dont_remove_missing_jobs(self):
    """ http://pad.lv/1444126 """
    A = make_job("A", depends="B")
    B = make_job("B", depends="C")
    state = SessionState([A, B])
    problems = state.update_desired_job_list([A, B])
    self.assertEqual(problems, [
        DependencyMissingError(B, 'C', 'direct'),
        DependencyMissingError(A, 'B', 'direct'),
    ])
    self.assertEqual(state.desired_job_list, [])
    self.assertEqual(state.run_list, [])
def setUp(self):
    # All of the tests below are using one session. The session has four
    # jobs, clustered into two independent groups. Job A depends on a
    # resource provided by job R which has no dependencies at all. Job X
    # depends on job Y which in turn has no dependencies at all.
    #
    # A -(resource dependency)-> R
    #
    # X -(direct dependency) -> Y
    self.job_A = make_job("A", requires="R.attr == 'value'")
    self.job_A_expr = self.job_A.get_resource_program().expression_list[0]
    self.job_R = make_job("R", plugin="resource")
    self.job_X = make_job("X", depends='Y')
    self.job_Y = make_job("Y")
    self.job_list = [self.job_A, self.job_R, self.job_X, self.job_Y]
    self.session = SessionState(self.job_list)
def run(self):
    # Compute the run list; this can notify us about problems in the
    # selected jobs. Currently we just display each problem.
    # Create a session that handles most of the stuff needed to run jobs.
    try:
        self.session = SessionState(self.job_list)
    except DependencyDuplicateError as exc:
        # Handle possible DependencyDuplicateError that can happen if
        # someone is using plainbox for job development.
        print("The job database you are currently using is broken")
        print("At least two jobs contend for the name {0}".format(
            exc.job.id))
        print("First job defined in: {0}".format(exc.job.origin))
        print("Second job defined in: {0}".format(
            exc.duplicate_job.origin))
        raise SystemExit(exc)
    with self.session.open():
        self._set_job_selection()
        self.runner = JobRunner(
            self.session.session_dir, self.provider_list,
            self.session.jobs_io_log_dir, command_io_delegate=self,
            dry_run=self.ns.dry_run)
        self._run_all_jobs()
        if self.config.fallback_file is not Unset:
            self._save_results()
        self._submit_results()
    # FIXME: sensible return value
    return 0
def run(self):
    # Compute the run list; this can notify us about problems in the
    # selected jobs. Currently we just display each problem.
    # Create a session that handles most of the stuff needed to run jobs.
    try:
        self.session = SessionState(self.job_list)
    except DependencyDuplicateError as exc:
        # Handle possible DependencyDuplicateError that can happen if
        # someone is using plainbox for job development.
        print("The job database you are currently using is broken")
        print("At least two jobs contend for the name {0}".format(
            exc.job.name))
        print("First job defined in: {0}".format(exc.job.origin))
        print("Second job defined in: {0}".format(
            exc.duplicate_job.origin))
        raise SystemExit(exc)
    with self.session.open():
        self._set_job_selection()
        self.runner = JobRunner(
            self.session.session_dir,
            self.session.jobs_io_log_dir,
            command_io_delegate=self,
            outcome_callback=None,  # SRU runs are never interactive
            dry_run=self.ns.dry_run
        )
        self._run_all_jobs()
        if self.config.fallback_file is not Unset:
            self._save_results()
        self._submit_results()
    # FIXME: sensible return value
    return 0
def test_init_with_identical_jobs(self):
    A = make_job("A")
    second_A = make_job("A")
    third_A = make_job("A")
    session = SessionState([A, second_A, third_A])
    # We don't really store the duplicates, just the first one
    self.assertEqual(session.job_list, [A])
def test_category_map(self):
    """
    Ensure that passing OPTION_WITH_CATEGORY_MAP causes a category id ->
    tr_name mapping to show up.
    """
    exporter = self.TestSessionStateExporter([
        SessionStateExporterBase.OPTION_WITH_CATEGORY_MAP
    ])
    # Create three units: two categories (foo, bar) and one job (froz),
    # so that froz.category_id == foo
    cat_foo = CategoryUnit({
        'id': 'foo',
        'name': 'The foo category',
    })
    cat_bar = CategoryUnit({
        'id': 'bar',
        'name': 'The bar category',
    })
    job_froz = JobDefinition({
        'plugin': 'shell',
        'id': 'froz',
        'category_id': 'foo'
    })
    # Create and export a session with the three units
    state = SessionState([cat_foo, cat_bar, job_froz])
    session_manager = mock.Mock(spec_set=SessionManager, state=state)
    data = exporter.get_session_data_subset(session_manager)
    # Ensure that only the foo category was used, and the bar category was
    # discarded as nothing was referencing it
    self.assertEqual(data['category_map'], {
        'foo': 'The foo category',
    })
def __init__(self, provider, ns):
    super(AnalyzeInvocation, self).__init__(provider)
    self.ns = ns
    self.job_list = self.get_job_list(ns)
    self.desired_job_list = self._get_matching_job_list(ns, self.job_list)
    self.session = SessionState(self.job_list)
    self.problem_list = self.session.update_desired_job_list(
        self.desired_job_list)
def _run_jobs(self, ns, job_list, exporter, transport=None):
    # Compute the run list; this can notify us about problems in the
    # selected jobs. Currently we just display each problem.
    matching_job_list = self._get_matching_job_list(ns, job_list)
    print("[ Analyzing Jobs ]".center(80, '='))
    # Create a session that handles most of the stuff needed to run jobs
    try:
        session = SessionState(job_list)
    except DependencyDuplicateError as exc:
        # Handle possible DependencyDuplicateError that can happen if
        # someone is using plainbox for job development.
        print("The job database you are currently using is broken")
        print("At least two jobs contend for the name {0}".format(
            exc.job.name))
        print("First job defined in: {0}".format(exc.job.origin))
        print("Second job defined in: {0}".format(
            exc.duplicate_job.origin))
        raise SystemExit(exc)
    with session.open():
        if session.previous_session_file():
            if self.ask_for_resume():
                session.resume()
            else:
                session.clean()
        self._update_desired_job_list(session, matching_job_list)
        if (sys.stdin.isatty() and sys.stdout.isatty()
                and not ns.not_interactive):
            outcome_callback = self.ask_for_outcome
        else:
            outcome_callback = None
        runner = JobRunner(
            session.session_dir,
            session.jobs_io_log_dir,
            outcome_callback=outcome_callback,
            dry_run=ns.dry_run
        )
        self._run_jobs_with_session(ns, session, runner)
        # Get a stream with exported session data.
        exported_stream = io.BytesIO()
        data_subset = exporter.get_session_data_subset(session)
        exporter.dump(data_subset, exported_stream)
        exported_stream.seek(0)  # Need to rewind the file, puagh
        # Write the stream to file if requested
        self._save_results(ns.output_file, exported_stream)
        # Invoke the transport?
        if transport:
            exported_stream.seek(0)
            try:
                transport.send(exported_stream.read())
            except InvalidSchema as exc:
                print("Invalid destination URL: {0}".format(exc))
            except ConnectionError as exc:
                print(("Unable to connect "
                       "to destination URL: {0}").format(exc))
            except HTTPError as exc:
                print(("Server returned an error when "
                       "receiving or processing: {0}").format(exc))
    # FIXME: sensible return value
    return 0
def test_set_resource_list(self):
    # Define an empty session
    session = SessionState([])
    # Define a resource
    old_res = Resource({'attr': 'old value'})
    # Set the resource list with the old resource.
    # So here the old resource is stored under a new 'R' key.
    session.set_resource_list('R', [old_res])
    # Ensure that it worked
    self.assertEqual(session._resource_map, {'R': [old_res]})
    # Define another resource
    new_res = Resource({'attr': 'new value'})
    # Now set a second resource list under the same name
    session.set_resource_list('R', [new_res])
    # What should happen here is that the R resource is entirely replaced
    # by the data from the new list. The data should not be merged or
    # appended in any way.
    self.assertEqual(session._resource_map, {'R': [new_res]})
def test_init_with_identical_jobs(self):
    A = make_job("A")
    second_A = make_job("A")
    third_A = make_job("A")
    # Identical jobs are folded for backwards compatibility with some local
    # jobs that re-added existing jobs
    session = SessionState([A, second_A, third_A])
    # But we don't really store the duplicates, just the first one
    self.assertEqual(session.job_list, [A])
def test_add_job(self):
    # Define a job
    job = make_job("A")
    # Define an empty session
    session = SessionState([])
    # Add the job to the session
    session.add_job(job)
    # The job got added to job list
    self.assertIn(job, session.job_list)
    # The job got added to job state map
    self.assertIs(session.job_state_map[job.name].job, job)
    # The job is not added to the desired job list
    self.assertNotIn(job, session.desired_job_list)
    # The job is not in the run list
    self.assertNotIn(job, session.run_list)
    # The job is not selected to run
    self.assertEqual(
        session.job_state_map[job.name].readiness_inhibitor_list,
        [UndesiredJobReadinessInhibitor])
def test_add_job(self):
    # Define a job
    job = make_job("A")
    # Define an empty session
    session = SessionState([])
    # Add the job to the session
    session.add_job(job)
    # The job got added to job list
    self.assertIn(job, session.job_list)
    # The job got added to job state map
    self.assertIs(session.job_state_map[job.id].job, job)
    # The job is not added to the desired job list
    self.assertNotIn(job, session.desired_job_list)
    # The job is not in the run list
    self.assertNotIn(job, session.run_list)
    # The job is not selected to run
    self.assertEqual(
        session.job_state_map[job.id].readiness_inhibitor_list,
        [UndesiredJobReadinessInhibitor])
def test_init_with_colliding_jobs(self):
    # This is similar to the test above but the jobs actually differ. In
    # this case the _second_ job is rejected, but it really signifies a
    # deeper problem that should only occur during development of jobs.
    A = make_job("A")
    different_A = make_job("A", plugin="resource")
    with self.assertRaises(DependencyDuplicateError) as call:
        SessionState([A, different_A])
    self.assertIs(call.exception.job, A)
    self.assertIs(call.exception.duplicate_job, different_A)
    self.assertIs(call.exception.affected_job, different_A)
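# The invocation classes later in this section guard against the same
# collision at session-construction time. A minimal sketch of that pattern
# follows (the helper name is hypothetical; it assumes SessionState and
# DependencyDuplicateError are imported as in the surrounding tests, and
# only relies on the exception attributes exercised above).
def make_session_or_exit(job_list):
    try:
        return SessionState(job_list)
    except DependencyDuplicateError as exc:
        # Two different job definitions contend for the same name; this
        # should only happen during job development.
        print("At least two jobs contend for the name {0}".format(exc.job.id))
        print("First job defined in: {0}".format(exc.job.origin))
        print("Second job defined in: {0}".format(exc.duplicate_job.origin))
        raise SystemExit(exc)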
def test_add_job_duplicate_job(self):
    # Define a job
    job = make_job("A")
    # Define an empty session
    session = SessionState([])
    # Add the job to the session
    session.add_job(job)
    # The job got added to job list
    self.assertIn(job, session.job_list)
    # Define a perfectly identical job
    duplicate_job = make_job("A")
    self.assertEqual(job, duplicate_job)
    # Try adding it to the session
    #
    # Note that this does not raise any exceptions as the jobs are perfect
    # duplicates.
    session.add_job(duplicate_job)
    # The new job _did not_ get added to the job list
    self.assertEqual(len(session.job_list), 1)
    self.assertIsNot(duplicate_job, session.job_list[0])
def _run_jobs(self, ns, job_list, exporter):
    # Compute the run list; this can notify us about problems in the
    # selected jobs. Currently we just display each problem.
    matching_job_list = self._get_matching_job_list(ns, job_list)
    print("[ Analyzing Jobs ]".center(80, '='))
    # Create a session that handles most of the stuff needed to run jobs
    session = SessionState(job_list)
    self._update_desired_job_list(session, matching_job_list)
    with session.open():
        if (sys.stdin.isatty() and sys.stdout.isatty()
                and not ns.not_interactive):
            outcome_callback = self.ask_for_outcome
        else:
            outcome_callback = None
        runner = JobRunner(
            self.checkbox, session.session_dir,
            session.jobs_io_log_dir,
            outcome_callback=outcome_callback)
        self._run_jobs_with_session(ns, session, runner)
        self._save_results(ns, session, exporter)
    # FIXME: sensible return value
    return 0
def make_realistic_test_session(self, session_dir):
    # Create a more realistic session with two jobs but with a richer set
    # of data in the actual jobs and results.
    job_a = JobDefinition({
        'plugin': 'shell',
        'name': 'job_a',
        'summary': 'This is job A',
        'command': 'echo testing && true',
        'requires': 'job_b.ready == "yes"'
    })
    job_b = JobDefinition({
        'plugin': 'resource',
        'name': 'job_b',
        'summary': 'This is job B',
        'command': 'echo ready: yes'
    })
    session = SessionState([job_a, job_b])
    session.update_desired_job_list([job_a, job_b])
    result_a = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'return_code': 0,
        'io_log': [(0, 'stdout', b'testing\n')],
    })
    result_b = MemoryJobResult({
        'outcome': IJobResult.OUTCOME_PASS,
        'return_code': 0,
        'comments': 'foo',
        'io_log': [(0, 'stdout', b'ready: yes\n')],
    })
    session.update_job_result(job_a, result_a)
    session.update_job_result(job_b, result_b)
    return session
def test_mandatory_jobs_are_first_in_run_list(self):
    A = make_job('A')
    B = make_job('B')
    session = SessionState([A, B])
    session.update_mandatory_job_list([B])
    session.update_desired_job_list([A])
    self.assertEqual(session.run_list, [B, A])
def setUp(self):
    self.A = make_job('a', name='A')
    self.B = make_job('b', name='B', plugin='local', description='foo')
    self.C = make_job('c', name='C')
    self.D = make_job('d', name='D', plugin='shell')
    self.E = make_job('e', name='E', plugin='shell')
    self.F = make_job('f', name='F', plugin='resource', description='baz')
    state = SessionState([self.A, self.B, self.C, self.D, self.E, self.F])
    # D and E are children of B
    state.job_state_map[self.D.id].via_job = self.B
    state.job_state_map[self.E.id].via_job = self.B
    self.tree = SelectableJobTreeNode.create_tree(
        state, [self.A, self.B, self.C, self.D, self.E, self.F])
def _run_jobs(self, ns, job_list):
    # Compute the run list; this can notify us about problems in the
    # selected jobs. Currently we just display each problem.
    matching_job_list = self._get_matching_job_list(ns, job_list)
    print("[ Analyzing Jobs ]".center(80, '='))
    # Create a session that handles most of the stuff needed to run jobs
    session = SessionState(job_list)
    self._update_desired_job_list(session, matching_job_list)
    with session.open():
        if (sys.stdin.isatty() and sys.stdout.isatty()
                and not ns.not_interactive):
            outcome_callback = self.ask_for_outcome
        else:
            outcome_callback = None
        runner = JobRunner(
            self._checkbox, session.session_dir,
            outcome_callback=outcome_callback)
        self._run_jobs_with_session(ns, session, runner)
    print("[ Results ]".center(80, '='))
    for job_name in sorted(session.job_state_map):
        job_state = session.job_state_map[job_name]
        if job_state.result.outcome != JobResult.OUTCOME_NONE:
            print("{}: {}".format(job_name, job_state.result.outcome))
def test_add_job_clashing_job(self):
    # Define a job
    job = make_job("A")
    # Define an empty session
    session = SessionState([])
    # Add the job to the session
    session.add_job(job)
    # The job got added to job list
    self.assertIn(job, session.job_list)
    # Define a different job that clashes with the initial job
    clashing_job = make_job("A", plugin='other')
    self.assertNotEqual(job, clashing_job)
    self.assertEqual(job.name, clashing_job.name)
    # Try adding it to the session
    #
    # This raises an exception
    with self.assertRaises(DependencyDuplicateError) as call:
        session.add_job(clashing_job)
    # The exception gets the jobs in the right order
    self.assertIs(call.exception.affected_job, job)
    self.assertIs(call.exception.affecting_job, clashing_job)
    # The new job _did not_ get added to the job list
    self.assertEqual(len(session.job_list), 1)
    self.assertIsNot(clashing_job, session.job_list[0])
def test_observe_result__missing_resource_key(self, mock_logger):
    job = make_job("R", plugin="resource")
    template = TemplateUnit({
        'template-resource': job.id,
        'id': 'foo-{missing}',
        'plugin': 'shell'
    })
    result = mock.Mock(spec=IJobResult, outcome=IJobResult.OUTCOME_PASS)
    result.get_io_log.return_value = [
        (0, 'stdout', b'attr: value1\n'),
        (0, 'stdout', b'\n'),
        (0, 'stdout', b'attr: value2\n')]
    session_state = SessionState([template, job])
    self.ctrl.observe_result(session_state, job, result)
    # Ensure that a warning was logged
    mock_logger.warning.assert_called_with(
        "Ignoring %s with missing template parameter %s",
        "foo-{missing}", "missing")
def setUp(self):
    A = make_job('A')
    B = make_job('B', plugin='local', description='foo')
    C = make_job('C')
    D = make_job('D', plugin='shell')
    E = make_job('E', plugin='local', description='bar')
    F = make_job('F', plugin='shell')
    G = make_job('G', plugin='local', description='baz')
    R = make_job('R', plugin='resource')
    Z = make_job('Z', plugin='local', description='zaz')
    state = SessionState([A, B, C, D, E, F, G, R, Z])
    # D and E are children of B
    state.job_state_map[D.id].via_job = B
    state.job_state_map[E.id].via_job = B
    # F is a child of E
    state.job_state_map[F.id].via_job = E
    self.tree = JobTreeNode.create_tree(state, [R, B, C, D, E, F, G, A, Z])
def test_category_map_and_uncategorised(self):
    """
    Ensure that OPTION_WITH_CATEGORY_MAP synthesizes the special
    'uncategorised' category.
    """
    exporter = self.TestSessionStateExporter([
        SessionStateExporterBase.OPTION_WITH_CATEGORY_MAP
    ])
    # Create a job without a specific category
    job = JobDefinition({
        'plugin': 'shell',
        'id': 'id',
    })
    # Create and export a session with that one job
    state = SessionState([job])
    session_manager = mock.Mock(spec_set=SessionManager, state=state)
    data = exporter.get_session_data_subset(session_manager)
    # Ensure that the special 'uncategorised' category is used
    self.assertEqual(data['category_map'], {
        'com.canonical.plainbox::uncategorised': 'Uncategorised',
    })
class _SRUInvocation:
    """
    Helper class instantiated to perform a particular invocation of the sru
    command. Unlike the SRU command itself, this class is instantiated each
    time.
    """

    def __init__(self, ns, config):
        self.ns = ns
        self.checkbox = CheckBox()
        self.config = config
        self.whitelist = WhiteList.from_file(os.path.join(
            self.checkbox.whitelists_dir, "sru.whitelist"))
        self.job_list = self.checkbox.get_builtin_jobs()
        # XXX: maybe allow specifying system_id from command line?
        self.exporter = XMLSessionStateExporter(system_id=None)
        self.session = None
        self.runner = None

    def run(self):
        # Compute the run list; this can notify us about problems in the
        # selected jobs. Currently we just display each problem.
        # Create a session that handles most of the stuff needed to run jobs.
        try:
            self.session = SessionState(self.job_list)
        except DependencyDuplicateError as exc:
            # Handle possible DependencyDuplicateError that can happen if
            # someone is using plainbox for job development.
            print("The job database you are currently using is broken")
            print("At least two jobs contend for the name {0}".format(
                exc.job.name))
            print("First job defined in: {0}".format(exc.job.origin))
            print("Second job defined in: {0}".format(
                exc.duplicate_job.origin))
            raise SystemExit(exc)
        with self.session.open():
            self._set_job_selection()
            self.runner = JobRunner(
                self.session.session_dir,
                self.session.jobs_io_log_dir,
                command_io_delegate=self,
                outcome_callback=None,  # SRU runs are never interactive
                dry_run=self.ns.dry_run
            )
            self._run_all_jobs()
            if self.config.fallback_file is not Unset:
                self._save_results()
            self._submit_results()
        # FIXME: sensible return value
        return 0

    def _set_job_selection(self):
        desired_job_list = get_matching_job_list(
            self.job_list, self.whitelist)
        problem_list = self.session.update_desired_job_list(desired_job_list)
        if problem_list:
            logger.warning("There were some problems with the selected jobs")
            for problem in problem_list:
                logger.warning("- %s", problem)
            logger.warning("Problematic jobs will not be considered")

    def _save_results(self):
        print("Saving results to {0}".format(self.config.fallback_file))
        data = self.exporter.get_session_data_subset(self.session)
        with open(self.config.fallback_file, "wt",
                  encoding="UTF-8") as stream:
            translating_stream = ByteStringStreamTranslator(stream, "UTF-8")
            self.exporter.dump(data, translating_stream)

    def _submit_results(self):
        print("Submitting results to {0} for secure_id {1}".format(
            self.config.c3_url, self.config.secure_id))
        options_string = "secure_id={0}".format(self.config.secure_id)
        # Create the transport object
        try:
            transport = CertificationTransport(
                self.config.c3_url, options_string, self.config)
        except InvalidSecureIDError as exc:
            print(exc)
            return False
        # Prepare the data for submission
        data = self.exporter.get_session_data_subset(self.session)
        with tempfile.NamedTemporaryFile(mode='w+b') as stream:
            # Dump the data to the temporary file
            self.exporter.dump(data, stream)
            # Flush and rewind
            stream.flush()
            stream.seek(0)
            try:
                # Send the data, reading from the temporary file
                result = transport.send(stream)
                if 'url' in result:
                    print("Successfully sent, submission status at "
                          "{0}".format(result['url']))
                else:
                    print("Successfully sent, server response: {0}".format(
                        result))
            except InvalidSchema as exc:
                print("Invalid destination URL: {0}".format(exc))
            except ConnectionError as exc:
                print("Unable to connect to destination URL: {0}".format(exc))
            except HTTPError as exc:
                print(("Server returned an error when "
                       "receiving or processing: {0}").format(exc))
            except IOError as exc:
                print("Problem reading a file: {0}".format(exc))

    def _run_all_jobs(self):
        again = True
        while again:
            again = False
            for job in self.session.run_list:
                # Skip jobs that already have a result; this is only needed
                # when we run over the list of jobs again, after discovering
                # new jobs via the local job output.
                result = self.session.job_state_map[job.name].result
                if result.outcome is not None:
                    continue
                self._run_single_job(job)
                self.session.persistent_save()
                if job.plugin == "local":
                    # After each local job runs rebuild the list of matching
                    # jobs and run everything again
                    self._set_job_selection()
                    again = True
                    break

    def _run_single_job(self, job):
        print("- {}:".format(job.name), end=' ')
        job_state, job_result = run_job_if_possible(
            self.session, self.runner, self.config, job)
        print("{0}".format(job_result.outcome))
        if job_result.comments is not None:
            print("comments: {0}".format(job_result.comments))
        if job_state.readiness_inhibitor_list:
            print("inhibitors:")
            for inhibitor in job_state.readiness_inhibitor_list:
                print(" * {}".format(inhibitor))
        self.session.update_job_result(job, job_result)
class SessionStateReactionToJobResultTests(TestCase):
    # This test checks how a simple session with a few typical jobs reacts
    # to job results of various kinds. It checks most of the resource
    # presentation error conditions that I could think of.

    def setUp(self):
        # All of the tests below are using one session. The session has four
        # jobs, clustered into two independent groups. Job A depends on a
        # resource provided by job R which has no dependencies at all. Job X
        # depends on job Y which in turn has no dependencies at all.
        #
        # A -(resource dependency)-> R
        #
        # X -(direct dependency) -> Y
        self.job_A = make_job("A", requires="R.attr == 'value'")
        self.job_A_expr = self.job_A.get_resource_program().expression_list[0]
        self.job_R = make_job("R", plugin="resource")
        self.job_X = make_job("X", depends='Y')
        self.job_Y = make_job("Y")
        self.job_L = make_job("L", plugin="local")
        self.job_list = [
            self.job_A, self.job_R, self.job_X, self.job_Y, self.job_L]
        self.session = SessionState(self.job_list)

    def job_state(self, name):
        # A helper function to avoid overly long expressions
        return self.session.job_state_map[name]

    def job_inhibitor(self, name, index):
        # Another helper that shortens deep object nesting
        return self.job_state(name).readiness_inhibitor_list[index]

    def test_assumptions(self):
        # This function checks the assumptions of SessionState initial state.
        # The job list is what we set when constructing the session.
        #
        self.assertEqual(self.session.job_list, self.job_list)
        # The run_list is still empty because the desired_job_list is equally
        # empty.
        self.assertEqual(self.session.run_list, [])
        self.assertEqual(self.session.desired_job_list, [])
        # All jobs have state objects that indicate they cannot run (because
        # they have the UNDESIRED inhibitor set for them by default).
        self.assertFalse(self.job_state('A').can_start())
        self.assertFalse(self.job_state('R').can_start())
        self.assertFalse(self.job_state('X').can_start())
        self.assertFalse(self.job_state('Y').can_start())
        self.assertEqual(self.job_inhibitor('A', 0).cause,
                         JobReadinessInhibitor.UNDESIRED)
        self.assertEqual(self.job_inhibitor('R', 0).cause,
                         JobReadinessInhibitor.UNDESIRED)
        self.assertEqual(self.job_inhibitor('X', 0).cause,
                         JobReadinessInhibitor.UNDESIRED)
        self.assertEqual(self.job_inhibitor('Y', 0).cause,
                         JobReadinessInhibitor.UNDESIRED)

    def test_desire_job_A_updates_state_map(self):
        # This function checks what happens when the job A becomes desired
        # via the update_desired_job_list() call.
        self.session.update_desired_job_list([self.job_A])
        self.assertEqual(self.session.desired_job_list, [self.job_A])
        # This should topologically sort the job list, according to the
        # relationship created by the resource requirement. This is not
        # really testing the dependency solver (it has separate tests), just
        # that this basic property is established and that the run_list
        # properly shows that R must run before A can run.
        self.assertEqual(self.session.run_list, [self.job_R, self.job_A])
        # This also recomputes job readiness state so that job R is no longer
        # undesired, has no other inhibitor and thus can start.
        self.assertEqual(self.job_state('R').readiness_inhibitor_list, [])
        self.assertTrue(self.job_state('R').can_start())
        # While the A job still cannot run it now has a different inhibitor,
        # one with the PENDING_RESOURCE cause. The inhibitor also properly
        # pinpoints the related job and related expression.
        self.assertNotEqual(self.job_state('A').readiness_inhibitor_list, [])
        self.assertEqual(self.job_inhibitor('A', 0).cause,
                         JobReadinessInhibitor.PENDING_RESOURCE)
        self.assertEqual(self.job_inhibitor('A', 0).related_job, self.job_R)
        self.assertEqual(self.job_inhibitor('A', 0).related_expression,
                         self.job_A_expr)
        self.assertFalse(self.job_state('A').can_start())

    def test_resource_job_result_updates_resource_and_job_states(self):
        # This function checks what happens when a JobResult for job R (which
        # is a resource job via the resource plugin) is presented to the
        # session.
        result_R = MemoryJobResult({
            'io_log': [(0, 'stdout', b"attr: value\n")],
        })
        self.session.update_job_result(self.job_R, result_R)
        # The most obvious thing that can happen is that the result is simply
        # stored in the associated job state object.
        self.assertIs(self.job_state('R').result, result_R)
        # Initially the _resource_map was empty. SessionState parses the
        # io_log of results of resource jobs and creates appropriate resource
        # objects.
        self.assertIn("R", self.session._resource_map)
        expected = {'R': [Resource({'attr': 'value'})]}
        self.assertEqual(self.session._resource_map, expected)
        # As job results are presented to the session the readiness of other
        # jobs is changed. Since A depends on R via a resource expression,
        # and the particular resources produced by R in this test allow that
        # expression to match, the readiness inhibitor from A should have
        # been removed. Since this test does not use
        # update_desired_job_list(), A will still have the UNDESIRED
        # inhibitor but it will no longer have the PENDING_RESOURCE
        # inhibitor.
        self.assertEqual(self.job_inhibitor('A', 0).cause,
                         JobReadinessInhibitor.UNDESIRED)
        # Now if we put A on the desired list this should clear the UNDESIRED
        # inhibitor and make A runnable.
        self.session.update_desired_job_list([self.job_A])
        self.assertTrue(self.job_state('A').can_start())

    def test_normal_job_result_updates(self):
        # This function checks what happens when a JobResult for job A is
        # presented to the session.
        result_A = MemoryJobResult({})
        self.session.update_job_result(self.job_A, result_A)
        # As before the result should be stored as-is
        self.assertIs(self.job_state('A').result, result_A)
        # Unlike before, _resource_map should be left unchanged
        self.assertEqual(self.session._resource_map, {})
        # One interesting observation is that readiness inhibitors are
        # entirely unaffected by existing test results beyond dependency and
        # resource relationships. While a result for job A was presented,
        # job A is still inhibited by the UNDESIRED inhibitor.
        self.assertEqual(self.job_inhibitor('A', 0).cause,
                         JobReadinessInhibitor.UNDESIRED)

    def test_resource_job_with_broken_output(self):
        # This function checks how SessionState parses partially broken
        # resource jobs. A JobResult with broken output is constructed below.
        # The output will describe one proper record, one broken record and
        # another proper record in that order.
        result_R = MemoryJobResult({
            'io_log': [
                (0, 'stdout', b"attr: value-1\n"),
                (1, 'stdout', b"\n"),
                (1, 'stdout', b"I-sound-like-a-broken-record\n"),
                (1, 'stdout', b"\n"),
                (1, 'stdout', b"attr: value-2\n")
            ],
        })
        # Since we cannot control the output of scripts, and people indeed
        # make mistakes, a warning is issued but no exception is raised to
        # the caller.
        self.session.update_job_result(self.job_R, result_R)
        # The observation here is that the parser is not handling the
        # exception in a way which would allow for recovery. Out of all the
        # output only the first record is created and stored properly. The
        # third, proper record is entirely ignored.
        expected = {'R': [Resource({'attr': 'value-1'})]}
        self.assertEqual(self.session._resource_map, expected)

    def test_desire_job_X_updates_state_map(self):
        # This function checks what happens when the job X becomes desired
        # via the update_desired_job_list() call.
        self.session.update_desired_job_list([self.job_X])
        self.assertEqual(self.session.desired_job_list, [self.job_X])
        # As in the similar A - R test function above this topologically
        # sorts all affected jobs. Here X depends on Y so Y should be before
        # X on the run list.
        self.assertEqual(self.session.run_list, [self.job_Y, self.job_X])
        # As in the A - R test above this also recomputes the job readiness
        # state. Job Y is now runnable but job X has a PENDING_DEP inhibitor.
        self.assertEqual(self.job_state('Y').readiness_inhibitor_list, [])
        # While the X job still cannot run it now has a different inhibitor,
        # one with the PENDING_DEP cause. The inhibitor also properly
        # pinpoints the related job.
        self.assertNotEqual(self.job_state('X').readiness_inhibitor_list, [])
        self.assertEqual(self.job_inhibitor('X', 0).cause,
                         JobReadinessInhibitor.PENDING_DEP)
        self.assertEqual(self.job_inhibitor('X', 0).related_job, self.job_Y)
        self.assertFalse(self.job_state('X').can_start())

    def test_desired_job_X_cannot_run_with_failed_job_Y(self):
        # This function checks how the readiness state of the desired job X
        # changes when the session is presented with a failed result for
        # job Y.
        self.session.update_desired_job_list([self.job_X])
        # When X is desired, as above, it should be inhibited with
        # PENDING_DEP on Y.
        self.assertNotEqual(self.job_state('X').readiness_inhibitor_list, [])
        self.assertEqual(self.job_inhibitor('X', 0).cause,
                         JobReadinessInhibitor.PENDING_DEP)
        self.assertEqual(self.job_inhibitor('X', 0).related_job, self.job_Y)
        self.assertFalse(self.job_state('X').can_start())
        # When a failed Y result is presented X should switch to FAILED_DEP
        result_Y = MemoryJobResult({'outcome': IJobResult.OUTCOME_FAIL})
        self.session.update_job_result(self.job_Y, result_Y)
        # Now job X should have a FAILED_DEP inhibitor instead of the
        # PENDING_DEP it had before. Everything else should stay as-is.
        self.assertNotEqual(self.job_state('X').readiness_inhibitor_list, [])
        self.assertEqual(self.job_inhibitor('X', 0).cause,
                         JobReadinessInhibitor.FAILED_DEP)
        self.assertEqual(self.job_inhibitor('X', 0).related_job, self.job_Y)
        self.assertFalse(self.job_state('X').can_start())

    def test_desired_job_X_can_run_with_passing_job_Y(self):
        # A variant of the test case above; simply Y passes this time, making
        # X runnable.
        self.session.update_desired_job_list([self.job_X])
        result_Y = MemoryJobResult({'outcome': IJobResult.OUTCOME_PASS})
        self.session.update_job_result(self.job_Y, result_Y)
        # Now X is runnable
        self.assertEqual(self.job_state('X').readiness_inhibitor_list, [])
        self.assertTrue(self.job_state('X').can_start())

    def test_desired_job_X_cannot_run_with_no_resource_R(self):
        # A variant of the two test cases above, using the A - R jobs.
        self.session.update_desired_job_list([self.job_A])
        result_R = MemoryJobResult({
            'io_log': [(0, 'stdout', b'attr: wrong value\n')],
        })
        self.session.update_job_result(self.job_R, result_R)
        # Now A is inhibited by FAILED_RESOURCE
        self.assertNotEqual(self.job_state('A').readiness_inhibitor_list, [])
        self.assertEqual(self.job_inhibitor('A', 0).cause,
                         JobReadinessInhibitor.FAILED_RESOURCE)
        self.assertEqual(self.job_inhibitor('A', 0).related_job, self.job_R)
        self.assertEqual(self.job_inhibitor('A', 0).related_expression,
                         self.job_A_expr)
        self.assertFalse(self.job_state('A').can_start())

    def test_resource_job_result_overwrites_old_resources(self):
        # This function checks what happens when a JobResult for job R is
        # presented to a session that already has some resources from that
        # job.
        result_R_old = MemoryJobResult({
            'io_log': [(0, 'stdout', b"attr: old value\n")]
        })
        self.session.update_job_result(self.job_R, result_R_old)
        # So here the old result is stored into a new 'R' resource
        expected_before = {'R': [Resource({'attr': 'old value'})]}
        self.assertEqual(self.session._resource_map, expected_before)
        # Now we present the second result for the same job
        result_R_new = MemoryJobResult({
            'io_log': [(0, 'stdout', b"attr: new value\n")]
        })
        self.session.update_job_result(self.job_R, result_R_new)
        # What should happen here is that the R resource is entirely replaced
        # by the data from the new result. The data should not be merged or
        # appended in any way.
        expected_after = {'R': [Resource({'attr': 'new value'})]}
        self.assertEqual(self.session._resource_map, expected_after)

    def test_local_job_creates_jobs(self):
        # Create a result for the local job L
        result_L = MemoryJobResult({
            'io_log': [
                (0, 'stdout', b'name: foo\n'),
                (1, 'stdout', b'plugin: manual\n'),
            ],
        })
        # Show this result to the session
        self.session.update_job_result(self.job_L, result_L)
        # A job should be generated
        self.assertTrue("foo" in self.session.job_state_map)
        job_foo = self.session.job_state_map['foo'].job
        self.assertEqual(job_foo.name, "foo")
        self.assertEqual(job_foo.plugin, "manual")
        # It should be linked to the job L via the via attribute
        self.assertEqual(job_foo.via, self.job_L.get_checksum())
class AnalyzeInvocation(CheckBoxInvocationMixIn):

    def __init__(self, provider_list, config, ns):
        super().__init__(provider_list, config)
        self.ns = ns
        self.job_list = self.get_job_list(ns)
        self.desired_job_list = self._get_matching_job_list(ns, self.job_list)
        self.session = SessionState(self.job_list)
        self.problem_list = self.session.update_desired_job_list(
            self.desired_job_list)

    def run(self):
        if self.ns.run_local:
            if self.ns.print_desired_job_list:
                self._print_desired_job_list()
            if self.ns.print_run_list:
                self._print_run_list()
            self._run_local_jobs()
        if self.ns.print_stats:
            self._print_general_stats()
        if self.ns.print_dependency_report:
            self._print_dependency_report()
        if self.ns.print_interactivity_report:
            self._print_interactivity_report()
        if self.ns.print_estimated_duration_report:
            self._print_estimated_duration_report()
        if self.ns.print_validation_report:
            self._print_validation_report(self.ns.only_errors)
        if self.ns.print_requirement_report:
            self._print_requirement_report()
        if self.ns.print_desired_job_list:
            self._print_desired_job_list()
        if self.ns.print_run_list:
            self._print_run_list()

    def _print_desired_job_list(self):
        print(_("[Desired Job List]").center(80, '='))
        for job in self.session.desired_job_list:
            print("{}".format(job.id))

    def _print_run_list(self):
        print(_("[Run List]").center(80, '='))
        for job in self.session.run_list:
            print("{}".format(job.id))

    def _run_local_jobs(self):
        print(_("[Running Local Jobs]").center(80, '='))
        manager = SessionManager.create_with_state(self.session)
        try:
            manager.state.metadata.title = "plainbox dev analyze session"
            manager.state.metadata.flags = [SessionMetaData.FLAG_INCOMPLETE]
            manager.checkpoint()
            runner = JobRunner(
                manager.storage.location, self.provider_list,
                os.path.join(manager.storage.location, 'io-logs'),
                command_io_delegate=self)
            again = True
            while again:
                for job in self.session.run_list:
                    if job.plugin == 'local':
                        if self.session.job_state_map[
                                job.id].result.outcome is None:
                            self._run_local_job(manager, runner, job)
                            break
                else:
                    again = False
            manager.state.metadata.flags = []
            manager.checkpoint()
        finally:
            manager.destroy()

    def _run_local_job(self, manager, runner, job):
        print("{job}".format(job=job.id))
        manager.state.metadata.running_job_name = job.id
        manager.checkpoint()
        result = runner.run_job(job, self.config)
        self.session.update_job_result(job, result)
        new_desired_job_list = self._get_matching_job_list(
            self.ns, self.session.job_list)
        new_problem_list = self.session.update_desired_job_list(
            new_desired_job_list)
        if new_problem_list:
            print(_("Problem list"), new_problem_list)
            self.problem_list.extend(new_problem_list)

    def _print_general_stats(self):
        print(_("[General Statistics]").center(80, '='))
        print(_("Known jobs: {}").format(len(self.job_list)))
        print(_("Selected jobs: {}").format(len(self.desired_job_list)))

    def _print_dependency_report(self):
        print(_("[Dependency Report]").center(80, '='))
        if self.problem_list:
            for problem in self.problem_list:
                print(" * {}".format(problem))
        else:
            print(_("Selected jobs have no dependency problems"))

    def _print_interactivity_report(self):
        print(_("[Interactivity Report]").center(80, '='))
        if not self.session.run_list:
            return
        max_job_len = max(len(job.id) for job in self.session.run_list)
        fmt = "{{job:{}}} : {{interactive:11}} : {{duration}}".format(
            max_job_len)
        for job in self.session.run_list:
            print(
                fmt.format(
                    job=job.id,
                    interactive=(
                        _("automatic") if job.automated
                        else _("interactive")),
                    duration=(
                        # TODO: use python-babel to format localized
                        # timedelta in 14.04+ as the 12.04 babel API is too
                        # limited
                        timedelta(seconds=job.estimated_duration)
                        if job.estimated_duration is not None
                        else _("unknown"))
                )
            )

    def _print_estimated_duration_report(self):
        print(_("[Estimated Duration Report]").center(80, '='))
        print(_("Estimated test duration:"))
        automated, manual = self.session.get_estimated_duration()
        print(" " + _("automated tests: {}").format(
            timedelta(seconds=automated) if automated is not None
            else _("cannot estimate")))
        print(" " + _("manual tests: {}").format(
            timedelta(seconds=manual) if manual is not None
            else _("cannot estimate")))
        print(" " + _("total: {}").format(
            timedelta(seconds=manual + automated)
            if manual is not None and automated is not None
            else _("cannot estimate")))

    def _print_validation_report(self, only_errors):
        print(_("[Validation Report]").center(80, '='))
        if not self.session.run_list:
            return
        max_job_len = max(len(job.id) for job in self.session.run_list)
        fmt = "{{job:{}}} : {{problem}}".format(max_job_len)
        problem = None
        for job in self.session.run_list:
            try:
                job.validate()
            except ValueError as exc:
                problem = str(exc)
            else:
                if only_errors:
                    continue
                problem = ""
            print(fmt.format(job=job.id, problem=problem))
            if problem:
                print(_("Job defined in {}").format(job.origin))
        if only_errors and problem is None:
            print(_("No problems found"))

    def _print_requirement_report(self):
        print(_("[Requirement Report]").center(80, '='))
        if not self.session.run_list:
            return
        requirements = set()
        for job in self.session.run_list:
            if job.requires:
                resource_program = job.get_resource_program()
                if 'package' in resource_program.required_resources:
                    for packages in [
                            resource.text
                            for resource in resource_program.expression_list
                            if resource.resource_id == 'package']:
                        node = ast.parse(packages)
                        visitor = RequirementNodeVisitor()
                        visitor.visit(node)
                        requirements.add(
                            (' | ').join(visitor.packages_seen))
        if requirements:
            print(',\n'.join(sorted(requirements)))
class SessionStateLocalStorageTests(TestCase):

    def setUp(self):
        # session data are kept in XDG_CACHE_HOME/plainbox/.session
        # To avoid resuming a real session, we have to select a temporary
        # location instead
        self._sandbox = tempfile.mkdtemp()
        self._env = os.environ
        os.environ['XDG_CACHE_HOME'] = self._sandbox

    def job_state(self, name):
        # A helper function to avoid overly long expressions
        return self.session.job_state_map[name]

    def test_persistent_save(self):
        self.job_A = make_job("A")
        self.job_list = [self.job_A]
        self.session = SessionState(self.job_list)
        result_A = JobResult({
            'job': self.job_A,
            'outcome': JobResult.OUTCOME_PASS,
            'comments': 'All good',
            'return_code': 0,
            'io_log': ((0, 'stdout', "Success !\n"),)
        })
        session_json_text = """{
            "_job_state_map": {
                "A": {
                    "_job": {
                        "data": {
                            "name": "A",
                            "plugin": "dummy",
                            "requires": null,
                            "depends": null
                        },
                        "_class_id": "JOB_DEFINITION"
                    },
                    "_result": {
                        "data": {
                            "job": {
                                "data": {
                                    "name": "A",
                                    "plugin": "dummy",
                                    "requires": null,
                                    "depends": null
                                },
                                "_class_id": "JOB_DEFINITION"
                            },
                            "outcome": "pass",
                            "return_code": 0,
                            "comments": "All good",
                            "io_log": [
                                [
                                    0,
                                    "stdout",
                                    "Success !\\n"
                                ]
                            ]
                        },
                        "_class_id": "JOB_RESULT"
                    },
                    "_class_id": "JOB_STATE"
                }
            },
            "_desired_job_list": [
                {
                    "data": {
                        "name": "A",
                        "plugin": "dummy",
                        "requires": null,
                        "depends": null
                    },
                    "_class_id": "JOB_DEFINITION"
                }
            ],
            "_class_id": "SESSION_STATE"
        }"""
        self.session.open()
        self.session.update_desired_job_list([self.job_A])
        self.session.update_job_result(self.job_A, result_A)
        self.session.persistent_save()
        session_file = self.session.previous_session_file()
        self.session.close()
        self.assertIsNotNone(session_file)
        with open(session_file) as f:
            raw_json = json.load(f)
            self.maxDiff = None
            self.assertEqual(raw_json, json.loads(session_json_text))

    def test_resume_session(self):
        # All of the tests below are using one session. The session has four
        # jobs: Job A depends on a resource provided by job R which has no
        # dependencies at all. Both Job X and Y depend on job A.
        #
        # A -(resource dependency)-> R
        #
        # X -(direct dependency) -> A
        #
        # Y -(direct dependency) -> A
        self.job_A = make_job("A", requires="R.attr == 'value'")
        self.job_A_expr = self.job_A.get_resource_program().expression_list[0]
        self.job_R = make_job("R", plugin="resource")
        self.job_X = make_job("X", depends='A')
        self.job_Y = make_job("Y", depends='A')
        self.job_list = [self.job_A, self.job_R, self.job_X, self.job_Y]
        # Create a new session (session_dir is empty)
        self.session = SessionState(self.job_list)
        result_R = JobResult({
            'job': self.job_R,
            'io_log': make_io_log(((0, 'stdout', b"attr: value\n"),),
                                  self._sandbox)
        })
        result_A = JobResult({
            'job': self.job_A,
            'outcome': JobResult.OUTCOME_PASS
        })
        result_X = JobResult({
            'job': self.job_X,
            'outcome': JobResult.OUTCOME_PASS
        })
        # Job Y can't start as it requires job A
        self.assertFalse(self.job_state('Y').can_start())
        self.session.update_desired_job_list([self.job_X, self.job_Y])
        self.session.open()
        self.session.update_job_result(self.job_R, result_R)
        self.session.update_job_result(self.job_A, result_A)
        self.session.update_job_result(self.job_X, result_X)
        self.session.persistent_save()
        self.session.close()
        # Create a new session (session_dir should contain session data)
        self.session = SessionState(self.job_list)
        self.session.open()
        # Resume the previous session
        self.session.resume()
        # This time job Y can start
        self.assertTrue(self.job_state('Y').can_start())
        self.session.close()

    def tearDown(self):
        shutil.rmtree(self._sandbox)
        os.environ = self._env
class SessionStateTrimTests(TestCase):
    """
    Tests for SessionState.trim_job_list()
    """

    def setUp(self):
        self.job_a = make_job("a")
        self.job_b = make_job("b")
        self.session = SessionState([self.job_a, self.job_b])

    def test_trim_does_remove_jobs(self):
        """
        verify that trim_job_list() removes jobs as requested
        """
        self.session.trim_job_list(JobIdQualifier("a"))
        self.assertEqual(self.session.job_list, [self.job_b])

    def test_trim_does_remove_job_state(self):
        """
        verify that trim_job_list() removes job state for removed jobs
        """
        self.assertIn("a", self.session.job_state_map)
        self.session.trim_job_list(JobIdQualifier("a"))
        self.assertNotIn("a", self.session.job_state_map)

    def test_trim_does_remove_resources(self):
        """
        verify that trim_job_list() removes resources for removed jobs
        """
        self.session.set_resource_list("a", [Resource({'attr': 'value'})])
        self.assertIn("a", self.session.resource_map)
        self.session.trim_job_list(JobIdQualifier("a"))
        self.assertNotIn("a", self.session.resource_map)

    def test_trim_fires_on_job_removed(self):
        """
        verify that trim_job_list() fires the on_job_removed() signal
        """
        signal_fired = False

        def on_job_removed(job):
            self.assertIs(job, self.job_a)
            nonlocal signal_fired
            signal_fired = True

        self.session.on_job_removed.connect(on_job_removed)
        self.session.trim_job_list(JobIdQualifier("a"))
        self.assertTrue(signal_fired)

    def test_trim_fires_on_job_state_map_changed(self):
        """
        verify that trim_job_list() fires the on_job_state_map_changed()
        signal
        """
        signal_fired = False

        def on_job_state_map_changed():
            nonlocal signal_fired
            signal_fired = True

        self.session.on_job_state_map_changed.connect(
            on_job_state_map_changed)
        self.session.trim_job_list(JobIdQualifier("a"))
        self.assertTrue(signal_fired)

    def test_trim_fires_on_job_state_map_changed_only_when_needed(self):
        """
        verify that trim_job_list() does not fire on_job_state_map_changed()
        needlessly, when no job is actually being removed.
        """
        signal_fired = False

        def on_job_state_map_changed():
            nonlocal signal_fired
            signal_fired = True

        self.session.on_job_state_map_changed.connect(
            on_job_state_map_changed)
        self.session.trim_job_list(JobIdQualifier("x"))
        self.assertFalse(signal_fired)

    def test_trim_raises_ValueError_for_jobs_on_run_list(self):
        """
        verify that trim_job_list() raises ValueError when any of the jobs
        marked for removal is in the run_list.
        """
        self.session.update_desired_job_list([self.job_a])
        with self.assertRaises(ValueError) as boom:
            self.session.trim_job_list(JobIdQualifier("a"))
        self.assertEqual(
            str(boom.exception),
            "cannot remove jobs that are on the run list: a")