def testWaitForChildResults(self): """Should gather status and return records for job summaries.""" parent_job_id = 54321 jobs = [ FakeJob( 0, [FakeStatus('GOOD', 'T0', ''), FakeStatus('GOOD', 'T1', '')], parent_job_id=parent_job_id), FakeJob(1, [ FakeStatus('ERROR', 'T0', 'err', False), FakeStatus('GOOD', 'T1', '') ], parent_job_id=parent_job_id), FakeJob(2, [FakeStatus('TEST_NA', 'T0', 'no')], parent_job_id=parent_job_id), FakeJob(3, [FakeStatus('FAIL', 'T0', 'broken')], parent_job_id=parent_job_id), FakeJob(4, [ FakeStatus('ERROR', 'SERVER_JOB', 'server error'), FakeStatus('GOOD', 'T0', '') ], parent_job_id=parent_job_id), ] # TODO: Write a better test for the case where we yield # results for aborts vs cannot yield results because of # a premature abort. Currently almost all client aborts # have been converted to failures and when aborts do happen # they result in server job failures for which we always # want results. #FakeJob(5, [FakeStatus('ERROR', 'T0', 'gah', True)], # parent_job_id=parent_job_id), # The next job shouldn't be recorded in the results. #FakeJob(6, [FakeStatus('GOOD', 'SERVER_JOB', '')], # parent_job_id=12345)] for status in jobs[4].statuses: status.entry['job'] = {'name': 'broken_infra_job'} # Expect one call to get a list of all child jobs. self.afe.get_jobs(parent_job_id=parent_job_id).AndReturn(jobs[:6]) job_id_set = set([job.id for job in jobs]) yield_values = [[jobs[1]], [jobs[0], jobs[2]], jobs[3:6]] self.mox.StubOutWithMock(time, 'sleep') for yield_this in yield_values: self.afe.get_jobs(id__in=list(job_id_set), finished=True).AndReturn(yield_this) for job in yield_this: self.expect_yield_job_entries(job) job_id_set.remove(job.id) time.sleep(mox.IgnoreArg()) self.mox.ReplayAll() results = [ result for result in job_status.wait_for_child_results( self.afe, self.tko, parent_job_id) ] for job in jobs[:6]: # the 'GOOD' SERVER_JOB shouldn't be there. for status in job.statuses: self.assertTrue(True in map(status.equals_record, results))
def testWaitForSingleJobHostsWithTimeout(self):
    """Discover a single host for this job then timeout."""
    self.mox.StubOutWithMock(time, 'sleep')
    self.mox.StubOutWithMock(job_status, 'gather_job_hostnames')
    self.mox.StubOutWithMock(job_status, '_abort_jobs_if_timedout')
    manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
    expected_hostnames = ['host1', 'host0']
    expected_hosts = [FakeHost(h) for h in expected_hostnames]
    job = FakeJob(7, hostnames=[None, None])
    time.sleep(mox.IgnoreArg()).MultipleTimes()

    job_status._abort_jobs_if_timedout(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn(False)
    self.expect_hosts_query_and_lock([job], manager, [], False)

    # First, only one test in the job has had a host assigned at all.
    # Since no hosts are running, expect no locking.
    job_status._abort_jobs_if_timedout(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn(False)
    job.hostnames = [None] + expected_hostnames[1:]
    self.expect_hosts_query_and_lock([job], manager, [], False)

    # Then, that host starts running, but no other tests have hosts.
    job_status._abort_jobs_if_timedout(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn(False)
    self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:])

    # The second test gets a host assigned, but it's not yet running.
    # Since no new running hosts are found, no locking should happen.
    job_status._abort_jobs_if_timedout(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn(False)
    job.hostnames = expected_hostnames
    self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:],
                                     False)

    # A timeout occurs, and only the locked hosts should be returned.
    job_status._abort_jobs_if_timedout(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn(True)

    # The last loop update; doesn't impact behavior.
    job_status.gather_job_hostnames(
        mox.IgnoreArg(), job).AndReturn(expected_hostnames)
    self.mox.ReplayAll()

    # Because of the timeout only one host is returned.
    expect_timeout_hostnames = ['host0']
    self.assertEquals(
        sorted(expect_timeout_hostnames),
        sorted(job_status.wait_for_and_lock_job_hosts(
            self.afe, [job], manager,
            wait_timeout_mins=DEFAULT_WAITTIMEOUT_MINS)))

def testWaitForResults(self):
    """Should gather status and return records for job summaries."""
    jobs = [
        FakeJob(0, [FakeStatus('GOOD', 'T0', ''),
                    FakeStatus('GOOD', 'T1', '')]),
        FakeJob(1, [FakeStatus('ERROR', 'T0', 'err', False),
                    FakeStatus('GOOD', 'T1', '')]),
        FakeJob(2, [FakeStatus('TEST_NA', 'T0', 'no')]),
        FakeJob(3, [FakeStatus('FAIL', 'T0', 'broken')]),
        FakeJob(4, [FakeStatus('ERROR', 'SERVER_JOB', 'server error'),
                    FakeStatus('GOOD', 'T0', '')]),
    ]
    # TODO: Write a better test for the case where we yield
    # results for aborts vs cannot yield results because of
    # a premature abort. Currently almost all client aborts
    # have been converted to failures, and when aborts do happen
    # they result in server job failures for which we always
    # want results.
    # FakeJob(5, [FakeStatus('ERROR', 'T0', 'gah', True)]),
    # The next job shouldn't be recorded in the results.
    # FakeJob(6, [FakeStatus('GOOD', 'SERVER_JOB', '')])]
    for status in jobs[4].statuses:
        status.entry['job'] = {'name': 'broken_infra_job'}

    # To simulate a job that isn't ready the first time we check.
    self.afe.get_jobs(id=jobs[0].id, finished=True).AndReturn([])
    # Expect all the rest of the jobs to be good to go the first time.
    for job in jobs[1:]:
        self.expect_result_gathering(job)
    # Then, expect job[0] to be ready.
    self.expect_result_gathering(jobs[0])
    # Expect us to poll twice.
    self.mox.StubOutWithMock(time, 'sleep')
    time.sleep(5)
    time.sleep(5)
    self.mox.ReplayAll()

    results = [result for result in
               job_status.wait_for_results(self.afe, self.tko, jobs)]
    for job in jobs[:6]:  # the 'GOOD' SERVER_JOB shouldn't be there.
        for status in job.statuses:
            self.assertTrue(True in map(status.equals_record, results))

def testWaitForMultipleJobsToFinish(self):
    """Ensure we detect when all jobs have stopped running."""
    self.mox.StubOutWithMock(time, 'sleep')
    job0 = FakeJob(0, [])
    job1 = FakeJob(1, [])
    self.afe.get_jobs(id=job0.id, finished=True).AndReturn([])
    self.afe.get_jobs(id=job1.id, finished=True).AndReturn([])
    self.afe.get_jobs(id=job0.id, finished=True).AndReturn([])
    self.afe.get_jobs(id=job1.id, finished=True).AndReturn([job1])
    self.afe.get_jobs(id=job0.id, finished=True).AndReturn([job0])
    time.sleep(mox.IgnoreArg()).MultipleTimes()
    self.mox.ReplayAll()
    job_status.wait_for_jobs_to_finish(self.afe, [job0, job1])

def testFailedJobRetry(self):
    """Make sure the suite survives even if the retry failed."""
    test_to_retry = self.files['seven']
    fake_job = FakeJob(id=self._FAKE_JOB_ID)

    test_results = self._createSuiteMockResults()
    self.schedule_and_expect_these_results(
        self.suite,
        [test_results[0] + test_results[1]],
        self.recorder)

    self.mox.StubOutWithMock(self.suite._job_creator, 'create_job')
    self.suite._job_creator.create_job(
        test_to_retry, retry_for=self._FAKE_JOB_ID).AndRaise(
            error.RPCException('Expected during test'))
    # Do not file a bug.
    self.mox.StubOutWithMock(self.suite, '_should_report')
    self.suite._should_report(mox.IgnoreArg()).AndReturn(False)

    self.mox.ReplayAll()

    self.suite.schedule(self.recorder.record_entry)
    self.suite._retry_handler._retry_map = {
        self._FAKE_JOB_ID: {'state': RetryHandler.States.NOT_ATTEMPTED,
                            'retry_max': 1}
    }
    self.suite._jobs_to_tests[self._FAKE_JOB_ID] = test_to_retry
    self.suite.wait(self.recorder.record_entry)
    expected_retry_map = {
        self._FAKE_JOB_ID: {'state': RetryHandler.States.ATTEMPTED,
                            'retry_max': 1}
    }
    expected_jobs_to_tests = self.suite._jobs_to_tests.copy()
    self.assertEquals(self.suite._retry_handler._retry_map,
                      expected_retry_map)
    self.assertEquals(self.suite._jobs_to_tests, expected_jobs_to_tests)

def testJobRetryTestWarn(self):
    """Test that no retry is scheduled if test warns."""
    test_to_retry = self.files['seven']
    fake_job = FakeJob(id=self._FAKE_JOB_ID)
    test_results = self._createSuiteMockResults(
        will_file_bug=True, result_status='WARN')
    self.schedule_and_expect_these_results(
        self.suite,
        [test_results[0] + test_results[1]],
        self.recorder)
    # A bug should be filed if test warns.
    self.mock_bug_filing(test_results)
    self.mox.ReplayAll()

    self.suite.schedule(self.recorder.record_entry, True)
    self.suite._retry_handler._retry_map = {
        self._FAKE_JOB_ID: {'state': RetryHandler.States.NOT_ATTEMPTED,
                            'retry_max': 1}
    }
    self.suite._jobs_to_tests[self._FAKE_JOB_ID] = test_to_retry
    expected_jobs_to_tests = self.suite._jobs_to_tests.copy()
    expected_retry_map = self.suite._retry_handler._retry_map.copy()

    self.suite.wait(self.recorder.record_entry)

    # Check retry map and _jobs_to_tests, ensure no retry was scheduled.
    self.assertEquals(self.suite._retry_handler._retry_map,
                      expected_retry_map)
    self.assertEquals(self.suite._jobs_to_tests, expected_jobs_to_tests)

def testYieldSubdir(self):
    """Make sure subdirs are properly set for test and non-test status."""
    job_tag = '0-owner/172.33.44.55'
    job_name = 'broken_infra_job'
    job = FakeJob(0, [FakeStatus('ERROR', 'SERVER_JOB', 'server error',
                                 subdir='---', job_tag=job_tag),
                      FakeStatus('GOOD', 'T0', '',
                                 subdir='T0.subdir', job_tag=job_tag)],
                  parent_job_id=54321)
    for status in job.statuses:
        status.entry['job'] = {'name': job_name}
    self.expect_yield_job_entries(job)
    self.mox.ReplayAll()

    results = list(job_status._yield_job_results(self.afe, self.tko, job))
    for i in range(len(results)):
        result = results[i]
        if result.test_name.endswith('SERVER_JOB'):
            expected_name = '%s_%s' % (job_name, job.statuses[i].test_name)
            expected_subdir = job_tag
        else:
            expected_name = job.statuses[i].test_name
            expected_subdir = os.path.join(job_tag, job.statuses[i].subdir)
        self.assertEqual(results[i].test_name, expected_name)
        self.assertEqual(results[i].subdir, expected_subdir)

def testWaitForJobToStart(self):
    """Ensure we detect when a job has started running."""
    self.mox.StubOutWithMock(time, 'sleep')
    job = FakeJob(0, [])
    self.afe.get_jobs(id=job.id, not_yet_run=True).AndReturn([job])
    self.afe.get_jobs(id=job.id, not_yet_run=True).AndReturn([])
    time.sleep(mox.IgnoreArg()).MultipleTimes()
    self.mox.ReplayAll()
    job_status.wait_for_jobs_to_start(self.afe, [job])

def testGatherPerHostResults(self):
    """Should gather per host results."""
    # For the 0th job, the 1st entry is more bad/specific.
    # For all the others, it's the 0th that we expect.
    jobs = [
        FakeJob(0, [FakeStatus('FAIL', 'T0', '', hostname='h0'),
                    FakeStatus('FAIL', 'T1', 'bad', hostname='h0')]),
        FakeJob(1, [FakeStatus('ERROR', 'T0', 'err', False, 'h1'),
                    FakeStatus('GOOD', 'T1', '', hostname='h1')]),
        FakeJob(2, [FakeStatus('TEST_NA', 'T0', 'no', hostname='h2')]),
        FakeJob(3, [FakeStatus('FAIL', 'T0', 'broken', hostname='h3')]),
        FakeJob(4, [FakeStatus('ERROR', 'T0', 'gah', True, 'h4')]),
        FakeJob(5, [FakeStatus('GOOD', 'T0', 'Yay', hostname='h5')]),
    ]
    # Method under test returns status available right now.
    for job in jobs:
        entries = map(lambda s: s.entry, job.statuses)
        self.afe.run('get_host_queue_entries',
                     job=job.id).AndReturn(entries)
        self.tko.get_job_test_statuses_from_db(job.id).AndReturn(
            job.statuses)
    self.mox.ReplayAll()

    results = job_status.gather_per_host_results(self.afe, self.tko,
                                                 jobs).values()
    for status in ([jobs[0].statuses[1]] +
                   [j.statuses[0] for j in jobs[1:]]):
        self.assertTrue(True in map(status.equals_hostname_record, results))

def testWaitForMultiJobHostsToRunAndGetLocked(self):
    """Ensure we lock all running hosts for all jobs as discovered."""
    self.mox.StubOutWithMock(time, 'sleep')
    self.mox.StubOutWithMock(job_status, 'gather_job_hostnames')
    manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
    expected_hostnames = ['host1', 'host0', 'host2']
    expected_hosts = [FakeHost(h) for h in expected_hostnames]
    job0 = FakeJob(0, hostnames=[])
    job1 = FakeJob(1, hostnames=[])
    time.sleep(mox.IgnoreArg()).MultipleTimes()

    # First, only one test in either job has had a host assigned at all.
    # Since no hosts are running, expect no locking.
    job0.hostnames = [None, expected_hostnames[2]]
    job1.hostnames = [None]
    self.expect_hosts_query_and_lock([job0, job1], manager, [], False)

    # Then, that host starts running, but no other tests have hosts.
    self.expect_hosts_query_and_lock([job0, job1], manager,
                                     expected_hosts[2:])

    # The test in the second job gets a host assigned, but it's not yet
    # running.
    # Since no new running hosts are found, no locking should happen.
    job1.hostnames = expected_hostnames[1:2]
    self.expect_hosts_query_and_lock([job0, job1], manager,
                                     expected_hosts[2:], False)

    # The second job's test's host starts running as well.
    self.expect_hosts_query_and_lock([job0, job1], manager,
                                     expected_hosts[1:])

    # All three hosts across both jobs are now running.
    job0.hostnames = [expected_hostnames[0], expected_hostnames[2]]
    self.expect_hosts_query_and_lock([job0, job1], manager, expected_hosts)

    # The last loop update; doesn't impact behavior.
    job_status.gather_job_hostnames(mox.IgnoreArg(),
                                    job0).AndReturn(job0.hostnames)
    job_status.gather_job_hostnames(mox.IgnoreArg(),
                                    job1).AndReturn(job1.hostnames)
    self.mox.ReplayAll()

    self.assertEquals(
        sorted(expected_hostnames),
        sorted(job_status.wait_for_and_lock_job_hosts(self.afe,
                                                      [job0, job1],
                                                      manager)))

def testJobRetryTestFail(self):
    """Test retry works."""
    test_to_retry = self.files['seven']
    fake_new_job_id = self._FAKE_JOB_ID + 1
    fake_job = FakeJob(id=self._FAKE_JOB_ID)
    fake_new_job = FakeJob(id=fake_new_job_id)

    test_results = self._createSuiteMockResults()
    self.schedule_and_expect_these_results(
        self.suite,
        [test_results[0] + test_results[1]],
        self.recorder)
    self.mox.StubOutWithMock(self.suite._job_creator, 'create_job')
    self.suite._job_creator.create_job(
        test_to_retry, retry_for=self._FAKE_JOB_ID).AndReturn(fake_new_job)
    self.mox.ReplayAll()

    self.suite.schedule(self.recorder.record_entry)
    self.suite._retry_handler._retry_map = {
        self._FAKE_JOB_ID: {'state': RetryHandler.States.NOT_ATTEMPTED,
                            'retry_max': 1}
    }
    self.suite._jobs_to_tests[self._FAKE_JOB_ID] = test_to_retry
    self.suite.wait(self.recorder.record_entry)
    expected_retry_map = {
        self._FAKE_JOB_ID: {'state': RetryHandler.States.RETRIED,
                            'retry_max': 1},
        fake_new_job_id: {'state': RetryHandler.States.NOT_ATTEMPTED,
                          'retry_max': 0}
    }
    # Check retry map is correctly updated
    self.assertEquals(self.suite._retry_handler._retry_map,
                      expected_retry_map)
    # Check _jobs_to_tests is correctly updated
    self.assertEquals(self.suite._jobs_to_tests[fake_new_job_id],
                      test_to_retry)

def testWaitForSingleJobHostsToRunAndGetLockedSerially(self):
    """Lock running hosts as discovered, serially."""
    self.mox.StubOutWithMock(time, 'sleep')
    self.mox.StubOutWithMock(job_status, 'gather_job_hostnames')
    manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
    expected_hostnames = ['host1', 'host0']
    expected_hosts = [FakeHost(h) for h in expected_hostnames]
    job = FakeJob(7, hostnames=[None, None])
    time.sleep(mox.IgnoreArg()).MultipleTimes()

    self.expect_hosts_query_and_lock([job], manager, [], False)

    # First, only one test in the job has had a host assigned at all.
    # Since no hosts are running, expect no locking.
    job.hostnames = [None] + expected_hostnames[1:]
    self.expect_hosts_query_and_lock([job], manager, [], False)

    # Then, that host starts running, but no other tests have hosts.
    self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:])

    # The second test gets a host assigned, but it's not yet running.
    # Since no new running hosts are found, no locking should happen.
    job.hostnames = expected_hostnames
    self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:],
                                     False)

    # The second test's host starts running as well, and the first stops.
    self.expect_hosts_query_and_lock([job], manager, expected_hosts[:1])

    # The last loop update; doesn't impact behavior.
    job_status.gather_job_hostnames(
        mox.IgnoreArg(), job).AndReturn(expected_hostnames)
    self.mox.ReplayAll()

    self.assertEquals(
        sorted(expected_hostnames),
        sorted(job_status.wait_for_and_lock_job_hosts(self.afe, [job],
                                                      manager)))

def testGatherJobHostnamesSomeStillQueued(self):
    """Not all entries for the job were Running, though all had hosts."""
    job = FakeJob(0, [])
    expected_hosts = ['host2', 'host1']
    entries = [{'status': 'Running',
                'host': {'hostname': h}} for h in expected_hosts]
    entries[-1]['status'] = 'Queued'
    self.afe.run('get_host_queue_entries', job=job.id).AndReturn(entries)
    self.mox.ReplayAll()

    self.assertTrue(expected_hosts[-1] not in
                    job_status.gather_job_hostnames(self.afe, job))

def testGatherJobHostnamesAllRan(self):
    """All entries for the job were assigned hosts."""
    job = FakeJob(0, [])
    expected_hosts = ['host2', 'host1']
    entries = [{'status': 'Running',
                'host': {'hostname': h}} for h in expected_hosts]
    self.afe.run('get_host_queue_entries', job=job.id).AndReturn(entries)
    self.mox.ReplayAll()

    self.assertEquals(
        sorted(expected_hosts),
        sorted(job_status.gather_job_hostnames(self.afe, job)))

def testWaitForAndLockWithTimeOutInStartJobs(self):
    """If we experience a timeout, no locked hosts are returned."""
    self.mox.StubOutWithMock(job_status, 'gather_job_hostnames')
    self.mox.StubOutWithMock(job_status, '_abort_jobs_if_timedout')
    job_status._abort_jobs_if_timedout(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn(True)

    manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
    expected_hostnames = ['host1', 'host0']
    expected_hosts = [FakeHost(h) for h in expected_hostnames]
    job = FakeJob(7, hostnames=[None, None])

    job_status.gather_job_hostnames(
        mox.IgnoreArg(), job).AndReturn(expected_hostnames)
    self.mox.ReplayAll()

    self.assertFalse(job_status.wait_for_and_lock_job_hosts(
        self.afe, [job], manager,
        wait_timeout_mins=DEFAULT_WAITTIMEOUT_MINS))

def testWaitForChildResults(self):
    """Should gather status and return records for job summaries."""
    parent_job_id = 54321
    jobs = [
        FakeJob(0, [FakeStatus('GOOD', 'T0', ''),
                    FakeStatus('GOOD', 'T1', '')],
                parent_job_id=parent_job_id),
        FakeJob(1, [FakeStatus('ERROR', 'T0', 'err', False),
                    FakeStatus('GOOD', 'T1', '')],
                parent_job_id=parent_job_id),
        FakeJob(2, [FakeStatus('TEST_NA', 'T0', 'no')],
                parent_job_id=parent_job_id),
        FakeJob(3, [FakeStatus('FAIL', 'T0', 'broken')],
                parent_job_id=parent_job_id),
        FakeJob(4, [FakeStatus('ERROR', 'SERVER_JOB', 'server error'),
                    FakeStatus('GOOD', 'T0', '')],
                parent_job_id=parent_job_id),
    ]
    # TODO: Write a better test for the case where we yield
    # results for aborts vs cannot yield results because of
    # a premature abort. Currently almost all client aborts
    # have been converted to failures and when aborts do happen
    # they result in server job failures for which we always
    # want results.
    # FakeJob(5, [FakeStatus('ERROR', 'T0', 'gah', True)],
    #         parent_job_id=parent_job_id),
    # The next job shouldn't be recorded in the results.
    # FakeJob(6, [FakeStatus('GOOD', 'SERVER_JOB', '')],
    #         parent_job_id=12345)]
    for status in jobs[4].statuses:
        status.entry['job'] = {'name': 'broken_infra_job'}

    # Expect one call to get a list of all child jobs.
    self.afe.get_jobs(parent_job_id=parent_job_id).AndReturn(jobs[:6])

    # Have the first two jobs be finished by the first polling,
    # and the remaining ones (not including #6) for the second polling.
    self.afe.get_jobs(parent_job_id=parent_job_id,
                      finished=True).AndReturn([jobs[1]])
    self.expect_yield_job_entries(jobs[1])

    self.afe.get_jobs(parent_job_id=parent_job_id,
                      finished=True).AndReturn(jobs[:2])
    self.expect_yield_job_entries(jobs[0])

    self.afe.get_jobs(parent_job_id=parent_job_id,
                      finished=True).AndReturn(jobs[:6])
    for job in jobs[2:6]:
        self.expect_yield_job_entries(job)

    # Expect us to poll thrice.
    self.mox.StubOutWithMock(time, 'sleep')
    time.sleep(5)
    time.sleep(5)
    time.sleep(5)
    self.mox.ReplayAll()

    results = [result for result in
               job_status.wait_for_child_results(self.afe, self.tko,
                                                 parent_job_id)]
    for job in jobs[:6]:  # the 'GOOD' SERVER_JOB shouldn't be there.
        for status in job.statuses:
            self.assertTrue(True in map(status.equals_record, results))

def expect_job_scheduling(self, recorder, tests_to_skip=[],
                          ignore_deps=False, raises=False,
                          suite_deps=[], suite=None, extra_keyvals={}):
    """Expect jobs to be scheduled for 'tests' in |self.files|.

    @param recorder: object with a record_entry to be used to record test
                     results.
    @param tests_to_skip: [list, of, test, names] that we expect to skip.
    @param ignore_deps: If true, ignore tests' dependencies.
    @param raises: If True, expect exceptions.
    @param suite_deps: If set, add suite level dependencies.
    @param suite: Optional suite object; when it has a results dir, expect
                  job keyvals to be remembered for each scheduled job.
    @param extra_keyvals: Extra keyvals set to tests.
    """
    record_job_id = suite and suite._results_dir
    if record_job_id:
        self.mox.StubOutWithMock(suite, '_remember_job_keyval')
    recorder.record_entry(
        StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG),
        log_in_subdir=False)
    tests = self.files.values()
    n = 1
    for test in tests:
        if test.name in tests_to_skip:
            continue
        dependencies = []
        if not ignore_deps:
            dependencies.extend(test.dependencies)
        if suite_deps:
            dependencies.extend(suite_deps)
        dependencies.append(self._BOARD)
        build = self._BUILDS[provision.CROS_VERSION_PREFIX]
        keyvals = {
            'build': build,
            'suite': self._TAG,
            'builds': SuiteTest._BUILDS,
            'experimental': test.experimental,
        }
        keyvals.update(extra_keyvals)
        job_mock = self.afe.create_job(
            control_file=test.text,
            name=mox.And(mox.StrContains(build),
                         mox.StrContains(test.name)),
            control_type=mox.IgnoreArg(),
            meta_hosts=[self._BOARD],
            dependencies=dependencies,
            keyvals=keyvals,
            max_runtime_mins=24 * 60,
            timeout_mins=1440,
            parent_job_id=None,
            reboot_before=mox.IgnoreArg(),
            run_reset=mox.IgnoreArg(),
            priority=priorities.Priority.DEFAULT,
            synch_count=test.sync_count,
            require_ssp=test.require_ssp)
        if raises:
            job_mock.AndRaise(error.NoEligibleHostException())
            recorder.record_entry(
                StatusContains.CreateFromStrings('START', test.name),
                log_in_subdir=False)
            recorder.record_entry(
                StatusContains.CreateFromStrings('TEST_NA', test.name),
                log_in_subdir=False)
            recorder.record_entry(
                StatusContains.CreateFromStrings('END', test.name),
                log_in_subdir=False)
        else:
            fake_job = FakeJob(id=n)
            job_mock.AndReturn(fake_job)
            if record_job_id:
                suite._remember_job_keyval(fake_job)
            n += 1

def testWaitForJobToStartAlreadyStarted(self):
    """Ensure we don't wait forever if a job already started."""
    job = FakeJob(0, [])
    self.afe.get_jobs(id=job.id, not_yet_run=True).AndReturn([])
    self.mox.ReplayAll()
    job_status.wait_for_jobs_to_start(self.afe, [job])

def testWaitForJobToFinishAlreadyFinished(self):
    """Ensure we don't wait forever if a job already finished."""
    job = FakeJob(0, [])
    self.afe.get_jobs(id=job.id, finished=True).AndReturn([job])
    self.mox.ReplayAll()
    job_status.wait_for_jobs_to_finish(self.afe, [job])