def test_isRunning_timeout(self, mock_update):
    """A job idle for longer than MAX_RUNNING_TIME is reported as aborted."""
    job = Job(change_num="change_num", project_name="project")
    job.node_ip = 'ip'
    max_age = datetime.timedelta(seconds=int(Configuration().MAX_RUNNING_TIME))
    job.updated = datetime.datetime.now() - max_age
    self.assertFalse(job.isRunning("DB"))
    mock_update.assert_called_with("DB", result='Aborted: Timed out')
def test_isRunning_pid_fail(self, mock_execute_command, mock_update):
    """An SSH failure while checking the remote pid aborts the job."""
    job = Job(change_num="change_num", project_name="project")
    job.node_ip = 'ip'
    age = datetime.timedelta(seconds=350)
    job.updated = datetime.datetime.now() - age
    mock_execute_command.side_effect = Exception('SSH error getting PID')
    self.assertFalse(job.isRunning("DB"))
    mock_update.assert_called_with(
        "DB", result='Aborted: Exception checking for pid')
    self.assertEqual(1, mock_execute_command.call_count)
def test_runTest_deletes_bad_node(self, mock_getSSHObject, mock_update):
    """A freshly allocated node that cannot be SSHed into is deleted."""
    job = Job(change_num="change_num", project_name="project")
    pool = mock.Mock()
    pool.getNode.return_value = ('new_node', 'ip')
    mock_getSSHObject.return_value = None
    job.runJob("DB", pool)
    pool.deleteNode.assert_called_once_with('new_node')
    mock_update.assert_called_once_with("DB", node_id=0)
def test_runTest_deletes_existing_node(self, mock_getSSHObject, mock_update):
    """When no new node is available, the job's existing node is deleted."""
    job = Job(change_num="change_num", project_name="project")
    job.node_id = 'existing_node'
    pool = mock.Mock()
    pool.getNode.return_value = (None, None)
    job.runJob("DB", pool)
    pool.deleteNode.assert_called_once_with('existing_node')
    mock_update.assert_called_once_with("DB", node_id=0)
    self.assertEqual(0, mock_getSSHObject.call_count)
def addJob(self, change_ref, project_name, commit_id):
    """Queue a job for a change, obsoleting jobs for earlier patchsets."""
    change_num = change_ref.split('/')[3]
    for prior in Job.retrieve(self.db, project_name, change_num):
        self.log.info(
            'Job for previous patchset (%s) already queued - replacing'
            % (prior,))
        prior.update(self.db, state=constants.OBSOLETE)
    job = Job(change_num, change_ref, project_name, commit_id)
    with self.db.get_session() as session:
        self.log.info("Job for %s queued" % job.change_num)
        session.add(job)
def test_runTest_update_test_runner(self, mock_update_testrunner,
                                    mock_execute_command, mock_update,
                                    mock_sleep):
    """Running a job for the openstack xenapi-os-testing project updates
    the test runner with the change ref.

    Fix: dropped the unused local ``ssh = mock.Mock()`` — it was never
    passed anywhere and had no effect on the test.
    """
    job = Job(change_num="change_num", change_ref='change_ref',
              project_name="openstack/xenapi-os-testing")
    nodepool = mock.Mock()
    nodepool.getNode.return_value = ('new_node', 'ip')
    job.runJob("DB", nodepool)
    mock_update_testrunner.assert_has_calls([mock.call('change_ref')])
def test_runTest_update_test_runner(self, mock_update_testrunner,
                                    mock_execute_command, mock_update,
                                    mock_sleep):
    """Running a job for the stackforge xenapi-os-testing project updates
    the test runner with the change ref.

    Fix: dropped the unused local ``ssh = mock.Mock()`` — it was never
    passed anywhere and had no effect on the test.
    """
    job = Job(change_num="change_num", change_ref='change_ref',
              project_name="stackforge/xenapi-os-testing")
    nodepool = mock.Mock()
    nodepool.getNode.return_value = ('new_node', 'ip')
    job.runJob("DB", nodepool)
    mock_update_testrunner.assert_has_calls([mock.call('change_ref')])
def test_isRunning_happy_path(self, mock_execute_command, mock_update):
    """isRunning mirrors the remote pid check and never updates the job."""
    job = Job(change_num="change_num", project_name="project")
    job.node_ip = 'ip'
    job.updated = datetime.datetime.now() - datetime.timedelta(seconds=350)
    mock_execute_command.return_value = False
    self.assertFalse(job.isRunning("DB"))
    self.assertEqual(0, mock_update.call_count)
    mock_execute_command.return_value = True
    self.assertTrue(job.isRunning("DB"))
    self.assertEqual(0, mock_update.call_count)
def get_jobs(self):
    """Return all jobs currently in the COLLECTING state.

    Fix: the original built the list with a manual append loop over the
    query result; ``list()`` does the same in one step.
    """
    return list(Job.getAllWhere(self.jobQueue.db,
                                state=constants.COLLECTING))
def test_runTest_happy_path(self, mock_execute_command, mock_update,
                            mock_sleep):
    """A successful run keeps the node and records node id then RUNNING.

    Fix: dropped the unused local ``ssh = mock.Mock()`` — it was never
    passed anywhere and had no effect on the test.
    """
    job = Job(change_num="change_num", project_name="project")
    nodepool = mock.Mock()
    nodepool.getNode.return_value = ('new_node', 'ip')
    job.runJob("DB", nodepool)
    # The node should not be deleted(!)
    self.assertEqual(0, nodepool.deleteNode.call_count)
    # Two calls - one to set the node ID and the other to set the state to running
    update_call1 = mock.call("DB", node_id='new_node', result='',
                             node_ip='ip')
    update_call2 = mock.call("DB", state=constants.RUNNING)
    mock_update.assert_has_calls([update_call1, update_call2])
def func_failures(options, queue):
    """Render a table of failed or aborted jobs, optionally age-filtered."""
    table = PrettyTable(["Project", "Change", "State", "Result", "Age",
                         "Duration", "URL"])
    table.align = 'l'
    now = time.time()
    for job in Job.getAllWhere(queue.db):
        # Only failed or aborted jobs are of interest here.
        if not job.result or (job.result != 'Failed'
                              and job.result.find('Aborted') != 0):
            continue
        age_hours = (now - time.mktime(job.updated.timetuple())) / 3600
        if options.recent:
            if age_hours > int(options.recent):
                continue
        age = '%.02f' % (age_hours)
        duration = '-'
        if job.test_started and job.test_stopped:
            started = time.mktime(job.test_started.timetuple())
            stopped = time.mktime(job.test_stopped.timetuple())
            duration = "%.02f" % ((stopped - started) / 3600)
        table.add_row([job.project_name, job.change_num,
                       constants.STATES[job.state], job.result, age,
                       duration, job.logs_url])
    return str(table)
def processResults(self):
    """Move RUNNING jobs whose tests have finished into COLLECTING."""
    running = Job.getAllWhere(self.db, state=constants.RUNNING)
    self.log.info('%d jobs running...' % len(running))
    for job in running:
        if job.isRunning(self.db):
            continue
        job.update(self.db, state=constants.COLLECTING)
        self.log.info('Tests for %s are done! Collecting' % job)
def processResults(self):
    """Advance every finished RUNNING job to the COLLECTING state."""
    jobs = Job.getAllWhere(self.db, state=constants.RUNNING)
    self.log.info('%d jobs running...' % len(jobs))
    for job in jobs:
        if job.isRunning(self.db):
            # Still executing remotely; leave it alone.
            continue
        job.update(self.db, state=constants.COLLECTING)
        self.log.info('Tests for %s are done! Collecting' % job)
def addJob(self, change_ref, project_name, commit_id):
    """Queue a new job, marking jobs for older patchsets as obsolete."""
    change_num = change_ref.split('/')[3]
    for previous in Job.retrieve(self.db, project_name, change_num):
        self.log.info(
            'Job for previous patchset (%s) already queued - replacing'
            % (previous,))
        previous.update(self.db, state=constants.OBSOLETE)
    new_job = Job(change_num, change_ref, project_name, commit_id)
    with self.db.get_session() as session:
        self.log.info("Job for %s queued" % new_job.change_num)
        session.add(new_job)
def test_start_test_clears_time(self, now):
    """Moving a job to RUNNING stamps test_started and clears test_stopped.

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual`` (the alias is removed in Python 3.12).
    """
    now.return_value = NOW
    db = DB('sqlite://')
    db.create_schema()
    job = Job(change_num="change_num", project_name="project")
    with db.get_session() as session:
        session.add(job)
        job.created = PAST
        job.db = db
    job.update(db, state=constants.RUNNING)
    with db.get_session() as session:
        job, = session.query(Job).all()
        self.assertEqual(job.updated, NOW)
        self.assertEqual(job.state, constants.RUNNING)
        self.assertEqual(job.test_started, NOW)
        self.assertEqual(job.test_stopped, None)
        self.assertEqual("project", job.project_name)
        self.assertEqual("change_num", job.change_num)
def test_recent_jobs(self, now):
    """getRecent filters out jobs whose last update is too old."""
    now.return_value = NOW
    db = DB('sqlite://')
    db.create_schema()
    old_job = Job(change_num="change_num1", project_name="project")
    new_job = Job(change_num="change_num2", project_name="project")
    with db.get_session() as session:
        session.add(old_job)
        old_job.created = PAST
        old_job.db = db
        old_job.state = constants.RUNNING
        old_job.updated = PAST
        session.add(new_job)
    # Default window excludes the stale job; a huge window includes both.
    self.assertEqual(len(Job.getRecent(db)), 1)
    self.assertEqual(len(Job.getRecent(db, 200000)), 2)
def test_update(self, now):
    """Job.update persists the new state and refreshes the updated stamp.

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual`` (the alias is removed in Python 3.12).
    """
    now.return_value = NOW
    db = DB('sqlite://')
    db.create_schema()
    job = Job(change_num="change_num", project_name="project")
    with db.get_session() as session:
        session.add(job)
        job.created = PAST
        job.db = db
    self.assertEqual(job.state, constants.QUEUED)
    job.update(db, state=constants.FINISHED)
    with db.get_session() as session:
        job, = session.query(Job).all()
        self.assertEqual(NOW, job.updated)
        self.assertEqual(constants.FINISHED, job.state)
        self.assertEqual("project", job.project_name)
        self.assertEqual("change_num", job.change_num)
def func_show(options, queue):
    """Render the first job matching the given change ref as a two-column table."""
    job = Job.getAllWhere(queue.db, change_ref=options.change_ref)[0]
    url = 'https://review.openstack.org/%s' % job.change_num
    table = PrettyTable()
    table.add_column('Key', ['Project name', 'Change num', 'Change ref',
                             'state', 'created', 'Commit id', 'Node id',
                             'Node ip', 'Result', 'Logs', 'Report',
                             'Updated', 'Gerrit URL'])
    table.add_column('Value', [job.project_name, job.change_num,
                               job.change_ref, constants.STATES[job.state],
                               job.created, job.commit_id, job.node_id,
                               job.node_ip, job.result, job.logs_url,
                               job.report_url, job.updated, url])
    table.align = 'l'
    return str(table)
def func_list(options, queue):
    """Render recent jobs as a table plus state/result summary counts.

    Fix: the default branch called ``constants.STATES.values().remove(...)``;
    under Python 3 ``dict.values()`` returns a view with no ``remove``
    method, so this raised AttributeError.  Materialize with ``list()``
    first — behaviour is identical on Python 2.
    """
    table = PrettyTable([
        "ID", "Project", "Change", "State", "IP", "Result", "Age (hours)",
        "Duration"
    ])
    table.align = 'l'
    now = time.time()
    all_jobs = Job.getRecent(queue.db, int(options.recent))
    state_dict = {}
    result_dict = {}
    if options.states and len(options.states) > 0:
        states = options.states.split(',')
    else:
        # Default should be everything except obsolete jobs
        states = list(constants.STATES.values())
        states.remove(constants.STATES[constants.OBSOLETE])
    for job in all_jobs:
        updated = time.mktime(job.updated.timetuple())
        age_hours = (now - updated) / 3600
        # Tally state/result counts over ALL jobs, before state filtering.
        state_count = state_dict.get(constants.STATES[job.state], 0) + 1
        state_dict[constants.STATES[job.state]] = state_count
        result_count = result_dict.get(job.result, 0) + 1
        result_dict[job.result] = result_count
        if states and constants.STATES[job.state] not in states:
            continue
        if job.node_id:
            node_ip = job.node_ip
        else:
            # Parenthesised IP marks a job with no allocated node.
            node_ip = '(%s)' % job.node_ip
        age = '%.02f' % (age_hours)
        duration = '-'
        if job.test_started and job.test_stopped:
            started = time.mktime(job.test_started.timetuple())
            stopped = time.mktime(job.test_stopped.timetuple())
            if started < stopped:
                duration = "%.02f" % ((stopped - started) / 3600)
        table.add_row([
            job.id, job.project_name, job.change_ref,
            constants.STATES[job.state], node_ip, job.result, age, duration
        ])
    output_str = str(state_dict) + "\n"
    output_str = output_str + str(result_dict) + "\n"
    output_str = output_str + str(table)
    return output_str
def test_delete(self):
    """deleteWhere with no filter removes every stored job."""
    db = DB('sqlite://')
    db.create_schema()
    job = Job(change_num="change_num", project_name="project")
    with db.get_session() as session:
        session.add(job)
        job.db = db
    self.assertEqual(len(Job.getAllWhere(db)), 1)
    Job.deleteWhere(db)
    self.assertEqual(len(Job.getAllWhere(db)), 0)
def func_list(options, queue):
    """Render all jobs as a table with state/result summary counts."""
    table = PrettyTable(["Project", "Change", "State", "IP", "Result",
                         "Age (hours)", "Duration"])
    table.align = 'l'
    now = time.time()
    state_dict = {}
    result_dict = {}
    if options.states and len(options.states) > 0:
        states = options.states.split(',')
    else:
        states = None
    for job in Job.getAllWhere(queue.db):
        age_hours = (now - time.mktime(job.updated.timetuple())) / 3600
        if options.recent:
            if age_hours > int(options.recent):
                continue
        # Count states/results before applying the state filter.
        state_name = constants.STATES[job.state]
        state_dict[state_name] = state_dict.get(state_name, 0) + 1
        result_dict[job.result] = result_dict.get(job.result, 0) + 1
        if states and state_name not in states:
            continue
        if job.node_id:
            node_ip = job.node_ip
        else:
            node_ip = '(%s)' % job.node_ip
        age = '%.02f' % (age_hours)
        duration = '-'
        if job.test_started and job.test_stopped:
            started = time.mktime(job.test_started.timetuple())
            stopped = time.mktime(job.test_stopped.timetuple())
            if started < stopped:
                duration = "%.02f" % ((stopped - started) / 3600)
        table.add_row([job.project_name, job.change_ref, state_name,
                       node_ip, job.result, age, duration])
    output_str = str(state_dict) + "\n"
    output_str = output_str + str(result_dict) + "\n"
    output_str = output_str + str(table)
    return output_str
def func_show(options, queue):
    """Render every job matching the change ref, one two-column table each."""
    output_str = ''
    for job in Job.getAllWhere(queue.db, change_ref=options.change_ref):
        url = 'https://review.openstack.org/%s' % job.change_num
        table = PrettyTable()
        table.add_column('Key', ['ID', 'Project name', 'Change num',
                                 'Change ref', 'state', 'created',
                                 'Commit id', 'Node id', 'Node ip',
                                 'Result', 'Logs', 'Report', 'Updated',
                                 'Gerrit URL', 'Failures'])
        table.add_column('Value', [job.id, job.project_name, job.change_num,
                                   job.change_ref,
                                   constants.STATES[job.state], job.created,
                                   job.commit_id, job.node_id, job.node_ip,
                                   job.result, job.logs_url, job.report_url,
                                   job.updated, url, job.failed])
        table.align = 'l'
        output_str = output_str + str(table) + '\n'
    return output_str
def postResults(self):
    """Vote on collected jobs (aborted ones excluded) and finish them."""
    collected = Job.getAllWhere(self.db, state=constants.COLLECTED)
    self.log.info("%d jobs ready to be posted..." % len(collected))
    for job in collected:
        if job.result.find("Aborted") == 0:
            # Aborted runs carry no meaningful verdict - skip voting.
            logging.info("Not voting on aborted job %s (%s)", job,
                         job.result)
            job.update(self.db, state=constants.FINISHED)
            continue
        if Configuration().get_bool("VOTE"):
            message = Configuration().VOTE_MESSAGE % {
                "result": job.result,
                "report": job.report_url,
                "log": job.logs_url,
            }
            vote_num = "+1" if job.result == "Passed" else "-1"
            if (vote_num == "+1") or (
                    not Configuration().get_bool("VOTE_PASSED_ONLY")):
                logging.info("Posting results for %s (%s)", job, job.result)
                vote(job.commit_id, vote_num, message)
                job.update(self.db, state=constants.FINISHED)
def func_show(options, queue):
    """Show each job matching the change ref as its own key/value table."""
    rendered = []
    for job in Job.getAllWhere(queue.db, change_ref=options.change_ref):
        url = 'https://review.openstack.org/%s' % job.change_num
        table = PrettyTable()
        table.add_column('Key', [
            'ID', 'Project name', 'Change num', 'Change ref', 'state',
            'created', 'Commit id', 'Node id', 'Node ip', 'Result', 'Logs',
            'Report', 'Updated', 'Gerrit URL', 'Failures'
        ])
        table.add_column('Value', [
            job.id, job.project_name, job.change_num, job.change_ref,
            constants.STATES[job.state], job.created, job.commit_id,
            job.node_id, job.node_ip, job.result, job.logs_url,
            job.report_url, job.updated, url, job.failed
        ])
        table.align = 'l'
        rendered.append(str(table) + '\n')
    return ''.join(rendered)
def postResults(self):
    """Post gerrit votes for collected jobs and mark them finished."""
    ready = Job.getAllWhere(self.db, state=constants.COLLECTED)
    self.log.info('%d jobs ready to be posted...' % len(ready))
    for job in ready:
        if job.result.find('Aborted') == 0:
            # No vote for aborted runs; just close them out.
            logging.info('Not voting on aborted job %s (%s)', job,
                         job.result)
            job.update(self.db, state=constants.FINISHED)
            continue
        if Configuration().get_bool('VOTE'):
            message = Configuration().VOTE_MESSAGE % {
                'result': job.result,
                'report': job.report_url,
                'log': job.logs_url}
            vote_num = "+1" if job.result == 'Passed' else "-1"
            if (vote_num == '+1') or (
                    not Configuration().get_bool('VOTE_PASSED_ONLY')):
                logging.info('Posting results for %s (%s)', job, job.result)
                vote(job.commit_id, vote_num, message)
                job.update(self.db, state=constants.FINISHED)
def test_stop_test_sets_stop_time(self, now):
    """Moving a RUNNING job to COLLECTING stamps test_stopped.

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual`` (the alias is removed in Python 3.12).
    """
    now.return_value = NOW
    db = DB('sqlite://')
    db.create_schema()
    job = Job(change_num="change_num", project_name="project")
    with db.get_session() as session:
        session.add(job)
        job.created = PAST
        job.db = db
        job.state = constants.RUNNING
    job.update(db, state=constants.COLLECTING)
    with db.get_session() as session:
        job, = session.query(Job).all()
        self.assertEqual(job.state, constants.COLLECTING)
        self.assertEqual(NOW, job.updated)
        self.assertEqual(constants.COLLECTING, job.state)
        self.assertEqual(NOW, job.test_stopped)
        self.assertEqual("project", job.project_name)
        self.assertEqual("change_num", job.change_num)
def postResults(self):
    """Vote on each collected job, then move it to FINISHED."""
    all_jobs = Job.getAllWhere(self.db, state=constants.COLLECTED)
    self.log.info('%d jobs ready to be posted...' % len(all_jobs))
    for job in all_jobs:
        aborted = job.result.find('Aborted') == 0
        if aborted:
            logging.info('Not voting on aborted job %s (%s)', job,
                         job.result)
            job.update(self.db, state=constants.FINISHED)
            continue
        if not Configuration().get_bool('VOTE'):
            continue
        message = Configuration().VOTE_MESSAGE % {
            'result': job.result,
            'report': job.report_url,
            'log': job.logs_url
        }
        vote_num = "+1" if job.result == 'Passed' else "-1"
        if ((vote_num == '+1')
                or (not Configuration().get_bool('VOTE_PASSED_ONLY'))):
            logging.info('Posting results for %s (%s)', job, job.result)
            vote(job.commit_id, vote_num, message)
            job.update(self.db, state=constants.FINISHED)
def setUp(self):
    """Create a fresh blank Job fixture before each test."""
    self.job = Job()
def flush(self):
    """Remove every job from the backing database."""
    Job.deleteWhere(self.db)
def test_isRunning_no_ip(self):
    """A job with no node IP can never be considered running."""
    no_ip_job = Job(change_num="change_num", project_name="project")
    self.assertFalse(no_ip_job.isRunning("DB"))
class TestRetrieveResults(unittest.TestCase):
    """Tests for Job.retrieveResults.

    Fix: replaced every deprecated ``assertEquals`` alias with
    ``assertEqual`` (the alias is removed in Python 3.12).
    """

    def setUp(self):
        self.job = Job()

    def run_retrieve_results(self):
        """Invoke retrieveResults with a placeholder log destination."""
        return self.job.retrieveResults('ignored')

    def test_no_ip(self):
        self.job.node_ip = None
        result = self.run_retrieve_results()
        self.assertEqual(constants.NO_IP, result)

    @mock.patch('osci.job.utils')
    def test_status_can_be_retrieved(self, fake_utils):
        self.job.node_ip = 'ip'
        fake_utils.execute_command.return_value = (
            0, 'Reported status\nAnd some\nRubbish', 'err')
        result = self.run_retrieve_results()
        # Only the first line of the remote output is the status.
        self.assertEqual('Reported status', result)

    @mock.patch('osci.job.utils')
    def test_logs_copied(self, fake_utils):
        self.job.node_ip = 'ip'
        fake_utils.execute_command.return_value = (
            0, 'Reported status\nAnd some\nRubbish', 'err')
        result = self.run_retrieve_results()
        fake_utils.execute_command.assert_called_once_with(
            'ssh -q -o BatchMode=yes'
            ' -o UserKnownHostsFile=/dev/null'
            ' -o StrictHostKeyChecking=no'
            ' -i .ssh/jenkins'
            ' jenkins@ip cat result.txt',
            silent=True,
            return_streams=True)

    @mock.patch('osci.job.utils')
    def test_dom0_logs_copied(self, fake_utils):
        self.job.node_ip = 'ip'
        fake_utils.execute_command.return_value = (
            0, 'Reported status\nAnd some\nRubbish', 'err')
        result = self.run_retrieve_results()
        fake_utils.copy_dom0_logs.assert_called_once_with(
            'ip', 'jenkins', '.ssh/jenkins', 'ignored')

    @mock.patch('osci.job.utils')
    def test_dom0_log_copy_fails(self, fake_utils):
        self.job.node_ip = 'ip'
        fake_utils.execute_command.return_value = (
            0, 'Reported status\nAnd some\nRubbish', 'err')
        fake_utils.copy_dom0_logs.side_effect = Exception()
        result = self.run_retrieve_results()
        self.assertEqual(constants.COPYFAIL, result)

    @mock.patch('osci.job.utils')
    def test_status_cannot_be_retrieved_old_status_used(self, fake_utils):
        self.job.node_ip = 'ip'
        # A pre-existing Aborted result survives a failed status fetch.
        self.job.result = 'Aborted: previous result'
        fake_utils.execute_command.return_value = (
            1, 'Reported status\nAnd some\nRubbish', 'err')
        result = self.run_retrieve_results()
        self.assertEqual('Aborted: previous result', result)

    @mock.patch('osci.job.utils')
    def test_status_cannot_be_retrieved_old_status_not_used(self, fake_utils):
        self.job.node_ip = 'ip'
        # A non-Aborted previous result is discarded on fetch failure.
        self.job.result = 'previous result'
        fake_utils.execute_command.return_value = (
            1, 'Reported status\nAnd some\nRubbish', 'err')
        result = self.run_retrieve_results()
        self.assertEqual(constants.NORESULT, result)

    @mock.patch('osci.job.utils')
    def test_exception_raised(self, fake_utils):
        self.job.node_ip = 'ip'
        fake_utils.execute_command.side_effect = Exception()
        result = self.run_retrieve_results()
        self.assertEqual(constants.COPYFAIL, result)
def triggerJob(self, job_id):
    """Run every job whose id matches job_id on the nodepool."""
    for job in Job.getAllWhere(self.db, id=job_id):
        job.runJob(self.db, self.nodepool)
def get_queued_enabled_jobs(self):
    """Return queued jobs, or nothing when test runs are disabled."""
    queued = Job.getAllWhere(self.db, state=constants.QUEUED)
    self.log.info("%d jobs queued..." % len(queued))
    if not Configuration().get_bool("RUN_TESTS"):
        return []
    return queued
def recheckJob(self, job_id):
    """Re-queue the job(s) with the given id via addJob."""
    for existing in Job.getAllWhere(self.db, id=job_id):
        self.addJob(existing.change_ref, existing.project_name,
                    existing.commit_id)
def get_queued_enabled_jobs(self):
    """Return all QUEUED jobs when the RUN_TESTS flag is on, else []."""
    pending = Job.getAllWhere(self.db, state=constants.QUEUED)
    self.log.info('%d jobs queued...' % len(pending))
    return pending if Configuration().get_bool('RUN_TESTS') else []
def func_failures(options, queue):
    """Render a table of failed/aborted jobs plus a tally of failing tests.

    Fixes: the ``--withfail`` filter tested ``options.withfail not in
    job.failed`` even though ``job.failed`` may be None (a None-safe copy,
    ``job_failed``, was computed but not used there) — that raised
    TypeError; now the None-safe value is used.  Also removed the unused
    local ``single_count``.
    """
    output_str = ''
    table = PrettyTable(["ID", "Project", "Change", "State", "Result",
                         "Age", "Duration", "URL"])
    table.align = 'l'
    now = time.time()
    all_jobs = Job.getRecent(queue.db, int(options.recent))
    all_failed_tests = {}
    for job in all_jobs:
        if not job.result or (job.result != 'Failed'
                              and job.result.find('Aborted') != 0):
            continue
        updated = time.mktime(job.updated.timetuple())
        age_hours = (now - updated) / 3600
        age = '%.02f' % (age_hours)
        duration = '-'
        if job.test_started and job.test_stopped:
            started = time.mktime(job.test_started.timetuple())
            stopped = time.mktime(job.test_stopped.timetuple())
            duration = "%.02f"%((stopped - started)/3600)
        job_failed = job.failed if job.failed is not None else ''
        failed_tests = [m.group(0)
                        for m in re.finditer('tempest.[^ ()]+', job_failed)]
        if options.withfail is not None:
            if len(options.withfail) == 0:
                # Empty --withfail means "only jobs with no tempest failures".
                if len(failed_tests) != 0:
                    continue
            else:
                # job_failed is the None-safe form of job.failed.
                if options.withfail not in job_failed:
                    continue
        table.add_row([job.id, job.project_name, job.change_num,
                       constants.STATES[job.state], job.result, age,
                       duration, job.logs_url])
        if len(failed_tests) == 0:
            failed_tests = ['No tempest failures detected']
        elif int(options.max_fails) > 0 and len(failed_tests) > int(options.max_fails):
            failed_tests = ['More than %s failures'%options.max_fails]
        for failed_test in failed_tests:
            # Treat JSON and XML as the same since we're only interested in driver failures
            failed_test = failed_test.replace('JSON', '')
            failed_test = failed_test.replace('XML', '')
            cur_count = all_failed_tests.get(failed_test, 0)
            all_failed_tests[failed_test] = cur_count + 1
    if options.min_dup:
        # Collapse tests seen fewer than min_dup times into one bucket.
        msg = 'Fewer than %s duplicates'%options.min_dup
        for failed_test in list(all_failed_tests.keys()):
            if all_failed_tests[failed_test] < int(options.min_dup):
                cur_count = all_failed_tests.get(msg, 0)
                all_failed_tests[msg] = cur_count + 1
                del all_failed_tests[failed_test]
    output_str += str(table) + '\n'
    output_str += '\n'
    output_str += 'Failures\n'
    output_str += '-------------------\n'
    sorted_tests = sorted(all_failed_tests, key=all_failed_tests.get,
                          reverse=True)
    for failed_test in sorted_tests:
        output_str += "%3d %s\n"%(all_failed_tests[failed_test], failed_test)
    return output_str
def func_failures(options, queue):
    """Render failed/aborted jobs with a ranked tally of failing tests.

    Fixes: the ``--withfail`` filter tested ``options.withfail not in
    job.failed`` although ``job.failed`` may be None (the None-safe
    ``job_failed`` was computed but not used there) — that raised
    TypeError; now the None-safe value is used.  Also removed the unused
    local ``single_count``.
    """
    output_str = ''
    table = PrettyTable([
        "ID", "Project", "Change", "State", "Result", "Age", "Duration",
        "URL"
    ])
    table.align = 'l'
    now = time.time()
    all_jobs = Job.getRecent(queue.db, int(options.recent))
    all_failed_tests = {}
    for job in all_jobs:
        if not job.result or (job.result != 'Failed'
                              and job.result.find('Aborted') != 0):
            continue
        updated = time.mktime(job.updated.timetuple())
        age_hours = (now - updated) / 3600
        age = '%.02f' % (age_hours)
        duration = '-'
        if job.test_started and job.test_stopped:
            started = time.mktime(job.test_started.timetuple())
            stopped = time.mktime(job.test_stopped.timetuple())
            duration = "%.02f" % ((stopped - started) / 3600)
        job_failed = job.failed if job.failed is not None else ''
        failed_tests = [
            m.group(0) for m in re.finditer('tempest.[^ ()]+', job_failed)
        ]
        if options.withfail is not None:
            if len(options.withfail) == 0:
                # Empty --withfail selects jobs with no tempest failures.
                if len(failed_tests) != 0:
                    continue
            else:
                # job_failed is the None-safe form of job.failed.
                if options.withfail not in job_failed:
                    continue
        table.add_row([
            job.id, job.project_name, job.change_num,
            constants.STATES[job.state], job.result, age, duration,
            job.logs_url
        ])
        if len(failed_tests) == 0:
            failed_tests = ['No tempest failures detected']
        elif int(options.max_fails) > 0 and len(failed_tests) > int(
                options.max_fails):
            failed_tests = ['More than %s failures' % options.max_fails]
        for failed_test in failed_tests:
            # Treat JSON and XML as the same since we're only interested in driver failures
            failed_test = failed_test.replace('JSON', '')
            failed_test = failed_test.replace('XML', '')
            cur_count = all_failed_tests.get(failed_test, 0)
            all_failed_tests[failed_test] = cur_count + 1
    if options.min_dup:
        # Collapse rarely-seen failures into a single bucket.
        msg = 'Fewer than %s duplicates' % options.min_dup
        for failed_test in list(all_failed_tests.keys()):
            if all_failed_tests[failed_test] < int(options.min_dup):
                cur_count = all_failed_tests.get(msg, 0)
                all_failed_tests[msg] = cur_count + 1
                del all_failed_tests[failed_test]
    output_str += str(table) + '\n'
    output_str += '\n'
    output_str += 'Failures\n'
    output_str += '-------------------\n'
    sorted_tests = sorted(all_failed_tests, key=all_failed_tests.get,
                          reverse=True)
    for failed_test in sorted_tests:
        output_str += "%3d %s\n" % (all_failed_tests[failed_test],
                                    failed_test)
    return output_str
def get_queued_enabled_jobs(self):
    """Return queued jobs when this queue has jobs enabled, else []."""
    waiting = Job.getAllWhere(self.db, state=constants.QUEUED)
    self.log.info('%d jobs queued...' % len(waiting))
    if not self.jobs_enabled:
        return []
    return waiting
def test_isRunning_early_wait(self):
    """A just-updated job is assumed running without a remote check."""
    fresh_job = Job(change_num="change_num", project_name="project")
    fresh_job.node_ip = 'ip'
    fresh_job.updated = datetime.datetime.now()
    self.assertTrue(fresh_job.isRunning("DB"))