def test_get_tests_from_run_id(self):
    """Only tests that ran in the requested run are returned."""
    target_run = api.create_run()
    other_run = api.create_run()
    first_test = api.create_test('fake_test')
    second_test = api.create_test('fake_test2')
    old_stamp = datetime.datetime(1914, 6, 28, 10, 45, 0)
    api.create_test_run(first_test.id, target_run.id, 'fail',
                        start_time=old_stamp)
    api.create_test_run(first_test.id, other_run.id, 'fail',
                        start_time=datetime.datetime.utcnow())
    api.create_test_run(second_test.id, target_run.id, 'success',
                        start_time=old_stamp)
    result = api.get_tests_from_run_id(target_run.id)
    self.assertEqual(2, len(result))
    found_ids = [x.id for x in result]
    found_test_ids = [x.test_id for x in result]
    for expected in (first_test, second_test):
        self.assertIn(expected.id, found_ids)
        self.assertIn(expected.test_id, found_test_ids)
def test_get_test_runs_test_test_id(self):
    """Test runs can be looked up by the test's string test_id."""
    run = api.create_run()
    noise_test = api.create_test('fake_test')
    wanted_test = api.create_test('less_fake_test')
    api.create_test_run(noise_test.id, run.id, 'success')
    api.create_test_run(wanted_test.id, run.id, 'success')
    found = api.get_test_runs_by_test_test_id('less_fake_test')
    self.assertEqual(1, len(found))
    only_run = found[0]
    self.assertEqual(wanted_test.id, only_run.test_id)
    self.assertEqual(run.id, only_run.run_id)
def test_create_test_and_get_by_test_id(self):
    """A created test is retrievable by test_id with its fields intact."""
    created = api.create_test('fake_test', 2, 1, 1, 1.2)
    fetched = api.get_test_by_test_id('fake_test')
    self.assertEqual(created.id, fetched.id)
    self.assertEqual('fake_test', fetched.test_id)
    self.assertEqual(1.2, fetched.run_time)
    self.assertEqual(2, fetched.run_count)
def process_results(results):
    """Insert a parsed subunit results dict into the database.

    Creates the run row (plus optional run metadata from CONF), then for
    each test creates or updates the aggregate Test row, records a TestRun
    row, and attaches any per-run metadata/attachments.

    :param results: dict mapping test name -> result dict (with a
                    'run_time' entry that is popped off first).
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    if CONF.run_at:
        run_at = date_parser.parse(CONF.run_at)
    else:
        run_at = None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session)
    if CONF.run_meta:
        api.add_run_metadata(CONF.run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            if results[test]['status'] == 'success':
                success = 1
                fails = 0
            elif results[test]['status'] == 'fail':
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time, session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if CONF.test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value)
                                 for meta in test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        if ('attr', attr) not in test_metadata:
                            test_meta_dict = {'attr': attr}
                            # BUG FIX: keyword was misspelled `sesion=`,
                            # which raised TypeError at runtime.
                            api.add_test_metadata(test_meta_dict,
                                                  db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'],
                                      test_run.id, session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
def test_create_test_run_and_list(self):
    """A freshly created test run shows up in the full listing."""
    run = api.create_run()
    test = api.create_test('fake_test')
    new_test_run = api.create_test_run(test.id, run.id, 'fail')
    self.assertIsNotNone(new_test_run)
    listed = api.get_all_test_runs()
    self.assertEqual(1, len(listed))
    self.assertEqual(new_test_run.id, listed[0].id)
def test_get_test_run_duration(self):
    """Duration is the stop/start delta in seconds (3 min -> 180.0)."""
    begin = datetime.datetime.utcnow()
    end = begin + datetime.timedelta(minutes=3)
    run = api.create_run()
    test = api.create_test('fake_test')
    test_run = api.create_test_run(test.id, run.id, 'success', begin, end)
    self.assertEqual(180.0, api.get_test_run_duration(test_run.id))
def test_get_tests_from_run_id(self):
    """Tests from an unrelated run must not leak into the result."""
    run_under_test = api.create_run()
    unrelated_run = api.create_run()
    tests = [api.create_test(name) for name in ('fake_test', 'fake_test2')]
    sarajevo = datetime.datetime(1914, 6, 28, 10, 45, 0)
    api.create_test_run(tests[0].id, run_under_test.id, 'fail',
                        start_time=sarajevo)
    api.create_test_run(tests[0].id, unrelated_run.id, 'fail',
                        start_time=datetime.datetime.utcnow())
    api.create_test_run(tests[1].id, run_under_test.id, 'success',
                        start_time=sarajevo)
    found = api.get_tests_from_run_id(run_under_test.id)
    self.assertEqual(2, len(found))
    ids = [t.id for t in found]
    test_ids = [t.test_id for t in found]
    self.assertIn(tests[0].id, ids)
    self.assertIn(tests[0].test_id, test_ids)
    self.assertIn(tests[1].id, ids)
    self.assertIn(tests[1].test_id, test_ids)
def test_get_test_runs_dicts_from_run_id_are_in_chrono_order(self):
    """Run dicts iterate in ascending start_time order, not insert order."""
    run = api.create_run()
    test_a = api.create_test('fake_test')
    test_b = api.create_test('fake_test_2')
    test_c = api.create_test('fake_test_3')
    # Deliberately inserted out of chronological order.
    api.create_test_run(test_a.id, run.id, 'success',
                        datetime.datetime.utcnow())
    api.create_test_run(test_b.id, run.id, 'success',
                        datetime.datetime(1914, 6, 28, 10, 45, 0))
    api.create_test_run(test_c.id, run.id, 'success',
                        datetime.datetime(2014, 8, 26, 20, 0, 0))
    test_run_dicts = api.get_tests_run_dicts_from_run_id(run.uuid)
    self.assertEqual(3, len(test_run_dicts))
    prev = None
    for test_run in test_run_dicts:
        # FIX: compare to None with `is`, not `==` (PEP 8, E711).
        if prev is None:
            prev = test_run
            continue
        # assertGreater gives a useful failure message, unlike
        # assertTrue(a > b).
        self.assertGreater(test_run_dicts[test_run]['start_time'],
                           test_run_dicts[prev]['start_time'])
        prev = test_run
def test_get_test_runs_dicts_with_no_stop_time(self):
    """A run with no stop time round-trips stop_time as None."""
    run = api.create_run()
    test = api.create_test('fake_test')
    begin = datetime.datetime.utcnow()
    api.create_test_run(test.id, run.id, 'success', begin, None)
    run_dict = api.get_tests_run_dicts_from_run_id(run.uuid)
    self.assertEqual(1, len(run_dict))
    self.assertIn('fake_test', run_dict)
    entry = run_dict['fake_test']
    self.assertEqual('success', entry['status'])
    self.assertEqual(begin, entry['start_time'])
    self.assertIsNone(entry['stop_time'])
def test_get_all_test_metadata_keys(self):
    """All distinct metadata keys are returned across stored metadata."""
    test = api.create_test('fake_test')
    # BUG FIX: the original literal repeated the keys 'test_a' and
    # 'test_c'; duplicate keys in a dict literal are silently collapsed
    # (the last value wins), so this literal spells out the dict that was
    # actually being stored, without the misleading duplicates.
    meta_dict = {
        'test_a': 'b',
        'test_b': 'a',
        'test_c': 'b',
        'test_d': 'a',
    }
    api.add_test_metadata(meta_dict, test.id)
    keys = api.get_all_test_metadata_keys()
    self.assertEqual(sorted(['test_a', 'test_b', 'test_c', 'test_d']),
                     sorted(keys))
def process_results(results):
    """Persist a parsed subunit results dict into the database.

    Creates the run row (with metadata from CONF), then for each test
    creates or updates the aggregate Test row and records a TestRun row
    with its metadata and attachments.
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    run_at = date_parser.parse(CONF.run_at) if CONF.run_at else None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session,
                            metadata=CONF.run_meta)
    for name in results:
        result = results[name]
        db_test = api.get_test_by_test_id(name, session)
        if not db_test:
            status = result['status']
            success = 1 if status == 'success' else 0
            fails = 1 if status == 'fail' else 0
            run_time = subunit.get_duration(result['start_time'],
                                            result['end_time'])
            db_test = api.create_test(name, (success + fails), success,
                                      fails, run_time, None, session)
        else:
            test_values = increment_counts(db_test, result)
            # A skip produces no counter changes, so nothing to write.
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       result['status'],
                                       result['start_time'],
                                       result['end_time'],
                                       session=session)
        if result['metadata']:
            # NOTE(review): metadata is stored via update_test_run here;
            # other versions of this function call add_test_run_metadata
            # instead — confirm which API this revision expects.
            api.update_test_run(result['metadata'], test_run.id, session)
        if result['attachments']:
            api.add_test_run_attachments(result['attachments'],
                                         test_run.id, session)
    session.close()
def test_delete_old_test_runs(self):
    """Expired test runs and their metadata are purged; recent ones kept."""
    old_run = api.create_run()
    new_run = api.create_run()
    test = api.create_test('fake_test')
    stale = api.create_test_run(
        test.id, old_run.id, 'fail',
        start_time=datetime.datetime(1914, 6, 28, 10, 45, 0))
    fresh = api.create_test_run(test.id, new_run.id, 'fail',
                                start_time=datetime.datetime.utcnow())
    api.add_test_run_metadata({'key': 'value'}, fresh.id)
    api.add_test_run_metadata({'key': 'not_so_much_a_value'}, stale.id)
    api.delete_old_test_runs()
    remaining = api.get_all_test_runs()
    self.assertEqual(1, len(remaining))
    self.assertEqual(fresh.id, remaining[0].id)
    self.assertEqual(1, len(api.get_test_run_metadata(fresh.id)))
    self.assertEqual(0, len(api.get_test_run_metadata(stale.id)))
def test_get_test_runs_dicts_with_meta(self):
    """Metadata attached to a test run is present in the run dicts."""
    run = api.create_run()
    test = api.create_test('fake_test')
    test_run = api.create_test_run(test.id, run.id, 'success',
                                   datetime.datetime.utcnow(),
                                   datetime.datetime.utcnow())
    run_meta = {
        'key_a': 'value_b',
        'key_b': 'value_a',
        'attrs': 'test,smoke,notatest',
    }
    api.add_test_run_metadata(run_meta, test_run.id)
    run_dicts = api.get_tests_run_dicts_from_run_id(run.uuid)
    stored = run_dicts['fake_test']['metadata']
    self.assertEqual(3, len(stored))
    for key, value in run_meta.items():
        self.assertIn(key, stored)
        self.assertEqual(value, stored[key])
def update_tests(self, job):
    """Update aggregate Test counters for every test result in *job*.

    Creates the Test row on first sight of a test name, then bumps
    run_count and either the failure or success counter based on the
    reported status; SKIPPED results only bump run_count.

    :param job: object with a ``tests`` iterable of dicts carrying at
                least 'name' and 'status' keys.
    """
    # Lazy %-args: the message is only formatted if DEBUG is enabled.
    self.log.debug("Entering update_tests for job %s", job)
    for test in job.tests:
        # BUG FIX: `print test` is Python 2 statement syntax and a
        # SyntaxError under Python 3; the function call keeps the same
        # stdout side effect.
        print(test)
        db_test = api.get_test_by_test_id(test['name'], self.session)
        if db_test is None:
            db_test = api.create_test(test['name'], session=self.session)
        values = {'run_count': db_test.run_count + 1}
        if test['status'] in ("FAILED", "REGRESSION"):
            values['failure'] = db_test.failure + 1
        elif test['status'] in ("PASSED", "FIXED"):
            values['success'] = db_test.success + 1
        elif test['status'] == "SKIPPED":
            # Skips intentionally leave success/failure untouched.
            pass
        api.update_test(values, db_test.id, session=self.session)
def test_get_test_run_dict_by_run_meta_key_value(self):
    """Only runs whose metadata matches the key/value pair are returned."""
    started = datetime.datetime.utcnow()
    stopped = started + datetime.timedelta(minutes=2)
    tagged_run = api.create_run()
    other_run = api.create_run()
    api.add_run_metadata({'key': 'true'}, tagged_run.id)
    api.add_run_metadata({'key': 'not so true'}, other_run.id)
    test = api.create_test('fake_test')
    api.create_test_run(test.id, tagged_run.id, 'success', started,
                        stopped)
    api.create_test_run(test.id, other_run.id, 'fail', started,
                        datetime.datetime.utcnow())
    matches = api.get_test_run_dict_by_run_meta_key_value('key', 'true')
    self.assertEqual(1, len(matches))
    expected = {
        'test_id': 'fake_test',
        'status': 'success',
        'start_time': started,
        'stop_time': stopped,
    }
    self.assertEqual([expected], matches)
def test_get_test_run_dict_by_run_meta_key_value(self):
    """Metadata lookup must filter out the non-matching run."""
    t_start = datetime.datetime.utcnow()
    t_stop = t_start + datetime.timedelta(minutes=2)
    match_run = api.create_run()
    miss_run = api.create_run()
    api.add_run_metadata({'key': 'true'}, match_run.id)
    api.add_run_metadata({'key': 'not so true'}, miss_run.id)
    test = api.create_test('fake_test')
    api.create_test_run(test.id, match_run.id, 'success', t_start, t_stop)
    api.create_test_run(test.id, miss_run.id, 'fail', t_start,
                        datetime.datetime.utcnow())
    found = api.get_test_run_dict_by_run_meta_key_value('key', 'true')
    self.assertEqual(1, len(found))
    self.assertEqual(
        [{'test_id': 'fake_test',
          'status': 'success',
          'start_time': t_start,
          'stop_time': t_stop}],
        found)
def test_get_test_runs_by_run_id(self):
    """Each run uuid maps back to exactly its own test run."""
    second_run = api.create_run()
    first_run = api.create_run()
    third_run = api.create_run()
    test = api.create_test('fake_test')
    created = {}
    for run in (first_run, second_run, third_run):
        created[run.uuid] = api.create_test_run(
            test.id, run.id, 'success', datetime.datetime.utcnow())
    for uuid, expected_run in created.items():
        found = api.get_test_runs_by_run_id(uuid)
        self.assertEqual(1, len(found))
        self.assertEqual(expected_run.id, found[0].id)
        self.assertEqual(expected_run.status, found[0].status)
def _update_test(self, test_dict, session, start_time, stop_time):
    """Create or update the aggregate Test row for one subunit result.

    Returns the (possibly freshly created) Test DB object.
    """
    test_id = utils.cleanup_test_name(test_dict['id'])
    db_test = db_api.get_test_by_test_id(test_id, session)
    if db_test:
        test_dict['start_time'] = start_time
        test_dict['end_time'] = stop_time
        delta = shell.increment_counts(db_test, test_dict)
        # An empty delta means the result was a skip; nothing to persist.
        if delta:
            db_api.update_test(delta, db_test.id, session)
        return db_test
    # First time this test has been seen: seed its counters from the
    # single result we have.
    status = test_dict['status']
    success = 1 if status == 'success' else 0
    fails = 1 if status == 'fail' else 0
    run_time = read_subunit.get_duration(start_time, stop_time)
    return db_api.create_test(test_id, (success + fails), success, fails,
                              run_time, session)
def test_get_id_from_test_id(self):
    """The string test_id resolves to the row's internal id."""
    created = api.create_test('fake_test')
    self.assertEqual(created.id, api.get_id_from_test_id('fake_test'))
def process_results(results, run_at=None, artifacts=None, run_id=None,
                    run_meta=None, test_attr_prefix=None):
    """Insert converted subunit data into the database.

    Allows for run-specific information to be passed in via kwargs,
    checks CONF if no run-specific information is supplied.

    :param results: subunit stream to be inserted
    :param run_at: Optional time at which the run was started.
    :param artifacts: Link to any artifacts from the test run.
    :param run_id: The run id for the new run. Must be unique.
    :param run_meta: Metadata corresponding to the new run.
    :param test_attr_prefix: Optional test attribute prefix.
    :return: The created run DB object.
    """
    run_at = _override_conf(run_at, 'run_at')
    artifacts = _override_conf(artifacts, 'artifacts')
    run_id = _override_conf(run_id, 'run_id')
    run_meta = _override_conf(run_meta, 'run_meta')
    test_attr_prefix = _override_conf(test_attr_prefix, 'test_attr_prefix')
    if run_at:
        if not isinstance(run_at, datetime.datetime):
            run_at = date_parser.parse(run_at)
    else:
        run_at = None
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, artifacts,
                            id=run_id, run_at=run_at, session=session)
    if run_meta:
        api.add_run_metadata(run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            if results[test]['status'] in ['success', 'xfail']:
                success = 1
                fails = 0
            elif results[test]['status'] in ['fail', 'uxsuccess']:
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time, session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'], session)
        if results[test]['metadata']:
            if test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value)
                                 for meta in test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        if CONF.remove_test_attr_prefix:
                            # BUG FIX: strip using the effective (possibly
                            # kwarg-overridden) prefix resolved above, not
                            # CONF.test_attr_prefix directly, so a
                            # caller-supplied prefix is honored
                            # consistently with the guard on this branch.
                            normalized_attr = attr[len(test_attr_prefix):]
                        else:
                            normalized_attr = attr
                        if ('attr', normalized_attr) not in test_metadata:
                            test_meta_dict = {'attr': normalized_attr}
                            api.add_test_metadata(test_meta_dict,
                                                  db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'],
                                      test_run.id, session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
    return db_run
def test_create_test(self):
    """Creating a test makes it visible via get_all_tests."""
    api.create_test('1234')
    all_tests = api.get_all_tests()
    self.assertEqual(1, len(all_tests))
    self.assertEqual('1234', all_tests[0].test_id)