def process_results(results):
    """Insert converted subunit data into the database.

    Creates the run row (honoring CONF.run_at / CONF.artifacts /
    CONF.run_id), attaches run metadata, then inserts or updates each
    test, its test_run row, per-run metadata/attachments and, when
    CONF.test_attr_prefix is set, any new per-test attr metadata.

    :param results: dict of parsed subunit results keyed by test_id, with
                    a 'run_time' entry for the whole run (popped here).
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    # CONF.run_at is a string when set; parse it into a datetime.
    if CONF.run_at:
        run_at = date_parser.parse(CONF.run_at)
    else:
        run_at = None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session)
    if CONF.run_meta:
        api.add_run_metadata(CONF.run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            # First sighting of this test: seed its pass/fail counters.
            if results[test]['status'] == 'success':
                success = 1
                fails = 0
            elif results[test]['status'] == 'fail':
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time, session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if CONF.test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value)
                                 for meta in test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        # Only store attrs not already recorded for the test.
                        if ('attr', attr) not in test_metadata:
                            test_meta_dict = {'attr': attr}
                            # BUG FIX: keyword was misspelled 'sesion',
                            # raising TypeError whenever a new attr was
                            # written.
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
def test_get_runs_by_key_value(self):
    """Only the run carrying the metadata pair should be returned."""
    # One untagged run proves unrelated runs are filtered out.
    api.create_run()
    tagged_run = api.create_run()
    api.add_run_metadata({'not_a_key': 'not_a_value'}, tagged_run.id)
    matches = api.get_runs_by_key_value('not_a_key', 'not_a_value')
    self.assertEqual(1, len(matches))
    match = matches[0]
    self.assertEqual(tagged_run.id, match.id)
    self.assertEqual(tagged_run.uuid, match.uuid)
def test_get_run_times_grouped_by_run_metadata_key(self):
    """Run times should be bucketed under their metadata value."""
    first_run = api.create_run(run_time=2.2, passes=2)
    second_run = api.create_run(run_time=3.5, passes=3)
    api.add_run_metadata({'key': 'value_a'}, first_run.id)
    api.add_run_metadata({'key': 'value_b'}, second_run.id)
    grouped = api.get_run_times_grouped_by_run_metadata_key('key')
    self.assertEqual({'value_a': [2.2], 'value_b': [3.5]}, grouped)
def test_get_run_metadata(self):
    """Metadata lookup by uuid returns rows only for the tagged run."""
    plain_run = api.create_run()
    tagged_run = api.create_run()
    api.add_run_metadata({'not_a_key': 'not_a_value'}, tagged_run.id)
    self.assertEqual([], api.get_run_metadata(plain_run.uuid))
    tagged_meta = api.get_run_metadata(tagged_run.uuid)
    self.assertEqual(1, len(tagged_meta))
    meta_row = tagged_meta[0].to_dict()
    self.assertEqual(tagged_run.id, meta_row['run_id'])
    self.assertEqual('not_a_key', meta_row['key'])
    self.assertEqual('not_a_value', meta_row['value'])
def test_delete_old_runs(self):
    """delete_old_runs drops the ancient run and its metadata rows."""
    ancient = api.create_run(
        run_at=datetime.datetime(1914, 6, 28, 10, 45, 0))
    recent = api.create_run()
    api.add_run_metadata({'key': 'value'}, recent.id)
    api.add_run_metadata({'key': 'not_so_much_a_value'}, ancient.id)
    api.delete_old_runs()
    remaining = api.get_all_runs()
    self.assertEqual(1, len(remaining))
    metadata_count = api.get_session().query(
        models.RunMetadata.id).count()
    self.assertEqual(1, metadata_count)
    self.assertEqual(recent.id, remaining[0].id)
    self.assertEqual(1, len(api.get_run_metadata(recent.uuid)))
    self.assertEqual(0, len(api.get_run_metadata(ancient.uuid)))
def test_get_all_run_metadata_keys(self):
    """All distinct metadata keys across runs should be returned."""
    run = api.create_run()
    # BUG FIX: the original literal repeated 'test_a' and 'test_c';
    # duplicate keys in a dict literal are silently dropped (only the
    # last value survives), so the dead entries were misleading. The
    # effective (last-wins) values are kept here.
    meta_dict = {
        'test_a': 'b',
        'test_b': 'a',
        'test_c': 'b',
        'test_d': 'a',
    }
    api.add_run_metadata(meta_dict, run.id)
    keys = api.get_all_run_metadata_keys()
    self.assertEqual(sorted(['test_a', 'test_b', 'test_c', 'test_d']),
                     sorted(keys))
def test_get_test_run_series_with_meta(self):
    """Filtering the series by metadata keeps only the matching run."""
    start_a = datetime.datetime.utcnow()
    start_b = start_a + datetime.timedelta(minutes=2)
    tagged_run = api.create_run(passes=5, run_at=start_a)
    api.create_run(fails=2, run_at=start_b)
    api.add_run_metadata({'not_a_key': 'not_a_value'}, tagged_run.id)
    series = api.get_test_run_series(key='not_a_key', value='not_a_value')
    self.assertEqual(1, len(series.keys()))
    # Compare with microseconds zeroed; DB timestamps lose precision.
    observed = [stamp.replace(microsecond=0) for stamp in series.keys()]
    self.assertIn(start_a.replace(microsecond=0), observed)
    self.assertNotIn(start_b.replace(microsecond=0), observed)
    self.assertEqual(5, series[list(series.keys())[0]])
def test_delete_old_runs(self):
    """Old runs and their metadata are purged; recent ones survive."""
    old_run = api.create_run(
        run_at=datetime.datetime(1914, 6, 28, 10, 45, 0))
    new_run = api.create_run()
    api.add_run_metadata({'key': 'value'}, new_run.id)
    api.add_run_metadata({'key': 'not_so_much_a_value'}, old_run.id)
    api.delete_old_runs()
    surviving = api.get_all_runs()
    self.assertEqual(1, len(surviving))
    self.assertEqual(
        1, api.get_session().query(models.RunMetadata.id).count())
    self.assertEqual(new_run.id, surviving[0].id)
    self.assertEqual(1, len(api.get_run_metadata(new_run.uuid)))
    self.assertEqual(0, len(api.get_run_metadata(old_run.uuid)))
def test_get_test_run_series_with_meta(self):
    """Only the metadata-matching run contributes to the series."""
    first_stamp = datetime.datetime.utcnow()
    later_stamp = first_stamp + datetime.timedelta(minutes=2)
    keyed_run = api.create_run(passes=5, run_at=first_stamp)
    api.create_run(fails=2, run_at=later_stamp)
    api.add_run_metadata({'not_a_key': 'not_a_value'}, keyed_run.id)
    result = api.get_test_run_series(key='not_a_key',
                                     value='not_a_value')
    self.assertEqual(1, len(result.keys()))
    # DB timestamps drop sub-second precision, so compare truncated.
    truncated = [ts.replace(microsecond=0) for ts in result.keys()]
    self.assertIn(first_stamp.replace(microsecond=0), truncated)
    self.assertNotIn(later_stamp.replace(microsecond=0), truncated)
    self.assertEqual(5, result[list(result.keys())[0]])
def test_get_time_series_runs_by_key_value(self):
    """Only the runs tagged test_key=fun appear in the time series."""
    created = []
    start = datetime.datetime.utcnow()
    # Fifteen runs share one timestamp; only the first three get the
    # 'fun' tag that the query below filters on.
    for idx in moves.range(15):
        new_run = api.create_run(idx, idx + 1, idx + 2, 3, run_at=start)
        created.append(new_run)
        tag = 'fun' if idx < 3 else 'no-fun'
        api.add_run_metadata(
            {'test_key': tag, 'non_test': 'value-%s' % idx}, new_run.id)
    series = api.get_time_series_runs_by_key_value('test_key', 'fun')
    self.assertEqual(1, len(series))
    stamp = list(series.keys())[0]
    self.assertEqual(3, len(series[stamp]))
    for idx in moves.range(3):
        expected = {
            'skip': idx,
            'fail': idx + 1,
            'pass': idx + 2,
            'id': created[idx].uuid,
            'run_time': 3.0,
            'metadata': {
                u'test_key': u'fun',
                u'non_test': u'value-%s' % idx
            }
        }
        self.assertIn(expected, series[stamp])
    for idx in moves.range(3, 14):
        unexpected = {
            'skip': idx,
            'fail': idx + 1,
            'pass': idx + 2,
            'id': created[idx].id,
            'run_time': 3.0,
            'metadata': {
                u'test_key': u'fun',
                u'non_test': u'value-%s' % idx
            }
        }
        self.assertNotIn(unexpected, series[stamp])
def process_results(results):
    """Insert converted subunit data into the database.

    Creates the run row from CONF settings, attaches run metadata, then
    inserts or updates each test along with its test_run row, per-run
    metadata and attachments.

    :param results: dict of parsed subunit results keyed by test_id, with
                    a 'run_time' entry for the whole run (popped here).
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, session=session)
    if CONF.run_meta:
        api.add_run_metadata(CONF.run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            # First sighting of this test: seed its pass/fail counters.
            if results[test]['status'] == 'success':
                success = 1
                fails = 0
            elif results[test]['status'] == 'fail':
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time, session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            # BUG FIX: the guard checks 'attachments' but the body read the
            # missing 'attachment' key, raising KeyError whenever a test
            # actually had attachments.
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
def test_get_time_series_runs_by_key_value(self):
    """The series includes only runs carrying test_key=fun."""
    all_runs = []
    shared_stamp = datetime.datetime.utcnow()
    for num in moves.range(15):
        run = api.create_run(num, num + 1, num + 2, 3,
                             run_at=shared_stamp)
        all_runs.append(run)
        # Runs 0-2 are tagged 'fun'; the rest are tagged 'no-fun'.
        meta = {'test_key': 'fun' if num < 3 else 'no-fun',
                'non_test': 'value-%s' % num}
        api.add_run_metadata(meta, run.id)
    time_series = api.get_time_series_runs_by_key_value('test_key',
                                                        'fun')
    self.assertEqual(1, len(time_series))
    bucket = list(time_series.keys())[0]
    self.assertEqual(3, len(time_series[bucket]))
    for num in moves.range(3):
        present = {'skip': num,
                   'fail': num + 1,
                   'pass': num + 2,
                   'id': all_runs[num].uuid,
                   'run_time': 3.0,
                   'metadata': {u'test_key': u'fun',
                                u'non_test': u'value-%s' % num}}
        self.assertIn(present, time_series[bucket])
    for num in moves.range(3, 14):
        absent = {'skip': num,
                  'fail': num + 1,
                  'pass': num + 2,
                  'id': all_runs[num].id,
                  'run_time': 3.0,
                  'metadata': {u'test_key': u'fun',
                               u'non_test': u'value-%s' % num}}
        self.assertNotIn(absent, time_series[bucket])
def startTestRun(self):
    """Open the subunit stream, result hook and database run record."""
    self._subunit = io.BytesIO()
    self.subunit_stream = subunit.v2.StreamResultToBytes(self._subunit)
    # Fan results out to both the dict handler and the raw stream.
    self.hook = testtools.CopyStreamResult(
        [testtools.StreamToDict(self._handle_test),
         self.subunit_stream])
    self.hook.startTestRun()
    self.start_time = datetime.datetime.utcnow()
    db_session = self.session_factory()
    if self._run_id:
        # Resume an existing run identified by its uuid.
        internal_id = db_api.get_run_id_from_uuid(self._run_id,
                                                  session=db_session)
        self.run = db_api.get_run_by_id(internal_id, session=db_session)
    else:
        # Start a brand new run (with optional metadata).
        self.run = db_api.create_run(session=db_session)
        if self._metadata:
            db_api.add_run_metadata({'stestr_run_meta': self._metadata},
                                    self.run.id, session=db_session)
        self._run_id = self.run.uuid
    db_session.close()
    self.totals = {}
def test_get_test_run_dict_by_run_meta_key_value(self):
    """Only test runs whose run metadata matches are returned."""
    begin = datetime.datetime.utcnow()
    end = begin + datetime.timedelta(minutes=2)
    matching_run = api.create_run()
    other_run = api.create_run()
    api.add_run_metadata({'key': 'true'}, matching_run.id)
    api.add_run_metadata({'key': 'not so true'}, other_run.id)
    fake_test = api.create_test('fake_test')
    api.create_test_run(fake_test.id, matching_run.id, 'success',
                        begin, end)
    api.create_test_run(fake_test.id, other_run.id, 'fail', begin,
                        datetime.datetime.utcnow())
    found = api.get_test_run_dict_by_run_meta_key_value('key', 'true')
    self.assertEqual(1, len(found))
    self.assertEqual([{'test_id': 'fake_test',
                       'status': 'success',
                       'start_time': begin,
                       'stop_time': end}], found)
def test_get_test_run_dict_by_run_meta_key_value(self):
    """Runs tagged key=true yield exactly one matching test-run dict."""
    started = datetime.datetime.utcnow()
    stopped = started + datetime.timedelta(minutes=2)
    keyed_run = api.create_run()
    unkeyed_run = api.create_run()
    api.add_run_metadata({'key': 'true'}, keyed_run.id)
    api.add_run_metadata({'key': 'not so true'}, unkeyed_run.id)
    the_test = api.create_test('fake_test')
    api.create_test_run(the_test.id, keyed_run.id, 'success', started,
                        stopped)
    api.create_test_run(the_test.id, unkeyed_run.id, 'fail', started,
                        datetime.datetime.utcnow())
    results = api.get_test_run_dict_by_run_meta_key_value('key', 'true')
    self.assertEqual(1, len(results))
    expected = [{
        'test_id': 'fake_test',
        'status': 'success',
        'start_time': started,
        'stop_time': stopped,
    }]
    self.assertEqual(expected, results)
def test_get_runs_by_status_grouped_by_run_metadata(self):
    """Pass/fail counts inside 2012 should be grouped per project."""
    # Generating 20 runs:
    # 10 with no failures
    # 10 with 10 failures
    # 7 in 2010/2011 each, 6 in 2012
    # 10 in projecta/projectb each
    for idx in moves.range(20):
        failures = 10 if idx % 2 == 1 else 0
        stamp = datetime.datetime(2010 + (idx % 3), 1, idx + 1, 12, 0, 0)
        new_run = api.create_run(fails=failures, passes=10, run_at=stamp)
        self.assertIsNotNone(new_run)
        project = 'projecta' if idx < 10 else 'projectb'
        api.add_run_metadata({'project': project}, new_run.id)
    grouped = api.get_runs_by_status_grouped_by_run_metadata(
        'project', start_date='2012-01-01', stop_date='2012-12-31')
    # Both projects must be present, each with pass and fail buckets.
    self.assertEqual(2, len(grouped.keys()))
    for project in ('projecta', 'projectb'):
        self.assertTrue(project in grouped)
        self.assertEqual(2, len(grouped[project].keys()))
        self.assertTrue('pass' in grouped[project])
        self.assertTrue('fail' in grouped[project])
    self.assertEqual(2, grouped['projecta']['pass'])
    self.assertEqual(1, grouped['projecta']['fail'])
    self.assertEqual(1, grouped['projectb']['pass'])
    self.assertEqual(2, grouped['projectb']['fail'])
def test_get_recent_runs_by_key_value_metadata(self):
    """All runs tagged a_key=a_value come back; other tags do not."""
    first = api.create_run()
    other = api.create_run()
    third = api.create_run()
    api.add_run_metadata({'a_key': 'a_value'}, first.id)
    api.add_run_metadata({'a_key': 'a_value'}, third.id)
    api.add_run_metadata({'a_key': 'b_value'}, other.id)
    recent = api.get_recent_runs_by_key_value_metadata('a_key', 'a_value')
    self.assertEqual(2, len(recent))
    recent_ids = [run.id for run in recent]
    self.assertIn(first.id, recent_ids)
    self.assertNotIn(other.id, recent_ids)
    self.assertIn(third.id, recent_ids)
def test_get_all_runs_time_series_by_key(self):
    """The series has one bucket per run carrying 'a_key'."""
    when_a = datetime.datetime(1914, 6, 28, 10, 45, 0)
    keyed_run_a = api.create_run(run_at=when_a)
    unkeyed_run = api.create_run()
    when_c = datetime.datetime(1918, 11, 11, 11, 11, 11)
    keyed_run_c = api.create_run(run_at=when_c)
    api.add_run_metadata({'not_a_key': 'not_a_value'}, unkeyed_run.id)
    api.add_run_metadata({'a_key': 'a_value'}, keyed_run_a.id)
    api.add_run_metadata({'a_key': 'c_value'}, keyed_run_c.id)
    series = api.get_all_runs_time_series_by_key('a_key')
    self.assertEqual(2, len(series.keys()))
    bucket_dates = [stamp.date() for stamp in series.keys()]
    self.assertIn(when_a.date(), bucket_dates)
    self.assertIn(when_c.date(), bucket_dates)
def test_get_recent_runs_by_key_value_metadata_one_run(self):
    """With num_runs=1 only the newest matching run is returned."""
    long_ago = datetime.datetime(1914, 6, 28, 10, 45, 0)
    oldest = api.create_run(run_at=long_ago)
    middle = api.create_run()
    newest = api.create_run()
    api.add_run_metadata({'a_key': 'a_value'}, oldest.id)
    api.add_run_metadata({'a_key': 'a_value'}, newest.id)
    api.add_run_metadata({'a_key': 'b_value'}, middle.id)
    recent = api.get_recent_runs_by_key_value_metadata('a_key', 'a_value',
                                                       num_runs=1)
    self.assertEqual(1, len(recent))
    recent_ids = [run.id for run in recent]
    self.assertNotIn(oldest.id, recent_ids)
    self.assertNotIn(middle.id, recent_ids)
    self.assertIn(newest.id, recent_ids)
def process_results(results, run_at=None, artifacts=None, run_id=None,
                    run_meta=None, test_attr_prefix=None):
    """Insert converted subunit data into the database.

    Allows for run-specific information to be passed in via kwargs,
    checks CONF if no run-specific information is supplied.

    :param results: subunit stream to be inserted
    :param run_at: Optional time at which the run was started.
    :param artifacts: Link to any artifacts from the test run.
    :param run_id: The run id for the new run. Must be unique.
    :param run_meta: Metadata corresponding to the new run.
    :param test_attr_prefix: Optional test attribute prefix.
    """
    # Explicit kwargs win; fall back to CONF for anything not supplied.
    run_at = _override_conf(run_at, 'run_at')
    artifacts = _override_conf(artifacts, 'artifacts')
    run_id = _override_conf(run_id, 'run_id')
    run_meta = _override_conf(run_meta, 'run_meta')
    test_attr_prefix = _override_conf(test_attr_prefix, 'test_attr_prefix')
    if run_at:
        if not isinstance(run_at, datetime.datetime):
            run_at = date_parser.parse(run_at)
    else:
        run_at = None
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, artifacts,
                            id=run_id, run_at=run_at, session=session)
    if run_meta:
        api.add_run_metadata(run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            # First sighting of this test: seed its pass/fail counters.
            # xfail counts as a pass, uxsuccess as a failure.
            if results[test]['status'] in ['success', 'xfail']:
                success = 1
                fails = 0
            elif results[test]['status'] in ['fail', 'uxsuccess']:
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time, session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value)
                                 for meta in test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        if CONF.remove_test_attr_prefix:
                            # BUG FIX: strip the effective prefix — which
                            # may have been passed in as a kwarg — rather
                            # than CONF.test_attr_prefix, which can differ
                            # and would slice the wrong number of chars.
                            normalized_attr = attr[len(test_attr_prefix):]
                        else:
                            normalized_attr = attr
                        # Only store attrs not already on the test.
                        if ('attr', normalized_attr) not in test_metadata:
                            test_meta_dict = {'attr': normalized_attr}
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
    return db_run