def get_test_runs_for_test(test_id):
    test_id = parse.unquote(test_id)
    start_date = _parse_datetimes(flask.request.args.get('start_date', None))
    stop_date = _parse_datetimes(flask.request.args.get('stop_date', None))
    datetime_resolution = flask.request.args.get('datetime_resolution', 'min')
    if datetime_resolution not in ['sec', 'min', 'hour', 'day']:
        message = ('Datetime resolution: %s, is not a valid'
                   ' choice' % datetime_resolution)
        status_code = 400
        return abort(make_response(message, status_code))
    with session_scope() as session:
        db_test_runs = api.get_test_runs_by_test_test_id(
            test_id, session=session, start_date=start_date,
            stop_date=stop_date)
        if not db_test_runs:
            # NOTE(mtreinish) if no data is returned from the DB just return
            # an empty set response, the test_run_aggregator function assumes
            # data is present.
            return jsonify({'numeric': {}, 'data': {}})
        test_runs = \
            test_run_aggregator.convert_test_runs_list_to_time_series_dict(
                db_test_runs, datetime_resolution)
        return jsonify(test_runs)
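# Hypothetical usage sketch (not part of the module above): exercising
# get_test_runs_for_test through Flask's test client. The URL rule the view
# is registered under is not shown here, so the path below is an assumption,
# as is the example test_id.
def _example_query_test_runs(app):
    client = app.test_client()
    # datetime_resolution must be one of sec/min/hour/day; anything else makes
    # the handler abort with a 400 response.
    return client.get(
        '/test_runs/tempest.api.compute.test_servers'
        '?start_date=2015-06-01&datetime_resolution=hour')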
def test_get_test_runs_test_test_id(self):
    run = api.create_run()
    test_a = api.create_test('fake_test')
    test_b = api.create_test('less_fake_test')
    api.create_test_run(test_a.id, run.id, 'success')
    api.create_test_run(test_b.id, run.id, 'success')
    res = api.get_test_runs_by_test_test_id('less_fake_test')
    self.assertEqual(1, len(res))
    self.assertEqual(test_b.id, res[0].test_id)
    self.assertEqual(run.id, res[0].run_id)
def _get_data(test_id, start_date, stop_date):
    with session_scope() as session:
        db_test_runs = api.get_test_runs_by_test_test_id(
            test_id, session=session, start_date=start_date,
            stop_date=stop_date)
        if not db_test_runs:
            # NOTE(mtreinish) if no data is returned from the DB just
            # return an empty set response, the test_run_aggregator
            # function assumes data is present.
            return {'numeric': {}, 'data': {}, 'failed_runs': {}}
        test_runs = \
            test_run_aggregator.convert_test_runs_list_to_time_series_dict(
                db_test_runs, datetime_resolution)
        failed_run_ids = [
            x.run_id for x in db_test_runs if x.status == 'fail']
        failed_runs = api.get_runs_by_ids(failed_run_ids, session=session)
        job_names = {}
        providers = {}
        failed_uuids = [x.uuid for x in failed_runs]
        split_uuids = []
        if len(failed_uuids) <= 10:
            split_uuids = [[x] for x in failed_uuids]
        else:
            for i in range(0, len(failed_uuids), 10):
                end = i + 10
                split_uuids.append(failed_uuids[i:end])
        for uuids in split_uuids:
            change_dict = {}
            for uuid in uuids:
                metadata = api.get_run_metadata(uuid, session=session)
                short_uuid = None
                change_num = None
                patch_num = None
                for meta in metadata:
                    if meta.key == 'build_short_uuid':
                        short_uuid = meta.value
                    elif meta.key == 'build_change':
                        change_num = meta.value
                    elif meta.key == 'build_patchset':
                        patch_num = meta.value
                    elif meta.key == 'build_name':
                        job_names[uuid] = meta.value
                    elif meta.key == 'node_provider':
                        providers[uuid] = meta.value
                # NOTE(mtreinish): If the required metadata fields
                # aren't present skip ES lookup
                if not short_uuid or not change_num or not patch_num:
                    continue
                global classifier
                if classifier:
                    change_dict[uuid] = {
                        'change_num': change_num,
                        'patch_num': patch_num,
                        'short_uuid': short_uuid,
                    }
            query_thread = threading.Thread(
                target=_populate_bug_dict, args=[change_dict])
            query_threads.append(query_thread)
            query_thread.start()
        output = []
        for thread in query_threads:
            thread.join()
        for run in failed_runs:
            temp_run = {}
            temp_run['provider'] = providers.get(run.uuid)
            temp_run['job_name'] = job_names.get(run.uuid)
            temp_run['run_at'] = run.run_at.isoformat()
            temp_run['artifacts'] = run.artifacts
            temp_run['bugs'] = bug_dict.get(run.uuid, [])
            output.append(temp_run)
        test_runs['failed_runs'] = output
        return test_runs
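# Hypothetical sketch (not from the module above) of the shared state and the
# _populate_bug_dict contract that _get_data relies on: bug_dict, query_threads
# and classifier are assumed to be module-level globals, and the real helper
# looks each failure up against the classifier; the lookup itself is elided.
bug_dict = {}       # run uuid -> list of known bugs matched for that failure
query_threads = []  # one worker thread per batch of failed-run uuids
classifier = None   # set at startup when failure classification is enabled


def _populate_bug_dict(change_dict):
    # change_dict maps a failed run's uuid to the build_change, build_patchset
    # and build_short_uuid values pulled from its run metadata.
    for uuid in change_dict:
        # The real implementation asks the classifier which bugs match this
        # build; as a placeholder we record an empty result for the run.
        bug_dict[uuid] = []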