Example #1
0
 def test_get_run_metadata(self):
     """Metadata is returned only for the run it was attached to."""
     # Create two runs; attach metadata only to the second one.
     first_run = api.create_run()
     second_run = api.create_run()
     api.add_run_metadata({'not_a_key': 'not_a_value'}, second_run.id)
     # The run without metadata yields an empty list.
     self.assertEqual([], api.get_run_metadata(first_run.uuid))
     # The run with metadata yields exactly one entry matching what
     # was stored.
     stored = api.get_run_metadata(second_run.uuid)
     self.assertEqual(1, len(stored))
     entry = stored[0].to_dict()
     self.assertEqual(second_run.id, entry['run_id'])
     self.assertEqual('not_a_key', entry['key'])
     self.assertEqual('not_a_value', entry['value'])
Example #2
0
 def test_get_run_metadata(self):
     """get_run_metadata only returns rows belonging to the queried run."""
     no_meta_run = api.create_run()
     meta_run = api.create_run()
     api.add_run_metadata({'not_a_key': 'not_a_value'}, meta_run.id)
     empty = api.get_run_metadata(no_meta_run.uuid)
     meta_rows = api.get_run_metadata(meta_run.uuid)
     self.assertEqual([], empty)
     self.assertEqual(1, len(meta_rows))
     # The single row round-trips the key/value pair we added.
     row = meta_rows[0].to_dict()
     self.assertEqual(meta_run.id, row['run_id'])
     self.assertEqual('not_a_key', row['key'])
     self.assertEqual('not_a_value', row['value'])
Example #3
0
 def test_delete_old_runs(self):
     """Old runs and their metadata are purged; recent ones survive."""
     old_run = api.create_run(
         run_at=datetime.datetime(1914, 6, 28, 10, 45, 0))
     new_run = api.create_run()
     api.add_run_metadata({'key': 'value'}, new_run.id)
     api.add_run_metadata({'key': 'not_so_much_a_value'}, old_run.id)
     api.delete_old_runs()
     # Only the recent run survives the purge.
     remaining = api.get_all_runs()
     self.assertEqual(1, len(remaining))
     # Exactly one metadata row is left behind: the one attached to
     # the surviving run.
     metadata_count = api.get_session().query(
         models.RunMetadata.id).count()
     self.assertEqual(1, metadata_count)
     self.assertEqual(new_run.id, remaining[0].id)
     self.assertEqual(1, len(api.get_run_metadata(new_run.uuid)))
     self.assertEqual(0, len(api.get_run_metadata(old_run.uuid)))
Example #4
0
 def test_delete_old_runs(self):
     """delete_old_runs drops stale runs along with their metadata."""
     stale = api.create_run(run_at=datetime.datetime(1914, 6, 28,
                                                     10, 45, 0))
     fresh = api.create_run()
     api.add_run_metadata({'key': 'value'}, fresh.id)
     api.add_run_metadata({'key': 'not_so_much_a_value'}, stale.id)
     api.delete_old_runs()
     surviving = api.get_all_runs()
     self.assertEqual(1, len(surviving))
     # Metadata of the deleted run must be gone as well.
     rows_left = api.get_session().query(models.RunMetadata.id).count()
     self.assertEqual(1, rows_left)
     self.assertEqual(fresh.id, surviving[0].id)
     self.assertEqual(1, len(api.get_run_metadata(fresh.uuid)))
     self.assertEqual(0, len(api.get_run_metadata(stale.uuid)))
Example #5
0
def get_subunit_results(build_uuid,
                        dataset_name,
                        sample_interval,
                        db_uri,
                        build_name='tempest-full',
                        data_path=None,
                        s3=None):
    """Collect per-run data for all runs matching a build uuid.

    Runs are filtered to those whose ``build_name`` metadata matches the
    requested *build_name* and is not a multinode job.  ``dataset_name``
    is accepted for interface compatibility but not used here.

    :return: a list of results produced by ``_get_data_for_run`` (runs
        with no result are skipped)
    """
    engine = create_engine(db_uri)
    session = sessionmaker(bind=engine)()
    matching_runs = api.get_runs_by_key_value('build_uuid', build_uuid,
                                              session=session)
    results = []
    for run in matching_runs:
        # Check if we are interested in this build at all.
        metadata = api.get_run_metadata(run.uuid, session=session)
        names = [item.value for item in metadata
                 if item.key == 'build_name']
        # Runs with no recorded build_name are skipped outright.
        if not names:
            continue
        db_build_name = names[0]
        # Skip build_names that aren't the one selected.
        if db_build_name != build_name:
            continue
        # NOTE(mtreinish): Only be concerned with single node to start
        if 'multinode' in db_build_name:
            continue
        data = _get_data_for_run(run, sample_interval, session,
                                 data_path=data_path, s3=s3)
        if data:
            results.append(data)
    session.close()
    return results
Example #6
0
def _get_recent_runs_data(run_metadata_key, value, detail=False):
    """Return summaries of the most recent runs matching a metadata pair.

    :param run_metadata_key: metadata key used to select runs
    :param value: metadata value used to select runs
    :param detail: when True each run is returned as its full dict
        instead of the condensed summary
    :return: list of run dicts, each annotated with 'build_name' when
        that metadata key exists for the run
    """
    # NOTE(review): when 'num_runs' is absent this is the int 10, but a
    # supplied query arg arrives as a str — presumably the api layer
    # accepts both; confirm.
    num_runs = flask.request.args.get('num_runs', 10)
    with session_scope() as session:
        results = api.get_recent_runs_by_key_value_metadata(
            run_metadata_key, value, num_runs, session)
        runs = []
        for result in results:
            if detail:
                run = result.to_dict()
            else:
                # Condensed view: classify the run, skipping runs with
                # neither a pass nor a fail recorded.
                if result.passes > 0 and result.fails == 0:
                    status = 'success'
                elif result.fails > 0:
                    status = 'fail'
                else:
                    continue

                run = {
                    'id': result.uuid,
                    'status': status,
                    'start_date': result.run_at.isoformat(),
                    'link': result.artifacts,
                }

            # Annotate with the first build_name metadata entry, if any.
            run_meta = api.get_run_metadata(result.uuid, session)
            for meta in run_meta:
                if meta.key == 'build_name':
                    run['build_name'] = meta.value
                    break
            runs.append(run)
    return runs
Example #7
0
def _get_recent_runs_data(run_metadata_key, value, detail=False):
    """Build a list of recent runs matching a metadata key/value pair.

    :param run_metadata_key: metadata key used to select runs
    :param value: metadata value used to select runs
    :param detail: return the full run dict rather than a summary
    :return: list of run dicts annotated with 'build_name' when present
    """
    num_runs = flask.request.args.get('num_runs', 10)
    runs = []
    with session_scope() as session:
        recent = api.get_recent_runs_by_key_value_metadata(
            run_metadata_key, value, num_runs, session)
        for entry in recent:
            if detail:
                run = entry.to_dict()
            else:
                # Summary view: classify the run; runs with neither a
                # pass nor a fail recorded are skipped entirely.
                if entry.passes > 0 and entry.fails == 0:
                    outcome = 'success'
                elif entry.fails > 0:
                    outcome = 'fail'
                else:
                    continue
                run = {
                    'id': entry.uuid,
                    'status': outcome,
                    'start_date': entry.run_at.isoformat(),
                    'link': entry.artifacts,
                }
            # Attach the first build_name metadata value, if one exists.
            for item in api.get_run_metadata(entry.uuid, session):
                if item.key == 'build_name':
                    run['build_name'] = item.value
                    break
            runs.append(run)
    return runs
Example #8
0
 def get_metadata(self):
     """Return the stored 'stestr_run_meta' metadata value for this run.

     :return: the value of the ``stestr_run_meta`` metadata key, or
         ``None`` when no run id is set or the key is absent
     """
     if not self._run_id:
         return None
     session = self.session_factory()
     try:
         metadata = db_api.get_run_metadata(self._run_id, session=session)
         for meta in metadata:
             if meta.key == 'stestr_run_meta':
                 return meta.value
         return None
     finally:
         # BUG FIX: the session used to be left open (leaked on every
         # call); always release it, even on the early return.
         session.close()
Example #9
0
def get_recent_failed_runs_rss(run_metadata_key, value):
    """Serve an RSS feed of recent failed runs for a metadata key/value.

    Feed objects and the timestamp of the newest run already published
    are cached in the module-level ``feeds`` dict between requests.
    """
    run_metadata_key = parse.unquote(run_metadata_key)
    value = parse.unquote(value)
    url = request.url
    # Lazily create the cached feed and its "last run seen" marker for
    # this key/value pair.
    if run_metadata_key not in feeds:
        feeds[run_metadata_key] = {value: _gen_feed(url,
                                                    run_metadata_key,
                                                    value)}
        feeds["last runs"][run_metadata_key] = {value: None}
    elif value not in feeds[run_metadata_key]:
        feeds[run_metadata_key][value] = _gen_feed(url,
                                                   run_metadata_key,
                                                   value)
        feeds["last runs"][run_metadata_key][value] = None
    fg = feeds[run_metadata_key][value]
    with session_scope() as session:
        # Only fetch failures newer than the last one already in the feed.
        failed_runs = api.get_recent_failed_runs_by_run_metadata(
            run_metadata_key, value,
            start_date=feeds["last runs"][run_metadata_key][value],
            session=session)
        if failed_runs:
            last_run = sorted([x.run_at for x in failed_runs])[-1]
            # Nothing new since the feed was last built: serve it as-is.
            if feeds["last runs"][run_metadata_key][value] == last_run:
                return feeds[run_metadata_key][value].rss_str()
            feeds["last runs"][run_metadata_key][value] = last_run
        else:
            # No new failures: distinguish "no matching runs at all"
            # (404) from "runs exist but none failed recently".
            count = api.get_runs_counts_by_run_metadata(
                run_metadata_key, value, session=session)
            if count == 0:
                msg = 'No matching runs found with %s=%s' % (
                    run_metadata_key, value)
                return abort(make_response(msg, 404))
        # Append one feed entry per newly-seen failed run.
        for run in failed_runs:
            meta = api.get_run_metadata(run.uuid, session=session)
            uuid = [x.value for x in meta if x.key == 'build_uuid'][0]
            build_name = [x.value for x in meta if x.key == 'build_name'][0]
            entry = fg.add_entry()
            entry.id(uuid)
            entry.title('Failed Run %s/%s' % (build_name, uuid[:7]))
            entry.published(pytz.utc.localize(run.run_at))
            entry.link({'href': run.artifacts, 'rel': 'alternate'})
            metadata_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
                'g/%s/%s' % (run_metadata_key, value))
            job_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
                'job/%s' % build_name)
            content = '<ul>'
            content += '<li><a href="%s">Metadata page</a></li>\n' % (
                metadata_url)
            content += '<li><a href="%s">Job Page</a></li>' % (job_url)
            content += '</ul>'
            entry.description(content)
    response = make_response(feeds[run_metadata_key][value].rss_str())
    response.headers['Content-Type'] = 'application/xml; charset=utf-8'
    return response
Example #10
0
def get_recent_failed_runs_rss(run_metadata_key, value):
    """Serve an RSS feed of recent failed runs for a metadata key/value.

    Feeds and the timestamp of the newest published run are cached in
    the module-level ``feeds`` dict; a 400 is returned when there are no
    failed runs to report at all.
    """
    run_metadata_key = parse.unquote(run_metadata_key)
    value = parse.unquote(value)
    url = request.url
    # Lazily create the cached feed and its "last run seen" marker for
    # this key/value pair.
    if run_metadata_key not in feeds:
        feeds[run_metadata_key] = {value: _gen_feed(url,
                                                    run_metadata_key,
                                                    value)}
        feeds["last runs"][run_metadata_key] = {value: None}
    elif value not in feeds[run_metadata_key]:
        feeds[run_metadata_key][value] = _gen_feed(url,
                                                   run_metadata_key,
                                                   value)
        feeds["last runs"][run_metadata_key][value] = None
    fg = feeds[run_metadata_key][value]
    with session_scope() as session:
        # Only fetch failures newer than the last one already published.
        failed_runs = api.get_recent_failed_runs_by_run_metadata(
            run_metadata_key, value,
            start_date=feeds["last runs"][run_metadata_key][value],
            session=session)
        if failed_runs:
            last_run = sorted([x.run_at for x in failed_runs])[-1]
            # Nothing new since the feed was last built: serve it as-is.
            if feeds["last runs"][run_metadata_key][value] == last_run:
                return feeds[run_metadata_key][value].rss_str()
            feeds["last runs"][run_metadata_key][value] = last_run
        else:
            msg = 'No Failed Runs for run metadata %s: %s' % (
                run_metadata_key, value)
            return abort(make_response(msg, 400))
        # Append one feed entry per newly-seen failed run.
        for run in failed_runs:
            meta = api.get_run_metadata(run.uuid, session=session)
            uuid = [x.value for x in meta if x.key == 'build_uuid'][0]
            entry = fg.add_entry()
            entry.id(uuid)
            entry.title('Failed Run %s' % uuid)
            entry.published(pytz.utc.localize(run.run_at))
            entry.link({'href': run.artifacts, 'rel': 'alternate'})
            build_name = [x.value for x in meta if x.key == 'build_name'][0]
            # TODO(mtreinish): Make this html
            metadata_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
                'g/%s/%s' % (run_metadata_key, value))
            job_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
                'job/%s' % build_name)
            content = 'Metadata page: %s\n' % metadata_url
            content += '\nJob Page %s' % job_url
            entry.description(content)
    return feeds[run_metadata_key][value].rss_str()
Example #11
0
def _get_metadata(run_id):
    """Return the run_metadata rows associated with a run as dicts.

    :param run_id: the id of the run whose metadata should be fetched
    :return: a list of dicts, one per run_metadata row
    """
    # NOTE(review): the DB URI (with redacted credentials) is hard-coded
    # here; it should come from configuration rather than source.
    engine = create_engine('mysql://*****:*****@logstash.openstack.org' +
                           ':3306/subunit2sql')
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        metadata = api.get_run_metadata(run_id, session=session)
        return [meta.to_dict() for meta in metadata]
    finally:
        # BUG FIX: the session used to be leaked; always close it.
        session.close()
Example #12
0
 def _get_recent(status):
     """Return recent test runs with the given status plus matched bugs.

     When a classifier is configured, one elastic-recheck lookup thread
     is started per failed run (via ``_populate_bug_dict``); all threads
     are joined before the response is assembled.

     :param status: test-run status to report on
     :return: dict with 'test_runs' (serialized runs) and 'bugs'
     """
     with session_scope() as session:
         failed_runs = api.get_recent_failed_runs(num_runs, session)
         job_names = {}
         for run in failed_runs:
             metadata = api.get_run_metadata(run, session=session)
             short_uuid = None
             change_num = None
             patch_num = None
             for meta in metadata:
                 if meta.key == 'build_short_uuid':
                     short_uuid = meta.value
                 elif meta.key == 'build_change':
                     change_num = meta.value
                 elif meta.key == 'build_patchset':
                     patch_num = meta.value
                 elif meta.key == 'build_name':
                     job_names[run] = meta.value
             # BUG FIX: this block used to sit inside the metadata loop
             # above, starting one ES lookup thread per metadata row
             # instead of one per run; it now runs once per run.
             global classifier
             if classifier:
                 # NOTE(mtreinish): If the required metadata fields
                 # aren't present skip ES lookup
                 if not short_uuid or not change_num or not patch_num:
                     continue
                 query_thread = threading.Thread(
                     target=_populate_bug_dict,
                     args=(change_num, patch_num, short_uuid, run))
                 query_threads.append(query_thread)
                 query_thread.start()
         test_runs = api.get_test_runs_by_status_for_run_ids(
             status, failed_runs, session=session, include_run_id=True)
         output = []
         for run in test_runs:
             # Datetimes are serialized in place for the JSON response.
             run['start_time'] = run['start_time'].isoformat()
             run['stop_time'] = run['stop_time'].isoformat()
             run['job_name'] = job_names.get(run['uuid'])
             output.append(run)
         # Wait for all bug-lookup threads so bug_dict is populated.
         for thread in query_threads:
             thread.join()
         return {'test_runs': output, 'bugs': bug_dict}
Example #13
0
 def _get_recent(status):
     """Return recent test runs with the given status plus matched bugs.

     When a classifier is configured, one elastic-recheck lookup thread
     is started per failed run (via ``_populate_bug_dict``); all threads
     are joined before the response is returned.

     :param status: test-run status to report on
     :return: dict with 'test_runs' (serialized runs) and 'bugs'
     """
     with session_scope() as session:
         failed_runs = api.get_recent_failed_runs(num_runs, session)
         global classifier
         if classifier:
             for run in failed_runs:
                 # Pull the identifiers needed for the ES lookup out of
                 # the run's metadata.
                 metadata = api.get_run_metadata(run, session=session)
                 short_uuid = None
                 change_num = None
                 patch_num = None
                 for meta in metadata:
                     if meta.key == 'build_short_uuid':
                         short_uuid = meta.value
                     elif meta.key == 'build_change':
                         change_num = meta.value
                     elif meta.key == 'build_patchset':
                         patch_num = meta.value
                 # NOTE(mtreinish): If the required metadata fields aren't
                 # present skip ES lookup
                 if not short_uuid or not change_num or not patch_num:
                     continue
                 query_thread = threading.Thread(
                     target=_populate_bug_dict, args=(change_num, patch_num,
                                                      short_uuid, run))
                 query_threads.append(query_thread)
                 query_thread.start()
         test_runs = api.get_test_runs_by_status_for_run_ids(
             status, failed_runs, session=session, include_run_id=True)
         output = []
         for run in test_runs:
             # Datetimes are serialized in place for the JSON response.
             run['start_time'] = run['start_time'].isoformat()
             run['stop_time'] = run['stop_time'].isoformat()
             output.append(run)
         # Wait for all bug-lookup threads so bug_dict is populated.
         for thread in query_threads:
             thread.join()
         return {'test_runs': output, 'bugs': bug_dict}
Example #14
0
 def _get_data(test_id, start_date, stop_date):
     """Build time-series data for a test plus details of its failed runs.

     :param test_id: the test_id whose runs are aggregated
     :param start_date: lower bound for test runs considered
     :param stop_date: upper bound for test runs considered
     :return: dict of time-series data plus a 'failed_runs' list
     """
     with session_scope() as session:
         db_test_runs = api.get_test_runs_by_test_test_id(
             test_id, session=session, start_date=start_date,
             stop_date=stop_date)
         if not db_test_runs:
             # NOTE(mtreinish) if no data is returned from the DB just
             # return an empty set response, the test_run_aggregator
             # function assumes data is present.
             return {'numeric': {}, 'data': {}, 'failed_runs': {}}
         test_runs =\
             test_run_aggregator.convert_test_runs_list_to_time_series_dict(
                 db_test_runs, datetime_resolution)
         failed_run_ids = [
             x.run_id for x in db_test_runs if x.status == 'fail']
         failed_runs = api.get_runs_by_ids(failed_run_ids, session=session)
         job_names = {}
         providers = {}
         failed_uuids = [x.uuid for x in failed_runs]
         # Batch the failed uuids into groups of at most 10 for the
         # metadata lookups below.
         split_uuids = []
         if len(failed_uuids) <= 10:
             split_uuids = [[x] for x in failed_uuids]
         else:
             for i in range(0, len(failed_uuids), 10):
                 end = i + 10
                 split_uuids.append(failed_uuids[i:end])
         for uuids in split_uuids:
             change_dict = {}
             for uuid in uuids:
                 metadata = api.get_run_metadata(uuid, session=session)
                 short_uuid = None
                 change_num = None
                 patch_num = None
                 for meta in metadata:
                     if meta.key == 'build_short_uuid':
                         short_uuid = meta.value
                     elif meta.key == 'build_change':
                         change_num = meta.value
                     elif meta.key == 'build_patchset':
                         patch_num = meta.value
                     elif meta.key == 'build_name':
                         job_names[uuid] = meta.value
                     elif meta.key == 'node_provider':
                         providers[uuid] = meta.value
                 # NOTE(mtreinish): If the required metadata fields
                 # aren't present skip ES lookup
                 if not short_uuid or not change_num or not patch_num:
                     continue
             # NOTE(review): this block sits outside the uuid loop, so
             # only the last uuid of each batch (with its leaked loop
             # values) reaches change_dict — looks like a misindentation;
             # confirm against upstream before changing.
             global classifier
             if classifier:
                 change_dict[uuid] = {
                     'change_num': change_num,
                     'patch_num': patch_num,
                     'short_uuid': short_uuid,
                 }
                 query_thread = threading.Thread(
                     target=_populate_bug_dict, args=[change_dict])
                 query_threads.append(query_thread)
                 query_thread.start()
         output = []
         # Join the lookup threads so bug_dict is complete before use.
         for thread in query_threads:
             thread.join()
         for run in failed_runs:
             temp_run = {}
             temp_run['provider'] = providers.get(run.uuid)
             temp_run['job_name'] = job_names.get(run.uuid)
             temp_run['run_at'] = run.run_at.isoformat()
             temp_run['artifacts'] = run.artifacts
             temp_run['bugs'] = bug_dict.get(run.uuid, [])
             output.append(temp_run)
         test_runs['failed_runs'] = output
     return test_runs
Example #15
0
def get_recent_failed_runs_rss(run_metadata_key, value):
    """Serve an RSS feed of recent failed runs, with their failing tests.

    Feed objects and the timestamp of the newest run already published
    are cached in the module-level ``feeds`` dict between requests.
    """
    run_metadata_key = parse.unquote(run_metadata_key)
    value = parse.unquote(value)
    url = request.url
    # Lazily create the cached feed and its "last run seen" marker for
    # this key/value pair.
    if run_metadata_key not in feeds:
        feeds[run_metadata_key] = {
            value: _gen_feed(url, run_metadata_key, value)
        }
        feeds["last runs"][run_metadata_key] = {value: None}
    elif value not in feeds[run_metadata_key]:
        feeds[run_metadata_key][value] = _gen_feed(url, run_metadata_key,
                                                   value)
        feeds["last runs"][run_metadata_key][value] = None
    fg = feeds[run_metadata_key][value]
    with session_scope() as session:
        # Only fetch failures newer than the last one already in the feed.
        failed_runs = api.get_recent_failed_runs_by_run_metadata(
            run_metadata_key,
            value,
            start_date=feeds["last runs"][run_metadata_key][value],
            session=session)
        if failed_runs:
            last_run = sorted([x.run_at for x in failed_runs])[-1]
            # Nothing new since the feed was last built: serve it as-is.
            if feeds["last runs"][run_metadata_key][value] == last_run:
                return feeds[run_metadata_key][value].rss_str()
            feeds["last runs"][run_metadata_key][value] = last_run
        else:
            # No new failures: distinguish "no matching runs at all"
            # (404) from "runs exist but none failed recently".
            count = api.get_runs_counts_by_run_metadata(run_metadata_key,
                                                        value,
                                                        session=session)
            if count == 0:
                msg = 'No matching runs found with %s=%s' % (run_metadata_key,
                                                             value)
                return abort(make_response(msg, 404))
        # Append one feed entry per newly-seen failed run, including the
        # list of its failing tests as HTML links.
        for run in failed_runs:
            meta = api.get_run_metadata(run.uuid, session=session)
            failing_test_runs = api.get_failing_from_run(run.id,
                                                         session=session)
            uuid = [x.value for x in meta if x.key == 'build_uuid'][0]
            build_name = [x.value for x in meta if x.key == 'build_name'][0]
            entry = fg.add_entry()
            entry.id(uuid)
            entry.title('Failed Run %s/%s' % (build_name, uuid[:7]))
            entry.published(pytz.utc.localize(run.run_at))
            entry.link({'href': run.artifacts, 'rel': 'alternate'})
            metadata_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
                'g/%s/%s' % (run_metadata_key, value))
            job_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
                'job/%s' % build_name)
            content = '<ul>'
            content += '<li><a href="%s">Metadata page</a></li>\n' % (
                metadata_url)
            content += '<li><a href="%s">Job Page</a></li>' % (job_url)
            content += '</ul>'
            content += '<h3>Failed tests</h3>'
            content += '<ul>'
            for failing_test_run in failing_test_runs:
                content += '<li><a href="%s">%s</a></li>' % (
                    rss_opts['frontend_url'] + '/#/test/' +
                    failing_test_run.test.test_id,
                    failing_test_run.test.test_id)
            content += '</ul>'
            entry.description(content)
    response = make_response(feeds[run_metadata_key][value].rss_str())
    response.headers['Content-Type'] = 'application/xml; charset=utf-8'
    return response
Example #16
0
 def _get_data(test_id, start_date, stop_date):
     """Build time-series data for a test plus details of its failed runs.

     :param test_id: the test_id whose runs are aggregated
     :param start_date: lower bound for test runs considered
     :param stop_date: upper bound for test runs considered
     :return: dict of time-series data plus a 'failed_runs' list
     """
     with session_scope() as session:
         db_test_runs = api.get_test_runs_by_test_test_id(
             test_id,
             session=session,
             start_date=start_date,
             stop_date=stop_date)
         if not db_test_runs:
             # NOTE(mtreinish) if no data is returned from the DB just
             # return an empty set response, the test_run_aggregator
             # function assumes data is present.
             return {'numeric': {}, 'data': {}, 'failed_runs': {}}
         test_runs =\
             test_run_aggregator.convert_test_runs_list_to_time_series_dict(
                 db_test_runs, datetime_resolution)
         failed_run_ids = [
             x.run_id for x in db_test_runs if x.status == 'fail'
         ]
         failed_runs = api.get_runs_by_ids(failed_run_ids, session=session)
         job_names = {}
         providers = {}
         failed_uuids = [x.uuid for x in failed_runs]
         # Batch the failed uuids into groups of at most 10 for the
         # metadata lookups below.
         split_uuids = []
         if len(failed_uuids) <= 10:
             split_uuids = [[x] for x in failed_uuids]
         else:
             for i in range(0, len(failed_uuids), 10):
                 end = i + 10
                 split_uuids.append(failed_uuids[i:end])
         for uuids in split_uuids:
             change_dict = {}
             for uuid in uuids:
                 metadata = api.get_run_metadata(uuid, session=session)
                 short_uuid = None
                 change_num = None
                 patch_num = None
                 for meta in metadata:
                     if meta.key == 'build_short_uuid':
                         short_uuid = meta.value
                     elif meta.key == 'build_change':
                         change_num = meta.value
                     elif meta.key == 'build_patchset':
                         patch_num = meta.value
                     elif meta.key == 'build_name':
                         job_names[uuid] = meta.value
                     elif meta.key == 'node_provider':
                         providers[uuid] = meta.value
                 # NOTE(mtreinish): If the required metadata fields
                 # aren't present skip ES lookup
                 if not short_uuid or not change_num or not patch_num:
                     continue
             # NOTE(review): this block sits outside the uuid loop, so
             # only the last uuid of each batch (with its leaked loop
             # values) reaches change_dict — looks like a misindentation;
             # confirm against upstream before changing.
             global classifier
             if classifier:
                 change_dict[uuid] = {
                     'change_num': change_num,
                     'patch_num': patch_num,
                     'short_uuid': short_uuid,
                 }
                 query_thread = threading.Thread(target=_populate_bug_dict,
                                                 args=[change_dict])
                 query_threads.append(query_thread)
                 query_thread.start()
         output = []
         # Join the lookup threads so bug_dict is complete before use.
         for thread in query_threads:
             thread.join()
         for run in failed_runs:
             temp_run = {}
             temp_run['provider'] = providers.get(run.uuid)
             temp_run['job_name'] = job_names.get(run.uuid)
             temp_run['run_at'] = run.run_at.isoformat()
             temp_run['artifacts'] = run.artifacts
             temp_run['bugs'] = bug_dict.get(run.uuid, [])
             output.append(temp_run)
         test_runs['failed_runs'] = output
     return test_runs
Example #17
0
def _get_result_for_run(run,
                        session,
                        use_db=True,
                        get_tests=False,
                        data_path=None,
                        s3=None):
    """Return the result dict for a run, using a local/S3 cache if possible.

    :param run: the run object whose result is wanted
    :param session: DB session used on a cache miss
    :param use_db: when False never touch the DB (cache-only lookup)
    :param get_tests: when True include the per-test list in the result
    :param data_path: optional base path for the on-disk/S3 cache
    :param s3: optional S3 client; a default one is created when None so
        its exception classes are always available
    :return: a dict of run metadata (plus 'tests' when requested), or
        None when a cached file cannot be read or use_db is False
    """
    # First try to get the data from disk
    metadata_folder = get_data_path(data_path=data_path, s3=s3)
    metadata_folder.append('.metadata')
    use_s3 = (metadata_folder[0] == 's3:')
    # Falsy until a cache hit rebinds it to a zero-arg callable yielding
    # the cached stream (S3 body) or filename.
    stream_or_file = False
    # If s3 is None, get a vanilla s3 client just for the exceptions
    s3 = s3 or get_s3_client()
    if use_s3:
        object_key = os.sep.join(metadata_folder[3:] + [run.uuid + '.json.gz'])
        try:
            s3.head_object(Bucket=metadata_folder[2], Key=object_key)

            def stream_or_file():
                return s3.get_object(Bucket=metadata_folder[2],
                                     Key=object_key)['Body']
        except s3.exceptions.ClientError:
            # Not found, continue
            pass
    else:
        os.makedirs(os.sep.join(metadata_folder), exist_ok=True)
        result_file = os.sep.join(metadata_folder + [run.uuid + '.json.gz'])
        if os.path.isfile(result_file):

            def stream_or_file():
                return result_file

    # If cached
    if stream_or_file:
        try:
            with gzip.open(stream_or_file(), mode='rt') as f:
                if use_db:
                    # When using remote let me know if loading from cache
                    print("%s: metadata found in cache" % run.uuid)
                return json.loads(f.read())
        except IOError as ioe:
            # Something went wrong opening the file, so we won't load
            # this run.
            # BUG FIX: the format string was passed to print() alongside
            # a tuple instead of being %-interpolated.
            print('Run %s found in the dataset, however: %s' % (run.uuid,
                                                                ioe))
            return None

    # If no local cache, and use_db is False, return nothing
    if not use_db:
        print("No local data for %s, use_db set to false" % run.uuid)
        return None

    # If no local cache, get data from the DB
    result = {}

    # We may need the list of tests
    if get_tests:
        test_runs = api.get_test_runs_by_run_id(run.uuid, session=session)
        tests = []
        for test_run in test_runs:
            test = {'status': test_run.status}
            # Re-attach the sub-second component stored separately.
            start_time = test_run.start_time
            start_time = start_time.replace(
                microsecond=test_run.start_time_microsecond)
            stop_time = test_run.stop_time
            stop_time = stop_time.replace(
                microsecond=test_run.stop_time_microsecond)
            test['start_time'] = start_time
            test['stop_time'] = stop_time
            tests.append(test)

    # Setup run metadata
    if run.fails > 0 or run.passes == 0:
        result['status'] = 1  # Failed
    else:
        result['status'] = 0  # Passed
    result['artifact'] = run.artifacts
    # Get extra run metadata
    metadata = api.get_run_metadata(run.uuid, session)
    for md in metadata:
        result[md['key']] = md['value']

    # Cache the json file, without tests
    if use_s3:
        s3.put_object(Bucket=metadata_folder[2],
                      Key=object_key,
                      Body=gzip.compress(json.dumps(result).encode()))
    else:
        with gzip.open(result_file, mode='wb') as local_cache:
            local_cache.write(json.dumps(result).encode())
    print("%s: metadata cached from URL" % run.uuid)

    # Adding the tests after caching
    if get_tests:
        result['tests'] = tests
    return result