Example #1
def RunTBMv2Metric(tbmv2_metric, json_trace, force_recompute=False):
    message = 'Running TBMv2 Metric...'
    PrintNoLn(message)
    hset = histogram_set.HistogramSet()

    cached_results = GetV2CachedResultPath(tbmv2_metric, json_trace)

    if not force_recompute and os.path.exists(cached_results):
        with open(cached_results) as f:
            hset.ImportDicts(json.load(f))
        CursorErase(len(message))
        return hset

    metrics = [tbmv2_metric]
    TEN_MINUTES = 60 * 10
    trace_abspath = os.path.abspath(json_trace)
    mre_result = metric_runner.RunMetricOnSingleTrace(trace_abspath,
                                                      metrics,
                                                      timeout=TEN_MINUTES)
    if mre_result.failures:
        raise Exception("Error computing TBMv2 metric for %s" % json_trace)
    if 'histograms' not in mre_result.pairs:
        raise Exception("Metric %s is empty for trace %s" %
                        (tbmv2_metric, json_trace))
    histograms = mre_result.pairs['histograms']
    hset.ImportDicts(histograms)
    with open(cached_results, 'w') as f:
        json.dump(histograms, f)

    CursorErase(len(message))
    return hset
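The helpers PrintNoLn, CursorErase, and GetV2CachedResultPath are not shown above. A minimal sketch of plausible implementations, assuming a single-line terminal progress message and a cache file stored next to the trace; the cache path scheme is a guess, not the real one:

import json
import os
import sys

def PrintNoLn(message):
    # Assumed behavior: print a progress message without a trailing newline
    # so it can be erased once the metric finishes.
    sys.stdout.write(message)
    sys.stdout.flush()

def CursorErase(num_chars):
    # Assumed behavior: backspace over the last num_chars characters,
    # blank them out with spaces, and move the cursor back again.
    sys.stdout.write('\b' * num_chars + ' ' * num_chars + '\b' * num_chars)
    sys.stdout.flush()

def GetV2CachedResultPath(tbmv2_metric, json_trace):
    # Hypothetical cache layout: one JSON file per (metric, trace) pair,
    # stored next to the trace file.
    return '%s.%s_cached_v2.json' % (json_trace, tbmv2_metric)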
Example #2
def _RunMetric(test_result, metrics):
    html_trace = test_result['outputArtifacts'][HTML_TRACE_NAME]
    html_local_path = html_trace['filePath']
    html_remote_url = html_trace.get('viewUrl')

    # The timeout needs to be coordinated with the Swarming IO timeout for the
    # task that runs this code. If this timeout is longer or close in length
    # to the swarming IO timeout then we risk being forcibly killed for not
    # producing any output. Note that this could be fixed by periodically
    # outputting logs while waiting for metrics to be calculated.
    TEN_MINUTES = 60 * 10
    mre_result = metric_runner.RunMetricOnSingleTrace(
        html_local_path,
        metrics,
        canonical_url=html_remote_url,
        timeout=TEN_MINUTES,
        extra_import_options={'trackDetailedModelStats': True})

    if mre_result.failures:
        util.SetUnexpectedFailure(test_result)
        for f in mre_result.failures:
            logging.error('Failure recorded for test %s: %s',
                          test_result['testPath'], f)

    return mre_result.pairs.get('histograms', [])
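A sketch of how _RunMetric might be invoked. The test_result shape mirrors the fields the function reads; the value of HTML_TRACE_NAME, the test path, and the metric name are illustrative assumptions:

HTML_TRACE_NAME = 'trace.html'  # assumed value; the real constant is defined elsewhere

test_result = {
    'testPath': 'some_benchmark/some_story',  # hypothetical
    'outputArtifacts': {
        HTML_TRACE_NAME: {
            'filePath': '/tmp/artifacts/trace.html',
            'viewUrl': 'https://storage.example.com/trace.html',  # optional
        },
    },
}
histogram_dicts = _RunMetric(test_result, ['loadingMetric'])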
Example #3
def _ComputeMetricsInPool(run_and_trace_value):
    # Tuple parameters in a function signature are Python 2 only syntax;
    # unpack explicitly so this also runs under Python 3.
    run, trace_value = run_and_trace_value
    story_name = run.story.name
    try:
        assert not trace_value.is_serialized, (
            "%s: TraceValue should not be serialized." % story_name)
        retvalue = {
            'run': run,
            'fail': [],
            'histogram_dicts': None,
            'scalars': []
        }
        extra_import_options = {'trackDetailedModelStats': True}

        logging.info('%s: Serializing trace.', story_name)
        trace_value.SerializeTraceData()
        trace_size_in_mib = os.path.getsize(trace_value.filename) / (2**20)
        # Bail out on traces that are too big. See crbug.com/812631 for more
        # details.
        if trace_size_in_mib > 400:
            retvalue['fail'].append('%s: Trace size is too big: %s MiB' %
                                    (story_name, trace_size_in_mib))
            return retvalue

        logging.info('%s: Starting to compute metrics on trace.', story_name)
        start = time.time()
        # This timeout needs to be coordinated with the Swarming IO timeout for the
        # task that runs this code. If this timeout is longer or close in length
        # to the swarming IO timeout then we risk being forcibly killed for not
        # producing any output. Note that this could be fixed by periodically
        # outputting logs while waiting for metrics to be calculated.
        timeout = _TEN_MINUTES
        mre_result = metric_runner.RunMetricOnSingleTrace(
            trace_value.filename,
            trace_value.timeline_based_metric,
            extra_import_options,
            canonical_url=trace_value.trace_url,
            timeout=timeout)
        logging.info('%s: Computing metrics took %.3f seconds.',
                     story_name, time.time() - start)

        if mre_result.failures:
            for f in mre_result.failures:
                retvalue['fail'].append('%s: %s' % (story_name, str(f)))

        histogram_dicts = mre_result.pairs.get('histograms', [])
        retvalue['histogram_dicts'] = histogram_dicts

        scalars = []
        for d in mre_result.pairs.get('scalars', []):
            scalars.append(
                common_value_helpers.TranslateScalarValue(d, trace_value.page))
        retvalue['scalars'] = scalars
        return retvalue
    except Exception as e:  # pylint: disable=broad-except
        # logging exception here is the only way to get a stack trace since
        # multiprocessing's pool implementation does not save that data. See
        # crbug.com/953365.
        logging.error('%s: Exception while calculating metric', story_name)
        logging.exception(e)
        raise
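The function name and the comment about multiprocessing's pool suggest this is mapped over a worker pool. A plausible driver, assuming the single-argument signature above; the helper name and pool size are made up:

import multiprocessing

def _ComputeAllMetricsInPool(runs_and_trace_values, num_workers=4):
    # Hypothetical driver: fan (run, trace_value) pairs out to worker
    # processes and collect the per-story result dicts.
    pool = multiprocessing.Pool(num_workers)
    try:
        return pool.map(_ComputeMetricsInPool, runs_and_trace_values)
    finally:
        pool.close()
        pool.join()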
Example #4
def _PoolWorker(test_result):
    metrics = [
        tag['value'] for tag in test_result['tags'] if tag['key'] == 'tbmv2'
    ]
    html_trace = test_result['outputArtifacts'][HTML_TRACE_NAME]
    html_local_path = html_trace['filePath']
    html_remote_url = html_trace.get('remoteUrl')

    logging.info('%s: Starting to compute metrics on trace.',
                 test_result['testPath'])
    start = time.time()
    # The timeout needs to be coordinated with the Swarming IO timeout for the
    # task that runs this code. If this timeout is longer or close in length
    # to the swarming IO timeout then we risk being forcibly killed for not
    # producing any output. Note that this could be fixed by periodically
    # outputting logs while waiting for metrics to be calculated.
    TEN_MINUTES = 60 * 10
    mre_result = metric_runner.RunMetricOnSingleTrace(
        html_local_path,
        metrics,
        canonical_url=html_remote_url,
        timeout=TEN_MINUTES,
        extra_import_options={'trackDetailedModelStats': True})
    logging.info('%s: Computing metrics took %.3f seconds.',
                 test_result['testPath'], time.time() - start)

    if mre_result.failures:
        test_result['status'] = 'FAIL'
        for f in mre_result.failures:
            logging.error('Failure recorded for test %s: %s',
                          test_result['testPath'], f)

    return mre_result.pairs.get('histograms', [])
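A sketch of an input that would exercise the tag filter above: only tags whose key is 'tbmv2' contribute metric names, and the artifact fields mirror what _PoolWorker reads. All values here are hypothetical:

HTML_TRACE_NAME = 'trace.html'  # assumed value, as in the earlier sketch

test_result = {
    'testPath': 'some_benchmark/some_story',
    'status': 'PASS',
    'tags': [
        {'key': 'tbmv2', 'value': 'loadingMetric'},
        {'key': 'owner', 'value': 'someone@example.com'},  # ignored by the filter
    ],
    'outputArtifacts': {
        HTML_TRACE_NAME: {
            'filePath': '/tmp/artifacts/trace.html',
            'remoteUrl': 'https://storage.example.com/trace.html',
        },
    },
}
histogram_dicts = _PoolWorker(test_result)  # sets status to 'FAIL' on metric failures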
Example #5
def RunTBMv2Metric(tbmv2_name, html_trace_filename, traces_dir):
    metrics = [tbmv2_name]
    TEN_MINUTES = 60 * 10
    mre_result = metric_runner.RunMetricOnSingleTrace(html_trace_filename,
                                                      metrics,
                                                      timeout=TEN_MINUTES)
    with tempfile.NamedTemporaryFile(mode='w',  # text mode so json.dump works on Python 3
                                     dir=traces_dir,
                                     suffix='_tbmv2.json',
                                     delete=False) as out_file:
        json.dump(mre_result.pairs.get('histograms', []),
                  out_file,
                  indent=2,
                  sort_keys=True,
                  separators=(',', ': '))
    logging.debug('Saved TBMv2 metric to %s', out_file.name)
    return out_file.name
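Because the temporary file is created with delete=False, it outlives the with block, and the function returns its path. A caller could load the histograms back like this (a sketch; the helper name and arguments are made up):

def LoadTBMv2Histograms(result_path):
    # Read back the histogram dicts that RunTBMv2Metric dumped to disk.
    with open(result_path) as f:
        return json.load(f)

# Hypothetical usage:
# histograms = LoadTBMv2Histograms(
#     RunTBMv2Metric('loadingMetric', 'trace.html', '/tmp/traces'))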