Example #1
0
def history(data, output, key='job', limit=None):
    """Write an HTML performance-history report for *data* to *output*.

    Args:
        data: Collection of measurement records passed to ``_history_data``.
        output: Destination of the generated HTML report.
        key: Grouping key for the history data (default ``'job'``).
        limit: Optional cap on the number of history entries; ``None``
            means no limit.
    """
    report_title = var._project_name + ' Performance History'
    with html.Report(output, report_title) as report:
        dates, measurements = _history_data(data, key, limit)

        with report.image_grid() as grid:
            # One plot per measurement, in deterministic (sorted) key order.
            for name, series in sorted(measurements.items()):
                _history_plot(str(name), dates, series, grid.image())
Example #2
0
def compare_backends(data, output):
    """Generate an HTML report comparing GridTools backends.

    All entries in *data* must share the same computational domain;
    this is checked up front.

    Args:
        data: List of result dicts, each carrying a ``'domain'`` entry.
        output: Destination of the generated HTML report.
    """
    domain = data[0]['domain']
    assert all(d['domain'] == domain for d in data)

    title = ('GridTools Backends Comparison for Domain '
             + '×'.join(str(extent) for extent in domain))
    with html.Report(output, title) as report:
        _add_backend_comparison_plots(report, data)
        labels = [f'Configuration {i + 1}' for i in range(len(data))]
        _add_info(report, labels, data)
Example #3
0
def history(data, output, key='job', limit=None):
    """Write a domain-specific GridTools performance-history HTML report.

    All entries in *data* must share the same computational domain;
    this is checked up front.

    Args:
        data: Collection of result dicts, each carrying a ``'domain'`` entry.
        output: Destination of the generated HTML report.
        key: Grouping key for the history data (default ``'job'``).
        limit: Optional cap on the number of history entries.
    """
    domain = data[0]['domain']
    assert all(d['domain'] == domain for d in data)

    title = ('GridTools Performance History for Domain '
             + '×'.join(str(extent) for extent in domain))
    with html.Report(output, title) as report:
        dates, measurements = _history_data(data, key, limit)

        with report.image_grid() as grid:
            # One plot per measurement, in deterministic (sorted) key order.
            for name, series in sorted(measurements.items()):
                _history_plot(str(name), dates, series, grid.image())
Example #4
0
def compare_all(results, references, output):
    """Compare each result file against its reference and build one report.

    Args:
        results: Paths of JSON result files.
        references: Paths of JSON reference files, parallel to *results*.
        output: Destination of the generated HTML report.

    Returns:
        0 if every comparison succeeded, otherwise the first non-zero
        exit code produced by ``compare_one``.

    Raises:
        IndexError: If *references* is shorter than *results*.
    """
    global_exitcode = 0
    title = var._project_name + ' Performance'
    with html.Report(output, title) as report:
        # enumerate() replaces the original manual `index = 0` / `index += 1`
        # counter; indexing into `references` (rather than zip) preserves the
        # original IndexError on a length mismatch instead of silently
        # truncating the comparison.
        for index, res in enumerate(results):
            ref = references[index]
            exitcode = compare_one(report, _load_json(ref), _load_json(res),
                                   output)
            # `or` keeps the first non-zero exit code encountered.
            global_exitcode = global_exitcode or exitcode
        _add_explanation_of_symbols(report)
    return global_exitcode
Example #5
0
def compare(before, after, output):
    """Build an HTML report comparing two measurement sets.

    Args:
        before: Baseline measurement data.
        after: New measurement data to compare against the baseline.
        output: Destination of the generated HTML report.

    Returns:
        The exit code produced by the comparison table (non-zero when
        a significant difference was detected).
    """
    before_outs = _OutputKey.outputs_by_key(before)
    after_outs = _OutputKey.outputs_by_key(after)

    # Confidence intervals only for keys present in both runs.
    cis = {}
    for out_key, outputs in after_outs.items():
        if out_key in before_outs:
            cis[out_key] = _ConfidenceInterval.compare_medians(
                before_outs[out_key], outputs)

    title = var._project_name + ' Performance'
    with html.Report(output, title) as report:
        exitcode = _add_comparison_table(report, cis)
        _add_comparison_plots(report, before_outs, after_outs, cis)
        _add_info(report, ['Before', 'After'], [before, after])
    return exitcode
Example #6
0
def compare(before, after, output):
    """Build a domain-specific HTML report comparing two measurement sets.

    *before* and *after* must share the same computational domain; this
    is checked before the report is written.

    Args:
        before: Baseline measurement data (dict with a ``'domain'`` entry).
        after: New measurement data with the same ``'domain'``.
        output: Destination of the generated HTML report.
    """
    before_outs = _OutputKey.outputs_by_key(before)
    after_outs = _OutputKey.outputs_by_key(after)

    # Confidence intervals only for keys present in both runs.
    cis = {}
    for out_key, outputs in after_outs.items():
        if out_key in before_outs:
            cis[out_key] = _ConfidenceInterval.compare_medians(
                before_outs[out_key], outputs)

    assert before['domain'] == after['domain']
    title = ('GridTools Performance for Domain '
             + '×'.join(str(extent) for extent in after['domain']))
    with html.Report(output, title) as report:
        _add_comparison_table(report, cis)
        _add_comparison_plots(report, before_outs, after_outs, cis)
        _add_info(report, ['Before', 'After'], [before, after])
Example #7
0
def compare_executors(data, output):
    """Generate an HTML report comparing HPX executors.

    Args:
        data: List of per-configuration measurement data.
        output: Destination of the generated HTML report.
    """
    with html.Report(output, 'HPX executors Comparison') as report:
        _add_executor_comparison_plots(report, data)
        config_labels = [f'Configuration {n + 1}' for n in range(len(data))]
        _add_info(report, config_labels, data)