Example #1
def per_pass_compile_times(axis_size, run_data, include_opts):
    # Also plot a graph for each specific pass.

    for compile_pass in run_data['0.sml']:
        errors = []
        y_data = []
        x_data = []

        for i in range(axis_size):
            # Get the data for the ith file:
            ith = run_data[str(i) + '.sml']

            (min_err, max_err, value) = \
                graph.generate_min_max_median(ith[compile_pass],
                                              delete_min_max=3)

            errors.append((min_err, max_err))
            y_data.append(value)
            x_data.append(i)

        fig = graph.draw_line(x_data,
                              y_data,
                              error_bars=errors,
                              x_label=gen_x_label_for(compile_pass, benchmark),
                              y_label="Time (ms)",
                              title=gen_title_for(compile_pass, benchmark))

        if include_opts:
            opts_string = "_with_opts_"
        else:
            opts_string = ""

        graph.save_to(
            fig, benchmark + opts_string + run_data['name'] + '_' +
            compile_pass + '.eps')
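
Example #1 above and Example #2 below both lean on graph.generate_min_max_median with delete_min_max=3. The sketch that follows is only an assumption about that helper's contract, inferred from the call sites (trim the most extreme samples, report the median, and return the error-bar offsets); the real graph module may differ, and in Example #2 it is also called with narrowing_function and averaging_function arguments that this sketch omits.

# Hypothetical sketch of the generate_min_max_median contract assumed above;
# not the actual graph module implementation.
def generate_min_max_median(samples, delete_min_max=0):
    # Drop the delete_min_max smallest and largest samples to reduce noise.
    trimmed = sorted(samples)[delete_min_max:len(samples) - delete_min_max]
    median = trimmed[len(trimmed) // 2]
    # Error-bar offsets are measured from the median to the trimmed extremes.
    return (median - trimmed[0], trimmed[-1] - median, median)


# Usage with twelve noisy timing samples (ms):
print(generate_min_max_median([5, 7, 8, 8, 9, 9, 10, 10, 11, 12, 40, 80],
                              delete_min_max=3))
# (2, 1, 10)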
Example #2
def generic_compile_time_graph(axis_size, run_data, include_opts):
    # Split the stacked series into AST, TIR, byteR and, when optimisations
    # are included, the optimisation passes.
    number = 4 if include_opts else 3
    runs = run_data['runs']
    x_data = []
    y_data = []
    errors = []

    for i in range(axis_size):
        x_data.append(i)

        y_data_dict = run_data[str(i) + '.sml']

        ast_times = \
            sum_ast_times_from(y_data_dict, runs,
                               include_opts=include_opts)
        tir_times = \
            sum_tir_times_from(y_data_dict, runs,
                               include_opts=include_opts)
        if include_opts:
            opt_times = sum_opt_times(y_data_dict, runs)

        byteR_times = \
            sum_byteR_times_from(y_data_dict, runs,
                                 include_opts=include_opts)

        # Build cumulative tuples: each element is the running total up to
        # and including that stage.
        if include_opts:
            tuples = [
                (ast_times[r],
                 ast_times[r] + tir_times[r],
                 ast_times[r] + tir_times[r] + opt_times[r],
                 ast_times[r] + tir_times[r] + opt_times[r] + byteR_times[r])
                for r in range(runs)
            ]
        else:
            tuples = [(ast_times[r],
                       ast_times[r] + tir_times[r],
                       ast_times[r] + tir_times[r] + byteR_times[r])
                      for r in range(runs)]

        if runs == 1:
            selected_tuple = tuples[0]
            errors = None
        else:
            # Do the min max selection by overall time.
            def select(x):
                return x[3 if include_opts else 2]

            def averager(samples):
                # Component-wise mean of the cumulative-time tuples; works
                # for both the three- and four-stage cases.
                n = float(len(samples))
                return tuple(sum(parts) / n for parts in zip(*samples))

            (min_err, max_err, selected_tuple) = \
                graph.generate_min_max_median(tuples,
                                              narrowing_function=select,
                                              averaging_function=averager,
                                              delete_min_max=3)
            errors.append((min_err, max_err))

        y_data.append(selected_tuple)

    if include_opts:
        labels = [
            "Time spent in AST Representation",
            "Time spent in TIR Representation", "Time spent Optimising",
            "Time spent in ByteR Representation"
        ]
    else:
        labels = [
            "Time spent in AST Representation",
            "Time spent in TIR Representation",
            "Time spent in ByteR Representation"
        ]

    fig = graph.draw_stacked_line(number,
                                  x_data,
                                  y_data,
                                  errors,
                                  y_label="Compile Time (ms)",
                                  x_label=gen_x_label_for(None, benchmark),
                                  title=(gen_title_for(None, benchmark)),
                                  legend=labels,
                                  ylim_max=gen_y_lim_for(None, benchmark))
    fig.show()

    if include_opts:
        opts_string = "_with_opts_"
    else:
        opts_string = ""

    graph.save_to(
        fig, benchmark + opts_string + run_data['name'] + '_compile_time.eps')
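
The tuples handed to graph.draw_stacked_line above are cumulative: each element is the running total up to and including that stage, presumably so the plot can shade each band between successive boundaries. A minimal standalone sketch of that accumulation, using made-up stage times:

# Hypothetical per-run stage times in ms (AST, TIR, optimiser, byteR).
stage_times = [12.0, 30.0, 8.0, 5.0]

# Running totals, mirroring the tuple construction in the example above.
boundaries = []
total = 0.0
for t in stage_times:
    total += t
    boundaries.append(total)

print(boundaries)  # [12.0, 42.0, 50.0, 55.0]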
Example #3
            data_dict = {}
            for test in opt_data[opt]['tests']:
                if test['name'] in BENCHMARKS:
                    data_dict[test['name']] = \
                        float(test[FIELD]) / float(no_opt_data[test['name']]) \
                        - 1

            this_list = []
            for benchmark in sorted(data_dict):
                this_list.append(data_dict[benchmark])

            data_points.append(this_list)

    fig = graph.draw_grouped_bar(len(used_opts),
                                 len(BENCHMARKS),
                                 data_points,
                                 sorted(BENCHMARKS),
                                 labels=sorted(used_opts),
                                 bottom=1,
                                 label_rotation=70,
                                 figsize=(8, 6),
                                 xlabel="Benchmarks",
                                 ylabel="Code Size (Bytes)",
                                 title=("Code Size under Individual "
                                        "Optimizations"))

    graph.save_to(fig, 'individual_passes_code_size.eps')

    if not args.nohold:
        fig.show()
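
Each value plotted in this example is the ratio of the optimised measurement (test[FIELD]) to the no-opts baseline, minus one, and the bars are drawn with bottom=1, so each bar reads as a relative change around the baseline. A quick worked example with invented numbers:

# Hypothetical sizes: an optimised build versus the no-opts baseline.
optimised_size = 1500.0
baseline_size = 1000.0

relative_change = optimised_size / baseline_size - 1
print(relative_change)  # 0.5: the bar extends 50% above the baseline at 1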
Example #4
        opt_names = []

        # This specifies where the bars are vertically centered.
        bottom_value = 1 if NORMALIZE[graph_no] else 0

        for opt in sorted(groups):
            if opt != 'no_opts' or not NORMALIZE[graph_no]:
                opt_names.append(OPT_NAMES[opt])
                data_list.append(groups[opt])
                errors_list.append(errors[opt])

        assert len(opt_names) == len(data_list)
        assert len(data_list) == len(errors_list)

        plot = graph.draw_grouped_bar(len(data_list), len(titles), data_list,
                                      titles, errors=errors_list,
                                      labels=opt_names, title=TITLES[graph_no],
                                      bottom=bottom_value,
                                      xlabel="Benchmark",
                                      ylabel=METRIC_NAME[graph_no],
                                      label_rotation=70,
                                      figsize=FIG_SIZE[graph_no],
                                      top_padding=TOP_PADDING[graph_no])

        if FIG_SIZE[graph_no]:
            plot.gcf().set_size_inches(*FIG_SIZE[graph_no])

        graph.save_to(plot, OUTPUT_FILES[graph_no])
        if not args.nohold:
            plot.show()
Example #5
        benchmark_name = os.path.basename(benchmark_file)

        # Now get the data for the runs:
        stacked_data = get_data_from(data[benchmark_file])

        processed_data[benchmark_name] = stacked_data
        benchmarks.append(benchmark_name)

    # This is stored in a category-major format.
    stacked_data_list = []

    for category in CATEGORIES_LIST:
        this_data = []
        for benchmark in sorted(benchmarks):
            this_data.append(processed_data[benchmark][category])
        stacked_data_list.append(this_data)

    print(stacked_data_list)

    fig = graph.draw_stacked_bar(len(CATEGORIES_LIST),
                                 len(benchmarks),
                                 stacked_data_list,
                                 sorted(benchmarks),
                                 labels=CATEGORIES_LIST,
                                 x_label='Benchmark',
                                 y_label='Compile Time (ms)',
                                 title=('Compile Time Breakdown in MLC'))
    graph.save_to(fig, 'compile_time_breakdown.eps')
    if not args.nohold:
        fig.show()
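
stacked_data_list here is category-major: one inner list per category, holding that category's value for every benchmark in sorted order. A small self-contained sketch of the expected shape, with hypothetical categories and numbers standing in for the real CATEGORIES_LIST and data:

# Hypothetical compile-time breakdown per benchmark (ms); these categories
# stand in for the module's real CATEGORIES_LIST.
CATEGORIES_LIST = ['parse', 'typecheck', 'codegen']
processed_data = {
    'fib':  {'parse': 3.0, 'typecheck': 9.0, 'codegen': 5.0},
    'life': {'parse': 4.0, 'typecheck': 12.0, 'codegen': 7.0},
}
benchmarks = sorted(processed_data)

# Category-major layout: stacked_data_list[c][b] is category c of benchmark b.
stacked_data_list = [[processed_data[b][c] for b in benchmarks]
                     for c in CATEGORIES_LIST]
print(stacked_data_list)  # [[3.0, 4.0], [9.0, 12.0], [5.0, 7.0]]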
Example #6
            compiler_data = builtup_data[benchmark][compiler]
            while str(measurement) + '.sml' in compiler_data:
                file_key = str(measurement) + '.sml'
                recorded_data = compiler_data[file_key]['subprocess_times']
                (min_err, max_err, med_value) = \
                    graph.generate_min_max_median(recorded_data,
                                                  delete_min_max=3)
                this_data.append(med_value)
                this_errors.append((min_err, max_err))

                x_data.append(measurement)
                measurement += 1

            y_errors.append(this_errors)
            y_data.append(this_data)

        # Draw the graph.
        plot = \
            graph.draw_multiple_lines(x_data, y_data, y_errors,
                                      plotter.gen_x_label_for(None, benchmark),
                                      y_label='Compile Time (ms)',
                                      title=('Compile time against number of'
                                             ' function declarations'),
                                      legend=None)

        graph.save_to(plot, benchmark + 'compiler_comparison.eps')
        if not args.nohold:
            pyplot.show(block=True)
Example #7
        data[compiler] = {
            'x': range(xmin, xmax),
            'y': yvals,
            'errors': yerrors,
        }

    x_values = []
    y_values = []
    y_errors = []
    labels = []
    colors = []

    for compiler in COMPILERS:
        x_values.append(data[compiler]['x'])
        y_values.append(data[compiler]['y'])
        y_errors.append(data[compiler]['errors'])
        colors.append(COLOR_MAP[compiler])
        labels.append(PRETTY_NAME_MAP[compiler])

    fig = graph.draw_multiple_lines(x_values, y_values, colors,
                                    error_bars=y_errors, legend=labels,
                                    title=('Compile Time vs '
                                           'log2(type variables)'),
                                    x_label='log2(type variables)',
                                    y_label='Compile Time (ms)')

    graph.save_to(fig, 'type_blowup_comparison_graph.eps')
    if not args.nohold:
        fig.show()
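
The per-compiler dict built at the top of this example has a simple shape: parallel x, y, and error series keyed by compiler. Below is a minimal sketch of assembling it with invented values; COMPILERS and the series are placeholders, not the real configuration.

# Hypothetical stand-ins for the module-level configuration.
COMPILERS = ['mlc', 'mlton']

data = {}
for compiler in COMPILERS:
    xmin, xmax = 0, 5
    yvals = [10.0 * (i + 1) for i in range(xmin, xmax)]  # times in ms
    yerrors = [(1.0, 1.5) for _ in yvals]                # (min, max) error bars
    data[compiler] = {
        'x': list(range(xmin, xmax)),
        'y': yvals,
        'errors': yerrors,
    }

print(data['mlc']['x'])  # [0, 1, 2, 3, 4]
print(data['mlc']['y'])  # [10.0, 20.0, 30.0, 40.0, 50.0]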