Ejemplo n.º 1
0
def plot_objective_value_evolution(avg_values, best_values, *args, **kwargs):
    """Plot average and best objective values vs iteration index on twin y axes.

    Arguments
    ---------
        avg_values: list of per-iteration average objective values (right axis, orange)
        best_values: list of per-iteration best objective values (left axis, blue)
        title (optional, kwarg): title set on the plot

    Returns
    -------
        The matplotlib figure.
    """
    fig, ax1 = viz_utils.setup_figure_1ax('Iteration index',
                                          shrink_ax=False,
                                          size=(8, 8))

    color = 'tab:blue'
    ax1.set_ylabel('Best obj value', color=color)
    ax1.plot(range(len(best_values)), best_values, color=color, linewidth=2)
    ax1.tick_params(axis='y', labelcolor=color)
    ax1.yaxis.set_ticks_position('none')

    ax2 = ax1.twinx()
    color = 'tab:orange'
    ax2.set_ylabel('Average obj value',
                   color=color)  # we already handled the x-label with ax1
    ax2.plot(range(len(avg_values)), avg_values, color=color)
    ax2.tick_params(axis='y', labelcolor=color)

    # Hide the twin axis frame entirely so only data and labels show.
    for side in ('top', 'right', 'bottom', 'left'):
        ax2.spines[side].set_visible(False)
    ax2.yaxis.set_ticks_position('none')

    if 'title' in kwargs:
        ax1.set_title(kwargs['title'])

    return fig
Ejemplo n.º 2
0
def mult_plot_runtime_performance(out_parser_dict,
                                  plot_type='performance',
                                  colormap='jet',
                                  reverse_legend=False,
                                  plot_over='population',
                                  **kwargs):
    """Plot multiple runs (e.g. over different releases) with same configurations in one performance plot.

    Arguments
    ---------
        out_parser_dict: mapping of run label -> OutputParser; all parsers must share
                         identical configs with exactly one algorithm and obj_func each
        plot_type: 'performance' or 'mean_runtime', forwarded to the per-run plot
        colormap: matplotlib colormap name used to color the runs
        reverse_legend: reverse the order of legend entries
        plot_over: config key used as the input size ('population' or 'dimension')
        vmax (optional, kwarg): upper bound for the colormap normalization

    Returns
    -------
        The matplotlib figure.

    Raises
    ------
        AssertionError if the runs' configs are not identical or contain more than
        one algorithm / objective function.
    """
    fig, ax = viz_utils.setup_figure_1ax(
        x_label='Input size [population]',
        y_label=' '.join([LABEL_MAP[plot_type], UNITS_MAP[plot_type]]))

    # Check that all runs have same configuration and only one algo / obj func.
    parsers = list(out_parser_dict.values())
    first_config = parsers[0].config
    for parser in parsers[1:]:
        if parser.config.keys() != first_config.keys():
            raise AssertionError(
                'For multiple runtime / performance plots, need equal config keys..'
            )
        if parser.config.items() != first_config.items():
            raise AssertionError(
                'For multiple runtime / performance plots, need equal configs.'
            )
    for parser in parsers:
        assert len(parser.config['algorithm']) == 1, \
            'Only one algorithm over different runs per plot.'
        assert len(parser.config['obj_func']) == 1, \
            'Only one objective func over different runs per plot.'

    vmax = kwargs.get('vmax', len(out_parser_dict))
    cmap_norm, cmap = norm_cmap(colormap, vmin=0, vmax=vmax)

    for idx, (run_label, out_parser) in enumerate(out_parser_dict.items()):
        plot_mean_runtime_vs_input_size(out_parser,
                                        plot_type,
                                        plot_over=plot_over,
                                        ax=ax,
                                        color=cmap(cmap_norm(idx)),
                                        label=run_label,
                                        reverse_legend=reverse_legend,
                                        **kwargs)
    ax.set_ylim(bottom=0.0)
    # Shrink current axis by 20% to make room for the legend on the right.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])

    if reverse_legend:
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(reversed(handles),
                  reversed(labels),
                  frameon=False,
                  loc='center left',
                  bbox_to_anchor=(1, 0.5))
    else:
        ax.legend(frameon=False, loc='center left', bbox_to_anchor=(1, 0.5))

    return fig
Ejemplo n.º 3
0
def plot_multiple_perf_metric_vs_config_val(perf_metrics,
                                            x_key,
                                            y_keys,
                                            ax=None):
    """Same as plot_perf_metric_vs_config_val but for multiple y values.

    Arguments
    ---------
        perf_metrics: dict of performance-metric dicts over runs
        x_key: config key plotted on the x axis
        y_keys: iterable of metric keys, one scatter series each
        ax: existing axis to draw into; a new figure is created if None

    Returns
    -------
        The axis that was drawn into.
    """
    if ax is None:
        _, ax = viz_utils.setup_figure_1ax(x_label=x_key, y_label='')
    for y_key in y_keys:
        plot_perf_metric_vs_config_val(perf_metrics, x_key, y_key, ax=ax)
    ax.legend()
    return ax
Ejemplo n.º 4
0
def plot_perf_metric_vs_config_val(perf_metrics,
                                   x_key,
                                   y_key,
                                   ax=None,
                                   label=None):
    """Plot a config value over runs against a calculated performance metric.

    Given a dict of performance-metric dicts over runs, one entry looks like so
        {'config': {'algorithm': 'pso',
                    'obj_func': 'rosenbrock',
                    'dimension': 64,
                    'n_rep': 1,
                    'n_iter': 1832,
                    'population': 512,
                    'min_val': -100,
                    'max_val': 100},
         'op_intensity': 2947.808607796262,
         'flop_count': 1193603079,
         'mem_move_bytes': 404912,
         'mem_move_floats': 101228,
         'performance': 0.3243619732473652},
     plot values from the config, e.g. n_iter, over runs vs calculated performance
     metrics, e.g. performance.

    Arguments
    ---------
        perf_metrics: dict of run id -> performance-metric dict (shape above)
        x_key: key into each run's 'config' sub-dict, plotted on the x axis
        y_key: top-level metric key (anything but 'config'), plotted on the y axis
        ax: existing axis to draw into; a new figure is created if None
        label: legend label; derived from LABEL_MAP / UNITS_MAP if None

    Returns
    -------
        The axis that was drawn into.
    """
    # Validate keys against the first run; all runs are assumed to share the schema.
    first_metrics = next(iter(perf_metrics.values()))
    assert x_key in first_metrics['config'], \
        'x axis values should be keys in run config.'
    assert y_key in [key for key in first_metrics if key != 'config'], \
        'y axis values should be calculated performance quantities.'

    if label is None:
        label = ' '.join([LABEL_MAP[y_key], UNITS_MAP[y_key]])

    if ax is None:
        fig, ax = viz_utils.setup_figure_1ax(x_label=LABEL_MAP[x_key],
                                             y_label=label)

    x_val_list = [metrics['config'][x_key] for metrics in perf_metrics.values()]
    y_val_list = [metrics[y_key] for metrics in perf_metrics.values()]

    ax.plot(x_val_list, y_val_list, 'o', label=label)
    return ax
Ejemplo n.º 5
0
def roofline_plot():
    """Produce the bare-bone roofline plot for a given memory bandwidth and peak performance of a system.

    Returns
    -------
        (fig, ax) tuple of the matplotlib figure and axis.
    """
    def attainable_performance(operational_intensity):
        # Roofline model: attainable performance is capped by either the
        # compute roof or the memory-bandwidth roof.
        return min(PEAK_PERFORMANCE, MEMORY_BANDWIDTH * operational_intensity)

    oi_values = np.logspace(-4, 12, 1000, base=2)
    perf_values = [attainable_performance(oi) for oi in oi_values]
    fig, ax = viz_utils.setup_figure_1ax(
        x_label='Operational Intensity [Flops/Bytes]',
        y_label='Performance [Flops/Cycle]')
    # 'base' replaces the 'basex'/'basey' keywords, which were deprecated in
    # matplotlib 3.3 and removed in 3.5 (the old spelling raises TypeError).
    ax.set_xscale("log", base=2)
    ax.set_yscale("log", base=2)
    ax.plot(oi_values, perf_values, linewidth=2.0, alpha=0.7)
    ax.set_aspect('equal', adjustable='datalim')

    # Annotate the ridge point where the memory roof meets the compute roof.
    ridge_point = PEAK_PERFORMANCE / MEMORY_BANDWIDTH
    ax.annotate(
        f'{{{ridge_point:0.1f}, {PEAK_PERFORMANCE:0.1f}}}',
        xy=(ridge_point, PEAK_PERFORMANCE),
        xytext=(-70, 15),
        textcoords='offset points',
    )
    return fig, ax
Ejemplo n.º 6
0
def plot_mean_runtime_vs_input_size(out_parser: OutputParser,
                                    plot_type='performance',
                                    ax=None,
                                    color=None,
                                    label=None,
                                    reverse_legend=False,
                                    plot_over='population',
                                    **kwargs):
    """For all algorithms present in the out_parser data, plot mean runtime vs input size.

    NOTE: We fix the dimension to be equal for all runs. Only works if only one dimension specified in config.

    Arguments
    ---------
        out_parser: OutputParser holding config, sub_configs and timings of the runs
        plot_type: 'performance' (flops / cycle) or 'mean_runtime' (cycles)
        ax: existing axis to draw into; a new figure is created if None
        color: fixed color for all series; per-series jet colormap colors if None
        label: fixed legend label for all series; '<algo>_<obj_func>' per series if None
        reverse_legend: reverse the order of legend entries
        plot_over: config key used as input size, 'population' or 'dimension'
        vmax (optional, kwarg): upper bound for the colormap normalization
        log_xaxis / log_yaxis (optional, kwargs): use log scale on that axis

    Returns
    -------
        Nested dict algo -> obj_func -> {plot_type: [...], 'sizes': [...]}.
    """
    plot_types = ['performance', 'mean_runtime']
    plot_over_choices = ['population', 'dimension']

    assert plot_type in plot_types, f'plot_type argument needs to be one of {plot_types}.'
    assert plot_over in plot_over_choices, f'plot_over argument needs to be one of {plot_over_choices}.'

    config = out_parser.config
    sub_configs = out_parser.sub_configs

    algos = config['algorithm']
    obj_funcs = config['obj_func']

    timings = out_parser.parse_timings(return_lists=True)
    mean_timings = {run: np.mean(times) for run, times in timings.items()}

    # Quantity is either cycles or flops / cycle, depending on plot_type.
    algo_quantity_vs_size = {algo: {} for algo in algos}
    # Hoisted out of the run loop; also guarantees y_label is bound even when
    # no run matches any (algo, obj_func) pair below.
    y_label = ' '.join([LABEL_MAP[plot_type], UNITS_MAP[plot_type]])

    for algo in algos:
        for obj_func in obj_funcs:
            algo_quantity_vs_size[algo][obj_func] = {
                plot_type: [],
                'sizes': []
            }
            for run, sub_config in sub_configs.items():
                if (sub_config['algorithm'] != algo
                        or sub_config['obj_func'] != obj_func):
                    continue
                if plot_type == 'performance':
                    flop_counter = performance_calculations.FlopCounter(
                        sub_config)
                    flops = flop_counter.flop_count()
                    algo_quantity_vs_size[algo][obj_func][plot_type].append(
                        flops / mean_timings[run])
                elif plot_type == 'mean_runtime':
                    algo_quantity_vs_size[algo][obj_func][plot_type].append(
                        mean_timings[run])
                algo_quantity_vs_size[algo][obj_func]['sizes'].append(
                    sub_config[plot_over])

    if ax is None:
        if plot_over == 'dimension':
            title = f'Population size: {config["population"][0]}'
        else:  # plot_over == 'population', guaranteed by the assert above
            title = f'Search space dimension: {config["dimension"][0]}'
        _, ax = viz_utils.setup_figure_1ax(x_label=f'Input size [{plot_over}]',
                                           y_label=y_label,
                                           title=title)

    vmax = kwargs.get('vmax', len(sub_configs))
    cmap_norm, cmap = None, None
    if color is None:
        cmap_norm, cmap = norm_cmap('jet', vmin=0, vmax=vmax)
        print('No colormap provided, using jet as default.')

    idx = 1
    for algo, obj_func_dict in algo_quantity_vs_size.items():
        for obj_func, data_dict in obj_func_dict.items():
            sizes, times = sort_two_lists_based_on_first(
                data_dict['sizes'], data_dict[plot_type])

            # Compute color / label per series: the original assigned them to
            # the parameters, so the first series' values stuck for all
            # subsequent series.
            series_color = color if color is not None else cmap(cmap_norm(idx))
            series_label = label if label else '_'.join([algo, obj_func])
            ax.plot(sizes, times, label=series_label, color=series_color,
                    linewidth=1.8)
            # Overlay the data points; empty label keeps them out of the legend.
            ax.plot(sizes, times, 'o', label='', color=series_color)
            idx += 1

    if reverse_legend:
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(reversed(handles), reversed(labels), frameon=False)
    else:
        ax.legend(frameon=False)
    if 'log_xaxis' in kwargs:
        # 'nonposx' keyword was removed in matplotlib >= 3.3 ('clip' is the default).
        ax.set_xscale("log")
    if 'log_yaxis' in kwargs:
        ax.set_yscale("log")

    return algo_quantity_vs_size
Ejemplo n.º 7
0
def plot_optimization_evolution_2d(evolution_data,
                                   *args,
                                   obj_func=None,
                                   **kwargs):
    """For a given population, plot their positions in solution space in 2d over time.

    Arguments
    ---------
        evolution_data: list over iterations where each entry is a dict of population members
                        holding their 2d positions as a list
        xlims (optional): list of lower and upper x lim for plot
        ylims (optional): list of lower and upper y lim for plot
        title (optional): title set on the axis
        obj_func: name of the objective function to contour plot in the background

    Returns
    -------
        (fig, ax) tuple of the matplotlib figure and axis.

    Raises
    ------
        NotImplementedError if obj_func is given but not in OBJ_FUNCS.
    """
    mpld3.enable_notebook()
    fig, ax = viz_utils.setup_figure_1ax(x_label='x position',
                                         y_label='y position',
                                         size=(8, 8),
                                         shrink_ax=False)

    if obj_func:
        if obj_func not in OBJ_FUNCS:
            raise NotImplementedError(
                f'{obj_func} is not implemented for plotting. '
                f'Only {list(OBJ_FUNCS.keys())} can be plotted.\n'
                f'Feel free to implement them in python :)')

        mpld3.disable_notebook(
        )  # contour plots cannot be json serialized so we have to switch off d3 mode
        # Evaluate the objective on a 100 x 100 grid spanning the bounding box
        # of all member positions over all iterations.
        min_x, max_x, min_y, max_y = viz_utils.get_min_max_of_evolution_data(
            evolution_data)
        x_list = np.linspace(min_x, max_x, 100)
        y_list = np.linspace(min_y, max_y, 100)
        x_mesh, y_mesh = np.meshgrid(x_list, y_list)

        # OBJ_FUNCS callables take a (2, n) array of stacked x/y coordinates.
        z_mesh = OBJ_FUNCS[obj_func](np.vstack(
            [x_mesh.ravel(), y_mesh.ravel()])).reshape((100, 100))

        # NOTE(review): contour levels are taken from rosenbrock values along
        # the diagonal regardless of which obj_func is plotted — confirm this
        # is intended and not a leftover from a rosenbrock-only version.
        ax.contourf(
            x_mesh,
            y_mesh,
            z_mesh,
            [rosenbrock(np.array([k, k])) for k in np.linspace(1, 20, 50)],
            cmap='jet',
            locator=ticker.LogLocator(),
            alpha=0.1)

    # Scatter each iteration's population: the initial population in black,
    # later iterations colored progressively via the 'hot' colormap.
    for iter_idx, step_dict in enumerate(evolution_data):

        if iter_idx == 0:
            color = 'black'
        else:
            color = viz_utils.get_color_from_cm(cm.hot, 1, len(evolution_data),
                                                iter_idx + 1)

        x = [pos[0] for pos in step_dict.values()]
        y = [pos[1] for pos in step_dict.values()]

        ax.plot(x, y, '.', color=color, alpha=0.7, markeredgewidth=0.0)

    if 'xlims' in kwargs:
        ax.set_xlim(kwargs['xlims'])
    if 'ylims' in kwargs:
        ax.set_ylim(kwargs['ylims'])
    if 'title' in kwargs:
        ax.set_title(kwargs['title'])

    return fig, ax