Example no. 1
0
def test_parameters_with_nan_inf():
    """Parameter plots must tolerate results containing NaN/Inf values."""
    # build results whose parameter vectors include non-finite entries
    res_a = create_optimization_result_nan_inf()
    res_b = create_optimization_result_nan_inf()

    # plotting a single result should succeed despite NaN/Inf
    visualize.parameters(res_a)

    # plotting a list of such results should succeed as well
    visualize.parameters([res_a, res_b])
Example no. 2
0
def test_parameters(scale_to_interval):
    """Smoke-test visualize.parameters for a single result and a result list.

    :param scale_to_interval:
        forwarded to ``visualize.parameters`` (parametrized by the test suite)
    """
    res_a = create_optimization_result()
    res_b = create_optimization_result()

    # single-result call
    visualize.parameters(res_a, scale_to_interval=scale_to_interval)

    # list-of-results call
    visualize.parameters([res_a, res_b], scale_to_interval=scale_to_interval)
Example no. 3
0
def test_parameters_with_options():
    """Smoke-test visualize.parameters with non-default plotting options."""
    res_a = create_optimization_result()
    res_b = create_optimization_result()

    # pick up a reference point and choose a non-default figure size
    _, _, _, _, ref_point = create_plotting_options()
    custom_size = (9.0, 8.0)

    # single result: explicit reference, size and color
    visualize.parameters(
        res_a,
        parameter_indices='all',
        reference=ref_point,
        size=custom_size,
        colors=[1., .3, .3, 0.5],
    )

    # result list: restricted start indices, no alpha balancing
    visualize.parameters(
        [res_a, res_b],
        parameter_indices='all',
        reference=ref_point,
        balance_alpha=False,
        start_indices=(0, 1, 4),
    )

    # result list: only free parameters, scalar start-index count
    visualize.parameters(
        [res_a, res_b],
        parameter_indices='free_only',
        start_indices=3,
    )
Example no. 4
0
def store_and_plot_pretraining(result: Result, pretraindir: str, prefix: str):
    """
    Store optimization results in HDF5 as well as csv for later reuse, and
    save some visualization for debugging purposes.

    :param result:
        result from pretraining

    :param pretraindir:
        directory in which results and plots will be stored

    :param prefix:
        prefix for file names that can be used to differentiate between
        different pretraining stages as well as models/datasets.
    """
    # full results as hdf5
    rfile = os.path.join(pretraindir, prefix + '.hdf5')
    # temp bugfix for https://github.com/ICB-DCM/pyPESTO/issues/529:
    # remove any stale file before writing
    if os.path.exists(rfile):
        os.remove(rfile)
    OptimizationResultHDF5Writer(rfile).write(result, overwrite=True)

    # parameter values (skipping failed starts) as csv; these are
    # consumed by subsequent pretraining steps
    xs = result.optimize_result.get_for_key('x')
    parameter_df = pd.DataFrame(
        [x for x in xs if x is not None],
        columns=result.problem.x_names,
    )
    parameter_df.to_csv(os.path.join(pretraindir, prefix + '.csv'))

    # debugging plot: waterfall of final objective values
    waterfall(result, scale_y='log10', offset_y=0.0)
    plt.tight_layout()
    plt.savefig(os.path.join(pretraindir, prefix + '_waterfall.pdf'))

    # debugging plot: parameters, but only for moderately sized problems
    if result.problem.dim_full < 2e3:
        parameters(result)
        plt.tight_layout()
        plt.savefig(os.path.join(pretraindir, prefix + '_parameters.pdf'))
Example no. 5
0
                range(len(problem.x_names))]
    opt_result['x'] = x_sorted
    result.optimize_result.append(opt_result)

# order starts by final objective value before plotting
result.optimize_result.sort()


# file-name prefix identifying model, data set, architecture and optimizer
prefix = '__'.join([MODEL, DATA, str(N_HIDDEN), OPTIMIZER])

# waterfall of final objective values across optimizer starts
waterfall(result, scale_y='log10', offset_y=0.0)
plot_and_save_fig(prefix + '__waterfall.pdf')

# objective value over the course of each optimization run
optimizer_history(result, scale_y='log10')
plot_and_save_fig(prefix + '__optimizer_trace.pdf')

# estimated parameter vectors per start
parameters(result)
plot_and_save_fig(prefix + '__parameters.pdf')

# convergence diagnostics (gradient norm vs. objective value)
optimizer_convergence(result)
plot_and_save_fig(prefix + '__optimizer_convergence.pdf')

# one subplot per optimizer start for the embedding visualizations below
fig_embedding, axes_embedding = plt.subplots(1, N_STARTS,
                                             figsize=(18.5, 10.5))

# compile a theano function mapping encoder parameters to the embedding
embedding_fun = theano.function(
    [mae.encoder_pars],
    mae.encode(mae.encoder_pars)
)
inflate_fun = theano.function(
    [mae.encoder_pars],
    mae.encode_params(mae.encoder_pars)
    'color': [0.2, 0.4, 1., 1.],
    'legend': 'second optimum'
}
# normalize the reference-point specification into pyPESTO reference objects
ref = visualize.create_references(ref)

# new waterfall plot with reference point for second optimum
visualize.waterfall(result1_dogleg,
                    size=(15, 6),
                    scale_y='lin',
                    y_limits=[-1, 101],
                    reference=ref,
                    colors=[0., 0., 0., 1.])

###### Visualize Parameters
# compare parameter estimates of two optimizers in a single plot
visualize.parameters([result1_bfgs, result1_tnc],
                     legends=['L-BFGS-B', 'TNC'],
                     balance_alpha=False)
# single-result plot with the reference point and selected starts only
visualize.parameters(result1_dogleg,
                     legends='dogleg',
                     reference=ref,
                     size=(15, 10),
                     start_indices=[0, 1, 2, 3, 4, 5],
                     balance_alpha=False)

# per-start optimization statistics as a pandas DataFrame
df = result1_tnc.optimize_result.as_dataframe(
    ['fval', 'n_fval', 'n_grad', 'n_hess', 'n_res', 'n_sres', 'time'])
df.head()

###### Optimizer History
# plot one list of waterfalls
visualize.optimizer_history([result1_bfgs, result1_tnc],