def test_storage_opt_result():
    """Round-trip an optimization result through HDF5 and compare entry-wise.

    The target file lives in nested directories that do not exist yet, so
    this also exercises directory creation by the writer.
    """
    result = create_optimization_result()
    with tempfile.TemporaryDirectory(dir=".") as tmpdirname:
        # intentionally nested, non-existing sub-directories "a/b"
        result_file_name = os.path.join(tmpdirname, "a", "b", "result.h5")

        writer = OptimizationResultHDF5Writer(result_file_name)
        writer.write(result)

        reader = OptimizationResultHDF5Reader(result_file_name)
        read_result = reader.read()

        # every start and every stored key must survive the round trip
        for written, loaded in zip(result.optimize_result.list,
                                   read_result.optimize_result.list):
            for key in written:
                if isinstance(written[key], np.ndarray):
                    np.testing.assert_array_equal(written[key], loaded[key])
                else:
                    assert written[key] == loaded[key]
def test_storage_opt_result_update(hdf5_file):
    """Check that writing with ``overwrite=True`` replaces an earlier result.

    :param hdf5_file: fixture providing the target HDF5 file path
    """
    first_result = create_optimization_result()
    second_result = create_optimization_result()

    writer = OptimizationResultHDF5Writer(hdf5_file)
    writer.write(first_result)
    # second write must replace the first result in place
    writer.write(second_result, overwrite=True)

    read_result = OptimizationResultHDF5Reader(hdf5_file).read()

    # the file content must match the *second* (overwriting) result
    for written, loaded in zip(second_result.optimize_result.list,
                               read_result.optimize_result.list):
        for key in written:
            if isinstance(written[key], np.ndarray):
                np.testing.assert_array_equal(written[key], loaded[key])
            else:
                assert written[key] == loaded[key]
def store_and_plot_pretraining(result: Result, pretraindir: str, prefix: str):
    """
    Store optimization results in HDF5 as well as csv for later reuse. Also
    saves some visualization for debugging purposes.

    :param result:
        result from pretraining

    :param pretraindir:
        directory in which results and plots will be stored

    :param prefix:
        prefix for file names that can be used to differentiate between
        different pretraining stages as well as models/datasets.
    """
    # store full results as hdf5
    results_file = os.path.join(pretraindir, prefix + '.hdf5')
    if os.path.exists(results_file):
        # temp bugfix for https://github.com/ICB-DCM/pyPESTO/issues/529
        os.remove(results_file)
    OptimizationResultHDF5Writer(results_file).write(result, overwrite=True)

    # store parameter values, this will be used in subsequent steps
    x_values = result.optimize_result.get_for_key('x')
    parameter_df = pd.DataFrame(
        [x for x in x_values if x is not None],
        columns=result.problem.x_names,
    )
    parameter_df.to_csv(os.path.join(pretraindir, prefix + '.csv'))

    # do plotting
    waterfall(result, scale_y='log10', offset_y=0.0)
    plt.tight_layout()
    plt.savefig(os.path.join(pretraindir, prefix + '_waterfall.pdf'))

    # parameter plot only for moderately sized problems
    if result.problem.dim_full < 2e3:
        parameters(result)
        plt.tight_layout()
        plt.savefig(os.path.join(pretraindir, prefix + '_parameters.pdf'))
)
    return problem


# set all your code into this if condition.
# This way only one core performs the code
# and distributes the work of the optimization.
# NOTE(review): presumably launched via mpiexec/mpirun so that
# MPIPoolEngine has worker ranks available — confirm run command.
if __name__ == '__main__':
    # set number of starts
    n_starts = 2
    # create problem
    problem = setup_rosen_problem()
    # create optimizer
    optimizer = optimize.FidesOptimizer(verbose=0)
    # result is the way to call the optimization with MPIPoolEngine.
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        engine=MPIPoolEngine(),
        filename=None,
    )
    # saving optimization results to hdf5
    # NOTE(review): write() is called without overwrite=True here — this
    # fails if 'temp_result.h5' already contains a result; confirm intended.
    file_name = 'temp_result.h5'
    opt_result_writer = OptimizationResultHDF5Writer(file_name)
    problem_writer = ProblemHDF5Writer(file_name)
    problem_writer.write(problem)
    opt_result_writer.write(result)
report_hess=False)
# do the optimization
# Reference point: nominal parameters from the PEtab problem, restricted to
# the free (estimated) indices, with the objective evaluated there.
ref = visualize.create_references(
    x=np.asarray(petab_problem.x_nominal_scaled)[np.asarray(
        petab_problem.x_free_indices)],
    fval=problem.objective(
        np.asarray(petab_problem.x_nominal_scaled)[np.asarray(
            petab_problem.x_free_indices)]))
print(f'Reference fval: {ref[0]["fval"]}')
# NOTE(review): assumes a 'results' directory exists — confirm it is
# created elsewhere before this script runs.
hdf_results_file = os.path.join('results', prefix + '.hdf5')
result = optimize.minimize(
    problem=problem,
    optimizer=optimizer,
    n_starts=N_STARTS,
    engine=engine,
    options=options,
    progress_bar=False,
    filename=None,
)
# waterfall plot of all starts against the nominal-parameter reference
visualize.waterfall(result, reference=ref, scale_y='log10')
plt.tight_layout()
plt.savefig(os.path.join('results', prefix + '_waterfall.pdf'))
# persist the full result; overwrite any previous run's file
writer = OptimizationResultHDF5Writer(hdf_results_file)
writer.write(result, overwrite=True)