import os
import re

import numpy as np

import pypesto


def check_minimize(objective, library, solver, allow_failed_starts=False):
    # set up the optimizer for the requested library
    options = {'maxiter': 100}
    optimizer = None

    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    elif library == 'pyswarm':
        optimizer = pypesto.PyswarmOptimizer(options=options)

    # 2-dimensional problem on the unit box
    lb = 0 * np.ones((1, 2))
    ub = 1 * np.ones((1, 2))
    problem = pypesto.Problem(objective, lb, ub)

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=allow_failed_starts)

    # run a single-start optimization and check that the best start
    # reports a float objective value
    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options)

    assert isinstance(result.optimize_result.list[0]['fval'], float)
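
def example_check_minimize_usage():
    # Hedged usage sketch, not part of the original tests: illustrates how
    # check_minimize might be called. The rosenbrock objective and the
    # 'L-BFGS-B' solver are assumed stand-ins, not fixtures from this suite.
    import scipy.optimize as so
    objective = pypesto.Objective(fun=so.rosen, grad=so.rosen_der)
    check_minimize(objective, 'scipy', 'L-BFGS-B')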

def parameter_estimation(objective, library, solver, fixed_pars, n_starts):
    # least-squares solvers (prefixed 'ls_') are limited via max_nfev,
    # all others via maxiter
    if re.match(r'(?i)^(ls_)', solver):
        options = {'max_nfev': 10}
    else:
        options = {'maxiter': 10}

    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    elif library == 'pyswarm':
        optimizer = pypesto.PyswarmOptimizer(options=options)
    else:
        raise ValueError("This code should not be reached")
    optimizer.temp_file = os.path.join('test', 'tmp_{index}.csv')

    # bounds and fixed values for the AMICI model parameters
    dim = len(objective.x_ids)
    lb = -2 * np.ones((1, dim))
    ub = 2 * np.ones((1, dim))
    pars = objective.amici_model.getParameters()
    problem = pypesto.Problem(
        objective, lb, ub,
        x_fixed_indices=fixed_pars,
        x_fixed_vals=[pars[idx] for idx in fixed_pars])

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False,
        startpoint_resample=True,
    )

    pypesto.minimize(problem, optimizer, n_starts, options=optimize_options)

def create_optimization_history():
    # create the pypesto problem
    problem = create_problem()

    # create optimizer
    optimizer_options = {'maxiter': 200}
    optimizer = pypesto.ScipyOptimizer(method='TNC',
                                       options=optimizer_options)

    # run optimization
    optimize_options = pypesto.OptimizeOptions(allow_failed_starts=True)
    result_with_trace = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options)

    return result_with_trace
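
def example_create_problem():
    # Hedged sketch, assumption only: the create_problem() helper used above
    # is not shown in this excerpt. A minimal stand-in could build a small
    # 2-dimensional problem from a scipy rosenbrock objective.
    import scipy.optimize as so
    objective = pypesto.Objective(fun=so.rosen, grad=so.rosen_der)
    lb = -5 * np.ones((1, 2))
    ub = 5 * np.ones((1, 2))
    return pypesto.Problem(objective, lb, ub)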

def create_optimization_results(objective):
    # create optimizer, pypesto problem and options
    options = {'maxiter': 200}
    optimizer = pypesto.ScipyOptimizer(method='TNC', options=options)
    lb = -2 * np.ones((1, 2))
    ub = 2 * np.ones((1, 2))
    problem = pypesto.Problem(objective, lb, ub)
    optimize_options = pypesto.OptimizeOptions(allow_failed_starts=True)

    # run optimization
    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options)

    return problem, result, optimizer
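
def example_create_optimization_results_usage():
    # Hedged usage sketch, not part of the original tests: shows how the
    # helper above might be driven with a simple objective and how the
    # returned result could feed a waterfall plot. The rosenbrock objective
    # is an assumed stand-in.
    import scipy.optimize as so
    import pypesto.visualize
    objective = pypesto.Objective(fun=so.rosen, grad=so.rosen_der)
    problem, result, optimizer = create_optimization_results(objective)
    pypesto.visualize.waterfall(result)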

def parameter_estimation(
        objective, library, solver, fixed_pars, n_starts,
):
    options = {
        'maxiter': 100
    }

    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    optimizer.temp_file = os.path.join('test', 'tmp_{index}.csv')

    lb = -2 * np.ones((1, objective.dim))
    ub = 2 * np.ones((1, objective.dim))
    pars = objective.amici_model.getParameters()
    problem = pypesto.Problem(
        objective, lb, ub,
        x_fixed_indices=fixed_pars,
        x_fixed_vals=[pars[idx] for idx in fixed_pars])

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False,
        startpoint_resample=True,
    )

    results = pypesto.minimize(
        problem, optimizer, n_starts,
        options=optimize_options,
    )
    results = results.optimize_result.list

def check_history(self):
    self.problem = pypesto.Problem(self.obj, self.lb, self.ub)

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False
    )
    history_options = pypesto.HistoryOptions(
        trace_record=True,
        trace_record_hess=False,
        trace_save_iter=1,
        storage_file='tmp/traces/conversion_example_{id}.csv',
    )

    result = pypesto.minimize(
        problem=self.problem,
        optimizer=self.optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options,
        history_options=history_options
    )
    # disable trace from here on
    self.obj.history.options.trace_record = False

    for start in result.optimize_result.list:
        trace = start.history._trace
        # iteration with the minimal recorded fval, and the first iteration
        # with a recorded (non-NaN) fval
        it_final = int(trace[('fval', np.NaN)].idxmin())
        it_start = int(np.where(np.logical_not(
            np.isnan(trace['fval'].values)
        ))[0][0])

        # the trace must begin at the start point, end at the reported
        # optimum, and record the initial objective value
        self.assertTrue(np.isclose(
            trace['x'].values[0, :], start.x0
        ).all())
        self.assertTrue(np.isclose(
            trace['x'].values[it_final, :], start.x
        ).all())
        self.assertTrue(np.isclose(
            trace['fval'].values[it_start, 0], start.fval0
        ))

        # quantities recomputed at the traced parameters must match the
        # recorded trace entries
        funs = {
            'fval': self.obj.get_fval,
            'grad': self.obj.get_grad,
            'hess': self.obj.get_hess,
            'res': self.obj.get_res,
            'sres': self.obj.get_sres,
            'chi2': lambda x: res_to_chi2(self.obj.get_res(x)),
            'schi2': lambda x: sres_to_schi2(*self.obj(
                x, (0, 1,), pypesto.objective.constants.MODE_RES
            ))
        }
        for var, fun in funs.items():
            for it in range(5):
                if var in ['fval', 'chi2']:
                    if not np.isnan(trace[var].values[it, 0]):
                        self.assertTrue(np.isclose(
                            trace[var].values[it, 0],
                            fun(trace['x'].values[it, :])
                        ))
                elif var in ['hess', 'sres', 'res']:
                    if trace[var].values[it, 0] is not None:
                        self.assertTrue(np.isclose(
                            trace[var].values[it, 0],
                            fun(trace['x'].values[it, :])
                        ).all())
                elif self.obj.history.options[f'trace_record_{var}'] \
                        and not \
                        np.isnan(trace[var].values[it, :]).all():
                    self.assertTrue(np.isclose(
                        trace[var].values[it, :],
                        fun(trace['x'].values[it, :])
                    ).all())