def parameter_estimation(objective, library, solver, fixed_pars, n_starts):
    """Run a small multistart optimization for ``objective``.

    Selects the optimizer from ``library``/``solver``, fixes the
    parameters listed in ``fixed_pars`` to the model's nominal values,
    and minimizes with ``n_starts`` resampled start points.
    """
    # scipy least-squares solvers ('ls_*') budget via max_nfev,
    # everything else via maxiter
    is_least_squares = re.match(r'(?i)^(ls_)', solver) is not None
    options = {'max_nfev': 10} if is_least_squares else {'maxiter': 10}

    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    elif library == 'pyswarm':
        optimizer = pypesto.PyswarmOptimizer(options=options)
    else:
        raise ValueError("This code should not be reached")
    optimizer.temp_file = os.path.join('test', 'tmp_{index}.csv')

    n_par = len(objective.x_ids)
    lower = -2 * np.ones((1, n_par))
    upper = 2 * np.ones((1, n_par))

    # fix the selected parameters at the model's nominal values
    nominal = objective.amici_model.getParameters()
    problem = pypesto.Problem(
        objective, lower, upper,
        x_fixed_indices=fixed_pars,
        x_fixed_vals=[nominal[idx] for idx in fixed_pars])

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False,
        startpoint_resample=True,
    )
    pypesto.minimize(problem, optimizer, n_starts, options=optimize_options)
def test_preeq_guesses():
    """Test optimization with preequilibration guesses.

    Asserts that steadystate guesses are written during optimization and
    checks that the gradient is still correct with guesses set.
    """
    petab_problem = petab.Problem.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    petab_problem.model_name = "Zheng_PNAS2012"
    importer = pypesto.PetabImporter(petab_problem)
    obj = importer.create_objective()
    problem = importer.create_problem(obj)

    optimizer = pypesto.ScipyOptimizer('ls_trf')
    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=2,
    )

    # guesses must have been filled in during the optimization
    assert problem.objective.steadystate_guesses['fval'] < np.inf
    assert len(obj.steadystate_guesses['data']) == 1

    # finite-difference gradient check at the returned optimum
    df = obj.check_grad(
        result.optimize_result.list[0]['x'],
        eps=1e-3,
        verbosity=0,
        mode=pypesto.objective.constants.MODE_FUN,
    )
    print("relative errors MODE_FUN: ", df.rel_err.values)
    print("absolute errors MODE_FUN: ", df.abs_err.values)
    assert np.all((df.rel_err.values < RTOL) | (df.abs_err.values < ATOL))
def setUpClass(cls):
    """Create a shared least-squares optimizer and test problem bounds.

    Note: ``ls_trf`` maps to ``scipy.optimize.least_squares``, whose
    evaluation budget is controlled by ``max_nfev`` — ``maxiter`` is not
    a valid option for that routine (see the ``parameter_estimation``
    helper, which makes the same distinction for ``ls_*`` solvers).
    """
    cls.optimizer = pypesto.ScipyOptimizer(method='ls_trf',
                                           options={'max_nfev': 100})
    cls.obj, _ = load_model_objective('conversion_reaction')

    # box bounds [-2, 2]^2 as (1, dim) arrays
    cls.lb = -2 * np.ones((1, 2))
    cls.ub = 2 * np.ones((1, 2))
def check_minimize(objective, library, solver, allow_failed_starts=False):
    """Run a single-start optimization and check a float fval is returned.

    Least-squares scipy solvers (``ls_*``) take their evaluation budget
    via ``max_nfev`` (``scipy.optimize.least_squares``); all other
    solvers use ``maxiter``. Previously ``maxiter`` was passed
    unconditionally, which is invalid for the ``ls_*`` methods.
    """
    if isinstance(solver, str) and solver.lower().startswith('ls_'):
        options = {'max_nfev': 100}
    else:
        options = {'maxiter': 100}

    optimizer = None
    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    elif library == 'pyswarm':
        optimizer = pypesto.PyswarmOptimizer(options=options)
    # NOTE(review): an unknown `library` leaves optimizer=None so that
    # pypesto.minimize falls back to its default — behavior kept as-is.

    lb = 0 * np.ones((1, 2))
    ub = 1 * np.ones((1, 2))
    problem = pypesto.Problem(objective, lb, ub)

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=allow_failed_starts)
    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options)

    assert isinstance(result.optimize_result.list[0]['fval'], float)
def setUpClass(cls):
    """Create a shared trust-region optimizer and unit-box bounds."""
    cls.optimizer = pypesto.ScipyOptimizer(
        method='trust-exact',
        options={'maxiter': 100},
    )
    # bounds [0, 1]^2 as (1, dim) arrays
    cls.lb = np.zeros((1, 2))
    cls.ub = np.ones((1, 2))
def _test_petab(engine):
    """Run a short PEtab-based multistart optimization with ``engine``."""
    importer = pypesto.PetabImporter.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    obj = importer.create_objective()
    problem = importer.create_problem(obj)
    optimizer = pypesto.ScipyOptimizer(options={'maxiter': 10})

    result = pypesto.minimize(
        problem=problem, n_starts=3, engine=engine, optimizer=optimizer)

    # one result entry per start
    assert len(result.optimize_result.as_list()) == 3
def _multistart_on_kernel(fun, lb, ub):
    """Multistart-minimize ``fun`` within [lb, ub] and return the result."""
    problem = pypesto.Problem(
        objective=pypesto.Objective(fun=fun), lb=lb, ub=ub)
    scipy_options = {
        'maxiter': 10000,
        'maxfun': 10000,
        'disp': False,
    }
    optimizer = pypesto.ScipyOptimizer(options=scipy_options)
    return pypesto.minimize(
        problem=problem, optimizer=optimizer, n_starts=100)
def test_3_optimize(self):
    """Run a short optimization for every stored objective/importer pair."""
    for obj_edatas, importer in zip(self.obj_edatas, self.petab_importers):
        objective = obj_edatas[0]
        problem = importer.create_problem(objective)
        optimizer = pypesto.ScipyOptimizer(options={'maxiter': 10})

        result = pypesto.minimize(
            problem=problem, optimizer=optimizer, n_starts=2)

        # best start must have produced a finite objective value
        best_fval = result.optimize_result.get_for_key('fval')[0]
        self.assertTrue(np.isfinite(best_fval))
def _test_basic(engine):
    """Multistart-optimize the Rosenbrock objective using ``engine``."""
    objective = test_objective.rosen_for_sensi(max_sensi_order=2)['obj']
    problem = pypesto.Problem(
        objective, np.zeros((1, 2)), np.ones((1, 2)))
    optimizer = pypesto.ScipyOptimizer(options={'maxiter': 10})

    result = pypesto.minimize(
        problem=problem, n_starts=5, engine=engine, optimizer=optimizer)

    # one result entry per start
    assert len(result.optimize_result.as_list()) == 5
def test_pipeline(sampler, problem):
    """Check that a typical pipeline runs through."""
    # optimization stage
    optimizer = pypesto.ScipyOptimizer(options={'maxiter': 10})
    result = pypesto.minimize(problem, n_starts=3, optimizer=optimizer)

    # sampling stage, seeded with the optimization result
    result = pypesto.sample(
        problem, sampler=sampler, n_samples=20, result=result)

    # visualization smoke test
    pypesto.visualize.sampling_1d_marginals(result)
    plt.close()
def create_optimization_history():
    """Run a multistart optimization on the test problem and return the
    result (with trace)."""
    problem = create_problem()

    # TNC with a generous iteration budget
    optimizer = pypesto.ScipyOptimizer(
        method='TNC', options={'maxiter': 200})

    optimize_options = pypesto.OptimizeOptions(allow_failed_starts=True)
    return pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options)
def create_optimization_results(objective):
    """Optimize ``objective`` on [-2, 2]^2.

    Returns the tuple ``(problem, result, optimizer)``.
    """
    optimizer = pypesto.ScipyOptimizer(
        method='TNC', options={'maxiter': 200})
    problem = pypesto.Problem(
        objective, -2 * np.ones((1, 2)), 2 * np.ones((1, 2)))
    optimize_options = pypesto.OptimizeOptions(allow_failed_starts=True)

    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options)

    return problem, result, optimizer
def test_optimize():
    """Check how fixed parameters are reported in the optimizer result."""
    problem = create_problem()
    optimizer = pypesto.ScipyOptimizer()
    result = pypesto.minimize(problem, optimizer, 5)
    best = result.optimize_result.list[0]

    # full-dimension vectors are reported
    assert len(best.x) == 5
    assert len(best.grad) == 5
    # maybe not what we want, but that's how it is right now
    assert len(problem.ub) == 3
    # nans written into unknown components
    assert np.isnan(best.grad[1])
    # fixed values written into parameter vector
    assert best.x[1] == 1

    assert len(problem.get_full_vector(problem.lb)) == 5
def test_optimize(self):
    """Check that optimizer failures are written to the log file.

    Sets up console and file logging, runs an optimization whose
    objective always raises, and asserts that a failure message
    reached the log file. Fixes: the log file is now read via a
    context manager (no leaked handle if an assertion fails), and the
    extra FileHandler is detached and closed before removing the file
    (required on Windows, and avoids leaking handlers across tests).
    """
    # logging setup
    pypesto.logging.log_to_console(logging.WARN)
    filename = ".test_logging.tmp"
    pypesto.logging.log_to_file(logging.DEBUG, filename)
    logger = logging.getLogger('pypesto')
    if os.path.exists(filename):
        os.remove(filename)
    fh = logging.FileHandler(filename)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    logger.info("start test")

    # problem definition: an objective that always fails
    def fun(_):
        raise Exception("This function cannot be called.")

    objective = pypesto.Objective(fun=fun)
    problem = pypesto.Problem(objective, -1, 1)
    optimizer = pypesto.ScipyOptimizer()
    options = {'allow_failed_starts': True}

    # optimization: must not raise despite the failing objective
    pypesto.minimize(problem, optimizer, 5, options=options)

    # assert logging worked
    self.assertTrue(os.path.exists(filename))
    with open(filename, 'rb') as f:
        content = str(f.read())

    # tidy up: detach and close the handler before removing the file
    logger.removeHandler(fh)
    fh.close()
    os.remove(filename)

    # check if error message got inserted
    self.assertTrue("fail" in content)
def parameter_estimation(
        objective,
        library,
        solver,
        fixed_pars,
        n_starts,
):
    """Run a multistart parameter estimation.

    Fixes the parameters listed in ``fixed_pars`` to the model's nominal
    values and minimizes with ``n_starts`` resampled start points.

    Fixes: an unsupported ``library`` now raises a clear ValueError
    instead of hitting an UnboundLocalError on ``optimizer``, and the
    per-start result list (previously computed and discarded) is
    returned to the caller.

    Returns the list of per-start optimizer results.
    """
    options = {
        'maxiter': 100
    }
    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    else:
        # fail fast instead of leaving `optimizer` unbound below
        raise ValueError("This code should not be reached")
    optimizer.temp_file = os.path.join('test', 'tmp_{index}.csv')

    lb = -2 * np.ones((1, objective.dim))
    ub = 2 * np.ones((1, objective.dim))

    # fix the selected parameters at the model's nominal values
    pars = objective.amici_model.getParameters()
    problem = pypesto.Problem(
        objective, lb, ub,
        x_fixed_indices=fixed_pars,
        x_fixed_vals=[pars[idx] for idx in fixed_pars],
    )

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False,
        startpoint_resample=True,
    )
    results = pypesto.minimize(
        problem,
        optimizer,
        n_starts,
        options=optimize_options,
    )
    return results.optimize_result.list
obj = pypesto.Objective(fun=f, grad=g)  # , hess=h)

print('optimal x = ', petab_problem.x_nominal)
print('optimal lh value', obj(petab_problem.x_nominal))

# check gradient at optimum and at random point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

x_random = np.random.normal(0.5, 0.005, 22)
check_grad_2 = obj.check_grad(x_random)
print(check_grad_2[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

# OPTIMIZATION WITHOUT PRIOR ___________________________________________________________________________________________
optimizer = pypesto.ScipyOptimizer(method='L-BFGS-B')

# play with optimization options
optimizer.options = {
    'maxiter': 1e5,
    'ftol': 1e-10,
    'gtol': 1e-10,
    'maxls': 80,
}
# optimizer.options = {'maxcor': 10, 'ftol': 1e-10, 'gtol': 1e-05, 'eps': 1e-08, 'maxfun': 1e5,
#                      'maxiter': 1e5, 'maxls': 20}

problem = importer.create_problem(obj)
engine = pypesto.SingleCoreEngine()
n_starts = 10

start = time.time()
result = pypesto.minimize(
    problem=problem, optimizer=optimizer, n_starts=n_starts, engine=engine)
end = time.time()
output_folder=os.path.join(basedir, 'models', model_name, model_name), model_name=model_name, ) obj = importer.create_objective() obj.n_threads = n_threads obj.use_amici_petab_simulate = False problem = importer.create_problem(obj) optim_options = { 'xtol': 1e-12, 'gtol': 1e-4, } optimizer = pypesto.ScipyOptimizer( method='ls_trf', options=optim_options ) optimize_options = pypesto.optimize.optimize.OptimizeOptions( startpoint_resample=True, allow_failed_starts=True, ) result = pypesto.minimize( problem=problem, optimizer=optimizer, n_starts=10, options=optimize_options ) resultdir = os.path.join(basedir, 'results')
# enable sensitivities
solver.setSensitivityOrder(amici.SensitivityOrder_first)      # first-order ...
solver.setSensitivityMethod(amici.SensitivityMethod_forward)  # ... forward sensitivities
model.requireSensitivitiesForAllParameters()                  # ... w.r.t. all parameters

# CREATE OBJECTIVE FUNCTION_____________________________________________________________________________________________
obj = importer.create_objective(model=model, solver=solver)

print('optimal x = ', petab_problem.x_nominal)
print('optimal lh value', obj(petab_problem.x_nominal))

# OPTIMIZATION WITHOUT PRIOR ___________________________________________________________________________________________
optimizer = pypesto.ScipyOptimizer()
problem = importer.create_problem(obj)
engine = pypesto.SingleCoreEngine()
n_starts = 10

start = time.time()
result = pypesto.minimize(
    problem=problem, optimizer=optimizer, n_starts=n_starts, engine=engine)
end = time.time()

best_x = result.optimize_result.as_list('x')[0]['x']
print('best parameter: ', best_x)
print('best likelihood value: ', obj(best_x))

# calculate computation time
comp_time = end - start