def parameter_estimation(objective, library, solver, fixed_pars, n_starts):
    """Run a multi-start parameter estimation with the requested backend.

    Parameters
    ----------
    objective: pypesto objective wrapping an AMICI model (reads
        ``objective.x_ids`` and ``objective.amici_model``).
    library: optimizer backend, one of 'scipy', 'dlib', 'pyswarm'.
    solver: method name passed to the scipy/dlib optimizers.
    fixed_pars: indices of parameters to fix at their model values.
    n_starts: number of optimization starts.

    Returns
    -------
    The pypesto result of the multi-start optimization.

    Raises
    ------
    ValueError if `library` is not one of the supported backends.
    """
    # least-squares solvers (prefix 'ls_', case-insensitive) take
    # 'max_nfev' instead of 'maxiter'
    if re.match(r'(?i)^(ls_)', solver):
        options = {'max_nfev': 10}
    else:
        options = {'maxiter': 10}

    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    elif library == 'pyswarm':
        optimizer = pypesto.PyswarmOptimizer(options=options)
    else:
        raise ValueError("This code should not be reached")

    # write per-start traces to temp files; '{index}' is filled per start
    optimizer.temp_file = os.path.join('test', 'tmp_{index}.csv')

    dim = len(objective.x_ids)
    lb = -2 * np.ones((1, dim))
    ub = 2 * np.ones((1, dim))
    # fix the selected parameters at the model's current values
    pars = objective.amici_model.getParameters()
    problem = pypesto.Problem(objective,
                              lb,
                              ub,
                              x_fixed_indices=fixed_pars,
                              x_fixed_vals=[pars[idx] for idx in fixed_pars])

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False,
        startpoint_resample=True,
    )

    # previously the result was discarded; return it so callers can use it
    return pypesto.minimize(problem, optimizer, n_starts,
                            options=optimize_options)
Exemple #2
0
def check_minimize(objective, library, solver, allow_failed_starts=False):
    """Run a single optimization start with the requested backend and
    assert that the reported objective value is a float."""
    opts = {'maxiter': 100}

    # dispatch table instead of an if/elif chain; unknown libraries
    # leave the optimizer as None, as before
    builders = {
        'scipy': lambda: pypesto.ScipyOptimizer(method=solver, options=opts),
        'dlib': lambda: pypesto.DlibOptimizer(method=solver, options=opts),
        'pyswarm': lambda: pypesto.PyswarmOptimizer(options=opts),
    }
    build = builders.get(library)
    optimizer = build() if build is not None else None

    bounds_lo = np.zeros((1, 2))
    bounds_hi = np.ones((1, 2))
    problem = pypesto.Problem(objective, bounds_lo, bounds_hi)

    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=pypesto.OptimizeOptions(
            allow_failed_starts=allow_failed_starts),
    )

    assert isinstance(result.optimize_result.list[0]['fval'], float)
Exemple #3
0
 def _test_petab(self, engine):
     """Run a 2-start optimization on the Zheng_PNAS2012 PEtab problem
     with the given parallelization engine and check that both starts
     are reported in the result."""
     # NOTE(review): 'folder_base' is assumed to be a module-level path
     # defined elsewhere in this file - TODO confirm
     petab_importer = pypesto.PetabImporter.from_folder(
         folder_base + "Zheng_PNAS2012")
     objective = petab_importer.create_objective()
     problem = petab_importer.create_problem(objective)
     result = pypesto.minimize(problem=problem, n_starts=2, engine=engine)
     self.assertTrue(len(result.optimize_result.as_list()) == 2)
def test_preeq_guesses():
    """Check optimization with preequilibration guesses: steady-state
    guesses must be written during optimization, and the gradient must
    stay correct while guesses are in use."""
    problem_def = petab.Problem.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    problem_def.model_name = "Zheng_PNAS2012"
    importer = pypesto.PetabImporter(problem_def)
    objective = importer.create_objective()
    pesto_problem = importer.create_problem(objective)

    res = pypesto.minimize(
        problem=pesto_problem,
        optimizer=pypesto.ScipyOptimizer('ls_trf'),
        n_starts=2,
    )

    # steady-state guesses must have been recorded during optimization
    assert pesto_problem.objective.steadystate_guesses['fval'] < np.inf
    assert len(objective.steadystate_guesses['data']) == 1

    # finite-difference check of the gradient at the best found point
    grad_check = objective.check_grad(
        res.optimize_result.list[0]['x'],
        eps=1e-3,
        verbosity=0,
        mode=pypesto.objective.constants.MODE_FUN,
    )
    print("relative errors MODE_FUN: ", grad_check.rel_err.values)
    print("absolute errors MODE_FUN: ", grad_check.abs_err.values)
    assert np.all((grad_check.rel_err.values < RTOL)
                  | (grad_check.abs_err.values < ATOL))
Exemple #5
0
def test_ground_truth():
    """Sample a Gaussian problem and verify via Kolmogorov-Smirnov tests
    that the first chain matches a normal distribution and does not
    match a uniform one."""
    # best self-implemented sampler, which has a chance of correctly
    # sampling from the distribution
    inner = pypesto.AdaptiveMetropolisSampler()
    sampler = pypesto.AdaptiveParallelTemperingSampler(
        internal_sampler=inner, n_chains=5)

    problem = gaussian_problem()
    result = pypesto.minimize(problem)
    result = pypesto.sample(problem, n_samples=10000,
                            result=result, sampler=sampler)

    # samples of the first chain
    chain0 = result.sample_result.trace_x[0].flatten()

    # should look normal ...
    stat, p = kstest(chain0, 'norm')
    print(stat, p)
    assert stat < 0.1

    # ... and clearly not uniform
    stat, p = kstest(chain0, 'uniform')
    print(stat, p)
    assert stat > 0.1
Exemple #6
0
 def _test_basic(self, engine):
     """Run a 9-start Rosenbrock optimization with the given engine and
     check that all starts are reported."""
     objective = test_objective.rosen_for_sensi(max_sensi_order=2)['obj']
     bounds_lo, bounds_hi = np.zeros((1, 2)), np.ones((1, 2))
     problem = pypesto.Problem(objective, bounds_lo, bounds_hi)
     result = pypesto.minimize(problem=problem, n_starts=9, engine=engine)
     self.assertTrue(len(result.optimize_result.as_list()) == 9)
Exemple #7
0
def _test_petab(engine):
    """Optimize the Zheng_PNAS2012 PEtab problem with the given engine
    (3 starts, few iterations) and check that all starts are reported."""
    optimizer = pypesto.ScipyOptimizer(options={'maxiter': 10})
    importer = pypesto.PetabImporter.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    problem = importer.create_problem(importer.create_objective())
    result = pypesto.minimize(
        problem=problem, n_starts=3, engine=engine, optimizer=optimizer)
    assert len(result.optimize_result.as_list()) == 3
Exemple #8
0
def _multistart_on_kernel(fun, lb, ub):
    """Run a 100-start scipy optimization of `fun` within [lb, ub] and
    return the pypesto result."""
    solver_opts = {'maxiter': 10000, 'maxfun': 10000, 'disp': False}
    problem = pypesto.Problem(
        objective=pypesto.Objective(fun=fun), lb=lb, ub=ub)
    return pypesto.minimize(
        problem=problem,
        optimizer=pypesto.ScipyOptimizer(options=solver_opts),
        n_starts=100,
    )
    def test_3_optimize(self):
        """Optimize each imported PEtab problem (2 starts, few iterations)
        and check the best objective value is finite."""
        # run optimization
        # NOTE(review): assumes self.obj_edatas / self.petab_importers were
        # populated by an earlier test step - TODO confirm ordering
        for obj_edatas, importer in \
                zip(self.obj_edatas, self.petab_importers):
            # first entry is the objective; remaining entries are edatas
            obj = obj_edatas[0]
            optimizer = pypesto.ScipyOptimizer(options={'maxiter': 10})
            problem = importer.create_problem(obj)
            result = pypesto.minimize(
                problem=problem, optimizer=optimizer, n_starts=2)

            self.assertTrue(np.isfinite(
                result.optimize_result.get_for_key('fval')[0]))
Exemple #10
0
def _test_basic(engine):
    """Run a 5-start Rosenbrock optimization with the given engine and
    check that all starts are reported."""
    objective = test_objective.rosen_for_sensi(max_sensi_order=2)['obj']
    problem = pypesto.Problem(objective, np.zeros((1, 2)), np.ones((1, 2)))
    result = pypesto.minimize(
        problem=problem,
        n_starts=5,
        engine=engine,
        optimizer=pypesto.ScipyOptimizer(options={'maxiter': 10}),
    )
    assert len(result.optimize_result.as_list()) == 5
Exemple #11
0
def test_pipeline(sampler, problem):
    """Check that a typical optimize-sample-visualize pipeline runs
    through without errors."""
    # optimization
    result = pypesto.minimize(
        problem, n_starts=3,
        optimizer=pypesto.ScipyOptimizer(options={'maxiter': 10}))

    # sampling, seeded from the optimization result
    result = pypesto.sample(
        problem, sampler=sampler, n_samples=20, result=result)

    # some plot, closed immediately to avoid leaking figures
    pypesto.visualize.sampling_1d_marginals(result)
    plt.close()
Exemple #12
0
    def test_optimize(self):
        """Run an optimization whose objective always raises, with
        allow_failed_starts enabled, and check that the failure message
        ends up in the configured log file."""
        # logging: console at WARN, file at DEBUG
        pypesto.logging.log_to_console(logging.WARN)
        filename = ".test_logging.tmp"
        pypesto.logging.log_to_file(logging.DEBUG, filename)
        logger = logging.getLogger('pypesto')
        if os.path.exists(filename):
            os.remove(filename)
        fh = logging.FileHandler(filename)
        fh.setLevel(logging.DEBUG)
        logger.addHandler(fh)
        logger.info("start test")

        # problem definition: every evaluation fails, so every start
        # fails and the failure gets logged
        def fun(_):
            raise Exception("This function cannot be called.")

        objective = pypesto.Objective(fun=fun)
        problem = pypesto.Problem(objective, -1, 1)

        optimizer = pypesto.ScipyOptimizer()
        options = {'allow_failed_starts': True}

        # optimization: must not raise despite the failing objective
        pypesto.minimize(problem, optimizer, 5, options=options)

        # assert logging worked
        self.assertTrue(os.path.exists(filename))
        # context manager guarantees the handle is closed even if the
        # read fails (previously a manual open/close pair)
        with open(filename, 'rb') as f:
            content = str(f.read())

        # tidy up
        os.remove(filename)

        # check if error message got inserted
        self.assertTrue("fail" in content)
Exemple #13
0
def create_optimization_history():
    """Run a 5-start TNC optimization on the default problem and return
    the result carrying the optimization history."""
    # the pypesto problem
    problem = create_problem()

    # TNC optimizer with a generous iteration budget
    optimizer = pypesto.ScipyOptimizer(
        method='TNC', options={'maxiter': 200})

    # run optimization, tolerating failed starts
    opts = pypesto.OptimizeOptions(allow_failed_starts=True)
    return pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=opts)
Exemple #14
0
def create_optimization_results(objective):
    """Build a bounded problem around `objective`, run a 5-start TNC
    optimization and return (problem, result, optimizer)."""
    optimizer = pypesto.ScipyOptimizer(
        method='TNC', options={'maxiter': 200})

    bounds_lo = -2 * np.ones((1, 2))
    bounds_hi = 2 * np.ones((1, 2))
    problem = pypesto.Problem(objective, bounds_lo, bounds_hi)

    # run optimization, tolerating failed starts
    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=pypesto.OptimizeOptions(allow_failed_starts=True))

    return problem, result, optimizer
Exemple #15
0
def test_optimize():
    """Optimize a problem with fixed parameters and check how fixed and
    free components are reported in the optimizer result."""
    problem = create_problem()
    result = pypesto.minimize(problem, pypesto.ScipyOptimizer(), 5)

    best = result.optimize_result.list[0]
    assert len(best.x) == 5
    assert len(best.grad) == 5

    # maybe not what we want, but that's how it is right now
    assert len(problem.ub) == 3

    # nans written into unknown components
    assert np.isnan(best.grad[1])

    # fixed values written into parameter vector
    assert best.x[1] == 1

    # full-length lower bound vector
    assert len(problem.get_full_vector(problem.lb)) == 5
Exemple #16
0
def parameter_estimation(
    objective,
    library,
    solver,
    fixed_pars,
    n_starts,
):
    """Run a multi-start parameter estimation with the requested backend.

    Parameters
    ----------
    objective: pypesto objective wrapping an AMICI model (reads
        ``objective.dim`` and ``objective.amici_model``).
    library: optimizer backend, one of 'scipy', 'dlib'.
    solver: method name passed to the optimizer.
    fixed_pars: indices of parameters to fix at their model values.
    n_starts: number of optimization starts.

    Returns
    -------
    The list of per-start optimizer results.

    Raises
    ------
    ValueError if `library` is not a supported backend.
    """
    options = {
        'maxiter': 100
    }

    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver,
                                           options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver,
                                          options=options)
    else:
        # previously an unknown library fell through and crashed with
        # NameError on 'optimizer'; fail explicitly instead
        raise ValueError("This code should not be reached")

    # write per-start traces to temp files; '{index}' is filled per start
    optimizer.temp_file = os.path.join('test', 'tmp_{index}.csv')

    lb = -2 * np.ones((1, objective.dim))
    ub = 2 * np.ones((1, objective.dim))
    # fix the selected parameters at the model's current values
    pars = objective.amici_model.getParameters()
    problem = pypesto.Problem(objective, lb, ub,
                              x_fixed_indices=fixed_pars,
                              x_fixed_vals=[pars[idx] for idx in fixed_pars]
                              )

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False,
        startpoint_resample=True,
    )

    results = pypesto.minimize(
        problem, optimizer, n_starts, options=optimize_options,
    )
    # previously computed but discarded; return so callers can inspect it
    return results.optimize_result.list
Exemple #17
0
# OPTIMIZATION WITHOUT PRIOR ___________________________________________________________________________________________

# NOTE(review): 'importer', 'obj', 'time' and 'compute_converged_points_single'
# are assumed to be defined earlier in this script - TODO confirm
optimizer = pypesto.ScipyOptimizer(method='L-BFGS-B')

# play with optimization options
optimizer.options = {'maxiter': 1e5, 'ftol': 1e-10, 'gtol': 1e-10, 'maxls': 80}
# optimizer.options = {'maxcor': 10, 'ftol': 1e-10, 'gtol': 1e-05, 'eps': 1e-08, 'maxfun': 1e5,
#                     'maxiter': 1e5, 'maxls': 20}

# build the problem and run a 10-start optimization on a single core,
# measuring wall-clock time around the minimize call
problem = importer.create_problem(obj)
engine = pypesto.SingleCoreEngine()
n_starts = 10
start = time.time()
result = pypesto.minimize(problem=problem,
                          optimizer=optimizer,
                          n_starts=n_starts,
                          engine=engine)
end = time.time()

# report the best parameter vector and re-evaluate the objective at it
print('\nbest parameter: ', result.optimize_result.as_list('x')[0]['x'])
print('best likelihood value: ',
      obj(result.optimize_result.as_list('x')[0]['x']))

# calculate computation time
comp_time = end - start

# calculate converged points
conv_points = compute_converged_points_single(result=result)
print('converted points: ', conv_points)
# SAVE STARTPOINTS _____________________________________________________________________________________________________
    'gtol': 1e-4,
}

# NOTE(review): 'optim_options', 'problem', 'basedir' and 'setting' are
# assumed to be defined earlier in this script - TODO confirm
optimizer = pypesto.ScipyOptimizer(
    method='ls_trf',
    options=optim_options
)

# resample failed start points and tolerate failed starts
optimize_options = pypesto.optimize.optimize.OptimizeOptions(
    startpoint_resample=True,
    allow_failed_starts=True,
)

result = pypesto.minimize(
    problem=problem,
    optimizer=optimizer,
    n_starts=10,
    options=optimize_options
)

# save parameter and waterfall plots for the first 5 starts as PDFs
resultdir = os.path.join(basedir, 'results')
os.makedirs(resultdir, exist_ok=True)
pypesto.visualize.parameters(result, start_indices=range(5))
plt.savefig(
    os.path.join(resultdir, f'parameters__{setting}.pdf')
)

pypesto.visualize.waterfall(result, start_indices=range(5))
plt.savefig(
    os.path.join(resultdir, f'waterfall__{setting}.pdf')
)
Exemple #19
0
    def check_history(self):
        """Run a single optimization start with history recording enabled
        and verify the recorded trace is internally consistent: first /
        final / start entries match the reported start data, and each
        traced quantity matches a fresh re-evaluation of the objective."""
        self.problem = pypesto.Problem(self.obj, self.lb, self.ub)

        optimize_options = pypesto.OptimizeOptions(
            allow_failed_starts=False
        )

        # record a full trace (except hessians), saved every iteration
        history_options = pypesto.HistoryOptions(
            trace_record=True,
            trace_record_hess=False,
            trace_save_iter=1,
            storage_file='tmp/traces/conversion_example_{id}.csv',
        )

        result = pypesto.minimize(
            problem=self.problem,
            optimizer=self.optimizer,
            n_starts=1,
            startpoint_method=pypesto.startpoint.uniform,
            options=optimize_options,
            history_options=history_options
        )
        # disable trace from here on
        self.obj.history.options.trace_record = False
        for start in result.optimize_result.list:
            # NOTE(review): accesses the private pandas trace; assumes a
            # multi-indexed DataFrame with ('fval', NaN) etc. columns
            trace = start.history._trace
            # iteration with the smallest fval, and the first iteration
            # with a non-NaN fval
            it_final = int(trace[('fval', np.NaN)].idxmin())
            it_start = int(np.where(np.logical_not(
                np.isnan(trace['fval'].values)
            ))[0][0])
            # first traced x equals the start point
            self.assertTrue(np.isclose(
                trace['x'].values[0, :], start.x0
            ).all())
            # best traced x equals the reported optimum
            self.assertTrue(np.isclose(
                trace['x'].values[it_final, :], start.x
            ).all())
            # first valid traced fval equals the reported initial fval
            self.assertTrue(np.isclose(
                trace['fval'].values[it_start, 0], start.fval0
            ))

            # map each traced quantity to a function that recomputes it
            funs = {
                'fval': self.obj.get_fval,
                'grad': self.obj.get_grad,
                'hess': self.obj.get_hess,
                'res': self.obj.get_res,
                'sres': self.obj.get_sres,
                'chi2': lambda x: res_to_chi2(self.obj.get_res(x)),
                'schi2': lambda x: sres_to_schi2(*self.obj(
                    x,
                    (0, 1,),
                    pypesto.objective.constants.MODE_RES
                ))
            }
            # compare the first 5 trace entries against re-evaluations
            for var, fun in funs.items():
                for it in range(5):
                    if var in ['fval', 'chi2']:
                        # scalar entries; skip NaN placeholders
                        if not np.isnan(trace[var].values[it, 0]):
                            self.assertTrue(np.isclose(
                                trace[var].values[it, 0],
                                fun(trace['x'].values[it, :])
                            ))
                    elif var in ['hess', 'sres', 'res']:
                        # array-valued entries stored as objects; skip None
                        if trace[var].values[it, 0] is not None:
                            self.assertTrue(np.isclose(
                                trace[var].values[it, 0],
                                fun(trace['x'].values[it, :])
                            ).all())
                    elif self.obj.history.options[f'trace_record_{var}'] \
                            and not \
                            np.isnan(trace[var].values[it, :]).all():
                        # vector entries, only if recording was enabled
                        self.assertTrue(np.isclose(
                            trace[var].values[it, :],
                            fun(trace['x'].values[it, :])
                        ).all())