Exemplo n.º 1
0
    def setUp(cls):
        """Build two LSQ problems (training-only and all-data) and optimize both."""
        # shared box bounds for both problems
        cls.lb, cls.ub = np.array([-1]), np.array([5])

        # problem fitted against the training data only
        cls.problem_training_data = pypesto.Problem(
            lsq_residual_objective(0), cls.lb, cls.ub
        )

        # problem fitted against training and additional data combined
        combined_objective = pypesto.objective.AggregatedObjective(
            [lsq_residual_objective(0), lsq_residual_objective(2)]
        )
        cls.problem_all_data = pypesto.Problem(
            combined_objective, cls.lb, cls.ub
        )

        # optimum f(0)=0
        cls.result_training_data = optimize.minimize(
            cls.problem_training_data, n_starts=5, filename=None
        )
        # Optimum f(1)=2
        cls.result_all_data = optimize.minimize(
            cls.problem_all_data, n_starts=5, filename=None
        )
Exemplo n.º 2
0
def test_x_names():
    """Test that `x_names` are handled properly."""
    kwargs = {
        'objective': pypesto.Objective(),
        'lb': [-5] * 3,
        'ub': [4] * 3,
        'x_fixed_indices': [1],
        'x_fixed_vals': [42.0],
    }

    # duplicate names must be rejected
    with pytest.raises(ValueError):
        pypesto.Problem(x_names=['x1', 'x2', 'x2'], **kwargs)

    # a name list whose length mismatches the dimension must be rejected
    for bad_names in (['x1', 'x2'], ['x1', 'x2', 'x3', 'x4']):
        with pytest.raises(AssertionError):
            pypesto.Problem(x_names=bad_names, **kwargs)

    # a correctly sized list is stored as given
    problem = pypesto.Problem(x_names=['a', 'b', 'c'], **kwargs)
    assert problem.x_names == ['a', 'b', 'c']

    # without names, defaults x0..x2 are generated
    problem = pypesto.Problem(**kwargs)
    assert problem.x_names == ['x0', 'x1', 'x2']
Exemplo n.º 3
0
def test_mode():
    """
    Tests the maximum/optimum for priors in different scales...
    """

    scales = ['lin', 'log', 'log10']
    prior_types = ['normal', 'laplace', 'logNormal']

    # per-scale box bounds and the location where the prior mode is expected
    problem_dict = {
        'lin': {
            'lb': [0],
            'ub': [10],
            'opt': [1]
        },
        'log': {
            'lb': [-3],
            'ub': [3],
            'opt': [0]
        },
        'log10': {
            'lb': [-3],
            'ub': [2],
            'opt': [0]
        }
    }

    for prior_type, scale in itertools.product(prior_types, scales):

        # a single prior on parameter index 0 with parameters [1, 1]
        prior_list = [get_parameter_prior_dict(0, prior_type, [1, 1], scale)]

        test_prior = NegLogParameterPriors(prior_list)
        test_problem = pypesto.Problem(test_prior,
                                       lb=problem_dict[scale]['lb'],
                                       ub=problem_dict[scale]['ub'],
                                       dim_full=1,
                                       x_scales=[scale])

        # derivative-free optimizer: only function values are required here
        optimizer = pypesto.optimize.ScipyOptimizer(method='Nelder-Mead')

        result = pypesto.optimize.minimize(problem=test_problem,
                                           optimizer=optimizer,
                                           n_starts=10)

        # the best point found must match the known mode of the prior
        assert np.isclose(result.optimize_result.list[0]['x'],
                          problem_dict[scale]['opt'],
                          atol=1e-04)

    # test uniform distribution:
    for scale in scales:
        prior_dict = get_parameter_prior_dict(0, 'uniform', [1, 2], scale)

        # check inside and outside of interval
        assert abs(prior_dict['density_fun'](lin_to_scaled(.5, scale)) -
                   0) < 1e-8

        assert abs(prior_dict['density_fun'](lin_to_scaled(1.5, scale)) -
                   math.log(1)) < 1e-8

        assert abs(prior_dict['density_fun'](lin_to_scaled(2.5, scale)) -
                   0) < 1e-8
Exemplo n.º 4
0
def gaussian_problem():
    """Build a pypesto problem for a Gaussian negative log-likelihood on [-10, 10]."""
    objective = pypesto.Objective(fun=lambda x: -gaussian_llh(x))
    return pypesto.Problem(objective=objective, lb=[-10], ub=[10])
Exemplo n.º 5
0
def parameter_estimation(objective, library, solver, fixed_pars, n_starts):
    """Run a short multistart estimation of `objective` using `library`/`solver`."""
    # least-squares solvers (prefix 'ls_') take 'max_nfev' instead of 'maxiter'
    is_least_squares = re.match(r'(?i)^(ls_)', solver) is not None
    options = {'max_nfev': 10} if is_least_squares else {'maxiter': 10}

    if library == 'scipy':
        optimizer = pypesto.ScipyOptimizer(method=solver, options=options)
    elif library == 'dlib':
        optimizer = pypesto.DlibOptimizer(method=solver, options=options)
    elif library == 'pyswarm':
        optimizer = pypesto.PyswarmOptimizer(options=options)
    else:
        raise ValueError("This code should not be reached")

    optimizer.temp_file = os.path.join('test', 'tmp_{index}.csv')

    # box bounds [-2, 2] in the objective's full dimension
    dim = len(objective.x_ids)
    lb, ub = -2 * np.ones((1, dim)), 2 * np.ones((1, dim))

    # fix the requested parameters to the model's current values
    pars = objective.amici_model.getParameters()
    fixed_vals = [pars[idx] for idx in fixed_pars]
    problem = pypesto.Problem(
        objective,
        lb,
        ub,
        x_fixed_indices=fixed_pars,
        x_fixed_vals=fixed_vals,
    )

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=False,
        startpoint_resample=True,
    )

    pypesto.minimize(problem, optimizer, n_starts, options=optimize_options)
Exemplo n.º 6
0
def test_empty_prior():
    """Check that priors are zero when none are defined."""
    # negative log posterior without any prior contribution
    posterior_fun = pypesto.Objective(fun=negative_log_posterior)

    # problem deliberately constructed without a prior object
    test_problem = pypesto.Problem(
        objective=posterior_fun, lb=-10, ub=10, x_names=['x']
    )

    result = sample.sample(
        test_problem,
        n_samples=50,
        sampler=sample.AdaptiveMetropolisSampler(),
        x0=np.array([0.0]),
        filename=None,
    )

    # the log-prior trace of the first chain must vanish everywhere
    logprior_trace = -result.sample_result.trace_neglogprior[0, :]
    assert (logprior_trace == 0.0).all()
Exemplo n.º 7
0
def test_profile_with_fixed_parameters():
    """Test using profiles with fixed parameters."""
    obj = test_objective.rosen_for_sensi(max_sensi_order=1)['obj']

    # 5-dimensional box with two of the parameters held fixed
    problem = pypesto.Problem(
        objective=obj,
        lb=-2 * np.ones(5),
        ub=2 * np.ones(5),
        x_fixed_vals=[0.5, -1.8],
        x_fixed_indices=[0, 3],
    )

    optimizer = optimize.ScipyOptimizer(options={'maxiter': 50})
    result = optimize.minimize(
        problem=problem, optimizer=optimizer, n_starts=2
    )

    guess_methods = [
        'fixed_step',
        'adaptive_step_order_0',
        'adaptive_step_order_1',
        'adaptive_step_regression',
    ]
    for i_method, next_guess_method in enumerate(guess_methods):
        print(next_guess_method)
        profile.parameter_profile(
            problem=problem,
            result=result,
            optimizer=optimizer,
            next_guess_method=next_guess_method,
        )

        # standard plotting: one axis per free (non-fixed) parameter
        axes = visualize.profiles(result, profile_list_ids=i_method)
        assert len(axes) == 3
        visualize.profile_cis(result, profile_list=i_method)
Exemplo n.º 8
0
def check_minimize(objective, library, solver, allow_failed_starts=False):
    """Run a single-start minimization and check that a float fval is produced."""
    options = {'maxiter': 100}

    # map library name to an optimizer factory; unknown names leave None
    builders = {
        'scipy': lambda: pypesto.ScipyOptimizer(method=solver, options=options),
        'dlib': lambda: pypesto.DlibOptimizer(method=solver, options=options),
        'pyswarm': lambda: pypesto.PyswarmOptimizer(options=options),
    }
    builder = builders.get(library)
    optimizer = builder() if builder is not None else None

    # unit box [0, 1]^2
    problem = pypesto.Problem(objective, np.zeros((1, 2)), np.ones((1, 2)))

    optimize_options = pypesto.OptimizeOptions(
        allow_failed_starts=allow_failed_starts
    )

    result = pypesto.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options,
    )

    assert isinstance(result.optimize_result.list[0]['fval'], float)
Exemplo n.º 9
0
def test_mpipoolengine():
    """
    Test the MPIPoolEngine by calling an example script with mpiexec.
    """
    try:
        # get the path to this file:
        path = os.path.dirname(__file__)
        # run the example file.
        # NOTE(review): this assumes 'mpiexec' is on PATH and the example
        # script writes 'temp_result.h5' into the current working directory.
        subprocess.check_call(  # noqa: S603,S607
            [
                'mpiexec',
                '-np',
                '2',
                'python',
                '-m',
                'mpi4py.futures',
                f'{path}/../../doc/example/example_MPIPool.py',
            ]
        )

        # read results produced by the MPI run
        result1 = read_result('temp_result.h5', problem=True, optimize=True)
        # set optimizer
        optimizer = optimize.FidesOptimizer(verbose=0)
        # initialize problem with x_guesses and objective
        objective = pypesto.Objective(
            fun=sp.optimize.rosen,
            grad=sp.optimize.rosen_der,
            hess=sp.optimize.rosen_hess,
        )
        # reuse the MPI run's start points so both runs start identically
        x_guesses = np.array(
            [result1.optimize_result.list[i]['x0'] for i in range(2)]
        )
        problem = pypesto.Problem(
            objective=objective,
            ub=result1.problem.ub,
            lb=result1.problem.lb,
            x_guesses=x_guesses,
        )
        # rerun the same optimization with a multi-process engine
        result2 = optimize.minimize(
            problem=problem,
            optimizer=optimizer,
            n_starts=2,
            engine=pypesto.engine.MultiProcessEngine(),
            filename=None,
        )

        # both engines must converge to the same final parameters per start
        for ix in range(2):
            assert_almost_equal(
                result1.optimize_result.list[ix]['x'],
                result2.optimize_result.list[ix]['x'],
                err_msg='The final parameter values '
                'do not agree for the engines.',
            )

    finally:
        # clean up the result file even if the test fails
        if os.path.exists('temp_result.h5'):
            # delete data
            os.remove('temp_result.h5')
Exemplo n.º 10
0
 def _test_basic(self, engine):
     """Minimize the Rosenbrock problem with `engine`; expect 9 start results."""
     objective = test_objective.rosen_for_sensi(max_sensi_order=2)['obj']
     # unit box [0, 1]^2
     lb = np.zeros((1, 2))
     ub = np.ones((1, 2))
     problem = pypesto.Problem(objective, lb, ub)
     result = pypesto.minimize(problem=problem, n_starts=9, engine=engine)
     self.assertTrue(len(result.optimize_result.as_list()) == 9)
Exemplo n.º 11
0
 def get_problem(self):
     """Full pypesto problem."""
     # bundle this object's objective, box bounds and start guesses
     objective = self.get_objective()
     return pypesto.Problem(
         objective=objective,
         lb=self.lb,
         ub=self.ub,
         x_guesses=self.x_guesses,
     )
Exemplo n.º 12
0
def gaussian_mixture_problem():
    """Problem based on a mixture of gaussians."""
    # negative log-likelihood of the mixture as the objective
    objective = pypesto.Objective(fun=lambda x: -gaussian_mixture_llh(x))
    return pypesto.Problem(
        objective=objective, lb=[-10], ub=[10], x_names=['x']
    )
Exemplo n.º 13
0
def test_unbounded_minimize(optimizer):
    """
    Test unbounded optimization using various optimizers and objective modes.

    Bounds are +/-inf; `lb_init`/`ub_init` are only used to sample start
    points, so the optimum at (1, 1) lies outside the init box.
    """
    lb_init = 1.1 * np.ones((1, 2))
    lb = -np.inf * np.ones(lb_init.shape)
    ub_init = 1.11 * np.ones((1, 2))
    ub = np.inf * np.ones(ub_init.shape)
    problem = pypesto.Problem(
        rosen_for_sensi(max_sensi_order=2)['obj'],
        lb, ub, lb_init=lb_init, ub_init=ub_init
    )
    opt = get_optimizer(*optimizer)

    options = optimize.OptimizeOptions(allow_failed_starts=False)

    # skip least-squares solvers (prefix 'ls_'): they are not exercised here.
    # BUGFIX: the previous pattern r'^(?i)(ls_)' put the global inline flag
    # after '^', which raises re.error on Python >= 3.11 (deprecated since
    # 3.6); pass re.IGNORECASE explicitly instead.
    if isinstance(optimizer[1], str) and re.match(
        r'^ls_', optimizer[1], flags=re.IGNORECASE
    ):
        return

    # global optimizers require finite bounds and must raise on +/-inf
    if optimizer in [('dlib', ''), ('pyswarm', ''), ('cmaes', ''),
                     *[('nlopt', method) for method in [
                         nlopt.GN_ESCH, nlopt.GN_ISRES, nlopt.GN_AGS,
                         nlopt.GD_STOGO, nlopt.GD_STOGO_RAND, nlopt.G_MLSL,
                         nlopt.G_MLSL_LDS, nlopt.GD_MLSL, nlopt.GD_MLSL_LDS,
                         nlopt.GN_CRS2_LM, nlopt.GN_ORIG_DIRECT,
                         nlopt.GN_ORIG_DIRECT_L, nlopt.GN_DIRECT,
                         nlopt.GN_DIRECT_L, nlopt.GN_DIRECT_L_NOSCAL,
                         nlopt.GN_DIRECT_L_RAND,
                         nlopt.GN_DIRECT_L_RAND_NOSCAL]]]:
        with pytest.raises(ValueError):
            optimize.minimize(
                problem=problem,
                optimizer=opt,
                n_starts=1,
                startpoint_method=pypesto.startpoint.uniform,
                options=options
            )
        return
    else:
        result = optimize.minimize(
            problem=problem,
            optimizer=opt,
            n_starts=1,
            startpoint_method=pypesto.startpoint.uniform,
            options=options
        )

    # check that ub/lb were reverted
    assert isinstance(result.optimize_result.list[0]['fval'], float)
    if optimizer not in [('scipy', 'ls_trf'), ('scipy', 'ls_dogbox')]:
        assert np.isfinite(result.optimize_result.list[0]['fval'])
        assert result.optimize_result.list[0]['x'] is not None
    # check that result is not in bounds, optimum is at (1,1), so you would
    # hope that any reasonable optimizer manage to finish with x < ub,
    # but I guess some are pretty terrible
    assert np.any(result.optimize_result.list[0]['x'] < lb_init) or \
        np.any(result.optimize_result.list[0]['x'] > ub_init)
Exemplo n.º 14
0
def gaussian_mixture_separated_modes_problem():
    """Problem based on a mixture of gaussians with far/separated modes."""
    # negative log-likelihood of the separated-modes mixture
    objective = pypesto.Objective(
        fun=lambda x: -gaussian_mixture_separated_modes_llh(x)
    )
    return pypesto.Problem(
        objective=objective, lb=[-100], ub=[200], x_names=['x']
    )
Exemplo n.º 15
0
def rosenbrock_problem():
    """Problem based on rosenbrock objective."""
    dim_full = 2
    # column-vector bounds of shape (dim_full, 1)
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    return pypesto.Problem(
        objective=pypesto.Objective(fun=so.rosen), lb=lb, ub=ub
    )
Exemplo n.º 16
0
def test_dim_vs_dim_full():
    """Test passing arrays in the full or reduced dimension."""
    objective = pypesto.Objective()

    # bounds may be given either for all 4 parameters (full) or only for
    # the 2 free ones (reduced); both constructions must succeed
    for n_bounds in (4, 2):
        pypesto.Problem(objective=objective,
                        lb=[-1] * n_bounds,
                        ub=[1] * n_bounds,
                        dim_full=4,
                        x_fixed_indices=[0, 3],
                        x_fixed_vals=[42, 43])
Exemplo n.º 17
0
def problem():
    """A very basic problem."""
    dim = 10
    # 10-dim box with three parameters fixed to constants
    problem = pypesto.Problem(
        objective=pypesto.Objective(),
        lb=[-5] * dim,
        ub=[6] * dim,
        x_fixed_indices=[0, 1, 5],
        x_fixed_vals=[42, 43, 44],
    )
    return problem
Exemplo n.º 18
0
def create_problem():
    """Create a Rosenbrock problem with analytic gradient and Hessian."""
    objective = pypesto.Objective(
        fun=so.rosen, grad=so.rosen_der, hess=so.rosen_hess
    )
    # bounds come from the shared helper
    lb, ub = create_bounds()
    return pypesto.Problem(objective=objective, lb=lb, ub=ub)
Exemplo n.º 19
0
def create_problem(n_parameters: int = 2, x_names: Sequence[str] = None):
    """Create a Rosenbrock problem of configurable dimension with optional names."""
    objective = pypesto.Objective(
        fun=so.rosen,
        grad=so.rosen_der,
        hess=so.rosen_hess,
        x_names=x_names,
    )
    # bounds scale with the requested number of parameters
    lb, ub = create_bounds(n_parameters)
    return pypesto.Problem(objective=objective, lb=lb, ub=ub)
Exemplo n.º 20
0
def problem(request) -> pypesto.Problem:
    """Build the problem selected by the fixture parameter."""
    param = request.param
    if param == 'cr':
        return CRProblem().get_problem()
    if 'rosen' not in param:
        raise ValueError("Unexpected input")
    # rosenbrock variants, optionally with the integrated objective
    obj = rosen_for_sensi(
        max_sensi_order=2, integrated='integrated' in param
    )['obj']
    return pypesto.Problem(
        objective=obj, lb=np.zeros((1, 2)), ub=np.ones((1, 2))
    )
Exemplo n.º 21
0
def create_problem():
    """Create a 5-dim Rosenbrock problem with parameters 1 and 3 fixed to 1."""
    problem = pypesto.Problem(
        objective=rosen_for_sensi(2)['obj'],
        lb=[-3] * 5,
        ub=[3] * 5,
        x_fixed_indices=[1, 3],
        x_fixed_vals=[1, 1],
    )
    return problem
Exemplo n.º 22
0
def _multistart_on_kernel(fun, lb, ub):
    """Run a 100-start scipy optimization of `fun` on the box [lb, ub]."""
    problem = pypesto.Problem(
        objective=pypesto.Objective(fun=fun), lb=lb, ub=ub
    )
    # generous iteration budget, silent solver output
    scipy_options = {'maxiter': 10000, 'maxfun': 10000, 'disp': False}
    optimizer = pypesto.ScipyOptimizer(options=scipy_options)
    return pypesto.minimize(
        problem=problem, optimizer=optimizer, n_starts=100
    )
Exemplo n.º 23
0
def _test_basic(engine):
    """Check that 5 starts with the given engine produce 5 results."""
    objective = rosen_for_sensi(max_sensi_order=2)['obj']
    # unit box [0, 1]^2
    problem = pypesto.Problem(objective, np.zeros((1, 2)), np.ones((1, 2)))
    optimizer = pypesto.optimize.ScipyOptimizer(options={'maxiter': 10})
    result = pypesto.optimize.minimize(
        problem=problem, n_starts=5, engine=engine, optimizer=optimizer
    )
    assert len(result.optimize_result.as_list()) == 5
Exemplo n.º 24
0
def test_ensemble_from_optimization():
    """
    Test reading an ensemble from optimization result.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5

    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    optimizer = optimize.ScipyOptimizer(options={'maxiter': 10})
    # record a full history trace so the history-based ensemble can be built
    history_options = pypesto.HistoryOptions(trace_record=True)
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        history_options=history_options,
    )

    # change fvals of each start: start i gets final fval i+1, and its
    # history trace is rewritten to decrease towards that value, so the
    # cutoff below selects a known subset of starts/iterations
    for i_start, optimizer_result in enumerate(result.optimize_result.list):
        optimizer_result['fval'] = i_start + 1
        for i_iter in range(len(optimizer_result['history']._trace['fval'])):
            optimizer_result['history']._trace['fval'][i_iter] = (
                len(optimizer_result['history']._trace['fval']) + i_start -
                i_iter)

    # test_endpoints
    ensemble_ep = Ensemble.from_optimization_endpoints(result=result,
                                                       cutoff=4,
                                                       max_size=10)

    ensemble_hist = Ensemble.from_optimization_history(result=result,
                                                       cutoff=4,
                                                       max_size=10,
                                                       max_per_start=5)

    # compare vector_tags with the expected values:
    # endpoint tags: (start id, -1) for the four starts below the cutoff
    ep_tags = [(int(result.optimize_result.list[i]['id']), -1)
               for i in [0, 1, 2, 3]]

    # history tags: (start id, iteration index) for the trailing iterations
    # of each qualifying start whose rewritten fval passes the cutoff
    hist_tags = [(
        int(result.optimize_result.list[i]['id']),
        len(result.optimize_result.list[i]['history']._trace['fval']) - 1 - j,
    ) for i in range(4) for j in reversed(range(4 - i))]
    assert hist_tags == ensemble_hist.vector_tags
    assert ep_tags == ensemble_ep.vector_tags
Exemplo n.º 25
0
def create_problem():
    """Create a Rosenbrock problem whose objective records a trace."""
    # record the history at every iteration
    objective_options = pypesto.ObjectiveOptions(
        trace_record=True, trace_save_iter=1
    )
    objective = pypesto.Objective(
        fun=sp.optimize.rosen,
        grad=sp.optimize.rosen_der,
        hess=sp.optimize.rosen_hess,
        options=objective_options,
    )

    lb, ub = create_bounds()
    return pypesto.Problem(objective=objective, lb=lb, ub=ub)
Exemplo n.º 26
0
def test_prior():
    """Check that priors are defined for sampling.

    Samples from a posterior with an explicit prior and verifies that
    (a) the prior trace is non-trivial and (b) the samples agree with
    the known ground-truth normal distribution.
    """
    # define negative log posterior
    posterior_fun = pypesto.Objective(fun=negative_log_posterior)

    # define negative log prior
    prior_fun = pypesto.Objective(fun=negative_log_prior)

    # define pypesto prior object
    prior_object = pypesto.NegLogPriors(objectives=[prior_fun])

    # define pypesto problem using prior object
    test_problem = pypesto.Problem(
        objective=posterior_fun,
        x_priors_defs=prior_object,
        lb=-10,
        ub=10,
        x_names=['x'],
    )

    sampler = sample.AdaptiveMetropolisSampler()

    # n_samples is a sample count and should be an int (was the float 1e4)
    result = sample.sample(
        test_problem,
        n_samples=10_000,
        sampler=sampler,
        x0=np.array([0.0]),
        filename=None,
    )

    # get log prior values of first chain
    logprior_trace = -result.sample_result.trace_neglogprior[0, :]

    # check that not all entries are zero
    assert (logprior_trace != 0.0).any()

    # get samples of first chain
    samples = result.sample_result.trace_x[0, :, 0]

    # generate ground-truth samples
    rvs = norm.rvs(size=5000, loc=-1.0, scale=np.sqrt(0.7))

    # check sample distribution agreement with the ground-truth via a
    # two-sample Kolmogorov-Smirnov test (unequal sizes are fine)
    statistic, pval = ks_2samp(rvs, samples)
    print(statistic, pval)

    assert statistic < 0.1
Exemplo n.º 27
0
def rosenbrock_problem():
    """Problem based on rosenbrock objective.

    Features
    --------
    * 2-dim full dimension (the previous docstring incorrectly said 3-dim)
    * has fixed parameters: index 1 is fixed to 2, leaving one free parameter
    """
    objective = pypesto.Objective(fun=so.rosen)

    dim_full = 2
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))

    problem = pypesto.Problem(
            objective=objective, lb=lb, ub=ub,
            x_fixed_indices=[1], x_fixed_vals=[2])
    return problem
Exemplo n.º 28
0
def create_optimization_results(objective):
    """Optimize `objective` on [-2, 2]^2 and return problem, result, optimizer."""
    optimizer = optimize.ScipyOptimizer(method='TNC', options={'maxiter': 200})

    # symmetric box bounds [-2, 2]^2
    bound = 2 * np.ones(2)
    problem = pypesto.Problem(objective, -bound, bound)

    # tolerate failed starts so hard objectives do not abort the run
    optimize_options = optimize.OptimizeOptions(allow_failed_starts=True)
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options,
    )

    return problem, result, optimizer
Exemplo n.º 29
0
def test_optimize():
    """Check that optimizer failures are written to the pypesto log file."""
    # logging: console at WARN, file at DEBUG
    pypesto.logging.log_to_console(logging.WARN)
    filename = ".test_logging.tmp"
    pypesto.logging.log_to_file(logging.DEBUG, filename)
    logger = logging.getLogger('pypesto')
    if os.path.exists(filename):
        os.remove(filename)
    fh = logging.FileHandler(filename)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    logger.info("start test")

    # problem definition: an objective that always fails, so every start
    # produces an error that should end up in the log
    def fun(_):
        raise Exception("This function cannot be called.")

    objective = pypesto.Objective(fun=fun)
    problem = pypesto.Problem(objective, -1, 1)

    optimizer = pypesto.optimize.ScipyOptimizer()
    options = {'allow_failed_starts': True}

    # optimization
    pypesto.optimize.minimize(problem,
                              optimizer,
                              5,
                              options=options,
                              filename=None)

    # assert logging worked; context manager closes the file reliably
    assert os.path.exists(filename)
    with open(filename, 'rb') as f:
        content = str(f.read())

    # tidy up: detach and close the handler BEFORE removing the file,
    # otherwise the logger keeps a handle to the deleted file and later
    # log records are silently lost
    logger.removeHandler(fh)
    fh.close()
    os.remove(filename)

    # check if error message got inserted
    assert "fail" in content
Exemplo n.º 30
0
def setup_rosen_problem(n_starts: int = 2):
    """Set up a 10-dim Rosenbrock problem with pre-sampled start points."""
    objective = pypesto.Objective(
        fun=sp.optimize.rosen,
        grad=sp.optimize.rosen_der,
        hess=sp.optimize.rosen_hess,
    )

    # column-vector bounds of shape (10, 1)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))

    # fix the start points up front so repeated runs are comparable
    startpoints = pypesto.startpoint.latin_hypercube(
        n_starts=n_starts, lb=lb, ub=ub
    )
    return pypesto.Problem(
        objective=objective, lb=lb, ub=ub, x_guesses=startpoints
    )