Example no. 1
def test_empty_prior():
    """Check that priors are zero when none are defined."""
    # define negative log posterior
    posterior_fun = pypesto.Objective(fun=negative_log_posterior)

    # define pypesto problem without prior object
    test_problem = pypesto.Problem(objective=posterior_fun,
                                   lb=-10,
                                   ub=10,
                                   x_names=['x'])

    sampler = sample.AdaptiveMetropolisSampler()

    result = sample.sample(
        test_problem,
        n_samples=50,
        sampler=sampler,
        x0=np.array([0.0]),
        filename=None,
    )

    # get log prior values of first chain
    logprior_trace = -result.sample_result.trace_neglogprior[0, :]

    # check that all entries are zero
    assert (logprior_trace == 0.0).all()
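The helper negative_log_posterior is defined elsewhere in the test module. A minimal stand-in, assuming a standard-normal negative log density (an assumption, not the original definition), could look like:

import numpy as np

def negative_log_posterior(x):
    # negative log density of a standard normal, up to an additive constant
    return 0.5 * float(np.sum(np.asarray(x) ** 2))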
Example no. 2
    def get_objective(
        self,
        fun: bool = True,
        res: bool = True,
        max_sensi_order: int = 2,
        fim_for_hess: bool = False,
    ):
        """Full pyPESTO objective function.

        Parameters
        ----------
        fun: Whether the objective can calculate function values.
        res: Whether the objective can calculate residuals.
        max_sensi_order: Maximum sensitivity order the function can calculate.
        fim_for_hess: Whether to use the FIM instead of the Hessian.
        """
        if fim_for_hess:
            fhess = self.get_ffim()
        else:
            fhess = self.get_fs2nllh()

        return pypesto.Objective(
            fun=self.get_fnllh() if fun else None,
            grad=self.get_fsnllh() if fun and max_sensi_order >= 1 else None,
            hess=fhess if fun and max_sensi_order >= 2 else None,
            res=self.get_fres() if res else None,
            sres=self.get_fsres() if res and max_sensi_order >= 1 else None,
        )
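A hedged usage sketch for the factory above; model and x0 are placeholder names for whatever object exposes the get_f* methods and for a parameter vector, and are assumptions rather than names from the source:

# hypothetical: gradient-enabled objective with the FIM as Hessian approximation
objective = model.get_objective(fun=True, res=False,
                                max_sensi_order=2, fim_for_hess=True)
fval, grad = objective(x0, sensi_orders=(0, 1))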
Example no. 3
def test_x_names():
    """Test that `x_names` are handled properly."""
    kwargs = {
        'objective': pypesto.Objective(),
        'lb': [-5] * 3,
        'ub': [4] * 3,
        'x_fixed_indices': [1],
        'x_fixed_vals': [42.0],
    }

    # non-unique values
    with pytest.raises(ValueError):
        pypesto.Problem(x_names=['x1', 'x2', 'x2'], **kwargs)

    # too few or too many arguments
    with pytest.raises(AssertionError):
        pypesto.Problem(x_names=['x1', 'x2'], **kwargs)
    with pytest.raises(AssertionError):
        pypesto.Problem(x_names=['x1', 'x2', 'x3', 'x4'], **kwargs)

    # all fine
    problem = pypesto.Problem(x_names=['a', 'b', 'c'], **kwargs)
    assert problem.x_names == ['a', 'b', 'c']

    # defaults
    problem = pypesto.Problem(**kwargs)
    assert problem.x_names == ['x0', 'x1', 'x2']
Example no. 4
def test_finite_difference_checks():
    """
    Test the finite difference gradient check methods against the expected
    relative error.
    """
    x = sp.Symbol('x')

    # Setup single-parameter objective function
    fun_expr = x**10
    grad_expr = fun_expr.diff()
    theta = 0.1

    fun = sp.lambdify(x, fun_expr)
    grad = sp.lambdify(x, grad_expr)

    objective = pypesto.Objective(fun=fun, grad=grad)

    def rel_err(eps_):
        """Expected relative error."""
        central_difference = (
            fun(theta + eps_) - fun(theta - eps_)
        ) / (2 * eps_)
        return abs(
            (grad(theta) - central_difference) / (central_difference + eps_)
        )

    # Test the single step size `check_grad` method.
    eps = 1e-5
    result_single_eps = objective.check_grad(np.array([theta]), eps=eps)
    assert result_single_eps['rel_err'].squeeze() == rel_err(eps)

    # Test the multiple step size `check_grad_multi_eps` method.
    multi_eps = {1e-1, 1e-3, 1e-5, 1e-7, 1e-9}
    result_multi_eps = \
        objective.check_grad_multi_eps([theta], multi_eps=multi_eps)
    assert result_multi_eps['rel_err'].squeeze() == \
        min(rel_err(_eps) for _eps in multi_eps)
Example no. 5
def gaussian_problem():
    def nllh(x):
        return -gaussian_llh(x)

    objective = pypesto.Objective(fun=nllh)
    problem = pypesto.Problem(objective=objective, lb=[-10], ub=[10])
    return problem
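gaussian_llh comes from the surrounding test module. A possible stand-in with assumed mean 0 and variance 1 (not the original parameters) would be:

import numpy as np

def gaussian_llh(x):
    # log density of a univariate normal; mean 0 and variance 1 are assumptions
    return -0.5 * float(np.sum(np.asarray(x) ** 2)) - 0.5 * np.log(2 * np.pi)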
Example no. 6
def test_prior():
    """Check that priors are defined for sampling."""
    # define negative log posterior
    posterior_fun = pypesto.Objective(fun=negative_log_posterior)

    # define negative log prior
    prior_fun = pypesto.Objective(fun=negative_log_prior)

    # define pypesto prior object
    prior_object = pypesto.NegLogPriors(objectives=[prior_fun])

    # define pypesto problem using prior object
    test_problem = pypesto.Problem(
        objective=posterior_fun,
        x_priors_defs=prior_object,
        lb=-10,
        ub=10,
        x_names=['x'],
    )

    sampler = sample.AdaptiveMetropolisSampler()

    result = sample.sample(
        test_problem,
        n_samples=1e4,
        sampler=sampler,
        x0=np.array([0.0]),
        filename=None,
    )

    # get log prior values of first chain
    logprior_trace = -result.sample_result.trace_neglogprior[0, :]

    # check that not all entries are zero
    assert (logprior_trace != 0.0).any()

    # get samples of first chain
    samples = result.sample_result.trace_x[0, :, 0]

    # generate ground-truth samples
    rvs = norm.rvs(size=5000, loc=-1.0, scale=np.sqrt(0.7))

    # check sample distribution agreement with the ground-truth
    statistic, pval = ks_2samp(rvs, samples)
    print(statistic, pval)

    assert statistic < 0.1
Example no. 7
def test_mpipoolengine():
    """
    Test the MPIPoolEngine by calling an example script with mpiexec.
    """
    try:
        # get the path to this file:
        path = os.path.dirname(__file__)
        # run the example file.
        subprocess.check_call(  # noqa: S603,S607
            [
                'mpiexec',
                '-np',
                '2',
                'python',
                '-m',
                'mpi4py.futures',
                f'{path}/../../doc/example/example_MPIPool.py',
            ]
        )

        # read results
        result1 = read_result('temp_result.h5', problem=True, optimize=True)
        # set optimizer
        optimizer = optimize.FidesOptimizer(verbose=0)
        # initialize problem with x_guesses and objective
        objective = pypesto.Objective(
            fun=sp.optimize.rosen,
            grad=sp.optimize.rosen_der,
            hess=sp.optimize.rosen_hess,
        )
        x_guesses = np.array(
            [result1.optimize_result.list[i]['x0'] for i in range(2)]
        )
        problem = pypesto.Problem(
            objective=objective,
            ub=result1.problem.ub,
            lb=result1.problem.lb,
            x_guesses=x_guesses,
        )
        result2 = optimize.minimize(
            problem=problem,
            optimizer=optimizer,
            n_starts=2,
            engine=pypesto.engine.MultiProcessEngine(),
            filename=None,
        )

        for ix in range(2):
            assert_almost_equal(
                result1.optimize_result.list[ix]['x'],
                result2.optimize_result.list[ix]['x'],
                err_msg='The final parameter values '
                'do not agree for the engines.',
            )

    finally:
        if os.path.exists('temp_result.h5'):
            # delete data
            os.remove('temp_result.h5')
Example no. 8
def gaussian_mixture_problem():
    """Problem based on a mixture of gaussians."""
    def nllh(x):
        return - gaussian_mixture_llh(x)

    objective = pypesto.Objective(fun=nllh)
    problem = pypesto.Problem(objective=objective, lb=[-10], ub=[10],
                              x_names=['x'])
    return problem
Example no. 9
def gaussian_mixture_separated_modes_problem():
    """Problem based on a mixture of gaussians with far/separated modes."""
    def nllh(x):
        return - gaussian_mixture_separated_modes_llh(x)

    objective = pypesto.Objective(fun=nllh)
    problem = pypesto.Problem(objective=objective, lb=[-100], ub=[200],
                              x_names=['x'])
    return problem
Example no. 10
def obj_for_sensi(fun, grad, hess, max_sensi_order, integrated, x):
    """
    Create a pypesto.Objective able to compute up to the specified
    max_sensi_order. Returns a dict containing the objective obj as well
    as max_sensi_order and fval, grad, hess for the passed x.

    Parameters
    ----------

    fun, grad, hess: callable
        Functions computing the fval, grad, hess.
    max_sensi_order: int
        Maximum sensitivity order the pypesto.Objective should be capable of.
    integrated: bool
        True if fun, grad, hess should be integrated into one function,
        False if they should be passed to pypesto.Objective separately
        (both are possible).
    x: np.array
        Value at which to evaluate the function to obtain true values.

    Returns
    -------

    ret: dict
        With fields obj, max_sensi_order, x, fval, grad, hess.
    """
    if integrated:
        if max_sensi_order == 2:
            def arg_fun(x):
                return (fun(x), grad(x), hess(x))
            arg_grad = arg_hess = True
        elif max_sensi_order == 1:
            def arg_fun(x):
                return (fun(x), grad(x))
            arg_grad = True
            arg_hess = False
        else:
            def arg_fun(x):
                return fun(x)
            arg_grad = arg_hess = False
    else:  # not integrated
        if max_sensi_order >= 2:
            arg_hess = hess
        else:
            arg_hess = None
        if max_sensi_order >= 1:
            arg_grad = grad
        else:
            arg_grad = None
        arg_fun = fun
    obj = pypesto.Objective(fun=arg_fun, grad=arg_grad, hess=arg_hess)
    return {'obj': obj,
            'max_sensi_order': max_sensi_order,
            'x': x,
            'fval': fun(x),
            'grad': grad(x),
            'hess': hess(x)}
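A hedged usage sketch of obj_for_sensi with a simple quadratic model; the fun/grad/hess triple below is illustrative and not taken from the source:

import numpy as np

ret = obj_for_sensi(
    fun=lambda x: float(np.sum(x ** 2)),
    grad=lambda x: 2 * x,
    hess=lambda x: 2 * np.eye(len(x)),
    max_sensi_order=2,
    integrated=True,
    x=np.array([1.0, 2.0]),
)
# the returned objective can be queried up to max_sensi_order
fval, grad = ret['obj'](ret['x'], sensi_orders=(0, 1))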
Example no. 11
def rosenbrock_problem():
    """Problem based on rosenbrock objective."""
    objective = pypesto.Objective(fun=so.rosen)

    dim_full = 2
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))

    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)
    return problem
Example no. 12
def create_problem():
    # define a pypesto objective
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)

    # define a pypesto problem
    (lb, ub) = create_bounds()
    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    return problem
Example no. 13
def problem():
    """A very basic problem."""
    lb = [-5] * 10
    ub = [6] * 10
    objective = pypesto.Objective()
    problem = pypesto.Problem(objective=objective,
                              lb=lb,
                              ub=ub,
                              x_fixed_indices=[0, 1, 5],
                              x_fixed_vals=[42, 43, 44])
    return problem
Example no. 14
def create_problem(n_parameters: int = 2, x_names: Sequence[str] = None):
    # define a pypesto objective
    objective = pypesto.Objective(
        fun=so.rosen, grad=so.rosen_der, hess=so.rosen_hess, x_names=x_names
    )

    # define a pypesto problem
    (lb, ub) = create_bounds(n_parameters)
    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    return problem
Example no. 15
def _multistart_on_kernel(fun, lb, ub):
    objective = pypesto.Objective(fun=fun)
    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)
    optimizer = pypesto.ScipyOptimizer(options={
        'maxiter': 10000,
        'maxfun': 10000,
        'disp': False
    })
    result = pypesto.minimize(problem=problem,
                              optimizer=optimizer,
                              n_starts=100)
    return result
Example no. 16
def test_ensemble_from_optimization():
    """
    Test reading an ensemble from an optimization result.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5

    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    optimizer = optimize.ScipyOptimizer(options={'maxiter': 10})
    history_options = pypesto.HistoryOptions(trace_record=True)
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        history_options=history_options,
    )

    # change fvals of each start
    for i_start, optimizer_result in enumerate(result.optimize_result.list):
        optimizer_result['fval'] = i_start + 1
        for i_iter in range(len(optimizer_result['history']._trace['fval'])):
            optimizer_result['history']._trace['fval'][i_iter] = (
                len(optimizer_result['history']._trace['fval']) + i_start -
                i_iter)

    # test_endpoints
    ensemble_ep = Ensemble.from_optimization_endpoints(result=result,
                                                       cutoff=4,
                                                       max_size=10)

    ensemble_hist = Ensemble.from_optimization_history(result=result,
                                                       cutoff=4,
                                                       max_size=10,
                                                       max_per_start=5)

    # compare vector_tags with the expected values:
    ep_tags = [(int(result.optimize_result.list[i]['id']), -1)
               for i in [0, 1, 2, 3]]

    hist_tags = [(
        int(result.optimize_result.list[i]['id']),
        len(result.optimize_result.list[i]['history']._trace['fval']) - 1 - j,
    ) for i in range(4) for j in reversed(range(4 - i))]
    assert hist_tags == ensemble_hist.vector_tags
    assert ep_tags == ensemble_ep.vector_tags
Example no. 17
def lsq_residual_objective(d: float):
    """
    Returns an objective for the function

    f(x) = (x-d)^2
    """

    def f(x):
        return np.sum((x[0] - d) ** 2)

    def grad(x):
        return 2 * (x - d)

    return pypesto.Objective(fun=f, grad=grad)
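A quick check of the objective above; the numbers follow directly from f(x) = (x - d)^2:

import numpy as np

obj = lsq_residual_objective(3.0)
fval, grad = obj(np.array([1.0]), sensi_orders=(0, 1))
# fval == 4.0 and grad == array([-4.]) for d = 3 and x = 1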
Example no. 18
def create_problem():
    # define a pypesto objective (with tracing options)
    objective_options = pypesto.ObjectiveOptions(trace_record=True,
                                                 trace_save_iter=1)
    objective = pypesto.Objective(fun=sp.optimize.rosen,
                                  grad=sp.optimize.rosen_der,
                                  hess=sp.optimize.rosen_hess,
                                  options=objective_options)

    # define a pypesto problem
    (lb, ub) = create_bounds()
    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    return problem
Example no. 19
def rosenbrock_problem():
    """Problem based on rosenbrock objective.

    Features
    --------
    * 2-dim
    * has fixed parameters
    """
    objective = pypesto.Objective(fun=so.rosen)

    dim_full = 2
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))

    problem = pypesto.Problem(
            objective=objective, lb=lb, ub=ub,
            x_fixed_indices=[1], x_fixed_vals=[2])
    return problem
Example no. 20
def test_dim_vs_dim_full():
    """Test passing arrays in the full or reduced dimension."""
    objective = pypesto.Objective()

    # define problem with bounds including fixed parameters
    pypesto.Problem(objective=objective,
                    lb=[-1] * 4,
                    ub=[1] * 4,
                    dim_full=4,
                    x_fixed_indices=[0, 3],
                    x_fixed_vals=[42, 43])

    # define problem with incomplete bounds
    pypesto.Problem(objective=objective,
                    lb=[-1] * 2,
                    ub=[1] * 2,
                    dim_full=4,
                    x_fixed_indices=[0, 3],
                    x_fixed_vals=[42, 43])
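A hedged follow-up sketch showing how the fixed parameters relate the reduced (dim 2) and full (dim 4) parameter vectors; get_full_vector is assumed to behave as in current pypesto versions:

import numpy as np

problem = pypesto.Problem(objective=pypesto.Objective(),
                          lb=[-1] * 4, ub=[1] * 4, dim_full=4,
                          x_fixed_indices=[0, 3], x_fixed_vals=[42, 43])
x_full = problem.get_full_vector(np.array([0.1, 0.2]), problem.x_fixed_vals)
# expected: array([42. , 0.1, 0.2, 43. ])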
Example no. 21
def test_optimize():
    # logging
    pypesto.logging.log_to_console(logging.WARN)
    filename = ".test_logging.tmp"
    pypesto.logging.log_to_file(logging.DEBUG, filename)
    logger = logging.getLogger('pypesto')
    if os.path.exists(filename):
        os.remove(filename)
    fh = logging.FileHandler(filename)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    logger.info("start test")

    # problem definition
    def fun(_):
        raise Exception("This function cannot be called.")

    objective = pypesto.Objective(fun=fun)
    problem = pypesto.Problem(objective, -1, 1)

    optimizer = pypesto.optimize.ScipyOptimizer()
    options = {'allow_failed_starts': True}

    # optimization
    pypesto.optimize.minimize(problem,
                              optimizer,
                              5,
                              options=options,
                              filename=None)

    # assert logging worked
    assert os.path.exists(filename)
    with open(filename, 'rb') as f:
        content = str(f.read())

    # tidy up
    os.remove(filename)

    # check if error message got inserted
    assert "fail" in content
Example no. 22
def setup_rosen_problem(n_starts: int = 2):
    """Set up the rosenbrock problem and return
    a pypesto.Problem"""
    objective = pypesto.Objective(
        fun=sp.optimize.rosen,
        grad=sp.optimize.rosen_der,
        hess=sp.optimize.rosen_hess,
    )

    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))

    # fixing startpoints
    startpoints = pypesto.startpoint.latin_hypercube(
        n_starts=n_starts, lb=lb, ub=ub
    )
    problem = pypesto.Problem(
        objective=objective, lb=lb, ub=ub, x_guesses=startpoints
    )
    return problem
Example no. 23
# CREATE LOGICLE OBJECTIVE FUNCTION __________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# logicle parameter
T = 1
end_lin = 1e-5
logicle_obj = LogicleScale.LogicleObject(T=T, end_lin=end_lin)

f = lambda x: obj_lin.get_fval(
    logicleInverseTransform(par=x, logicle_object=logicle_obj))
g = lambda x: (obj_lin.get_grad(logicleInverseTransform(x, logicle_obj))
               * logicleInverseGradient(x, logicle_obj))

obj = pypesto.Objective(fun=f, grad=g)  #, hess=h)

print('optimal x = ', petab_problem.x_nominal)
print('optimal lh value', obj(petab_problem.x_nominal))

# check gradient at optimum and at random point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

x_random = np.random.normal(0.5, 0.005, 22)
check_grad_2 = obj.check_grad(x_random)
print(check_grad_2[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

# OPTIMIZATION WITHOUT PRIOR ___________________________________________________________________________________________

optimizer = pypesto.ScipyOptimizer(method='L-BFGS-B')
# play with FSA tolerances
solver.setRelativeToleranceFSA(rtol=1e-10)
solver.setAbsoluteToleranceFSA(atol=1e-10)

# CREATE LOG(1+x) OBJECTIVE FUNCTION ___________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# define offset
eps = 1e-5

f = lambda x: obj_lin.get_fval(10**np.array(x) - eps)
g = lambda x: (obj_lin.get_grad(10**np.array(x) - eps)
               * 10**np.array(x) * np.log(10))

obj = pypesto.Objective(fun=f, grad=g)

print('optimal x = ', petab_problem.x_nominal)
print('optimal lh value', obj(petab_problem.x_nominal))

# check gradient at optimum and at random point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

x_random = np.random.normal(0.5, 0.005, 12)
check_grad_2 = obj.check_grad(x_random)
print(check_grad_2[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

# OPTIMIZATION WITHOUT PRIOR ___________________________________________________________________________________________

optimizer = pypesto.ScipyOptimizer(method='L-BFGS-B')
Example no. 25
def test_hdf5_history_mp():
    """Test whether hdf5-History works with a MultiProcessEngine."""
    objective1 = pypesto.Objective(fun=so.rosen,
                                   grad=so.rosen_der,
                                   hess=so.rosen_hess)
    objective2 = pypesto.Objective(fun=so.rosen,
                                   grad=so.rosen_der,
                                   hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem1 = pypesto.Problem(objective=objective1,
                               lb=lb,
                               ub=ub,
                               x_guesses=startpoints)
    problem2 = pypesto.Problem(objective=objective2,
                               lb=lb,
                               ub=ub,
                               x_guesses=startpoints)

    optimizer1 = pypesto.optimize.ScipyOptimizer(options={'maxiter': 10})
    optimizer2 = pypesto.optimize.ScipyOptimizer(options={'maxiter': 10})

    with tempfile.TemporaryDirectory(dir=".") as tmpdirname:
        _, fn = tempfile.mkstemp(".hdf5", dir=f"{tmpdirname}")

        history_options_mp = pypesto.HistoryOptions(trace_record=True,
                                                    storage_file=fn)
        history_options_mem = pypesto.HistoryOptions(trace_record=True)
        # optimize with Memory History
        result_hdf5_mem = pypesto.optimize.minimize(
            problem=problem1,
            optimizer=optimizer1,
            n_starts=n_starts,
            history_options=history_options_mem,
            engine=MultiProcessEngine(),
            filename=None,
        )

        # optimizing with history saved in hdf5 and MultiProcessEngine
        result_memory_mp = pypesto.optimize.minimize(
            problem=problem2,
            optimizer=optimizer2,
            n_starts=n_starts,
            history_options=history_options_mp,
            engine=MultiProcessEngine(),
            filename=None,
        )

        history_entries = [X, FVAL, GRAD, HESS, RES, SRES, CHI2, SCHI2]
        assert len(result_hdf5_mem.optimize_result.list) == len(
            result_memory_mp.optimize_result.list)
        for mp_res in result_memory_mp.optimize_result.list:
            for mem_res in result_hdf5_mem.optimize_result.list:
                if mp_res['id'] == mem_res['id']:
                    for entry in history_entries:
                        hdf5_entry_trace = getattr(mp_res['history'],
                                                   f'get_{entry}_trace')()
                        mem_entry_trace = getattr(mem_res['history'],
                                                  f'get_{entry}_trace')()
                        for iteration in range(len(hdf5_entry_trace)):
                            # comparing nan and None difficult
                            if (hdf5_entry_trace[iteration] is None
                                    or np.isnan(
                                        hdf5_entry_trace[iteration]).all()):
                                continue
                            np.testing.assert_array_equal(
                                mem_entry_trace[iteration],
                                hdf5_entry_trace[iteration],
                            )
Example no. 26
def test_storage_all():
    """Test `read_result` and `write_result`.

    It currently does not test read/write of the problem, as this
    is known to not work completely. It also excludes testing the history
    key of an optimization result.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    optimizer = optimize.ScipyOptimizer()
    # Optimization
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    # Profiling
    result = profile.parameter_profile(
        problem=problem,
        result=result,
        profile_index=[0],
        optimizer=optimizer,
        filename=None,
    )
    # Sampling
    sampler = sample.AdaptiveMetropolisSampler()
    result = sample.sample(
        problem=problem,
        sampler=sampler,
        n_samples=100,
        result=result,
        filename=None,
    )
    # Read and write
    filename = 'test_file.hdf5'
    try:
        write_result(result=result, filename=filename)
        result_read = read_result(filename=filename)

        # test optimize
        for i, opt_res in enumerate(result.optimize_result.list):
            for key in opt_res:
                if key == 'history':
                    continue
                if isinstance(opt_res[key], np.ndarray):
                    np.testing.assert_array_equal(
                        opt_res[key], result_read.optimize_result.list[i][key])
                else:
                    assert (opt_res[key]
                            == result_read.optimize_result.list[i][key])

        # test profile
        for key in result.profile_result.list[0][0].keys():
            if (result.profile_result.list[0][0][key] is None
                    or key == 'time_path'):
                continue
            elif isinstance(result.profile_result.list[0][0][key], np.ndarray):
                np.testing.assert_array_equal(
                    result.profile_result.list[0][0][key],
                    result_read.profile_result.list[0][0][key],
                )
            elif isinstance(result.profile_result.list[0][0][key], int):
                assert (result.profile_result.list[0][0][key] ==
                        result_read.profile_result.list[0][0][key])

        # test sample
        for key in result.sample_result.keys():
            if result.sample_result[key] is None or key == 'time':
                continue
            elif isinstance(result.sample_result[key], np.ndarray):
                np.testing.assert_array_equal(
                    result.sample_result[key],
                    result_read.sample_result[key],
                )
            elif isinstance(result.sample_result[key], (float, int)):
                np.testing.assert_almost_equal(
                    result.sample_result[key],
                    result_read.sample_result[key],
                )
    finally:
        if os.path.exists(filename):
            os.remove(filename)
Example no. 27
def test_storage_sampling():
    """
    Test the saving and loading of samples to and from HDF5 via
    pypesto.store.SamplingResultHDF5Writer and
    pypesto.store.SamplingResultHDF5Reader. Covers all entries
    aside from time and message.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem = pypesto.Problem(objective=objective,
                              lb=lb,
                              ub=ub,
                              x_guesses=startpoints)

    optimizer = optimize.ScipyOptimizer()

    result_optimization = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    x_0 = result_optimization.optimize_result.list[0]['x']
    sampler = sample.AdaptiveParallelTemperingSampler(
        internal_sampler=sample.AdaptiveMetropolisSampler(), n_chains=1)
    sample_original = sample.sample(
        problem=problem,
        sampler=sampler,
        n_samples=100,
        x0=[x_0],
        filename=None,
    )

    fn = 'test_file.hdf5'
    try:
        pypesto_sample_writer = SamplingResultHDF5Writer(fn)
        pypesto_sample_writer.write(sample_original)
        pypesto_sample_reader = SamplingResultHDF5Reader(fn)
        sample_read = pypesto_sample_reader.read()

        for key in sample_original.sample_result.keys():
            if sample_original.sample_result[key] is None or key == 'time':
                continue
            elif isinstance(sample_original.sample_result[key], np.ndarray):
                np.testing.assert_array_equal(
                    sample_original.sample_result[key],
                    sample_read.sample_result[key],
                )
            elif isinstance(sample_original.sample_result[key], (float, int)):
                np.testing.assert_almost_equal(
                    sample_original.sample_result[key],
                    sample_read.sample_result[key],
                )
    finally:
        if os.path.exists(fn):
            os.remove(fn)
Example no. 28
def test_storage_profiling():
    """
    Test the saving and loading of profiles to and from HDF5 via
    pypesto.store.ProfileResultHDF5Writer and
    pypesto.store.ProfileResultHDF5Reader. Covers all entries
    aside from times and message.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem = pypesto.Problem(objective=objective,
                              lb=lb,
                              ub=ub,
                              x_guesses=startpoints)

    optimizer = optimize.ScipyOptimizer()

    result_optimization = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    profile_original = profile.parameter_profile(
        problem=problem,
        result=result_optimization,
        profile_index=[0],
        optimizer=optimizer,
        filename=None,
    )

    fn = 'test_file.hdf5'
    try:
        pypesto_profile_writer = ProfileResultHDF5Writer(fn)
        pypesto_profile_writer.write(profile_original)
        pypesto_profile_reader = ProfileResultHDF5Reader(fn)
        profile_read = pypesto_profile_reader.read()

        for key in profile_original.profile_result.list[0][0].keys():
            if (profile_original.profile_result.list[0][0][key] is None
                    or key == 'time_path'):
                continue
            elif isinstance(profile_original.profile_result.list[0][0][key],
                            np.ndarray):
                np.testing.assert_array_equal(
                    profile_original.profile_result.list[0][0][key],
                    profile_read.profile_result.list[0][0][key],
                )
            elif isinstance(profile_original.profile_result.list[0][0][key],
                            int):
                assert (profile_original.profile_result.list[0][0][key] ==
                        profile_read.profile_result.list[0][0][key])
    finally:
        if os.path.exists(fn):
            os.remove(fn)
Example no. 29
def test_storage_trace(hdf5_file):
    objective1 = pypesto.Objective(fun=so.rosen,
                                   grad=so.rosen_der,
                                   hess=so.rosen_hess)
    objective2 = pypesto.Objective(fun=so.rosen,
                                   grad=so.rosen_der,
                                   hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem1 = pypesto.Problem(objective=objective1,
                               lb=lb,
                               ub=ub,
                               x_guesses=startpoints)
    problem2 = pypesto.Problem(objective=objective2,
                               lb=lb,
                               ub=ub,
                               x_guesses=startpoints)

    optimizer1 = optimize.ScipyOptimizer(options={'maxiter': 10})
    optimizer2 = optimize.ScipyOptimizer(options={'maxiter': 10})

    history_options_hdf5 = pypesto.HistoryOptions(trace_record=True,
                                                  storage_file=hdf5_file)
    # optimize with history saved to hdf5
    result_hdf5 = optimize.minimize(
        problem=problem1,
        optimizer=optimizer1,
        n_starts=n_starts,
        history_options=history_options_hdf5,
    )

    # optimizing with history saved in memory
    history_options_memory = pypesto.HistoryOptions(trace_record=True)
    result_memory = optimize.minimize(
        problem=problem2,
        optimizer=optimizer2,
        n_starts=n_starts,
        history_options=history_options_memory,
        filename=None,
    )

    history_entries = [X, FVAL, GRAD, HESS, RES, SRES, CHI2, SCHI2]
    assert len(result_hdf5.optimize_result.list) == len(
        result_memory.optimize_result.list)
    for mem_res in result_memory.optimize_result.list:
        for hdf_res in result_hdf5.optimize_result.list:
            if mem_res['id'] == hdf_res['id']:
                for entry in history_entries:
                    hdf5_entry_trace = getattr(hdf_res['history'],
                                               f'get_{entry}_trace')()
                    for iteration in range(len(hdf5_entry_trace)):
                        # comparing nan and None difficult
                        if (hdf5_entry_trace[iteration] is None or np.isnan(
                                hdf5_entry_trace[iteration]).all()):
                            continue
                        np.testing.assert_array_equal(
                            getattr(mem_res['history'],
                                    f'get_{entry}_trace')()[iteration],
                            hdf5_entry_trace[iteration],
                        )
# enable sensitivities
solver.setSensitivityOrder(amici.SensitivityOrder_first)       # First-order ...
solver.setSensitivityMethod(amici.SensitivityMethod_forward)   # ... forward sensitivities
model.requireSensitivitiesForAllParameters()                   # ... w.r.t. all parameters

# CREATE OBJECTIVE FUNCTION_____________________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# logicle parameter
T = 1
end_lin = 1e-5
logicle_obj = LogicleScale.LogicleObject(T=T, end_lin=end_lin)
f = lambda x: obj_lin.get_fval(logicleInverseTransform(par=x, logicle_object=logicle_obj))
g = lambda x: obj_lin.get_grad(logicleInverseTransform(x, logicle_obj)) * logicleInverseGradient(x, logicle_obj)
obj_logicle = pypesto.Objective(fun=f, grad=g) #, hess=h)


# MODEL SELECTION ______________________________________________________________________________________________________

def import_stuff(file_name):
    import_opt = import_modelSelection_results(file_name)
    lambdas = import_opt['lambdas'][~np.isnan(import_opt['lambdas'])]
    sigma = import_opt['sigma'][0]
    par_names_temp = import_opt['par_names']
    par_names_index = [i for i, y in enumerate(par_names_temp)
                       if not (isinstance(y, float) and np.isnan(y))]
    par_names = par_names_temp[par_names_index]
    conv_points = import_opt['conv_points'][:len(lambdas)]
    n_starts = import_opt['n_starts'][0]
    options = import_opt['options'][0]