예제 #1
0
    def setUp(cls):
        """Create two 1-D least-squares problems and optimize both.

        NOTE(review): the parameter is named ``cls`` although the hook is
        ``setUp`` — presumably used with class-level fixture semantics;
        confirm against the enclosing test class.
        """

        # shared 1-D box bounds [-1, 5]
        cls.lb = np.array([-1])
        cls.ub = np.array([5])

        # single residual centered at 0 ("training data" only)
        cls.problem_training_data = pypesto.Problem(
            lsq_residual_objective(0), cls.lb, cls.ub
        )

        # aggregate of residuals centered at 0 and at 2 ("all data")
        cls.problem_all_data = pypesto.Problem(
            pypesto.objective.AggregatedObjective(
                [lsq_residual_objective(0), lsq_residual_objective(2)]
            ),
            cls.lb,
            cls.ub,
        )

        # optimum f(0)=0
        cls.result_training_data = optimize.minimize(
            cls.problem_training_data, n_starts=5, filename=None
        )
        # Optimum f(1)=2
        cls.result_all_data = optimize.minimize(
            cls.problem_all_data, n_starts=5, filename=None
        )
예제 #2
0
def test_unbounded_minimize(optimizer):
    """
    Test unbounded optimization using various optimizers and objective modes.
    """
    # finite initialization region inside fully unbounded box
    lb_init = 1.1 * np.ones((1, 2))
    lb = -np.inf * np.ones(lb_init.shape)
    ub_init = 1.11 * np.ones((1, 2))
    ub = np.inf * np.ones(ub_init.shape)
    problem = pypesto.Problem(
        rosen_for_sensi(max_sensi_order=2)['obj'],
        lb, ub, lb_init=lb_init, ub_init=ub_init
    )
    opt = get_optimizer(*optimizer)

    options = optimize.OptimizeOptions(allow_failed_starts=False)

    # Skip least-squares methods (prefix 'ls_'), which need finite bounds.
    # FIX: the inline flag must lead the pattern — r'^(?i)(ls_)' is
    # deprecated since Python 3.6 and raises re.error on Python >= 3.11.
    if isinstance(optimizer[1], str) and re.match(r'(?i)ls_', optimizer[1]):
        return

    # these optimizers cannot handle infinite bounds and must raise
    if optimizer in [('dlib', ''), ('pyswarm', ''), ('cmaes', ''),
                     *[('nlopt', method) for method in [
                         nlopt.GN_ESCH, nlopt.GN_ISRES, nlopt.GN_AGS,
                         nlopt.GD_STOGO, nlopt.GD_STOGO_RAND, nlopt.G_MLSL,
                         nlopt.G_MLSL_LDS, nlopt.GD_MLSL, nlopt.GD_MLSL_LDS,
                         nlopt.GN_CRS2_LM, nlopt.GN_ORIG_DIRECT,
                         nlopt.GN_ORIG_DIRECT_L, nlopt.GN_DIRECT,
                         nlopt.GN_DIRECT_L, nlopt.GN_DIRECT_L_NOSCAL,
                         nlopt.GN_DIRECT_L_RAND,
                         nlopt.GN_DIRECT_L_RAND_NOSCAL]]]:
        with pytest.raises(ValueError):
            optimize.minimize(
                problem=problem,
                optimizer=opt,
                n_starts=1,
                startpoint_method=pypesto.startpoint.uniform,
                options=options
            )
        return
    else:
        result = optimize.minimize(
            problem=problem,
            optimizer=opt,
            n_starts=1,
            startpoint_method=pypesto.startpoint.uniform,
            options=options
        )

    # check that ub/lb were reverted
    assert isinstance(result.optimize_result.list[0]['fval'], float)
    if optimizer not in [('scipy', 'ls_trf'), ('scipy', 'ls_dogbox')]:
        assert np.isfinite(result.optimize_result.list[0]['fval'])
        assert result.optimize_result.list[0]['x'] is not None
    # check that result is not in bounds, optimum is at (1,1), so you would
    # hope that any reasonable optimizer manage to finish with x < ub,
    # but I guess some are pretty terrible
    assert np.any(result.optimize_result.list[0]['x'] < lb_init) or \
        np.any(result.optimize_result.list[0]['x'] > ub_init)
예제 #3
0
def create_optimization_history():
    """Optimize the test problem with TNC while recording a full trace.

    Returns the pypesto result whose per-start histories contain one
    trace entry per iteration.
    """
    problem = create_problem()

    optimizer = optimize.ScipyOptimizer(
        method='TNC', options={'maxiter': 200}
    )

    # record every iteration in the history trace
    history_options = pypesto.HistoryOptions(
        trace_record=True, trace_save_iter=1
    )

    return optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize.OptimizeOptions(allow_failed_starts=True),
        history_options=history_options,
        filename=None,
    )
예제 #4
0
def test_autocorrelation_pipeline():
    """Check that the autocorrelation test works."""
    problem = gaussian_problem()
    sampler = sample.MetropolisSampler()

    # optimize, then sample a chain of moderate length
    result = optimize.minimize(problem, n_starts=3)
    result = sample.sample(
        problem, sampler=sampler, n_samples=1000, result=result)

    # autocorrelation with burn-in from a prior geweke test ...
    sample.geweke_test(result)
    ac_with_geweke = sample.auto_correlation(result)

    # ... must equal autocorrelation computed without it
    result.sample_result.burn_in = None
    ac_without_geweke = sample.auto_correlation(result)
    assert ac_with_geweke == ac_without_geweke

    # same invariance for the effective sample size, with
    # burn-in/autocorrelation precomputed ...
    ess_precomputed = sample.effective_sample_size(result)

    # ... and recomputed from scratch
    result.sample_result.burn_in = None
    result.sample_result.auto_correlation = None
    ess_recomputed = sample.effective_sample_size(result)

    assert ess_precomputed == ess_recomputed
예제 #5
0
def test_autocorrelation_short_chain():
    """Check that the autocorrelation
    reacts nicely to small sample numbers."""
    problem = gaussian_problem()

    result = optimize.minimize(problem, n_starts=3, filename=None)
    result = sample.sample(
        problem,
        sampler=sample.MetropolisSampler(),
        n_samples=10,
        result=result,
        filename=None,
    )

    # manually set burn in to chain length (only for testing!!)
    # -> no converged samples remain for the diagnostics
    result.sample_result.burn_in = result.sample_result.trace_x.shape[1]

    # both diagnostics must signal "nothing to analyze" via None
    assert sample.auto_correlation(result) is None
    assert sample.effective_sample_size(result) is None
예제 #6
0
def test_profile_with_fixed_parameters():
    """Test using profiles with fixed parameters."""
    obj = test_objective.rosen_for_sensi(max_sensi_order=1)['obj']

    # 5 parameters, two of them fixed -> 3 free parameters remain
    problem = pypesto.Problem(
        objective=obj,
        lb=-2 * np.ones(5),
        ub=2 * np.ones(5),
        x_fixed_vals=[0.5, -1.8],
        x_fixed_indices=[0, 3],
    )

    optimizer = optimize.ScipyOptimizer(options={'maxiter': 50})
    result = optimize.minimize(
        problem=problem, optimizer=optimizer, n_starts=2)

    guess_methods = [
        'fixed_step', 'adaptive_step_order_0', 'adaptive_step_order_1',
        'adaptive_step_regression',
    ]
    for i_method, next_guess_method in enumerate(guess_methods):
        print(next_guess_method)
        profile.parameter_profile(
            problem=problem,
            result=result,
            optimizer=optimizer,
            next_guess_method=next_guess_method,
        )

        # standard plotting: one axis per free (non-fixed) parameter
        axes = visualize.profiles(result, profile_list_ids=i_method)
        assert len(axes) == 3
        visualize.profile_cis(result, profile_list=i_method)
예제 #7
0
def test_petab_pysb_optimization():
    """Optimize PEtab-PySB test case 0001 and compare against the
    reference log-likelihood shipped with the case."""
    test_case = '0001'
    case_dir = os.path.join(petabtests.PYSB_DIR, test_case)
    petab_yaml = os.path.join(case_dir, f'_{test_case}.yaml')

    # expected results
    with open(os.path.join(case_dir, f'_{test_case}_solution.yaml')) as f:
        solution = yaml.full_load(f)

    petab_problem = PysbPetabProblem.from_yaml(petab_yaml)
    problem = PetabImporterPysb(petab_problem).create_problem()

    # simulation at the nominal parameters must reproduce the reference LLH
    assert np.isclose(problem.objective(petab_problem.x_nominal),
                      -solution[petabtests.LLH])

    result = optimize.minimize(problem=problem,
                               optimizer=optimize.ScipyOptimizer(),
                               n_starts=10,
                               filename=None)
    fvals = np.array(result.optimize_result.get_for_key('fval'))

    # optimization must not end up worse than the true parameters
    assert np.all(fvals <= -solution[petabtests.LLH])
예제 #8
0
def check_minimize(objective, library, solver, allow_failed_starts=False):
    """Run one optimizer start for *library*/*solver* on *objective* and
    check that a float fval is reported."""
    options = {'maxiter': 100}

    # optimizer factory per supported library; unknown libraries leave
    # optimizer as None, exactly like the original if/elif chain
    factories = {
        'scipy': lambda: optimize.ScipyOptimizer(
            method=solver, options=options),
        'ipopt': lambda: optimize.IpoptOptimizer(),
        'dlib': lambda: optimize.DlibOptimizer(
            method=solver, options=options),
        'pyswarm': lambda: optimize.PyswarmOptimizer(options=options),
    }
    factory = factories.get(library)
    optimizer = factory() if factory is not None else None

    # unit box in 2-D
    problem = pypesto.Problem(
        objective, 0 * np.ones((1, 2)), 1 * np.ones((1, 2)))

    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize.OptimizeOptions(
            allow_failed_starts=allow_failed_starts),
    )

    assert isinstance(result.optimize_result.list[0]['fval'], float)
예제 #9
0
def test_ground_truth():
    """Test whether we actually retrieve correct distributions."""
    # use best self-implemented sampler, which has a chance of correctly
    # sample from the distribution
    sampler = sample.AdaptiveParallelTemperingSampler(
        internal_sampler=sample.AdaptiveMetropolisSampler(), n_chains=5)

    problem = gaussian_problem()
    result = optimize.minimize(problem, filename=None)
    result = sample.sample(problem,
                           n_samples=5000,
                           result=result,
                           sampler=sampler,
                           filename=None)

    # flattened samples of the first chain
    samples = result.sample_result.trace_x[0].flatten()

    # the samples should look normal ...
    statistic, pval = kstest(samples, 'norm')
    print(statistic, pval)
    assert statistic < 0.1

    # ... and clearly not uniform
    statistic, pval = kstest(samples, 'uniform')
    print(statistic, pval)
    assert statistic > 0.1
예제 #10
0
def pretrain(problem: Problem,
             startpoint_method: Callable,
             nstarts: int,
             fatol: float = 1e-2,
             subspace: fides.SubSpaceDim = fides.SubSpaceDim.FULL,
             maxiter: int = int(1e3)):
    """
    Pretrain the provided problem via optimization.

    :param problem:
        problem that defines the pretraining optimization problem

    :param startpoint_method:
        function that generates the initial points for optimization. In most
        cases this uses results from previous pretraining steps.

    :param nstarts:
        number of local optimizations to perform

    :param fatol:
        absolute function tolerance for termination of optimization

    :param subspace:
        fides subspace to use, fides.SubSpaceDim.FULL becomes quite slow for
        anything with over 1k parameters

    :param maxiter:
        maximum number of iterations
    """
    # NOTE(review): the options dict below contains both the string key
    # 'maxtime' (3600) and fides.Options.MAXTIME (7200). If those hash to
    # the same key (str-valued enum), the later 7200 silently wins —
    # confirm which time limit is intended.
    opt = FidesOptimizer(hessian_update=fides.BFGS(),
                         options={
                             'maxtime':
                             3600,
                             fides.Options.FATOL:
                             fatol,
                             fides.Options.MAXTIME:
                             7200,
                             fides.Options.MAXITER:
                             maxiter,
                             fides.Options.SUBSPACE_DIM:
                             subspace,
                             fides.Options.REFINE_STEPBACK:
                             False,
                             fides.Options.STEPBACK_STRAT:
                             fides.StepBackStrategy.SINGLE_REFLECT
                         })

    # tolerate failed local starts; resample startpoints when needed
    optimize_options = OptimizeOptions(
        startpoint_resample=True,
        allow_failed_starts=True,
    )

    return minimize(
        problem,
        opt,
        n_starts=nstarts,
        options=optimize_options,
        startpoint_method=startpoint_method,
    )
예제 #11
0
def test_mpipoolengine():
    """
    Test the MPIPoolEngine by calling an example script with mpiexec.
    """
    try:
        # get the path to this file:
        path = os.path.dirname(__file__)
        # run the example file.
        subprocess.check_call(  # noqa: S603,S607
            [
                'mpiexec',
                '-np',
                '2',
                'python',
                '-m',
                'mpi4py.futures',
                f'{path}/../../doc/example/example_MPIPool.py',
            ]
        )

        # read results written by the example script
        result1 = read_result('temp_result.h5', problem=True, optimize=True)
        # set optimizer
        optimizer = optimize.FidesOptimizer(verbose=0)
        # initialize problem with x_guesses and objective
        objective = pypesto.Objective(
            fun=sp.optimize.rosen,
            grad=sp.optimize.rosen_der,
            hess=sp.optimize.rosen_hess,
        )
        # reuse the exact start points of the MPI run so both runs are
        # comparable
        x_guesses = np.array(
            [result1.optimize_result.list[i]['x0'] for i in range(2)]
        )
        problem = pypesto.Problem(
            objective=objective,
            ub=result1.problem.ub,
            lb=result1.problem.lb,
            x_guesses=x_guesses,
        )
        # same optimization locally with the MultiProcessEngine
        result2 = optimize.minimize(
            problem=problem,
            optimizer=optimizer,
            n_starts=2,
            engine=pypesto.engine.MultiProcessEngine(),
            filename=None,
        )

        # both engines must converge to the same parameters per start
        for ix in range(2):
            assert_almost_equal(
                result1.optimize_result.list[ix]['x'],
                result2.optimize_result.list[ix]['x'],
                err_msg='The final parameter values '
                'do not agree for the engines.',
            )

    finally:
        # always clean up the temp file, even if the MPI run failed
        if os.path.exists('temp_result.h5'):
            # delete data
            os.remove('temp_result.h5')
예제 #12
0
def test_history_beats_optimizer():
    """Test overwriting from history vs whatever the optimizer reports."""
    problem = CRProblem(
        x_guesses=np.array([0.25, 0.25]).reshape(1, -1)
    ).get_problem()

    max_fval = 10
    scipy_options = {"maxfun": max_fval}

    def run(history_beats_optimizer):
        # fresh TNC optimizer per run, capped at max_fval evaluations
        return optimize.minimize(
            problem=problem,
            optimizer=optimize.ScipyOptimizer(
                method="TNC", options=scipy_options),
            n_starts=1,
            options=optimize.OptimizeOptions(
                history_beats_optimizer=history_beats_optimizer),
            filename=None,
        )

    result_hist = run(True)
    result_opt = run(False)

    for result in (result_hist, result_opt):
        best = result.optimize_result.list[0]
        # number of function evaluations
        assert best['n_fval'] <= max_fval + 1
        # optimal value in bounds
        assert np.all(problem.lb <= best['x'])
        assert np.all(problem.ub >= best['x'])
        # entries filled
        for key in ('fval', 'x', 'grad'):
            val = best[key]
            assert val is not None and np.all(np.isfinite(val))

    # TNC funnily reports the last value if not converged
    #  (this may break if their implementation is changed at some point ...)
    assert (
        result_hist.optimize_result.list[0]['fval']
        < result_opt.optimize_result.list[0]['fval']
    )
예제 #13
0
def test_ensemble_from_optimization():
    """
    Test reading an ensemble from optimization result.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5

    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    optimizer = optimize.ScipyOptimizer(options={'maxiter': 10})
    history_options = pypesto.HistoryOptions(trace_record=True)
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        history_options=history_options,
    )

    # change fvals of each start: start i gets endpoint fval i+1, and its
    # history trace decreases toward i (so cutoff=4 selects starts 0-3 and
    # a known number of trace entries per start)
    for i_start, optimizer_result in enumerate(result.optimize_result.list):
        optimizer_result['fval'] = i_start + 1
        for i_iter in range(len(optimizer_result['history']._trace['fval'])):
            optimizer_result['history']._trace['fval'][i_iter] = (
                len(optimizer_result['history']._trace['fval']) + i_start -
                i_iter)

    # test_endpoints
    ensemble_ep = Ensemble.from_optimization_endpoints(result=result,
                                                       cutoff=4,
                                                       max_size=10)

    ensemble_hist = Ensemble.from_optimization_history(result=result,
                                                       cutoff=4,
                                                       max_size=10,
                                                       max_per_start=5)

    # compare vector_tags with the expected values:
    # endpoint tags are (start id, -1); cutoff=4 keeps starts 0..3
    ep_tags = [(int(result.optimize_result.list[i]['id']), -1)
               for i in [0, 1, 2, 3]]

    # history tags are (start id, trace index); start i contributes its
    # last 4-i trace entries given the fabricated decreasing fvals
    hist_tags = [(
        int(result.optimize_result.list[i]['id']),
        len(result.optimize_result.list[i]['history']._trace['fval']) - 1 - j,
    ) for i in range(4) for j in reversed(range(4 - i))]
    assert hist_tags == ensemble_hist.vector_tags
    assert ep_tags == ensemble_ep.vector_tags
예제 #14
0
def test_pipeline(sampler, problem):
    """Check that a typical pipeline runs through."""
    # short optimization
    result = optimize.minimize(
        problem, n_starts=3,
        optimizer=optimize.ScipyOptimizer(options={'maxiter': 10}))

    # short sampling run
    result = sample.sample(
        problem, sampler=sampler, n_samples=100, result=result)

    # plotting must run through
    visualize.sampling_1d_marginals(result)
    plt.close()
예제 #15
0
def test_trim_results(problem):
    """
    Test trimming of hess/sres from results
    """

    optimize_options = optimize.OptimizeOptions(
        report_hess=False, report_sres=False
    )
    prob = pypesto.Problem(
        objective=rosen_for_sensi(max_sensi_order=2)['obj'],
        lb=0 * np.ones((1, 2)),
        ub=1 * np.ones((1, 2)),
    )

    def run(optimizer):
        # single start with the trimming options above
        return optimize.minimize(
            problem=prob,
            optimizer=optimizer,
            n_starts=1,
            startpoint_method=pypesto.startpoint.uniform,
            options=optimize_options,
            filename=None,
        )

    # fides reports a hessian -> must be trimmed
    result = run(optimize.FidesOptimizer(verbose=0))
    assert result.optimize_result.list[0].hess is None

    # least-squares scipy reports sres -> must be trimmed
    result = run(optimize.ScipyOptimizer(method='ls_trf'))
    assert result.optimize_result.list[0].sres is None
예제 #16
0
def get_ensemble_prediction(max_size: int = 2,
                            inc_weights: bool = False,
                            inc_sigmay: bool = False):
    """
    Creates an ensemble prediction for the tests.
    """
    # NOTE(review): max_size is accepted but the ensemble below is built
    # with max_size=10 — the parameter appears unused; confirm.
    problem = create_petab_problem()

    optimizer = optimize.ScipyOptimizer()
    result = optimize.minimize(problem=problem,
                               optimizer=optimizer,
                               n_starts=2,
                               filename=None)

    ensemble_ep = Ensemble.from_optimization_endpoints(result=result,
                                                       max_size=10)

    # This post_processor will transform the output of the simulation tool
    # such that the output is compatible with the next steps.
    def post_processor(amici_outputs, output_type, output_ids):
        # failed simulations (AMICI_STATUS != 0) are replaced by NaN arrays
        # of the expected (n_timepoints, n_outputs) shape
        outputs = [
            amici_output[output_type] if amici_output[AMICI_STATUS] == 0 else
            np.full((len(amici_output[AMICI_T]), len(output_ids)), np.nan)
            for amici_output in amici_outputs
        ]
        return outputs

    amici_objective = result.problem.objective
    observable_ids = amici_objective.amici_model.getObservableIds()
    # bind the observable-specific arguments once
    post_processor_y = partial(
        post_processor,
        output_type=AMICI_Y,
        output_ids=observable_ids,
    )
    # Create pyPESTO predictors for states and observables
    predictor_y = AmiciPredictor(
        amici_objective,
        post_processor=post_processor_y,
        output_ids=observable_ids,
    )
    engine = MultiProcessEngine()
    ensemble_prediction = ensemble_ep.predict(
        predictor_y,
        prediction_id=AMICI_Y,
        engine=engine,
        include_llh_weights=inc_weights,
        include_sigmay=inc_sigmay,
    )
    return ensemble_prediction
예제 #17
0
def test_geweke_test_unconverged():
    """Check that the geweke test reacts nicely to small sample numbers."""
    problem = gaussian_problem()

    # optimize, then draw a deliberately short chain
    result = optimize.minimize(problem, n_starts=3)
    result = sample.sample(
        problem,
        sampler=sample.MetropolisSampler(),
        n_samples=100,
        result=result)

    # run geweke test (should not fail!)
    sample.geweke_test(result)
예제 #18
0
def test_samples_cis():
    """
    Test whether :py:func:`pypesto.sample.calculate_ci_mcmc_sample` produces
    percentile-based credibility intervals correctly.
    """
    problem = gaussian_problem()
    sampler = sample.MetropolisSampler()

    result = optimize.minimize(problem, n_starts=3, filename=None)
    result = sample.sample(problem,
                           sampler=sampler,
                           n_samples=2000,
                           result=result,
                           filename=None)

    # determine the burn-in
    sample.geweke_test(result)

    # portion of the first chain after burn-in
    burn_in = result.sample_result.burn_in
    converged_chain = np.asarray(
        result.sample_result.trace_x[0, burn_in:, :])

    # loop over confidence levels
    for alpha in [0.99, 0.95, 0.68]:
        # CIs from pypesto ...
        lb, ub = sample.calculate_ci_mcmc_sample(result, ci_level=alpha)
        # ... must match the corresponding raw percentiles exactly
        percentiles = 100 * np.array([(1 - alpha) / 2, 1 - (1 - alpha) / 2])
        diff = np.percentile(converged_chain, percentiles, axis=0) - [lb, ub]
        assert (diff == 0).all()
        # lower bound strictly below upper bound
        assert (lb < ub).all()
        # dimensions agree
        assert lb.shape == ub.shape
예제 #19
0
def test_result_from_hdf5_history(hdf5_file):
    """Results reconstructed from an hdf5 history must match the
    in-memory optimization results entry by entry."""
    problem = create_petab_problem()

    # optimize with history saved to hdf5
    result = optimize.minimize(
        problem=problem,
        n_starts=1,
        history_options=pypesto.HistoryOptions(
            trace_record=True,
            storage_file=hdf5_file,
        ),
    )

    result_from_hdf5 = optimization_result_from_history(
        filename=hdf5_file, problem=problem)

    # Currently 'exitflag', 'time' and 'message' are not loaded.
    arguments = [
        ID, X, FVAL, GRAD, HESS, RES, SRES,
        N_FVAL, N_GRAD, N_HESS, N_RES, N_SRES,
        X0, FVAL0,
    ]
    original = result.optimize_result.list[0]
    restored = result_from_hdf5.optimize_result.list[0]
    for key in arguments:
        if original[key] is None:
            assert restored[key] is None
        elif isinstance(original[key], np.ndarray):
            assert np.allclose(original[key], restored[key]), key
        else:
            assert original[key] == restored[key], key
예제 #20
0
def create_optimization_results(objective):
    """Optimize *objective* on [-2, 2]^2 with TNC.

    Returns the (problem, result, optimizer) triple for further use.
    """
    optimizer = optimize.ScipyOptimizer(
        method='TNC', options={'maxiter': 200})

    problem = pypesto.Problem(objective, -2 * np.ones(2), 2 * np.ones(2))

    # run optimization, tolerating failed starts
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize.OptimizeOptions(allow_failed_starts=True),
    )

    return problem, result, optimizer
예제 #21
0
def test_parameters_hist():
    """Smoke test for visualize.parameter_hist."""
    problem = create_problem()

    optimizer = optimize.ScipyOptimizer(
        method='TNC', options={'maxiter': 200})

    result_1 = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=10,
        startpoint_method=pypesto.startpoint.uniform,
    )

    # plot with default start selection and with explicit start indices
    visualize.parameter_hist(result_1, 'x1')
    visualize.parameter_hist(result_1, 'x1', start_indices=list(range(10)))
예제 #22
0
def test_time_trajectory_model():
    """Test pypesto.visualize.time_trajectory_model"""
    # the conversion_reaction example lives in doc/example relative to here
    current_path = os.path.dirname(os.path.realpath(__file__))
    example_dir = os.path.abspath(
        os.path.join(current_path, '..', '..', 'doc', 'example')
    )

    # import to petab
    petab_problem = petab.Problem.from_yaml(
        os.path.join(
            example_dir, "conversion_reaction", "conversion_reaction.yaml"
        )
    )
    # import to pypesto and create the problem
    problem = pypesto.petab.PetabImporter(petab_problem).create_problem()

    result = optimize.minimize(problem=problem, n_starts=1, filename=None)

    # test call of time_trajectory_model
    time_trajectory_model(result=result)
예제 #23
0
def check_minimize(problem, library, solver, allow_failed_starts=False):
    """Runs a single run of optimization according to the provided inputs
    and checks whether optimization yielded a solution."""
    result = optimize.minimize(
        problem=problem,
        optimizer=get_optimizer(library, solver),
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize.OptimizeOptions(
            allow_failed_starts=allow_failed_starts),
        filename=None,
    )

    best = result.optimize_result.list[0]
    assert isinstance(best['fval'], float)
    # nlopt GD_STOGO_RAND (id 9) fails in ~40% of cases -> skip the
    # stricter checks for it
    if (library, solver) not in [('nlopt', nlopt.GD_STOGO_RAND)]:
        assert np.isfinite(best['fval'])
        assert best['x'] is not None
예제 #24
0
def test_pipeline(sampler, problem):
    """Check that a typical pipeline runs through."""
    # short optimization
    optimizer = optimize.ScipyOptimizer(options={'maxiter': 10})
    result = optimize.minimize(
        problem, n_starts=3, optimizer=optimizer, filename=None)

    # short sampling run
    result = sample.sample(
        problem, sampler=sampler, n_samples=100, result=result,
        filename=None)

    # remove warnings in test/sample/test_sample.
    # Warning here: pypesto/visualize/sampling.py:1104
    # geweke test
    sample.geweke_test(result=result)

    # plotting must run through
    visualize.sampling_1d_marginals(result)
    plt.close()
예제 #25
0
def check_minimize(objective, library, solver, allow_failed_starts=False):
    """Runs a single run of optimization according to the provided inputs
    and checks whether optimization yielded a solution."""
    optimizer = get_optimizer(library, solver)
    # unit box in 2-D
    problem = pypesto.Problem(
        objective, 0 * np.ones((1, 2)), 1 * np.ones((1, 2)))

    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize.OptimizeOptions(
            allow_failed_starts=allow_failed_starts),
    )

    best = result.optimize_result.list[0]
    assert isinstance(best['fval'], float)
    # least-squares scipy solvers may legitimately fail here -> skip the
    # stricter checks for them
    if (library, solver) not in [('scipy', 'ls_trf'), ('scipy', 'ls_dogbox')]:
        assert np.isfinite(best['fval'])
        assert best['x'] is not None
예제 #26
0
def train(ae: MechanisticAutoEncoder,
          optimizer: str = 'fides',
          ftol: float = 1e-3,
          maxiter: int = int(1e4),
          n_starts: int = 1,
          seed: int = 0) -> Result:
    """
    Trains the provided autoencoder by solving the optimization problem
    generated by :py:func:`create_pypesto_problem`

    :param ae:
        Autoencoder that will be trained
    :param optimizer:
        Optimizer string that specifies the optimizer that will be used
        ('ipopt', 'fides', or 'NLOpt_<method>')
    :param ftol:
        function tolerance that is used to assess optimizer convergence
    :param maxiter:
        maximum number of optimization iterations
    :param n_starts:
        number of local starts that will be performed
    :param seed:
        random seed that will be used to generate the randomly sampled
        initial startpoints

    :returns:
        Pypesto optimization results.

    :raises ValueError:
        if the optimizer string matches none of the supported optimizers
    """
    # FIX: default was the float 1e4 despite the int annotation; int(1e4)
    # keeps the same value while honoring the declared type.
    pypesto_problem = create_pypesto_problem(ae)

    if optimizer == 'ipopt':
        opt = IpoptOptimizer(options={
            'maxiter': maxiter,
            'tol': ftol,
            'disp': 5,
        })
    elif optimizer.startswith('NLOpt_'):
        opt = NLoptOptimizer(method=getattr(nlopt,
                                            optimizer.replace('NLOpt_', '')),
                             options={
                                 'maxtime': 3600,
                                 'ftol_abs': ftol,
                             })
    elif optimizer == 'fides':
        opt = FidesOptimizer(hessian_update=fides.BFGS(),
                             options={
                                 'maxtime': 3600,
                                 fides.Options.FATOL: ftol,
                                 fides.Options.MAXTIME: 3600,
                                 fides.Options.MAXITER: maxiter,
                                 fides.Options.SUBSPACE_DIM:
                                 fides.SubSpaceDim.FULL
                             },
                             verbose=logging.INFO)
    else:
        # FIX: previously an unknown optimizer string left `opt` unbound
        # and produced a confusing NameError at minimize() below.
        raise ValueError(f'Unsupported optimizer: {optimizer}')

    os.makedirs(trace_path, exist_ok=True)

    # record an optimizer trace, stored per pathway/data/optimizer/seed
    history_options = HistoryOptions(trace_record=True,
                                     trace_record_hess=False,
                                     trace_record_res=False,
                                     trace_record_sres=False,
                                     trace_record_schi2=False,
                                     storage_file=os.path.join(
                                         trace_path,
                                         TRACE_FILE_TEMPLATE.format(
                                             pathway=ae.pathway_name,
                                             data=ae.data_name,
                                             optimizer=optimizer,
                                             n_hidden=ae.n_hidden,
                                             job=seed)),
                                     trace_save_iter=10)

    np.random.seed(seed)

    optimize_options = OptimizeOptions(
        startpoint_resample=False,
        allow_failed_starts=True,
    )

    # reuse pretrained decoder parameters as startpoints when available
    decoder_par_pretraining = os.path.join(
        'pretraining', f'{ae.pathway_name}__{ae.data_name}__{ae.n_hidden}'
        f'__decoder_inflate.csv')
    has_decoder_par_pretraing = os.path.exists(decoder_par_pretraining)
    if has_decoder_par_pretraing:
        decoder_pars = pd.read_csv(decoder_par_pretraining)[ae.x_names]

    # per-parameter bounds, determined by the parameter-name suffix
    lb = np.asarray([
        parameter_boundaries_scales[name.split('_')[-1]][0]
        for name in pypesto_problem.x_names
    ])
    ub = np.asarray([
        parameter_boundaries_scales[name.split('_')[-1]][1]
        for name in pypesto_problem.x_names
    ])

    def startpoint(**kwargs):
        # pretrained startpoint if available for this seed, otherwise
        # uniform samples within [lb, ub]
        if has_decoder_par_pretraing and seed < len(decoder_pars):
            xs = decoder_pars.iloc[seed, :]
        else:
            xs = np.random.random((kwargs['n_starts'],
                                   ae.n_encoder_pars + ae.n_kin_params)) \
                * (ub - lb) + lb
        return xs

    return minimize(pypesto_problem,
                    opt,
                    n_starts=n_starts,
                    options=optimize_options,
                    history_options=history_options,
                    startpoint_method=startpoint)
예제 #27
0
def test_storage_trace(hdf5_file):
    """Compare an HDF5-backed optimization history with an in-memory one.

    Runs the same optimization twice from identical start points — once
    recording the history to *hdf5_file*, once keeping it in memory — and
    asserts that every recorded trace entry matches between the two.
    """
    rosen_objective_hdf5 = pypesto.Objective(fun=so.rosen,
                                             grad=so.rosen_der,
                                             hess=so.rosen_hess)
    rosen_objective_mem = pypesto.Objective(fun=so.rosen,
                                            grad=so.rosen_der,
                                            hess=so.rosen_hess)
    dim_full = 10
    lower = -5 * np.ones((dim_full, 1))
    upper = 5 * np.ones((dim_full, 1))
    n_starts = 5
    # shared start points so both runs follow identical trajectories
    guesses = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                 lb=lower,
                                                 ub=upper)
    problem_hdf5 = pypesto.Problem(objective=rosen_objective_hdf5,
                                   lb=lower,
                                   ub=upper,
                                   x_guesses=guesses)
    problem_mem = pypesto.Problem(objective=rosen_objective_mem,
                                  lb=lower,
                                  ub=upper,
                                  x_guesses=guesses)

    optimizer_hdf5 = optimize.ScipyOptimizer(options={'maxiter': 10})
    optimizer_mem = optimize.ScipyOptimizer(options={'maxiter': 10})

    # optimize with history saved to hdf5
    result_hdf5 = optimize.minimize(
        problem=problem_hdf5,
        optimizer=optimizer_hdf5,
        n_starts=n_starts,
        history_options=pypesto.HistoryOptions(trace_record=True,
                                               storage_file=hdf5_file),
    )

    # optimizing with history saved in memory
    result_memory = optimize.minimize(
        problem=problem_mem,
        optimizer=optimizer_mem,
        n_starts=n_starts,
        history_options=pypesto.HistoryOptions(trace_record=True),
        filename=None,
    )

    history_entries = [X, FVAL, GRAD, HESS, RES, SRES, CHI2, SCHI2]
    assert len(result_hdf5.optimize_result.list) == len(
        result_memory.optimize_result.list)
    for mem_res in result_memory.optimize_result.list:
        for hdf_res in result_hdf5.optimize_result.list:
            if mem_res['id'] != hdf_res['id']:
                continue
            for entry in history_entries:
                hdf5_trace = getattr(hdf_res['history'],
                                     f'get_{entry}_trace')()
                mem_trace = getattr(mem_res['history'],
                                    f'get_{entry}_trace')()
                for iteration, hdf5_value in enumerate(hdf5_trace):
                    # comparing nan and None difficult
                    if (hdf5_value is None
                            or np.isnan(hdf5_value).all()):
                        continue
                    np.testing.assert_array_equal(
                        mem_trace[iteration],
                        hdf5_value,
                    )
예제 #28
0
def test_storage_sampling():
    """Round-trip a sampling result through the HDF5 writer/reader.

    Writes a sampling result via pypesto.store.SamplingResultHDF5Writer,
    reads it back via pypesto.store.SamplingResultHDF5Reader, and checks
    that every sample_result entry except time and message is preserved.
    """
    rosen_objective = pypesto.Objective(fun=so.rosen,
                                        grad=so.rosen_der,
                                        hess=so.rosen_hess)
    dim_full = 10
    lower = -5 * np.ones((dim_full, 1))
    upper = 5 * np.ones((dim_full, 1))
    n_starts = 5
    guesses = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                 lb=lower,
                                                 ub=upper)
    problem = pypesto.Problem(objective=rosen_objective,
                              lb=lower,
                              ub=upper,
                              x_guesses=guesses)

    result_optimization = optimize.minimize(
        problem=problem,
        optimizer=optimize.ScipyOptimizer(),
        n_starts=n_starts,
        filename=None,
    )
    # start sampling from the best optimization result
    best_x = result_optimization.optimize_result.list[0]['x']
    sample_original = sample.sample(
        problem=problem,
        sampler=sample.AdaptiveParallelTemperingSampler(
            internal_sampler=sample.AdaptiveMetropolisSampler(),
            n_chains=1),
        n_samples=100,
        x0=[best_x],
        filename=None,
    )

    fn = 'test_file.hdf5'
    try:
        SamplingResultHDF5Writer(fn).write(sample_original)
        sample_read = SamplingResultHDF5Reader(fn).read()

        for key in sample_original.sample_result.keys():
            original_value = sample_original.sample_result[key]
            # 'time' differs between runs; None entries are not stored
            if original_value is None or key == 'time':
                continue
            if isinstance(original_value, np.ndarray):
                np.testing.assert_array_equal(
                    original_value,
                    sample_read.sample_result[key],
                )
            elif isinstance(original_value, (float, int)):
                np.testing.assert_almost_equal(
                    original_value,
                    sample_read.sample_result[key],
                )
    finally:
        if os.path.exists(fn):
            os.remove(fn)
예제 #29
0
def test_storage_profiling():
    """
    This test tests the saving and loading of profiles
    into HDF5 through pypesto.store.ProfileResultHDF5Writer
    and pypesto.store.ProfileResultHDF5Reader. Tests all entries
    aside from times and message.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem = pypesto.Problem(objective=objective,
                              lb=lb,
                              ub=ub,
                              x_guesses=startpoints)

    optimizer = optimize.ScipyOptimizer()

    result_optimization = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    profile_original = profile.parameter_profile(
        problem=problem,
        result=result_optimization,
        profile_index=[0],
        optimizer=optimizer,
        filename=None,
    )

    fn = 'test_file.hdf5'
    try:
        pypesto_profile_writer = ProfileResultHDF5Writer(fn)
        pypesto_profile_writer.write(profile_original)
        pypesto_profile_reader = ProfileResultHDF5Reader(fn)
        profile_read = pypesto_profile_reader.read()

        original_entry = profile_original.profile_result.list[0][0]
        read_entry = profile_read.profile_result.list[0][0]
        for key in original_entry.keys():
            # BUGFIX: the previous condition compared the bound method
            # `.keys` to None (always False). Skip entries whose *value*
            # is None, and 'time_path' which differs between runs.
            if original_entry[key] is None or key == 'time_path':
                continue
            elif isinstance(original_entry[key], np.ndarray):
                np.testing.assert_array_equal(
                    original_entry[key],
                    read_entry[key],
                )
            elif isinstance(original_entry[key], int):
                assert original_entry[key] == read_entry[key]
    finally:
        # always clean up the temporary HDF5 file
        if os.path.exists(fn):
            os.remove(fn)
예제 #30
0
def test_storage_all():
    """Test `read_result` and `write_result`.

    It currently does not test read/write of the problem as this
    is know to not work completely. Also excludes testing the history
    key of an optimization result.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    optimizer = optimize.ScipyOptimizer()
    # Optimization
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    # Profiling
    result = profile.parameter_profile(
        problem=problem,
        result=result,
        profile_index=[0],
        optimizer=optimizer,
        filename=None,
    )
    # Sampling
    sampler = sample.AdaptiveMetropolisSampler()
    result = sample.sample(
        problem=problem,
        sampler=sampler,
        n_samples=100,
        result=result,
        filename=None,
    )
    # Read and write
    filename = 'test_file.hdf5'
    try:
        write_result(result=result, filename=filename)
        result_read = read_result(filename=filename)

        # test optimize
        for i, opt_res in enumerate(result.optimize_result.list):
            for key in opt_res:
                # histories are explicitly excluded from this round-trip
                if key == 'history':
                    continue
                if isinstance(opt_res[key], np.ndarray):
                    np.testing.assert_array_equal(
                        opt_res[key], result_read.optimize_result.list[i][key])
                else:
                    assert (opt_res[key] == result_read.optimize_result.list[i]
                            [key])

        # test profile
        original_profile = result.profile_result.list[0][0]
        read_profile = result_read.profile_result.list[0][0]
        for key in original_profile.keys():
            # BUGFIX: the previous condition compared the bound method
            # `.keys` to None (always False). Skip entries whose *value*
            # is None, and 'time_path' which differs between runs.
            if original_profile[key] is None or key == 'time_path':
                continue
            elif isinstance(original_profile[key], np.ndarray):
                np.testing.assert_array_equal(
                    original_profile[key],
                    read_profile[key],
                )
            elif isinstance(original_profile[key], int):
                assert original_profile[key] == read_profile[key]

        # test sample
        for key in result.sample_result.keys():
            # 'time' differs between runs; None entries are not stored
            if result.sample_result[key] is None or key == 'time':
                continue
            elif isinstance(result.sample_result[key], np.ndarray):
                np.testing.assert_array_equal(
                    result.sample_result[key],
                    result_read.sample_result[key],
                )
            elif isinstance(result.sample_result[key], (float, int)):
                np.testing.assert_almost_equal(
                    result.sample_result[key],
                    result_read.sample_result[key],
                )
    finally:
        # always clean up the temporary HDF5 file
        if os.path.exists(filename):
            os.remove(filename)