Example #1
def test_petab_pysb_optimization():
    test_case = '0001'
    test_case_dir = os.path.join(petabtests.PYSB_DIR, test_case)
    petab_yaml = os.path.join(test_case_dir, f'_{test_case}.yaml')
    solution_yaml = os.path.join(test_case_dir, f'_{test_case}_solution.yaml')

    # expected results
    with open(solution_yaml) as f:
        solution = yaml.full_load(f)

    petab_problem = PysbPetabProblem.from_yaml(petab_yaml)

    importer = PetabImporterPysb(petab_problem)
    problem = importer.create_problem()

    # ensure simulation result for true parameters matches
    assert np.isclose(problem.objective(petab_problem.x_nominal),
                      -solution[petabtests.LLH])

    optimizer = optimize.ScipyOptimizer()
    result = optimize.minimize(problem=problem,
                               optimizer=optimizer,
                               n_starts=10,
                               filename=None)
    fvals = np.array(result.optimize_result.get_for_key('fval'))

    # ensure objective after optimization is not worse than for true parameters
    assert np.all(fvals <= -solution[petabtests.LLH])
Example #2
def get_optimizer(library, solver):
    """Constructs Optimizer given and optimization library and optimization
    solver specification"""
    options = {
        'maxiter': 100
    }
    optimizer = None

    if library == 'scipy':
        optimizer = optimize.ScipyOptimizer(method=solver, options=options)
    elif library == 'ipopt':
        optimizer = optimize.IpoptOptimizer()
    elif library == 'dlib':
        optimizer = optimize.DlibOptimizer(options=options)
    elif library == 'pyswarm':
        optimizer = optimize.PyswarmOptimizer(options=options)
    elif library == 'cmaes':
        optimizer = optimize.CmaesOptimizer(options=options)
    elif library == 'nlopt':
        optimizer = optimize.NLoptOptimizer(method=solver, options=options)
    elif library == 'fides':
        options[fides.Options.SUBSPACE_DIM] = solver[1]
        optimizer = optimize.FidesOptimizer(options=options,
                                            hessian_update=solver[0])

    return optimizer
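
A minimal usage sketch of this factory (added for illustration, not part of the original example): it builds a two-parameter Rosenbrock problem, as in the storage examples below, and runs a short multistart optimization through the scipy branch. The method name 'L-BFGS-B' is an assumed choice.

import numpy as np
import scipy.optimize as so

import pypesto
import pypesto.optimize as optimize

# Rosenbrock objective and box-constrained problem (assumed setup)
objective = pypesto.Objective(fun=so.rosen, grad=so.rosen_der)
problem = pypesto.Problem(objective, lb=-2 * np.ones(2), ub=2 * np.ones(2))

# 'scipy' branch of get_optimizer; the solver string is passed to ScipyOptimizer
optimizer = get_optimizer('scipy', 'L-BFGS-B')
result = optimize.minimize(problem=problem,
                           optimizer=optimizer,
                           n_starts=5,
                           filename=None)
print(result.optimize_result.list[0]['fval'])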
Example #3
def test_profile_with_fixed_parameters():
    """Test using profiles with fixed parameters."""
    obj = test_objective.rosen_for_sensi(max_sensi_order=1)['obj']

    lb = -2 * np.ones(5)
    ub = 2 * np.ones(5)
    problem = pypesto.Problem(objective=obj,
                              lb=lb,
                              ub=ub,
                              x_fixed_vals=[0.5, -1.8],
                              x_fixed_indices=[0, 3])

    optimizer = optimize.ScipyOptimizer(options={'maxiter': 50})
    result = optimize.minimize(problem=problem,
                               optimizer=optimizer,
                               n_starts=2)

    for i_method, next_guess_method in enumerate([
            'fixed_step', 'adaptive_step_order_0', 'adaptive_step_order_1',
            'adaptive_step_regression'
    ]):
        print(next_guess_method)
        profile.parameter_profile(problem=problem,
                                  result=result,
                                  optimizer=optimizer,
                                  next_guess_method=next_guess_method)

        # standard plotting
        axes = visualize.profiles(result, profile_list_ids=i_method)
        assert len(axes) == 3
        visualize.profile_cis(result, profile_list=i_method)
Example #4
def check_minimize(objective, library, solver, allow_failed_starts=False):

    options = {
        'maxiter': 100
    }

    optimizer = None

    if library == 'scipy':
        optimizer = optimize.ScipyOptimizer(method=solver, options=options)
    elif library == 'ipopt':
        optimizer = optimize.IpoptOptimizer()
    elif library == 'dlib':
        optimizer = optimize.DlibOptimizer(method=solver, options=options)
    elif library == 'pyswarm':
        optimizer = optimize.PyswarmOptimizer(options=options)

    lb = 0 * np.ones((1, 2))
    ub = 1 * np.ones((1, 2))
    problem = pypesto.Problem(objective, lb, ub)

    optimize_options = optimize.OptimizeOptions(
        allow_failed_starts=allow_failed_starts)

    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options
    )

    assert isinstance(result.optimize_result.list[0]['fval'], float)
Example #5
def get_optimizer(library, solver):
    """Constructs Optimizer given and optimization library and optimization
    solver specification"""
    options = {'maxiter': 100}

    if library == 'scipy':
        optimizer = optimize.ScipyOptimizer(method=solver, options=options)
    elif library == 'ipopt':
        optimizer = optimize.IpoptOptimizer()
    elif library == 'dlib':
        optimizer = optimize.DlibOptimizer(options=options)
    elif library == 'pyswarm':
        optimizer = optimize.PyswarmOptimizer(options=options)
    elif library == 'cmaes':
        optimizer = optimize.CmaesOptimizer(options=options)
    elif library == 'scipydiffevolopt':
        optimizer = optimize.ScipyDifferentialEvolutionOptimizer(
            options=options
        )
    elif library == 'pyswarms':
        optimizer = optimize.PyswarmsOptimizer(options=options)
    elif library == 'nlopt':
        optimizer = optimize.NLoptOptimizer(method=solver, options=options)
    elif library == 'fides':
        options[fides.Options.SUBSPACE_DIM] = solver[1]
        optimizer = optimize.FidesOptimizer(
            options=options, hessian_update=solver[0]
        )
    else:
        raise ValueError(f"Optimizer not recognized: {library}")

    return optimizer
Example #6
def create_optimization_history():
    # create the pypesto problem
    problem = create_problem()

    # create optimizer
    optimizer_options = {'maxiter': 200}
    optimizer = optimize.ScipyOptimizer(
        method='TNC', options=optimizer_options
    )

    history_options = pypesto.HistoryOptions(
        trace_record=True, trace_save_iter=1
    )

    # run optimization
    optimize_options = optimize.OptimizeOptions(allow_failed_starts=True)
    result_with_trace = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=5,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options,
        history_options=history_options,
        filename=None,
    )

    return result_with_trace
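
A hedged follow-up sketch (not in the original test): with trace_record=True, each entry of optimize_result carries a 'history' object whose per-iteration objective values can be read back via get_fval_trace(), the same accessor pattern used in the trace-comparison test further below. This assumes create_problem() from the original module is available.

result_with_trace = create_optimization_history()
best_start = result_with_trace.optimize_result.list[0]

# per-iteration objective values recorded by the history
fval_trace = best_start['history'].get_fval_trace()
print(len(fval_trace), fval_trace[-1])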
Example #7
def test_history_beats_optimizer():
    """Test overwriting from history vs whatever the optimizer reports."""
    problem = CRProblem(
        x_guesses=np.array([0.25, 0.25]).reshape(1, -1)
    ).get_problem()

    max_fval = 10
    scipy_options = {"maxfun": max_fval}

    result_hist = optimize.minimize(
        problem=problem,
        optimizer=optimize.ScipyOptimizer(method="TNC", options=scipy_options),
        n_starts=1,
        options=optimize.OptimizeOptions(history_beats_optimizer=True),
        filename=None,
    )

    result_opt = optimize.minimize(
        problem=problem,
        optimizer=optimize.ScipyOptimizer(method="TNC", options=scipy_options),
        n_starts=1,
        options=optimize.OptimizeOptions(history_beats_optimizer=False),
        filename=None,
    )

    for result in (result_hist, result_opt):
        # number of function evaluations
        assert result.optimize_result.list[0]['n_fval'] <= max_fval + 1
        # optimal value in bounds
        assert np.all(problem.lb <= result.optimize_result.list[0]['x'])
        assert np.all(problem.ub >= result.optimize_result.list[0]['x'])
        # entries filled
        for key in ('fval', 'x', 'grad'):
            val = result.optimize_result.list[0][key]
            assert val is not None and np.all(np.isfinite(val))

    # TNC funnily reports the last value if not converged
    #  (this may break if their implementation is changed at some point ...)
    assert (
        result_hist.optimize_result.list[0]['fval']
        < result_opt.optimize_result.list[0]['fval']
    )
Example #8
def test_ensemble_from_optimization():
    """
    Test reading an ensemble from optimization result.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5

    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    optimizer = optimize.ScipyOptimizer(options={'maxiter': 10})
    history_options = pypesto.HistoryOptions(trace_record=True)
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        history_options=history_options,
    )

    # change fvals of each start
    for i_start, optimizer_result in enumerate(result.optimize_result.list):
        optimizer_result['fval'] = i_start + 1
        for i_iter in range(len(optimizer_result['history']._trace['fval'])):
            optimizer_result['history']._trace['fval'][i_iter] = (
                len(optimizer_result['history']._trace['fval']) + i_start -
                i_iter)

    # test_endpoints
    ensemble_ep = Ensemble.from_optimization_endpoints(result=result,
                                                       cutoff=4,
                                                       max_size=10)

    ensemble_hist = Ensemble.from_optimization_history(result=result,
                                                       cutoff=4,
                                                       max_size=10,
                                                       max_per_start=5)

    # compare vector_tags with the expected values:
    ep_tags = [(int(result.optimize_result.list[i]['id']), -1)
               for i in [0, 1, 2, 3]]

    hist_tags = [(
        int(result.optimize_result.list[i]['id']),
        len(result.optimize_result.list[i]['history']._trace['fval']) - 1 - j,
    ) for i in range(4) for j in reversed(range(4 - i))]
    assert hist_tags == ensemble_hist.vector_tags
    assert ep_tags == ensemble_ep.vector_tags
Example #9
def test_pipeline(sampler, problem):
    """Check that a typical pipeline runs through."""
    # optimization
    optimizer = optimize.ScipyOptimizer(options={'maxiter': 10})
    result = optimize.minimize(
        problem, n_starts=3, optimizer=optimizer)

    # sample
    result = sample.sample(
        problem, sampler=sampler, n_samples=100, result=result)

    # some plot
    visualize.sampling_1d_marginals(result)
    plt.close()
Example #10
def get_ensemble_prediction(max_size: int = 2,
                            inc_weights: bool = False,
                            inc_sigmay: bool = False):
    """
    Creates an ensemble prediction for the tests.
    """
    problem = create_petab_problem()

    optimizer = optimize.ScipyOptimizer()
    result = optimize.minimize(problem=problem,
                               optimizer=optimizer,
                               n_starts=2,
                               filename=None)

    ensemble_ep = Ensemble.from_optimization_endpoints(result=result,
                                                       max_size=10)

    # This post_processor will transform the output of the simulation tool
    # such that the output is compatible with the next steps.
    def post_processor(amici_outputs, output_type, output_ids):
        outputs = [
            amici_output[output_type] if amici_output[AMICI_STATUS] == 0 else
            np.full((len(amici_output[AMICI_T]), len(output_ids)), np.nan)
            for amici_output in amici_outputs
        ]
        return outputs

    amici_objective = result.problem.objective
    observable_ids = amici_objective.amici_model.getObservableIds()
    post_processor_y = partial(
        post_processor,
        output_type=AMICI_Y,
        output_ids=observable_ids,
    )
    # Create pyPESTO predictors for states and observables
    predictor_y = AmiciPredictor(
        amici_objective,
        post_processor=post_processor_y,
        output_ids=observable_ids,
    )
    engine = MultiProcessEngine()
    ensemble_prediction = ensemble_ep.predict(
        predictor_y,
        prediction_id=AMICI_Y,
        engine=engine,
        include_llh_weights=inc_weights,
        include_sigmay=inc_sigmay,
    )
    return ensemble_prediction
Example #11
def test_parameters_hist():
    # create the pypesto problem
    problem = create_problem()

    # create optimizer
    optimizer_options = {'maxiter': 200}
    optimizer = optimize.ScipyOptimizer(method='TNC',
                                        options=optimizer_options)

    # run optimization
    result_1 = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=10,
        startpoint_method=pypesto.startpoint.uniform,
    )

    visualize.parameter_hist(result_1, 'x1')
    visualize.parameter_hist(result_1, 'x1', start_indices=list(range(10)))
Example #12
def create_optimization_results(objective):
    # create optimizer, pypesto problem and options
    options = {'maxiter': 200}
    optimizer = optimize.ScipyOptimizer(method='TNC', options=options)

    lb = -2 * np.ones(2)
    ub = 2 * np.ones(2)
    problem = pypesto.Problem(objective, lb, ub)

    optimize_options = optimize.OptimizeOptions(allow_failed_starts=True)

    # run optimization
    result = optimize.minimize(problem=problem,
                               optimizer=optimizer,
                               n_starts=5,
                               startpoint_method=pypesto.startpoint.uniform,
                               options=optimize_options)

    return problem, result, optimizer
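
As a hedged usage sketch (not part of the original helper), the returned multistart result can be summarized with pyPESTO's waterfall plot; the Rosenbrock objective here mirrors the one used in the storage examples below.

import matplotlib.pyplot as plt
import scipy.optimize as so

import pypesto
import pypesto.visualize as visualize

# two-dimensional Rosenbrock objective (assumed input for the helper above)
objective = pypesto.Objective(fun=so.rosen, grad=so.rosen_der)
problem, result, optimizer = create_optimization_results(objective)

# sorted final objective values of the five starts
visualize.waterfall(result)
plt.close()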
Example #13
def test_pipeline(sampler, problem):
    """Check that a typical pipeline runs through."""
    # optimization
    optimizer = optimize.ScipyOptimizer(options={'maxiter': 10})
    result = optimize.minimize(problem,
                               n_starts=3,
                               optimizer=optimizer,
                               filename=None)

    # sample
    result = sample.sample(problem,
                           sampler=sampler,
                           n_samples=100,
                           result=result,
                           filename=None)
    # run the Geweke test first; this avoids the warning otherwise raised
    # from pypesto/visualize/sampling.py:1104 (cf. test/sample/test_sample)
    sample.geweke_test(result=result)

    # some plot
    visualize.sampling_1d_marginals(result)
    plt.close()
Example #14
def test_trim_results(problem):
    """
    Test trimming of hess/sres from results
    """

    optimize_options = optimize.OptimizeOptions(
        report_hess=False, report_sres=False
    )
    prob = pypesto.Problem(
        objective=rosen_for_sensi(max_sensi_order=2)['obj'],
        lb=0 * np.ones((1, 2)),
        ub=1 * np.ones((1, 2)),
    )

    # hess
    optimizer = optimize.FidesOptimizer(verbose=0)
    result = optimize.minimize(
        problem=prob,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options,
        filename=None,
    )
    assert result.optimize_result.list[0].hess is None

    # sres
    optimizer = optimize.ScipyOptimizer(method='ls_trf')
    result = optimize.minimize(
        problem=prob,
        optimizer=optimizer,
        n_starts=1,
        startpoint_method=pypesto.startpoint.uniform,
        options=optimize_options,
        filename=None,
    )
    assert result.optimize_result.list[0].sres is None
Example #15
def get_optimizer(optimizer_name: str, history_file: str,
                  parsed_options: Dict):
    if optimizer_name == 'fides':
        optim_options = {
            fides.Options.MAXITER: MAX_ITER,
            fides.Options.FATOL: 0.0,
            fides.Options.FRTOL: 0.0,
            fides.Options.XTOL: 1e-6,
            fides.Options.GATOL: 0.0,
            fides.Options.GRTOL: 0.0,
            fides.Options.HISTORY_FILE: history_file
        }

        parsed2optim = {
            'stepback': fides.Options.STEPBACK_STRAT,
            'subspace': fides.Options.SUBSPACE_DIM,
        }

        happ = parsed_options.pop('hessian', 'FIM')
        if re.match(r'Hybrid[SB]_[0-9]+', happ):
            hybrid_happ, nswitch = happ[6:].split('_')

            happs = {'B': fides.BFGS(), 'S': fides.SR1()}

            hessian_update = fides.HybridFixed(
                switch_iteration=int(float(nswitch)),
                happ=happs[hybrid_happ[0]],
            )
        else:
            hessian_update = {
                'BFGS': fides.BFGS(),
                'SR1': fides.SR1(),
                'FX': fides.FX(fides.BFGS()),
                'GNSBFGS': fides.GNSBFGS(),
                'SSM': fides.SSM(),
                'TSSM': fides.TSSM(),
                'FIM': None,
                'FIMe': None
            }[happ]

        for parse_field, optim_field in parsed2optim.items():
            if parse_field in parsed_options:
                value = parsed_options.pop(parse_field)
                optim_options[optim_field] = value

        if parsed_options:
            raise ValueError(f'Unknown options {parsed_options.keys()}')

        return optimize.FidesOptimizer(options=optim_options,
                                       verbose=logging.ERROR,
                                       hessian_update=hessian_update)

    if optimizer_name.startswith('ls_trf'):
        # monkeypatch xtol check
        from monkeypatch_ls_trf import trf_bounds
        scipy.optimize._lsq.trf.trf_bounds = trf_bounds

        # create (or truncate) an empty HDF5 history file
        with h5py.File(history_file, 'w'):
            pass

        options = {
            'max_nfev': MAX_ITER,
            'xtol': 1e-6,
            'ftol': 0.0,
            'gtol': 0.0
        }
        if optimizer_name == 'ls_trf_2D':
            options['tr_solver'] = 'lsmr'
        elif optimizer_name == 'ls_trf':
            options['tr_solver'] = 'exact'

        return optimize.ScipyOptimizer(method='ls_trf', options=options)

    raise ValueError('Unknown optimizer name.')
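
A hypothetical invocation of this factory (for illustration only): the history file path is an assumption, and 'BFGS' selects fides.BFGS() via the dictionary above; MAX_ITER and the fides import are expected at module level, as in the original.

optimizer = get_optimizer(
    optimizer_name='fides',
    history_file='fides_history.hdf5',  # assumed path
    parsed_options={'hessian': 'BFGS'},
)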
Example #16
def test_storage_all():
    """Test `read_result` and `write_result`.

    It currently does not test read/write of the problem, as this is
    known to not work completely. It also excludes testing the history
    key of an optimization result.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    problem = pypesto.Problem(objective=objective, lb=lb, ub=ub)

    optimizer = optimize.ScipyOptimizer()
    # Optimization
    result = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    # Profiling
    result = profile.parameter_profile(
        problem=problem,
        result=result,
        profile_index=[0],
        optimizer=optimizer,
        filename=None,
    )
    # Sampling
    sampler = sample.AdaptiveMetropolisSampler()
    result = sample.sample(
        problem=problem,
        sampler=sampler,
        n_samples=100,
        result=result,
        filename=None,
    )
    # Read and write
    filename = 'test_file.hdf5'
    try:
        write_result(result=result, filename=filename)
        result_read = read_result(filename=filename)

        # test optimize
        for i, opt_res in enumerate(result.optimize_result.list):
            for key in opt_res:
                if key == 'history':
                    continue
                if isinstance(opt_res[key], np.ndarray):
                    np.testing.assert_array_equal(
                        opt_res[key], result_read.optimize_result.list[i][key])
                else:
                    assert (opt_res[key] == result_read.optimize_result.list[i]
                            [key])

        # test profile
        for key in result.profile_result.list[0][0].keys():
            if (result.profile_result.list[0][0][key] is None
                    or key == 'time_path'):
                continue
            elif isinstance(result.profile_result.list[0][0][key], np.ndarray):
                np.testing.assert_array_equal(
                    result.profile_result.list[0][0][key],
                    result_read.profile_result.list[0][0][key],
                )
            elif isinstance(result.profile_result.list[0][0][key], int):
                assert (result.profile_result.list[0][0][key] ==
                        result_read.profile_result.list[0][0][key])

        # test sample
        for key in result.sample_result.keys():
            if result.sample_result[key] is None or key == 'time':
                continue
            elif isinstance(result.sample_result[key], np.ndarray):
                np.testing.assert_array_equal(
                    result.sample_result[key],
                    result_read.sample_result[key],
                )
            elif isinstance(result.sample_result[key], (float, int)):
                np.testing.assert_almost_equal(
                    result.sample_result[key],
                    result_read.sample_result[key],
                )
    finally:
        if os.path.exists(filename):
            os.remove(filename)
Example #17
def test_storage_sampling():
    """
    This test tests the saving and loading of samples
    into HDF5 through pypesto.store.SamplingResultHDF5Writer
    and pypesto.store.SamplingResultHDF5Reader. Tests all entries
    aside from time and message.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem = pypesto.Problem(objective=objective,
                              lb=lb,
                              ub=ub,
                              x_guesses=startpoints)

    optimizer = optimize.ScipyOptimizer()

    result_optimization = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    x_0 = result_optimization.optimize_result.list[0]['x']
    sampler = sample.AdaptiveParallelTemperingSampler(
        internal_sampler=sample.AdaptiveMetropolisSampler(), n_chains=1)
    sample_original = sample.sample(
        problem=problem,
        sampler=sampler,
        n_samples=100,
        x0=[x_0],
        filename=None,
    )

    fn = 'test_file.hdf5'
    try:
        pypesto_sample_writer = SamplingResultHDF5Writer(fn)
        pypesto_sample_writer.write(sample_original)
        pypesto_sample_reader = SamplingResultHDF5Reader(fn)
        sample_read = pypesto_sample_reader.read()

        for key in sample_original.sample_result.keys():
            if sample_original.sample_result[key] is None or key == 'time':
                continue
            elif isinstance(sample_original.sample_result[key], np.ndarray):
                np.testing.assert_array_equal(
                    sample_original.sample_result[key],
                    sample_read.sample_result[key],
                )
            elif isinstance(sample_original.sample_result[key], (float, int)):
                np.testing.assert_almost_equal(
                    sample_original.sample_result[key],
                    sample_read.sample_result[key],
                )
    finally:
        if os.path.exists(fn):
            os.remove(fn)
Example #18
def test_storage_profiling():
    """
    This test tests the saving and loading of profiles
    into HDF5 through pypesto.store.ProfileResultHDF5Writer
    and pypesto.store.ProfileResultHDF5Reader. Tests all entries
    aside from times and message.
    """
    objective = pypesto.Objective(fun=so.rosen,
                                  grad=so.rosen_der,
                                  hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem = pypesto.Problem(objective=objective,
                              lb=lb,
                              ub=ub,
                              x_guesses=startpoints)

    optimizer = optimize.ScipyOptimizer()

    result_optimization = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=n_starts,
        filename=None,
    )
    profile_original = profile.parameter_profile(
        problem=problem,
        result=result_optimization,
        profile_index=[0],
        optimizer=optimizer,
        filename=None,
    )

    fn = 'test_file.hdf5'
    try:
        pypesto_profile_writer = ProfileResultHDF5Writer(fn)
        pypesto_profile_writer.write(profile_original)
        pypesto_profile_reader = ProfileResultHDF5Reader(fn)
        profile_read = pypesto_profile_reader.read()

        for key in profile_original.profile_result.list[0][0].keys():
            if (profile_original.profile_result.list[0][0][key] is None
                    or key == 'time_path'):
                continue
            elif isinstance(profile_original.profile_result.list[0][0][key],
                            np.ndarray):
                np.testing.assert_array_equal(
                    profile_original.profile_result.list[0][0][key],
                    profile_read.profile_result.list[0][0][key],
                )
            elif isinstance(profile_original.profile_result.list[0][0][key],
                            int):
                assert (profile_original.profile_result.list[0][0][key] ==
                        profile_read.profile_result.list[0][0][key])
    finally:
        if os.path.exists(fn):
            os.remove(fn)
Example #19
def test_storage_trace(hdf5_file):
    objective1 = pypesto.Objective(fun=so.rosen,
                                   grad=so.rosen_der,
                                   hess=so.rosen_hess)
    objective2 = pypesto.Objective(fun=so.rosen,
                                   grad=so.rosen_der,
                                   hess=so.rosen_hess)
    dim_full = 10
    lb = -5 * np.ones((dim_full, 1))
    ub = 5 * np.ones((dim_full, 1))
    n_starts = 5
    startpoints = pypesto.startpoint.latin_hypercube(n_starts=n_starts,
                                                     lb=lb,
                                                     ub=ub)
    problem1 = pypesto.Problem(objective=objective1,
                               lb=lb,
                               ub=ub,
                               x_guesses=startpoints)
    problem2 = pypesto.Problem(objective=objective2,
                               lb=lb,
                               ub=ub,
                               x_guesses=startpoints)

    optimizer1 = optimize.ScipyOptimizer(options={'maxiter': 10})
    optimizer2 = optimize.ScipyOptimizer(options={'maxiter': 10})

    history_options_hdf5 = pypesto.HistoryOptions(trace_record=True,
                                                  storage_file=hdf5_file)
    # optimize with history saved to hdf5
    result_hdf5 = optimize.minimize(
        problem=problem1,
        optimizer=optimizer1,
        n_starts=n_starts,
        history_options=history_options_hdf5,
    )

    # optimize with history saved in memory
    history_options_memory = pypesto.HistoryOptions(trace_record=True)
    result_memory = optimize.minimize(
        problem=problem2,
        optimizer=optimizer2,
        n_starts=n_starts,
        history_options=history_options_memory,
        filename=None,
    )

    history_entries = [X, FVAL, GRAD, HESS, RES, SRES, CHI2, SCHI2]
    assert len(result_hdf5.optimize_result.list) == len(
        result_memory.optimize_result.list)
    for mem_res in result_memory.optimize_result.list:
        for hdf_res in result_hdf5.optimize_result.list:
            if mem_res['id'] == hdf_res['id']:
                for entry in history_entries:
                    hdf5_entry_trace = getattr(hdf_res['history'],
                                               f'get_{entry}_trace')()
                    for iteration in range(len(hdf5_entry_trace)):
                        # nan and None entries are difficult to compare,
                        # so skip them
                        if (hdf5_entry_trace[iteration] is None or np.isnan(
                                hdf5_entry_trace[iteration]).all()):
                            continue
                        np.testing.assert_array_equal(
                            getattr(mem_res['history'],
                                    f'get_{entry}_trace')()[iteration],
                            hdf5_entry_trace[iteration],
                        )
Example #20
z = np.zeros_like(x)
for j in range(0, x.shape[0]):
    for k in range(0, x.shape[1]):
        z[j, k] = objective1([x[j, k], y[j, k]], (0, ))

fig = plt.figure()
fig.set_size_inches(*(14, 10))
ax = plt.axes(projection='3d')
ax.plot_surface(X=x, Y=y, Z=z)
plt.xlabel('x axis')
plt.ylabel('y axis')
ax.set_title('cost function values')

###### Optimization
# create different optimizers
optimizer_bfgs = optimize.ScipyOptimizer(method='l-bfgs-b')
optimizer_tnc = optimize.ScipyOptimizer(method='TNC')
optimizer_dogleg = optimize.ScipyOptimizer(method='dogleg')

# set number of starts
n_starts = 20

# save optimizer trace
history_options = pypesto.HistoryOptions(trace_record=True)

# Run optimizations for different optimizers
result1_bfgs = optimize.minimize(problem=problem1,
                                 optimizer=optimizer_bfgs,
                                 n_starts=n_starts,
                                 history_options=history_options)
result1_tnc = optimize.minimize(problem=problem1,
                                optimizer=optimizer_tnc,
                                n_starts=n_starts,
                                history_options=history_options)
Example #21
def test_optimization_stats():
    """Test pypesto.visualize.optimization_stats"""

    # create the pypesto problem
    problem = create_problem()

    # create optimizer
    optimizer_options = {'maxiter': 200}
    optimizer = optimize.ScipyOptimizer(
        method='TNC', options=optimizer_options
    )

    # run optimization
    result_1 = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=10,
        startpoint_method=pypesto.startpoint.uniform,
        filename=None,
    )

    result_2 = optimize.minimize(
        problem=problem,
        optimizer=optimizer,
        n_starts=10,
        startpoint_method=pypesto.startpoint.uniform,
        filename=None,
    )

    visualize.optimization_run_property_per_multistart(
        result_1, 'n_fval', legends='best result'
    )

    visualize.optimization_run_property_per_multistart(
        result_1, 'n_fval', plot_type='hist', legends='best result'
    )

    visualize.optimization_run_property_per_multistart(result_2, 'n_fval')

    # test plotting of lists
    visualize.optimization_run_property_per_multistart(
        [result_1, result_2],
        'n_fval',
        legends=['result1', 'result2'],
        plot_type='line',
    )

    visualize.optimization_run_property_per_multistart(
        result_1, 'time', plot_type='hist', legends='best result'
    )

    visualize.optimization_run_property_per_multistart(
        [result_1, result_2],
        'time',
        colors=[[0.5, 0.9, 0.9, 0.3], [0.9, 0.7, 0.8, 0.5]],
        legends=['result1', 'result2'],
        plot_type='hist',
    )

    visualize.optimization_run_properties_per_multistart([result_1, result_2])

    visualize.optimization_run_properties_one_plot(result_1, ['time'])

    visualize.optimization_run_properties_one_plot(
        result_1, ['n_fval', 'n_grad', 'n_hess']
    )

    visualize.optimization_run_property_per_multistart(
        [result_1, result_2],
        'time',
        colors=[[0.5, 0.9, 0.9, 0.3], [0.9, 0.7, 0.8, 0.5]],
        legends=['result1', 'result2'],
        plot_type='both',
    )