Code example #1
def _construct_models(X, Y, metric, do_mcmc, with_pending):
    pending_candidates = []
    if with_pending:
        pending_candidates = [PendingEvaluation((0.5, 0.5)), PendingEvaluation((0.2, 0.2))]
    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ),
        [
            CandidateEvaluation(x, y) for x, y in zip(X, Y)
        ],
        [],
        pending_candidates
    )
    random_seed = 0

    gpmodel = default_gpmodel(
        state, random_seed=random_seed,
        optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    result = [GaussProcSurrogateModel(
        state, metric, random_seed, gpmodel, fit_parameters=True,
        num_fantasy_samples=20)]
    if do_mcmc:
        gpmodel_mcmc = default_gpmodel_mcmc(
            state, random_seed=random_seed,
            mcmc_config=DEFAULT_MCMC_CONFIG)
        result.append(
            GaussProcSurrogateModel(
                state, metric, random_seed, gpmodel_mcmc,
                fit_parameters=True, num_fantasy_samples=20))
    return result
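
For context, a hedged sketch of a call site for this helper; the grid and targets mirror the other examples on this page, and np, dictionarize_objective, and DEFAULT_METRIC are assumed to be in scope as in the surrounding test module:

# Illustrative call site (assumes the test module's imports are in scope)
X = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]
models = _construct_models(
    X, Y, DEFAULT_METRIC, do_mcmc=True, with_pending=True)
assert len(models) == 2  # plain GP model plus its MCMC counterpart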
Code example #2
def test_gp_fit(tuning_job_state):
    _set_seeds(0)
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    Y = [np.sum(x) * 10.0 for x in X]

    # Check that fitting runs
    random_seed = 0
    gpmodel = default_gpmodel(tuning_job_state, random_seed,
        optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    model = GaussProcSurrogateModel(
        tuning_job_state, DEFAULT_METRIC, random_seed, gpmodel,
        fit_parameters=True, num_fantasy_samples=20)

    X_train = [tuning_job_state.hp_ranges.to_ndarray(x) for x in X]
    pred_train = model.predict(np.array(X_train))[0]

    assert np.all(np.abs(pred_train['mean'] - Y) < 1e-1), \
        "in a noiseless setting, the GP posterior mean should closely match the targets at the training points"

    X_test = [
        (0.2, 0.2),
        (0.4, 0.2),
        (0.1, 0.9),
        (0.5, 0.5),
    ]
    X_test = [tuning_job_state.hp_ranges.to_ndarray(x) for x in X_test]

    pred_test = model.predict(np.array(X_test))[0]
    assert np.min(pred_train['std']) < np.min(pred_test['std']), \
        "the smallest predictive standard deviation at unobserved points should exceed the smallest one at observed points"
Code example #3
def default_models() -> List[GaussProcSurrogateModel]:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
        (0.0, 0.0),  # same evals are added multiple times to force the GP to unlearn its prior
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]

    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ),
        [CandidateEvaluation(x, y) for x, y in zip(X, Y)],
        [],
        [],
    )
    random_seed = 0

    gpmodel = default_gpmodel(state,
                              random_seed=random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)

    gpmodel_mcmc = default_gpmodel_mcmc(state,
                                        random_seed=random_seed,
                                        mcmc_config=DEFAULT_MCMC_CONFIG)

    return [
        GaussProcSurrogateModel(state,
                                DEFAULT_METRIC,
                                random_seed,
                                gpmodel,
                                fit_parameters=True,
                                num_fantasy_samples=20),
        GaussProcSurrogateModel(state,
                                DEFAULT_METRIC,
                                random_seed,
                                gpmodel_mcmc,
                                fit_parameters=True,
                                num_fantasy_samples=20)
    ]
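
The duplicated rows in X exploit the fact that repeating an observation acts like averaging away noise: with a fitted noise variance sigma^2 and a point observed n times, the posterior variance there shrinks like sigma^2 / (n + sigma^2), pulling the model toward the data and away from its prior. A small numpy sketch of that effect, independent of the library (k(x, x) = 1 and sigma^2 = 0.5 are illustrative):

import numpy as np

sigma2 = 0.5
for n in (1, 3, 12):
    K = np.ones((n, n)) + sigma2 * np.eye(n)  # n copies of one input; k(x, x) = 1
    k_star = np.ones(n)
    post_var = 1.0 - k_star @ np.linalg.solve(K, k_star)
    print(n, round(post_var, 4))  # 0.3333, 0.1429, 0.04: shrinks toward 0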
Code example #4
def default_models(metric, do_mcmc=True) -> List[GaussProcSurrogateModel]:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    if metric == DEFAULT_METRIC:
        Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]
    elif metric == DEFAULT_COST_METRIC:
        # Increasing the first hp increases cost
        Y = [{metric: 1.0 + x[0] * 2.0} for x in X]
    else:
        raise ValueError(f"{metric} is not a valid metric")

    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ),
        [
            CandidateEvaluation(x, y) for x, y in zip(X, Y)
        ],
        [], []
    )
    random_seed = 0

    gpmodel = default_gpmodel(
        state, random_seed=random_seed,
        optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    result = [GaussProcSurrogateModel(
        state, metric, random_seed, gpmodel, fit_parameters=True,
        num_fantasy_samples=20)]
    if do_mcmc:
        gpmodel_mcmc = default_gpmodel_mcmc(
            state, random_seed=random_seed,
            mcmc_config=DEFAULT_MCMC_CONFIG)
        result.append(
            GaussProcSurrogateModel(
                state, metric, random_seed, gpmodel_mcmc,
                fit_parameters=True, num_fantasy_samples=20))
    return result
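
A hedged usage sketch; the expected counts follow directly from the function body above:

# do_mcmc=False yields only the marginal-likelihood model;
# the default do_mcmc=True appends the MCMC variant.
assert len(default_models(DEFAULT_METRIC, do_mcmc=False)) == 1
assert len(default_models(DEFAULT_COST_METRIC)) == 2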
Code example #5
def test_gp_fantasizing():
    """
    Compare whether acquisition function evaluations (values, gradients) with
    fantasizing are the same as averaging them by hand.
    """
    random_seed = 4567
    _set_seeds(random_seed)
    num_fantasy_samples = 10
    num_pending = 5

    hp_ranges = HyperparameterRanges_Impl(
        HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
        HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()))
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    num_data = len(X)
    Y = [
        dictionarize_objective(np.random.randn(1, 1)) for _ in range(num_data)
    ]
    # Draw fantasies. This is done for a number of fixed pending
    # candidates. The model parameters are fit in the first iteration,
    # when there are no pending candidates.

    # Note: It is important to not normalize targets, because this would be
    # done on the observed targets only, not the fantasized ones, so it
    # would be hard to compare below.
    pending_evaluations = []
    for _ in range(num_pending):
        pending_cand = tuple(np.random.rand(2, ))
        pending_evaluations.append(PendingEvaluation(pending_cand))
    state = TuningJobState(hp_ranges,
                           [CandidateEvaluation(x, y) for x, y in zip(X, Y)],
                           failed_candidates=[],
                           pending_evaluations=pending_evaluations)
    gpmodel = default_gpmodel(state,
                              random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    model = GaussProcSurrogateModel(state,
                                    DEFAULT_METRIC,
                                    random_seed,
                                    gpmodel,
                                    fit_parameters=True,
                                    num_fantasy_samples=num_fantasy_samples,
                                    normalize_targets=False)
    fantasy_samples = model.fantasy_samples
    # Evaluate acquisition function and gradients with fantasizing
    num_test = 50
    X_test = [
        hp_ranges.to_ndarray(tuple(np.random.rand(2, )))
        for _ in range(num_test)
    ]
    acq_func = EIAcquisitionFunction(model)
    fvals, grads = _compute_acq_with_gradient_many(acq_func, X_test)
    # Do the same computation by averaging by hand
    fvals_cmp = np.empty((num_fantasy_samples, ) + fvals.shape)
    grads_cmp = np.empty((num_fantasy_samples, ) + grads.shape)
    X_full = X + state.pending_candidates
    for it in range(num_fantasy_samples):
        Y_full = Y + [
            dictionarize_objective(fantasy.fantasies[DEFAULT_METRIC][:, it])
            for fantasy in fantasy_samples
        ]
        state2 = TuningJobState(
            hp_ranges,
            [CandidateEvaluation(x, y) for x, y in zip(X_full, Y_full)],
            failed_candidates=[],
            pending_evaluations=[])
        # We have to skip parameter optimization here
        model2 = GaussProcSurrogateModel(
            state2,
            DEFAULT_METRIC,
            random_seed,
            gpmodel,
            fit_parameters=False,
            num_fantasy_samples=num_fantasy_samples,
            normalize_targets=False)
        acq_func2 = EIAcquisitionFunction(model2)
        fvals_, grads_ = _compute_acq_with_gradient_many(acq_func2, X_test)
        fvals_cmp[it, :] = fvals_
        grads_cmp[it, :] = grads_
    # Comparison
    fvals2 = np.mean(fvals_cmp, axis=0)
    grads2 = np.mean(grads_cmp, axis=0)
    assert np.allclose(fvals, fvals2)
    assert np.allclose(grads, grads2)
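
In symbols, the identity this test verifies: with S fantasy samples imputed for the pending candidates, the fantasized acquisition value is the plain average of the per-sample acquisitions, and since averaging commutes with differentiation, the same holds for the gradients:

$$
\alpha(x) = \frac{1}{S} \sum_{s=1}^{S} \alpha^{(s)}(x),
\qquad
\nabla_x\, \alpha(x) = \frac{1}{S} \sum_{s=1}^{S} \nabla_x\, \alpha^{(s)}(x),
$$

where $\alpha^{(s)}$ denotes the acquisition function under the model conditioned on the observed data plus the $s$-th fantasized targets $\tilde y^{(s)}$ for the pending candidates.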