Example #1
def _construct_models(X, Y, metric, do_mcmc, with_pending):
    pending_candidates = []
    if with_pending:
        pending_candidates = [PendingEvaluation((0.5, 0.5)), PendingEvaluation((0.2, 0.2))]
    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ),
        [
            CandidateEvaluation(x, y) for x, y in zip(X, Y)
        ],
        [],
        pending_candidates
    )
    random_seed = 0

    gpmodel = default_gpmodel(
        state, random_seed=random_seed,
        optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    result = [GaussProcSurrogateModel(
        state, metric, random_seed, gpmodel, fit_parameters=True,
        num_fantasy_samples=20)]
    if do_mcmc:
        gpmodel_mcmc = default_gpmodel_mcmc(
            state, random_seed=random_seed,
            mcmc_config=DEFAULT_MCMC_CONFIG)
        result.append(
            GaussProcSurrogateModel(
                state, metric, random_seed, gpmodel_mcmc,
                fit_parameters=True, num_fantasy_samples=20))
    return result
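A minimal usage sketch for the helper above (assuming the same imports and constants, e.g. DEFAULT_METRIC, as the surrounding test module); the input grid mirrors the other examples in this collection.

X = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]
models = _construct_models(X, Y, DEFAULT_METRIC, do_mcmc=False, with_pending=True)
assert len(models) == 1  # only the MAP-fitted model is returned when do_mcmc=False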
def tuning_job_state_mcmc(X, Y) -> TuningJobState:
    Y = [dictionarize_objective(y) for y in Y]

    return TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', -4., 4., LinearScaling())),
        [CandidateEvaluation(x, y) for x, y in zip(X, Y)], [], [])
def test_compute_blacklisted_candidates(
        hp_ranges: HyperparameterRanges_Impl,
        candidate_evaluations: List[CandidateEvaluation],
        failed_candidates: List[Candidate],
        pending_evaluations: List[PendingEvaluation],
        expected: Set[Candidate]):
    state = TuningJobState(hp_ranges, candidate_evaluations, failed_candidates,
                           pending_evaluations)
    actual = compute_blacklisted_candidates(state)
    assert set(expected) == set(actual)
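A sketch of one possible parametrization of the test above. It assumes (not confirmed by this snippet) that compute_blacklisted_candidates returns the union of evaluated, failed, and pending candidates.

_hp_ranges = HyperparameterRanges_Impl(
    HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()))
test_compute_blacklisted_candidates(
    hp_ranges=_hp_ranges,
    candidate_evaluations=[CandidateEvaluation((0.1,), dictionarize_objective(1.0))],
    failed_candidates=[(0.5,)],
    pending_evaluations=[PendingEvaluation((0.9,))],
    expected={(0.1,), (0.5,), (0.9,)})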
Example #4
def multi_algo_state():
    def _candidate_evaluations(num):
        return [
            CandidateEvaluation(
                candidate=(i,),
                metrics=dictionarize_objective(float(i)))
            for i in range(num)]

    return {
        '0': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(
                HyperparameterRangeContinuous('a1_hp_1', -5.0, 5.0, LinearScaling(), -5.0, 5.0)),
            candidate_evaluations=_candidate_evaluations(2),
            failed_candidates=[(i,) for i in range(3)],
            pending_evaluations=[PendingEvaluation((i,)) for i in range(100)]
        ),
        '1': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(5),
            failed_candidates=[],
            pending_evaluations=[]
        ),
        '2': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(3),
            failed_candidates=[(i,) for i in range(10)],
            pending_evaluations=[PendingEvaluation((i,)) for i in range(1)]
        ),
        '3': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(6),
            failed_candidates=[],
            pending_evaluations=[]
        ),
        '4': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(120),
            failed_candidates=[],
            pending_evaluations=[]
        ),
    }
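A short consumption sketch for the fixture above, assuming the TuningJobState constructor arguments are exposed as attributes of the same names (as the other examples here suggest).

for name, state in multi_algo_state().items():
    print(name,
          len(state.candidate_evaluations),
          len(state.failed_candidates),
          len(state.pending_evaluations))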
def tuning_job_state():
    return {'algo-1': TuningJobState(
        hp_ranges=HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('a1_hp_1', -5.0, 5.0, LinearScaling()),
            HyperparameterRangeCategorical('a1_hp_2', ('a', 'b', 'c'))),
        candidate_evaluations=[CandidateEvaluation(candidate=(-3.0, 'a'), value=1.0),
                               CandidateEvaluation(candidate=(-1.9, 'c'), value=2.0),
                               CandidateEvaluation(candidate=(-3.5, 'a'), value=0.3)],
        failed_candidates=[],
        pending_evaluations=[]
    ),
        'algo-2': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(
                HyperparameterRangeContinuous('a2_hp_1', -5.0, 5.0, LinearScaling()),
                HyperparameterRangeInteger('a2_hp_2', -5, 5, LinearScaling(), -5, 5)),
            candidate_evaluations=[CandidateEvaluation(candidate=(-1.9, -1), value=0.0),
                                   CandidateEvaluation(candidate=(-3.5, 3), value=2.0)],
            failed_candidates=[],
            pending_evaluations=[]
        )
    }
def default_models() -> List[GaussProcSurrogateModel]:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
        (0.0, 0.0),  # same evals are added multiple times to force GP to unlearn prior
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]

    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ),
        [CandidateEvaluation(x, y) for x, y in zip(X, Y)],
        [],
        [],
    )
    random_seed = 0

    gpmodel = default_gpmodel(state,
                              random_seed=random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)

    gpmodel_mcmc = default_gpmodel_mcmc(state,
                                        random_seed=random_seed,
                                        mcmc_config=DEFAULT_MCMC_CONFIG)

    return [
        GaussProcSurrogateModel(state,
                                DEFAULT_METRIC,
                                random_seed,
                                gpmodel,
                                fit_parameters=True,
                                num_fantasy_samples=20),
        GaussProcSurrogateModel(state,
                                DEFAULT_METRIC,
                                random_seed,
                                gpmodel_mcmc,
                                fit_parameters=True,
                                num_fantasy_samples=20)
    ]
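A brief check of the fixture above; it relies only on what the code already shows, namely that a MAP-fitted and an MCMC-fitted surrogate are returned for the same state.

models = default_models()
assert len(models) == 2
assert all(isinstance(m, GaussProcSurrogateModel) for m in models)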
def tuning_job_state() -> TuningJobState:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]

    return TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ), [CandidateEvaluation(x, y) for x, y in zip(X, Y)], [], [])
Example #8
def default_models(metric, do_mcmc=True) -> List[GaussProcSurrogateModel]:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    if metric == DEFAULT_METRIC:
        Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]
    elif metric == DEFAULT_COST_METRIC:
        # Increasing the first hp increases cost
        Y = [{metric: 1.0 + x[0] * 2.0}
             for x in X]
    else:
        raise ValueError(f"{metric} is not a valid metric")

    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ),
        [
            CandidateEvaluation(x, y) for x, y in zip(X, Y)
        ],
        [], []
    )
    random_seed = 0

    gpmodel = default_gpmodel(
        state, random_seed=random_seed,
        optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    result = [GaussProcSurrogateModel(
        state, metric, random_seed, gpmodel, fit_parameters=True,
        num_fantasy_samples=20)]
    if do_mcmc:
        gpmodel_mcmc = default_gpmodel_mcmc(
            state, random_seed=random_seed,
            mcmc_config=DEFAULT_MCMC_CONFIG)
        result.append(
            GaussProcSurrogateModel(
                state, metric, random_seed, gpmodel_mcmc,
                fit_parameters=True, num_fantasy_samples=20))
    return result
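A sketch of exercising both supported metrics of the helper above; any other metric name raises ValueError, as the code shows.

for metric in (DEFAULT_METRIC, DEFAULT_COST_METRIC):
    models = default_models(metric, do_mcmc=False)
    assert len(models) == 1  # the MCMC model is skipped when do_mcmc=False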
def test_get_internal_candidate_evaluations():
    """we do not test the case with no evaluations, as it is assumed
    that there will be always some evaluations generated in the beginning
    of the BO loop."""

    candidates = [
        CandidateEvaluation((2, 3.3, 'X'), dictionarize_objective(5.3)),
        CandidateEvaluation((1, 9.9, 'Y'), dictionarize_objective(10.9)),
        CandidateEvaluation((7, 6.1, 'X'), dictionarize_objective(13.1)),
    ]

    state = TuningJobState(
        hp_ranges=HyperparameterRanges_Impl(
            HyperparameterRangeInteger('integer', 0, 10, LinearScaling()),
            HyperparameterRangeContinuous('real', 0, 10, LinearScaling()),
            HyperparameterRangeCategorical('categorical', ('X', 'Y')),
        ),
        candidate_evaluations=candidates,
        failed_candidates=[candidates[0].candidate],  # these should be ignored by the model
        pending_evaluations=[])

    result = get_internal_candidate_evaluations(state,
                                                DEFAULT_METRIC,
                                                normalize_targets=True,
                                                num_fantasize_samples=20)

    assert len(result.X.shape) == 2, "Input should be a matrix"
    assert len(result.y.shape) == 2, "Output should be a matrix"

    assert result.X.shape[0] == len(candidates)
    assert result.y.shape[-1] == 1, "Only single output value per row is supported"

    assert np.abs(np.mean(result.y)) < 1e-8, \
        "Mean of the normalized outputs is not 0.0"
    assert np.abs(np.std(result.y) - 1.0) < 1e-8, \
        "Std. of the normalized outputs is not 1.0"

    np.testing.assert_almost_equal(result.mean, 9.766666666666666)
    np.testing.assert_almost_equal(result.std, 3.283629428273267)
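The two reference constants can be reproduced by hand, assuming result.mean and result.std are the plain mean and population standard deviation of the observed targets 5.3, 10.9 and 13.1:

targets = np.array([5.3, 10.9, 13.1])
np.testing.assert_almost_equal(targets.mean(), 9.766666666666666)
np.testing.assert_almost_equal(targets.std(ddof=0), 3.283629428273267)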
def test_gp_fantasizing():
    """
    Compare whether acquisition function evaluations (values, gradients) with
    fantasizing are the same as averaging them by hand.
    """
    random_seed = 4567
    _set_seeds(random_seed)
    num_fantasy_samples = 10
    num_pending = 5

    hp_ranges = HyperparameterRanges_Impl(
        HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
        HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()))
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    num_data = len(X)
    Y = [
        dictionarize_objective(np.random.randn(1, 1)) for _ in range(num_data)
    ]
    # Draw fantasies. This is done for a number of fixed pending candidates
    # The model parameters are fit in the first iteration, when there are
    # no pending candidates

    # Note: It is important to not normalize targets, because this would be
    # done on the observed targets only, not the fantasized ones, so it
    # would be hard to compare below.
    pending_evaluations = []
    for _ in range(num_pending):
        pending_cand = tuple(np.random.rand(2, ))
        pending_evaluations.append(PendingEvaluation(pending_cand))
    state = TuningJobState(hp_ranges,
                           [CandidateEvaluation(x, y) for x, y in zip(X, Y)],
                           failed_candidates=[],
                           pending_evaluations=pending_evaluations)
    gpmodel = default_gpmodel(state,
                              random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    model = GaussProcSurrogateModel(state,
                                    DEFAULT_METRIC,
                                    random_seed,
                                    gpmodel,
                                    fit_parameters=True,
                                    num_fantasy_samples=num_fantasy_samples,
                                    normalize_targets=False)
    fantasy_samples = model.fantasy_samples
    # Evaluate acquisition function and gradients with fantasizing
    num_test = 50
    X_test = [
        hp_ranges.to_ndarray(tuple(np.random.rand(2, )))
        for _ in range(num_test)
    ]
    acq_func = EIAcquisitionFunction(model)
    fvals, grads = _compute_acq_with_gradient_many(acq_func, X_test)
    # Do the same computation by averaging by hand
    fvals_cmp = np.empty((num_fantasy_samples, ) + fvals.shape)
    grads_cmp = np.empty((num_fantasy_samples, ) + grads.shape)
    X_full = X + state.pending_candidates
    for it in range(num_fantasy_samples):
        Y_full = Y + [
            dictionarize_objective(eval.fantasies[DEFAULT_METRIC][:, it])
            for eval in fantasy_samples
        ]
        state2 = TuningJobState(
            hp_ranges,
            [CandidateEvaluation(x, y) for x, y in zip(X_full, Y_full)],
            failed_candidates=[],
            pending_evaluations=[])
        # We have to skip parameter optimization here
        model2 = GaussProcSurrogateModel(
            state2,
            DEFAULT_METRIC,
            random_seed,
            gpmodel,
            fit_parameters=False,
            num_fantasy_samples=num_fantasy_samples,
            normalize_targets=False)
        acq_func2 = EIAcquisitionFunction(model2)
        fvals_, grads_ = _compute_acq_with_gradient_many(acq_func2, X_test)
        fvals_cmp[it, :] = fvals_
        grads_cmp[it, :] = grads_
    # Comparison
    fvals2 = np.mean(fvals_cmp, axis=0)
    grads2 = np.mean(grads_cmp, axis=0)
    assert np.allclose(fvals, fvals2)
    assert np.allclose(grads, grads2)
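The helpers _set_seeds and _compute_acq_with_gradient_many are not part of this snippet; below is a purely hypothetical sketch of the former, assuming the test relies on NumPy and MXNet randomness.

import numpy as np
import mxnet as mx

def _set_seeds(seed):
    # Hypothetical helper: fix both RNGs so the fantasy draws and the random
    # test inputs are reproducible across the two computation paths.
    np.random.seed(seed)
    mx.random.seed(seed)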
def tuning_job_sub_state():
    return TuningJobState(
        hp_ranges=HyperparameterRanges_Impl(),
        candidate_evaluations=[],
        failed_candidates=[],
        pending_evaluations=[])