Example #1
def tuning_job_state():
    return {
        'algo-1': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(
                HyperparameterRangeContinuous('a1_hp_1', -5.0, 5.0,
                                              LinearScaling()),
                HyperparameterRangeCategorical('a1_hp_2', ('a', 'b', 'c'))),
            candidate_evaluations=[
                CandidateEvaluation(candidate=(-3.0, 'a'), value=1.0),
                CandidateEvaluation(candidate=(-1.9, 'c'), value=2.0),
                CandidateEvaluation(candidate=(-3.5, 'a'), value=0.3)
            ],
            failed_candidates=[],
            pending_evaluations=[]),
        'algo-2': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(
                HyperparameterRangeContinuous('a2_hp_1', -5.0, 5.0,
                                              LinearScaling()),
                HyperparameterRangeInteger('a2_hp_2', -5, 5, LinearScaling(),
                                           -5, 5)),
            candidate_evaluations=[
                CandidateEvaluation(candidate=(-1.9, -1), value=0.0),
                CandidateEvaluation(candidate=(-3.5, 3), value=2.0)
            ],
            failed_candidates=[],
            pending_evaluations=[])
    }
Example #2
    def tuning_job_state_mcmc(X, Y) -> TuningJobState:
        Y = [dictionarize_objective(y) for y in Y]

        return TuningJobState(
            HyperparameterRanges_Impl(
                HyperparameterRangeContinuous('x', -4., 4., LinearScaling())),
            [CandidateEvaluation(x, y) for x, y in zip(X, Y)], [], [])
Example #3
def data_to_state(data: dict) -> TuningJobState:
    cs = CS.ConfigurationSpace()
    cs_names = ['x{}'.format(i) for i in range(len(data['ss_limits']))]
    cs.add_hyperparameters([
        CSH.UniformFloatHyperparameter(
            name=name, lower=lims['min'], upper=lims['max'])
        for name, lims in zip(cs_names, data['ss_limits'])])
    _evaluations = []
    x_mult = []
    x_add = []
    for lim in data['ss_limits']:
        mn, mx = lim['min'], lim['max']
        x_mult.append(mx - mn)
        x_add.append(mn)
    x_mult = np.array(x_mult)
    x_add = np.array(x_add)
    for x, y in zip(data['train_inputs'], data['train_targets']):
        x_decoded = x * x_mult + x_add
        config_dct = dict(zip(cs_names, x_decoded))
        config = CS.Configuration(cs, values=config_dct)
        _evaluations.append(CandidateEvaluation(
            config, dictionarize_objective(y)))
    return TuningJobState(
        hp_ranges=HyperparameterRanges_CS(cs),
        candidate_evaluations=_evaluations,
        failed_candidates=[],
        pending_evaluations=[])
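For reference, a minimal sketch of the input layout data_to_state expects, inferred from the code above; the concrete values are made up for illustration. Entries in train_inputs are normalized to [0, 1] and decoded via x * (max - min) + min.

# Hypothetical example input for data_to_state (values made up):
data = {
    'ss_limits': [{'min': -1.0, 'max': 1.0}, {'min': 0.0, 'max': 10.0}],
    'train_inputs': [np.array([0.5, 0.2]), np.array([0.25, 0.9])],
    'train_targets': [0.7, 1.3],
}
state = data_to_state(data)
assert len(state.candidate_evaluations) == 2
# First config decodes to x0 = 0.5 * 2.0 - 1.0 = 0.0, x1 = 0.2 * 10.0 = 2.0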
Example #4
    def _compute_posterior(self, fit_parameters: bool,
                           profiler: GPMXNetSimpleProfiler):
        """
        Completes __init__, by computing the posterior. If fit_parameters, this
        includes optimizing the surrogate model parameters.

        If self.state.pending_evaluations is not empty, we proceed as follows:
        - Compute posterior for state without pending evals
        - Draw fantasy values for pending evals
        - Recompute posterior (without fitting)

        """
        if self._debug_log is not None:
            self._debug_log.set_state(self.state)
        # Compute posterior for state without pending evals
        no_pending_state = self.state
        if self.state.pending_evaluations:
            no_pending_state = TuningJobState(
                hp_ranges=self.state.hp_ranges,
                candidate_evaluations=self.state.candidate_evaluations,
                failed_candidates=self.state.failed_candidates,
                pending_evaluations=[])
        self._posterior_for_state(no_pending_state, fit_parameters, profiler)
        if self.state.pending_evaluations:
            # Sample fantasy values for pending evals
            pending_configs = [
                x.candidate for x in self.state.pending_evaluations
            ]
            new_pending = self._draw_fantasy_values(pending_configs)
            # Compute posterior for state with pending evals
            # Note: profiler is not passed here, this would overwrite the
            # results from the first call
            with_pending_state = TuningJobState(
                hp_ranges=self.state.hp_ranges,
                candidate_evaluations=self.state.candidate_evaluations,
                failed_candidates=self.state.failed_candidates,
                pending_evaluations=new_pending)
            self._posterior_for_state(with_pending_state,
                                      fit_parameters=False,
                                      profiler=None)
            # Note: At this point, the fantasy values are dropped; they were
            # only sampled for the posterior computation and are not needed
            # anymore. We still keep them in self.fantasy_samples, which is
            # mainly used for testing.
            self.fantasy_samples = new_pending
Example #5
def test_compute_blacklisted_candidates(
        hp_ranges: HyperparameterRanges_Impl,
        candidate_evaluations: List[CandidateEvaluation],
        failed_candidates: List[Candidate],
        pending_evaluations: List[PendingEvaluation],
        expected: Set[Candidate]):
    state = TuningJobState(hp_ranges, candidate_evaluations, failed_candidates,
                           pending_evaluations)
    actual = compute_blacklisted_candidates(state)
    assert set(expected) == set(actual)
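One plausible parametrization for the test above. It assumes (this is an assumption, not confirmed by the source) that compute_blacklisted_candidates returns the union of already evaluated, failed, and pending candidates:

# Hypothetical test case; assumes blacklisted = evaluated + failed + pending
hp_ranges = HyperparameterRanges_Impl(
    HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()))
test_compute_blacklisted_candidates(
    hp_ranges=hp_ranges,
    candidate_evaluations=[
        CandidateEvaluation((0.5, ), dictionarize_objective(1.0))],
    failed_candidates=[(0.1, )],
    pending_evaluations=[PendingEvaluation((0.9, ))],
    expected={(0.5, ), (0.1, ), (0.9, )})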
Example #6
def multi_algo_state():
    def _candidate_evaluations(num):
        return [
            CandidateEvaluation(candidate=(i, ),
                                metrics=dictionarize_objective(float(i)))
            for i in range(num)
        ]

    return {
        '0': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(
                HyperparameterRangeContinuous('a1_hp_1', -5.0, 5.0,
                                              LinearScaling(), -5.0, 5.0)),
            candidate_evaluations=_candidate_evaluations(2),
            failed_candidates=[(i, ) for i in range(3)],
            pending_evaluations=[PendingEvaluation((i, ))
                                 for i in range(100)]),
        '1': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(5),
            failed_candidates=[],
            pending_evaluations=[]),
        '2': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(3),
            failed_candidates=[(i, ) for i in range(10)],
            pending_evaluations=[PendingEvaluation((i, )) for i in range(1)]),
        '3': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(6),
            failed_candidates=[],
            pending_evaluations=[]),
        '4': TuningJobState(
            hp_ranges=HyperparameterRanges_Impl(),
            candidate_evaluations=_candidate_evaluations(120),
            failed_candidates=[],
            pending_evaluations=[]),
    }
Example #7
def default_models() -> List[GPMXNetModel]:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
        # The same evals are added multiple times to force the GP to unlearn
        # its prior.
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]

    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ),
        [CandidateEvaluation(x, y) for x, y in zip(X, Y)],
        [],
        [],
    )
    random_seed = 0

    gpmodel = default_gpmodel(state,
                              random_seed=random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)

    gpmodel_mcmc = default_gpmodel_mcmc(state,
                                        random_seed=random_seed,
                                        mcmc_config=DEFAULT_MCMC_CONFIG)

    return [
        GPMXNetModel(state,
                     DEFAULT_METRIC,
                     random_seed,
                     gpmodel,
                     fit_parameters=True,
                     num_fantasy_samples=20),
        GPMXNetModel(state,
                     DEFAULT_METRIC,
                     random_seed,
                     gpmodel_mcmc,
                     fit_parameters=True,
                     num_fantasy_samples=20)
    ]
Example #8
def tuning_job_state() -> TuningJobState:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]

    return TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ), [CandidateEvaluation(x, y) for x, y in zip(X, Y)], [], [])
Example #9
def decode_state(enc_state: dict, hp_ranges: HyperparameterRanges_CS) \
        -> TuningJobState:
    assert isinstance(hp_ranges, HyperparameterRanges_CS), \
        "Must have hp_ranges of HyperparameterRanges_CS type"
    config_space = hp_ranges.config_space

    def to_cs(x):
        return CS.Configuration(config_space, values=x)

    candidate_evaluations = [
        CandidateEvaluation(to_cs(x['candidate']), x['metrics'])
        for x in enc_state['candidate_evaluations']]
    failed_candidates = [to_cs(x) for x in enc_state['failed_candidates']]
    pending_evaluations = [
        PendingEvaluation(to_cs(x)) for x in enc_state['pending_evaluations']]
    return TuningJobState(
        hp_ranges=hp_ranges,
        candidate_evaluations=candidate_evaluations,
        failed_candidates=failed_candidates,
        pending_evaluations=pending_evaluations)
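A sketch of the inverse operation, inferred from the dict layout decode_state reads above; encode_state is a hypothetical helper here, not necessarily the library's own:

# Hypothetical inverse of decode_state, mirroring the keys it reads.
# get_dictionary() turns a CS.Configuration back into a plain dict.
def encode_state(state: TuningJobState) -> dict:
    return {
        'candidate_evaluations': [
            {'candidate': x.candidate.get_dictionary(), 'metrics': x.metrics}
            for x in state.candidate_evaluations],
        'failed_candidates': [
            x.get_dictionary() for x in state.failed_candidates],
        'pending_evaluations': [
            x.candidate.get_dictionary()
            for x in state.pending_evaluations]}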
Example #10
def test_get_internal_candidate_evaluations():
    """we do not test the case with no evaluations, as it is assumed
    that there will be always some evaluations generated in the beginning
    of the BO loop."""

    candidates = [
        CandidateEvaluation((2, 3.3, 'X'), dictionarize_objective(5.3)),
        CandidateEvaluation((1, 9.9, 'Y'), dictionarize_objective(10.9)),
        CandidateEvaluation((7, 6.1, 'X'), dictionarize_objective(13.1)),
    ]

    state = TuningJobState(
        hp_ranges=HyperparameterRanges_Impl(
            HyperparameterRangeInteger('integer', 0, 10, LinearScaling()),
            HyperparameterRangeContinuous('real', 0, 10, LinearScaling()),
            HyperparameterRangeCategorical('categorical', ('X', 'Y')),
        ),
        candidate_evaluations=candidates,
        # Failed candidates should be ignored by the model
        failed_candidates=[candidates[0].candidate],
        pending_evaluations=[])

    result = get_internal_candidate_evaluations(state,
                                                DEFAULT_METRIC,
                                                normalize_targets=True,
                                                num_fantasize_samples=20)

    assert len(result.X.shape) == 2, "Input should be a matrix"
    assert len(result.y.shape) == 2, "Output should be a matrix"

    assert result.X.shape[0] == len(candidates)
    assert result.y.shape[-1] == 1, \
        "Only a single output value per row is supported"

    assert np.abs(np.mean(result.y)) < 1e-8, \
        "Mean of the normalized outputs is not 0.0"
    assert np.abs(np.std(result.y) - 1.0) < 1e-8, \
        "Std. of the normalized outputs is not 1.0"

    np.testing.assert_almost_equal(result.mean, 9.766666666666666)
    np.testing.assert_almost_equal(result.std, 3.283629428273267)
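Since result.mean and result.std match the statistics of the raw targets, the normalization is presumably (y - mean) / std. If so, the original targets can be recovered as in this sketch, continuing the test above (the convention is an assumption):

# Sketch, assuming targets were normalized as (y - mean) / std:
y_original = result.y * result.std + result.mean
np.testing.assert_almost_equal(
    sorted(y_original.flatten()), [5.3, 10.9, 13.1])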
Example #11
def default_models(do_mcmc=True) -> List[GPMXNetModel]:
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    Y = [dictionarize_objective(np.sum(x) * 10.0) for x in X]

    state = TuningJobState(
        HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
            HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()),
        ), [CandidateEvaluation(x, y) for x, y in zip(X, Y)], [], [])
    random_seed = 0

    gpmodel = default_gpmodel(state,
                              random_seed=random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    result = [
        GPMXNetModel(state,
                     DEFAULT_METRIC,
                     random_seed,
                     gpmodel,
                     fit_parameters=True,
                     num_fantasy_samples=20)
    ]
    if do_mcmc:
        gpmodel_mcmc = default_gpmodel_mcmc(state,
                                            random_seed=random_seed,
                                            mcmc_config=DEFAULT_MCMC_CONFIG)
        result.append(
            GPMXNetModel(state,
                         DEFAULT_METRIC,
                         random_seed,
                         gpmodel_mcmc,
                         fit_parameters=True,
                         num_fantasy_samples=20))
    return result
Example #12
    def __init__(
            self, hp_ranges: HyperparameterRanges, random_seed: int,
            gpmodel: GPModel, model_args: GPMXNetModelArgs,
            map_reward: MapReward,
            acquisition_class: Type[AcquisitionFunction],
            init_state: TuningJobState = None,
            local_minimizer_class: Type[LocalOptimizer] = DEFAULT_LOCAL_OPTIMIZER_CLASS,
            skip_optimization: SkipOptimizationPredicate = None,
            num_initial_candidates: int = DEFAULT_NUM_INITIAL_CANDIDATES,
            num_initial_random_choices: int = DEFAULT_NUM_INITIAL_RANDOM_EVALUATIONS,
            initial_scoring: Optional[str] = None,
            profiler: Optional[GPMXNetSimpleProfiler] = None,
            first_is_default: bool = True,
            debug_log: Optional[DebugLogPrinter] = None):
        """
        Note that the GPMXNetModel is created on demand (by the state
        transformer) in get_config, along with components needed for the BO
        algorithm.

        The searcher is supposed to maximize reward, while internally the
        criterion is minimized. map_reward maps reward to the internal
        criterion; it must be strictly decreasing.

        :param hp_ranges: Configuration space without resource attribute
        :param random_seed: Seed for random number generation
        :param gpmodel: GP regression model
        :param model_args: Arguments for GPMXNet model creation
        :param map_reward: Function mapping reward to criterion to be minimized
        :param acquisition_class: Type for acquisition function
        :param init_state: TuningJobState to start from (default is empty)
        :param local_minimizer_class: Type for local minimizer
        :param skip_optimization: Predicate, see
            GPMXNetPendingCandidateStateTransformer
        :param num_initial_candidates: See BayesianOptimizationAlgorithm
        :param num_initial_random_choices: Configs are sampled at random until
            this many candidates have received label feedback
        :param initial_scoring: Scoring function to rank initial candidates.
            Default: thompson_indep (independent Thompson sampling)
        :param profiler: If given, HPO computations are profiled
        :param first_is_default: If True, the first config to be evaluated is
            the default of the search space. Otherwise, the first config is
            sampled at random
        :param debug_log: DebugLogPrinter for debug logging (optional)

        """
        self.hp_ranges = hp_ranges
        self.random_seed = random_seed
        self.num_initial_candidates = num_initial_candidates
        self.num_initial_random_choices = num_initial_random_choices
        self.map_reward = map_reward
        self.local_minimizer_class = local_minimizer_class
        self.acquisition_class = acquisition_class
        self.debug_log = debug_log
        self.initial_scoring = check_initial_candidates_scorer(initial_scoring)
        # Create state transformer
        # Initial state is empty (note that the state is mutable)
        if init_state is None:
            init_state = TuningJobState(
                hp_ranges=hp_ranges,
                candidate_evaluations=[],
                failed_candidates=[],
                pending_evaluations=[])
        else:
            assert hp_ranges is init_state.hp_ranges, \
                "hp_ranges and init_state.hp_ranges must be same object"
        self.state_transformer = GPMXNetPendingCandidateStateTransformer(
            gpmodel=gpmodel,
            init_state=init_state,
            model_args=model_args,
            skip_optimization=skip_optimization,
            profiler=profiler,
            debug_log=debug_log)
        self.random_state = np.random.RandomState(random_seed)
        self.random_generator = RandomStatefulCandidateGenerator(
            hp_ranges, random_state=self.random_state)
        self.profiler = profiler
        self.do_profile = (profiler is not None)
        self.first_is_default = first_is_default
        if first_is_default:
            assert isinstance(hp_ranges, HyperparameterRanges_CS), \
                "If first_is_default, must have hp_ranges of HyperparameterRanges_CS type"
        if debug_log is not None:
            assert isinstance(hp_ranges, HyperparameterRanges_CS), \
                "If debug_log is given, must have hp_ranges of HyperparameterRanges_CS type"
        # Sums up profiling records across all get_config calls
        self._profile_record = dict()
        if debug_log is not None:
            deb_msg = "[GPFIFOSearcher.__init__]\n"
            deb_msg += ("- acquisition_class = {}\n".format(acquisition_class))
            deb_msg += ("- local_minimizer_class = {}\n".format(local_minimizer_class))
            deb_msg += ("- num_initial_candidates = {}\n".format(num_initial_candidates))
            deb_msg += ("- num_initial_random_choices = {}\n".format(num_initial_random_choices))
            deb_msg += ("- initial_scoring = {}\n".format(self.initial_scoring))
            deb_msg += ("- first_is_default = {}".format(first_is_default))
            logger.info(deb_msg)
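The docstring above requires map_reward to be strictly decreasing, since the searcher maximizes reward while the internal criterion is minimized. A minimal sketch of two such mappings; whether a MapReward can wrap plain callables like these is an assumption, not confirmed by the source:

# Hedged sketch: strictly decreasing reward-to-criterion mappings.
def negate(reward: float) -> float:
    # Maximizing reward is equivalent to minimizing -reward
    return -reward

def one_minus(reward: float) -> float:
    # Useful when reward is an accuracy in [0, 1]: criterion is the error
    return 1.0 - reward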
Example #13
    def __init__(self,
                 hp_ranges: HyperparameterRanges_CS,
                 resource_attr_key: str,
                 resource_attr_range: Tuple[int, int],
                 random_seed: int,
                 gpmodel: GPModel,
                 model_args: GPMXNetModelArgs,
                 map_reward: MapReward,
                 acquisition_class: Type[AcquisitionFunction],
                 resource_for_acquisition: Callable[..., int],
                 init_state: TuningJobState = None,
                 local_minimizer_class: Type[
                     LocalOptimizer] = DEFAULT_LOCAL_OPTIMIZER_CLASS,
                 skip_optimization: SkipOptimizationPredicate = None,
                 num_initial_candidates: int = DEFAULT_NUM_INITIAL_CANDIDATES,
                 num_initial_random_choices:
                 int = DEFAULT_NUM_INITIAL_RANDOM_EVALUATIONS,
                 initial_scoring: Optional[str] = None,
                 profiler: Optional[GPMXNetSimpleProfiler] = None,
                 first_is_default: bool = True,
                 debug_log: Optional[DebugLogPrinter] = None):
        """
        Note that the GPMXNetModel is created on demand (by the state
        transformer) in get_config, along with components needed for the BO
        algorithm.

        The configuration space is hp_ranges. It does not include the resource
        attribute, which is passed as a result component instead, with key
        resource_attr_key. The GP model is defined over configuration and
        resource attribute; its configuration space is maintained in
        configspace_ext.

        The search for the next candidate in get_config fixes the resource
        level: all extended configs for which the acquisition function is
        evaluated share the same resource level. This level may depend on the
        current state; the mapping can be passed as resource_for_acquisition.
        Its signature is
            resource_for_acquisition(state, resource_attr_name, **kwargs) -> int,
        where state is a TuningJobState.
        Example: resource_for_acquisition may count the number of labeled
        datapoints at each resource level and choose the largest level
        supported by enough data. Alternatively, information about bracket and
        first milestone may be passed from the scheduler via **kwargs.

        The searcher is supposed to maximize reward, while internally the
        criterion is minimized. map_reward maps reward to the internal
        criterion; it must be strictly decreasing.

        :param hp_ranges: Configuration space without resource attribute
        :param resource_attr_key: Key for resource attribute.
            Note: The resource attribute must be int valued
        :param resource_attr_range: Range (lower, upper) for the resource
            attribute (must be int valued)
        :param random_seed: Seed for random number generation
        :param gpmodel: GP regression model
        :param model_args: Arguments for GPMXNet model creation
        :param map_reward: Function mapping reward to criterion to be minimized
        :param acquisition_class: Type for acquisition function
        :param resource_for_acquisition: See above
        :param init_state: TuningJobState to start from (default is empty).
            Here, init_state.hp_ranges must be equal to
            self.configspace_ext.hp_ranges_ext (this is not checked)
        :param local_minimizer_class: Type for local minimizer
        :param skip_optimization: Predicate, see
            GPMXNetPendingCandidateStateTransformer
        :param num_initial_candidates: See BayesianOptimizationAlgorithm
        :param num_initial_random_choices: Configs are sampled at random until
            this many candidates have received label feedback
        :param initial_scoring: Scoring function to rank initial candidates.
            Default: thompson_indep (independent Thompson sampling)
        :param profiler: If given, HPO computations are profiled
        :param first_is_default: If True, the first result of get_config is
            the default config of hp_ranges
        :param debug_log: DebugLogPrinter for debug logging (optional)

        """
        self.hp_ranges = hp_ranges
        self.random_seed = random_seed
        self.num_initial_candidates = num_initial_candidates
        self.num_initial_random_choices = num_initial_random_choices
        self.map_reward = map_reward
        self.resource_for_acquisition = resource_for_acquisition
        self.local_minimizer_class = local_minimizer_class
        self.acquisition_class = acquisition_class
        self._gpmodel = gpmodel
        self.initial_scoring = check_initial_candidates_scorer(initial_scoring)
        # Extended configuration space including resource attribute
        self.configspace_ext = ExtendedConfiguration(hp_ranges,
                                                     resource_attr_key,
                                                     resource_attr_range)
        if debug_log is not None:
            # Configure DebugLogPrinter
            debug_log.configspace_ext = self.configspace_ext
        self.debug_log = debug_log
        # Create state transformer
        # Initial state is empty (note that the state is mutable)
        if init_state is None:
            init_state = TuningJobState(
                hp_ranges=self.configspace_ext.hp_ranges_ext,
                candidate_evaluations=[],
                failed_candidates=[],
                pending_evaluations=[])
        self.state_transformer = GPMXNetPendingCandidateStateTransformer(
            gpmodel=gpmodel,
            init_state=init_state,
            model_args=model_args,
            skip_optimization=skip_optimization,
            profiler=profiler,
            debug_log=debug_log)
        self.random_state = np.random.RandomState(random_seed)
        self.random_generator = RandomStatefulCandidateGenerator(
            self.configspace_ext.hp_ranges_ext, random_state=self.random_state)
        self.profiler = profiler
        self.do_profile = (profiler is not None)
        self.first_is_default = first_is_default
        # Sums up profiling records across all get_config calls
        self._profile_record = dict()
        if debug_log is not None:
            deb_msg = "[GPMultiFidelitySearcher.__init__]\n"
            deb_msg += ("- acquisition_class = {}\n".format(acquisition_class))
            deb_msg += (
                "- local_minimizer_class = {}\n".format(local_minimizer_class))
            deb_msg += ("- num_initial_candidates = {}\n".format(
                num_initial_candidates))
            deb_msg += ("- num_initial_random_choices = {}\n".format(
                num_initial_random_choices))
            deb_msg += ("- initial_scoring = {}\n".format(
                self.initial_scoring))
            deb_msg += ("- first_is_default = {}".format(first_is_default))
            logger.info(deb_msg)
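A sketch of a resource_for_acquisition callable following the signature given in the docstring above, implementing the "largest level supported by enough data" rule it mentions. The threshold min_data and the way resource values are read from extended candidates are assumptions for illustration:

# Hedged sketch; min_data and the candidate access pattern are
# illustrative assumptions.
def largest_level_with_enough_data(
        state: TuningJobState, resource_attr_name: str,
        min_data: int = 4, **kwargs) -> int:
    counts = dict()
    for ev in state.candidate_evaluations:
        # Assumes extended configs expose the resource attribute by name
        resource = int(ev.candidate[resource_attr_name])
        counts[resource] = counts.get(resource, 0) + 1
    supported = [r for r, num in counts.items() if num >= min_data]
    if supported:
        return max(supported)
    # Fall back to the smallest level seen, or 1 if there is no data yet
    return min(counts) if counts else 1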
Example #14
def test_gp_fantasizing():
    """
    Compare whether acquisition function evaluations (values, gradients) with
    fantasizing are the same as averaging them by hand.
    """
    random_seed = 4567
    _set_seeds(random_seed)
    num_fantasy_samples = 10
    num_pending = 5

    hp_ranges = HyperparameterRanges_Impl(
        HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
        HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()))
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    num_data = len(X)
    Y = [
        dictionarize_objective(np.random.randn(1, 1)) for _ in range(num_data)
    ]
    # Draw fantasies. This is done for a number of fixed pending candidates.
    # The model parameters are fit in the first iteration, when there are
    # no pending candidates.

    # Note: It is important not to normalize targets, because normalization
    # would be based on the observed targets only, not the fantasized ones,
    # making the comparison below hard.
    pending_evaluations = []
    for _ in range(num_pending):
        pending_cand = tuple(np.random.rand(2, ))
        pending_evaluations.append(PendingEvaluation(pending_cand))
    state = TuningJobState(hp_ranges,
                           [CandidateEvaluation(x, y) for x, y in zip(X, Y)],
                           failed_candidates=[],
                           pending_evaluations=pending_evaluations)
    gpmodel = default_gpmodel(state,
                              random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    model = GPMXNetModel(state,
                         DEFAULT_METRIC,
                         random_seed,
                         gpmodel,
                         fit_parameters=True,
                         num_fantasy_samples=num_fantasy_samples,
                         normalize_targets=False)
    fantasy_samples = model.fantasy_samples
    # Evaluate acquisition function and gradients with fantasizing
    num_test = 50
    X_test = np.vstack([
        hp_ranges.to_ndarray(tuple(np.random.rand(2, )))
        for _ in range(num_test)
    ])
    acq_func = EIAcquisitionFunction(model)
    fvals, grads = acq_func.compute_acq_with_gradients(X_test)
    # Do the same computation by averaging by hand
    fvals_cmp = np.empty((num_fantasy_samples, ) + fvals.shape)
    grads_cmp = np.empty((num_fantasy_samples, ) + grads.shape)
    X_full = X + state.pending_candidates
    for it in range(num_fantasy_samples):
        Y_full = Y + [
            dictionarize_objective(ev.fantasies[DEFAULT_METRIC][:, it])
            for ev in fantasy_samples
        ]
        state2 = TuningJobState(
            hp_ranges,
            [CandidateEvaluation(x, y) for x, y in zip(X_full, Y_full)],
            failed_candidates=[],
            pending_evaluations=[])
        # We have to skip parameter optimization here
        model2 = GPMXNetModel(state2,
                              DEFAULT_METRIC,
                              random_seed,
                              gpmodel,
                              fit_parameters=False,
                              num_fantasy_samples=num_fantasy_samples,
                              normalize_targets=False)
        acq_func2 = EIAcquisitionFunction(model2)
        fvals_, grads_ = acq_func2.compute_acq_with_gradients(X_test)
        fvals_cmp[it, :] = fvals_
        grads_cmp[it, :] = grads_
    # Comparison
    fvals2 = np.mean(fvals_cmp, axis=0)
    grads2 = np.mean(grads_cmp, axis=0)
    assert np.allclose(fvals, fvals2)
    assert np.allclose(grads, grads2)
Example #15
def tuning_job_sub_state():
    return TuningJobState(hp_ranges=HyperparameterRanges_Impl(),
                          candidate_evaluations=[],
                          failed_candidates=[],
                          pending_evaluations=[])