Example #1
import torch
from botorch.models.transforms.input import Warp


def get_test_warp(indices, **kwargs):
    # construct a Warp transform and fix its concentration parameters to
    # deterministic values (expanded to the requested batch shape) for testing
    warp_tf = Warp(indices=indices, **kwargs)
    c0 = torch.tensor([1.0, 2.0])[:len(indices)]
    c1 = torch.tensor([2.0, 3.0])[:len(indices)]
    batch_shape = kwargs.get("batch_shape", torch.Size([]))
    c0 = c0.expand(batch_shape + c0.shape)
    c1 = c1.expand(batch_shape + c1.shape)
    warp_tf._set_concentration(0, c0)
    warp_tf._set_concentration(1, c1)
    return warp_tf
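
As a quick illustration (not part of the original snippet), the fixed-concentration warp above can be called on inputs like any other BoTorch input transform. The indices and input values below are made up for demonstration.

import torch

warp_tf = get_test_warp(indices=[0, 1])
X = torch.rand(5, 2)  # inputs are assumed to lie in the unit cube
# in training mode the forward pass applies the Kumaraswamy CDF warping
# to the indexed columns (transform_on_train defaults to True)
X_warped = warp_tf(X)
print(X_warped.shape)  # torch.Size([5, 2])
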
Example #2
from typing import Optional

import torch
from botorch.models.transforms.input import Warp
from gpytorch.priors import LogNormalPrior


def get_warping_transform(
    d: int,
    batch_shape: Optional[torch.Size] = None,
    task_feature: Optional[int] = None,
) -> Warp:
    """Construct input warping transform.

    Args:
        d: The dimension of the input, including task features
        batch_shape: The batch_shape of the model
        task_feature: The index of the task feature

    Returns:
        The input warping transform.
    """
    indices = list(range(d))
    # apply warping to all non-task features, including fidelity features
    if task_feature is not None:
        del indices[task_feature]
    # Note: this currently uses the same warping functions for all tasks
    tf = Warp(
        indices=indices,
        # prior with a median of 1
        concentration1_prior=LogNormalPrior(0.0, 0.75 ** 0.5),
        concentration0_prior=LogNormalPrior(0.0, 0.75 ** 0.5),
        batch_shape=batch_shape,
    )
    return tf
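
A minimal sketch of how such a transform is typically consumed, mirroring the SingleTaskGP usage in Example #5; the synthetic data below is an assumption made purely for illustration.

import torch
from botorch.models import SingleTaskGP

# synthetic training data in the unit cube (illustrative only)
train_X = torch.rand(10, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)

# warp all three input dimensions (no task feature present)
warp_tf = get_warping_transform(d=3)
model = SingleTaskGP(train_X, train_Y, input_transform=warp_tf)
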
Example #3
from typing import Optional

from botorch.models.transforms.input import Warp
from gpytorch.priors import GammaPrior


def get_warping_transform(
    d: int,
    task_feature: Optional[int] = None,
) -> Warp:
    """Construct input warping transform.

    Args:
        d: The dimension of the input, including task features
        task_feature: The index of the task feature

    Returns:
        The input warping transform.
    """
    indices = list(range(d))
    # apply warping to all non-task features, including fidelity features
    if task_feature is not None:
        del indices[task_feature]
    # Note: this currently uses the same warping functions for all tasks
    tf = Warp(
        indices=indices,
        # use an uninformative prior with maximum log probability at 1
        concentration1_prior=GammaPrior(1.01, 0.01),
        concentration0_prior=GammaPrior(1.01, 0.01),
    )
    return tf
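
Illustrative only: once attached to a model, the warp's concentration parameters are learned jointly with the other hyperparameters during fitting, with the Gamma priors above acting as regularizers. The fitting call (fit_gpytorch_mll) and the data below are assumptions, not part of the original example.

import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood

train_X = torch.rand(15, 2, dtype=torch.double)
train_Y = (train_X ** 2).sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y, input_transform=get_warping_transform(d=2))
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll)  # learns kernel, noise, and warp concentrations together
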
Example #4
import torch
from botorch.models.transforms.input import Warp


def get_test_warp(indices, **kwargs):
    # same as Example #1, but the fixed concentrations are not expanded
    # to a batch shape
    warp_tf = Warp(indices=indices, **kwargs)
    warp_tf._set_concentration(0, torch.tensor([1.0, 2.0]))
    warp_tf._set_concentration(1, torch.tensor([2.0, 3.0]))
    return warp_tf
Example #5
    def test_get_X_baseline(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            X_train = torch.rand(20, 2, **tkwargs)
            model = MockModel(
                MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
            )
            # test NEI with X_baseline
            acqf = qNoisyExpectedImprovement(model, X_baseline=X_train[:2])
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, acqf.X_baseline))
            # test EI without X_baseline
            acqf = qExpectedImprovement(model, best_f=0.0)

            with warnings.catch_warnings(record=True) as w, settings.debug(True):
                X_rnd = get_X_baseline(acq_function=acqf)
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, BotorchWarning))
                self.assertIsNone(X_rnd)

            # set train inputs
            model.train_inputs = (X_train,)
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))
            # test that we fall back to train_inputs if X_baseline is an empty tensor
            acqf.register_buffer("X_baseline", X_train[:0])
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))

            # test acquisition function without X_baseline or model
            acqf = FixedFeatureAcquisitionFunction(acqf, d=2, columns=[0], values=[0])
            with warnings.catch_warnings(record=True) as w, settings.debug(True):
                X_rnd = get_X_baseline(acq_function=acqf)
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, BotorchWarning))
                self.assertIsNone(X_rnd)

            Y_train = 2 * X_train[:2] + 1
            moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
            ref_point = torch.zeros(2, **tkwargs)
            # test NEHVI with X_baseline
            acqf = qNoisyExpectedHypervolumeImprovement(
                moo_model,
                ref_point=ref_point,
                X_baseline=X_train[:2],
                cache_root=False,
            )
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, acqf.X_baseline))
            # test qEHVI without train_inputs
            acqf = qExpectedHypervolumeImprovement(
                moo_model,
                ref_point=ref_point,
                partitioning=FastNondominatedPartitioning(
                    ref_point=ref_point,
                    Y=Y_train,
                ),
            )
            # test extracting train_inputs from model list GP
            model_list = ModelListGP(
                SingleTaskGP(X_train, Y_train[:, :1]),
                SingleTaskGP(X_train, Y_train[:, 1:]),
            )
            acqf = qExpectedHypervolumeImprovement(
                model_list,
                ref_point=ref_point,
                partitioning=FastNondominatedPartitioning(
                    ref_point=ref_point,
                    Y=Y_train,
                ),
            )
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))

            # test MESMO for which we need to use
            # `acqf.mo_model`
            batched_mo_model = SingleTaskGP(X_train, Y_train)
            acqf = qMultiObjectiveMaxValueEntropy(
                batched_mo_model,
                sample_pareto_frontiers=lambda model: torch.rand(10, 2, **tkwargs),
            )
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))
            # test that if there is an input transform that is applied
            # to the train_inputs when the model is in eval mode, we
            # extract the untransformed train_inputs
            model = SingleTaskGP(
                X_train, Y_train[:, :1], input_transform=Warp(indices=[0, 1])
            )
            model.eval()
            self.assertFalse(torch.equal(model.train_inputs[0], X_train))
            acqf = qExpectedImprovement(model, best_f=0.0)
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))
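
For context, a condensed standalone sketch of the fallback behavior this test exercises: when the acquisition function carries no X_baseline buffer, get_X_baseline falls back to the model's (untransformed) training inputs. The import path for get_X_baseline (botorch.acquisition.utils) and the synthetic data are assumptions rather than part of the test.

import torch
from botorch.acquisition import qExpectedImprovement
from botorch.acquisition.utils import get_X_baseline
from botorch.models import SingleTaskGP

train_X = torch.rand(20, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
acqf = qExpectedImprovement(model, best_f=0.0)
# qEI carries no X_baseline buffer, so the helper falls back to train_X
X_baseline = get_X_baseline(acq_function=acqf)
assert torch.equal(X_baseline, train_X)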