Example #1
    def test_fantasize(self):
        for batch_shape, m, dtype, use_octf in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (torch.float, torch.double),
            (False, True),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
            model, _ = self._get_model_and_data(
                batch_shape=batch_shape, m=m, outcome_transform=octf, **tkwargs
            )
            # fantasize
            X_f = torch.rand(torch.Size(batch_shape + torch.Size([4, 1])), **tkwargs)
            sampler = SobolQMCNormalSampler(num_samples=3)
            fm = model.fantasize(X=X_f, sampler=sampler)
            self.assertIsInstance(fm, model.__class__)
            fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
            self.assertIsInstance(fm, model.__class__)

        # check that input transforms are applied to X.
        tkwargs = {"device": self.device, "dtype": torch.float}
        intf = Normalize(d=1, bounds=torch.tensor([[0], [10]], **tkwargs))
        model, _ = self._get_model_and_data(
            batch_shape=torch.Size(),
            m=1,
            input_transform=intf,
            **tkwargs,
        )
        X_f = torch.rand(4, 1, **tkwargs)
        fm = model.fantasize(X_f, sampler=SobolQMCNormalSampler(num_samples=3))
        self.assertTrue(
            torch.allclose(fm.train_inputs[0][:, -4:], intf(X_f).expand(3, -1, -1))
        )
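For reference, the pattern these tests exercise can be reproduced standalone. The sketch below assumes the same (older) BoTorch API used throughout this page, where SobolQMCNormalSampler takes num_samples; the training data is purely illustrative.

import torch
from botorch.models import SingleTaskGP
from botorch.sampling import SobolQMCNormalSampler

# toy (unstandardized) training data, purely illustrative
train_X = torch.rand(8, 1, dtype=torch.double)
train_Y = torch.sin(6 * train_X)
model = SingleTaskGP(train_X, train_Y)

# fantasize at 4 candidate points with 3 quasi-MC fantasy samples
X_f = torch.rand(4, 1, dtype=torch.double)
sampler = SobolQMCNormalSampler(num_samples=3)
fm = model.fantasize(X=X_f, sampler=sampler)

# fm is a batched model of the same class, conditioned on the fantasized
# observations: train_inputs now has shape 3 x (8 + 4) x 1
print(type(fm).__name__, fm.train_inputs[0].shape)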
Example #2
File: task.py Project: stys/albo
    def get_task_runner(self, param, **kw):
        blackbox = BraninConstrained(noise_std=param.noise_std)
        bounds = Tensor(blackbox._bounds)

        objective = ClassicAugmentedLagrangianMCObjective(
            objective=lambda y: y[..., 0],
            constraints=[lambda y, i=j: y[..., i] for j in range(1, blackbox.out_dim)],
            r=param['r']
        )

        sampler = SobolQMCNormalSampler(
            num_samples=param.num_mc_samples,
            seed=param.get('seed', None)
        )

        acqfopt = qEiAcqfOptimizer(sampler=sampler)

        optimizer = AlboOptimizer(
            blackbox=blackbox,
            objective=objective,
            acqfopt=acqfopt,
            sampler=sampler,
            bounds=bounds
        )

        run = partial(
            optimizer.optimize,
            niter=param.num_iter,
            init_samples=param.num_init_samples,
            al_iter=param.num_al_iter,
            seed=param.get('seed', None)
        )

        return run
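A hedged sketch of how the runner above might be invoked. The Param helper is hypothetical and only mimics the mixed attribute/item access (param.noise_std, param['r'], param.get('seed')) seen in get_task_runner; the surrounding task object is not shown on this page.

class Param(dict):
    # hypothetical config object supporting both attribute and item access
    __getattr__ = dict.__getitem__

param = Param(
    noise_std=0.05, r=10.0, num_mc_samples=256,
    num_iter=30, num_init_samples=5, num_al_iter=10, seed=0,
)
# run = task.get_task_runner(param)   # `task` is whatever object defines the method
# run()                               # executes the ALBO optimization loop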
Example #3
 def test_fantasize(self):
     for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
         n_fidelity = (iteration_fidelity is not None) + (data_fidelity
                                                          is not None)
         num_dim = 1 + n_fidelity
         for batch_shape, m, dtype, lin_trunc in itertools.product(
             (torch.Size(), torch.Size([2])),
             (1, 2),
             (torch.float, torch.double),
             (False, True),
         ):
             tkwargs = {"device": self.device, "dtype": dtype}
             model, model_kwargs = _get_model_and_data(
                 iteration_fidelity=iteration_fidelity,
                 data_fidelity=data_fidelity,
                 batch_shape=batch_shape,
                 m=m,
                 lin_truncated=lin_trunc,
                 **tkwargs,
             )
             # fantasize
             X_f = torch.rand(
                 torch.Size(batch_shape + torch.Size([4, num_dim])),
                 **tkwargs)
             sampler = SobolQMCNormalSampler(num_samples=3)
             fm = model.fantasize(X=X_f, sampler=sampler)
             self.assertIsInstance(fm, model.__class__)
             fm = model.fantasize(X=X_f,
                                  sampler=sampler,
                                  observation_noise=False)
             self.assertIsInstance(fm, model.__class__)
Example #4
 def test_fantasize(self, cuda=False):
     for batch_shape in (torch.Size(), torch.Size([2])):
         for num_outputs in (1, 2):
             for double in (False, True):
                 tkwargs = {
                     "device":
                     torch.device("cuda") if cuda else torch.device("cpu"),
                     "dtype":
                     torch.double if double else torch.float,
                 }
                 model, model_kwargs = self._get_model_and_data(
                     batch_shape=batch_shape,
                     num_outputs=num_outputs,
                     **tkwargs)
                 # fantasize
                 X_f = torch.rand(
                     torch.Size(batch_shape + torch.Size([4, 1])),
                     **tkwargs)
                 sampler = SobolQMCNormalSampler(num_samples=3)
                 fm = model.fantasize(X=X_f, sampler=sampler)
                 self.assertIsInstance(fm, model.__class__)
                 fm = model.fantasize(X=X_f,
                                      sampler=sampler,
                                      observation_noise=False)
                 self.assertIsInstance(fm, model.__class__)
Example #5
    def test_fantasize(self):
        d = 3
        for batch_shape, m, ncat, dtype in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (1, 2),
            (torch.float, torch.double),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            train_X, train_Y = _get_random_data(batch_shape=batch_shape,
                                                m=m,
                                                d=d,
                                                **tkwargs)
            cat_dims = list(range(ncat))
            model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)

            # fantasize
            X_f = torch.rand(torch.Size(batch_shape + torch.Size([4, d])),
                             **tkwargs)
            sampler = SobolQMCNormalSampler(num_samples=3)
            fm = model.fantasize(X=X_f, sampler=sampler)
            self.assertIsInstance(fm, model.__class__)
            fm = model.fantasize(X=X_f,
                                 sampler=sampler,
                                 observation_noise=False)
            self.assertIsInstance(fm, model.__class__)
Example #6
    def __init__(
        self,
        model: Model,
        target: Union[float, Tensor],
        beta: Union[float, Tensor],
        objective: Optional[MCAcquisitionObjective] = None,
        sampler: Optional[MCSampler] = None,
    ) -> None:
        r"""Monte-carlo level set estimation.

        Args:
            model: A fitted model.
            target: The level set (after the objective transform) to be estimated.
            beta: A parameter governing the explore-exploit tradeoff.
            objective: An MCAcquisitionObjective representing the link function
                (e.g., logistic or probit) applied to the samples.
                Can be implemented via GenericMCObjective.
            sampler: The sampler used for drawing MC samples.
        """
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=512,
                                            collapse_batch_dims=True)
        if objective is None:
            objective = ProbitObjective()
        super().__init__(model=model,
                         sampler=sampler,
                         objective=None,
                         X_pending=None)
        self.objective = objective
        self.beta = beta
        self.target = target
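As the docstring notes, the link function can be supplied via GenericMCObjective. A minimal sketch of a probit link built that way follows; the acquisition class itself is not named in this excerpt, so its constructor call is only indicated.

import torch
from botorch.acquisition.objective import GenericMCObjective

# probit link: standard normal CDF applied to the (single-output) posterior samples
probit_link = GenericMCObjective(
    lambda samples, X=None: torch.distributions.Normal(0.0, 1.0).cdf(samples.squeeze(-1))
)
# acqf = <LevelSetAcquisitionClass>(model=model, target=0.75, beta=3.84, objective=probit_link)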
Example #7
def prepare_acquisition_function(args, model_obj, train_x, train_y, bounds, step):
    if args.num_steps > 500:
        sampler = IIDNormalSampler(num_samples=256)
    else:
        sampler = SobolQMCNormalSampler(num_samples=256)
    if args.acqf == "ei":
        acqf = qExpectedImprovement(
            model=model_obj, best_f=train_y.max(), sampler=sampler,
        )
    elif args.acqf == "ucb":
        acqf = qUpperConfidenceBound(model=model_obj, beta=0.9 ** step)
    elif args.acqf == "nei":
        acqf = qNoisyExpectedImprovement(
            model=model_obj, X_baseline=train_x, sampler=sampler
        )
    elif args.acqf == "kg":
        acqf = qKnowledgeGradient(
            model=model_obj,
            sampler=sampler,
            num_fantasies=None,
            current_value=train_y.max(),
        )
    elif args.acqf == "mves":
        candidate_set = torch.rand(10000, bounds.size(0), device=bounds.device)
        candidate_set = bounds[..., 0] + (bounds[..., 1] - bounds[..., 0]) * candidate_set
        acqf = qMaxValueEntropy(
            model=model_obj, candidate_set=candidate_set, train_inputs=train_x,
        )

    return acqf
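A usage sketch for the helper above; the argparse-style fields (acqf, num_steps) are inferred from the function body, and the model fitting is illustrative.

import torch
from types import SimpleNamespace
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import ExactMarginalLogLikelihood

train_x = torch.rand(10, 2, dtype=torch.double)
train_y = (train_x ** 2).sum(dim=-1, keepdim=True)
bounds = torch.tensor([[0.0, 1.0], [0.0, 1.0]], dtype=torch.double)  # d x 2, as used above

model_obj = SingleTaskGP(train_x, train_y)
fit_gpytorch_model(ExactMarginalLogLikelihood(model_obj.likelihood, model_obj))

args = SimpleNamespace(acqf="ei", num_steps=100)
acqf = prepare_acquisition_function(args, model_obj, train_x, train_y, bounds, step=3)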
Example #8
 def test_fantasize(self):
     for (train_iteration_fidelity, train_data_fidelity) in [
         (False, True),
         (True, False),
         (True, True),
     ]:
         num_dim = 1 + train_iteration_fidelity + train_data_fidelity
         for batch_shape in (torch.Size(), torch.Size([2])):
             for num_outputs in (1, 2):
                 for double in (False, True):
                     tkwargs = {
                         "device": self.device,
                         "dtype": torch.double if double else torch.float,
                     }
                     model, model_kwargs = self._get_model_and_data(
                         batch_shape=batch_shape,
                         num_outputs=num_outputs,
                         train_iteration_fidelity=train_iteration_fidelity,
                         train_data_fidelity=train_data_fidelity,
                         **tkwargs,
                     )
                     # fantasize
                     X_f = torch.rand(
                         torch.Size(batch_shape + torch.Size([4, num_dim])),
                         **tkwargs,
                     )
                     sampler = SobolQMCNormalSampler(num_samples=3)
                     fm = model.fantasize(X=X_f, sampler=sampler)
                     self.assertIsInstance(fm, model.__class__)
                     fm = model.fantasize(X=X_f,
                                          sampler=sampler,
                                          observation_noise=False)
                     self.assertIsInstance(fm, model.__class__)
Example #9
def get_outcome_feasibility_probability(
    model: model.Model,
    X: Tensor,
    outcome_constraints: List[Callable[[Tensor], Tensor]],
    threshold: float = 0.1,
    nsample_outcome: int = 1000,
    seed: Optional[int] = None,
) -> float:
    r"""
    Monte Carlo estimate of the feasible volume with respect to the outcome constraints.

    Args:
        model: The model used for sampling the posterior.
        X: A tensor of dimension `batch-shape x 1 x d`, where d is feature dimension.
        outcome_constraints: A list of callables, each mapping a Tensor of dimension
            `sample_shape x batch-shape x q x m` to a Tensor of dimension
            `sample_shape x batch-shape x q`, where negative values imply feasibility.
        threshold: A lower limit for the probability that a posterior sample is feasible.
        nsample_outcome: The number of samples from the model posterior.
        seed: The seed for the posterior sampler. If omitted, use a random seed.

    Returns:
        Estimated proportion of points in `X` for which posterior samples satisfy
        the given outcome constraints with probability greater than or equal to
        the given threshold.
    """
    from botorch.sampling import SobolQMCNormalSampler

    seed = seed if seed is not None else torch.randint(0, 1000000,
                                                       (1, )).item()

    posterior = model.posterior(
        X)  # posterior consists of batch_shape marginals
    sampler = SobolQMCNormalSampler(num_samples=nsample_outcome, seed=seed)
    # size of samples: (num outcome samples, batch_shape, 1, outcome dim)
    samples = sampler(posterior)

    feasible = torch.ones(samples.shape[:-1],
                          dtype=torch.bool,
                          device=samples.device)

    # a sample is feasible if every constraint applied to it
    # produces a non-positive value
    for oc in outcome_constraints:
        # broadcasted evaluation of the outcome constraints
        feasible &= oc(samples) <= 0

    # proportion of feasible samples for each element of X;
    # summation is done across the outcome samples
    p_feas = feasible.sum(0).float() / feasible.size(0)

    # proportion of points in X for which the posterior outcome
    # satisfies the given outcome constraints
    # with probability at or above the given threshold
    p_outcome = (p_feas >= threshold).sum().item() / X.size(0)

    return p_outcome
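A usage sketch with a two-output model where the second output acts as the constraint (feasible when non-positive); the data and threshold are illustrative.

import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(20, 2, dtype=torch.double)
train_Y = torch.stack(
    [train_X.sum(dim=-1), train_X.prod(dim=-1) - 0.3], dim=-1
)  # column 0: objective outcome, column 1: constraint outcome
model = SingleTaskGP(train_X, train_Y)

# the constraint outcome (last model output) must be <= 0 to be feasible
constraints = [lambda samples: samples[..., 1]]

X = torch.rand(50, 1, 2, dtype=torch.double)  # batch-shape x 1 x d
p = get_outcome_feasibility_probability(model, X, constraints, threshold=0.5, seed=0)
print(p)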
Example #10
    def __call__(self,
                 model: Model,
                 objective_weights: Tensor,
                 outcome_constraints: Tuple[Tensor, Tensor],
                 X_observed: Optional[Tensor] = None,
                 X_pending: Optional[Tensor] = None,
                 mc_samples: int = 512,
                 qmc: bool = True,
                 seed: Optional[Tensor] = None,
                 **kwargs: Any) -> AcquisitionFunction:
        r"""Creates an acquisition function.

        The callable interface is compatible with `ax.modelbridge.factory.get_botorch` factory function.
        See for example `ax.models.torch.botorch_defaults.get_NEI`.

        Args:
            model: A fitted GPyTorch model.
            objective_weights: Transform parameters from model output to raw objective.
            outcome_constraints: Transform parameters from model output to raw constraints.
            X_observed: Tensor of evaluated points.
            X_pending: Tensor of points whose evaluation is pending.
            mc_samples: The number of MC samples to use in the inner-loop optimization.
            qmc: If True, use qMC instead of MC sampling.
            seed: Optional seed for the MCSampler.

        Returns:
            An instance of the acquisition function
        """

        self.model = model

        if qmc:
            self.sampler = SobolQMCNormalSampler(num_samples=mc_samples,
                                                 seed=seed)
        else:
            self.sampler = IIDNormalSampler(num_samples=mc_samples, seed=seed)

        # Store objective and constraints
        self.objective_weights = objective_weights
        self.outcome_constraints = outcome_constraints

        # Run inner loop of Augmented Lagrangian algorithm for fitting of Lagrange Multipliers
        # and store the fitted albo objective and the trace of inner loop optimization
        # (for debugging and visualization).
        self.albo_objective, self.trace_inner = self.fit_albo_objective()

        # Create an acquisition function over the fitted AL objective
        return get_acquisition_function(
            acquisition_function_name=self.acquisition_function_name,
            model=model,
            objective=self.albo_objective,
            X_observed=X_observed,
            X_pending=X_pending,
            mc_samples=mc_samples,
            seed=seed,
            **kwargs)
Example #11
def query_acq_func(acq_func_id: str, acq_func_kwargs: Dict[str, Any],
                   gp_model: SingleTaskGP, gp_model_error: SingleTaskGP,
                   vae_model, q: int, num_MC_samples_acq: int):
    if not hasattr(AnalyticAcquisitionFunction, acq_func_id):
        # use MC version of acq function
        acq_func_id = f'q{acq_func_id}'
        resampler = SobolQMCNormalSampler(num_samples=num_MC_samples_acq,
                                          resample=True).to(
                                              gp_model.train_inputs[0])
        acq_func_kwargs['sampler'] = resampler
        acq_func_class = getattr(mo_acq_func, acq_func_id)

        error_resampler = SobolQMCNormalSampler(
            num_samples=num_MC_samples_acq,
            resample=True).to(gp_model_error.train_inputs[0])
        acq_func_kwargs['error_sampler'] = error_resampler
        acq_func = acq_func_class(
            gp_model, gp_model_error,
            **_filter_kwargs(acq_func_class, **acq_func_kwargs))

    return acq_func
Example #12
 def test_fantasize(self):
     for batch_shape, m, dtype, use_octf in itertools.product(
         (torch.Size(), torch.Size([2])),
         (1, 2),
         (torch.float, torch.double),
         (False, True),
     ):
         tkwargs = {"device": self.device, "dtype": dtype}
         octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
         model, _ = self._get_model_and_data(
             batch_shape=batch_shape, m=m, outcome_transform=octf, **tkwargs
         )
         # fantasize
         X_f = torch.rand(torch.Size(batch_shape + torch.Size([4, 1])), **tkwargs)
         sampler = SobolQMCNormalSampler(num_samples=3)
         fm = model.fantasize(X=X_f, sampler=sampler)
         self.assertIsInstance(fm, model.__class__)
         fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
         self.assertIsInstance(fm, model.__class__)
Example #13
def compute_rank_weights(train_x, train_y, base_models, target_model,
                         num_samples):
    """
    Compute ranking weights for each base model and the target model (using
        LOOCV for the target model). Note: This implementation does not currently
        address weight dilution, since we only have a small number of base models.

    Args:
        train_x: `n x d` tensor of training points (for target task)
        train_y: `n` tensor of training targets (for target task)
        base_models: list of base models
        target_model: target model
        num_samples: number of mc samples

    Returns:
        Tensor: `n_t`-dim tensor with the ranking weight for each model
    """
    ranking_losses = []
    # compute ranking loss for each base model
    for task in range(len(base_models)):
        model = base_models[task]
        # compute posterior over training points for target task
        posterior = model.posterior(train_x)
        sampler = SobolQMCNormalSampler(num_samples=num_samples)
        base_f_samps = sampler(posterior).squeeze(-1).squeeze(-1)
        # compute and save ranking loss
        ranking_losses.append(compute_ranking_loss(base_f_samps, train_y))
    # compute ranking loss for target model using LOOCV
    # note: noise_std is assumed to be defined at module level in the source script
    train_yvar = torch.full_like(train_y, noise_std**2)
    target_f_samps = get_target_model_loocv_sample_preds(
        train_x, train_y, train_yvar, target_model, num_samples)
    ranking_losses.append(compute_ranking_loss(target_f_samps, train_y))
    ranking_loss_tensor = torch.stack(ranking_losses)
    # compute best model (minimum ranking loss) for each sample
    best_models = torch.argmin(ranking_loss_tensor, dim=0)
    # compute proportion of samples for which each model is best
    rank_weights = best_models.bincount(
        minlength=len(ranking_losses)).type_as(train_x) / num_samples
    return rank_weights
Example #14
def get_target_model_loocv_sample_preds(train_x, train_y, train_yvar,
                                        target_model, num_samples):
    """
    Create a batch-mode LOOCV GP and draw a joint sample across all points from the target task.

    Args:
        train_x: `n x d` tensor of training points
        train_y: `n x 1` tensor of training targets
        train_yvar: `n x 1` tensor of observation noise variances
        target_model: fitted target model
        num_samples: number of MC samples to draw

    Returns:
        `num_samples x n x n`-dim tensor of samples, where dim=1 represents the `n` LOO models,
        and dim=2 represents the `n` training points.
    """
    batch_size = len(train_x)
    masks = torch.eye(len(train_x), dtype=torch.uint8, device=device).bool()
    train_x_cv = torch.stack([train_x[~m] for m in masks])
    train_y_cv = torch.stack([train_y[~m] for m in masks])
    train_yvar_cv = torch.stack([train_yvar[~m] for m in masks])
    state_dict = target_model.state_dict()
    # expand to batch size of batch_mode LOOCV model
    state_dict_expanded = {
        name: t.expand(batch_size, *[-1 for _ in range(t.ndim)])
        for name, t in state_dict.items()
    }
    model = get_fitted_model(train_x_cv,
                             train_y_cv,
                             train_yvar_cv,
                             state_dict=state_dict_expanded)
    with torch.no_grad():
        posterior = model.posterior(train_x)
        # Since we have a batch-mode GP and model.posterior always returns an output dimension,
        # the output from the sampler here is `num_samples x n x n x 1`, so we squeeze
        # the last dimension.
        sampler = SobolQMCNormalSampler(num_samples=num_samples)
        return sampler(posterior).squeeze(-1)
Example #15
    def test_qMS_init(self):
        d = 2
        q = 1
        num_data = 3
        q_batch_sizes = [1, 1, 1]
        num_fantasies = [2, 2, 1]
        t_batch_size = [2]
        for dtype in (torch.float, torch.double):
            bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
            bounds = bounds.repeat(1, d)
            train_X = torch.rand(num_data, d, device=self.device, dtype=dtype)
            train_Y = torch.rand(num_data, 1, device=self.device, dtype=dtype)
            model = SingleTaskGP(train_X, train_Y)

            # exactly one of `samplers` or `num_fantasies` must be provided (neither is given here)
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[qExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    inner_mc_samples=[2] * 4,
                )

            # cannot use qMS as its own valfunc_cls
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[qMultiStepLookahead] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                    inner_mc_samples=[2] * 4,
                )

            # construct using samplers
            samplers = [
                SobolQMCNormalSampler(num_samples=nf,
                                      resample=False,
                                      collapse_batch_dims=True)
                for nf in num_fantasies
            ]
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                valfunc_cls=[qExpectedImprovement] * 4,
                valfunc_argfacs=[make_best_f] * 4,
                inner_mc_samples=[2] * 4,
                samplers=samplers,
            )
            self.assertEqual(qMS.num_fantasies, num_fantasies)

            # use default valfunc_cls, valfunc_argfacs, inner_mc_samples
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                samplers=samplers,
            )
            self.assertEqual(len(qMS._valfunc_cls), 4)
            self.assertEqual(len(qMS.inner_samplers), 4)
            self.assertEqual(len(qMS._valfunc_argfacs), 4)

            # _construct_inner_samplers error catching tests below
            # AnalyticAcquisitionFunction with MCAcquisitionObjective
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    objective=IdentityMCObjective(),
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                )
            # AnalyticAcquisitionFunction and q > 1
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=[2, 2, 2],
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                    inner_mc_samples=[2] * 4,
                )
            # AnalyticAcquisitionFunction and inner_mc_samples
            with self.assertWarns(Warning):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                    inner_mc_samples=[2] * 4,
                )
            # MCAcquisitionFunction and non MCAcquisitionObjective
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    objective=ScalarizedObjective(weights=torch.tensor([1.0])),
                    batch_sizes=[2, 2, 2],
                    valfunc_cls=[qExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                    inner_mc_samples=[2] * 4,
                )

            # test warmstarting
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                samplers=samplers,
            )
            q_prime = qMS.get_augmented_q_batch_size(q)
            eval_X = torch.rand(t_batch_size + [q_prime, d])
            warmstarted_X = warmstart_multistep(
                acq_function=qMS,
                bounds=bounds,
                num_restarts=5,
                raw_samples=10,
                full_optimizer=eval_X,
            )
            self.assertEqual(warmstarted_X.shape, torch.Size([5, q_prime, d]))
Example #16
    def test_qMS(self):
        d = 2
        q = 1
        num_data = 3
        q_batch_sizes = [1, 1, 1]
        num_fantasies = [2, 2, 1]
        t_batch_size = [2]
        for dtype in (torch.float, torch.double):
            bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
            bounds = bounds.repeat(1, d)
            train_X = torch.rand(num_data, d, device=self.device, dtype=dtype)
            train_Y = torch.rand(num_data, 1, device=self.device, dtype=dtype)
            model = SingleTaskGP(train_X, train_Y)

            # default evaluation tests
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=[1, 1, 1],
                num_fantasies=num_fantasies,
            )
            q_prime = qMS.get_augmented_q_batch_size(q)
            eval_X = torch.rand(t_batch_size + [q_prime, d])
            result = qMS(eval_X)
            self.assertEqual(result.shape, torch.Size(t_batch_size))

            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                valfunc_cls=[qExpectedImprovement] * 4,
                valfunc_argfacs=[make_best_f] * 4,
                num_fantasies=num_fantasies,
                inner_mc_samples=[2] * 4,
            )
            result = qMS(eval_X)
            self.assertEqual(result.shape, torch.Size(t_batch_size))

            # get induced fantasy model, with collapse_fantasy_base_samples
            fant_model = qMS.get_induced_fantasy_model(eval_X)
            self.assertEqual(
                fant_model.train_inputs[0].shape,
                torch.Size(num_fantasies[::-1] + t_batch_size +
                           [num_data + sum(q_batch_sizes), d]),
            )

            # don't collapse fantasy base samples
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                valfunc_cls=[qExpectedImprovement] * 4,
                valfunc_argfacs=[make_best_f] * 4,
                num_fantasies=num_fantasies,
                inner_mc_samples=[2] * 4,
                collapse_fantasy_base_samples=False,
            )
            q_prime = qMS.get_augmented_q_batch_size(q)
            eval_X = torch.rand(t_batch_size + [q_prime, d])
            result = qMS(eval_X)
            self.assertEqual(result.shape, torch.Size(t_batch_size))
            self.assertEqual(qMS.samplers[0].batch_range, (-3, -2))

            # get induced fantasy model, without collapse_fantasy_base_samples
            fant_model = qMS.get_induced_fantasy_model(eval_X)
            self.assertEqual(
                fant_model.train_inputs[0].shape,
                torch.Size(num_fantasies[::-1] + t_batch_size +
                           [num_data + sum(q_batch_sizes), d]),
            )

            # X_pending
            X_pending = torch.rand(5, d)
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                valfunc_cls=[qExpectedImprovement] * 4,
                valfunc_argfacs=[make_best_f] * 4,
                num_fantasies=num_fantasies,
                inner_mc_samples=[2] * 4,
                X_pending=X_pending,
            )
            q_prime = qMS.get_augmented_q_batch_size(q)
            eval_X = torch.rand(t_batch_size + [q_prime, d])
            result = qMS(eval_X)
            self.assertEqual(result.shape, torch.Size(t_batch_size))

            # add dummy base_weights to samplers
            samplers = [
                SobolQMCNormalSampler(num_samples=nf,
                                      resample=False,
                                      collapse_batch_dims=True)
                for nf in num_fantasies
            ]
            for s in samplers:
                s.base_weights = torch.ones(s.sample_shape[0],
                                            1,
                                            device=self.device,
                                            dtype=dtype)

            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=[1, 1, 1],
                samplers=samplers,
            )
            q_prime = qMS.get_augmented_q_batch_size(q)
            eval_X = torch.rand(t_batch_size + [q_prime, d])
            result = qMS(eval_X)
            self.assertEqual(result.shape, torch.Size(t_batch_size))

            # extract candidates
            cand = qMS.extract_candidates(eval_X)
            self.assertEqual(cand.shape, torch.Size(t_batch_size + [q, d]))
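For reference, an acquisition function like qMS above is typically optimized over the augmented q-batch, with the actual first-step candidates extracted afterwards. A minimal sketch, reusing qMS and bounds from the test and with illustrative num_restarts/raw_samples values:

from botorch.optim import optimize_acqf

q_prime = qMS.get_augmented_q_batch_size(1)
full_X, _ = optimize_acqf(
    acq_function=qMS,
    bounds=bounds,
    q=q_prime,
    num_restarts=5,
    raw_samples=32,
    return_best_only=True,
)
# only the first q points of the optimized tree are the candidates to evaluate
candidates = qMS.extract_candidates(full_X)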