Example #1
 def test_manual_seed(self):
     initial_state = torch.random.get_rng_state()
     with manual_seed():
         self.assertTrue(torch.all(torch.random.get_rng_state() == initial_state))
     with manual_seed(1234):
         self.assertFalse(torch.all(torch.random.get_rng_state() == initial_state))
     self.assertTrue(torch.all(torch.random.get_rng_state() == initial_state))
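The test above pins down the contract of `manual_seed`: with no seed, the global RNG state is left untouched; with a seed, the generator is reseeded only inside the `with` block and the previous state is restored on exit. As a point of reference, here is a minimal sketch consistent with the behavior the test asserts (the library's actual implementation may differ in details):

    from contextlib import contextmanager
    from typing import Generator, Optional

    import torch

    @contextmanager
    def manual_seed(seed: Optional[int] = None) -> Generator[None, None, None]:
        # Save the global CPU RNG state so it can be restored on exit.
        old_state = torch.random.get_rng_state()
        try:
            if seed is not None:
                torch.random.manual_seed(seed)
            yield
        finally:
            # Restore only if we reseeded; with seed=None this is a no-op.
            if seed is not None:
                torch.random.set_rng_state(old_state)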
Example #2
    def _construct_base_samples(self, posterior: Posterior,
                                shape: torch.Size) -> None:
        r"""Generate iid `N(0,1)` base samples (if necessary).

        This function will generate a new set of base samples and set the
        `base_samples` buffer if one of the following is true:

        - `resample=True`
        - the MCSampler has no `base_samples` attribute.
        - `shape` is different from `self.base_samples.shape` (if
            `collapse_batch_dims=True`, batch dimensions will be
            broadcast automatically as necessary)

        Args:
            posterior: The Posterior for which to generate base samples.
            shape: The shape of the base samples to construct.
        """
        if (self.resample or _check_shape_changed(self.base_samples,
                                                  self.batch_range, shape)
                or (not self.collapse_batch_dims
                    and shape != self.base_samples.shape)):
            with manual_seed(seed=self.seed):
                base_samples = torch.randn(shape,
                                           device=posterior.device,
                                           dtype=posterior.dtype)
            self.seed += 1
            self.register_buffer("base_samples", base_samples)
        elif self.collapse_batch_dims and shape != self.base_samples.shape:
            self.base_samples = self.base_samples.view(shape)
        if self.base_samples.device != posterior.device:
            self.to(device=posterior.device)  # pragma: nocover
        if self.base_samples.dtype != posterior.dtype:
            self.to(dtype=posterior.dtype)
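A note on the `self.seed += 1` line above: bumping the seed after every resample keeps the whole run reproducible while guaranteeing that consecutive resamples do not reuse identical base samples. A small standalone sketch of the same pattern (using `manual_seed` as sketched after Example #1):

    import torch

    seed = 1234
    draws = []
    for _ in range(2):
        with manual_seed(seed=seed):
            draws.append(torch.randn(4))
        seed += 1  # the next resample is reproducible but different
    assert not torch.equal(draws[0], draws[1])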
Example #3
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate the GP sample function at a set of points X.

        Args:
            X: A `batch_shape x n x d`-dim tensor of points

        Returns:
            The value of the GP sample at the `n` points.
        """
        if self.Xs is None:
            X_eval = X  # first time, no previous evaluation points
        else:
            X_eval = torch.cat([self.Xs, X], dim=-2)
        posterior = self._model.posterior(X=X_eval)
        base_sample_shape = posterior.base_sample_shape
        # re-use old samples
        bs_shape = base_sample_shape[:-2] + X.shape[-2:-1] + base_sample_shape[-1:]
        with manual_seed(seed=int(self._seed)):
            new_base_samples = torch.randn(bs_shape, device=X.device, dtype=X.dtype)
        seed = self._seed + 1
        if self.Xs is None:
            base_samples = new_base_samples
        else:
            base_samples = torch.cat([self._base_samples, new_base_samples], dim=-2)
        # TODO: Deduplicate repeated evaluations / deal with numerical degeneracies
        # that could lead to non-deterministic evaluations. We could use SVD- or
        # eigendecomposition-based sampling, but we probably don't want to use this
        # by default for performance reasons.
        Ys = posterior.rsample(torch.Size(), base_samples=base_samples)
        self.register_buffer("_Xs", X_eval)
        self.register_buffer("_Ys", Ys)
        self.register_buffer("_seed", seed)
        self.register_buffer("_base_samples", base_samples)
        return self.Ys[..., -(X.size(-2)) :, :]
Example #4
    def test_MultivariateNormalQMCEngineSeededInvTransform(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            # test even dimension
            with manual_seed(54321):
                a = torch.randn(2, 2)
                cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()

            mean = torch.zeros(2, device=device, dtype=dtype)
            cov = cov.to(device=device, dtype=dtype)
            engine = MultivariateNormalQMCEngine(mean=mean,
                                                 cov=cov,
                                                 seed=12345,
                                                 inv_transform=True)
            samples = engine.draw(n=2)
            self.assertEqual(samples.dtype, dtype)
            self.assertEqual(samples.device.type, device.type)
            samples_expected = torch.tensor(
                [[-0.560064316, 0.629113674], [-1.292604208, -0.048077226]],
                device=device,
                dtype=dtype,
            )
            self.assertTrue(torch.allclose(samples, samples_expected))

            # test odd dimension
            with manual_seed(54321):
                a = torch.randn(3, 3)
                cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()

            mean = torch.zeros(3, device=device, dtype=dtype)
            cov = cov.to(device=device, dtype=dtype)
            engine = MultivariateNormalQMCEngine(mean=mean,
                                                 cov=cov,
                                                 seed=12345,
                                                 inv_transform=True)
            samples = engine.draw(n=2)
            self.assertEqual(samples.dtype, dtype)
            self.assertEqual(samples.device.type, device.type)
            samples_expected = torch.tensor(
                [
                    [-2.388370037, 3.071142435, -0.319439292],
                    [-0.282978594, -4.350236893, -1.085214734],
                ],
                device=device,
                dtype=dtype,
            )
            self.assertTrue(torch.allclose(samples, samples_expected))
Example #5
 def test_get_ehvi(self):
     weights = torch.tensor([0.0, 1.0, 1.0])
     X_observed = torch.rand(4, 3)
     X_pending = torch.rand(1, 3)
     constraints = (torch.tensor([1.0, 0.0, 0.0]), torch.tensor([[10.0]]))
     Y = torch.rand(4, 3)
     mm = MockModel(MockPosterior(mean=Y))
     objective_thresholds = torch.arange(3, dtype=torch.float)
     obj_and_obj_t = get_weighted_mc_objective_and_objective_thresholds(
         objective_weights=weights,
         objective_thresholds=objective_thresholds,
     )
     (weighted_obj, new_obj_thresholds) = obj_and_obj_t
     cons_tfs = get_outcome_constraint_transforms(constraints)
     with manual_seed(0):
         seed = torch.randint(1, 10000, (1, )).item()
     with ExitStack() as es:
         mock_get_acqf = es.enter_context(mock.patch(GET_ACQF_PATH))
         es.enter_context(
             mock.patch(GET_CONSTRAINT_PATH, return_value=cons_tfs))
         es.enter_context(
             mock.patch(GET_OBJ_PATH, return_value=obj_and_obj_t))
         es.enter_context(manual_seed(0))
         get_EHVI(
             model=mm,
             objective_weights=weights,
             outcome_constraints=constraints,
             objective_thresholds=objective_thresholds,
             X_observed=X_observed,
             X_pending=X_pending,
         )
         mock_get_acqf.assert_called_once_with(
             acquisition_function_name="qEHVI",
             model=mm,
             objective=weighted_obj,
             X_observed=X_observed,
             X_pending=X_pending,
             constraints=cons_tfs,
             mc_samples=128,
             qmc=True,
             alpha=0.0,
             seed=seed,
             ref_point=new_obj_thresholds.tolist(),
             Y=Y,
         )
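The two `manual_seed(0)` blocks in this test exploit RNG determinism: seeding before the call lets the test precompute the exact value `torch.randint` will later produce inside `get_EHVI`, so the `seed=seed` assertion on the mock can be exact. The trick in isolation (a hedged sketch, not part of the test suite):

    import torch

    with manual_seed(0):
        expected = torch.randint(1, 10000, (1,)).item()
    with manual_seed(0):
        # Stands in for the identically seeded draw inside the code under test.
        observed = torch.randint(1, 10000, (1,)).item()
    assert expected == observed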
Example #6
    def test_MultivariateNormalQMCEngineSeeded(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):

            # test even dimension
            with manual_seed(54321):
                a = torch.randn(2, 2)
                cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()

            mean = torch.zeros(2, device=device, dtype=dtype)
            cov = cov.to(device=device, dtype=dtype)
            engine = MultivariateNormalQMCEngine(mean=mean,
                                                 cov=cov,
                                                 seed=12345)
            samples = engine.draw(n=2)
            self.assertEqual(samples.dtype, dtype)
            self.assertEqual(samples.device.type, device.type)
            samples_expected = torch.tensor(
                [[-0.849047422, -0.713852942], [0.398635030, 1.350660801]],
                device=device,
                dtype=dtype,
            )
            self.assertTrue(torch.allclose(samples, samples_expected))

            # test odd dimension
            with manual_seed(54321):
                a = torch.randn(3, 3)
                cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()

            mean = torch.zeros(3, device=device, dtype=dtype)
            cov = cov.to(device=device, dtype=dtype)
            engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
            samples = engine.draw(n=2)
            self.assertEqual(samples.dtype, dtype)
            self.assertEqual(samples.device.type, device.type)
            samples_expected = torch.tensor(
                [
                    [3.113158941, -3.262257099, -0.819938779],
                    [0.621987879, 2.352285624, -1.992680788],
                ],
                device=device,
                dtype=dtype,
            )
            self.assertTrue(torch.allclose(samples, samples_expected))
Example #7
    def test_MultivariateNormalQMCEngineSeededOut(self):
        for dtype in (torch.float, torch.double):

            # test even dimension
            with manual_seed(54321):
                a = torch.randn(2, 2)
                cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()

            mean = torch.zeros(2, device=self.device, dtype=dtype)
            cov = cov.to(device=self.device, dtype=dtype)
            engine = MultivariateNormalQMCEngine(mean=mean,
                                                 cov=cov,
                                                 seed=12345)
            out = torch.empty(2, 2, device=self.device, dtype=dtype)
            self.assertIsNone(engine.draw(n=2, out=out))
            samples_expected = torch.tensor(
                [[-0.849047422, -0.713852942], [0.398635030, 1.350660801]],
                device=self.device,
                dtype=dtype,
            )
            self.assertTrue(torch.allclose(out, samples_expected))

            # test odd dimension
            with manual_seed(54321):
                a = torch.randn(3, 3)
                cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()

            mean = torch.zeros(3, device=self.device, dtype=dtype)
            cov = cov.to(device=self.device, dtype=dtype)
            engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
            out = torch.empty(2, 3, device=self.device, dtype=dtype)
            self.assertIsNone(engine.draw(n=2, out=out))
            samples_expected = torch.tensor(
                [
                    [3.113158941, -3.262257099, -0.819938779],
                    [0.621987879, 2.352285624, -1.992680788],
                ],
                device=self.device,
                dtype=dtype,
            )
            self.assertTrue(torch.allclose(out, samples_expected))
Example #8
 def _get_model_and_data(
     self, batch_shape, m, outcome_transform=None, input_transform=None, **tkwargs
 ):
     with manual_seed(0):
         train_X, train_Y = _get_random_data(batch_shape=batch_shape, m=m, **tkwargs)
         train_Yvar = (0.1 + 0.1 * torch.rand_like(train_Y)) ** 2
     model_kwargs = {
         "train_X": train_X,
         "train_Y": train_Y,
         "train_Yvar": train_Yvar,
         "input_transform": input_transform,
         "outcome_transform": outcome_transform,
     }
     model = HeteroskedasticSingleTaskGP(**model_kwargs)
     return model, model_kwargs
Example #9
 def _get_model_and_data(self,
                         batch_shape,
                         m,
                         outcome_transform=None,
                         **tkwargs):
     with manual_seed(0):
         train_X, train_Y = _get_random_data(batch_shape=batch_shape,
                                             num_outputs=m,
                                             **tkwargs)
     train_Yvar = (0.1 + 0.1 * torch.rand_like(train_Y))**2
     model_kwargs = {
         "train_X": train_X,
         "train_Y": train_Y,
         "train_Yvar": train_Yvar,
     }
     if outcome_transform is not None:
         model_kwargs["outcome_transform"] = outcome_transform
     model = HeteroskedasticSingleTaskGP(**model_kwargs)
     return model, model_kwargs
Example #10
    def _gen_new_generator_run(self, n: int = 1) -> GeneratorRun:
        """Generate new generator run for this experiment.

        Args:
            n: Number of arms to generate.
        """
        # If random seed is not set for this optimization, context manager does
        # nothing; otherwise, it sets the random seed for torch, but only for the
        # scope of this call. This is important because torch seed is set globally,
        # so if we just set the seed without the context manager, it can have
        # serious negative impact on the performance of the models that employ
        # stochasticity.
        with manual_seed(seed=self._random_seed), warnings.catch_warnings():
            # Filter out GPYTorch warnings to avoid confusing users.
            warnings.simplefilter("ignore")
            return not_none(self.generation_strategy).gen(
                experiment=self.experiment,
                n=n,
                pending_observations=get_pending_observation_features(
                    experiment=self.experiment),
            )
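For reference, the `with` line fixed above originally chained the two context managers with `and`, which enters only the right-hand manager: `A and B` evaluates to `B` when `A` is truthy, so the `manual_seed` object was created but never entered and the seed was silently dropped. A hedged sketch of the difference:

    import warnings

    import torch

    # Correct: comma-chaining enters both managers, so the seed applies.
    with manual_seed(seed=0), warnings.catch_warnings():
        a1 = torch.rand(1)
    with manual_seed(seed=0), warnings.catch_warnings():
        a2 = torch.rand(1)
    assert torch.equal(a1, a2)  # identical seeded draws

    # Buggy: `and` discards the left operand, so the RNG is never reseeded.
    with manual_seed(seed=0) and warnings.catch_warnings():
        b1 = torch.rand(1)
    with manual_seed(seed=0) and warnings.catch_warnings():
        b2 = torch.rand(1)
    # With overwhelming probability the unseeded draws differ.
    assert not torch.equal(b1, b2)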
Example #11
def gen_batch_initial_conditions(acq_function,
                                 bounds,
                                 q,
                                 num_restarts,
                                 raw_samples,
                                 options=None):
    r"""[Copy of original botorch function]
    
    Generate a batch of initial conditions for random-restart optimization.

    Args:
        acq_function: The acquisition function to be optimized.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
        q: The number of candidates to consider.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of raw samples to consider in the initialization
            heuristic.
        options: Options for initial condition generation. For valid options see
            `initialize_q_batch` and `initialize_q_batch_nonneg`. If `options`
            contains a `nonnegative=True` entry, then `acq_function` is
            assumed to be non-negative (useful when using custom acquisition
            functions).

    Returns:
        A `num_restarts x q x d` tensor of initial conditions.

    Example:
        >>> qEI = qExpectedImprovement(model, best_f=0.2)
        >>> bounds = torch.tensor([[0.], [1.]])
        >>> Xinit = gen_batch_initial_conditions(
        >>>     qEI, bounds, q=3, num_restarts=25, raw_samples=500
        >>> )
    """
    options = options or {}
    seed: Optional[int] = options.get("seed")
    batch_limit: Optional[int] = options.get("batch_limit")
    batch_initial_arms: Tensor
    factor, max_factor = 1, 5
    init_kwargs = {}
    device = bounds.device
    bounds = bounds.cpu()
    if "eta" in options:
        init_kwargs["eta"] = options.get("eta")
    if options.get("nonnegative") or is_nonnegative(acq_function):
        init_func = initialize_q_batch_nonneg
        if "alpha" in options:
            init_kwargs["alpha"] = options.get("alpha")
    else:
        init_func = initialize_q_batch

    q = 1 if q is None else q
    # the dimension the samples are drawn from
    dim = bounds.shape[-1] * q
    if dim > SobolEngine.MAXDIM and settings.debug.on():
        warnings.warn(
            f"Sample dimension q*d={dim} exceeding Sobol max dimension "
            f"({SobolEngine.MAXDIM}). Using iid samples instead.",
            SamplingWarning,
        )

    while factor < max_factor:
        with warnings.catch_warnings(record=True) as ws:
            n = raw_samples * factor
            if dim <= SobolEngine.MAXDIM:
                X_rnd = draw_sobol_samples(bounds=bounds, n=n, q=q, seed=seed)
            else:
                with manual_seed(seed):
                    # load on cpu
                    X_rnd_nlzd = torch.rand(n * dim, dtype=bounds.dtype).view(
                        n, q, bounds.shape[-1])
                X_rnd = bounds[0] + (bounds[1] - bounds[0]) * X_rnd_nlzd
            with torch.no_grad():
                if batch_limit is None:
                    batch_limit = X_rnd.shape[0]
                Y_rnd_list = []
                start_idx = 0
                while start_idx < X_rnd.shape[0]:
                    end_idx = min(start_idx + batch_limit, X_rnd.shape[0])
                    Y_rnd_curr = acq_function(
                        X_rnd[start_idx:end_idx].to(device=device)).cpu()
                    Y_rnd_list.append(Y_rnd_curr)
                    start_idx += batch_limit
                Y_rnd = torch.cat(Y_rnd_list)
            batch_initial_conditions = init_func(
                X=X_rnd, Y=Y_rnd, n=num_restarts,
                **init_kwargs).to(device=device)
            if not any(
                    issubclass(w.category, BadInitialCandidatesWarning)
                    for w in ws):
                return batch_initial_conditions
            if factor < max_factor:
                factor += 1
                if seed is not None:
                    seed += 1  # make sure to sample different X_rnd
    warnings.warn(
        "Unable to find non-zero acquisition function values - initial conditions "
        "are being selected randomly.",
        BadInitialCandidatesWarning,
    )
    return batch_initial_conditions
Example #12
def gen_batch_initial_conditions(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    q: int,
    num_restarts: int,
    raw_samples: int,
    fixed_features: Optional[Dict[int, float]] = None,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
    equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
) -> Tensor:
    r"""Generate a batch of initial conditions for random-restart optimziation.

    TODO: Support t-batches of initial conditions.

    Args:
        acq_function: The acquisition function to be optimized.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
        q: The number of candidates to consider.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of raw samples to consider in the initialization
            heuristic. Note: if `sample_around_best` is True (the default is False),
            then `2 * raw_samples` samples are used.
        fixed_features: A map `{feature_index: value}` for features that
            should be fixed to a particular value during generation.
        options: Options for initial condition generation. For valid options see
            `initialize_q_batch` and `initialize_q_batch_nonneg`. If `options`
            contains a `nonnegative=True` entry, then `acq_function` is
            assumed to be non-negative (useful when using custom acquisition
            functions). In addition, an "init_batch_limit" option can be passed
            to specify the batch limit for the initialization. This is useful
            for avoiding memory limits when computing the batch posterior over
            raw samples.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.

    Returns:
        A `num_restarts x q x d` tensor of initial conditions.

    Example:
        >>> qEI = qExpectedImprovement(model, best_f=0.2)
        >>> bounds = torch.tensor([[0.], [1.]])
        >>> Xinit = gen_batch_initial_conditions(
        >>>     qEI, bounds, q=3, num_restarts=25, raw_samples=500
        >>> )
    """
    options = options or {}
    seed: Optional[int] = options.get("seed")
    batch_limit: Optional[int] = options.get(
        "init_batch_limit", options.get("batch_limit")
    )
    batch_initial_arms: Tensor
    factor, max_factor = 1, 5
    init_kwargs = {}
    device = bounds.device
    bounds_cpu = bounds.cpu()
    if "eta" in options:
        init_kwargs["eta"] = options.get("eta")
    if options.get("nonnegative") or is_nonnegative(acq_function):
        init_func = initialize_q_batch_nonneg
        if "alpha" in options:
            init_kwargs["alpha"] = options.get("alpha")
    else:
        init_func = initialize_q_batch

    q = 1 if q is None else q
    # the dimension the samples are drawn from
    effective_dim = bounds.shape[-1] * q
    if effective_dim > SobolEngine.MAXDIM and settings.debug.on():
        warnings.warn(
            f"Sample dimension q*d={effective_dim} exceeding Sobol max dimension "
            f"({SobolEngine.MAXDIM}). Using iid samples instead.",
            SamplingWarning,
        )

    while factor < max_factor:
        with warnings.catch_warnings(record=True) as ws:
            n = raw_samples * factor
            if inequality_constraints is None and equality_constraints is None:
                if effective_dim <= SobolEngine.MAXDIM:
                    X_rnd = draw_sobol_samples(bounds=bounds_cpu, n=n, q=q, seed=seed)
                else:
                    with manual_seed(seed):
                        # load on cpu
                        X_rnd_nlzd = torch.rand(
                            n, q, bounds_cpu.shape[-1], dtype=bounds.dtype
                        )
                    X_rnd = bounds_cpu[0] + (bounds_cpu[1] - bounds_cpu[0]) * X_rnd_nlzd
            else:
                X_rnd = (
                    get_polytope_samples(
                        n=n * q,
                        bounds=bounds,
                        inequality_constraints=inequality_constraints,
                        equality_constraints=equality_constraints,
                        seed=seed,
                        n_burnin=options.get("n_burnin", 10000),
                        thinning=options.get("thinning", 32),
                    )
                    .view(n, q, -1)
                    .cpu()
                )
            # sample points around best
            if options.get("sample_around_best", False):
                X_best_rnd = sample_points_around_best(
                    acq_function=acq_function,
                    n_discrete_points=n * q,
                    sigma=options.get("sample_around_best_sigma", 1e-3),
                    bounds=bounds,
                    subset_sigma=options.get("sample_around_best_subset_sigma", 1e-1),
                    prob_perturb=options.get("sample_around_best_prob_perturb"),
                )
                if X_best_rnd is not None:
                    X_rnd = torch.cat(
                        [
                            X_rnd,
                            X_best_rnd.view(n, q, bounds.shape[-1]).cpu(),
                        ],
                        dim=0,
                    )
            X_rnd = fix_features(X_rnd, fixed_features=fixed_features)
            with torch.no_grad():
                if batch_limit is None:
                    batch_limit = X_rnd.shape[0]
                Y_rnd_list = []
                start_idx = 0
                while start_idx < X_rnd.shape[0]:
                    end_idx = min(start_idx + batch_limit, X_rnd.shape[0])
                    Y_rnd_curr = acq_function(
                        X_rnd[start_idx:end_idx].to(device=device)
                    ).cpu()
                    Y_rnd_list.append(Y_rnd_curr)
                    start_idx += batch_limit
                Y_rnd = torch.cat(Y_rnd_list)
            batch_initial_conditions = init_func(
                X=X_rnd, Y=Y_rnd, n=num_restarts, **init_kwargs
            ).to(device=device)
            if not any(issubclass(w.category, BadInitialCandidatesWarning) for w in ws):
                return batch_initial_conditions
            if factor < max_factor:
                factor += 1
                if seed is not None:
                    seed += 1  # make sure to sample different X_rnd
    warnings.warn(
        "Unable to find non-zero acquisition function values - initial conditions "
        "are being selected randomly.",
        BadInitialCandidatesWarning,
    )
    return batch_initial_conditions
Example #13
def estimate_feasible_volume(
    bounds: Tensor,
    model: model.Model,
    outcome_constraints: List[Callable[[Tensor], Tensor]],
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
    nsample_feature: int = 1000,
    nsample_outcome: int = 1000,
    threshold: float = 0.1,
    verbose: bool = False,
    seed: Optional[int] = None,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
) -> Tuple[float, float]:
    r"""
    Monte Carlo estimate of the feasible volume with respect
    to feature constraints and outcome constraints.

    Args:
        bounds: A `2 x d` tensor of lower and upper bounds
            for each column of `X`.
        model: The model used for sampling the outcomes.
        outcome_constraints: A list of callables, each mapping a Tensor of dimension
            `sample_shape x batch-shape x q x m` to a Tensor of dimension
            `sample_shape x batch-shape x q`, where negative values imply
            feasibility.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
        nsample_feature: The number of feature samples satisfying the bounds.
        nsample_outcome: The number of outcome samples from the model posterior.
        threshold: A lower limit for the probability of outcome feasibility.
        verbose: An indicator for whether to log the results.
        seed: The seed for both feature and outcome samplers. If omitted,
            use a random seed.
        device: The torch device on which to draw the feature samples.
        dtype: The torch dtype of the feature samples.

    Returns:
        2-element tuple containing:

        - Estimated proportion of volume in feature space that is
            feasible wrt the bounds and the inequality constraints (linear).
        - Estimated proportion of feasible features for which
            posterior samples (outcome) satisfies the outcome constraints
            with probability above the given threshold.
    """

    seed = seed if seed is not None else torch.randint(0, 1000000, (1,)).item()

    with manual_seed(seed=seed):
        box_samples = bounds[0] + (bounds[1] - bounds[0]) * torch.rand(
            (nsample_feature, bounds.size(1)), dtype=dtype, device=device
        )

    features, p_feature = get_feasible_samples(
        samples=box_samples, inequality_constraints=inequality_constraints
    )  # each new feature sample is a row

    p_outcome = get_outcome_feasibility_probability(
        model=model,
        X=features.unsqueeze(-2),
        outcome_constraints=outcome_constraints,
        threshold=threshold,
        nsample_outcome=nsample_outcome,
        seed=seed,
    )

    if verbose:  # pragma: no cover
        logger.info(
            "Proportion of volume that satisfies linear constraints: "
            + f"{p_feature:.4e}"
        )
        if p_feature <= 0.01:
            logger.warning(
                "The proportion of satisfying volume is very low and may lead to "
                + "very long run times. Consider making your constraints less "
                + "restrictive."
            )
        logger.info(
            "Proportion of linear-feasible volume that also satisfies each "
            + f"outcome constraint with probability > 0.1: {p_outcome:.4e}"
        )
        if p_outcome <= 0.001:
            logger.warning(
                "The proportion of volume that also satisfies the outcome constraint "
                + "is very low. Consider making your parameter and outcome constraints "
                + "less restrictive."
            )
    return p_feature, p_outcome
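A hedged usage sketch for the estimator above; `fitted_model` is a placeholder for any fitted BoTorch `Model`, and the single outcome constraint marks `Y[..., 0] >= 0.5` as feasible (recall that negative constraint values imply feasibility):

    import torch

    bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])  # `2 x d` box bounds
    p_feature, p_outcome = estimate_feasible_volume(
        bounds=bounds,
        model=fitted_model,  # placeholder: a fitted botorch Model
        outcome_constraints=[lambda Y: 0.5 - Y[..., 0]],  # <= 0 iff Y[..., 0] >= 0.5
        seed=1234,  # fix the seed for a reproducible Monte Carlo estimate
    )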
Example #14
    def test_get_polytope_samples(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            bounds = torch.zeros(2, 4, **tkwargs)
            bounds[1] = 1
            inequality_constraints = [
                (
                    torch.tensor([3], **tkwargs),
                    torch.tensor([-4], **tkwargs),
                    -3,
                )
            ]
            equality_constraints = [
                (
                    torch.tensor([0], **tkwargs),
                    torch.tensor([1], **tkwargs),
                    0.5,
                )
            ]
            dense_equality_constraints = sparse_to_dense_constraints(
                d=4, constraints=equality_constraints
            )
            with manual_seed(0):
                samps = get_polytope_samples(
                    n=5,
                    bounds=bounds,
                    inequality_constraints=inequality_constraints,
                    equality_constraints=equality_constraints,
                    seed=0,
                    thinning=3,
                    n_burnin=2,
                )
            (A, b) = sparse_to_dense_constraints(
                d=4, constraints=inequality_constraints
            )
            dense_inequality_constraints = (-A, -b)
            with manual_seed(0):
                expected_samps = HitAndRunPolytopeSampler(
                    bounds=bounds,
                    inequality_constraints=dense_inequality_constraints,
                    equality_constraints=dense_equality_constraints,
                    n_burnin=2,
                ).draw(15, seed=0)[::3]
            self.assertTrue(torch.equal(samps, expected_samps))

            # test no equality constraints
            with manual_seed(0):
                samps = get_polytope_samples(
                    n=5,
                    bounds=bounds,
                    inequality_constraints=inequality_constraints,
                    seed=0,
                    thinning=3,
                    n_burnin=2,
                )
            with manual_seed(0):
                expected_samps = HitAndRunPolytopeSampler(
                    bounds=bounds,
                    inequality_constraints=dense_inequality_constraints,
                    n_burnin=2,
                ).draw(15, seed=0)[::3]
            self.assertTrue(torch.equal(samps, expected_samps))

            # test no inequality constraints
            with manual_seed(0):
                samps = get_polytope_samples(
                    n=5,
                    bounds=bounds,
                    equality_constraints=equality_constraints,
                    seed=0,
                    thinning=3,
                    n_burnin=2,
                )
            with manual_seed(0):
                expected_samps = HitAndRunPolytopeSampler(
                    bounds=bounds,
                    equality_constraints=dense_equality_constraints,
                    n_burnin=2,
                ).draw(15, seed=0)[::3]
            self.assertTrue(torch.equal(samps, expected_samps))
Example #15
    def sample_relative(
        self,
        study: Study,
        trial: FrozenTrial,
        search_space: Dict[str, BaseDistribution],
    ) -> Dict[str, Any]:
        assert isinstance(search_space, OrderedDict)

        if len(search_space) == 0:
            return {}

        trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))

        n_trials = len(trials)
        if n_trials < self._n_startup_trials:
            return {}

        trans = _SearchSpaceTransform(search_space)
        n_objectives = len(study.directions)
        values: Union[numpy.ndarray, torch.Tensor] = numpy.empty(
            (n_trials, n_objectives), dtype=numpy.float64
        )
        params: Union[numpy.ndarray, torch.Tensor]
        con: Optional[Union[numpy.ndarray, torch.Tensor]] = None
        bounds: Union[numpy.ndarray, torch.Tensor] = trans.bounds
        params = numpy.empty((n_trials, trans.bounds.shape[0]), dtype=numpy.float64)
        for trial_idx, trial in enumerate(trials):
            params[trial_idx] = trans.transform(trial.params)
            assert len(study.directions) == len(trial.values)

            for obj_idx, (direction, value) in enumerate(zip(study.directions, trial.values)):
                assert value is not None
                if direction == StudyDirection.MINIMIZE:  # BoTorch always assumes maximization.
                    value *= -1
                values[trial_idx, obj_idx] = value

            if self._constraints_func is not None:
                constraints = study._storage.get_trial_system_attrs(trial._trial_id).get(
                    _CONSTRAINTS_KEY
                )
                if constraints is not None:
                    n_constraints = len(constraints)

                    if con is None:
                        con = numpy.full((n_trials, n_constraints), numpy.nan, dtype=numpy.float64)
                    elif n_constraints != con.shape[1]:
                        raise RuntimeError(
                            f"Expected {con.shape[1]} constraints but received {n_constraints}."
                        )

                    con[trial_idx] = constraints

        if self._constraints_func is not None:
            if con is None:
                warnings.warn(
                    "`constraints_func` was given but no call to it correctly computed "
                    "constraints. Constraints passed to `candidates_func` will be `None`."
                )
            elif numpy.isnan(con).any():
                warnings.warn(
                    "`constraints_func` was given but some calls to it did not correctly compute "
                    "constraints. Constraints passed to `candidates_func` will contain NaN."
                )

        values = torch.from_numpy(values)
        params = torch.from_numpy(params)
        if con is not None:
            con = torch.from_numpy(con)
        bounds = torch.from_numpy(bounds)

        if con is not None:
            if con.dim() == 1:
                con.unsqueeze_(-1)
        bounds.transpose_(0, 1)

        if self._candidates_func is None:
            self._candidates_func = _get_default_candidates_func(n_objectives=n_objectives)

        with manual_seed(self._seed):
            # `manual_seed` makes the default candidates functions reproducible.
            # `SobolQMCNormalSampler`'s constructor has a `seed` argument, but its behavior is
            # deterministic when the global torch seed is fixed.
            candidates = self._candidates_func(params, values, con, bounds)
            if self._seed is not None:
                self._seed += 1

        if not isinstance(candidates, torch.Tensor):
            raise TypeError("Candidates must be a torch.Tensor.")
        if candidates.dim() == 2:
            if candidates.size(0) != 1:
                raise ValueError(
                    "Candidates batch optimization is not supported and the first dimension must "
                    "have size 1 if candidates is a two-dimensional tensor. Actual: "
                    f"{candidates.size()}."
                )
            # Batch size is one. Get rid of the batch dimension.
            candidates = candidates.squeeze(0)
        if candidates.dim() != 1:
            raise ValueError("Candidates must be one or two-dimensional.")
        if candidates.size(0) != bounds.size(1):
            raise ValueError(
                "Candidates size must match with the given bounds. Actual candidates: "
                f"{candidates.size(0)}, bounds: {bounds.size(1)}."
            )

        return trans.untransform(candidates.numpy())