Example #1
import logging
from typing import Callable, Dict, List, Optional, Tuple

import gpytorch
import torch
from ax.models.random.alebo_initializer import ALEBOInitializer
from botorch.acquisition import AcquisitionFunction
from botorch.optim.initializers import initialize_q_batch_nonneg
from botorch.optim.optimize import optimize_acqf
from torch import Tensor

logger = logging.getLogger(__name__)


def alebo_acqf_optimizer(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    n: int,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
    fixed_features: Optional[Dict[int, float]],
    rounding_func: Optional[Callable[[Tensor], Tensor]],
    raw_samples: int,
    num_restarts: int,
    B: Tensor,
) -> Tuple[Tensor, Tensor]:
    """
    Optimize the acquisition function for ALEBO.

    We are optimizing over a polytope within the subspace, and so begin each
    random restart of the acquisition function optimization with points that
    lie within that polytope.
    """
    candidate_list, acq_value_list = [], []
    candidates = torch.tensor([], device=B.device, dtype=B.dtype)
    base_X_pending = acq_function.X_pending  # pyre-ignore
    for i in range(n):
        # Generate initial points for optimization inside embedding
        m_init = ALEBOInitializer(B.cpu().numpy(), nsamp=10 * raw_samples)
        Xrnd_npy, _ = m_init.gen(n=raw_samples, bounds=[(-1.0, 1.0)] * B.shape[1])

        Xrnd = torch.tensor(Xrnd_npy, dtype=B.dtype, device=B.device).unsqueeze(1)
        Yrnd = torch.matmul(Xrnd, B.t())  # Project down to the embedding
        with gpytorch.settings.max_cholesky_size(2000):
            with torch.no_grad():
                alpha = acq_function(Yrnd)

            Yinit = initialize_q_batch_nonneg(X=Yrnd, Y=alpha, n=num_restarts)

            # Optimize the acquisition function, separately for each random restart.
            candidate, acq_value = optimize_acqf(
                acq_function=acq_function,
                bounds=[None, None],  # pyre-ignore
                q=1,
                num_restarts=num_restarts,
                raw_samples=0,
                options={"method": "SLSQP", "batch_limit": 1},
                inequality_constraints=inequality_constraints,
                batch_initial_conditions=Yinit,
                sequential=False,
            )
            candidate_list.append(candidate)
            acq_value_list.append(acq_value)
            candidates = torch.cat(candidate_list, dim=-2)
            acq_function.set_X_pending(
                torch.cat([base_X_pending, candidates], dim=-2)
                if base_X_pending is not None
                else candidates
            )
        logger.info(f"Generated sequential candidate {i+1} of {n}")
    acq_function.set_X_pending(base_X_pending)
    return candidates, torch.stack(acq_value_list)
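
For context, a minimal usage sketch follows. It is not part of the Ax source above: the toy SingleTaskGP surrogate, the random projection B, and the constraint assembly are illustrative assumptions. The sketch relies only on BoTorch's constraint format for optimize_acqf, where each tuple (indices, coefficients, rhs) encodes sum_i coefficients[i] * y[indices[i]] >= rhs, and on the ALEBO polytope definition: an embedded point y is feasible iff B^+ y lies in [-1, 1]^D.

# Hypothetical usage sketch -- the toy model and constraint assembly are
# illustrative assumptions, not taken from the Ax source above.
import torch
from botorch.acquisition import qExpectedImprovement
from botorch.models import SingleTaskGP

torch.manual_seed(0)
d, D = 2, 10  # embedding / ambient dimensions; B is d x D
B = torch.randn(d, D, dtype=torch.double)
Binv = torch.pinverse(B)  # D x d; up-projects embedded points

# Toy GP over the embedding, standing in for the fitted ALEBO surrogate.
train_X = torch.rand(8, d, dtype=torch.double) * 2 - 1
train_Y = -(train_X**2).sum(dim=-1, keepdim=True)
acqf = qExpectedImprovement(SingleTaskGP(train_X, train_Y), best_f=train_Y.max())

# Polytope: y is feasible iff Binv @ y lies in [-1, 1]^D. Each row of Binv
# yields two constraints in BoTorch's (indices, coefficients, rhs) form.
idx = torch.arange(d)
inequality_constraints = []
for j in range(D):
    inequality_constraints.append((idx, -Binv[j], -1.0))  # Binv[j] @ y <= 1
    inequality_constraints.append((idx, Binv[j], -1.0))   # Binv[j] @ y >= -1

candidates, acq_values = alebo_acqf_optimizer(
    acq_function=acqf,
    bounds=None,  # ignored by the function body
    n=1,
    inequality_constraints=inequality_constraints,
    fixed_features=None,
    rounding_func=None,
    raw_samples=100,
    num_restarts=5,
    B=B,
)
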
Example #2
from typing import Callable, Dict, List, Optional, Tuple

import gpytorch
import torch
from ax.models.random.alebo_initializer import ALEBOInitializer
from botorch.acquisition import AcquisitionFunction
from botorch.optim.initializers import initialize_q_batch_nonneg
from botorch.optim.optimize import optimize_acqf
from torch import Tensor


def alebo_acqf_optimizer(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    n: int,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
    fixed_features: Optional[Dict[int, float]],
    rounding_func: Optional[Callable[[Tensor], Tensor]],
    raw_samples: int,
    num_restarts: int,
    B: Tensor,
) -> Tuple[Tensor, Tensor]:
    """
    Optimize the acquisition function for ALEBO.

    We are optimizing over a polytope within the subspace, and so begin each
    random restart of the acquisition function optimization with points that
    lie within that polytope.
    """
    assert n == 1  # Handle batch later
    # Generate initial points for optimization inside embedding
    m_init = ALEBOInitializer(B.cpu().numpy(), nsamp=10 * raw_samples)
    Xrnd_npy, _ = m_init.gen(n=raw_samples, bounds=[(-1.0, 1.0)] * B.shape[1])

    Xrnd = torch.tensor(Xrnd_npy, dtype=B.dtype, device=B.device).unsqueeze(1)
    Yrnd = torch.matmul(Xrnd, B.t())  # Project down to the embedding
    with gpytorch.settings.max_cholesky_size(2000):
        with torch.no_grad():
            alpha = acq_function(Yrnd)

        Yinit = initialize_q_batch_nonneg(X=Yrnd, Y=alpha, n=num_restarts)

        # Optimize the acquisition function, separately for each random restart.
        Xopt = optimize_acqf(
            acq_function=acq_function,
            bounds=[None, None],  # pyre-ignore
            q=n,
            num_restarts=num_restarts,
            raw_samples=0,
            options={"method": "SLSQP", "batch_limit": 1},
            inequality_constraints=inequality_constraints,
            batch_initial_conditions=Yinit,
            sequential=False,
        )
    return Xopt
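
Example #2 is an earlier single-candidate variant: it asserts n == 1, skips the pending-point bookkeeping of Example #1, and returns the (candidate, acquisition value) pair from optimize_acqf unchanged. Both variants hinge on the geometry of B. The self-contained sketch below, using a random stand-in for B, shows the shapes involved and the polytope membership test; only the feasibility condition comes from ALEBO, the rest is illustrative.

# Shape and feasibility sketch with a random stand-in for the ALEBO projection.
import torch

d, D = 2, 10
B = torch.randn(d, D, dtype=torch.double)         # d x D projection matrix
X = torch.rand(5, D, dtype=torch.double) * 2 - 1  # raw points in [-1, 1]^D
Y = X @ B.t()                                     # (5, d): project down to the embedding
X_up = Y @ torch.pinverse(B).t()                  # (5, D): up-projection B^+ y
# An embedded point y lies in the ALEBO polytope iff B^+ y stays in the unit box:
feasible = (X_up.abs() <= 1.0).all(dim=-1)
print(Y.shape, X_up.shape, feasible)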