Example #1
def alebo_acqf_optimizer(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    n: int,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
    fixed_features: Optional[Dict[int, float]],
    rounding_func: Optional[Callable[[Tensor], Tensor]],
    raw_samples: int,
    num_restarts: int,
    B: Tensor,
) -> Tuple[Tensor, Tensor]:
    """
    Optimize the acquisition function for ALEBO.

    We are optimizing over a polytope within the subspace, and so begin each
    random restart of the acquisition function optimization with points that
    lie within that polytope.
    """
    candidate_list, acq_value_list = [], []
    candidates = torch.tensor([], device=B.device, dtype=B.dtype)
    base_X_pending = acq_function.X_pending  # pyre-ignore
    for i in range(n):
        # Generate initial points for optimization inside the embedding
        m_init = ALEBOInitializer(B.cpu().numpy(), nsamp=10 * raw_samples)
        Xrnd_npy, _ = m_init.gen(n=raw_samples,
                                 bounds=[(-1.0, 1.0)] * B.shape[1])

        Xrnd = torch.tensor(Xrnd_npy, dtype=B.dtype,
                            device=B.device).unsqueeze(1)
        Yrnd = torch.matmul(Xrnd, B.t())  # Project down to the embedding
        with gpytorch.settings.max_cholesky_size(2000):
            with torch.no_grad():
                alpha = acq_function(Yrnd)

            Yinit = initialize_q_batch_nonneg(X=Yrnd, Y=alpha, n=num_restarts)

            # Optimize the acquisition function, separately for each random restart.
            candidate, acq_value = optimize_acqf(
                acq_function=acq_function,
                bounds=[None, None],  # pyre-ignore
                q=1,
                num_restarts=num_restarts,
                raw_samples=0,
                options={
                    "method": "SLSQP",
                    "batch_limit": 1
                },
                inequality_constraints=inequality_constraints,
                batch_initial_conditions=Yinit,
                sequential=False,
            )
        candidate_list.append(candidate)
        acq_value_list.append(acq_value)
        candidates = torch.cat(candidate_list, dim=-2)
        # Fold the new candidate into X_pending so the next sequential
        # candidate is generated conditional on those already chosen.
        acq_function.set_X_pending(
            torch.cat([base_X_pending, candidates], dim=-2)
            if base_X_pending is not None
            else candidates
        )
        logger.info(f"Generated sequential candidate {i+1} of {n}")
    acq_function.set_X_pending(base_X_pending)
    return candidates, torch.stack(acq_value_list)
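A minimal sketch, assuming ALEBO's up-projection uses the pseudo-inverse of B as in the paper, of how the polytope mentioned in the docstring could be encoded in BoTorch's (indices, coefficients, rhs) constraint format. B is the (d x D) projection from the snippet; everything else here is illustrative:

import torch

# A point y in the d-dim embedding is feasible iff its up-projection
# pinv(B) @ y lands in the [-1, 1] box of the original D-dim space.
# Each BoTorch constraint triple means: sum(coefficients * y[indices]) >= rhs.
A = torch.linalg.pinv(B)            # shape (D, d)
indices = torch.arange(B.shape[0])  # all d embedding coordinates
inequality_constraints = []
for j in range(A.shape[0]):
    row = A[j]
    inequality_constraints.append((indices, row, -1.0))   # row @ y >= -1
    inequality_constraints.append((indices, -row, -1.0))  # row @ y <= 1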
Example #2
File: alebo.py Project: viotemp1/Ax
def get_ALEBOInitializer(search_space: SearchSpace, B: np.ndarray,
                         **model_kwargs: Any) -> RandomModelBridge:
    return RandomModelBridge(
        search_space=search_space,
        model=ALEBOInitializer(B=B, **model_kwargs),
        transforms=ALEBO_X_trans,  # pyre-ignore
    )
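A hedged usage sketch; the search space below is illustrative, and the seed kwarg is the one Example #6 passes to ALEBOInitializer through model_kwargs:

import numpy as np
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace

# Four range parameters, projected to a 2-D embedding by a random B.
search_space = SearchSpace(parameters=[
    RangeParameter(name=f"x{i}", parameter_type=ParameterType.FLOAT,
                   lower=-1.0, upper=1.0)
    for i in range(4)
])
B = np.random.randn(2, 4)
bridge = get_ALEBOInitializer(search_space=search_space, B=B, seed=0)
generator_run = bridge.gen(n=5)  # quasi-random arms in the row space of B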
Example #3
def alebo_acqf_optimizer(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    n: int,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
    fixed_features: Optional[Dict[int, float]],
    rounding_func: Optional[Callable[[Tensor], Tensor]],
    raw_samples: int,
    num_restarts: int,
    B: Tensor,
) -> Tuple[Tensor, Tensor]:
    """
    Optimize the acquisition function for ALEBO.

    We are optimizing over a polytope within the subspace, and so begin each
    random restart of the acquisition function optimization with points that
    lie within that polytope.
    """
    assert n == 1  # Handle batch later
    # Generate initial points for optimization inside the embedding
    m_init = ALEBOInitializer(B.cpu().numpy(), nsamp=10 * raw_samples)
    Xrnd_npy, _ = m_init.gen(n=raw_samples, bounds=[(-1.0, 1.0)] * B.shape[1])

    Xrnd = torch.tensor(Xrnd_npy, dtype=B.dtype, device=B.device).unsqueeze(1)
    Yrnd = torch.matmul(Xrnd, B.t())  # Project down to the embedding
    with gpytorch.settings.max_cholesky_size(2000):
        with torch.no_grad():
            alpha = acq_function(Yrnd)

        Yinit = initialize_q_batch_nonneg(X=Yrnd, Y=alpha, n=num_restarts)

        # Optimize the acquisition function, separately for each random restart.
        candidate, acq_value = optimize_acqf(
            acq_function=acq_function,
            bounds=[None, None],  # pyre-ignore
            q=n,
            num_restarts=num_restarts,
            raw_samples=0,
            options={
                "method": "SLSQP",
                "batch_limit": 1
            },
            inequality_constraints=inequality_constraints,
            batch_initial_conditions=Yinit,
            sequential=False,
        )
    return candidate, acq_value
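A sketch of calling this single-candidate variant; acqf, constraints, and B are stand-ins not defined in the snippet, and bounds is passed as None only because this variant never reads it:

candidate, acq_value = alebo_acqf_optimizer(
    acq_function=acqf,   # a fitted MC acquisition function on the embedding
    bounds=None,         # ignored here; the polytope constraints bound the search
    n=1,                 # this variant asserts n == 1
    inequality_constraints=constraints,  # e.g. the polytope sketched after Example #1
    fixed_features=None,
    rounding_func=None,
    raw_samples=1000,
    num_restarts=10,
    B=B,
)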
Example #4
def get_ALEBOInitializer(
    search_space: SearchSpace, B: np.ndarray, **model_kwargs: Any
) -> RandomModelBridge:
    return RandomModelBridge(
        search_space=search_space,
        model=ALEBOInitializer(B=B, **model_kwargs),
        transforms=[CenteredUnitX],
    )
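This variant passes the CenteredUnitX transform directly instead of the ALEBO_X_trans list used in Example #2. CenteredUnitX rescales each range parameter to [-1, 1], matching the bounds=[(-1.0, 1.0)] * D that ALEBOInitializer.gen receives in the other examples. A tiny sketch of that rescaling, where the helper name and the linear form are assumptions about the transform, not Ax API:

def to_centered_unit(x: float, lower: float, upper: float) -> float:
    # Assumed linear map from [lower, upper] onto [-1, 1].
    return 2.0 * (x - lower) / (upper - lower) - 1.0

assert to_centered_unit(0.0, 0.0, 10.0) == -1.0   # lower bound -> -1
assert to_centered_unit(5.0, 0.0, 10.0) == 0.0    # midpoint -> 0
assert to_centered_unit(10.0, 0.0, 10.0) == 1.0   # upper bound -> 1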
Example #5
    def testALEBOSobolModel(self):
        B = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        Q = np.linalg.pinv(B) @ B
        # Test setting attributes
        m = ALEBOInitializer(B=B)
        self.assertTrue(np.allclose(Q, m.Q))

        # Test gen
        Z, w = m.gen(5, bounds=[(-1.0, 1.0)] * 3)
        self.assertEqual(Z.shape, (5, 3))
        self.assertTrue(Z.min() >= -1.0)
        self.assertTrue(Z.max() <= 1.0)
        # Verify that it is in the subspace
        self.assertTrue(np.allclose(Q @ Z.transpose(), Z.transpose()))

        m = ALEBOInitializer(B=B, nsamp=1)
        with self.assertRaises(ValueError):
            m.gen(2, bounds=[(-1.0, 1.0)] * 3)
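A short numerical aside, not part of the test suite, on why the subspace check works: Q = pinv(B) @ B is the orthogonal projector onto the row space of B, so it is symmetric, idempotent, and leaves subspace points fixed:

import numpy as np

B = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
Q = np.linalg.pinv(B) @ B
assert np.allclose(Q @ Q, Q)             # idempotent: Q is a projection
assert np.allclose(Q, Q.T)               # symmetric: an orthogonal projection
x = np.random.randn(3)
assert np.allclose(Q @ (Q @ x), Q @ x)   # once projected, points stay put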
Example #6
def gen_train_test_sets(B, ntrain, ntest, seed_train=1000, seed_test=2000):
    # Generate training points
    m1 = ALEBOInitializer(B=B.numpy(), seed=seed_train)
    train_X = torch.tensor(m1.gen(n=ntrain, bounds=[])[0], dtype=torch.double)
    train_Y = highDhartmann6(train_X)
    # Standardize train Y
    mu = train_Y.mean()
    sigma = train_Y.std()
    train_Y = (train_Y - mu) / sigma
    train_Y = train_Y.unsqueeze(1)
    train_Yvar = 1e-7 * torch.ones(train_Y.shape)

    # Generate test points
    m2 = ALEBOInitializer(B=B.numpy(), seed=seed_test)
    test_X = torch.tensor(m2.gen(n=ntest, bounds=[])[0], dtype=torch.double)
    test_Y = highDhartmann6(test_X)
    return train_X, train_Y, train_Yvar, test_X, test_Y, mu, sigma
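A sketch of how this helper might be called; the dimensions and the random B are illustrative, and highDhartmann6 is assumed to be the high-dimensional Hartmann6 embedding used alongside these benchmarks:

import torch

d, D = 6, 100
B = torch.randn(d, D, dtype=torch.double)
train_X, train_Y, train_Yvar, test_X, test_Y, mu, sigma = gen_train_test_sets(
    B=B, ntrain=50, ntest=10
)
# train_Y is standardized; map model predictions back via y * sigma + mu.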