Example #1
    def __init__(
        self,
        model: GPyTorchModel,
        target: float,
        query_set_size: Optional[int] = None,
        Xq: Optional[Tensor] = None,
    ) -> None:
        """
        A global look-ahead acquisition function.

        Args:
            model: The gpytorch model.
            target: Threshold value to target in p-space.
            query_set_size: Number of points in the global reference set to
                generate if Xq is not passed.
            Xq: (m x d) global reference set.
        """
        super().__init__(model=model)
        assert (
            Xq is not None or query_set_size is not None
        ), "Must pass either query set size or a query set!"
        if Xq is not None and query_set_size is not None:
            assert Xq.shape[0] == query_set_size, (
                "If passing both Xq and query_set_size, "
                f"first dim of Xq should be query_set_size, got {Xq.shape[0]} != {query_set_size}"
            )
        self.gamma = norm.ppf(target)
        Xq = (
            Xq
            if Xq is not None
            else make_scaled_sobol(model.lb, model.ub, query_set_size)
        )
        self.register_buffer("Xq", Xq)
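Every example on this page leans on make_scaled_sobol. For orientation, here is a minimal sketch of what such a helper plausibly does, assuming it wraps torch.quasirandom.SobolEngine and rescales the unit-cube draws into the box [lb, ub]; the actual library implementation may differ in details:

    import torch
    from torch.quasirandom import SobolEngine

    def make_scaled_sobol_sketch(lb, ub, size, seed=None):
        # Illustrative re-implementation only, not the library's code.
        lb = torch.as_tensor(lb, dtype=torch.float64)
        ub = torch.as_tensor(ub, dtype=torch.float64)
        # Draw quasi-random points in the unit hypercube [0, 1]^d...
        engine = SobolEngine(dimension=lb.shape[0], scramble=True, seed=seed)
        grid = engine.draw(size).to(lb)
        # ...then rescale them into the box [lb, ub].
        return lb + (ub - lb) * grid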
Example #2
    def _select_inducing_points(self, method="auto"):
        with torch.no_grad():
            assert method in (
                "pivoted_chol",
                "kmeans++",
                "auto",
                "sobol",
            ), f"Inducing point method should be one of pivoted_chol, kmeans++, sobol, or auto; got {method}"

            if method == "sobol":
                return make_scaled_sobol(self.lb, self.ub, self.inducing_size)
            train_X = torch.unique(self.train_inputs[0], dim=0)
            if method == "auto":
                if train_X.shape[0] <= self.inducing_size:
                    return train_X
                else:
                    method = "kmeans++"

            if method == "pivoted_chol":
                inducing_points = _select_inducing_points(
                    inputs=train_X,
                    covar_module=self.covar_module,
                    num_inducing=self.inducing_size,
                    input_batch_shape=torch.Size([]),
                )
            elif method == "kmeans++":
                # initialize inducing points at kmeans++ centroids of the training inputs
                inducing_points = torch.tensor(
                    kmeans2(train_X.numpy(), self.inducing_size, minit="++")[0],
                    dtype=train_X.dtype,
                )
            return inducing_points
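The kmeans++ branch above relies on scipy's kmeans2; a standalone sketch of just that step (the function name here is illustrative, not from the library):

    import torch
    from scipy.cluster.vq import kmeans2

    def kmeans_inducing_points(train_X: torch.Tensor, num_inducing: int) -> torch.Tensor:
        # Use kmeans++-initialized centroids of the training inputs as inducing locations.
        centroids, _ = kmeans2(train_X.numpy(), num_inducing, minit="++")
        return torch.tensor(centroids, dtype=train_X.dtype)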
Example #3
    def __init__(self,
                 lb,
                 ub,
                 n_trials,
                 dim=1,
                 outcome_type="single_probit",
                 seed=None):
        super().__init__(lb=lb, ub=ub, dim=dim, outcome_type=outcome_type)

        self.points = make_scaled_sobol(lb=lb, ub=ub, size=n_trials, seed=seed)

        self.n_trials = n_trials
        self._count = 0
        self.seed = seed
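The constructor precomputes all n_trials Sobol points up front and tracks a counter. A plausible gen method for such a strategy (assumed here, since the rest of the class body is not shown) just serves the next slice of the grid:

    def gen(self, num_points: int = 1):
        # Serve the next slice of the precomputed Sobol grid and advance the counter.
        points = self.points[self._count : self._count + num_points]
        self._count += num_points
        return points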
Example #4
    def test_sobolmodel_single(self):
        # test that SobolModel doesn't mess with shapes

        sobol1 = make_scaled_sobol(lb=[1, 2, 3], ub=[2, 3, 4], size=10, seed=12345)

        sobol2 = np.zeros((10, 3))
        mod = SobolStrategy(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, n_trials=10, seed=12345)

        npt.assert_equal(sobol1, mod.points)

        for i in range(10):
            sobol2[i, :] = mod.gen()

        npt.assert_equal(sobol1, sobol2)

        # check that bounds are also right
        self.assertTrue(np.all(sobol1[:, 0] > 1))
        self.assertTrue(np.all(sobol1[:, 1] > 2))
        self.assertTrue(np.all(sobol1[:, 2] > 3))
        self.assertTrue(np.all(sobol1[:, 0] < 2))
        self.assertTrue(np.all(sobol1[:, 1] < 3))
        self.assertTrue(np.all(sobol1[:, 2] < 4))
Example #5
    def __init__(
        self,
        inducing_min,
        inducing_max,
        inducing_size=10,
        mean_module=None,
        covar_module=None,
    ):
        # width of the inducing-point box, used to scale the lengthscale prior
        mean_prior = inducing_max - inducing_min

        inducing_points = torch.Tensor(
            make_scaled_sobol(inducing_min, inducing_max, inducing_size))

        variational_distribution = MeanFieldVariationalDistribution(
            inducing_points.size(0))
        variational_strategy = VariationalStrategy(
            self,
            inducing_points,
            variational_distribution,
            learn_inducing_locations=False,
        )
        super(GPClassificationModel, self).__init__(variational_strategy)
        self.mean_module = mean_module or gpytorch.means.ConstantMean(
            prior=gpytorch.priors.NormalPrior(loc=0.0, scale=2.0))
        ls_prior = gpytorch.priors.GammaPrior(concentration=3.0,
                                              rate=6.0 / mean_prior)
        ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
        ls_constraint = gpytorch.constraints.Positive(
            transform=None, initial_value=ls_prior_mode)
        ndim = mean_prior.shape[0]
        self.covar_module = covar_module or gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                lengthscale_prior=ls_prior,
                lengthscale_constraint=ls_constraint,
                ard_num_dims=ndim,
            ),
            outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
        )
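The lengthscale constraint above is initialized at the mode of the Gamma prior; the mode of a Gamma distribution with concentration α and rate β is (α − 1)/β for α > 1. A quick check with illustrative numbers:

    from gpytorch.priors import GammaPrior

    # With concentration=3.0 and rate=6.0 (i.e., mean_prior == 1), the mode is
    # (3.0 - 1) / 6.0 = 1/3, which seeds the lengthscale before fitting.
    prior = GammaPrior(concentration=3.0, rate=6.0)
    mode = (prior.concentration - 1) / prior.rate  # tensor(0.3333)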
Example #6
    def test_sobolgen_single(self):
        # test that SobolGenerator doesn't mess with shapes

        sobol1 = make_scaled_sobol(lb=[1, 2, 3],
                                   ub=[2, 3, 4],
                                   size=10,
                                   seed=12345)

        sobol2 = torch.zeros((10, 3))
        mod = SobolGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, seed=12345)

        for i in range(10):
            sobol2[i, :] = mod.gen()

        npt.assert_almost_equal(sobol1.numpy(), sobol2.numpy())

        # check that bounds are also right
        self.assertTrue(torch.all(sobol1[:, 0] > 1))
        self.assertTrue(torch.all(sobol1[:, 1] > 2))
        self.assertTrue(torch.all(sobol1[:, 2] > 3))
        self.assertTrue(torch.all(sobol1[:, 0] < 2))
        self.assertTrue(torch.all(sobol1[:, 1] < 3))
        self.assertTrue(torch.all(sobol1[:, 2] < 4))
Example #7
    def inv_query(
        self: ModelProtocol,
        y: float,
        locked_dims: Optional[Mapping[int, List[float]]] = None,
        probability_space: bool = False,
        n_samples: int = 1000,
    ) -> Tuple[float, torch.Tensor]:
        """Query the model inverse.
        Return nearest x such that f(x) = queried y, and also return the
            value of f at that point.
        Args:
            y (float): Points at which to find the inverse.
            locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
                inverse is along a slice of the full surface.
            probability_space (bool, optional): Is y (and therefore the
                returned nearest_y) in probability space instead of latent
                function space? Defaults to False.
        Returns:
            Tuple[float, np.ndarray]: Tuple containing the value of f
                nearest to queried y and the x position of this value.
        """

        locked_dims = locked_dims or {}

        def model_distance(x, pt, probability_space):
            return np.abs(
                self.predict(torch.tensor([x]), probability_space)[0].detach().numpy()
                - pt
            )

        # Look for the point with value closest to y, subject to the dict of locked dims

        query_lb = self.lb.clone()
        query_ub = self.ub.clone()

        for locked_dim in locked_dims.keys():
            dim_values = locked_dims[locked_dim]
            if len(dim_values) == 1:
                query_lb[locked_dim] = dim_values[0]
                query_ub[locked_dim] = dim_values[0]
            else:
                query_lb[locked_dim] = dim_values[0]
                query_ub[locked_dim] = dim_values[1]

        d = make_scaled_sobol(query_lb, query_ub, n_samples, seed=0)

        bounds = zip(query_lb.numpy(), query_ub.numpy())

        fmean, _ = self.predict(d, probability_space)

        f = torch.abs(fmean - y)
        estimate = d[torch.where(f == torch.min(f))[0][0]].numpy()
        a = minimize(
            model_distance,
            estimate,
            args=(y, probability_space),
            method=self.extremum_solver,
            bounds=bounds,
        )
        val = self.predict(torch.tensor([a.x]), probability_space)[0].item()
        return val, torch.Tensor(a.x)
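A hypothetical call, assuming model is a fitted instance exposing this method: find the input where the predicted probability is closest to 0.75, holding dimension 1 fixed at 0.5:

    val, x_star = model.inv_query(
        y=0.75,
        locked_dims={1: [0.5]},
        probability_space=True,
    )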
Example #8
    def _get_extremum(
        self: ModelProtocol,
        extremum_type: str,
        locked_dims: Optional[Mapping[int, List[float]]] = None,
        n_samples: int = 1000,
    ) -> Tuple[float, np.ndarray]:
        """Return the extremum (min or max) of the modeled function
        Args:
            extremum_type (str): type of extremum (currently 'min' or 'max'
            n_samples (int, optional): number of coarse grid points to sample for optimization estimate.
        Returns:
            Tuple[float, np.ndarray]: Tuple containing the min and its location (argmin).
        """
        locked_dims = locked_dims or {}

        def signed_model(x, sign=1):
            return sign * self.predict(torch.tensor([x]))[0].detach().numpy()

        query_lb = self.lb.clone()
        query_ub = self.ub.clone()

        for locked_dim in locked_dims.keys():
            dim_values = locked_dims[locked_dim]
            if len(dim_values) == 1:
                query_lb[locked_dim] = dim_values[0]
                query_ub[locked_dim] = dim_values[0]
            else:
                query_lb[locked_dim] = dim_values[0]
                query_ub[locked_dim] = dim_values[1]

        # generate a coarse sample to compute an initial estimate.
        d = make_scaled_sobol(query_lb, query_ub, n_samples, seed=0)

        bounds = zip(query_lb.numpy(), query_ub.numpy())

        fmean, _ = self.predict(d)

        if extremum_type == "max":
            estimate = d[torch.where(fmean == torch.max(fmean))[0][0]].numpy()
            a = minimize(
                signed_model,
                estimate,
                args=-1,
                method=self.extremum_solver,
                bounds=bounds,
            )
            return -a.fun, a.x
        elif extremum_type == "min":
            estimate = d[torch.where(fmean == torch.min(fmean))[0][0]].numpy()
            a = minimize(
                signed_model,
                estimate,
                args=1,
                method=self.extremum_solver,
                bounds=bounds,
            )
            return a.fun, a.x

        else:
            raise RuntimeError(
                f"Unknown extremum type: '{extremum_type}'! Valid types: 'min', 'max'."
            )
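And a matching hypothetical call for _get_extremum: find the model maximum while restricting dimension 1 to the slice [0.2, 0.8]:

    fmax, argmax = model._get_extremum("max", locked_dims={1: [0.2, 0.8]})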
Example #9
 def eval_grid(self):
     return make_scaled_sobol(lb=self.lb,
                              ub=self.ub,
                              size=self.n_eval_points)
Example #10
 def test_scaled_sobol_sizes(self):
     lb = np.r_[0, 1]
     ub = np.r_[1, 30]
     grid = make_scaled_sobol(lb, ub, 100)
     self.assertEqual(grid.shape, (100, 2))
Example #11
    def test_scaled_sobol_asserts(self):

        lb = np.r_[0, 0, 1]
        ub = np.r_[1]
        with self.assertRaises(AssertionError):
            make_scaled_sobol(lb, ub, 10)
Example #12
    def gen(self, num_points: int, model: ModelProtocol) -> np.ndarray:
        """Query next point(s) to run by optimizing the acquisition function.
        Args:
            num_points (int): Number of points to query.
            model (ModelProtocol): Fitted model of the data.
        Returns:
            np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
        """

        # eval should be inherited from superclass
        model.eval()  # type: ignore
        train_x = model.train_inputs[0]
        acqf = self._instantiate_acquisition_fn(model, train_x)

        logger.info("Starting gen...")
        starttime = time.time()

        if self.max_gen_time is None:
            new_candidate, _ = optimize_acqf(
                acq_function=acqf,
                bounds=torch.tensor(np.c_[model.lb, model.ub]).T.to(train_x),
                q=num_points,
                num_restarts=self.restarts,
                raw_samples=self.samps,
            )
        else:
            # figure out how long a single acquisition evaluation takes
            starttime = time.time()
            _ = acqf(train_x[0:1, :])
            single_eval_time = time.time() - starttime

            # only a heuristic for total num evals since everything is stochastic,
            # but the reasoning is: we initialize with self.samps samples, subsample
            # self.restarts of them in proportion to the value of the acqf, and
            # run that many optimizations. So:
            # total_time = single_eval_time * n_eval * restarts + single_eval_time * samps
            # and we solve for n_eval
            n_eval = int((self.max_gen_time - single_eval_time * self.samps) /
                         (single_eval_time * self.restarts))
            if n_eval > 10:
                # heuristic: only run the optimizer if we can afford at least
                # 10 evals per restart; otherwise fall back to quasi-random search
                options = {"maxfun": n_eval}
                logger.info(f"gen maxfun is {n_eval}")

                new_candidate, _ = optimize_acqf(
                    acq_function=acqf,
                    bounds=torch.tensor(np.c_[model.lb,
                                              model.ub]).T.to(train_x),
                    q=num_points,
                    num_restarts=self.restarts,
                    raw_samples=self.samps,
                    options=options,
                )
            else:
                logger.info(
                    f"gen maxfun is {n_eval}, falling back to random search..."
                )
                nsamp = max(int(self.max_gen_time / single_eval_time), 10)
                # Generate the points at which to sample
                X = make_scaled_sobol(lb=model.lb, ub=model.ub, size=nsamp)

                acqvals = acqf(X[:, None, :])

                best_indx = torch.argmax(acqvals, dim=0)
                new_candidate = X[best_indx, None]

        logger.info(f"Gen done, time={time.time()-starttime}")
        return new_candidate.numpy()
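To make the budget heuristic concrete, here is the n_eval arithmetic with illustrative numbers (a 2 s budget, 1 ms per acquisition evaluation, 10 restarts, 1000 raw samples):

    max_gen_time, single_eval_time, restarts, samps = 2.0, 1e-3, 10, 1000
    # total_time = single_eval_time * n_eval * restarts + single_eval_time * samps
    # => n_eval = (2.0 - 0.001 * 1000) / (0.001 * 10) = 100
    n_eval = int((max_gen_time - single_eval_time * samps) / (single_eval_time * restarts))
    assert n_eval == 100  # > 10, so the optimize_acqf path is taken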