Example #1
def one_step_acquisition_gp(oracle,
                            full_train_X,
                            full_train_Y,
                            acq,
                            q,
                            bounds,
                            dim,
                            domain,
                            domain_image,
                            state_dict=None,
                            plot_stuff=False):
    model = SingleTaskGP(full_train_X, full_train_Y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)

    if state_dict is not None:
        model.load_state_dict(state_dict)
    fit_gpytorch_model(mll)

    candidate, EI = get_candidate(model, acq, full_train_Y, q, bounds, dim)

    if acq == 'EI' and dim == 1 and plot_stuff:
        plot_util(oracle, model, EI, domain, domain_image, None, full_train_X,
                  full_train_Y, candidate)

    candidate_image = oracle(candidate)
    full_train_X = torch.cat([full_train_X, candidate])
    full_train_Y = torch.cat([full_train_Y, candidate_image])

    state_dict = model.state_dict()
    return full_train_X, full_train_Y, model, candidate, candidate_image, state_dict
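Since `get_candidate` and `plot_util` are not shown in this snippet, a self-contained sketch of the same one-step loop using only public BoTorch APIs may help; the toy oracle, bounds, and optimizer settings below are illustrative assumptions, not part of the original helper.

import math
import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.acquisition import ExpectedImprovement
from botorch.optim import optimize_acqf
from gpytorch.mlls import ExactMarginalLogLikelihood

def toy_oracle(x):
    # stand-in for the black-box objective being optimized
    return torch.sin(2 * math.pi * x)

train_X = torch.rand(5, 1, dtype=torch.double)
train_Y = toy_oracle(train_X)
for _ in range(10):
    model = SingleTaskGP(train_X, train_Y)
    fit_gpytorch_model(ExactMarginalLogLikelihood(model.likelihood, model))
    acq = ExpectedImprovement(model, best_f=train_Y.max())
    candidate, _ = optimize_acqf(
        acq,
        bounds=torch.tensor([[0.0], [1.0]], dtype=torch.double),
        q=1,
        num_restarts=5,
        raw_samples=32,
    )
    train_X = torch.cat([train_X, candidate])
    train_Y = torch.cat([train_Y, toy_oracle(candidate)])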
Example #2
 def fit(self, train_x, train_y):
     n = train_y.shape[0]
     self.mll = gpytorch.mlls.VariationalELBO(self.likelihood, self.model,
                                              n)
     self.model.train()
     self.model.set_train_data(train_x, train_y)
     fit_gpytorch_model(self.mll)
Example #3
    def run(self):
        self._previous_hyperparams = None
        self._optimization_point_number = 0

        # this implements the basic Bayesian optimization loop, closely following
        # https://botorch.org/tutorials/multi_objective_bo
        for iteration in range(1, self._num_instances + 1):
            fit_gpytorch_model(self._torch_mll)

            ###########################
            params_set, transformed_eps, transformed_err = self._torch_optimize_qehvi_and_get_observation(
            )
            ##############################

            # Update privacy and utility GPs with new data
            # In the BoTorch example they actually define new GP objects on each iteration
            # let's try that too
            train_x = torch.cat([self._privacy_gp.train_inputs[0], params_set])
            train_privacy_y = torch.cat(
                [self._privacy_gp.train_targets, transformed_eps])
            train_utility_y = torch.cat(
                [self._utility_gp.train_targets, transformed_err])
            self._create_models(train_x, train_privacy_y, train_utility_y)

        # Post final run tasks
        hypervolume = self.get_untransformed_hypervolume()
        self._saved_hypervolumes.append(hypervolume)
        print("Hypervolume is: {}".format(hypervolume))
        self._estimate_last_point()
        self._plot_and_save()
Example #4
 def fit_uncertainty_estimator(self, features=None, targets=None):
     if self.no_deup:
         return None
     self.e_predictor = SingleTaskGP(features, targets)
     mll = ExactMarginalLogLikelihood(self.e_predictor.likelihood,
                                      self.e_predictor)
     fit_gpytorch_model(mll)
Example #5
    def argmax_posterior_mean(cands: to.Tensor, cands_values: to.Tensor,
                              ddp_space: BoxSpace, num_restarts: int,
                              num_samples: int) -> to.Tensor:
        """
        Compute the GP input with the maximal posterior mean.

        :param cands: candidates a.k.a. x
        :param cands_values: observed values a.k.a. y
        :param ddp_space: space of the domain distribution parameters, indicates the lower and upper bound
        :param num_restarts: number of restarts for the optimization of the acquisition function
        :param num_samples: number of samples for the optimization of the acquisition function
        :return: un-normalized candidate with maximum posterior value a.k.a. x
        """
        if not isinstance(cands, to.Tensor):
            raise pyrado.TypeErr(given=cands, expected_type=to.Tensor)
        if not isinstance(cands_values, to.Tensor):
            raise pyrado.TypeErr(given=cands_values, expected_type=to.Tensor)
        if not isinstance(ddp_space, BoxSpace):
            raise pyrado.TypeErr(given=ddp_space, expected_type=BoxSpace)

        # Normalize the input data and standardize the output data
        uc_projector = UnitCubeProjector(
            to.from_numpy(ddp_space.bound_lo).to(dtype=to.get_default_dtype()),
            to.from_numpy(ddp_space.bound_up).to(dtype=to.get_default_dtype()),
        )
        cands_norm = uc_projector.project_to(cands)
        cands_values_stdized = standardize(cands_values)

        if cands_norm.shape[0] > cands_values.shape[0]:
            print_cbt(
                f"There are {cands.shape[0]} candidates but only {cands_values.shape[0]} evaluations. Ignoring "
                f"the candidates without evaluation for computing the argmax.",
                "y",
            )
            cands_norm = cands_norm[:cands_values.shape[0], :]

        # Create and fit the GP model
        gp = SingleTaskGP(cands_norm, cands_values_stdized)
        gp.likelihood.noise_covar.register_constraint("raw_noise",
                                                      GreaterThan(1e-5))
        mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
        fit_gpytorch_model(mll)

        # Find position with maximal posterior mean
        cand_norm, _ = optimize_acqf(
            acq_function=PosteriorMean(gp),
            bounds=to.stack(
                [to.zeros(ddp_space.flat_dim),
                 to.ones(ddp_space.flat_dim)]).to(dtype=to.float32),
            q=1,
            num_restarts=num_restarts,
            raw_samples=num_samples,
        )

        cand_norm = cand_norm.to(dtype=to.get_default_dtype())
        cand = uc_projector.project_back(cand_norm.detach())
        print_cbt(f"Converged to argmax of the posterior mean: {cand.numpy()}",
                  "g",
                  bright=True)
        return cand
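The part of this example worth isolating is the normalize-inputs / standardize-outputs / un-normalize-result round trip. A minimal sketch of the same pattern with BoTorch's own transform helpers, assuming an illustrative 2-d box and quadratic objective:

import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.acquisition import PosteriorMean
from botorch.optim import optimize_acqf
from botorch.utils.transforms import normalize, standardize, unnormalize
from gpytorch.mlls import ExactMarginalLogLikelihood

bounds = torch.tensor([[0.0, -1.0], [2.0, 3.0]], dtype=torch.double)  # 2 x d box
X = bounds[0] + (bounds[1] - bounds[0]) * torch.rand(12, 2, dtype=torch.double)
Y = (X ** 2).sum(dim=-1, keepdim=True)

gp = SingleTaskGP(normalize(X, bounds), standardize(Y))
fit_gpytorch_model(ExactMarginalLogLikelihood(gp.likelihood, gp))

cand_norm, _ = optimize_acqf(
    PosteriorMean(gp),
    bounds=torch.stack([torch.zeros(2, dtype=torch.double),
                        torch.ones(2, dtype=torch.double)]),
    q=1,
    num_restarts=4,
    raw_samples=64,
)
cand = unnormalize(cand_norm, bounds)  # back to the original units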
Example #6
 def fit(self):
     if not self.fitted:
         mll = ExactMarginalLogLikelihood(self.gp_model.likelihood,
                                          self.gp_model)
         fit_gpytorch_model(mll)
     if self.domain is not None and self.postprocessor is not None:
         self.fit_postprocessor_on_domain(self.domain)
Example #7
def get_map_model(
    train_X: Tensor,
    train_Y: Tensor,
    train_Yvar: Tensor,
    decomposition: Dict[str, List[int]],
    train_embedding: bool = True,
    cat_feature_dict: Optional[Dict] = None,
    embs_feature_dict: Optional[Dict] = None,
    embs_dim_list: Optional[List[int]] = None,
    context_weight_dict: Optional[Dict] = None,
) -> Tuple[LCEAGP, ExactMarginalLogLikelihood]:
    """Obtain MAP fitting of Latent Context Embedding Additive (LCE-A) GP."""
    # assert train_X is non-batched
    assert train_X.dim() < 3, "Don't support batch training"
    model = LCEAGP(
        train_X=train_X,
        train_Y=train_Y,
        train_Yvar=train_Yvar,
        decomposition=decomposition,
        train_embedding=train_embedding,
        embs_dim_list=embs_dim_list,
        cat_feature_dict=cat_feature_dict,
        embs_feature_dict=embs_feature_dict,
        context_weight_dict=context_weight_dict,
    )
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)
    return model, mll
Example #8
def optimize_EI(gp, best_f, n_dim):
    """
    Reference: https://botorch.org/api/optim.html

    bounds: 2d-ndarray (2, D)
        The lower and upper bounds of each parameter.
    q: int
        The number of candidates to sample.
    num_restarts: int
        The number of starting points for multistart optimization.
    raw_samples: int
        The number of initial points.

    The return shape of joint_optimize is (num_restarts, q, D).
    """

    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)
    ei = ExpectedImprovement(gp, best_f=best_f, maximize=False)
    bounds = torch.from_numpy(np.array([[0.] * n_dim, [1.] * n_dim]))
    x = joint_optimize(ei,
                       bounds=bounds,
                       q=1,
                       num_restarts=3,
                       raw_samples=15)

    return np.array(x[0])
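Assuming the legacy `joint_optimize` import this snippet was written against, a call might look like the lines below (data illustrative). Because `maximize=False`, the incumbent passed as `best_f` should be the current minimum.

import torch
from botorch.models import SingleTaskGP

X = torch.rand(8, 3, dtype=torch.double)
Y = X.norm(dim=-1, keepdim=True)
x_next = optimize_EI(SingleTaskGP(X, Y), best_f=Y.min().item(), n_dim=3)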
Example #9
 def _setUp(self, double=False):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device,
                              dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi))
     train_yvar = torch.tensor(0.1**2, device=self.device)
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]],
                                device=self.device,
                                dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=self.device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(self.model_st.likelihood,
                                              self.model_st)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll_st = fit_gpytorch_model(self.mll_st,
                                          options={"maxiter": 5},
                                          max_retries=1)
     model_fn = FixedNoiseGP(self.train_x, self.train_y,
                             self.train_yvar.expand_as(self.train_y))
     self.model_fn = model_fn.to(device=self.device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(self.model_fn.likelihood,
                                              self.model_fn)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll_fn = fit_gpytorch_model(self.mll_fn,
                                          options={"maxiter": 5},
                                          max_retries=1)
Example #10
 def _setUp(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     train_yvar = torch.tensor(0.1 ** 2, device=device)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]], device=device, dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(
         self.model_st.likelihood, self.model_st
     )
     self.mll_st = fit_gpytorch_model(self.mll_st, options={"maxiter": 5})
     model_fn = FixedNoiseGP(
         self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
     )
     self.model_fn = model_fn.to(device=device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(
         self.model_fn.likelihood, self.model_fn
     )
     self.mll_fn = fit_gpytorch_model(self.mll_fn, options={"maxiter": 5})
Example #12
 def fit(
     self,
     training_data: TrainingData,
     bounds: List[Tuple[float, float]],
     task_features: List[int],
     feature_names: List[str],
     metric_names: List[str],
     fidelity_features: List[int],
     target_fidelities: Optional[Dict[int, float]] = None,
     candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
     state_dict: Optional[Dict[str, Tensor]] = None,
     refit: bool = True,
 ) -> None:
     if self._model is None or self._should_reconstruct:
         self.construct(
             training_data=training_data,
             fidelity_features=fidelity_features,
             # Kwargs below are unused in base `Surrogate`, but used in subclasses.
             metric_names=metric_names,
             task_features=task_features,
         )
     if state_dict is not None:
         self.model.load_state_dict(state_dict)
     if state_dict is None or refit:
         # pyre-ignore[16]: Model has no attribute likelihood.
         # All BoTorch `Model`-s expected to work with this setup have likelihood.
         mll = self.mll_class(self.model.likelihood, self.model)
         fit_gpytorch_model(mll)
Example #13
def get_gpr_model(X, y, model=None):
    """
    Fit a gpr model to the data or update the model to new data
    Params ::
    X: (sx1) Tensor: Covariates
    y: (sx1) Tensor: Observations
    model: PyTorch SingleTaskGP model: If model is passed, X and y are used to 
        update it. If None then model is trained on X and y. Default is None
    Return ::
    model: PyTorch SingleTaskGP model: Trained or updated model. 
        Returned in train mode
    mll: PyTorch MarginalLogLikelihood object: Returned in train mode
    """

    if model is None:
        # set up model
        model = SingleTaskGP(X, y)
    else:
        # update model with new observations
        model = model.condition_on_observations(X, y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model).to(X)
    # begin training
    model.train()
    mll.train()
    fit_gpytorch_model(mll)
    return model, mll
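A hypothetical round trip with this helper: fit once, then fold new observations in through the `condition_on_observations` branch. Note that conditioning an exact GP requires prediction caches from at least one posterior call, which the helper itself does not perform, so the update path is sketched with that extra step.

import torch

X = torch.rand(10, 1, dtype=torch.double)
y = torch.sin(6 * X)
model, mll = get_gpr_model(X, y)  # initial fit

X_new = torch.rand(3, 1, dtype=torch.double)
model.eval()
model.posterior(X_new)  # build the caches that conditioning relies on
model, mll = get_gpr_model(X_new, torch.sin(6 * X_new), model=model)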
Example #14
def CreateModel(xtrain, ytrain):
    '''
    Creates and trains a GPyTorch GP model.
    '''
    model = SingleTaskGP(xtrain, ytrain)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)
    return model
Example #15
    def fit(
        self,
        training_data: TrainingData,
        search_space_digest: SearchSpaceDigest,
        metric_names: List[str],
        candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
        state_dict: Optional[Dict[str, Tensor]] = None,
        refit: bool = True,
    ) -> None:
        """Fits the underlying BoTorch ``Model`` to ``m`` outcomes.

        NOTE: ``state_dict`` and ``refit`` keyword arguments control how the
        underlying BoTorch ``Model`` will be fit: whether its parameters will
        be reoptimized and whether it will be warm-started from a given state.

        There are three possibilities:

        * ``fit(state_dict=None)``: fit model from scratch (optimize model
          parameters and set its training data used for inference),
        * ``fit(state_dict=some_state_dict, refit=True)``: warm-start refit
          with a state dict of parameters (still re-optimize model parameters
          and set the training data),
        * ``fit(state_dict=some_state_dict, refit=False)``: load model parameters
          without refitting, but set new training data (used in cross-validation,
          for example).

        Args:
            training_data: BoTorch ``TrainingData`` container with Xs, Ys, and
                possibly Yvars, to be passed to ``Model.construct_inputs`` in
                BoTorch.
            search_space_digest: A SearchSpaceDigest object containing
                metadata on the features in the training data.
            metric_names: Names of each outcome Y in Ys.
            candidate_metadata: Model-produced metadata for candidates, in
                the order corresponding to the Xs.
            state_dict: Optional state dict to load.
            refit: Whether to re-optimize model parameters.
        """
        if self._constructed_manually:
            logger.debug(
                "For manually constructed surrogates (via `Surrogate.from_botorch`), "
                "`fit` skips setting the training data on model and only reoptimizes "
                "its parameters if `refit=True`."
            )
        else:
            self.construct(
                training_data=training_data,
                metric_names=metric_names,
                **dataclasses.asdict(search_space_digest)
            )
        if state_dict:
            # pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
            #  param but got `Dict[str, Tensor]`.
            self.model.load_state_dict(not_none(state_dict))

        if state_dict is None or refit:
            mll = self.mll_class(self.model.likelihood, self.model, **self.mll_options)
            fit_gpytorch_model(mll)
Example #16
 def get_fitted_model(x, obj, state_dict=None):
     # initialize and fit model
     fitted_model = SingleTaskGP(train_X=x, train_Y=obj)
     if state_dict is not None:
         fitted_model.load_state_dict(state_dict)
     mll = ExactMarginalLogLikelihood(fitted_model.likelihood,
                                      fitted_model)
     mll.to(x)
     fit_gpytorch_model(mll)
     return fitted_model
Example #17
 def _initialize_model(self, num_init_samples: int) -> None:
     """
     initialize the GP model with num_init_samples of initial samples
     """
     self.train_X = torch.rand((num_init_samples, self.dim))
     self.train_Y = self._function_call(self.train_X)
     self.model = SingleTaskGP(
         self.train_X, self.train_Y, outcome_transform=Standardize(m=1)
     )
     mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
     fit_gpytorch_model(mll)
Example #18
 def _update_model(self, new_sample: Tensor, new_observation: Tensor) -> None:
     """
     Update the GP model with the new observation(s)
     :param new_sample: sampled point
     :param new_observation: observed function value
     """
     self.train_X = torch.cat((self.train_X, new_sample), 0)
     self.train_Y = torch.cat((self.train_Y, new_observation), 0)
     self.model = self.model.condition_on_observations(new_sample, new_observation)
     if self.retrain_gp:
         mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
         fit_gpytorch_model(mll)
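The trade-off this method encodes, a cheap data-only update via `condition_on_observations` versus a full hyperparameter refit, can be reproduced in isolation. A sketch with toy data follows; the preliminary posterior call is there because exact GPs can only be fantasized after a prediction has been made.

import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import ExactMarginalLogLikelihood

X = torch.rand(10, 2, dtype=torch.double)
Y = X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(X, Y)
fit_gpytorch_model(ExactMarginalLogLikelihood(model.likelihood, model))

x_new = torch.rand(1, 2, dtype=torch.double)
y_new = x_new.sum(dim=-1, keepdim=True)
model.posterior(x_new)  # create prediction caches before fantasizing
model = model.condition_on_observations(x_new, y_new)  # data update, same hyperparameters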
Example #19
def initialize_model():

    # generate synthetic data
    X = torch.rand(20, 2)
    Y = torch.stack([torch.sin(X[:, 0]), torch.cos(X[:, 1])], -1)

    # construct and fit the multi-output model
    gp = SingleTaskGP(X, Y)
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)

    return gp
Example #20
    def fit(self, x_train, y_train):
        # normalize parameter (=input) data
        x_train_norm = self.param_normalizer.project_to(x_train)
        # normalize the data
        y_train_norm = self.data_normalizer.standardize(y_train)

        self.gp = SingleTaskGP(x_train_norm, y_train_norm)
        self.gp.likelihood.noise_covar.register_constraint(
            "raw_noise", GreaterThan(1e-5))
        mll = ExactMarginalLogLikelihood(self.gp.likelihood, self.gp)
        fit_gpytorch_model(mll)
        return self.gp
Example #21
    def get_and_fit_model(
        self,
        Xs: List[Tensor],
        Ys: List[Tensor],
        Yvars: List[Tensor],
        task_features: List[int],
        fidelity_features: List[int],
        metric_names: List[str],
        state_dict: Optional[Dict[str, Tensor]] = None,
        fidelity_model_id: Optional[int] = None,
        **kwargs: Any,
    ) -> ModelListGP:
        """Get a fitted multi-task contextual GP model for each outcome.
        Args:
            Xs: List of X data, one tensor per outcome.
            Ys: List of Y data, one tensor per outcome.
            Yvars: List of noise variances of Y data, one tensor per outcome.
        Returns: Fitted multi-task contextual GP model.
        """

        models = []
        for i, X in enumerate(Xs):
            # validate input Yvars
            Yvar = Yvars[i].clamp_min_(MIN_OBSERVED_NOISE_LEVEL)
            is_nan = torch.isnan(Yvar)
            all_nan_Yvar = torch.all(is_nan)
            if all_nan_Yvar:
                gp_m = LCEMGP(
                    train_X=X,
                    train_Y=Ys[i],
                    task_feature=task_features[i],
                    context_cat_feature=self.context_cat_feature,
                    context_emb_feature=self.context_emb_feature,
                    embs_dim_list=self.embs_dim_list,
                )
            else:
                gp_m = FixedNoiseLCEMGP(
                    train_X=X,
                    train_Y=Ys[i],
                    train_Yvar=Yvar,
                    task_feature=task_features[i],
                    context_cat_feature=self.context_cat_feature,
                    context_emb_feature=self.context_emb_feature,
                    embs_dim_list=self.embs_dim_list,
                )
            models.append(gp_m)
        # Use a ModelListGP
        model = ModelListGP(*models)
        model.to(Xs[0])
        mll = SumMarginalLogLikelihood(model.likelihood, model)
        fit_gpytorch_model(mll)
        return model
Example #22
def select_next_points_botorch(observed_X: List[List[float]],
                               observed_y: List[float]) -> np.ndarray:
    """Generate the next sample to evaluate with XTB

    Uses BoTorch to pick the next sample using an Upper Confidence Bound

    Args:
        observed_X: Observed coordinates
        observed_y: Observed energies
    Returns:
        Next coordinates to try
    """

    # Clip the energies if needed
    observed_y = np.clip(observed_y, -np.inf,
                         2 + np.log10(np.clip(observed_y, 1, np.inf)))

    # Convert inputs to torch arrays
    train_X = torch.tensor(observed_X, dtype=torch.float)
    train_y = torch.tensor(observed_y, dtype=torch.float)
    train_y = train_y[:, None]
    train_y = standardize(-1 * train_y)

    # Make the GP
    gp = SingleTaskGP(train_X,
                      train_y,
                      covar_module=gpykernels.ScaleKernel(
                          gpykernels.ProductStructureKernel(
                              num_dims=train_X.shape[1],
                              base_kernel=gpykernels.PeriodicKernel(
                                  period_length_prior=NormalPrior(360, 0.1)))))
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)

    # Solve the optimization problem
    #  Following boss, we use Eq. 5 of https://arxiv.org/pdf/1012.2599.pdf with delta=0.1
    n_sampled, n_dim = train_X.shape
    kappa = np.sqrt(
        2 *
        np.log10(np.power(n_sampled, n_dim / 2 + 2) * np.pi**2 /
                 (3.0 * 0.1)))  # Results in more exploration over time
    ucb = UpperConfidenceBound(gp, kappa)
    bounds = torch.zeros(2, train_X.shape[1])
    bounds[1, :] = 360
    candidate, acq_value = optimize_acqf(ucb,
                                         bounds=bounds,
                                         q=1,
                                         num_restarts=64,
                                         raw_samples=64)
    return candidate.detach().numpy()[0, :]
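For reference, the exploration weight computed above follows Eq. 5 of Brochu et al. (https://arxiv.org/pdf/1012.2599.pdf), kappa_t = sqrt(2 * log(t^(d/2 + 2) * pi^2 / (3 * delta))) with delta = 0.1, so exploration grows slowly with the sample count t and the dimension d. Note that the snippet uses log10 where the paper uses the natural logarithm, which scales kappa down by a constant factor.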
Example #23
def CreateModel(xtrain, ytrain):
    '''
    Creates and trains a GPyTorch GP model.
    '''
    # model = SingleTaskGP(xtrain, ytrain)
    # mll = ExactMarginalLogLikelihood(model.likelihood, model)
    # fit_gpytorch_model(mll);
    model = FixedNoiseGP(xtrain,
                         ytrain,
                         train_Yvar=torch.full_like(ytrain, 1e-4))

    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)
    return model
Example #24
def test_optimize_pytorch(dim):

	train_x, train_yl = get_initial_evaluations(dim=dim)

	Neval = train_x.shape[0]
	dim = train_x.shape[1]

	gpcr = GPCRmodel(train_x=train_x, train_yl=train_yl, noise_std=0.01)

	mll = MLLGPCR_pytorch(likelihood=None,model=gpcr)

	mll(None,None)

	fit_gpytorch_model(mll,max_retries=10) # See https://botorch.org/api/_modules/botorch/optim/utils.html#sample_all_priors
Example #25
	def update_hyperparameters(self):
		print("\n")
		self.my_print("Fitting model...")
		self.my_print("----------------")
		mll = ExactMarginalLogLikelihood(self.likelihood, self)
		fit_gpytorch_model(mll, max_retries=10)  # See https://botorch.org/api/_modules/botorch/optim/utils.html#sample_all_priors
		# max_retries: https://botorch.org/api/_modules/botorch/fit.html#fit_gpytorch_model

		# fit_gpytorch_model sometimes fails; clamp runaway hyperparameters as a fallback:
		if torch.any(self.covar_module.base_kernel.lengthscale > 2.0):
			self.covar_module.base_kernel.lengthscale[:] = 2.0
		if torch.any(self.covar_module.outputscale > 2.0):
			self.covar_module.outputscale[:] = 2.0

		self.update_hyperparameters_of_model_grad()
Example #26
 def fit_gp_model(self,
                  arm: int,
                  alternative: int = None,
                  update: bool = False) -> None:
     """
     Fits a GP model to the given arm
     :param arm: Arm index
     :param alternative: Last sampled arm alternative. Used when adding samples
         without refitting
     :param update: Forces GP to be fitted. Otherwise, it is fitted every
         self.gp_update_freq samples.
     :return: None
     """
     arm_sample_count = sum([len(e) for e in self.observations[arm]])
     if update or arm_sample_count % self.gp_update_freq == 0:
         train_X_list = list()
         train_Y_list = list()
         for j in range(len(self.alternative_points[arm])):
             for k in range(len(self.observations[arm][j])):
                 train_X_list.append(
                     self.alternative_points[arm][j].unsqueeze(-2))
                 train_Y_list.append(
                     self.observations[arm][j][k].unsqueeze(-2))
         train_X = torch.cat(train_X_list, dim=0)
         train_Y = torch.cat(train_Y_list, dim=0)
         if self.noise_std is None:
             model = SingleTaskGP(train_X,
                                  train_Y,
                                  outcome_transform=Standardize(m=1))
         else:
             model = FixedNoiseGP(
                 train_X,
                 train_Y,
                 train_Yvar=torch.tensor([self.noise_std**2
                                          ]).expand_as(train_Y),
                 outcome_transform=Standardize(m=1),
             )
         mll = ExactMarginalLogLikelihood(model.likelihood, model)
         fit_gpytorch_model(mll)
         self.models[arm] = model
     else:
         last_point = self.alternative_points[arm][alternative].reshape(
             1, -1)
         last_observation = self.observations[arm][alternative][-1].reshape(
             1, -1)
          # condition_on_observations returns a new model rather than
          # updating in place, so store the result back
          self.models[arm] = self.models[arm].condition_on_observations(
              last_point, last_observation, noise=self.noise_std**2)
Example #27
    def test_FixedNoiseMultiTaskGP_single_output(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            model = _get_fixed_noise_model_single_output(**tkwargs)
            self.assertIsInstance(model, FixedNoiseMultiTaskGP)
            self.assertEqual(model.num_outputs, 1)
            self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(
                model.task_covar_module.covar_factor.shape[-1], model._rank
            )

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example #28
 def _setUp(self, double=False, cuda=False, expand=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device,
                              dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi))
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     if expand:
         self.train_x = self.train_x.expand(-1, 2)
         ics = torch.tensor([[0.5, 1.0]], device=device, dtype=dtype)
     else:
         ics = torch.tensor([[0.5]], device=device, dtype=dtype)
     self.initial_conditions = ics
     self.f_best = self.train_y.max().item()
     model = SingleTaskGP(self.train_x, self.train_y)
     self.model = model.to(device=device, dtype=dtype)
     self.mll = ExactMarginalLogLikelihood(self.model.likelihood,
                                           self.model)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll = fit_gpytorch_model(self.mll,
                                       options={"maxiter": 1},
                                       max_retries=1)
Example #29
def get_fitted_model(train_X, train_Y, train_Yvar, state_dict=None):
    """
    Get a single task GP. The model will be fit unless a state_dict with model
        hyperparameters is provided.
    """
    Y_mean = train_Y.mean(dim=-2, keepdim=True)
    Y_std = train_Y.std(dim=-2, keepdim=True)
    model = FixedNoiseGP(train_X, (train_Y - Y_mean) / Y_std, train_Yvar)
    model.Y_mean = Y_mean
    model.Y_std = Y_std
    if state_dict is None:
        mll = ExactMarginalLogLikelihood(model.likelihood, model).to(train_X)
        fit_gpytorch_model(mll)
    else:
        model.load_state_dict(state_dict)
    return model
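Because this variant standardizes the targets by hand rather than through an outcome transform, posteriors come back in standardized units; a sketch of mapping them back with the stashed statistics (data illustrative):

import torch

X = torch.rand(15, 2, dtype=torch.double)
Y = (3.0 * X).sum(dim=-1, keepdim=True)
model = get_fitted_model(X, Y, train_Yvar=torch.full_like(Y, 1e-4))

post = model.posterior(torch.rand(4, 2, dtype=torch.double))
mean_original_units = post.mean * model.Y_std + model.Y_mean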
Example #30
    def fit(
        self,
        train_x: torch.Tensor,
        train_y: torch.Tensor,
        warmstart_hyperparams: bool = False,
        warmstart_induc: bool = False,
        **kwargs,
    ) -> None:
        """Fit underlying model.

        Args:
            train_x (torch.Tensor): Inputs.
            train_y (torch.LongTensor): Responses.
            warmstart_hyperparams (bool): Whether to reuse the previous hyperparameters (True) or fit from scratch
                (False). Defaults to False.
            warmstart_induc (bool): Whether to reuse the previous inducing points or fit from scratch (False).
                Defaults to False.
        """
        self.set_train_data(train_x, train_y)

        # by default we reuse the model state and likelihood. If we
        # want a fresh fit (no warm start), copy the state from class initialization.
        if not warmstart_hyperparams:
            self._reset_hyperparameters()

        if not warmstart_induc:
            self._reset_variational_strategy()

        n = train_y.shape[0]
        mll = gpytorch.mlls.VariationalELBO(self.likelihood, self, n)
        self.train()

        if self.max_fit_time is not None:
            # figure out how long a single evaluation of the mll takes
            starttime = time.time()
            _ = mll(self(train_x), train_y)
            single_eval_time = time.time() - starttime
            n_eval = int(self.max_fit_time // single_eval_time)  # maxfun must be an int
            options = {"maxfun": n_eval}
            logger.info(f"fit maxfun is {n_eval}")

        else:
            options = {}
        logger.info("Starting fit...")
        starttime = time.time()
        fit_gpytorch_model(mll, options=options, **kwargs)
        logger.info(f"Fit done, time={time.time()-starttime}")
Example #31
def propose_candidates(args, prev_res):

    ## train a surrogate model
    gp = SingleTaskGP(prev_res['hparams'], prev_res['acc'])
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)

    ## construct an acquisition function and optimize it
    UCB = UpperConfidenceBound(gp, beta=0.1)

    candidates, acq_value = optimize_acqf(UCB,
                                          bounds=bounds,
                                          q=args.n_cand,
                                          num_restarts=5,
                                          raw_samples=20)

    return candidates
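Note that `bounds` is read from an enclosing scope rather than passed in; a caller would need a 2 x d tensor such as the following in scope (d = 4 is illustrative):

bounds = torch.stack([torch.zeros(4), torch.ones(4)])  # rows are lower/upper bounds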
Example #32
 def _setUp(self, double=False, cuda=False, expand=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     if expand:
         self.train_x = self.train_x.expand(-1, 2)
         ics = torch.tensor([[0.5, 1.0]], device=device, dtype=dtype)
     else:
         ics = torch.tensor([[0.5]], device=device, dtype=dtype)
     self.initial_conditions = ics
     self.f_best = self.train_y.max().item()
     model = SingleTaskGP(self.train_x, self.train_y)
     self.model = model.to(device=device, dtype=dtype)
     self.mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
     self.mll = fit_gpytorch_model(self.mll, options={"maxiter": 1})