from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood


def get_gpr_model(X, y, model=None):
    """
    Fit a GPR model to the data, or update an existing model with new data.

    Params ::
    X: (s x 1) Tensor: Covariates
    y: (s x 1) Tensor: Observations
    model: BoTorch SingleTaskGP model: If a model is passed, X and y are used
        to update it. If None, a new model is trained on X and y. Default is None.
    Return ::
    model: BoTorch SingleTaskGP model: Trained or updated model.
        Returned in train mode.
    mll: GPyTorch MarginalLogLikelihood object: Returned in train mode.
    """

    if model is None:
        # set up model
        model = SingleTaskGP(X, y)
    else:
        # update model with new observations
        model = model.condition_on_observations(X, y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model).to(X)
    # begin training
    model.train()
    mll.train()
    fit_gpytorch_model(mll)
    return model, mll
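
A minimal usage sketch for the function above; the toy 1-d data and the extra posterior call are illustrative assumptions, not part of the original example:

import torch

X = torch.linspace(0, 1, 10).unsqueeze(-1)        # (10 x 1) covariates
y = torch.sin(6 * X) + 0.1 * torch.randn_like(X)  # (10 x 1) noisy observations
model, mll = get_gpr_model(X, y)                  # train a fresh model

# condition_on_observations needs prediction caches, so evaluate once first
X_new = torch.rand(3, 1)
y_new = torch.sin(6 * X_new) + 0.1 * torch.randn_like(X_new)
model.eval()
model.posterior(X_new)
model, mll = get_gpr_model(X_new, y_new, model=model)  # update the model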
Example #2
def get_gpr_model(X, y, model=None):
    """Fit a GPR model to the data, or update an existing model with new data.

    Parameters
    ----------
    X: (s x 1) Tensor
        Covariates
    y: (s x 1) Tensor
        Observations
    model: BoTorch SingleTaskGP model
        If a model is passed, X and y are used to update it.
        If None, a new model is trained on X and y. Default is None.

    Returns
    -------
    model: BoTorch SingleTaskGP model
        Trained or updated model. Returned in train mode.
    mll: GPyTorch MarginalLogLikelihood object
        The loss used to train the hyperparameters. Returned in train mode.
    """
    if model is None:
        # set up model
        model = SingleTaskGP(X, y)
    else:
        # update model with new observations
        model = model.condition_on_observations(X, y)
    mll = ExactMarginalLogLikelihood(model.likelihood, model).to(X)
    # begin training
    model.train()
    mll.train()
    fit_gpytorch_model(mll)
    return model, mll
Example #3
import torch
from botorch.acquisition import PosteriorMean, qKnowledgeGradient
from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from botorch.models.transforms import Standardize
from botorch.optim import optimize_acqf
from botorch.test_functions.synthetic import SyntheticTestFunction
from gpytorch.mlls import ExactMarginalLogLikelihood
from torch import Tensor


class ParametricArm:
    """
    A single optimization arm: a synthetic test function paired with a GP
    model and KG-based sampling over the normalized domain [0, 1]^dim.
    """

    def __init__(
        self,
        function: SyntheticTestFunction,
        num_init_samples: int = 10,
        retrain_gp: bool = False,
        num_restarts: int = 10,
        raw_samples: int = 1000,
    ):
        """
        Initialize the Arm

        :param function: the function of the arm to sample from
        :param num_init_samples: number of samples to initialize with
        :param retrain_gp: retrain the model after each sample if True
        :param num_restarts: number of random restarts for acquisition function optimization
        :param raw_samples: number of raw samples for acquisition function optimization
        """
        self.function = function
        self.dim = function.dim
        self.bounds = Tensor(function._bounds).t()
        self.scale = self.bounds[1] - self.bounds[0]
        self.l_bounds = self.bounds[0]
        self.num_restarts = num_restarts
        self.raw_samples = raw_samples
        self._initialize_model(num_init_samples)
        self._update_current_best()
        self._maximize_kg()
        self.retrain_gp = retrain_gp
        self.num_samples = num_init_samples

    def _maximize_kg(self) -> None:
        """
        Maximizes the KG acquisition function and stores the resulting
        candidate and its value.
        """
        acq_func = qKnowledgeGradient(
            model=self.model, current_value=self.current_best_val
        )
        # acq_func = qExpectedImprovement(model=self.model, best_f=self.current_best_val)
        # acq_func = ExpectedImprovement(model=self.model, best_f=self.current_best_val)
        self.next_candidate, self.kg_value = optimize_acqf(
            acq_func,
            Tensor([[0], [1]]).repeat(1, self.dim),
            q=1,
            num_restarts=self.num_restarts,
            raw_samples=self.raw_samples,
        )

    def _update_current_best(self) -> None:
        """
        Updates the current best solution and corresponding value
        """
        pm = PosteriorMean(self.model)
        self.current_best_sol, self.current_best_val = optimize_acqf(
            pm,
            Tensor([[0], [1]]).repeat(1, self.dim),
            q=1,
            num_restarts=self.num_restarts,
            raw_samples=self.raw_samples,
        )

    def _function_call(self, X: Tensor) -> Tensor:
        """
        Scales the solutions to the function domain and returns the function value.
        :param X: Solutions on the normalized scale [0, 1]
        :return: function value
        """
        shape = list(X.size())
        shape[-1] = 1
        X = X * self.scale.repeat(shape) + self.l_bounds.repeat(shape)
        # TODO: adjust for minimization
        return -self.function(X).unsqueeze(1)

    def _initialize_model(self, num_init_samples: int) -> None:
        """
        Initialize the GP model on num_init_samples uniformly random points.
        """
        self.train_X = torch.rand((num_init_samples, self.dim))
        self.train_Y = self._function_call(self.train_X)
        self.model = SingleTaskGP(
            self.train_X, self.train_Y, outcome_transform=Standardize(m=1)
        )
        mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
        fit_gpytorch_model(mll)

    def _update_model(self, new_sample: Tensor, new_observation: Tensor) -> None:
        """
        Update the GP model with the new observation(s)
        :param new_sample: sampled point
        :param new_observation: observed function value
        """
        self.train_X = torch.cat((self.train_X, new_sample), 0)
        self.train_Y = torch.cat((self.train_Y, new_observation), 0)
        self.model = self.model.condition_on_observations(new_sample, new_observation)
        if self.retrain_gp:
            mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
            fit_gpytorch_model(mll)

    def sample_next(self):
        """
        Sample the next point, i.e. the point that maximizes KG;
        update the model (retraining if requested) and refresh the
        current best and the next candidate.
        """
        Y = self._function_call(self.next_candidate)
        self._update_model(self.next_candidate, Y)
        self.num_samples += 1  # keep the sample count in sync
        self._update_current_best()
        self._maximize_kg()
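
A minimal usage sketch, assuming one of BoTorch's built-in synthetic test functions (Branin is an illustrative choice, not from the original example):

from botorch.test_functions import Branin

arm = ParametricArm(Branin(), num_init_samples=10, retrain_gp=True)
for _ in range(20):
    arm.sample_next()
print("best solution (unit cube):", arm.current_best_sol)
print("best posterior-mean value:", arm.current_best_val)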
Example #4
# model.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(1e1))
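
# The loop below assumes obj_noisy, train_x, train_y, and a fitted SingleTaskGP
# `model` already exist; a minimal setup sketch (the 1-d objective is an
# illustrative assumption, not from the original snippet):
import torch
import matplotlib.pyplot as plt
from botorch.acquisition import UpperConfidenceBound
from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf
from gpytorch.mlls import ExactMarginalLogLikelihood

def obj_noisy(x):
  # noisy 1-d objective on [-1, 1]
  return torch.sin(3 * x) + 0.1 * torch.randn_like(x)

train_x = 2 * torch.rand(5, 1) - 1  # 5 initial points in [-1, 1]
train_y = obj_noisy(train_x)
model = SingleTaskGP(train_x, train_y)
fit_gpytorch_model(ExactMarginalLogLikelihood(model.likelihood, model))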

for it in range(25):
  # Acquisition function...
  candidate_x, acq_value = optimize_acqf(
      UpperConfidenceBound(model, beta=0.1),
      # ExpectedImprovement(model, best_f=torch.max(train_y))
      bounds=torch.Tensor([[-1], [1]]),
      q=1,
      num_restarts=5,
      raw_samples=20,
  )
  candidate_y = obj_noisy(candidate_x)
  train_x = torch.cat([train_x, candidate_x])
  train_y = torch.cat([train_y, candidate_y])
  model = model.condition_on_observations(X=candidate_x, Y=candidate_y)

  # Train GP...
  mll = ExactMarginalLogLikelihood(model.likelihood, model)
  fit_gpytorch_model(mll)

  # Plotting...
  model.eval()

  fig, ax = plt.subplots(1, 1, figsize=(6, 4))
  plt.title(f"Bayesian Opt. without derivatives, Iteration {it}")
  test_x = torch.linspace(-1, 1, steps=100)

  with torch.no_grad():
    posterior = model.posterior(test_x)
    # these are 2 std devs from mean
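    # plausible completion of the truncated snippet (not in the original):
    lower, upper = posterior.mvn.confidence_region()
    ax.plot(test_x, posterior.mean.squeeze(), label="posterior mean")
    ax.fill_between(test_x, lower, upper, alpha=0.3, label="mean ± 2 std devs")
    ax.scatter(train_x.squeeze(), train_y.squeeze(), marker="x", label="observations")
    ax.legend()
    plt.show()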