def _get_model(self, batch_shape, num_outputs, n, **tkwargs):
    # Build a FixedNoiseGP on random training data with a constant
    # observation-noise variance of 0.01.
    train_x, train_y = _get_random_data(
        batch_shape=batch_shape, num_outputs=num_outputs, n=n, **tkwargs
    )
    train_yvar = torch.full_like(train_y, 0.01)
    model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
    return model.to(**tkwargs)
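# Note: `_get_random_data` is referenced above but not defined in this snippet.
# A minimal sketch of such a test utility is given below, assuming it draws
# random inputs in [0, 1] and noisy sinusoidal targets with the requested batch
# shape and output dimension; the actual helper may differ.
import math

import torch


def _get_random_data(batch_shape, num_outputs, n, **tkwargs):
    train_x = torch.rand(*batch_shape, n, 1, **tkwargs)
    train_y = torch.sin(train_x * (2 * math.pi)) + 0.2 * torch.randn(
        *batch_shape, n, num_outputs, **tkwargs
    )
    return train_x, train_y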
from typing import Any, Dict, List, Optional

from botorch.fit import fit_gpytorch_model
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.model import Model
from gpytorch.mlls import ExactMarginalLogLikelihood
from torch import Tensor


def fixed_noise_gp_model_constructor(
    Xs: List[Tensor],
    Ys: List[Tensor],
    Yvars: List[Tensor],
    # Maybe these should be optional where irrelevant?
    task_features: List[int],
    fidelity_features: List[int],
    metric_names: List[str],
    state_dict: Optional[Dict[str, Tensor]] = None,
    refit_model: bool = True,
    **kwargs: Any,
) -> Model:
    # Fit a FixedNoiseGP to the first outcome only; extra kwargs are passed
    # through to the model constructor.
    gp = FixedNoiseGP(train_X=Xs[0], train_Y=Ys[0], train_Yvar=Yvars[0], **kwargs)
    gp.to(Xs[0])
    if state_dict is not None:
        # pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
        # param but got `Dict[str, Tensor]`.
        gp.load_state_dict(state_dict)
    if state_dict is None or refit_model:
        fit_gpytorch_model(ExactMarginalLogLikelihood(gp.likelihood, gp))
    return gp
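# A minimal usage sketch of the constructor above, assuming single-metric data.
# The tensor shapes (n x d inputs, n x 1 outcomes/variances) follow the BoTorch
# convention; the empty feature lists and the metric name "objective" are
# illustrative placeholders, not values from the original source.
import torch

X = torch.rand(20, 3, dtype=torch.double)
Y = torch.sin(X.sum(dim=-1, keepdim=True))
Yvar = torch.full_like(Y, 0.01)
model = fixed_noise_gp_model_constructor(
    Xs=[X],
    Ys=[Y],
    Yvars=[Yvar],
    task_features=[],
    fidelity_features=[],
    metric_names=["objective"],
)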