Example #1
def _get_model(self, batch_shape, num_outputs, n, **tkwargs):
    # build a FixedNoiseGP on random training data, using a fixed
    # observation variance of 0.01 for every training point
    train_x, train_y = _get_random_data(
        batch_shape=batch_shape, num_outputs=num_outputs, n=n, **tkwargs
    )
    train_yvar = torch.full_like(train_y, 0.01)
    model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
    return model.to(**tkwargs)
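For context, a minimal standalone sketch of the construction this helper performs; the data and shapes here are illustrative, not from the original test:

import torch
from botorch.models import FixedNoiseGP

train_X = torch.rand(20, 3)  # 20 training points in 3 dimensions
train_Y = train_X.sum(dim=-1, keepdim=True)  # `n x 1` outcomes
train_Yvar = torch.full_like(train_Y, 0.01)  # known observation noise
model = FixedNoiseGP(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)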
Example #2
def _get_model(n, fixed_noise=False, use_octf=False, **tkwargs):
    # the two sub-models intentionally use different training set sizes (10 vs. 11)
    train_x1, train_y1 = _get_random_data(
        batch_shape=torch.Size(), m=1, n=10, **tkwargs
    )
    train_x2, train_y2 = _get_random_data(
        batch_shape=torch.Size(), m=1, n=11, **tkwargs
    )
    octfs = [Standardize(m=1), Standardize(m=1)] if use_octf else [None, None]
    if fixed_noise:
        train_y1_var = 0.1 + 0.1 * torch.rand_like(train_y1, **tkwargs)
        train_y2_var = 0.1 + 0.1 * torch.rand_like(train_y2, **tkwargs)
        model1 = FixedNoiseGP(
            train_X=train_x1,
            train_Y=train_y1,
            train_Yvar=train_y1_var,
            outcome_transform=octfs[0],
        )
        model2 = FixedNoiseGP(
            train_X=train_x2,
            train_Y=train_y2,
            train_Yvar=train_y2_var,
            outcome_transform=octfs[1],
        )
    else:
        model1 = SingleTaskGP(
            train_X=train_x1, train_Y=train_y1, outcome_transform=octfs[0]
        )
        model2 = SingleTaskGP(
            train_X=train_x2, train_Y=train_y2, outcome_transform=octfs[1]
        )
    model = ModelListGP(model1, model2)
    return model.to(**tkwargs)
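A helper like this is typically followed by fitting; a minimal sketch, assuming the standard BoTorch/GPyTorch fitting utilities (not part of the original snippet):

from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import SumMarginalLogLikelihood

model = _get_model(n=10, fixed_noise=True)
# a ModelListGP pairs with SumMarginalLogLikelihood, which sums the exact
# marginal log likelihoods of the independent sub-models
mll = SumMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)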
Example #3
def _get_model(self, batch_shape, num_outputs, n, **tkwargs):
    train_x, train_y = _get_random_data(
        batch_shape=batch_shape, num_outputs=num_outputs, n=n, **tkwargs
    )
    train_yvar = torch.full_like(train_y, 0.01)
    model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
    return model.to(**tkwargs)
Example #4
def _get_model(n, fixed_noise=False, **tkwargs):
    train_x1, train_x2, train_y1, train_y2 = _get_random_data(n=n, **tkwargs)
    if fixed_noise:
        train_y1_var = 0.1 + 0.1 * torch.rand_like(train_y1, **tkwargs)
        train_y2_var = 0.1 + 0.1 * torch.rand_like(train_y2, **tkwargs)
        model1 = FixedNoiseGP(train_X=train_x1,
                              train_Y=train_y1,
                              train_Yvar=train_y1_var)
        model2 = FixedNoiseGP(train_X=train_x2,
                              train_Y=train_y2,
                              train_Yvar=train_y2_var)
    else:
        model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
        model2 = SingleTaskGP(train_X=train_x2, train_Y=train_y2)
    model = ModelListGP(model1, model2)
    return model.to(**tkwargs)
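Once constructed, the model list can be queried jointly. A usage sketch, assuming _get_random_data produces 1-dimensional inputs:

import torch

model = _get_model(n=10)
test_X = torch.rand(5, 1)
posterior = model.posterior(test_X)
mean, variance = posterior.mean, posterior.variance  # one output column per sub-model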
Example #5
def _get_model(
    X: Tensor,
    Y: Tensor,
    Yvar: Tensor,
    task_feature: Optional[int] = None,
    fidelity_features: Optional[List[int]] = None,
    **kwargs: Any,
) -> GPyTorchModel:
    """Instantiate a model of type depending on the input data.

    Args:
        X: A `n x d` tensor of input features.
        Y: A `n x m` tensor of input observations.
        Yvar: A `n x m` tensor of input variances (NaN if unobserved).
        task_feature: The index of the column pertaining to the task feature
            (if present).
        fidelity_features: List of columns of X that are fidelity parameters.

    Returns:
        A GPyTorchModel (unfitted).
    """
    Yvar = Yvar.clamp_min_(MIN_OBSERVED_NOISE_LEVEL)
    is_nan = torch.isnan(Yvar)
    any_nan_Yvar = torch.any(is_nan)
    all_nan_Yvar = torch.all(is_nan)
    if any_nan_Yvar and not all_nan_Yvar:
        raise ValueError(
            "Mix of known and unknown variances indicates valuation function "
            "errors. Variances should all be specified, or none should be."
        )
    if fidelity_features is None:
        fidelity_features = []
    if len(fidelity_features) == 0:
        # only pass linear_truncated arg if there are fidelities
        kwargs = {k: v for k, v in kwargs.items() if k != "linear_truncated"}
    if len(fidelity_features) > 0:
        if task_feature is not None:  # explicit None check: index 0 is a valid task feature
            raise NotImplementedError(
                "multi-task multi-fidelity models not yet available"
            )
        # at this point we can assume that there is only a single fidelity parameter
        gp = SingleTaskMultiFidelityGP(
            train_X=X, train_Y=Y, data_fidelity=fidelity_features[0], **kwargs
        )
    elif task_feature is None and all_nan_Yvar:
        gp = SingleTaskGP(train_X=X, train_Y=Y, **kwargs)
    elif task_feature is None:
        gp = FixedNoiseGP(train_X=X, train_Y=Y, train_Yvar=Yvar, **kwargs)
    elif all_nan_Yvar:
        gp = MultiTaskGP(train_X=X, train_Y=Y, task_feature=task_feature, **kwargs)
    else:
        gp = FixedNoiseMultiTaskGP(
            train_X=X,
            train_Y=Y.view(-1),
            train_Yvar=Yvar.view(-1),
            task_feature=task_feature,
            **kwargs,
        )
    return gp
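The dispatch above can be exercised directly. A sketch of two of the branches, with illustrative data and assuming MIN_OBSERVED_NOISE_LEVEL is defined in the enclosing module:

import torch

X = torch.rand(8, 2)
Y = torch.rand(8, 1)
gp_fixed = _get_model(X, Y, Yvar=torch.full_like(Y, 0.05))  # -> FixedNoiseGP
gp_inferred = _get_model(X, Y, Yvar=torch.full_like(Y, float("nan")))  # -> SingleTaskGP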
Example #6
def _get_model_and_data(self, batch_shape, num_outputs, **tkwargs):
    train_X, train_Y = _get_random_data(
        batch_shape=batch_shape, num_outputs=num_outputs, **tkwargs
    )
    model_kwargs = {
        "train_X": train_X,
        "train_Y": train_Y,
        "train_Yvar": torch.full_like(train_Y, 0.01),
    }
    model = FixedNoiseGP(**model_kwargs)
    return model, model_kwargs
Example #7
def _get_model_and_data(
    self, batch_shape, m, outcome_transform=None, input_transform=None, **tkwargs
):
    train_X, train_Y = _get_random_data(batch_shape=batch_shape, m=m, **tkwargs)
    model_kwargs = {
        "train_X": train_X,
        "train_Y": train_Y,
        "train_Yvar": torch.full_like(train_Y, 0.01),
        "input_transform": input_transform,
        "outcome_transform": outcome_transform,
    }
    model = FixedNoiseGP(**model_kwargs)
    return model, model_kwargs
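For reference, a standalone sketch of a FixedNoiseGP with both transforms wired in; shapes are illustrative, and the transforms come from botorch.models.transforms:

import torch
from botorch.models import FixedNoiseGP
from botorch.models.transforms import Normalize, Standardize

train_X = torch.rand(15, 2)
train_Y = torch.rand(15, 1)
model = FixedNoiseGP(
    train_X=train_X,
    train_Y=train_Y,
    train_Yvar=torch.full_like(train_Y, 0.01),
    input_transform=Normalize(d=2),      # scale inputs to the unit cube
    outcome_transform=Standardize(m=1),  # standardize outcomes to zero mean, unit variance
)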
Example #8
def _get_model(X: Tensor, Y: Tensor, Yvar: Tensor,
               task_feature: Optional[int]) -> GPyTorchModel:
    """Instantiate a model of type depending on the input data."""
    Yvar = Yvar.clamp_min_(MIN_OBSERVED_NOISE_LEVEL)  # pyre-ignore: [16]
    if task_feature is None:
        gp = FixedNoiseGP(train_X=X, train_Y=Y, train_Yvar=Yvar)
    else:
        gp = FixedNoiseMultiTaskGP(
            train_X=X,
            train_Y=Y.view(-1),
            train_Yvar=Yvar.view(-1),
            task_feature=task_feature,
        )
    return gp
Example #9
def _get_model_and_data(self, batch_shape, m, outcome_transform=None, **tkwargs):
    train_X, train_Y = _get_random_data(
        batch_shape=batch_shape, num_outputs=m, **tkwargs
    )
    model_kwargs = {
        "train_X": train_X,
        "train_Y": train_Y,
        "train_Yvar": torch.full_like(train_Y, 0.01),
    }
    if outcome_transform is not None:
        model_kwargs["outcome_transform"] = outcome_transform
    model = FixedNoiseGP(**model_kwargs)
    return model, model_kwargs
Example #10
def __init__(self, stem, init_x, init_y, alpha_eps, lr, **kwargs):
    stem = stem.to(init_x.device)
    transformed_y, _, sigma2_i = self._transform_targets(init_y, alpha_eps)
    if transformed_y.t().shape[0] != 1:
        _batch_shape = transformed_y.t().shape[:-1]
    else:
        _batch_shape = torch.Size()
    features = stem(init_x)
    gp = FixedNoiseGP(
        features,
        transformed_y,
        sigma2_i,
        covar_module=ScaleKernel(
            RBFKernel(batch_shape=_batch_shape, ard_num_dims=stem.output_dim),
            batch_shape=_batch_shape,
        ),
    )
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    super().__init__(stem, gp, mll, alpha_eps, lr)
    self._raw_inputs = [init_x]
    self._target_batch_shape = _batch_shape
Example #11
def _get_model(
    X: Tensor,
    Y: Tensor,
    Yvar: Tensor,
    task_feature: Optional[int],
    fidelity_features: Optional[List[int]] = None,
    fidelity_model_id: Optional[int] = None,
) -> GPyTorchModel:
    """Instantiate a model of type depending on the input data."""
    Yvar = Yvar.clamp_min_(MIN_OBSERVED_NOISE_LEVEL)  # pyre-ignore: [16]
    is_nan = torch.isnan(Yvar)
    any_nan_Yvar = torch.any(is_nan)
    all_nan_Yvar = torch.all(is_nan)
    if any_nan_Yvar and not all_nan_Yvar:
        raise ValueError(
            "Mix of known and unknown variances indicates "
            "valuation function errors. Variances should all be specified, or "
            "none should be.")
    if fidelity_features is None:
        fidelity_features = []
    if fidelity_model_id is None or len(fidelity_features) == 0:
        if task_feature is None and all_nan_Yvar:
            gp = SingleTaskGP(train_X=X, train_Y=Y)
        elif task_feature is None:
            gp = FixedNoiseGP(train_X=X, train_Y=Y, train_Yvar=Yvar)
        elif all_nan_Yvar:
            gp = MultiTaskGP(train_X=X, train_Y=Y, task_feature=task_feature)
        else:
            gp = FixedNoiseMultiTaskGP(
                train_X=X,
                train_Y=Y.view(-1),
                train_Yvar=Yvar.view(-1),
                task_feature=task_feature,
            )
    else:
        # `model_list` is assumed to be defined at module level, mapping
        # fidelity_model_id to a fidelity-aware model class
        gp_model = model_list[fidelity_model_id]
        # pyre-ignore [29]
        gp = gp_model(train_X=X, train_Y=Y, train_data_fidelity=False)
    return gp
Example #12
def fixed_noise_gp_model_constructor(
    Xs: List[Tensor],
    Ys: List[Tensor],
    Yvars: List[Tensor],  # Maybe these should be optional where irrelevant?
    task_features: List[int],
    fidelity_features: List[int],
    metric_names: List[str],
    state_dict: Optional[Dict[str, Tensor]] = None,
    refit_model: bool = True,
    **kwargs: Any,
) -> Model:
    gp = FixedNoiseGP(train_X=Xs[0], train_Y=Ys[0], train_Yvar=Yvars[0], **kwargs)
    gp.to(Xs[0])
    if state_dict is not None:
        gp.load_state_dict(state_dict)
    if state_dict is None or refit_model:
        fit_gpytorch_model(ExactMarginalLogLikelihood(gp.likelihood, gp))
    return gp
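A sketch of how such a constructor might be invoked, with illustrative data; the list arguments follow the per-outcome convention of the surrounding codebase:

import torch

X = torch.rand(12, 3)
Y = torch.rand(12, 1)
Yvar = torch.full_like(Y, 0.1)
gp = fixed_noise_gp_model_constructor(
    Xs=[X],
    Ys=[Y],
    Yvars=[Yvar],
    task_features=[],
    fidelity_features=[],
    metric_names=["objective"],
)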
Example #13
def _get_noiseless_fantasy_model(model: FixedNoiseGP, batch_X_observed: Tensor,
                                 Y_fantasized: Tensor) -> FixedNoiseGP:
    r"""Construct a fantasy model from a fitted model and provided fantasies.

    The fantasy model uses the hyperparameters from the original fitted model and
    assumes the fantasies are noiseless.

    Args:
        model: A fitted FixedNoiseGP.
        batch_X_observed: A `b x n x d` tensor of inputs where `b` is the number of
            fantasies.
        Y_fantasized: A `b x n` tensor of fantasized targets where `b` is the number of
            fantasies.

    Returns:
        The fantasy model.
    """
    # initialize a copy of FixedNoiseGP on the original training inputs
    # this makes FixedNoiseGP a non-batch GP, so that the same hyperparameters
    # are used across all batches (by default, a GP with batched training data
    # uses independent hyperparameters for each batch).
    fantasy_model = FixedNoiseGP(
        train_X=model.train_inputs[0],
        train_Y=model.train_targets.unsqueeze(-1),
        train_Yvar=model.likelihood.noise_covar.noise.unsqueeze(-1),
    )
    # update training inputs/targets to be batch mode fantasies
    fantasy_model.set_train_data(inputs=batch_X_observed,
                                 targets=Y_fantasized,
                                 strict=False)
    # use noiseless fantasies
    fantasy_model.likelihood.noise_covar.noise = torch.full_like(
        Y_fantasized, 1e-7)
    # load hyperparameters from original model
    state_dict = deepcopy(model.state_dict())
    fantasy_model.load_state_dict(state_dict)
    return fantasy_model
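A usage sketch, assuming `model` is an already fitted FixedNoiseGP trained on `n` points in `d` dimensions:

import torch

b = 4  # number of fantasies
n, d = model.train_inputs[0].shape[-2], model.train_inputs[0].shape[-1]
batch_X_observed = model.train_inputs[0].expand(b, n, d)
Y_fantasized = torch.randn(b, n)  # illustrative fantasy targets
fantasy_model = _get_noiseless_fantasy_model(model, batch_X_observed, Y_fantasized)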
Example #14
def fixed_noise_gp_model_constructor(
    Xs: List[Tensor],
    Ys: List[Tensor],
    Yvars: List[Tensor],
    task_features: List[int],
    fidelity_features: List[int],
    metric_names: List[str],
    state_dict: Optional[Dict[str, Tensor]] = None,
    refit_model: bool = True,
    **kwargs: Any,
) -> Model:
    gp = FixedNoiseGP(train_X=Xs[0], train_Y=Ys[0], train_Yvar=Yvars[0], **kwargs)
    gp.to(Xs[0])
    if state_dict is not None:
        # pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
        #  param but got `Dict[str, Tensor]`.
        gp.load_state_dict(state_dict)
    if state_dict is None or refit_model:
        fit_gpytorch_model(ExactMarginalLogLikelihood(gp.likelihood, gp))
    return gp
Example #15
def _get_model(
    X: Tensor,
    Y: Tensor,
    Yvar: Tensor,
    task_feature: Optional[int] = None,
    fidelity_features: Optional[List[int]] = None,
    use_input_warping: bool = False,
    **kwargs: Any,
) -> GPyTorchModel:
    """Instantiate a model of type depending on the input data.

    Args:
        X: A `n x d` tensor of input features.
        Y: A `n x m` tensor of input observations.
        Yvar: A `n x m` tensor of input variances (NaN if unobserved).
        task_feature: The index of the column pertaining to the task feature
            (if present).
        fidelity_features: List of columns of X that are fidelity parameters.

    Returns:
        A GPyTorchModel (unfitted).
    """
    Yvar = Yvar.clamp_min(MIN_OBSERVED_NOISE_LEVEL)  # pyre-ignore[16]
    is_nan = torch.isnan(Yvar)
    any_nan_Yvar = torch.any(is_nan)
    all_nan_Yvar = torch.all(is_nan)
    if any_nan_Yvar and not all_nan_Yvar:
        if task_feature is not None:  # explicit None check: index 0 is a valid task feature
            # TODO (jej): Replace with inferred noise before making perf judgements.
            Yvar[is_nan] = MIN_OBSERVED_NOISE_LEVEL
        else:
            raise ValueError(
                "Mix of known and unknown variances indicates valuation function "
                "errors. Variances should all be specified, or none should be."
            )
    if use_input_warping:
        warp_tf = get_warping_transform(
            d=X.shape[-1],
            task_feature=task_feature,
            batch_shape=X.shape[:-2],  # pyre-ignore [6]
        )
    else:
        warp_tf = None
    if fidelity_features is None:
        fidelity_features = []
    if len(fidelity_features) == 0:
        # only pass linear_truncated arg if there are fidelities
        kwargs = {k: v for k, v in kwargs.items() if k != "linear_truncated"}
    if len(fidelity_features) > 0:
        if task_feature is not None:  # explicit None check: index 0 is a valid task feature
            raise NotImplementedError(  # pragma: no cover
                "multi-task multi-fidelity models not yet available"
            )
        # at this point we can assume that there is only a single fidelity parameter
        gp = SingleTaskMultiFidelityGP(
            train_X=X,
            train_Y=Y,
            data_fidelity=fidelity_features[0],
            input_transform=warp_tf,
            **kwargs,
        )
    elif task_feature is None and all_nan_Yvar:
        gp = SingleTaskGP(train_X=X, train_Y=Y, input_transform=warp_tf, **kwargs)
    elif task_feature is None:
        gp = FixedNoiseGP(
            train_X=X, train_Y=Y, train_Yvar=Yvar, input_transform=warp_tf, **kwargs
        )
    else:
        # instantiate multitask GP
        all_tasks, _, _ = MultiTaskGP.get_all_tasks(X, task_feature)
        num_tasks = len(all_tasks)
        prior_dict = kwargs.get("prior")
        prior = None
        if prior_dict is not None:
            prior_type = prior_dict.get("type", None)
            if issubclass(prior_type, LKJCovariancePrior):
                sd_prior = prior_dict.get("sd_prior", GammaPrior(1.0, 0.15))
                sd_prior._event_shape = torch.Size([num_tasks])
                eta = prior_dict.get("eta", 0.5)
                if not isinstance(eta, (float, int)):
                    raise ValueError(f"eta must be a real number, your eta was {eta}")
                prior = LKJCovariancePrior(num_tasks, eta, sd_prior)

            else:
                raise NotImplementedError(
                    "Currently only LKJ prior is supported,"
                    f"your prior type was {prior_type}."
                )

        if all_nan_Yvar:
            gp = MultiTaskGP(
                train_X=X,
                train_Y=Y,
                task_feature=task_feature,
                rank=kwargs.get("rank"),
                task_covar_prior=prior,
                input_transform=warp_tf,
            )
        else:
            gp = FixedNoiseMultiTaskGP(
                train_X=X,
                train_Y=Y,
                train_Yvar=Yvar,
                task_feature=task_feature,
                rank=kwargs.get("rank"),
                task_covar_prior=prior,
                input_transform=warp_tf,
            )
    return gp
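A sketch of routing an LKJ task-covariance prior through kwargs, assuming X, Y, Yvar tensors of the documented shapes and the gpytorch priors used above:

from gpytorch.priors import GammaPrior, LKJCovariancePrior

gp = _get_model(
    X=X,
    Y=Y,
    Yvar=Yvar,
    task_feature=2,  # illustrative: column 2 of X holds the task index
    prior={"type": LKJCovariancePrior, "sd_prior": GammaPrior(1.0, 0.15), "eta": 0.6},
    rank=1,
)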