from botorch.models import SingleTaskMultiFidelityGP

# _get_random_data_with_fidelity is a helper defined in the surrounding
# test module.


def _get_model_and_data(
    iteration_fidelity,
    data_fidelity,
    batch_shape,
    m,
    lin_truncated,
    outcome_transform=None,
    **tkwargs,
):
    n_fidelity = (iteration_fidelity is not None) + (data_fidelity is not None)
    train_X, train_Y = _get_random_data_with_fidelity(
        batch_shape=batch_shape,
        m=m,
        n_fidelity=n_fidelity,
        **tkwargs,
    )
    model_kwargs = {
        "train_X": train_X,
        "train_Y": train_Y,
        "iteration_fidelity": iteration_fidelity,
        "data_fidelity": data_fidelity,
        "linear_truncated": lin_truncated,
    }
    if outcome_transform is not None:
        model_kwargs["outcome_transform"] = outcome_transform
    model = SingleTaskMultiFidelityGP(**model_kwargs)
    return model, model_kwargs
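A minimal usage sketch for the helper above, assuming `_get_random_data_with_fidelity` accepts the usual `dtype`/`device` tensor kwargs; the fidelity column index and all argument values are hypothetical:

import torch

# Hypothetical call: one data-fidelity column, no iteration fidelity,
# a single outcome, and non-batched double-precision data on CPU.
model, model_kwargs = _get_model_and_data(
    iteration_fidelity=None,
    data_fidelity=3,
    batch_shape=torch.Size([]),
    m=1,
    lin_truncated=True,
    dtype=torch.double,
    device=torch.device("cpu"),
)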
Example #2
from typing import Any, List, Optional

import torch
from botorch.models import (
    FixedNoiseGP,
    FixedNoiseMultiTaskGP,
    MultiTaskGP,
    SingleTaskGP,
    SingleTaskMultiFidelityGP,
)
from botorch.models.gpytorch import GPyTorchModel
from torch import Tensor

# MIN_OBSERVED_NOISE_LEVEL is a small positive constant defined in the
# surrounding module.

def _get_model(
    X: Tensor,
    Y: Tensor,
    Yvar: Tensor,
    task_feature: Optional[int] = None,
    fidelity_features: Optional[List[int]] = None,
    **kwargs: Any,
) -> GPyTorchModel:
    """Instantiate a model of type depending on the input data.

    Args:
        X: An `n x d` tensor of input features.
        Y: An `n x m` tensor of observed outcomes.
        Yvar: An `n x m` tensor of observation variances (NaN if unobserved).
        task_feature: The index of the column pertaining to the task feature
            (if present).
        fidelity_features: List of columns of X that are fidelity parameters.

    Returns:
        A GPyTorchModel (unfitted).
    """
    # Clamp out of place so the caller's Yvar tensor is not mutated.
    Yvar = Yvar.clamp_min(MIN_OBSERVED_NOISE_LEVEL)
    is_nan = torch.isnan(Yvar)
    any_nan_Yvar = torch.any(is_nan)
    all_nan_Yvar = torch.all(is_nan)
    if any_nan_Yvar and not all_nan_Yvar:
        raise ValueError(
            "Mix of known and unknown variances indicates evaluation function "
            "errors. Variances should all be specified, or none should be."
        )
    if fidelity_features is None:
        fidelity_features = []
    if len(fidelity_features) == 0:
        # only pass linear_truncated arg if there are fidelities
        kwargs = {k: v for k, v in kwargs.items() if k != "linear_truncated"}
    if len(fidelity_features) > 0:
        # task_feature may legitimately be 0, so compare against None.
        if task_feature is not None:
            raise NotImplementedError(
                "multi-task multi-fidelity models not yet available"
            )
        # at this point we can assume that there is only a single fidelity parameter
        gp = SingleTaskMultiFidelityGP(
            train_X=X, train_Y=Y, data_fidelity=fidelity_features[0], **kwargs
        )
    elif task_feature is None and all_nan_Yvar:
        gp = SingleTaskGP(train_X=X, train_Y=Y, **kwargs)
    elif task_feature is None:
        gp = FixedNoiseGP(train_X=X, train_Y=Y, train_Yvar=Yvar, **kwargs)
    elif all_nan_Yvar:
        gp = MultiTaskGP(train_X=X, train_Y=Y, task_feature=task_feature, **kwargs)
    else:
        gp = FixedNoiseMultiTaskGP(
            train_X=X,
            train_Y=Y.view(-1),
            train_Yvar=Yvar.view(-1),
            task_feature=task_feature,
            **kwargs,
        )
    return gp
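A sketch of the dispatch above on synthetic data, assuming this `_get_model` and the model classes it returns are in scope:

import torch

X = torch.rand(20, 3, dtype=torch.double)
Y = torch.rand(20, 1, dtype=torch.double)

# All variances NaN and no task feature -> SingleTaskGP (noise is inferred).
gp = _get_model(X=X, Y=Y, Yvar=torch.full_like(Y, float("nan")))

# Fully observed variances and no task feature -> FixedNoiseGP.
gp = _get_model(X=X, Y=Y, Yvar=torch.full_like(Y, 0.01))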
Example #3
def test_init_error(self):
    train_X = torch.rand(2, 2, device=self.device)
    train_Y = torch.rand(2, 1)
    for lin_truncated in (True, False):
        with self.assertRaises(UnsupportedError):
            SingleTaskMultiFidelityGP(
                train_X, train_Y, linear_truncated=lin_truncated
            )
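The error is expected because neither `iteration_fidelity` nor `data_fidelity` is specified; supplying one makes construction succeed. A sketch, with `data_fidelity=1` as a hypothetical column index:

# Treat column 1 of train_X as the data-fidelity parameter.
model = SingleTaskMultiFidelityGP(train_X, train_Y, data_fidelity=1)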
Example #4
def _get_model_and_data(
    iteration_fidelity,
    data_fidelity,
    batch_shape,
    num_outputs,
    lin_truncated,
    **tkwargs,
):
    n_fidelity = (iteration_fidelity is not None) + (data_fidelity is not None)
    train_X, train_Y = _get_random_data_with_fidelity(
        batch_shape=batch_shape,
        num_outputs=num_outputs,
        n_fidelity=n_fidelity,
        **tkwargs,
    )
    model_kwargs = {
        "train_X": train_X,
        "train_Y": train_Y,
        "iteration_fidelity": iteration_fidelity,
        "data_fidelity": data_fidelity,
        "linear_truncated": lin_truncated,
    }
    model = SingleTaskMultiFidelityGP(**model_kwargs)
    return model, model_kwargs
Example #5
from typing import Any, List, Optional

import torch
from botorch.models import (
    FixedNoiseGP,
    FixedNoiseMultiTaskGP,
    MultiTaskGP,
    SingleTaskGP,
    SingleTaskMultiFidelityGP,
)
from botorch.models.gpytorch import GPyTorchModel
from gpytorch.priors import GammaPrior, LKJCovariancePrior
from torch import Tensor

# MIN_OBSERVED_NOISE_LEVEL and get_warping_transform are defined in the
# surrounding module.

def _get_model(
    X: Tensor,
    Y: Tensor,
    Yvar: Tensor,
    task_feature: Optional[int] = None,
    fidelity_features: Optional[List[int]] = None,
    use_input_warping: bool = False,
    **kwargs: Any,
) -> GPyTorchModel:
    """Instantiate a model of type depending on the input data.

    Args:
        X: An `n x d` tensor of input features.
        Y: An `n x m` tensor of observed outcomes.
        Yvar: An `n x m` tensor of observation variances (NaN if unobserved).
        task_feature: The index of the column pertaining to the task feature
            (if present).
        fidelity_features: List of columns of X that are fidelity parameters.
        use_input_warping: If True, fit the model with a learned input
            warping transform.

    Returns:
        A GPyTorchModel (unfitted).
    """
    Yvar = Yvar.clamp_min(MIN_OBSERVED_NOISE_LEVEL)  # pyre-ignore[16]
    is_nan = torch.isnan(Yvar)
    any_nan_Yvar = torch.any(is_nan)
    all_nan_Yvar = torch.all(is_nan)
    if any_nan_Yvar and not all_nan_Yvar:
        # task_feature may legitimately be 0, so compare against None.
        if task_feature is not None:
            # TODO (jej): Replace with inferred noise before making perf judgements.
            Yvar[is_nan] = MIN_OBSERVED_NOISE_LEVEL
        else:
            raise ValueError(
                "Mix of known and unknown variances indicates evaluation "
                "function errors. Variances should all be specified, or none "
                "should be."
            )
    if use_input_warping:
        warp_tf = get_warping_transform(
            d=X.shape[-1],
            task_feature=task_feature,
            batch_shape=X.shape[:-2],  # pyre-ignore [6]
        )
    else:
        warp_tf = None
    if fidelity_features is None:
        fidelity_features = []
    if len(fidelity_features) == 0:
        # only pass linear_truncated arg if there are fidelities
        kwargs = {k: v for k, v in kwargs.items() if k != "linear_truncated"}
    if len(fidelity_features) > 0:
        if task_feature is not None:
            raise NotImplementedError(  # pragma: no cover
                "multi-task multi-fidelity models not yet available"
            )
        # at this point we can assume that there is only a single fidelity parameter
        gp = SingleTaskMultiFidelityGP(
            train_X=X,
            train_Y=Y,
            data_fidelity=fidelity_features[0],
            input_transform=warp_tf,
            **kwargs,
        )
    elif task_feature is None and all_nan_Yvar:
        gp = SingleTaskGP(train_X=X, train_Y=Y, input_transform=warp_tf, **kwargs)
    elif task_feature is None:
        gp = FixedNoiseGP(
            train_X=X, train_Y=Y, train_Yvar=Yvar, input_transform=warp_tf, **kwargs
        )
    else:
        # instantiate multitask GP
        all_tasks, _, _ = MultiTaskGP.get_all_tasks(X, task_feature)
        num_tasks = len(all_tasks)
        prior_dict = kwargs.get("prior")
        prior = None
        if prior_dict is not None:
            prior_type = prior_dict.get("type", None)
            # Guard against a missing or non-class "type" entry before the
            # issubclass check.
            if prior_type is not None and issubclass(prior_type, LKJCovariancePrior):
                sd_prior = prior_dict.get("sd_prior", GammaPrior(1.0, 0.15))
                # Expand the sd prior's event shape to one entry per task.
                sd_prior._event_shape = torch.Size([num_tasks])
                eta = prior_dict.get("eta", 0.5)
                if not isinstance(eta, (float, int)):
                    raise ValueError(f"eta must be a real number; got {eta}.")
                prior = LKJCovariancePrior(num_tasks, eta, sd_prior)

            else:
                raise NotImplementedError(
                    "Currently only the LKJ prior is supported; "
                    f"your prior type was {prior_type}."
                )

        if all_nan_Yvar:
            gp = MultiTaskGP(
                train_X=X,
                train_Y=Y,
                task_feature=task_feature,
                rank=kwargs.get("rank"),
                task_covar_prior=prior,
                input_transform=warp_tf,
            )
        else:
            gp = FixedNoiseMultiTaskGP(
                train_X=X,
                train_Y=Y,
                train_Yvar=Yvar,
                task_feature=task_feature,
                rank=kwargs.get("rank"),
                task_covar_prior=prior,
                input_transform=warp_tf,
            )
    return gp
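A usage sketch for the warping and multi-task branches above; it assumes this `_get_model` and its module-local helpers (`get_warping_transform`, `MIN_OBSERVED_NOISE_LEVEL`) resolve, and the task layout and prior settings are hypothetical:

import torch
from gpytorch.priors import LKJCovariancePrior

# Single-task data with a learned input warping transform.
X = torch.rand(20, 3, dtype=torch.double)
Y = torch.rand(20, 1, dtype=torch.double)
gp = _get_model(
    X=X, Y=Y, Yvar=torch.full_like(Y, float("nan")), use_input_warping=True
)

# Multi-task data: column 0 holds the task index, variances are observed,
# and an LKJ prior is placed on the task covariance matrix.
tasks = torch.randint(2, (20, 1), dtype=torch.double)
X_mt = torch.cat([tasks, torch.rand(20, 2, dtype=torch.double)], dim=-1)
gp_mt = _get_model(
    X=X_mt,
    Y=Y,
    Yvar=torch.full_like(Y, 0.01),
    task_feature=0,
    prior={"type": LKJCovariancePrior, "eta": 0.6},
)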