Example 1
def _initialize_bayesian_gp(xr, xg, y):
    """
    Initialize a Bayesian GP where we use the legacy data to form priors on the
    model's (constant) mean function and kernel parameters.

    Get Gaussians for priors based on the empirical moments of models trained on
    the legacy tasks.  For now, weight each legacy task equally.
    """

    assert xg.shape[1] == 1  # Not sure how this might work otherwise

    # 1) Train models on legacy tasks to get data to form priors:
    legacy_parameters = {
        "scales": [],
        "variances": [],
        "jitters": [],
        "biases": []
    }
    for xgi in np.unique(xg):
        if xgi == "0": continue
        i = np.where(xg == xgi)[0]
        xi, yi = xr[i], y[i]
        kern = Rbf(xi.shape[1], ARD=True)
        model = GPR(xi,
                    yi,
                    kern,
                    mean_function=gptorch.mean_functions.Constant(1),
                    likelihood=gptorch.likelihoods.Gaussian(variance=0.001))
        # We don't need a good model--just a ballpark of reasonable
        # hyperparameters so we can get a prior.
        model.optimize(method="Adam", max_iter=200)
        # ASSUMPTION: gptorch uses ExpTransform, as BayesianGP will!
        # Valid for at least gptorch version <= 0.3.
        # Get raw, torch param values
        legacy_parameters["scales"].append(model.kernel.length_scales.data)
        legacy_parameters["variances"].append(model.kernel.variance.data)
        legacy_parameters["jitters"].append(model.likelihood.variance.data)
        legacy_parameters["biases"].append(model.mean_function.val.data)

    # Stack each parameter's values so we can take empirical moments:
    for key in legacy_parameters:
        legacy_parameters[key] = torch.stack(legacy_parameters[key])

    # 2) Initialize the Bayesian GP:
    i = np.where(xg == "0")[0]
    model = BayesianGP(xr[i], y[i].flatten())
    model.raw_scales_prior = Normal(legacy_parameters["scales"].mean(dim=0),
                                    legacy_parameters["scales"].std(dim=0))
    model.raw_variance_prior = Normal(
        legacy_parameters["variances"].mean(dim=0),
        legacy_parameters["variances"].std(dim=0))
    model.raw_jitter_prior = Normal(legacy_parameters["jitters"].mean(dim=0),
                                    legacy_parameters["jitters"].std(dim=0))
    model.bias_prior = Normal(legacy_parameters["biases"].mean(dim=0),
                              legacy_parameters["biases"].std(dim=0))

    return model
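A minimal usage sketch (not from the source): it assumes xr is an [n, d] float array, xg an [n, 1] array of string task labels with "0" reserved for the target task, and y an [n, 1] target array. The data below are synthetic placeholders.

import numpy as np

# Hypothetical data: three legacy tasks ("1", "2", "3") plus the target task "0".
n, d = 40, 2
xr = np.random.randn(n, d)
xg = np.random.choice(["0", "1", "2", "3"], size=(n, 1))
y = np.random.randn(n, 1)

# Priors on the mean/kernel parameters come from models fit to tasks "1"-"3";
# the returned BayesianGP is conditioned on the task-"0" data only.
model = _initialize_bayesian_gp(xr, xg, y)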
Example 2
    def _predict_fy_samples(self, attr):
        """
        attr="predict_f_samples" or "predict_y_samples"
        """

        # TODO mock a GPModel?  Using GPR for the moment.
        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(dx, ARD=True)
        gp = GPR(x, y, kern)
        f = getattr(gp, attr)

        n_test = 5
        x_test = np.random.randn(n_test, dx)
        samples = f(x_test)
        assert isinstance(samples, np.ndarray)
        assert samples.ndim == 3  # [sample x n_test x dy]
        assert samples.shape == (1, n_test, dy)

        n_samples = 3
        samples_2 = f(x_test, n_samples=n_samples)
        assert isinstance(samples_2, np.ndarray)
        assert samples_2.ndim == 3  # [sample x n_test x dy]
        assert samples_2.shape == (n_samples, n_test, dy)

        x_test_torch = TensorType(x_test)
        samples_torch = f(x_test_torch)
        assert isinstance(samples_torch, TensorType)
        assert samples_torch.ndimension() == 3  # [1 x n_test x dy]
        assert samples_torch.shape == (1, n_test, dy)
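Presumably this helper is exercised by thin per-method wrappers; a sketch, assuming the test class exposes it as shown above:

    def test_predict_f_samples(self):
        self._predict_fy_samples("predict_f_samples")

    def test_predict_y_samples(self):
        self._predict_fy_samples("predict_y_samples")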
Example 3
    def _predict_fy(self, attr):
        """
        attr='predict_f' or 'predict_y'
        """

        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(dx, ARD=True)
        gp = GPR(x, y, kern)

        n_test = 5
        x_test = np.random.randn(n_test, dx)
        f = getattr(gp, attr)
        mu, v = f(x_test)
        for result in [mu, v]:
            assert isinstance(result, np.ndarray)
            assert result.ndim == 2  # [n_test x dy]
            assert result.shape == (n_test, dy)

        x_test_torch = TensorType(x_test)
        mu_torch, v_torch = f(x_test_torch)
        for result in [mu_torch, v_torch]:
            assert isinstance(result, TensorType)
            assert result.ndimension() == 2  # [n_test x dy]
            assert result.shape == (n_test, dy)
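As with the sampling helper, a sketch of the wrappers that would drive this parametrized test:

    def test_predict_f(self):
        self._predict_fy("predict_f")

    def test_predict_y(self):
        self._predict_fy("predict_y")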
Example 4
    def test_predict_cuda(self):
        n, n_test, dx, dy = 5, 7, 3, 2
        x, y = torch.randn(n, dx), torch.randn(n, dy)
        kern = Rbf(x.shape[1], ARD=True)
        model = GPR(x, y, kern)
        model.cuda()

        x_test = torch.randn(n_test, dx, dtype=torch_dtype).cuda()
        for t in model._predict(x_test):  # mean, variance
            assert t.is_cuda
Example 5
    def test_init(self):
        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(x.shape[1], ARD=True)

        # init w/ numpy
        GPR(x, y, kern)
        # init w/ PyTorch tensors:
        GPR(TensorType(x), TensorType(y), kern)
        # init w/ a mean function:
        GPR(x, y, kern, mean_function=torch.nn.Linear(dx, dy))
Example 6
def initialize_model(xr, xg, y, model_type):
    if model_type == "GP":
        assert xg.shape[1] == 1
        # Keep only the target task's data (task "0"):
        i = np.where(xg.flatten() == "0")[0]
        xr, xg, y = xr[i], xg[i], y[i]
        return GPR(
            xr,
            y,
            Rbf(xr.shape[1], ARD=True),
            likelihood=gptorch.likelihoods.Gaussian(variance=0.001)
        )
    else:
        return experiment_utils.initialize_model(xr, xg, y, model_type)
Example 7
    def test_predict(self):
        n, n_test, dx, dy = 5, 7, 3, 2
        x, y = torch.randn(n, dx), torch.randn(n, dy)
        kern = Rbf(x.shape[1], ARD=True)
        model = GPR(x, y, kern)

        x_test = torch.randn(n_test, dx)
        mu, var = model._predict(x_test)  # diagonal (per-point) variance
        assert mu.shape == (n_test, dy)
        assert var.shape == (n_test, dy)

        mu_full, cov = model._predict(x_test, diag=False)  # full covariance
        assert mu_full.shape == (n_test, dy)
        assert cov.shape == (n_test, n_test)
Example 8
def initialize_model(xr, xg, y, model_type):
    """
    Initialize the metamodel to be used for BO.
    """

    if model_type == "BEGP":
        return EGP(xr, xg, y)
    elif model_type == "EGP":
        return EGP(xr, xg, y, embedder_type=DeterministicEmbedder)
    elif model_type == "GP":
        x = TensorType(np.zeros((0, xr.shape[1])))
        y = TensorType(np.zeros((0, 1)))
        return GPR(x,
                   y,
                   Rbf(x.shape[1], ARD=True),
                   likelihood=gptorch.likelihoods.Gaussian(variance=0.001))
    elif model_type == "BGP":
        return _initialize_bayesian_gp(xr, xg, y)
    raise ValueError("Unexpected model type %s" % model_type)
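A short usage sketch (hypothetical; xr, xg, y shaped as in Example 1, and the model-type strings are the ones dispatched above):

model = initialize_model(xr, xg, y, "BEGP")  # dispatches to EGP(xr, xg, y)
gp = initialize_model(xr, xg, y, "GP")       # plain GP on an empty dataset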
Example 9
    def test_predict_y_samples(self):
        # TODO mock a GPModel?  Using GPR for the moment.
        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(dx, ARD=True)
        gp = GPR(x, y, kern)

        n_test = 5
        x_test = np.random.randn(n_test, dx)
        y_samples = gp.predict_y_samples(x_test)
        assert isinstance(y_samples, th.Tensor)
        assert y_samples.ndimension() == 3  # [sample x n_test x dy]
        assert y_samples.shape == (1, n_test, dy)

        n_samples = 3
        y_samples_2 = gp.predict_y_samples(x_test, n_samples=n_samples)
        assert isinstance(y_samples_2, th.Tensor)
        assert y_samples_2.ndimension() == 3  # [sample x n_test x dy]
        assert y_samples_2.shape == (n_samples, n_test, dy)
Example 10
    def _predict_fy_samples_cuda(self, attr):
        """
        attr="predict_f_samples" or "predict_y_samples"
        """

        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(dx, ARD=True)
        gp = GPR(x, y, kern)
        f = getattr(gp, attr)

        n_test = 5
        x_test = np.random.randn(n_test, dx)
        x_test_torch = TensorType(x_test)

        gp.cuda()
        # Numpy input:
        samples_cuda_np = f(x_test)
        assert isinstance(samples_cuda_np, np.ndarray)
        # PyTorch (cpu) input
        samples_cuda_torch = f(x_test_torch)
        assert samples_cuda_torch.device == x_test_torch.device
        # PyTorch (GPU) input
        samples_cuda_gpu = f(x_test_torch.to("cuda"))
        assert samples_cuda_gpu.is_cuda
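A sketch of the wrappers that would drive this CUDA helper, mirroring the CPU tests above:

    def test_predict_f_samples_cuda(self):
        self._predict_fy_samples_cuda("predict_f_samples")

    def test_predict_y_samples_cuda(self):
        self._predict_fy_samples_cuda("predict_y_samples")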
Example 11
def _get_model():
    n, dx, dy = 5, 3, 2
    x, y = np.random.randn(n, dx), np.random.randn(n, dy)
    kern = Rbf(dx, ARD=True)
    return GPR(x, y, kern)
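A sketch of how this fixture might be used, relying only on the predict_y API exercised in Example 3:

gp = _get_model()
x_test = np.random.randn(4, 3)    # dx = 3, matching the fixture
mean, var = gp.predict_y(x_test)  # each a [4 x 2] np.ndarray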