Example #1
    def test_predict_mean_covariance(self):
        """
        Test propagation of full Gaussian through the likelihood density.
        """
        lik = self._standard_likelihood()
        input_mean = Variable(TensorType([0.0, 1.0, 2.1]))
        input_covariance = Variable(
            TensorType([[1.0, 0.5, 0.2], [0.5, 1.0, 0.5], [0.2, 0.5, 1.0]]))
        expected_output_mean = input_mean
        # Ugh, sorry about this.  Will clean up when we move PyTorch forward!
        expected_output_covariance = (
            input_covariance +
            Variable(TensorType([self._expected_likelihood_variance
                                 ])).expand_as(input_covariance).diag().diag())

        # API
        output_mean, output_covariance = lik.predict_mean_covariance(
            input_mean, input_covariance)
        assert isinstance(output_mean, Variable)
        assert isinstance(output_covariance, Variable)

        # Value
        assert all(
            output_mean.data.numpy() == expected_output_mean.data.numpy())
        assert output_covariance.data.numpy() == pytest.approx(
            expected_output_covariance.data.numpy())
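
A note on the workaround above: the expand/diag chain just builds "input covariance plus noise variance on the diagonal". A minimal sketch of the same quantity in modern PyTorch (the variance value here is a hypothetical stand-in, not taken from the test fixture):

import torch

sigma2 = 0.1  # hypothetical likelihood noise variance
input_covariance = torch.tensor(
    [[1.0, 0.5, 0.2], [0.5, 1.0, 0.5], [0.2, 0.5, 1.0]])
# A Gaussian likelihood adds its noise variance to the diagonal:
expected_output_covariance = (
    input_covariance + sigma2 * torch.eye(input_covariance.shape[0]))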
Example #2
    def test_predict(self):
        """
        Just the ._predict() method
        """

        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        x_test = TensorType(_InducingData._x_test())
        mu, s = TestSVGP._y_pred()
        gaussian_predictions(model, x_test, mu, s)
Example #3
    def compute_loss(self, *args, **kwargs):
        if self.num_data == 0:
            loss = TensorType([0.0])
            loss.requires_grad_(True)
            return loss
        else:
            return super().loss(*args, **kwargs)
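
A small, self-contained illustration (plain PyTorch, not this library's API) of why the zero loss is created with requires_grad_(True): callers can then invoke .backward() unconditionally, even when no data contributed to the loss.

import torch

loss = torch.zeros(1)
loss.requires_grad_(True)
loss.backward()  # succeeds; a constant tensor without grad would raise a RuntimeError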
Example #4
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression value, computed while the code's output was trusted.
        assert loss.item() == pytest.approx(8.842242323920674)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]))
Example #5
def append_function_egp(x_new, y_new):
    x_new, y_new = np.atleast_2d(x_new), np.atleast_2d(y_new)
    n_new = x_new.shape[0]
    xg_new = np.array([["0"] * system.general_dimensions] * n_new)

    model.xr = torch.cat((model.xr, TensorType(x_new)))
    model.xg = np.concatenate((model.xg, xg_new))
    model.Y = torch.cat((model.Y, TensorType(y_new)))
Example #6
    def test_init(self):
        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(x.shape[1], ARD=True)

        # init w/ numpy
        GPR(x, y, kern)
        # init w/ PyTorch tensors:
        GPR(TensorType(x), TensorType(y), kern)
        # init w/ a mean function:
        GPR(x, y, kern, mean_function=torch.nn.Linear(dx, dy))
Example #7
    def setup_class(cls, kernel_type):
        cls.kernel_type = kernel_type
        cls.x1 = TensorType(np.load(os.path.join(data_dir, "x1.npy")))
        cls.x2 = TensorType(np.load(os.path.join(data_dir, "x2.npy")))
        cls.n1, cls.d1 = cls.x1.data.numpy().shape
        cls.n2, cls.d2 = cls.x2.data.numpy().shape
        cls.kern = cls.kernel_type(cls.d1)
        cls.kern_str = cls.kern.__class__.__name__
        cls.kx_expected = np.load(
            os.path.join(data_dir, "{}_kx.npy".format(cls.kern_str)))
        cls.kx2_expected = np.load(
            os.path.join(data_dir, "{}_kx2.npy".format(cls.kern_str)))
        cls.kdiag_expected = np.load(
            os.path.join(data_dir, "{}_kdiag.npy".format(cls.kern_str)))
Example #8
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression value, computed while the code's output was trusted.
        assert loss.item() == pytest.approx(9.534628739243518)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]), y=TensorType(y))

        model_minibatch = SVGP(x, y, kernel, batch_size=1)
        loss_mb = model_minibatch.loss()
        assert isinstance(loss_mb, torch.Tensor)
        assert loss_mb.ndimension() == 0

        model_full_mb = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
            batch_size=x.shape[0],
        )
        model_full_mb.induced_output_mean.data = TensorType(u_mu)
        model_full_mb.induced_output_chol_cov.data = model_full_mb.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))
        loss_full_mb = model_full_mb.loss()
        assert isinstance(loss_full_mb, torch.Tensor)
        assert loss_full_mb.ndimension() == 0
        assert loss_full_mb.item() == pytest.approx(loss.item())

        model.loss(model.X, model.Y)  # Just make sure it works!
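
Note that the closing assertions lean on the usual SVGP minibatching convention (an assumption about this library that the test itself supports): the data term of the evidence lower bound is rescaled by num_data / batch_size, so the minibatch loss is an unbiased estimate of the full objective and matches it exactly when batch_size equals the number of data.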
Example #9
    def test_logp(self):
        """
        Log-density
        """
        lik = self._standard_likelihood()
        mean = Variable(TensorType([0.0]))
        target = Variable(TensorType([0.1]))
        expected_logp = 0.8836465597893728

        # API
        logp = lik.logp(mean, target)
        assert isinstance(logp, Variable)

        # Value
        assert logp.data.numpy() == pytest.approx(expected_logp)
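
The hard-coded expectation can be verified by hand if one assumes that the standard likelihood in these tests is Gaussian with variance 0.01 (a value inferred from the expected number, not read from the fixture):

import numpy as np

# log N(0.1 | mean=0.0, var=0.01) = -0.5*log(2*pi*0.01) - 0.5*(0.1 - 0.0)**2/0.01
print(-0.5 * np.log(2.0 * np.pi * 0.01) - 0.5 * 0.1 ** 2 / 0.01)
# -> 0.8836465..., matching expected_logp above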
Example #10
    def _predict_fy_samples(self, attr):
        """
        attr="predict_f_samples" or "predict_y_samples"
        """

        # TODO mock a GPModel?  Using GPR for the moment.
        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(dx, ARD=True)
        gp = GPR(x, y, kern)
        f = getattr(gp, attr)

        n_test = 5
        x_test = np.random.randn(n_test, dx)
        samples = f(x_test)
        assert isinstance(samples, np.ndarray)
        assert samples.ndim == 3  # [sample x n_test x dy]
        assert samples.shape == (1, n_test, dy)

        n_samples = 3
        samples_2 = f(x_test, n_samples=n_samples)
        assert isinstance(samples_2, np.ndarray)
        assert samples_2.ndim == 3  # [sample x n_test x dy]
        assert samples_2.shape == (n_samples, n_test, dy)

        x_test_torch = TensorType(x_test)
        samples_torch = f(x_test_torch)
        assert isinstance(samples_torch, TensorType)
        assert samples_torch.ndimension() == 3  # [1 x n_test x dy]
        assert samples_torch.shape == (1, n_test, dy)
Example #11
    def test_loss(self):
        model, x, y = self._get_model()
        n = x.shape[0]

        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 1  # TODO change this...

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:n // 2]))
Example #12
    def _get_p_best(self, x_test, y_test=None, n_samples=100000, show=False):
        """
        Out of the inputs in x_test, determine for each input the probability 
        that its y would be the best (lowest)
        """

        with torch.no_grad():
            # Need the FULL predictive distribution!
            m, c = self.predict_function(TensorType(x_test), diag=False)
            assert m.shape[1] == 1, 'How to quantify "best" for multi-output?'
            
            lc = cholesky(c)
            epsilon = torch.randn(n_samples, *m.shape, dtype=torch_dtype)
            samples = (m[None, :, :] + lc[None, :, :] @ epsilon).cpu().numpy()

        i_best = np.argmin(samples, axis=1)

        p_best = np.array(
            [np.sum(i_best == i) for i in range(self.x_all.shape[0])]
        ) / n_samples

        if show:
            self._show_p_best_analysis(x_test, y_test, m, c, samples, p_best)

        return p_best
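
For reference, a self-contained numpy sketch of the same Monte-Carlo estimate, with stand-in values for the predictive mean and covariance (all names here are hypothetical, not from the class above):

import numpy as np

rng = np.random.default_rng(0)
mean = np.array([0.0, 0.1, -0.2])    # stand-in predictive means
cov = 0.01 * np.eye(3)               # stand-in predictive covariance
samples = rng.multivariate_normal(mean, cov, size=100000)
i_best = np.argmin(samples, axis=1)  # index of the lowest draw per sample
p_best = np.bincount(i_best, minlength=mean.size) / i_best.size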
Example #13
    def _predict_fy(self, attr):
        """
        attr='predict_f' or 'predict_y'
        """

        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(dx, ARD=True)
        gp = GPR(x, y, kern)

        n_test = 5
        x_test = np.random.randn(n_test, dx)
        f = getattr(gp, attr)
        mu, v = f(x_test)
        for result in [mu, v]:
            assert isinstance(result, np.ndarray)
            assert result.ndim == 2  # [n_test x dy]
            assert result.shape == (n_test, dy)

        x_test_torch = TensorType(x_test)
        mu_torch, v_torch = f(x_test_torch)
        for result in [mu_torch, v_torch]:
            assert isinstance(result, TensorType)
            assert result.ndimension() == 2  # [n_test x dy]
            assert result.shape == (n_test, dy)
Example #14
def initialize_model(xr, xg, y, model_type):
    """
    Initialize the metamodel to be used for BO.
    """

    if model_type == "BEGP":
        return EGP(xr, xg, y)
    elif model_type == "EGP":
        return EGP(xr, xg, y, embedder_type=DeterministicEmbedder)
    elif model_type == "GP":
        x = TensorType(np.zeros((0, xr.shape[1])))
        y = TensorType(np.zeros((0, 1)))
        return GPR(x,
                   y,
                   Rbf(x.shape[1], ARD=True),
                   likelihood=gptorch.likelihoods.Gaussian(variance=0.001))
    elif model_type == "BGP":
        return _initialize_bayesian_gp(xr, xg, y)
    raise ValueError("Unexpected model type %s" % model_type)
Example #15
        def func_and_grad(x):
            x = TensorType(np.atleast_2d(x))
            x.requires_grad_(True)
            m, v = self.predict_function(x)
            s = v.sqrt()

            if not self.y:  # No current data: use mean ("everything is an improvement")
                f = m
            else:
                f = _expected_improvement(m, s, min(self.y), mode="min")

            if f.requires_grad:
                f.backward()
                g = x.grad.detach().cpu().numpy().flatten()
            else:
                g = 0.0 * x.detach().cpu().numpy().flatten()
            f = f.detach().cpu().item()
            
            return f, g
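
The helper _expected_improvement() is not shown; for orientation, a standard closed form for minimization that such a helper might implement (a sketch under that assumption, not this code's actual implementation):

import torch
from torch.distributions import Normal

def expected_improvement(mean, std, f_best):
    # EI = (f_best - mu) * Phi(z) + sigma * phi(z), with z = (f_best - mu) / sigma
    standard_normal = Normal(torch.zeros_like(mean), torch.ones_like(std))
    z = (f_best - mean) / std
    return ((f_best - mean) * standard_normal.cdf(z)
            + std * standard_normal.log_prob(z).exp())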
Example #16
    def test_predict_mean_variance(self):
        """
        Test propagation of diagonal Gaussian through the likelihood density.
        """
        lik = self._standard_likelihood()
        input_mean = Variable(TensorType([0.0]))
        input_variance = Variable(TensorType([1.0]))
        expected_output_mean = input_mean
        expected_output_variance = input_variance + self._expected_likelihood_variance

        # API
        output_mean, output_variance = lik.predict_mean_variance(
            input_mean, input_variance)
        assert isinstance(output_mean, Variable)
        assert isinstance(output_variance, Variable)

        # Value
        assert output_mean.data.numpy() == expected_output_mean.data.numpy()
        assert output_variance.data.numpy() == pytest.approx(
            expected_output_variance.data.numpy())
Example #17
    def _get_model():
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(x,
                     y,
                     kernel,
                     inducing_points=z,
                     likelihood=likelihood,
                     mean_function=mean_functions.Zero(1))
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov.\
            _transform.inv(TensorType(u_l_s))

        return model
Example #18
    def _predict_fy_samples_cuda(self, attr):

        n, dx, dy = 5, 3, 2
        x, y = np.random.randn(n, dx), np.random.randn(n, dy)
        kern = Rbf(dx, ARD=True)
        gp = GPR(x, y, kern)
        f = getattr(gp, attr)

        n_test = 5
        x_test = np.random.randn(n_test, dx)
        x_test_torch = TensorType(x_test)

        gp.cuda()
        # Numpy input:
        samples_cuda_np = f(x_test)
        assert isinstance(samples_cuda_np, np.ndarray)
        # PyTorch (cpu) input
        samples_cuda_torch = f(x_test_torch)
        assert samples_cuda_torch.device == x_test_torch.device
        # PyTorch (GPU) input
        samples_cuda_gpu = f(x_test_torch.to("cuda"))
        assert samples_cuda_gpu.is_cuda
Example #19
    def _predict_fy_cuda(self, attr):
        """
        attr='predict_f' or 'predict_y'
        """

        gp = self._get_model()
        f = getattr(gp, attr)
        x_test = np.random.randn(5, gp.input_dimension)
        x_test_torch = TensorType(x_test)

        # Test that CUDA works in all cases:
        gp.cuda()
        # Numpy input:
        cuda_np = f(x_test)
        for result in cuda_np:
            assert isinstance(result, np.ndarray)
        # PyTorch (cpu) input
        cuda_torch = f(x_test_torch)
        for result in cuda_torch:
            assert result.device == x_test_torch.device
        # PyTorch (GPU) input
        cuda_gpu = f(x_test_torch.to("cuda"))
        for result in cuda_gpu:
            assert result.is_cuda
Example #20
def as_variable(x: np.ndarray) -> Variable:
    return Variable(TensorType(x))
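
Variable is a legacy construct: since PyTorch 0.4 it is a thin alias that simply returns a tensor, so on modern versions an equivalent helper (a sketch, not part of this codebase) is:

import numpy as np
import torch

def as_tensor(x: np.ndarray) -> torch.Tensor:
    return torch.as_tensor(x)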
Example #21
def append_function_bgp(x_new, y_new):
    model.X = torch.cat((model.X, TensorType(np.atleast_2d(x_new))))
    model.Y = torch.cat((model.Y, TensorType(np.atleast_2d(y_new))))
Example #22
    x = np.linspace(0, 1, n).reshape((-1, 1))
    y = f(x) + 0.1 * np.random.randn(n, 1)

    # Try different kernels...
    # kern = kernels.Rbf(1)
    # kern = kernels.Matern32(1)
    # kern = kernels.Matern52(1)
    # kern = kernels.Exp(1)
    # kern = kernels.Constant(1)
    # kern = kernels.Linear(1)
    kern = kernels.Linear(1) + kernels.Rbf(1) + kernels.Constant(1)

    # Try different models:
    model = GPR(x, y, kern)
    # model = VFE(x, y, kern)
    model.likelihood.variance.data = TensorType([1.0e-6])

    # Train
    model.optimize(method="L-BFGS-B", max_iter=100)
    print("Trained model:")
    print(model)

    # Predict
    n_test = 200
    n_samples = 5
    x_test = np.linspace(-1, 2, n_test).reshape((-1, 1))
    mu, s = model.predict_y(x_test)
    mu, s = mu.detach().numpy().flatten(), s.detach().numpy().flatten()
    y_samp = model.predict_y_samples(x_test, n_samples=n_samples).detach().\
        numpy()
    unc = 2.0 * np.sqrt(s)
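
The snippet ends after computing the two-sigma band; one plausible way to plot what it has built up (assuming matplotlib; the styling is illustrative):

import matplotlib.pyplot as plt

plt.plot(x, y, "o", color="k", label="data")
plt.plot(x_test, mu, color="C0", label="predictive mean")
plt.fill_between(x_test.flatten(), mu - unc, mu + unc, color="C0", alpha=0.3,
                 label="+/- 2 sigma")
plt.plot(x_test, y_samp[:, :, 0].T, color="gray", alpha=0.5, linewidth=0.5)
plt.legend()
plt.show()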
Example #23
    def __init__(self, x, y, model_type, **kwargs):
        super().__init__(x, y, **kwargs)
        self._model = model_type(x, y, gptorch.kernels.Rbf(x.shape[1], ARD=True))
        # Initial guess: likelihood variance is 1% of the data variance
        # (the parameter is stored on a log scale, hence the np.log).
        self._model.likelihood.variance.data = TensorType(
            [np.log(np.var(y) * 1.0e-2)])