Example 1
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
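        # Set the variational (induced-output) parameters to precomputed test values.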
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression test: reference value computed with a trusted version of the code.
        assert loss.item() == pytest.approx(9.534628739243518)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]), y=TensorType(y))

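        # Minibatched model: the loss should still evaluate to a scalar tensor.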
        model_minibatch = SVGP(x, y, kernel, batch_size=1)
        loss_mb = model_minibatch.loss()
        assert isinstance(loss_mb, torch.Tensor)
        assert loss_mb.ndimension() == 0

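        # A minibatch spanning the full dataset should reproduce the full-batch loss.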
        model_full_mb = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
            batch_size=x.shape[0],
        )
        model_full_mb.induced_output_mean.data = TensorType(u_mu)
        model_full_mb.induced_output_chol_cov.data = model_full_mb.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))
        loss_full_mb = model_full_mb.loss()
        assert isinstance(loss_full_mb, torch.Tensor)
        assert loss_full_mb.ndimension() == 0
        assert loss_full_mb.item() == pytest.approx(loss.item())

        model.loss(model.X, model.Y)  # Just make sure it works!
Example 2
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression test: reference value computed with a trusted version of the code.
        assert loss.item() == pytest.approx(8.842242323920674)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]))
Example 3
    def test_predict(self):
        """
        Just the ._predict() method
        """

        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

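        # Check the predictive mean and variance against precomputed values.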
        x_test = TensorType(_InducingData._x_test())
        mu, s = TestSVGP._y_pred()
        gaussian_predictions(model, x_test, mu, s)
Example 4
    def _get_model():
        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(x,
                    y,
                    kernel,
                    inducing_points=z,
                    likelihood=likelihood,
                    mean_function=mean_functions.Zero(1))

        return model
Example 5
    def _get_model():
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(x,
                     y,
                     kernel,
                     inducing_points=z,
                     likelihood=likelihood,
                     mean_function=mean_functions.Zero(1))
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        return model
Example 6
    def test_predict(self):
        """
        Just the ._predict() method
        """

        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(x,
                    y,
                    kernel,
                    inducing_points=z,
                    likelihood=likelihood,
                    mean_function=mean_functions.Zero(1))

        x_test = TensorType(_InducingData._x_test())
        mu, s = TestVFE._y_pred()
        gaussian_predictions(model, x_test, mu, s)
Example 7
    def test_forward(self):
        n, dx, dy = 5, 3, 2
        y = mean_functions.Zero(dy)(torch.rand(n, dx))
        assert isinstance(y, torch.Tensor)
        assert all([e == a for e, a in zip(y.flatten(), torch.zeros(n, dy).flatten())])
Example 8
    def test_init(self):
        mean_functions.Zero(2)
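
The snippets above rely on shared test fixtures (_InducingData, TestSVGP._induced_outputs, TestSVGP._y_pred, TestVFE._y_pred, gaussian_predictions) and imports that are not shown. Below is a minimal, hypothetical sketch of what the data fixture might look like, only to make the shapes involved concrete; the real fixture uses different values, so the hard-coded reference losses and predictions in the tests above would not be reproduced by it.

import numpy as np


class _InducingData:
    """Hypothetical stand-in for the shared data fixture used by the tests above."""

    @staticmethod
    def _xy():
        # Small deterministic training set: five 1D inputs with noisy targets.
        rng = np.random.RandomState(42)
        x = rng.rand(5, 1)
        y = np.sin(2.0 * np.pi * x) + 0.1 * rng.randn(5, 1)
        return x, y

    @staticmethod
    def _z():
        # Three inducing-point locations spread over the input domain.
        return np.linspace(0.0, 1.0, 3)[:, None]

    @staticmethod
    def _x_test():
        # Inputs at which the prediction tests are evaluated.
        return np.linspace(0.0, 1.0, 7)[:, None]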