Example #1
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression value, computed while the implementation was trusted.
        assert loss.item() == pytest.approx(8.842242323920674)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]))
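
For context: the examples in this listing rely on a shared _InducingData helper that supplies a small training set, inducing-point locations, and test inputs, but its definition is not shown. Below is a minimal sketch of what such a fixture could look like; the class name and method signatures follow the calls above, while the data values are made up for illustration.

import numpy as np

class _InducingData:
    # Hypothetical stand-in for the shared test fixture (values are illustrative).

    @staticmethod
    def _xy():
        # Small 1D regression set: inputs on a grid, smooth targets.
        x = np.linspace(0.0, 1.0, 5)[:, None]
        y = np.sin(2.0 * np.pi * x)
        return x, y

    @staticmethod
    def _z():
        # A few inducing-point locations inside the input range.
        return np.array([[0.25], [0.75]])

    @staticmethod
    def _x_test():
        # Held-out inputs for the prediction tests.
        return np.array([[0.1], [0.5], [0.9]])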
Example #2
    def test_predict(self):
        """
        Just the ._predict() method
        """

        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        x_test = TensorType(_InducingData._x_test())
        mu, s = TestSVGP._y_pred()
        gaussian_predictions(model, x_test, mu, s)
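
The gaussian_predictions call above is a shared assertion helper defined alongside these tests; its body is not included in this listing. A plausible sketch, assuming the model's _predict() method returns a predictive mean and variance as torch tensors, is:

def gaussian_predictions(model, x_test, expected_mean, expected_variance):
    # Hedged sketch of the shared helper: compare the model's predictions
    # against precomputed regression values.
    mu, s = model._predict(x_test)
    assert isinstance(mu, torch.Tensor) and isinstance(s, torch.Tensor)
    assert mu.detach().numpy() == pytest.approx(expected_mean)
    assert s.detach().numpy() == pytest.approx(expected_variance)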
Example #3
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression value, computed while the implementation was trusted.
        assert loss.item() == pytest.approx(9.534628739243518)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]), y=TensorType(y))

        model_minibatch = SVGP(x, y, kernel, batch_size=1)
        loss_mb = model_minibatch.loss()
        assert isinstance(loss_mb, torch.Tensor)
        assert loss_mb.ndimension() == 0

        model_full_mb = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
            batch_size=x.shape[0],
        )
        model_full_mb.induced_output_mean.data = TensorType(u_mu)
        model_full_mb.induced_output_chol_cov.data = model_full_mb.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))
        loss_full_mb = model_full_mb.loss()
        assert isinstance(loss_full_mb, torch.Tensor)
        assert loss_full_mb.ndimension() == 0
        assert loss_full_mb.item() == pytest.approx(loss.item())

        model.loss(model.X, model.Y)  # Just make sure it works!
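
Since loss() returns a scalar torch.Tensor, the same models can be trained with any standard PyTorch optimizer. A minimal sketch, assuming the model is a torch.nn.Module so that model.parameters() yields its trainable parameters:

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for _ in range(100):
    optimizer.zero_grad()
    loss = model.loss()   # scalar tensor, as asserted above
    loss.backward()       # backpropagate through kernel, likelihood, and variational parameters
    optimizer.step()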
Example #4
    def test_init(self):
        x, y = _InducingData._xy()
        kernel = Matern32(x.shape[1], ARD=True)
        SVGP(x, y, kernel)
        SVGP(x, y, kernel, inducing_points=_InducingData._z())

        SVGP(x, y, kernel, mean_function=mean_functions.Constant(y.shape[1]))
        SVGP(x,
             y,
             kernel,
             mean_function=torch.nn.Linear(x.shape[1], y.shape[1]))
Example #5
    def _get_model():
        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(x,
                    y,
                    kernel,
                    inducing_points=z,
                    likelihood=likelihood,
                    mean_function=mean_functions.Zero(1))

        return model
Example #6
    def _get_model():
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(x,
                     y,
                     kernel,
                     inducing_points=z,
                     likelihood=likelihood,
                     mean_function=mean_functions.Zero(1))
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        return model
Example #7
    def test_predict(self):
        """
        Just the ._predict() method
        """

        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(x,
                    y,
                    kernel,
                    inducing_points=z,
                    likelihood=likelihood,
                    mean_function=mean_functions.Zero(1))

        x_test = torch.Tensor(_InducingData._x_test())
        mu, s = TestVFE._y_pred()
        gaussian_predictions(model, x_test, mu, s)
Example #8
    def test_init(self):
        x, y = _InducingData._xy()
        kernel = Matern32(x.shape[1], ARD=True)
        VFE(x, y, kernel)
        VFE(x, y, kernel, inducing_points=_InducingData._z())
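
All of the snippets above assume names imported at the top of the test module. The exact import paths depend on the package layout and are not shown in this listing; the block below uses placeholder module names purely to indicate what must be in scope.

import pytest
import torch

# Placeholder paths -- substitute the actual package providing these names:
from mygp.kernels import Matern32                # hypothetical module path
from mygp.models.sparse_gp import VFE, SVGP      # hypothetical module path
from mygp import likelihoods, mean_functions     # hypothetical module path
from mygp.util import TensorType, torch_dtype    # hypothetical module path
# _InducingData, TestVFE, TestSVGP, and gaussian_predictions are helpers
# defined alongside these tests.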