Example #1
    def test_predict(self):
        """
        Just the ._predict() method
        """

        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        x_test = TensorType(_InducingData._x_test())
        mu, s = TestSVGP._y_pred()
        gaussian_predictions(model, x_test, mu, s)
Example #2
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression value: computed while the implementation was trusted.
        assert loss.item() == pytest.approx(8.842242323920674)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]))
Example #3
    def test_compute_loss(self):
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
        )
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        loss = model.loss()
        assert isinstance(loss, torch.Tensor)
        assert loss.ndimension() == 0
        # Regression value: computed while the implementation was trusted.
        assert loss.item() == pytest.approx(9.534628739243518)

        # Test ability to specify x and y
        loss_xy = model.loss(x=TensorType(x), y=TensorType(y))
        assert isinstance(loss_xy, torch.Tensor)
        assert loss_xy.item() == loss.item()

        with pytest.raises(ValueError):
            # Size mismatch
            model.loss(x=TensorType(x[:x.shape[0] // 2]), y=TensorType(y))

        model_minibatch = SVGP(x, y, kernel, batch_size=1)
        loss_mb = model_minibatch.loss()
        assert isinstance(loss_mb, torch.Tensor)
        assert loss_mb.ndimension() == 0

        model_full_mb = SVGP(
            x,
            y,
            kernel,
            inducing_points=z,
            likelihood=likelihood,
            mean_function=mean_functions.Zero(1),
            batch_size=x.shape[0],
        )
        model_full_mb.induced_output_mean.data = TensorType(u_mu)
        model_full_mb.induced_output_chol_cov.data = model_full_mb.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))
        loss_full_mb = model_full_mb.loss()
        assert isinstance(loss_full_mb, torch.Tensor)
        assert loss_full_mb.ndimension() == 0
        assert loss_full_mb.item() == pytest.approx(loss.item())

        model.loss(model.X, model.Y)  # Just make sure it works!
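
Because the loss returned by these models is a scalar torch.Tensor, it can be minimized with a standard PyTorch optimizer. A minimal training-loop sketch, assuming the model constructed above is a torch.nn.Module subclass so that .parameters() exposes its trainable parameters; the learning rate and step count are illustrative:

# Minimal sketch: minimize the model's loss (negative ELBO) with Adam.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for _ in range(100):
    optimizer.zero_grad()
    loss = model.loss()  # scalar torch.Tensor, as asserted in the tests above
    loss.backward()
    optimizer.step()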
Example #4
File: models.py Project: 212726320/BEBO-1
def _init_kernel_and_likelihood(x: torch.Tensor, y: torch.Tensor):
    """
    Reasonable initial values for kernels and likelihoods
    """

    kernel = kernels.Rbf(x.shape[1], ARD=True)
    x_range = x.max(dim=0)[0] - x.min(dim=0)[0]
    x_range[np.where(x_range == 0.0)[0]] = 1.0
    kernel.length_scales.data = torch.log(0.2 * x_range)
    kernel.variance.data[0] = torch.log(y.var())

    likelihood = likelihoods.Gaussian(variance=0.001 * y.var())

    return kernel, likelihood
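
A short usage sketch for this helper, assuming the same kernels/likelihoods modules and the VFE model from the surrounding examples; the data is synthetic and the choice of the first five inputs as inducing points is purely illustrative:

# Hypothetical usage: data-dependent hyperparameter initialization for a VFE model.
x = torch.rand(20, 2, dtype=torch.double)  # illustrative inputs
y = torch.rand(20, 1, dtype=torch.double)  # illustrative targets
kernel, likelihood = _init_kernel_and_likelihood(x, y)
model = VFE(x, y, kernel, inducing_points=x[:5], likelihood=likelihood)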
Example #5
    def _get_model():
        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(x,
                    y,
                    kernel,
                    inducing_points=z,
                    likelihood=likelihood,
                    mean_function=mean_functions.Zero(1))

        return model
Example #6
    def _get_model():
        x, y = _InducingData._xy()
        z = _InducingData._z()
        u_mu, u_l_s = TestSVGP._induced_outputs()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = SVGP(x,
                     y,
                     kernel,
                     inducing_points=z,
                     likelihood=likelihood,
                     mean_function=mean_functions.Zero(1))
        model.induced_output_mean.data = TensorType(u_mu)
        model.induced_output_chol_cov.data = model.induced_output_chol_cov._transform.inv(
            TensorType(u_l_s))

        return model
Example #7
    def test_predict(self):
        """
        Just the ._predict() method
        """

        x, y = _InducingData._xy()
        z = _InducingData._z()
        kernel = Matern32(1)
        kernel.length_scales.data = torch.zeros(1, dtype=torch_dtype)
        kernel.variance.data = torch.zeros(1, dtype=torch_dtype)
        likelihood = likelihoods.Gaussian(variance=1.0)

        model = VFE(x,
                    y,
                    kernel,
                    inducing_points=z,
                    likelihood=likelihood,
                    mean_function=mean_functions.Zero(1))

        x_test = torch.Tensor(_InducingData._x_test())
        mu, s = TestVFE._y_pred()
        gaussian_predictions(model, x_test, mu, s)
Example #8
    def test_init(self):
        """
        Ensure that initialization succeeds
        """
        likelihoods.Gaussian()
        self._standard_likelihood()
Example #9
    def _standard_likelihood(self):
        return likelihoods.Gaussian(
            variance=self._expected_likelihood_variance)