Example 1
    def testEmptyDataMatchesGPPrior(self):
        amp = np.float64(.5)
        len_scale = np.float64(.2)
        jitter = np.float64(1e-4)
        index_points = np.random.uniform(-1., 1., (10, 1)).astype(np.float64)

        # GPRM posterior covariance: k_xx - k_xn @ inv(k_nn + sigma^2 I) @ k_nx + sigma^2 I
        mean_fn = lambda x: x[:, 0]**2

        kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)
        gp = tfd.GaussianProcess(kernel,
                                 index_points,
                                 mean_fn=mean_fn,
                                 jitter=jitter)

        gprm_nones = tfd.GaussianProcessRegressionModel(kernel,
                                                        index_points,
                                                        mean_fn=mean_fn,
                                                        jitter=jitter)

        gprm_zero_shapes = tfd.GaussianProcessRegressionModel(
            kernel,
            index_points,
            observation_index_points=tf.ones([5, 0], tf.float64),
            observations=tf.ones([5, 0], tf.float64),
            mean_fn=mean_fn,
            jitter=jitter)

        for gprm in [gprm_nones, gprm_zero_shapes]:
            self.assertAllClose(self.evaluate(gp.mean()),
                                self.evaluate(gprm.mean()))
            self.assertAllClose(self.evaluate(gp.covariance()),
                                self.evaluate(gprm.covariance()))
            self.assertAllClose(self.evaluate(gp.variance()),
                                self.evaluate(gprm.variance()))

            observations = np.random.uniform(-1., 1., 10).astype(np.float64)
            self.assertAllClose(self.evaluate(gp.log_prob(observations)),
                                self.evaluate(gprm.log_prob(observations)))
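
Note: with no observations, the regression model's posterior reduces to the GP prior,
which is exactly what the assertions above check. For reference, the standard GP
regression moments at test points x, given observation points z with values y, mean
function m, kernel k and observation noise variance sigma^2, are

    mean(x) = m(x) + k(x, z) @ inv(k(z, z) + sigma^2 I) @ (y - m(z))
    cov(x)  = k(x, x) - k(x, z) @ inv(k(z, z) + sigma^2 I) @ k(z, x)

(plus any predictive noise variance on the diagonal). With an empty (z, y) both
collapse to the prior moments m(x) and k(x, x).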
Example 2
    def testCustomMarginalFn(self):
        def test_marginal_fn(loc,
                             covariance,
                             validate_args=False,
                             allow_nan_stats=False,
                             name="custom_marginal"):
            return tfd.MultivariateNormalDiag(
                loc=loc,
                scale_diag=tf.math.sqrt(tf.linalg.diag_part(covariance)),
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                name=name)

        index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)

        gp = tfd.GaussianProcess(kernel=psd_kernels.ExponentiatedQuadratic(),
                                 index_points=index_points,
                                 marginal_fn=test_marginal_fn,
                                 validate_args=True)

        self.assertAllClose(np.eye(10),
                            gp.get_marginal_distribution().covariance())
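
Why the identity matrix above: the default ExponentiatedQuadratic has amplitude 1, so
k(x, x) = 1 on the diagonal, and test_marginal_fn keeps only tf.linalg.diag_part of the
covariance. A minimal sketch of that reduction, reusing the psd_kernels alias and
index_points from this example:

    kernel = psd_kernels.ExponentiatedQuadratic()   # amplitude=1, length_scale=1
    k = kernel.matrix(index_points, index_points)   # dense 10x10 kernel matrix
    # test_marginal_fn discards the off-diagonal entries, so the marginal's
    # covariance is diag(k(x_i, x_i)) = diag(1, ..., 1) = I.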
Example 3

  def testLateBindingIndexPoints(self):
    amp = np.float64(.5)
    len_scale = np.float64(.2)
    kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)
    mean_fn = lambda x: x[:, 0]**2
    jitter = np.float64(1e-4)
    observation_noise_variance = np.float64(3e-3)

    gp = tfd.GaussianProcess(
        kernel=kernel,
        mean_fn=mean_fn,
        observation_noise_variance=observation_noise_variance,
        jitter=jitter,
        validate_args=True)

    index_points = np.random.uniform(-1., 1., [10, 1])

    expected_mean = mean_fn(index_points)
    self.assertAllClose(expected_mean,
                        self.evaluate(gp.mean(index_points=index_points)))

    def _kernel_fn(x, y):
      return amp ** 2 * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))

    expected_covariance = (
        _kernel_fn(np.expand_dims(index_points, -3),
                   np.expand_dims(index_points, -2)) +
        observation_noise_variance * np.eye(10))

    self.assertAllClose(expected_covariance,
                        self.evaluate(gp.covariance(index_points=index_points)))
    self.assertAllClose(np.diag(expected_covariance),
                        self.evaluate(gp.variance(index_points=index_points)))
    self.assertAllClose(np.sqrt(np.diag(expected_covariance)),
                        self.evaluate(gp.stddev(index_points=index_points)))

    # Calling mean with no index_points should raise an Error
    with self.assertRaises(ValueError):
      gp.mean()
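
Because no index_points were fixed at construction, the same gp can be evaluated at
different locations on every call. A short sketch reusing the objects above (the point
set here is purely illustrative):

    other_points = np.random.uniform(-1., 1., [7, 1])
    gp.mean(index_points=other_points)      # shape [7], equals mean_fn(other_points)
    gp.stddev(index_points=other_points)    # shape [7]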
Example 4

  def testUnivariateLogProbWithIsMissing(self):
    index_points = tf.convert_to_tensor([[[0.0, 0.0]], [[0.5, 1.0]]])
    amplitude = tf.convert_to_tensor(1.1)
    length_scale = tf.convert_to_tensor(0.9)

    gp = tfd.GaussianProcess(
        kernel=psd_kernels.ExponentiatedQuadratic(
            amplitude, length_scale),
        index_points=index_points,
        mean_fn=lambda x: tf.reduce_mean(x, axis=-1),
        observation_noise_variance=.05,
        jitter=0.0)

    x = gp.sample(3, seed=test_util.test_seed())
    lp = gp.log_prob(x)

    self.assertAllClose(lp, gp.log_prob(x, is_missing=[False, False]))
    self.assertAllClose(tf.convert_to_tensor([np.zeros((3, 2)), lp]),
                        gp.log_prob(x, is_missing=[[[True]], [[False]]]))
    self.assertAllClose(
        tf.convert_to_tensor([[lp[0, 0], 0.0], [0.0, 0.0], [0., lp[2, 1]]]),
        gp.log_prob(x, is_missing=[[False, True], [True, True], [True, False]]))
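
By the same broadcasting as the second assertion, masking every entry should zero out
every log-prob term. A sketch of that corollary (not asserted in the original test):

    lp_all_masked = gp.log_prob(x, is_missing=[[[True]], [[True]]])
    # Expected to be identically zero, with shape [2, 3, 2].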
Example 5
# NOTE: as written, this strategy needs hypothesis's composite decorator so that
# `draw` is supplied (assuming the usual `hps` alias for hypothesis.strategies).
@hps.composite
def gaussian_processes(draw,
                       kernel_name=None,
                       batch_shape=None,
                       event_dim=None,
                       feature_dim=None,
                       feature_ndims=None,
                       enable_vars=False):
    # First draw a kernel.
    k, _ = draw(
        kernel_hps.base_kernels(
            kernel_name=kernel_name,
            batch_shape=batch_shape,
            event_dim=event_dim,
            feature_dim=feature_dim,
            feature_ndims=feature_ndims,
            # Disable variables
            enable_vars=False))
    compatible_batch_shape = draw(
        tfp_hps.broadcast_compatible_shape(k.batch_shape))
    index_points = draw(
        kernel_hps.kernel_input(batch_shape=compatible_batch_shape,
                                example_ndims=1,
                                feature_dim=feature_dim,
                                feature_ndims=feature_ndims,
                                enable_vars=enable_vars,
                                name='index_points'))
    params = draw(
        broadcasting_params('GaussianProcess',
                            compatible_batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))

    gp = tfd.GaussianProcess(
        kernel=k,
        index_points=index_points,
        cholesky_fn=lambda x: marginal_fns.retrying_cholesky(x)[0],
        observation_noise_variance=params['observation_noise_variance'])
    return gp
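
The cholesky_fn hook above swaps the default jitter-based factorization for
retrying_cholesky, which evidently returns a (factor, jitter) pair, hence the [0]. The
same pattern outside the strategy, as a sketch (assumes the np / tfd / psd_kernels
aliases used in the other examples):

    gp = tfd.GaussianProcess(
        kernel=psd_kernels.ExponentiatedQuadratic(),
        index_points=np.random.uniform(-1., 1., [10, 1]),
        # Retry the Cholesky with growing jitter instead of passing a fixed
        # `jitter`; keep only the factor from the returned pair.
        cholesky_fn=lambda x: marginal_fns.retrying_cholesky(x)[0])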
Example 6

    def testVarianceAndCovarianceMatrix(self):
        amp = np.float64(.5)
        len_scale = np.float64(.2)
        jitter = np.float64(1e-4)

        kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)

        index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)

        gp = tfd.GaussianProcess(kernel, index_points, jitter=jitter)

        def _kernel_fn(x, y):
            # ExponentiatedQuadratic squares the amplitude: k = amp**2 * exp(...)
            return amp**2 * np.exp(-.5 * (np.squeeze(
                (x - y)**2)) / (len_scale**2))

        expected_covariance = (_kernel_fn(np.expand_dims(index_points, 0),
                                          np.expand_dims(index_points, 1)) +
                               jitter * np.eye(10))

        self.assertAllClose(expected_covariance,
                            self.evaluate(gp.covariance()))
        self.assertAllClose(np.diag(expected_covariance),
                            self.evaluate(gp.variance()))
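
For reference, ExponentiatedQuadratic computes k(x, y) = amplitude**2 *
exp(-||x - y||**2 / (2 * length_scale**2)), which is why amp is squared in _kernel_fn
above. A quick numeric spot-check, as a sketch (the scalar inputs are arbitrary):

    x0 = np.array([0.1], dtype=np.float64)   # a single 1-d feature vector
    x1 = np.array([0.3], dtype=np.float64)
    closed_form = amp**2 * np.exp(-0.5 * (0.1 - 0.3)**2 / len_scale**2)
    # kernel.apply evaluates the kernel pointwise, so (up to float error)
    # self.evaluate(kernel.apply(x0, x1)) should equal closed_form.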
Example 7
    def testShapes(self):
        # 5x5 grid of index points in R^2, flattened to shape [25, 2]
        index_points = np.linspace(-4., 4., 5, dtype=np.float32)
        index_points = np.stack(np.meshgrid(index_points, index_points),
                                axis=-1)
        index_points = np.reshape(index_points, [-1, 2])
        # ==> shape = [25, 2]

        # Kernel with batch_shape [2, 4, 3, 1]
        amplitude = np.array([1., 2.], np.float32).reshape([2, 1, 1, 1])
        length_scale = np.array([1., 2., 3., 4.],
                                np.float32).reshape([1, 4, 1, 1])
        observation_noise_variance = np.array([1e-5, 1e-6, 1e-5],
                                              np.float32).reshape([1, 1, 3, 1])
        batched_index_points = np.stack([index_points] * 6)
        # ==> shape = [6, 25, 2]
        if not self.is_static:
            amplitude = tf1.placeholder_with_default(amplitude, shape=None)
            length_scale = tf1.placeholder_with_default(length_scale,
                                                        shape=None)
            batched_index_points = tf1.placeholder_with_default(
                batched_index_points, shape=None)
        kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
        gp = tfd.GaussianProcess(
            kernel,
            batched_index_points,
            observation_noise_variance=observation_noise_variance,
            jitter=1e-5,
            validate_args=True)

        batch_shape = [2, 4, 3, 6]
        event_shape = [25]
        sample_shape = [5, 3]

        samples = gp.sample(sample_shape, seed=test_util.test_seed())

        if self.is_static or tf.executing_eagerly():
            self.assertAllEqual(gp.batch_shape_tensor(), batch_shape)
            self.assertAllEqual(gp.event_shape_tensor(), event_shape)
            self.assertAllEqual(samples.shape,
                                sample_shape + batch_shape + event_shape)
            self.assertAllEqual(gp.batch_shape, batch_shape)
            self.assertAllEqual(gp.event_shape, event_shape)
            self.assertAllEqual(samples.shape,
                                sample_shape + batch_shape + event_shape)
            self.assertAllEqual(gp.mean().shape, batch_shape + event_shape)
            self.assertAllEqual(gp.variance().shape, batch_shape + event_shape)
        else:
            self.assertAllEqual(self.evaluate(gp.batch_shape_tensor()),
                                batch_shape)
            self.assertAllEqual(self.evaluate(gp.event_shape_tensor()),
                                event_shape)
            self.assertAllEqual(
                self.evaluate(samples).shape,
                sample_shape + batch_shape + event_shape)
            self.assertIsNone(tensorshape_util.rank(samples.shape))
            self.assertIsNone(tensorshape_util.rank(gp.batch_shape))
            self.assertEqual(tensorshape_util.rank(gp.event_shape), 1)
            self.assertIsNone(
                tf.compat.dimension_value(
                    tensorshape_util.dims(gp.event_shape)[0]))
            self.assertAllEqual(self.evaluate(tf.shape(gp.mean())),
                                batch_shape + event_shape)
            self.assertAllEqual(self.evaluate(tf.shape(gp.variance())),
                                batch_shape + event_shape)
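
The batch_shape of [2, 4, 3, 6] checked above is just the NumPy-style broadcast of the
parameter batch shapes against the index-point batch shape. A quick check (requires
NumPy >= 1.20 for broadcast_shapes):

    import numpy as np
    # amplitude [2,1,1,1] x length_scale [1,4,1,1] x noise [1,1,3,1] x index points [6]
    print(np.broadcast_shapes((2, 1, 1, 1), (1, 4, 1, 1), (1, 1, 3, 1), (6,)))
    # ==> (2, 4, 3, 6)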