Code Example #1
  def test_bug170030378(self):
    n_item = 50
    n_rater = 7

    stream = test_util.test_seed_stream()
    weight = self.evaluate(
        tfd.Sample(tfd.Dirichlet([0.25, 0.25]), n_item).sample(seed=stream()))
    mixture_dist = tfd.Categorical(probs=weight)  # batch_shape=[50]

    rater_sensitivity = self.evaluate(
        tfd.Sample(tfd.Beta(5., 1.), n_rater).sample(seed=stream()))
    rater_specificity = self.evaluate(
        tfd.Sample(tfd.Beta(2., 5.), n_rater).sample(seed=stream()))

    probs = tf.stack([rater_sensitivity, rater_specificity])[None, ...]

    components_dist = tfd.BatchBroadcast(  # batch_shape=[50, 2]
        tfd.Independent(tfd.Bernoulli(probs=probs),
                        reinterpreted_batch_ndims=1),
        [50, 2])

    obs_dist = tfd.MixtureSameFamily(mixture_dist, components_dist)

    observed = self.evaluate(obs_dist.sample(seed=stream()))
    mixture_logp = obs_dist.log_prob(observed)

    expected_logp = tf.math.reduce_logsumexp(
        tf.math.log(weight) + components_dist.distribution.log_prob(
            observed[:, None, ...]),
        axis=-1)
    self.assertAllClose(expected_logp, mixture_logp)
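
A minimal sketch (not from the test suite; assumes `tfd = tfp.distributions` as in the examples): `tfd.Sample` turns repeated draws into event dimensions, which is how the test draws all raters' Beta parameters in one call.

  d = tfd.Sample(tfd.Beta(5., 1.), sample_shape=7)
  # d.batch_shape == []; d.event_shape == [7]
  # A single d.sample(seed=...) therefore has shape [7].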
Code Example #2
    def _build_sts(self, observed_time_series=None):
        max_timesteps = 100
        num_features = 3

        prior = tfd.Sample(tfd.Laplace(0., 1.), sample_shape=[num_features])

        # LinearRegression components don't currently take an `observed_time_series`
        # argument, so they can't infer a prior batch shape. This means we have to
        # manually set the batch shape expected by the tests.
        dtype = np.float32
        if observed_time_series is not None:
            observed_time_series_tensor, _ = (
                sts_util.canonicalize_observed_time_series_with_mask(
                    observed_time_series))
            batch_shape = tf.shape(observed_time_series_tensor)[:-2]
            dtype = dtype_util.as_numpy_dtype(
                observed_time_series_tensor.dtype)
            prior = tfd.Sample(tfd.Laplace(tf.zeros(batch_shape, dtype=dtype),
                                           1.),
                               sample_shape=[num_features])

        regression = LinearRegression(design_matrix=np.random.randn(
            max_timesteps, num_features).astype(dtype),
                                      weights_prior=prior)
        return Sum(components=[regression],
                   observed_time_series=observed_time_series)
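
A quick shape check of the manually batched prior (a sketch with a stand-in batch shape of [2]; not from the source):

    prior = tfd.Sample(tfd.Laplace(tf.zeros([2], dtype=tf.float32), 1.),
                       sample_shape=[3])
    # prior.batch_shape == [2]; prior.event_shape == [3]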
Code Example #3
    def test_transformed_affine(self):
        sample_shape = 3
        mvn = tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1)
        aff = tfb.Affine(scale_tril=[[0.75, 0.], [0.05, 0.5]])

        def expected_lp(y):
            x = aff.inverse(y)  # I.e., shaped like tf.random.normal([4, 3, 2]).
            fldj = aff.forward_log_det_jacobian(x, event_ndims=1)
            return tf.reduce_sum(mvn.log_prob(x) - fldj, axis=1)

        # Transform a Sample.
        d = tfd.TransformedDistribution(tfd.Sample(mvn,
                                                   sample_shape,
                                                   validate_args=True),
                                        bijector=aff)
        y = d.sample(4, seed=test_util.test_seed())
        actual_lp = d.log_prob(y)
        self.assertAllEqual((4, ) + (sample_shape, ) + (2, ), y.shape)
        self.assertAllEqual((4, ), actual_lp.shape)
        self.assertAllClose(*self.evaluate([expected_lp(y), actual_lp]),
                            atol=0.,
                            rtol=1e-3)

        # Sample a Transform.
        d = tfd.Sample(tfd.TransformedDistribution(mvn, bijector=aff),
                       sample_shape,
                       validate_args=True)
        y = d.sample(4, seed=test_util.test_seed())
        actual_lp = d.log_prob(y)
        self.assertAllEqual((4, ) + (sample_shape, ) + (2, ), y.shape)
        self.assertAllEqual((4, ), actual_lp.shape)
        self.assertAllClose(*self.evaluate([expected_lp(y), actual_lp]),
                            atol=0.,
                            rtol=1e-3)
Code Example #4
 def model():
     i = yield Root(
         tfd.Sample(tfd.Categorical(probs=probs, dtype=tf.int32),
                    sample_shape=[2]))
     # Note use of scalar `sample_shape` to test expansion of shape
     # to vector.
     j = yield tfd.Sample(tfd.Categorical(probs=probs, dtype=tf.int32),
                          sample_shape=2)
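
As the comment notes, a scalar `sample_shape` is expanded to a length-1 vector. A quick equivalence sketch (not from the test):

  d_scalar = tfd.Sample(tfd.Categorical(probs=[0.5, 0.5]), sample_shape=2)
  d_vector = tfd.Sample(tfd.Categorical(probs=[0.5, 0.5]), sample_shape=[2])
  # Both have batch_shape == [] and event_shape == [2].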
Code Example #5
 def test_kl_divergence(self):
   q_scale = 2.
   p = tfd.Sample(
       tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1), [5, 4],
       validate_args=True)
   q = tfd.Sample(
       tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=2.), 1), [5, 4],
       validate_args=True)
   actual_kl = tfd.kl_divergence(p, q)
    expected_kl = ((5 * 4) *  # Number of sampled copies.
                   (0.5 * q_scale**-2. - 0.5 + np.log(q_scale)) *  # Per-coordinate KL.
                   np.ones([3]) * 2)  # Batch of 3; event size 2.
   self.assertAllClose(expected_kl, self.evaluate(actual_kl))
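
The middle factor above is the closed-form Normal KL, KL(N(0, 1) || N(0, q)) = log(q) + 1 / (2 * q**2) - 1/2; `Sample` multiplies it by the 5 * 4 copies and `Independent` by the event size 2. A one-liner (a sketch, not from the test) confirms the scalar term:

  per_kl = tfd.kl_divergence(tfd.Normal(0., 1.), tfd.Normal(0., 2.))
  # per_kl == 0.5 * 2.**-2. - 0.5 + np.log(2.) ~= 0.3181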
Code Example #6
 def test_broadcast_event(self):
   d = tfd.Sample(tfd.Normal(0, 1, validate_args=True), 4, validate_args=True)
   # Batch of 2 events: works.
   two_batch = d.log_prob(tf.zeros([2, 4]))
   self.assertEqual((2,), self.evaluate(two_batch).shape)
   # Broadcast of event: works.
   self.assertAllEqual(two_batch, d.log_prob(tf.zeros([2, 1])))
Code Example #7
 def test_entropy(self):
   sample_shape = [3, 4]
   mvn = tfd.Independent(tfd.Normal(loc=0, scale=[[0.25, 0.5]]), 1)
   d = tfd.Sample(mvn, sample_shape, validate_args=True)
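    # Entropy is additive over the independent draws: prod(sample_shape) =
    # 3 * 4 = 12 copies of the base distribution's entropy.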
   expected_entropy = 12 * tf.reduce_sum(mvn.distribution.entropy(), axis=-1)
   actual_entropy = d.entropy()
   self.assertAllEqual(*self.evaluate([expected_entropy, actual_entropy]))
Code Example #8
  def test_legacy_dists(self):

    class StatefulNormal(tfd.Normal):

      def _sample_n(self, n, seed=None):
        return self.loc + self.scale * tf.random.normal(
            tf.concat([[n], self.batch_shape_tensor()], axis=0),
            seed=seed)

    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        e    =          tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        loc  =          StatefulNormal(loc=0, scale=2.),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        m    =          tfd.Normal,
        x    =lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12)),
                                   validate_args=True)
    # pylint: enable=bad-whitespace

    with warnings.catch_warnings(record=True) as w:
      warnings.simplefilter('always')
      d.sample(seed=test_util.test_seed())
    self.assertRegex(
        str(w[0].message),
        r'Falling back to stateful sampling for.*of type.*StatefulNormal.*'
        r'component name "loc" and `dist.name` "Normal"',
        msg=w)
Code Example #9
  def testCompareToBijector(self):
      """Demonstrates equivalence between the TransformedDistribution +
      bijector approach and the Autoregressive distribution."""
     sample_shape = np.int32([4, 5])
     batch_shape = np.int32([])
     event_size = np.int32(2)
     batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
     sample0 = tf.zeros(batch_event_shape)
     affine = tfb.ScaleMatvecTriL(
         scale_tril=self._random_scale_tril(event_size), validate_args=True)
     ar = tfd.Autoregressive(self._normal_fn(affine),
                             sample0,
                             validate_args=True)
     ar_flow = tfb.MaskedAutoregressiveFlow(
         is_constant_jacobian=True,
         shift_and_log_scale_fn=lambda x: [None, affine.forward(x)],
         validate_args=True)
     td = tfd.TransformedDistribution(
         # TODO(b/137665504): Use batch-adding meta-distribution to set the batch
         # shape instead of tf.zeros.
         distribution=tfd.Sample(tfd.Normal(tf.zeros(batch_shape), 1.),
                                 [event_size]),
         bijector=ar_flow,
         validate_args=True)
     x_shape = np.concatenate([sample_shape, batch_shape, [event_size]],
                              axis=0)
     x = 2. * self._rng.random_sample(x_shape).astype(np.float32) - 1.
     td_log_prob_, ar_log_prob_ = self.evaluate(
         [td.log_prob(x), ar.log_prob(x)])
     self.assertAllClose(td_log_prob_, ar_log_prob_, atol=0., rtol=1e-6)
Code Example #10
  def test_doc_string(self):
    # Generate data.
    n = 2000
    x2 = np.random.randn(n).astype(dtype=np.float32) * 2.
    x1 = np.random.randn(n).astype(dtype=np.float32) + (x2 * x2 / 4.)
    data = np.stack([x1, x2], axis=-1)

    # Density estimation with MADE.
    made = tfb.AutoregressiveNetwork(params=2, hidden_units=[10, 10])
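    # `params=2` makes the network emit two values per coordinate, which
    # MaskedAutoregressiveFlow below interprets as a shift and a log-scale.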

    distribution = tfd.TransformedDistribution(
        distribution=tfd.Sample(tfd.Normal(0., 1.), [2]),
        bijector=tfb.MaskedAutoregressiveFlow(made))

    # Construct and fit model.
    x_ = tfkl.Input(shape=(2,), dtype=tf.float32)
    log_prob_ = distribution.log_prob(x_)
    model = tfk.Model(x_, log_prob_)

    model.compile(optimizer=tf1.train.AdamOptimizer(),
                  loss=lambda _, log_prob: -log_prob)

    batch_size = 25
    model.fit(x=data,
              y=np.zeros((n, 0), dtype=np.float32),
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1,  # Usually `n // batch_size`.
              shuffle=True,
              verbose=True)

    # Use the fitted distribution.
    self.assertAllEqual((3, 1, 2), distribution.sample((3, 1)).shape)
    self.assertAllEqual(
        (3,), distribution.log_prob(np.ones((3, 2), dtype=np.float32)).shape)
Code Example #11
 def test_misshapen_event(self):
     d = tfd.Sample(tfd.Normal(0, 1, validate_args=True),
                    4,
                    validate_args=True)
      with self.assertRaisesRegex(ValueError,
                                  r'Incompatible shapes for broadcasting'):
         self.evaluate(d.log_prob(tf.zeros([3])))
Code Example #12
 def test_bijector_scalar_underlying_ildj(self):
     d = tfd.Normal(0., 1.)  # Uses Identity bijector, ildj=0.
     bij = tfd.Sample(d, [1]).experimental_default_event_space_bijector()
     ildj = bij.inverse_log_det_jacobian(tf.zeros([1, 1]), event_ndims=1)
     self.assertAllEqual(0., ildj)
     ildj = bij.inverse_log_det_jacobian(tf.zeros([1, 1]), event_ndims=2)
     self.assertAllEqual(0., ildj)
Code Example #13
 def test_bijector_constant_underlying_ildj(self):
     d = tfb.Scale([2., 3.])(tfd.Normal([0., 0.], 1.))
     bij = tfd.Sample(d, [3]).experimental_default_event_space_bijector()
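      # Each of the 3 sampled copies contributes ildj = -log(scale), so
      # event_ndims=1 keeps the batch of 2 and event_ndims=2 also sums over it.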
     ildj = bij.inverse_log_det_jacobian(tf.zeros([2, 3]), event_ndims=1)
     self.assertAllClose(-np.log([2., 3.]) * 3, ildj)
     ildj = bij.inverse_log_det_jacobian(tf.zeros([2, 3]), event_ndims=2)
     self.assertAllClose(-np.log([2., 3.]).sum() * 3, ildj)
Code Example #14
 def test_mixed_scalar(self):
   s = tfd.Sample(tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1),
                  3, validate_args=False)
   x = s.sample(4, seed=test_util.test_seed())
   lp = s.log_prob(x)
   self.assertEqual((4, 3, 2), x.shape)
   self.assertEqual((4,), lp.shape)
Code Example #15
File: nuts_test.py  Project: qoffee/probability
  def testDivergence(self):
    """Neal's funnel with large step size."""
    strm = tfp_test_util.test_seed_stream()
    neals_funnel = tfd.JointDistributionSequential(
        [
            tfd.Normal(loc=0., scale=3.),  # b0
            lambda y: tfd.Sample(  # pylint: disable=g-long-lambda
                tfd.Normal(loc=0., scale=tf.math.exp(y / 2)),
                sample_shape=9),
        ],
        validate_args=True
    )

    @tf.function(autograph=False)
    def run_chain_and_get_divergence():
      nchains = 5
      init_states = neals_funnel.sample(nchains, seed=strm())
      _, has_divergence = tfp.mcmc.sample_chain(
          num_results=100,
          kernel=tfp.mcmc.NoUTurnSampler(
              target_log_prob_fn=lambda *args: neals_funnel.log_prob(args),
              step_size=[1., 1.],
              parallel_iterations=1,
              seed=strm()),
          current_state=init_states,
          trace_fn=lambda _, pkr: pkr.has_divergence,
          parallel_iterations=1)
      return tf.reduce_sum(tf.cast(has_divergence, dtype=tf.int32))

    divergence_count = self.evaluate(run_chain_and_get_divergence())

    # Test that we observe a fair amount of divergence.
    self.assertAllGreater(divergence_count, 100)
Code Example #16
  def test_namedtuple_sample_log_prob(self):
    Model = collections.namedtuple('Model', ['e', 'scale', 'loc', 'm', 'x'])  # pylint: disable=invalid-name
    # pylint: disable=bad-whitespace
    model = Model(
        e    =          tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        loc  =          tfd.Normal(loc=0, scale=2.),
        m    =          tfd.Normal,
        x    =lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12))
    # pylint: enable=bad-whitespace
    d = tfd.JointDistributionNamed(model, validate_args=True)

    self.assertEqual(
        (
            ('e', ()),
            ('scale', ('e',)),
            ('loc', ()),
            ('m', ('loc', 'scale')),
            ('x', ('m',)),
        ),
        d.resolve_graph())

    xs = d.sample(seed=test_util.test_seed())
    self.assertLen(xs, 5)
    # We'll verify the shapes work as intended when we plumb these back into the
    # respective log_probs.

    ds, _ = d.sample_distributions(value=xs, seed=test_util.test_seed())
    self.assertLen(ds, 5)
    self.assertIsInstance(ds.e, tfd.Independent)
    self.assertIsInstance(ds.scale, tfd.Gamma)
    self.assertIsInstance(ds.loc, tfd.Normal)
    self.assertIsInstance(ds.m, tfd.Normal)
    self.assertIsInstance(ds.x, tfd.Sample)

    # Static properties.
    self.assertAllEqual(Model(e=tf.float32, scale=tf.float32, loc=tf.float32,
                              m=tf.float32, x=tf.int32),
                        d.dtype)

    batch_shape_tensor_, event_shape_tensor_ = self.evaluate([
        d.batch_shape_tensor(), d.event_shape_tensor()])

    expected_batch_shape = Model(e=[], scale=[], loc=[], m=[], x=[])
    for (expected, actual_tensorshape, actual_shape_tensor_) in zip(
        expected_batch_shape, d.batch_shape, batch_shape_tensor_):
      self.assertAllEqual(expected, actual_tensorshape)
      self.assertAllEqual(expected, actual_shape_tensor_)

    expected_event_shape = Model(e=[2], scale=[], loc=[], m=[], x=[12])
    for (expected, actual_tensorshape, actual_shape_tensor_) in zip(
        expected_event_shape, d.event_shape, event_shape_tensor_):
      self.assertAllEqual(expected, actual_tensorshape)
      self.assertAllEqual(expected, actual_shape_tensor_)

    expected_jlp = sum(d.log_prob(x) for d, x in zip(ds, xs))
    actual_jlp = d.log_prob(xs)
    self.assertAllClose(*self.evaluate([expected_jlp, actual_jlp]),
                        atol=0., rtol=1e-4)
Code Example #17
    def test_doc_string_2(self):
        n = 2000
        c = np.r_[np.zeros(n // 2), np.ones(n // 2)]
        mean_0, mean_1 = 0, 5
        x = np.r_[np.random.randn(n // 2).astype(dtype=np.float32) + mean_0,
                  np.random.randn(n // 2).astype(dtype=np.float32) + mean_1]
        shuffle_idxs = np.arange(n)
        np.random.shuffle(shuffle_idxs)
        x = x[shuffle_idxs]
        c = c[shuffle_idxs]

        seed = test_util.test_seed_stream()

        # Density estimation with MADE.
        made = tfb.AutoregressiveNetwork(
            params=2,
            hidden_units=[1],
            event_shape=(1, ),
            kernel_initializer=tfk.initializers.VarianceScaling(
                0.1, seed=seed() % 2**31),
            conditional=True,
            conditional_event_shape=(1, ))

        distribution = tfd.TransformedDistribution(
            distribution=tfd.Sample(tfd.Normal(loc=0., scale=1.),
                                    sample_shape=[1]),
            bijector=tfb.MaskedAutoregressiveFlow(made))

        # Construct and fit model.
        x_ = tfkl.Input(shape=(1, ), dtype=tf.float32)
        c_ = tfkl.Input(shape=(1, ), dtype=tf.float32)
        log_prob_ = distribution.log_prob(
            x_, bijector_kwargs={"conditional_input": c_})
        model = tfk.Model([x_, c_], log_prob_)

        model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1),
                      loss=lambda _, log_prob: -log_prob)

        batch_size = 25
        model.fit(x=[x, c],
                  y=np.zeros((n, 0), dtype=np.float32),
                  batch_size=batch_size,
                  epochs=3,
                  steps_per_epoch=n // batch_size,
                  shuffle=False,
                  verbose=True)

        # Use the fitted distribution to sample, conditioned on c = 1.
        n_samples = 1000
        cond = 1
        samples = distribution.sample(
            (n_samples,),
            bijector_kwargs={
                "conditional_input": cond * np.ones((n_samples, 1))},
            seed=seed())
        # Assert mean is close to conditional mean
        self.assertAllMeansClose(samples[..., 0], mean_1, axis=0, atol=1.)
Code Example #18
 def test_summary_statistic(self, attr):
     sample_shape = [5, 4]
     mvn = tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1)
     d = tfd.Sample(mvn, sample_shape, validate_args=True)
     self.assertEqual((3, ), d.batch_shape)
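      # The base mvn statistic has shape [3, 2]; Sample tiles it across the
      # [5, 4] sample dims, giving the expected [3, 5, 4, 2].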
     expected_stat = (getattr(mvn, attr)()[:, tf.newaxis, tf.newaxis, :] *
                      tf.ones([3, 5, 4, 2]))
     actual_stat = getattr(d, attr)()
     self.assertAllEqual(*self.evaluate([expected_stat, actual_stat]))
Code Example #19
 def test_kahan_precision(self, jit=False):
     maybe_jit = lambda f: f
     if jit:
         self.skip_if_no_xla()
         maybe_jit = tf.function(experimental_compile=True)
     stream = test_util.test_seed_stream()
     n = 20_000
     samps = tfd.Poisson(rate=1.).sample(n, seed=stream())
     log_rate = tfd.Normal(0, .2).sample(seed=stream())
     pois = tfd.Poisson(log_rate=log_rate)
     lp = maybe_jit(
         tfd.Sample(pois, n,
                    experimental_use_kahan_sum=True).log_prob)(samps)
     pois64 = tfd.Poisson(log_rate=tf.cast(log_rate, tf.float64))
     lp64 = tfd.Sample(pois64, n).log_prob(tf.cast(samps, tf.float64))
     # Evaluate together to ensure we use the same samples.
     lp, lp64 = self.evaluate((tf.cast(lp, tf.float64), lp64))
      # Without `experimental_use_kahan_sum`, this check fails in ~75% of
      # --vary_seed runs on CPU and 0-80% on GPU.
     self.assertAllClose(lp64, lp, rtol=0., atol=.01)
Code Example #20
  def test_bijector_shapes(self):
    d = tfd.Sample(tfd.Uniform(tf.zeros([5]), 1.), 2)
    b = d.experimental_default_event_space_bijector()
    self.assertEqual((2,), d.event_shape)
    self.assertEqual((2,), b.inverse_event_shape((2,)))
    self.assertEqual((2,), b.forward_event_shape((2,)))
    self.assertEqual((5, 2), b.forward_event_shape((5, 2)))
    self.assertEqual((5, 2), b.inverse_event_shape((5, 2)))
    self.assertEqual((3, 5, 2), b.inverse_event_shape((3, 5, 2)))
    self.assertEqual((3, 5, 2), b.forward_event_shape((3, 5, 2)))

    d = tfd.Sample(tfd.CholeskyLKJ(4, concentration=tf.ones([5])), 2)
    b = d.experimental_default_event_space_bijector()
    self.assertEqual((2, 4, 4), d.event_shape)
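    # A 4x4 correlation Cholesky factor has 4 * 3 / 2 = 6 unconstrained
    # entries, so the bijector maps between [..., 6] and [..., 4, 4].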
    dim = (4 * 3) // 2
    self.assertEqual((5, 2, dim), b.inverse_event_shape((5, 2, 4, 4)))
    self.assertEqual((5, 2, 4, 4), b.forward_event_shape((5, 2, dim)))
    self.assertEqual((3, 5, 2, dim), b.inverse_event_shape((3, 5, 2, 4, 4)))
    self.assertEqual((3, 5, 2, 4, 4), b.forward_event_shape((3, 5, 2, dim)))
Code Example #21
def get_base_distribution(flat_event_size, dtype=DEFAULT_FLOAT_DTYPE_TF):
    base_standard_dist = tfd.JointDistributionSequential([
        tfd.Sample(
            tfd.Normal(
                loc=tf.constant(0.0, dtype=dtype),
                scale=tf.constant(1.0, dtype=dtype),
            ),
            s,
        ) for s in flat_event_size
    ])
    return base_standard_dist
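
A hypothetical usage of the factory above (the event sizes are assumptions, not from the source; passing `dtype` explicitly also sidesteps the module-level `DEFAULT_FLOAT_DTYPE_TF` default):

base = get_base_distribution([3, 2], dtype=tf.float32)
# `base` is a JointDistributionSequential of two Sample distributions; a
# single draw is a list of tensors with shapes [3] and [2].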
Code Example #22
 def test_everything_scalar(self):
   s = tfd.Sample(tfd.Normal(loc=0, scale=1), 5, validate_args=True)
   x = s.sample(seed=test_util.test_seed())
   actual_lp = s.log_prob(x)
    # Sample.log_prob will reduce over the sample dims, ie, dim [0]
    # corresponding to sample_shape [5].
   expected_lp = tf.reduce_sum(s.distribution.log_prob(x), axis=0)
   x_, actual_lp_, expected_lp_ = self.evaluate([x, actual_lp, expected_lp])
   self.assertEqual((5,), x_.shape)
   self.assertEqual((), actual_lp_.shape)
   self.assertAllClose(expected_lp_, actual_lp_, atol=0, rtol=1e-3)
Code Example #23
 def test_docstring_example(self):
   stream = test_util.test_seed_stream()
   loc = tfp.random.spherical_uniform([10], 3, seed=stream())
   components_dist = tfd.VonMisesFisher(mean_direction=loc, concentration=50.)
   mixture_dist = tfd.Categorical(
       logits=tf.random.uniform([500, 10], seed=stream()))
   obs_dist = tfd.MixtureSameFamily(
       mixture_dist, tfd.BatchBroadcast(components_dist, [500, 10]))
   test_sites = tfp.random.spherical_uniform([20], 3, seed=stream())
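    # obs_dist has batch_shape [500] and event_shape [3]; Sample(..., 20)
    # makes the event [20, 3], so the [20, 3] sites yield a [500] log-prob.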
   lp = tfd.Sample(obs_dist, 20).log_prob(test_sites)
   self.assertEqual([500], lp.shape)
   self.evaluate(lp)
Code Example #24
  def testMVN(self, event_shape, shift, tril, dynamic_shape):
    if dynamic_shape and tf.executing_eagerly():
      self.skipTest('Eager execution does not support dynamic shape.')
    as_tensor = tf.convert_to_tensor
    if dynamic_shape:
      as_tensor = lambda v, name: tf1.placeholder_with_default(  # pylint: disable=g-long-lambda
          v, shape=None, name='dynamic_' + name)

    fake_mvn = tfd.TransformedDistribution(
        distribution=tfd.Sample(
            tfd.Normal(loc=as_tensor(0., name='loc'),
                       scale=as_tensor(1., name='scale'),
                       validate_args=True),
            sample_shape=as_tensor(np.int32(event_shape), name='event_shape')),
        bijector=tfb.Chain(
            [tfb.Shift(shift=as_tensor(shift, name='shift')),
             tfb.ScaleMatvecTriL(scale_tril=as_tensor(tril, name='scale_tril'))
             ]), validate_args=True)

    base_dist = fake_mvn.distribution
    expected_mean = tf.linalg.matvec(
        tril, tf.broadcast_to(base_dist.mean(), shift.shape)) + shift
    expected_cov = tf.linalg.matmul(
        tril,
        tf.matmul(
            tf.linalg.diag(tf.broadcast_to(base_dist.variance(), shift.shape)),
            tril,
            adjoint_b=True))
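    # For y = tril @ x + shift with iid standard-normal x, the mean is `shift`
    # and the covariance is tril @ tril^T.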
    expected_batch_shape = ps.shape(expected_mean)[:-1]

    if dynamic_shape:
      self.assertAllEqual(tf.TensorShape(None), fake_mvn.event_shape)
      self.assertAllEqual(tf.TensorShape(None), fake_mvn.batch_shape)
    else:
      self.assertAllEqual(event_shape, fake_mvn.event_shape)
      self.assertAllEqual(expected_batch_shape, fake_mvn.batch_shape)

    # Ensure sample works by checking first, second moments.
    num_samples = 7e3
    y = fake_mvn.sample(int(num_samples), seed=test_util.test_seed())
    x = y[0:5, ...]
    self.assertAllClose(expected_mean, tf.reduce_mean(y, axis=0),
                        atol=0.1, rtol=0.1)
    self.assertAllClose(expected_cov, tfp.stats.covariance(y, sample_axis=0),
                        atol=0., rtol=0.1)

    self.assertAllEqual(event_shape, fake_mvn.event_shape_tensor())
    self.assertAllEqual(expected_batch_shape, fake_mvn.batch_shape_tensor())
    self.assertAllEqual(
        ps.concat([[5], expected_batch_shape, event_shape], axis=0),
        ps.shape(x))
    self.assertAllClose(expected_mean, fake_mvn.mean())
Code Example #25
        def _make_kernel(**kwargs):
            running_variance = tfp.experimental.stats.RunningVariance.from_stats(
                num_samples=10., mean=tf.zeros(5), variance=tf.ones(5))

            kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
                target_log_prob_fn=tfd.Sample(tfd.Normal(0., 1.), 5).log_prob,
                num_leapfrog_steps=2,
                step_size=1.)
            kernel = tfp.experimental.mcmc.DiagonalMassMatrixAdaptation(
                inner_kernel=kernel,
                initial_running_variance=running_variance,
                **kwargs)
            return kernel
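
A hypothetical way to drive the factory above (the argument values are assumptions; `tfp.mcmc.sample_chain` is the standard driver):

        kernel = _make_kernel()
        samples = tfp.mcmc.sample_chain(
            num_results=50,
            current_state=tf.zeros(5),
            kernel=kernel,
            trace_fn=None,
            seed=test_util.test_seed())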
Code Example #26
    def test_transformed_exp(self):
        sample_shape = 3
        mvn = tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1)
        exp = tfb.Exp()

        def expected_lp(y):
            x = exp.inverse(y)  # I.e., shaped like tf.random.normal([4, 3, 2]).
            fldj = exp.forward_log_det_jacobian(x, event_ndims=1)
            return tf.reduce_sum(mvn.log_prob(x) - fldj, axis=1)

        # Transform a Sample.
        d = tfd.TransformedDistribution(tfd.Sample(mvn,
                                                   sample_shape,
                                                   validate_args=True),
                                        bijector=exp)
        y = d.sample(4, seed=test_util.test_seed())
        actual_lp = d.log_prob(y)
        self.assertAllEqual((4, ) + (sample_shape, ) + (2, ), y.shape)
        self.assertAllEqual((4, ), actual_lp.shape)
        # `TransformedDistribution` scales the jacobian by the number of
        # sampled copies; `expected_lp` matches this by summing `fldj` over
        # the sample axis.
        self.assertAllClose(*self.evaluate([expected_lp(y), actual_lp]),
                            atol=0.,
                            rtol=1e-3)

        # Sample a Transform.
        d = tfd.Sample(tfd.TransformedDistribution(mvn, bijector=exp),
                       sample_shape,
                       validate_args=True)
        y = d.sample(4, seed=test_util.test_seed())
        actual_lp = d.log_prob(y)
        self.assertAllEqual((4, ) + (sample_shape, ) + (2, ), y.shape)
        self.assertAllEqual((4, ), actual_lp.shape)
        # Here each sampled copy is transformed independently, so summing the
        # per-copy `fldj` over the sample axis again matches `actual_lp`.
        self.assertAllClose(*self.evaluate([expected_lp(y), actual_lp]),
                            atol=0.,
                            rtol=1e-3)
Code Example #27
 def test_variable_sample_shape_exception(self):
   loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))
   scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))
   sample_shape = tf.Variable([[1, 2]], shape=tf.TensorShape(None))
   with self.assertRaisesWithPredicateMatch(
       Exception,
       'Argument `sample_shape` must be either a scalar or a vector.'):
     dist = tfd.Sample(
         tfd.Independent(tfd.Logistic(loc=loc, scale=scale),
                         reinterpreted_batch_ndims=1),
         sample_shape=sample_shape,
         validate_args=True)
     self.evaluate([v.initializer for v in dist.trainable_variables])
     self.evaluate(dist.mean())
Code Example #28
  def test_variable_shape_change(self):
    loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))
    scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))
    # In real life, you'd almost always want `sample_shape` to be
    # `trainable=False`.
    sample_shape = tf.Variable([1, 2], shape=tf.TensorShape(None))
    dist = tfd.Sample(
        tfd.Independent(tfd.Logistic(loc=loc, scale=scale),
                        reinterpreted_batch_ndims=1),
        sample_shape=sample_shape,
        validate_args=True)
    self.evaluate([v.initializer for v in dist.trainable_variables])

    x = dist.mean()
    y = dist.sample([7, 2], seed=test_util.test_seed())
    loss_x = -dist.log_prob(x)
    loss_0 = -dist.log_prob(0.)
    batch_shape = dist.batch_shape_tensor()
    event_shape = dist.event_shape_tensor()
    [x_, y_, loss_x_, loss_0_, batch_shape_, event_shape_] = self.evaluate([
        x, y, loss_x, loss_0, batch_shape, event_shape])
    self.assertAllEqual([4, 5, 1, 2, 3], x_.shape)
    self.assertAllEqual([7, 2, 4, 5, 1, 2, 3], y_.shape)
    self.assertAllEqual([4, 5], loss_x_.shape)
    self.assertAllEqual([4, 5], loss_0_.shape)
    self.assertAllEqual([4, 5], batch_shape_)
    self.assertAllEqual([1, 2, 3], event_shape_)
    self.assertLen(dist.trainable_variables, 3)

    with tf.control_dependencies([
        loc.assign(tf.zeros([])),
        scale.assign(tf.ones([3, 1, 2])),
        sample_shape.assign(6),
    ]):
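      # After the assigns: Logistic batch is [3, 1, 2], Independent absorbs
      # the last dim, and sample_shape=6 gives batch [3, 1], event [6, 2].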
      x = dist.mean()
      y = dist.sample([7, 2], seed=test_util.test_seed())
      loss_x = -dist.log_prob(x)
      loss_0 = -dist.log_prob(0.)
      batch_shape = dist.batch_shape_tensor()
      event_shape = dist.event_shape_tensor()
    [x_, y_, loss_x_, loss_0_, batch_shape_, event_shape_] = self.evaluate([
        x, y, loss_x, loss_0, batch_shape, event_shape])
    self.assertAllEqual([3, 1, 6, 2], x_.shape)
    self.assertAllEqual([7, 2, 3, 1, 6, 2], y_.shape)
    self.assertAllEqual([3, 1], loss_x_.shape)
    self.assertAllEqual([3, 1], loss_0_.shape)
    self.assertAllEqual([3, 1], batch_shape_)
    self.assertAllEqual([6, 2], event_shape_)
    self.assertLen(dist.trainable_variables, 3)
Code Example #29
 def one_term(event_shape, event_shape_tensor, batch_shape,
              batch_shape_tensor, dtype):
     if not tensorshape_util.is_fully_defined(event_shape):
         event_shape = event_shape_tensor
     result = tfd.Sample(tfd.Uniform(low=tf.constant(-2., dtype=dtype),
                                     high=tf.constant(2., dtype=dtype)),
                         sample_shape=event_shape)
     if not tensorshape_util.is_fully_defined(batch_shape):
         batch_shape = batch_shape_tensor
         needs_bcast = True
     else:  # Only batch broadcast when batch ndims > 0.
         needs_bcast = bool(tensorshape_util.as_list(batch_shape))
     if needs_bcast:
         result = tfd.BatchBroadcast(result, batch_shape)
     return result
Code Example #30
 def test_everything_nonscalar(self):
   s = tfd.Sample(
       tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1), [5, 4],
       validate_args=True)
   x = s.sample([6, 1], seed=test_util.test_seed())
   actual_lp = s.log_prob(x)
    # Sample.log_prob will reduce over the event dims of the transposed
    # tensor below, ie, dims [2, 3, 5] with sizes concat([[5, 4], [2]]).
   expected_lp = tf.reduce_sum(
       s.distribution.log_prob(tf.transpose(a=x, perm=[0, 1, 3, 4, 2, 5])),
       axis=[2, 3])
   x_, actual_lp_, expected_lp_ = self.evaluate([x, actual_lp, expected_lp])
   self.assertEqual((6, 1, 3, 5, 4, 2), x_.shape)
   self.assertEqual((6, 1, 3), actual_lp_.shape)
   self.assertAllClose(expected_lp_, actual_lp_, atol=0, rtol=1e-3)