# Imports assumed by the snippets below (the methods come from a TFP test
# class, hence `self`); aliases follow TensorFlow Probability conventions:
import warnings

import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

from tensorflow_probability.python.internal import test_util

tfb = tfp.bijectors
tfd = tfp.distributions
tfk = tf.keras
tfkl = tf.keras.layers


    def test_doc_string_images_case_2(self):
        # Generate fake images.
        images = np.random.choice([0, 1], size=(100, 8, 8, 3))
        n, width, height, channels = images.shape

        # Reshape images to achieve desired autoregressivity.
        reshaped_images = np.transpose(np.reshape(
            images, [n, width * height, channels]),
                                       axes=[0, 2, 1])

        made = tfb.AutoregressiveNetwork(params=1,
                                         event_shape=[width * height],
                                         hidden_units=[20, 20],
                                         activation="relu")

        # Density estimation with MADE.
        #
        # NOTE: Parameterize an autoregressive distribution over an event_shape of
        # [channels, width * height], with univariate Bernoulli conditional
        # distributions.
        distribution = tfd.Autoregressive(
            lambda x: tfd.Independent(  # pylint: disable=g-long-lambda
                tfd.Bernoulli(logits=tf.unstack(made(x), axis=-1)[0],
                              dtype=tf.float32),
                reinterpreted_batch_ndims=2),
            sample0=tf.zeros([channels, width * height], dtype=tf.float32))

        # Construct and fit model.
        x_ = tfkl.Input(shape=(channels, width * height), dtype=tf.float32)
        log_prob_ = distribution.log_prob(x_)
        model = tfk.Model(x_, log_prob_)

        model.compile(optimizer=tf1.train.AdamOptimizer(),
                      loss=lambda _, log_prob: -log_prob)

        batch_size = 10
        model.fit(
            x=reshaped_images,
            y=np.zeros((n, 0), dtype=np.float32),
            batch_size=batch_size,
            epochs=1,
            steps_per_epoch=1,  # Usually `n // batch_size`.
            shuffle=True,
            verbose=True)

        # Use the fitted distribution.
        self.assertAllEqual((7, channels, width * height),
                            distribution.sample(7).shape)
        self.assertAllEqual((n, ),
                            distribution.log_prob(reshaped_images).shape)
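
        # A follow-on usage sketch (not part of the original test): draw new
        # pixel sequences from the fitted distribution, then invert the
        # transpose/reshape above to recover images of shape
        # [m, width, height, channels]. `m` is an arbitrary sample count.
        m = 4
        sampled = distribution.sample(m)  # shape: [m, channels, width * height]
        sampled_images = tf.reshape(
            tf.transpose(sampled, perm=[0, 2, 1]),
            [m, width, height, channels])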
Example #2
    def testVariableNumSteps(self):
        def fn(sample=0.):
            return tfd.Normal(loc=tf.zeros_like(sample), scale=1.)

        num_steps = tf.Variable(4, dtype=tf.int64)
        self.evaluate(num_steps.initializer)

        ar = tfd.Autoregressive(fn, num_steps=num_steps, validate_args=True)
        sample = ar.sample(seed=test_util.test_seed())
        log_prob = ar.log_prob(sample)
        self.assertAllEqual([], sample.shape)
        self.assertAllEqual([], log_prob.shape)

        [sample, log_prob] = self.evaluate([sample, log_prob])
        self.assertAllEqual([], sample.shape)
        self.assertAllEqual([], log_prob.shape)
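
        # For reference, sampling from `tfd.Autoregressive` works roughly like
        # this (a sketch of the documented semantics, not the implementation):
        #   samp = sample0 if sample0 is not None else fn().sample()
        #   for _ in range(num_steps):
        #     samp = fn(samp).sample()
        #   return samp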
Example #3
    def testSampleAndLogProbConsistency(self):
        batch_shape = np.int32([])
        event_size = 2
        batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
        sample0 = tf.zeros(batch_event_shape)
        affine = tfb.ScaleMatvecTriL(
            scale_tril=self._random_scale_tril(event_size), validate_args=True)
        ar = tfd.Autoregressive(
            self._normal_fn(affine), sample0, validate_args=True)
        self.run_test_sample_consistent_log_prob(
            self.evaluate,
            ar,
            num_samples=int(1e6),
            radius=1.,
            center=0.,
            rtol=0.01,
            seed=test_util.test_seed())
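
        # `run_test_sample_consistent_log_prob` (a TFP test helper) checks
        # sample/log_prob consistency: the probability mass inside a ball of
        # `radius` around `center`, estimated by Monte Carlo from
        # `ar.sample(num_samples)`, must match an estimate derived from
        # `ar.log_prob` to within `rtol`.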
Example #4
    def testStatefulDistFn(self):
        class StatefulNormal(tfd.Distribution):
            def __init__(self, loc):
                self._loc = tf.convert_to_tensor(loc)
                super(StatefulNormal, self).__init__(
                    dtype=tf.float32,
                    reparameterization_type=tfd.FULLY_REPARAMETERIZED,
                    validate_args=False,
                    allow_nan_stats=False)

            def _batch_shape(self):
                return self._loc.shape

            def _event_shape(self):
                return []

            def _sample_n(self, n, seed=None):
                return self._loc + tf.random.normal(
                    tf.concat([[n], tf.shape(self._loc)], axis=0), seed=seed)

        def dist_fn(s):
            return StatefulNormal(loc=s)

        ar = tfd.Autoregressive(dist_fn,
                                sample0=tfd.Normal(0., 1.).sample(
                                    7, seed=test_util.test_seed()),
                                num_steps=7)

        with warnings.catch_warnings(record=True) as triggered:
            self.evaluate(ar.sample(seed=test_util.test_seed()))
        self.assertTrue(
            any('Falling back to stateful sampling for `distribution_fn(sample0)`'
                in str(warning.message) for warning in triggered))

        num_steps = tf.Variable(9)
        self.evaluate(num_steps.initializer)
        with warnings.catch_warnings(record=True) as triggered:
            self.evaluate(
                ar.copy(num_steps=num_steps).sample(
                    seed=test_util.test_seed()))
        self.assertTrue(
            any('Falling back to stateful sampling for `distribution_fn(sample0)`'
                in str(warning.message) for warning in triggered))
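
        # Context for the assertions above (a summary, not test logic): the
        # custom `StatefulNormal` draws via `tf.random.normal`, which cannot
        # consume TFP's stateless seed tensors. `Autoregressive` first attempts
        # stateless sampling through `distribution_fn`, and when that fails it
        # falls back to stateful sampling and emits the warning checked here.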
Example #5
    def testVariableNumStepsAndEventShape(self):
        loc = tf.Variable(np.zeros((4, 2)), shape=tf.TensorShape(None))
        event_ndims = tf.Variable(1)

        def fn(sample=None):
            if sample is not None:
                loc_param = tf.broadcast_to(loc, shape=tf.shape(sample))
            else:
                loc_param = loc
            return tfd.Independent(tfd.Normal(loc=loc_param, scale=1.),
                                   reinterpreted_batch_ndims=event_ndims)

        num_steps = tf.Variable(7)
        self.evaluate([v.initializer for v in [loc, num_steps, event_ndims]])

        ar = tfd.Autoregressive(fn, num_steps=num_steps, validate_args=True)
        sample = self.evaluate(ar.sample(3, seed=test_util.test_seed()))
        self.assertAllEqual([3, 4, 2], sample.shape)
        self.assertAllEqual([2], self.evaluate(ar.event_shape_tensor()))
        self.assertAllEqual([4], self.evaluate(ar.batch_shape_tensor()))
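
        # Shape bookkeeping for the asserts above: `fn()` returns a
        # distribution over `loc`'s shape (4, 2) with
        # `reinterpreted_batch_ndims=1`, so the trailing [2] is the event
        # shape and [4] the batch shape; drawing 3 samples yields [3, 4, 2].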
Example #6
    def testBatchAndEventShape(self):
        loc = tf.Variable(np.zeros((5, 1, 3)), shape=tf.TensorShape(None))
        event_ndims = tf.Variable(2)

        def fn(sample):
            return tfd.Independent(
                tfd.Normal(loc=loc + 0. * sample, scale=1.),
                reinterpreted_batch_ndims=tf.convert_to_tensor(event_ndims))

        self.evaluate([v.initializer for v in [loc, event_ndims]])

        zero = tf.convert_to_tensor(0., dtype=loc.dtype)
        ar = tfd.Autoregressive(fn,
                                num_steps=7,
                                sample0=zero,
                                validate_args=True)

        # NOTE: `ar.event_shape` and `ar.batch_shape` are not known statically,
        # even though the output of `ar.distribution_fn(...)` has statically-known
        # event shape and batch shape.
        self.assertEqual(ar.batch_shape, tf.TensorShape(None))
        self.assertEqual(ar.event_shape, tf.TensorShape(None))
        self.assertAllEqual([5], self.evaluate(ar.batch_shape_tensor()))
        self.assertAllEqual([1, 3], self.evaluate(ar.event_shape_tensor()))
        if tf.executing_eagerly():
            self.assertEqual(tf.TensorShape([5]),
                             ar.distribution_fn(zero).batch_shape)
            self.assertEqual(tf.TensorShape([1, 3]),
                             ar.distribution_fn(zero).event_shape)

        with tf.control_dependencies(
            [loc.assign(np.zeros((4, 7))),
             event_ndims.assign(1)]):
            self.assertAllEqual([4], self.evaluate(ar.batch_shape_tensor()))
            self.assertAllEqual([7], self.evaluate(ar.event_shape_tensor()))
            if tf.executing_eagerly():
                self.assertEqual(tf.TensorShape([4]),
                                 ar.distribution_fn(zero).batch_shape)
                self.assertEqual(tf.TensorShape([7]),
                                 ar.distribution_fn(zero).event_shape)
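
        # The static shapes are unknown because both `loc`'s shape and
        # `event_ndims` are `tf.Variable`s: the batch/event split can change
        # between calls, as the reassignment under `tf.control_dependencies`
        # above demonstrates.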
Example #7
  def testSampleIndependenceWithoutSeedBug(self):
    # Under eager, there was a bug where subsequent samples for position 0 would
    # be erroneously independent of the first sample when a seed was not
    # provided.

    # A pithy example: A simple 1-Markov autoregressive sequence for which the
    # first frame is either 0 or 1 with equal probability and all subsequent
    # frames copy the previous frame. Thus the overall sequence-level
    # distribution is 0000 with probability 0.5 and 1111 with probability 0.5.
    def distribution_fn(sample):
      num_frames = sample.shape[-1]
      mask = tf.one_hot(0, num_frames)[:, tf.newaxis]
      probs = tf.roll(tf.one_hot(sample, 3), shift=1, axis=-2)
      probs = probs * (1.0 - mask) + tf.convert_to_tensor([0.5, 0.5, 0]) * mask
      return tfd.Independent(tfd.Categorical(probs=probs),
                             reinterpreted_batch_ndims=1)
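
    # How `distribution_fn` encodes the copy chain: `tf.roll` shifts the
    # one-hot encoding of the previous draw one frame forward, so frame i
    # copies frame i - 1 with probability 1; the mask then overwrites frame 0
    # with the [0.5, 0.5, 0] prior over {0, 1}.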

    ar = tfd.Autoregressive(distribution_fn,
                            sample0=tf.constant([2, 2, 2, 2]),
                            num_steps=4)
    samps = self.evaluate(ar.sample(10))
    for s in samps:
      self.assertIn(np.mean(s), (0., 1.), msg=str(s))
Example #8
    def testCompareToBijector(self):
        """Demonstrates equivalence between the TransformedDistribution/Bijector
        approach and the Autoregressive distribution."""
        sample_shape = np.int32([4, 5])
        batch_shape = np.int32([])
        event_size = np.int32(2)
        batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
        sample0 = tf.zeros(batch_event_shape)
        # `tfb.Affine` has been removed from TFP; `tfb.ScaleMatvecTriL` is the
        # current way to build a lower-triangular scale bijector.
        affine = tfb.ScaleMatvecTriL(
            scale_tril=self._random_scale_tril(event_size))
        ar = tfd.Autoregressive(
            self._normal_fn(affine), sample0, validate_args=True)
        ar_flow = tfb.MaskedAutoregressiveFlow(
            is_constant_jacobian=True,
            shift_and_log_scale_fn=lambda x: [None, affine.forward(x)],
            validate_args=True)
        # `TransformedDistribution` no longer takes `event_shape`/`batch_shape`
        # arguments; lift the base distribution to the desired event shape with
        # `tfd.Sample` instead (`batch_shape` is empty here).
        td = tfd.TransformedDistribution(
            distribution=tfd.Sample(tfd.Normal(loc=0., scale=1.),
                                    sample_shape=[event_size]),
            bijector=ar_flow,
            validate_args=True)
        x_shape = np.concatenate(
            [sample_shape, batch_shape, [event_size]], axis=0)
        x = 2. * self._rng.random_sample(x_shape).astype(np.float32) - 1.
        td_log_prob_, ar_log_prob_ = self.evaluate(
            [td.log_prob(x), ar.log_prob(x)])
        self.assertAllClose(td_log_prob_, ar_log_prob_, atol=0., rtol=1e-6)
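
        # Why these agree: `self._normal_fn(affine)` (defined elsewhere in the
        # test class) builds the autoregressive conditionals from `affine`,
        # and `ar_flow` routes the same `affine.forward` through its
        # `shift_and_log_scale_fn`, so both constructions assign identical
        # densities to any `x`.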