Example #1
  def test_laplace_tail_mass_for_small_inputs(self):
    # A small Laplace tail mass should leave rate estimates for in-range
    # inputs essentially unchanged.
    prior = deep_factorized.NoisyDeepFactorized(batch_shape=(1,))
    em1 = universal.UniversalBatchedEntropyModel(
        prior, coding_rank=1, compression=True, laplace_tail_mass=1e-3)
    em2 = universal.UniversalBatchedEntropyModel(
        prior, coding_rank=1, compression=True)
    x = tf.linspace(-10.0, 10.0, 50)
    _, bits1 = em1(x[..., None])
    _, bits2 = em2(x[..., None])
    self.assertAllClose(bits1, bits2, rtol=0.01, atol=0.05)
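These snippets are methods of a test case (the assertAllClose / assertAllLessEqual helpers come from tf.test.TestCase), so the module imports are not shown. A minimal standalone sketch of the same rate-estimation call, assuming the repository-internal import paths suggested by the identifiers above (the public tensorflow_compression namespace may expose the same classes under other names):

import tensorflow as tf

from tensorflow_compression.python.distributions import deep_factorized
from tensorflow_compression.python.entropy_models import universal

prior = deep_factorized.NoisyDeepFactorized(batch_shape=(1,))
em = universal.UniversalBatchedEntropyModel(
    prior, coding_rank=1, compression=True, laplace_tail_mass=1e-3)

x = tf.linspace(-10.0, 10.0, 50)[..., None]  # shape (50, 1)
x_tilde, bits = em(x)  # perturbed values and per-element rate estimates in bits
print(bits.shape)      # (50,): one rate per coded element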
Example #2
  def test_bitstring_length_matches_estimates(self, training, prior):
    priors = {
        "deep_factorized":
            deep_factorized.NoisyDeepFactorized(batch_shape=(16,)),
        "normal":
            uniform_noise.NoisyNormal(loc=tf.range(16.0), scale=1.0)
    }
    prior = priors[prior]
    em = universal.UniversalBatchedEntropyModel(
        prior, coding_rank=2, compression=True)
    num_symbols = 1000
    # The source distribution is fixed as a Gaussian, regardless of the prior
    # under test.
    source = priors["normal"].base
    x = source.sample((3, num_symbols), seed=0)
    x_perturbed, bits_estimate = em(x, training=training)
    bitstring = em.compress(x)
    x_decoded = em.decompress(bitstring, (num_symbols,))
    bitstring_bits = tf.reshape(
        [len(b) * 8 for b in bitstring.numpy().flatten()], bitstring.shape)
    # Maximum error: 1% relative and 2 bytes (16 bits) absolute.
    self.assertAllClose(bits_estimate, bitstring_bits, atol=16, rtol=0.01)
    # Quantization noise should be between -.5 and .5.
    self.assertAllLessEqual(tf.abs(x - x_decoded), 0.5)
    self.assertAllLessEqual(tf.abs(x - x_perturbed), 0.5)
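For reference, a sketch of how coding_rank determines the shapes in the round trip above (same import assumptions as the sketch under Example #1, plus uniform_noise from the same distributions package): the last coding_rank axes of the input are coded together, the leading axes index independent bitstrings, and decompress only needs back the part of the coded shape that the prior's batch shape does not already determine.

import tensorflow as tf

from tensorflow_compression.python.distributions import uniform_noise
from tensorflow_compression.python.entropy_models import universal

prior = uniform_noise.NoisyNormal(loc=tf.range(16.0), scale=1.0)  # batch_shape (16,)
em = universal.UniversalBatchedEntropyModel(
    prior, coding_rank=2, compression=True)

x = prior.base.sample((3, 1000), seed=0)  # shape (3, 1000, 16)
strings = em.compress(x)                  # shape (3,): one bitstring per leading index
x_hat = em.decompress(strings, (1000,))   # shape (3, 1000, 16); the trailing 16 comes from the prior
print(strings.shape, x_hat.shape)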
Example #3
  def test_laplace_tail_mass_for_large_inputs(self):
    # Far outside the prior's support, the Laplace tail dominates, so the
    # rate should grow like |x| / ln(2) bits.
    prior = deep_factorized.NoisyDeepFactorized(batch_shape=(1,))
    em = universal.UniversalBatchedEntropyModel(
        prior, coding_rank=1, compression=True, laplace_tail_mass=1e-3)
    x = tf.convert_to_tensor([1e3, 1e4, 1e5, 1e6, 1e7, 1e8], tf.float32)
    _, bits = em(x[..., None])
    self.assertAllClose(bits, tf.abs(x) / tf.math.log(2.0), rtol=0.01)
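For intuition about the assertion above: laplace_tail_mass mixes a small amount of a unit-scale Laplace into the probability model, and far outside the prior's support that component dominates, so the rate grows like |x| / ln(2) bits plus a constant that is negligible at these magnitudes. A rough NumPy check of that asymptotic, taking the mixture weight from the snippet (the exact mixing inside the library may differ in detail):

import numpy as np

laplace_tail_mass = 1e-3
x = np.array([1e3, 1e4, 1e5, 1e6, 1e7, 1e8])
# -log2 of the Laplace component alone, whose density is w * 0.5 * exp(-|x|).
bits = (np.abs(x) - np.log(0.5 * laplace_tail_mass)) / np.log(2.0)
print(np.allclose(bits, np.abs(x) / np.log(2.0), rtol=0.01))  # True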
Example #4
  def test_can_instantiate_and_compress(self):
    prior = deep_factorized.NoisyDeepFactorized(batch_shape=(4, 4))
    em = universal.UniversalBatchedEntropyModel(
        prior, coding_rank=3, compression=True)
    x = tf.random.stateless_normal((3, 8, 4, 4), seed=(0, 0))
    bitstring = em.compress(x)
    em(x)  # Also exercise the model's __call__ path.
    x_hat = em.decompress(bitstring, (8,))
    # Quantization noise should be between -.5 and .5.
    u = x - x_hat
    self.assertAllLessEqual(tf.abs(u), 0.5)
Example #5
  def test_bitstring_length_matches_entropy_normal(self, scale=1e-8):
    prior = uniform_noise.NoisyNormal(loc=100 * tf.range(15.0), scale=scale)
    base_df = prior.base
    em = universal.UniversalBatchedEntropyModel(
        prior, coding_rank=2, compression=True)
    num_samples = 100000
    x = base_df.sample(num_samples, seed=0)
    bitstring = em.compress(x)
    x_decoded = em.decompress(bitstring, (num_samples,))
    bits = len(bitstring.numpy()) * 8
    bits_per_sample = bits / num_samples
    # Quantization noise should be between -.5 and .5
    self.assertAllLessEqual(tf.abs(x - x_decoded), 0.5)

    # Let's estimate the entropy by sampling the distribution: the average
    # negative log2-probability per sample approximates the entropy in bits.
    samples = prior.sample(num_samples, seed=0)
    log_probs = prior.log_prob(samples) / tf.math.log(2.0)
    entropy_bits = -tf.reduce_sum(log_probs) / num_samples
    rtol = 0.01  # Maximum relative error 1%.
    atol = 16  # Maximum 2 bytes absolute error.
    self.assertLessEqual(bits_per_sample, entropy_bits * (1 + rtol) + atol)
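The second half of the test uses a plain Monte Carlo entropy estimate: the average negative log2-probability of samples drawn from a distribution approximates its (differential) entropy in bits. A standalone sketch with a unit Gaussian, whose differential entropy is 0.5 * log2(2 * pi * e) ≈ 2.05 bits (tensorflow_probability is assumed to be installed; the noisy priors above already build on it):

import tensorflow as tf
import tensorflow_probability as tfp

dist = tfp.distributions.Normal(loc=0.0, scale=1.0)
num_samples = 100000
samples = dist.sample(num_samples, seed=0)
# Average -log2 p(sample) approximates the differential entropy in bits.
entropy_bits = -tf.reduce_mean(dist.log_prob(samples)) / tf.math.log(2.0)
print(float(entropy_bits))  # close to 0.5 * log2(2 * pi * e) ~= 2.05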
Example #6
  def test_expected_grads_gives_gradients(self):
    priors = {
        "deep_factorized":
            deep_factorized.NoisyDeepFactorized(batch_shape=(16,)),
        "normal":
            uniform_noise.NoisyNormal(loc=tf.range(16.0), scale=1.0)
    }
    prior = priors["deep_factorized"]
    em = universal.UniversalBatchedEntropyModel(
        prior, coding_rank=2, compression=True, expected_grads=True)
    self.assertTrue(em._expected_grads)
    num_symbols = 1000
    # The source distribution is fixed as a Gaussian.
    source = priors["normal"].base
    x = source.sample((3, num_symbols), seed=0)
    with tf.GradientTape(persistent=True) as g:
      g.watch(x)
      x2, bits = em(x, training=True)
    self.assertIsInstance(g.gradient(x2, x), tf.Tensor)
    self.assertIsInstance(g.gradient(bits, x), tf.Tensor)
    for variable in em.trainable_variables:
      self.assertIsInstance(g.gradient(bits, variable), tf.Tensor)
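The expected_grads flag name points at the analytical expected-gradient trick for additive uniform noise (an inference from the name; the snippet itself only checks that gradients exist): for u ~ U(-0.5, 0.5), the expectation of d/dx f(x + u) has the closed form f(x + 0.5) - f(x - 0.5), so the gradient of a noise-perturbed rate term can be computed exactly rather than from a single noise sample. A small NumPy check of that identity:

import numpy as np

f = np.tanh                            # smooth stand-in for a rate term
df = lambda v: 1.0 - np.tanh(v) ** 2   # its derivative
x = 0.3
u = np.random.uniform(-0.5, 0.5, size=1_000_000)
mc_grad = df(x + u).mean()             # Monte Carlo estimate of E[f'(x + u)]
closed_form = f(x + 0.5) - f(x - 0.5)  # analytical expected gradient
print(mc_grad, closed_form)            # agree to a few decimal places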