Example #1
 def test_tails_and_offset_are_in_order(self):
     df = deep_factorized.NoisyDeepFactorized()
     offset = helpers.quantization_offset(df)
     lower_tail = helpers.lower_tail(df, 2**-8)
     upper_tail = helpers.upper_tail(df, 2**-8)
     self.assertGreater(upper_tail, offset)
     self.assertGreater(offset, lower_tail)
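These snippets appear to come from the tensorflow_compression test suite. If so, a plausible set of imports for running them standalone would be the following (the module paths are an assumption based on the package layout, not part of the original snippets):

 import tensorflow as tf
 import tensorflow_probability as tfp
 from tensorflow_compression.python.distributions import deep_factorized
 from tensorflow_compression.python.distributions import helpers
 from tensorflow_compression.python.distributions import uniform_noise
 from tensorflow_compression.python.entropy_models import universal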
Example #2
 def test_bitstring_length_matches_estimates(self, training, prior):
     priors = {
         "deep_factorized":
         deep_factorized.NoisyDeepFactorized(batch_shape=(16, )),
         "normal":
         uniform_noise.NoisyNormal(loc=tf.range(16.0), scale=1.0)
     }
     prior = priors[prior]
     em = universal.UniversalBatchedEntropyModel(prior,
                                                 coding_rank=2,
                                                 compression=True)
     num_symbols = 1000
     # The source distribution is a fixed Gaussian.
     source = priors["normal"].base
     x = source.sample((3, num_symbols), seed=0)
     x_perturbed, bits_estimate = em(x, training=training)
     bitstring = em.compress(x)
     x_decoded = em.decompress(bitstring, (num_symbols, ))
     bitstring_bits = tf.reshape(
         [len(b) * 8 for b in bitstring.numpy().flatten()], bitstring.shape)
     # Max error 1% and 2 bytes.
     self.assertAllClose(bits_estimate, bitstring_bits, atol=16, rtol=0.01)
     # Quantization noise should be between -.5 and .5
     self.assertAllLessEqual(tf.abs(x - x_decoded), 0.5)
     self.assertAllLessEqual(tf.abs(x - x_perturbed), 0.5)
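Unlike the other tests, this method takes extra training and prior arguments, which suggests it is driven by a parameterized-test decorator. A hypothetical sketch with absl.testing.parameterized (the decorator and parameter values are assumptions, not taken from the source):

 from absl.testing import parameterized

 @parameterized.product(
     training=(False, True),
     prior=("deep_factorized", "normal"),
 )
 def test_bitstring_length_matches_estimates(self, training, prior):
     ...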
Example #3
 def test_can_instantiate_and_run_scalar(self):
     df = deep_factorized.NoisyDeepFactorized(num_filters=(2, 3, 4))
     self.assertEqual(df.batch_shape, ())
     self.assertEqual(df.event_shape, ())
     self.assertEqual(df.base.num_filters, (2, 3, 4))
     self.assertEqual(df.base.init_scale, 10)
     x = tf.random.normal((10, ))
     df.prob(x)
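Since batch and event shapes are both scalar here, prob evaluates the density elementwise and returns a tensor with the same shape as its input; a quick sanity check (a sketch, not in the original):

     p = df.prob(x)
     assert p.shape == (10,)  # one density value per input sample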
Example #4
 def test_variables_receive_gradients(self):
     df = deep_factorized.NoisyDeepFactorized()
     with tf.GradientTape() as tape:
         x = tf.random.normal([20])
         loss = -tf.reduce_mean(df.log_prob(x))
     grads = tape.gradient(loss, df.trainable_variables)
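     # Assuming the default num_filters=(3, 3), the base density has 3 matrices,
     # 3 biases, and 2 factors, hence the 8 trainable variables expected below.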
     self.assertLen(grads, 8)
     self.assertNotIn(None, grads)
Example #5
 def test_laplace_tail_mass_for_large_inputs(self):
     prior = deep_factorized.NoisyDeepFactorized(batch_shape=(1, ))
     em = universal.UniversalBatchedEntropyModel(prior,
                                                 coding_rank=1,
                                                 compression=True,
                                                 laplace_tail_mass=1e-3)
     x = tf.convert_to_tensor([1e3, 1e4, 1e5, 1e6, 1e7, 1e8], tf.float32)
     _, bits = em(x[..., None])
     self.assertAllClose(bits, tf.abs(x) / tf.math.log(2.0), rtol=0.01)
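The expected value follows from the Laplace fallback: a unit-scale Laplace density decays like exp(-|x|), so the ideal code length is |x| / ln(2) bits plus a constant that vanishes in relative terms for large |x|. The same arithmetic as a minimal sketch with tensorflow_probability:

     laplace = tfp.distributions.Laplace(loc=0.0, scale=1.0)
     big = tf.constant([1e3, 1e6], tf.float32)
     ideal_bits = -laplace.log_prob(big) / tf.math.log(2.0)
     # ideal_bits equals |x| / ln(2) + 1, i.e. within 1% of |x| / ln(2) here.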
Example #6
 def test_logistic_is_special_case(self):
     # With no hidden units, the density should collapse to a logistic
     # distribution convolved with a standard uniform distribution.
     df = deep_factorized.NoisyDeepFactorized(num_filters=(), init_scale=1)
     logistic = tfp.distributions.Logistic(loc=-df.base._biases[0][0, 0],
                                           scale=1.)
     x = tf.linspace(-5., 5., 20)
     prob_df = df.prob(x)
     prob_log = logistic.cdf(x + .5) - logistic.cdf(x - .5)
     self.assertAllClose(prob_df, prob_log)
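Why this works: with num_filters=() the base model's cumulative is a single sigmoid, sigmoid(x + b), which is exactly the CDF of a logistic distribution with loc = -b and scale 1; convolving with standard uniform noise then turns the density into the CDF difference c(x + .5) - c(x - .5), which is what the test compares against. The CDF identity in one line (sketch):

     b = df.base._biases[0][0, 0]
     # tfp.distributions.Logistic(loc=-b, scale=1.).cdf(x) == tf.sigmoid(x + b)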
Example #7
 def test_can_instantiate_and_compress(self):
   prior = deep_factorized.NoisyDeepFactorized(batch_shape=(4, 4))
   em = universal.UniversalBatchedEntropyModel(
       prior, coding_rank=3, compression=True)
   x = tf.random.stateless_normal((3, 8, 4, 4), seed=(0, 0))
   bitstring = em.compress(x)
   em(x)
   x_hat = em.decompress(bitstring, (8,))
   # Quantization noise should be between -.5 and .5
   u = x - x_hat
   self.assertAllLessEqual(tf.abs(u), 0.5)
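If the usual batched-entropy-model convention holds, coding_rank=3 means the trailing (8, 4, 4) axes of x are coded into a single bitstring per leading batch element, and decompress only needs the part of the coded shape not already covered by the prior's (4, 4) batch shape, hence the (8,) argument. A shape check reflecting that reading (an assumption, not in the original):

    assert bitstring.shape == (3,)
    assert x_hat.shape == (3, 8, 4, 4)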
Example #8
 def test_stats_throw_error(self):
     df = deep_factorized.NoisyDeepFactorized()
     with self.assertRaises(NotImplementedError):
         df.mode()
     with self.assertRaises(NotImplementedError):
         df.mean()
     with self.assertRaises(NotImplementedError):
         df.quantile(.5)
     with self.assertRaises(NotImplementedError):
         df.survival_function(.5)
     with self.assertRaises(NotImplementedError):
         df.sample()
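These statistics presumably raise NotImplementedError because the noise-convolved density has no closed form for its moments, quantiles, or samples; callers are expected to work with the density directly, as the other tests here do (sketch):

     # Density evaluation remains available:
     df.log_prob(tf.zeros(()))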
Example #9
 def test_laplace_tail_mass_for_small_inputs(self):
     prior = deep_factorized.NoisyDeepFactorized(batch_shape=(1, ))
     em1 = universal.UniversalBatchedEntropyModel(prior,
                                                  coding_rank=1,
                                                  compression=True,
                                                  laplace_tail_mass=1e-3)
     em2 = universal.UniversalBatchedEntropyModel(prior,
                                                  coding_rank=1,
                                                  compression=True)
     x = tf.linspace(-10.0, 10.0, 50)
     _, bits1 = em1(x[..., None])
     _, bits2 = em2(x[..., None])
     self.assertAllClose(bits1, bits2, rtol=0.01, atol=0.05)
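The tight tolerance makes sense: mixing in a Laplace tail with weight eps = 1e-3 scales the main density by (1 - eps), so for typical inputs the rate penalty is at most -log2(1 - eps) bits per symbol, far below the atol of 0.05 used here. The arithmetic as a sketch:

     import math
     penalty = -math.log2(1 - 1e-3)  # ≈ 0.00144 bits per symbol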
Example #10
 def test_expected_grads_gives_gradients(self):
   priors = {
       "deep_factorized":
           deep_factorized.NoisyDeepFactorized(batch_shape=(16,)),
       "normal":
           uniform_noise.NoisyNormal(loc=tf.range(16.0), scale=1.0)
   }
   prior = priors["deep_factorized"]
   em = universal.UniversalBatchedEntropyModel(
       prior, coding_rank=2, compression=True, expected_grads=True)
   self.assertTrue(em._expected_grads)
   num_symbols = 1000
    # The source distribution is a fixed Gaussian.
   source = priors["normal"].base
   x = source.sample((3, num_symbols), seed=0)
   with tf.GradientTape(persistent=True) as g:
     g.watch(x)
     x2, bits = em(x, training=True)
   self.assertIsInstance(g.gradient(x2, x), tf.Tensor)
   self.assertIsInstance(g.gradient(bits, x), tf.Tensor)
   for variable in em.trainable_variables:
     self.assertIsInstance(g.gradient(bits, variable), tf.Tensor)
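Here expected_grads=True asks the model to differentiate the expected rate over the additive noise analytically rather than through a single noise sample, so both the perturbed output and the bit estimate stay differentiable with respect to the inputs and the variables, which is what the tape checks. A hedged sketch of how such a model might then be trained (the optimizer and learning rate are assumptions, not from the source):

    opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
    with tf.GradientTape() as tape:
      _, bits = em(x, training=True)
      loss = tf.reduce_mean(bits)
    grads = tape.gradient(loss, em.trainable_variables)
    opt.apply_gradients(zip(grads, em.trainable_variables))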
Example #11
 def test_quantization_offset_is_zero(self):
     df = deep_factorized.NoisyDeepFactorized()
     self.assertEqual(helpers.quantization_offset(df), 0)
Example #12
 def test_uniform_is_special_case(self):
     # With the scale parameter going to zero, the density should approach a
     # unit-width uniform distribution.
     df = deep_factorized.NoisyDeepFactorized(init_scale=1e-3)
     x = tf.linspace(-1., 1., 10)
     self.assertAllClose(df.prob(x), [0, 0, 0, 1, 1, 1, 1, 0, 0, 0])
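The expected vector can be worked out by hand: tf.linspace(-1., 1., 10) places points at multiples of 2/9 from -1 to 1, and only the four points with |x| < 0.5 fall inside the unit-width box, giving [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]. The same check in plain NumPy (sketch):

     import numpy as np
     x = np.linspace(-1., 1., 10)
     print((np.abs(x) < 0.5).astype(float))  # [0. 0. 0. 1. 1. 1. 1. 0. 0. 0.]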
Example #13
 def test_tails_are_in_order(self):
     df = deep_factorized.NoisyDeepFactorized()
     lower_tail = helpers.lower_tail(df, 2**-8)
     upper_tail = helpers.upper_tail(df, 2**-8)
     self.assertGreater(upper_tail, lower_tail)
Example #14
 def test_noisy_deep_factorized_tails_are_in_order(self):
     dist = deep_factorized.NoisyDeepFactorized(batch_shape=[10])
     self.assertAllGreater(
         helpers.upper_tail(dist, 2**-8) - helpers.lower_tail(dist, 2**-8),
         0)
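For context on these helpers: lower_tail(dist, 2**-8) and upper_tail(dist, 2**-8) appear to locate the points beyond which at most 2**-8 of the probability mass lies on each side (useful for bounding range-coding tables), so the upper tail must sit strictly above the lower tail for every element of the batch. For a distribution with closed-form quantiles this reduces to quantiles (an assumption about the helpers' semantics, not stated in the snippets):

     norm = tfp.distributions.Normal(loc=0.0, scale=1.0)
     # helpers.lower_tail(norm, 2**-8) should be close to
     # norm.quantile(2**-8) ≈ -2.66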