Example #1
 def test_decompress(self):
     # Test that decompression of values compressed with a previous version
     # works, i.e. that the file format doesn't change across revisions.
     bitstrings = tf.placeholder(tf.string)
     input_shape = tf.placeholder(tf.int32)
     quantized_cdf = tf.placeholder(tf.int32)
     cdf_length = tf.placeholder(tf.int32)
     layer = entropy_models.EntropyBottleneck(data_format="channels_first",
                                              filters=(),
                                              init_scale=2,
                                              dtype=tf.float32)
     layer.build(self.expected.shape)
     layer._quantized_cdf = quantized_cdf
     layer._cdf_length = cdf_length
     decoded = layer.decompress(bitstrings, input_shape[1:])
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         decoded, = sess.run(
             [decoded], {
                 bitstrings: self.bitstrings,
                 input_shape: self.expected.shape,
                 quantized_cdf: self.quantized_cdf,
                 cdf_length: self.cdf_length
             })
     self.assertAllClose(self.expected, decoded, rtol=0, atol=1e-6)
Example #2
 def test_build_decompress(self):
     # Test that layer can be built when `decompress` is the first call to it.
     bitstrings = tf.placeholder(tf.string)
     input_shape = tf.placeholder(tf.int32, shape=[3])
     layer = entropy_models.EntropyBottleneck(dtype=tf.float32)
     layer.decompress(bitstrings, input_shape[1:], channels=5)
     self.assertTrue(layer.built)
Example #3
 def test_codec(self):
     # Tests that inputs are compressed and decompressed correctly, and not
     # quantized to full integer values after quantiles have been updated.
     # However, the difference between input and output should be between -0.5
     # and 0.5, and the offset must be consistent.
     inputs = tf.placeholder(tf.float32, (1, None, 1))
     layer = entropy_models.EntropyBottleneck(data_format="channels_last",
                                              init_scale=40)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
     opt = tf.train.GradientDescentOptimizer(learning_rate=1)
     self.assertEqual(1, len(layer.losses))
     step = opt.minimize(layer.losses[0])
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         sess.run(step)
         self.assertEqual(1, len(layer.updates))
         sess.run(layer.updates[0])
         values = np.linspace(-50, 50, 100)[None, :, None]
         decoded, = sess.run([decoded], {inputs: values})
     self.assertAllClose(values, decoded, rtol=0, atol=.5)
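     # With a consistent sub-integer offset, round(values) - decoded has the same
     # fractional part for every element, and a nonzero one since outputs are not integers.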
     diff = np.ravel(np.around(values) - decoded) % 1
     self.assertAllClose(diff,
                         np.full_like(diff, diff[0]),
                         rtol=0,
                         atol=5e-6)
     self.assertNotEqual(diff[0], 0)
Example #4
 def test_visualize(self):
     # Test that summary op can be constructed.
     layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.build((None, 10))
     summary = layer.visualize()
     with self.test_session() as sess:
         sess.run(variables.global_variables_initializer())
         sess.run([summary])
Example #5
 def test_noise(self):
     # Tests that the noise added is uniform noise between -0.5 and 0.5.
     inputs = tf.placeholder(tf.float32, (None, 1))
     layer = entropy_models.EntropyBottleneck()
     noisy, _ = layer(inputs, training=True)
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         values = np.linspace(-50, 50, 100)[:, None]
         noisy, = sess.run([noisy], {inputs: values})
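     # Over 100 samples, uniform noise in (-0.5, 0.5) is very unlikely to stay within
     # +/-0.45 everywhere, but every sample must stay within +/-0.5.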
     self.assertFalse(np.allclose(values, noisy, rtol=0, atol=.45))
     self.assertAllClose(values, noisy, rtol=0, atol=.5)
Example #6
 def test_quantization_init(self):
     # Tests that inputs are quantized to full integer values right after
     # initialization.
     inputs = tf.placeholder(tf.float32, (None, 1))
     layer = entropy_models.EntropyBottleneck()
     quantized, _ = layer(inputs, training=False)
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         values = np.linspace(-50, 50, 100)[:, None]
         quantized, = sess.run([quantized], {inputs: values})
     self.assertAllClose(np.around(values), quantized, rtol=0, atol=1e-6)
Example #7
 def test_pmf_normalization(self):
     # Test that probability mass functions are normalized correctly.
     layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.build((None, 10))
     with self.test_session() as sess:
         sess.run(variables.global_variables_initializer())
         pmf, = sess.run([layer._pmf])
         self.assertAllClose(np.ones(10),
                             np.sum(pmf, axis=-1),
                             rtol=0,
                             atol=1e-6)
Example #8
 def test_normalization(self):
     # Test that densities are normalized correctly.
     inputs = tf.placeholder(tf.float32, (None, 1))
     layer = entropy_models.EntropyBottleneck(filters=(2, ))
     _, likelihood = layer(inputs, training=True)
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         x = np.repeat(np.arange(-200, 201), 2000)[:, None]
         likelihood, = sess.run([likelihood], {inputs: x})
     self.assertEqual(x.shape, likelihood.shape)
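     # training=True adds uniform noise, so the 2000 repeats of each integer sample the
     # unit interval around it; sum(likelihood) * (1 / 2000) approximates the integral
     # of the density over the sampled range.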
     integral = np.sum(likelihood) * .0005
     self.assertAllClose(1, integral, rtol=0, atol=2e-4)
Example #9
 def test_codec_init(self):
     # Tests that inputs are compressed and decompressed correctly, and quantized
     # to full integer values right after initialization.
     inputs = tf.placeholder(tf.float32, (1, None, 1))
     layer = entropy_models.EntropyBottleneck(data_format="channels_last",
                                              init_scale=30)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         values = np.linspace(-50, 50, 100)[None, :, None]
         decoded, = sess.run([decoded], {inputs: values})
     self.assertAllClose(np.around(values), decoded, rtol=0, atol=1e-6)
Example #10
 def test_codec_clipping(self):
     # Tests that inputs are compressed and decompressed correctly, and clipped
     # to the expected range.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
     layer = entropy_models.EntropyBottleneck(data_format="channels_last",
                                              init_scale=40)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
     with self.test_session() as sess:
         sess.run(variables.global_variables_initializer())
         self.assertTrue(len(layer.updates) == 1)
         sess.run(layer.updates[0])
         values = np.linspace(-50, 50, 100)[None, :, None]
         decoded, = sess.run([decoded], {inputs: values})
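         # Inputs outside the range covered by the CDF tables (roughly +/-init_scale,
         # i.e. +/-40 here) decode to the clipped boundary values.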
         expected = np.clip(np.around(values), -40, 40)
         self.assertAllClose(expected, decoded, rtol=0, atol=1e-6)
Example #11
 def test_quantization(self):
     # Tests that inputs are quantized to full integer values, even after
     # quantiles have been updated.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
     layer = entropy_models.EntropyBottleneck(optimize_integer_offset=False)
     quantized, _ = layer(inputs, training=False)
     opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
     step = opt.minimize(layer.losses[0])
     with self.test_session() as sess:
         sess.run(variables.global_variables_initializer())
         sess.run(step)
         values = np.linspace(-50, 50, 100)[:, None]
         quantized, = sess.run([quantized], {inputs: values})
         self.assertAllClose(np.around(values),
                             quantized,
                             rtol=0,
                             atol=1e-6)
Example #12
 def test_quantization(self):
   # Tests that inputs are not quantized to full integer values after quantiles
   # have been updated. However, the difference between input and output should
   # be between -0.5 and 0.5, and the offset must be consistent.
   inputs = tf.placeholder(tf.float32, (None, 1))
   layer = entropy_models.EntropyBottleneck()
   quantized, _ = layer(inputs, training=False)
   opt = tf.train.GradientDescentOptimizer(learning_rate=1)
   self.assertEqual(1, len(layer.losses))
   step = opt.minimize(layer.losses[0])
   with self.cached_session() as sess:
     sess.run(tf.global_variables_initializer())
     sess.run(step)
     values = np.linspace(-50, 50, 100)[:, None]
     quantized, = sess.run([quantized], {inputs: values})
   self.assertAllClose(values, quantized, rtol=0, atol=.5)
   diff = np.ravel(np.around(values) - quantized) % 1
   self.assertAllClose(diff, np.full_like(diff, diff[0]), rtol=0, atol=5e-6)
   self.assertNotEqual(diff[0], 0)
Example #13
 def test_channels_first(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channel dimension right after the batch dimension.
     inputs = tf.placeholder(tf.float32, (None, 3, None, None))
     layer = entropy_models.EntropyBottleneck(data_format="channels_first",
                                              init_scale=10)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         self.assertEqual(1, len(layer.updates))
         sess.run(layer.updates[0])
         values = 2.5 * np.random.normal(size=(2, 3, 5, 7))
         noisy, quantized, decoded = sess.run([noisy, quantized, decoded],
                                              {inputs: values})
     self.assertAllClose(values, noisy, rtol=0, atol=.5)
     self.assertAllClose(values, quantized, rtol=0, atol=.5)
     self.assertAllClose(values, decoded, rtol=0, atol=.5)
Example #14
 def test_codec(self):
     # Tests that inputs are compressed and decompressed correctly, and quantized
     # to full integer values, even after quantiles have been updated.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
     layer = entropy_models.EntropyBottleneck(data_format="channels_last",
                                              init_scale=60,
                                              optimize_integer_offset=False)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
     opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
     step = opt.minimize(layer.losses[0])
     with self.test_session() as sess:
         sess.run(variables.global_variables_initializer())
         sess.run(step)
         self.assertTrue(len(layer.updates) == 1)
         sess.run(layer.updates[0])
         values = np.linspace(-50, 50, 100)[None, :, None]
         decoded, = sess.run([decoded], {inputs: values})
         self.assertAllClose(np.around(values), decoded, rtol=0, atol=1e-6)
Example #15
 def test_entropy_estimates(self):
     # Test that entropy estimates match actual range coding.
     inputs = tf.placeholder(tf.float32, (1, None, 1))
     layer = entropy_models.EntropyBottleneck(filters=(2, 3),
                                              data_format="channels_last")
     _, likelihood = layer(inputs, training=True)
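     # Differential entropy estimate, in bits, from the noisy (training-time) likelihood.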
     diff_entropy = tf.reduce_sum(tf.log(likelihood)) / -np.log(2)
     _, likelihood = layer(inputs, training=False)
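     # Discrete entropy estimate, in bits, from the quantized (inference-time) likelihood.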
     disc_entropy = tf.reduce_sum(tf.log(likelihood)) / -np.log(2)
     bitstrings = layer.compress(inputs)
     with self.cached_session() as sess:
         sess.run(tf.global_variables_initializer())
         self.assertEqual(1, len(layer.updates))
         sess.run(layer.updates[0])
         diff_entropy, disc_entropy, bitstrings = sess.run(
             [diff_entropy, disc_entropy, bitstrings],
             {inputs: np.random.normal(size=(1, 10000, 1))})
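     # Actual code length in bits: 8 bits per byte of the encoded strings.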
     codelength = 8 * sum(len(s) for s in bitstrings)
     self.assertAllClose(diff_entropy, disc_entropy, rtol=5e-3, atol=0)
     self.assertAllClose(disc_entropy, codelength, rtol=5e-3, atol=0)
Example #16
 def test_channels_last(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channels in the last dimension.
     inputs = array_ops.placeholder(dtypes.float32, (None, None, None, 2))
     layer = entropy_models.EntropyBottleneck(data_format="channels_last",
                                              init_scale=50)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
     with self.test_session() as sess:
         sess.run(variables.global_variables_initializer())
         self.assertTrue(len(layer.updates) == 1)
         sess.run(layer.updates[0])
         values = 5 * np.random.normal(size=(7, 5, 3, 2))
         noisy, quantized, decoded = sess.run([noisy, quantized, decoded],
                                              {inputs: values})
         self.assertAllClose(values, noisy, rtol=0, atol=.5)
         self.assertAllClose(values, quantized, rtol=0, atol=.5)
         self.assertAllClose(values, decoded, rtol=0, atol=.5)
Example #17
 def test_compress(self):
   # Test compression and decompression, and produce test data for
   # `test_decompress`. If you set the constant at the end to `True`, this test
   # will fail and the log will contain the new test data.
   inputs = tf.placeholder(tf.float32, (2, 3, 9))
   layer = entropy_models.EntropyBottleneck(
       data_format="channels_first", filters=(), init_scale=2)
   bitstrings = layer.compress(inputs)
   decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
   with self.cached_session() as sess:
     values = 8 * np.random.uniform(size=(2, 3, 9)) - 4
     sess.run(tf.global_variables_initializer())
     self.assertEqual(1, len(layer.updates))
     sess.run(layer.updates[0])
     bitstrings, quantized_cdf, cdf_length, decoded = sess.run(
         [bitstrings, layer._quantized_cdf, layer._cdf_length, decoded],
         {inputs: values})
   self.assertAllClose(values, decoded, rtol=0, atol=.5)
   # Set this constant to `True` to log new test data for `test_decompress`.
   if False:  # pylint:disable=using-constant-test
     assert False, (bitstrings, quantized_cdf, cdf_length, decoded)
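Taken together, the examples above cover the whole EntropyBottleneck workflow: the noisy training pass, the auxiliary loss exposed through layer.losses, the CDF-table refresh in layer.updates, and the compress/decompress round trip. Below is a minimal sketch that strings those calls together in the same TF1 graph style; the import paths, the optimizer, the learning rate, and the toy data are illustrative assumptions, not taken from the examples.

import numpy as np
import tensorflow.compat.v1 as tf  # the examples assume TF1-style graph mode
from tensorflow_compression.python.layers import entropy_models  # assumed import path

tf.disable_v2_behavior()  # needed when running under TF2

inputs = tf.placeholder(tf.float32, (1, None, 1))
layer = entropy_models.EntropyBottleneck(data_format="channels_last")
noisy, likelihood = layer(inputs, training=True)        # additive-noise pass used during training
rate = tf.reduce_sum(tf.log(likelihood)) / -np.log(2)   # estimated bits; added to a distortion term in a real model
aux_step = tf.train.GradientDescentOptimizer(1e-2).minimize(layer.losses[0])
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(aux_step)          # one step on the auxiliary loss; in practice run alongside training
    sess.run(layer.updates[0])  # rebuild the CDF tables used by the range coder
    values = np.random.normal(size=(1, 1000, 1))
    strings, restored = sess.run([bitstrings, decoded], {inputs: values})
    # As in the tests above, `restored` should match `values` to within +/-0.5.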