Code example #1
    def test_adaptive_stage_using_state_update_tensors(self):
        """Tests adaptive encoding stage with state update tensors."""
        encoder = core_encoder.EncoderComposer(
            test_utils.AdaptiveNormalizeEncodingStage()).add_parent(
                test_utils.PlusOneEncodingStage(), P1_VALS).make()
        x = tf.constant(1.0)
        state = encoder.initial_state()

        for _ in range(1, 5):
            initial_state = state
            encode_params, decode_params = encoder.get_params(state)
            encoded_x, state_update_tensors, input_shapes = encoder.encode(
                x, encode_params)
            decoded_x = encoder.decode(encoded_x, decode_params, input_shapes)
            state = encoder.update_state(initial_state, state_update_tensors)
            data = self.evaluate(
                test_utils.TestData(x, encoded_x, decoded_x, initial_state,
                                    state_update_tensors, state))

            self.assertAllClose(data.x, data.decoded_x)
            self.assertLessEqual(
                data.initial_state[CHILDREN][P1_VALS][STATE][AN_FACTOR_STATE],
                1.0)
            self.assertEqual(
                data.state_update_tensors[CHILDREN][P1_VALS][TENSORS]
                [AN_NORM_UPDATE], 2.0)
            self.assertLessEqual(data.encoded_x[P1_VALS][AN_VALS], 2.0)
Code example #2
 def test_larger_num_iters_improves_accuracy(self):
     # If last_iter_clip = True, this potentially computes a lossy representation.
     # Set delta large to measure the effect of changing num_iters on accuracy.
     x = np.random.randn(3, 12).astype(np.float32)
     errors = []
     seed = tf.constant([1, 2], tf.int64)
     for num_iters in [2, 3, 4, 5]:
         stage = kashin.KashinHadamardEncodingStage(num_iters=num_iters,
                                                    eta=0.9,
                                                    delta=100.0,
                                                    last_iter_clip=True)
         encode_params, decode_params = stage.get_params()
         # To keep the experiment consistent, we always need to use a fixed seed.
         encode_params[
             kashin.KashinHadamardEncodingStage.SEED_PARAMS_KEY] = seed
         decode_params[
             kashin.KashinHadamardEncodingStage.SEED_PARAMS_KEY] = seed
         encoded_x, decoded_x = self.encode_decode_x(
             stage, x, encode_params, decode_params)
         test_data = test_utils.TestData(x, encoded_x, decoded_x)
         test_data = self.evaluate_test_data(test_data)
         errors.append(np.linalg.norm(test_data.x - test_data.decoded_x))
     for e1, e2 in zip(errors[:-1], errors[1:]):
         # The error incurred with fewer iterations should be greater.
         self.assertGreater(e1, e2)
Code example #3
    def test_input_types(self, x_dtype):
        # Tests different input dtypes.
        x = tf.constant([1.0, 0.1, 0.01, 0.001, 0.0001], dtype=x_dtype)
        threshold = 0.05
        stage = misc.SplitBySmallValueEncodingStage(threshold=threshold)
        encode_params, decode_params = stage.get_params()
        encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                    decode_params)
        test_data = test_utils.TestData(x, encoded_x, decoded_x)
        test_data = self.evaluate_test_data(test_data)

        self._assert_is_integer(test_data.encoded_x[
            misc.SplitBySmallValueEncodingStage.ENCODED_INDICES_KEY])

        # The numpy arrays must have the same dtype as the arrays from test_data.
        expected_encoded_values = np.array([1.0, 0.1],
                                           dtype=x.dtype.as_numpy_dtype)
        expected_encoded_indices = np.array([[0], [1]], dtype=np.int32)
        expected_decoded_x = np.array([1.0, 0.1, 0., 0., 0.],
                                      dtype=x_dtype.as_numpy_dtype)
        self.assertAllEqual(test_data.encoded_x[stage.ENCODED_VALUES_KEY],
                            expected_encoded_values)
        self.assertAllEqual(test_data.encoded_x[stage.ENCODED_INDICES_KEY],
                            expected_encoded_indices)
        self.assertAllEqual(test_data.decoded_x, expected_decoded_x)
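The expected arrays in the assertions follow from what a split-by-small-value encoding is expected to do: keep the values whose magnitude exceeds the threshold together with their indices, and decode by scattering them back into zeros. A plain NumPy sketch of that assumed behaviour (illustrative only, not the stage's implementation):

import numpy as np

# Illustrative sketch, not the library code: keep values above the threshold
# with their indices, and decode by scattering them back into a zero tensor.
def split_by_small_value(x, threshold):
    mask = np.abs(x) > threshold
    return x[mask], np.argwhere(mask)        # values, indices of shape [n, 1]

def merge(values, indices, shape):
    out = np.zeros(shape, dtype=values.dtype)
    out[indices[:, 0]] = values
    return out

x = np.array([1.0, 0.1, 0.01, 0.001, 0.0001], dtype=np.float32)
values, indices = split_by_small_value(x, threshold=0.05)
print(values)                             # [1.0, 0.1]
print(indices)                            # [[0], [1]]
print(merge(values, indices, x.shape))    # [1.0, 0.1, 0.0, 0.0, 0.0]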
Code example #4
 def test_input_types(self, x_dtype):
     # Tests combinations of input dtypes.
     stage = quantization.PRNGUniformQuantizationEncodingStage(bits=8)
     x = tf.random.normal([50], dtype=x_dtype)
     encode_params, decode_params = stage.get_params()
     encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                 decode_params)
     test_data = test_utils.TestData(x, encoded_x, decoded_x)
     test_data = self.evaluate_test_data(test_data)
Code example #5
 def test_eta_delta_take_tf_values(self):
     x = self.default_input()
     stage = kashin.KashinHadamardEncodingStage(eta=tf.constant(0.9),
                                                delta=tf.constant(1.0))
     encode_params, decode_params = stage.get_params()
     encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                 decode_params)
     test_data = test_utils.TestData(x, encoded_x, decoded_x)
     self.generic_asserts(test_data, stage)
     self.common_asserts_for_test_data(test_data)
Code example #6
 def test_input_types(self, x_dtype, eta_dtype, delta_dtype):
     stage = kashin.KashinHadamardEncodingStage(
         eta=tf.constant(0.9, eta_dtype),
         delta=tf.constant(1.0, delta_dtype))
     x = tf.random.normal([3, 12], dtype=x_dtype)
     encode_params, decode_params = stage.get_params()
     encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                 decode_params)
     test_data = test_utils.TestData(x, encoded_x, decoded_x)
     test_data = self.evaluate_test_data(test_data)
     self.assertAllEqual(test_data.x.shape, test_data.decoded_x.shape)
Code example #7
    def test_encoding_differs_given_different_seed(self):
        """Tests that encoded_x is different in different evaluations."""
        x = tf.constant(self.evaluate(self.default_input()))
        stage = self.default_encoding_stage()
        encode_params, decode_params = stage.get_params()
        encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                    decode_params)
        test_data_1 = self.evaluate_test_data(
            test_utils.TestData(x, encoded_x, decoded_x))
        test_data_2 = self.evaluate_test_data(
            test_utils.TestData(x, encoded_x, decoded_x))

        # The decoded values should be the same, but the encoded values should not be.
        self.assertAllClose(test_data_1.decoded_x,
                            test_data_2.decoded_x,
                            rtol=test_utils.DEFAULT_RTOL,
                            atol=test_utils.DEFAULT_ATOL)
        self.assertNotAllClose(test_data_1.encoded_x[self._ENCODED_VALUES_KEY],
                               test_data_2.encoded_x[self._ENCODED_VALUES_KEY],
                               rtol=test_utils.DEFAULT_RTOL,
                               atol=test_utils.DEFAULT_ATOL)
Code example #8
  def test_basic_encode_decode_tf_constructor_parameters(self):
    """Tests the core funcionality with `tf.Variable` constructor parameters."""
    a_var = tf.get_variable('a_var', initializer=self._DEFAULT_A)
    b_var = tf.get_variable('b_var', initializer=self._DEFAULT_B)
    stage = test_utils.SimpleLinearEncodingStage(a_var, b_var)

    with self.cached_session() as sess:
      sess.run(tf.global_variables_initializer())
    x = self.default_input()
    encode_params, decode_params = stage.get_params()
    encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                decode_params)
    test_data = self.evaluate_test_data(
        test_utils.TestData(x, encoded_x, decoded_x))
    self.common_asserts_for_test_data(test_data)

    # Change the variables and verify the behavior of stage changes.
    self.evaluate([tf.assign(a_var, 5.0), tf.assign(b_var, 6.0)])
    test_data = self.evaluate_test_data(
        test_utils.TestData(x, encoded_x, decoded_x))
    self.assertAllClose(test_data.x * 5.0 + 6.0,
                        test_data.encoded_x[self._ENCODED_VALUES_KEY])
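The final assertion is consistent with the assumption that test_utils.SimpleLinearEncodingStage (not shown here) encodes x as a * x + b and decodes by inverting the affine map; after reassigning a=5.0 and b=6.0 the encoded values become x * 5.0 + 6.0 while decoding still recovers x. A minimal sketch of that assumed behaviour:

# Illustrative sketch only; the real stage operates on TensorFlow tensors and
# reads a and b from the tf.Variable objects passed to its constructor.
def linear_encode(x, a, b):
    return a * x + b

def linear_decode(y, a, b):
    return (y - b) / a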
Code example #9
    def test_input_types(self, x_dtype, clip_norm_dtype):
        # Tests combinations of input dtypes.
        stage = clipping.ClipByNormEncodingStage(
            tf.constant(1.0, clip_norm_dtype))
        x = tf.constant([1.0, 1.0, 1.0, 1.0], dtype=x_dtype)
        encode_params, decode_params = stage.get_params()
        encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                    decode_params)
        test_data = test_utils.TestData(x, encoded_x, decoded_x)
        test_data = self.evaluate_test_data(test_data)

        self.assertAllEqual([1.0, 1.0, 1.0, 1.0], test_data.x)
        # The decoded values should have norm 1.
        self.assertAllClose([0.5, 0.5, 0.5, 0.5], test_data.decoded_x)
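The expected decoded values are simple arithmetic: the L2 norm of [1, 1, 1, 1] is 2, so clipping to norm 1 rescales the vector by 1/2. A quick NumPy check mirroring tf.clip_by_norm's scaling rule:

import numpy as np

x = np.array([1.0, 1.0, 1.0, 1.0])
clip_norm = 1.0
norm = np.linalg.norm(x)                  # sqrt(4) = 2.0
clipped = x * min(1.0, clip_norm / norm)  # rescale only when the norm exceeds clip_norm
print(clipped)                            # [0.5 0.5 0.5 0.5]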
Code example #10
    def test_empty_input_static(self):
        # Tests that the encoding works when the input shape is [0].
        x = []
        x = tf.convert_to_tensor(x, dtype=tf.int32)
        assert x.shape.as_list() == [0]

        stage = self.default_encoding_stage()
        encode_params, decode_params = stage.get_params()
        encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                    decode_params)

        test_data = self.evaluate_test_data(
            test_utils.TestData(x, encoded_x, decoded_x))
        self.common_asserts_for_test_data(test_data)
Code example #11
File: stages_impl_test.py  Project: Crissal1995/IPCV
  def test_input_types(self, x_dtype, min_max_dtype):
    # Tests combinations of input dtypes.
    stage = stages_impl.UniformQuantizationEncodingStage(
        bits=8, min_max=tf.constant([-1.0, 1.0], min_max_dtype))
    x = tf.random.normal([50], dtype=x_dtype)
    encode_params, decode_params = stage.get_params()
    encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                decode_params)
    test_data = test_utils.TestData(x, encoded_x, decoded_x)
    test_data = self.evaluate_test_data(test_data)

    self.assertLess(np.amin(test_data.x), -1.0)
    self.assertGreater(np.amax(test_data.x), 1.0)
    self.assertAllGreaterEqual(test_data.decoded_x, -1.0)
    self.assertAllLessEqual(test_data.decoded_x, 1.0)
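The last two assertions rest on a general property of uniform quantization into a fixed [min, max] range: inputs are clipped into the range and mapped to one of 2**bits levels, so decoded values cannot leave the range even when the raw input does. A rough NumPy sketch of that property (an illustrative assumption, not the stage's actual code):

import numpy as np

def quantize_dequantize(x, vmin, vmax, bits):
    # Clip into [vmin, vmax], map to 2**bits - 1 uniform steps, and map back.
    levels = 2 ** bits - 1
    clipped = np.clip(x, vmin, vmax)
    q = np.round((clipped - vmin) / (vmax - vmin) * levels)
    return vmin + q / levels * (vmax - vmin)

x = np.random.randn(50).astype(np.float32)
y = quantize_dequantize(x, -1.0, 1.0, bits=8)
assert y.min() >= -1.0 and y.max() <= 1.0    # decoded values stay in range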
Code example #12
    def test_adaptive_stage(self):
        """Tests composition of two adaptive encoding stages."""
        encoder = core_encoder.EncoderComposer(
            test_utils.PlusOneOverNEncodingStage()).add_parent(
                test_utils.PlusOneOverNEncodingStage(), PN_VALS).make()
        x = tf.constant(1.0)
        state = encoder.initial_state()

        for i in range(1, 5):
            initial_state = state
            encode_params, decode_params = encoder.get_params(state)
            encoded_x, state_update_tensors, input_shapes = encoder.encode(
                x, encode_params)
            decoded_x = encoder.decode(encoded_x, decode_params, input_shapes)
            state = encoder.update_state(initial_state, state_update_tensors)
            data = self.evaluate(
                test_utils.TestData(x, encoded_x, decoded_x, initial_state,
                                    state_update_tensors, state))

            expected_initial_state = {
                STATE: {
                    PN_ITER_STATE: i
                },
                CHILDREN: {
                    PN_VALS: {
                        STATE: {
                            PN_ITER_STATE: i
                        },
                        CHILDREN: {}
                    }
                }
            }
            expected_state_update_tensors = {
                TENSORS: {},
                CHILDREN: {
                    PN_VALS: {
                        TENSORS: {},
                        CHILDREN: {}
                    }
                }
            }

            self.assertAllClose(data.x, data.decoded_x)
            self.assertAllEqual(expected_initial_state, data.initial_state)
            self.assertDictEqual(expected_state_update_tensors,
                                 data.state_update_tensors)
            self.assertAllClose(data.x + 2 * 1 / i,
                                data.encoded_x[PN_VALS][PN_VALS])
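The final assertion, data.x + 2 * 1 / i, is consistent with the assumption that each PlusOneOverNEncodingStage adds 1/i to its input at iteration i, keeping the iteration counter in its state; composing two such stages therefore shifts x by 2/i. A minimal plain-Python sketch of that assumed behaviour (not the test_utils implementation):

# Illustrative sketch of an adaptive "plus one over N" stage: the state counts
# iterations, encode adds 1/iteration, decode subtracts it again.
class PlusOneOverNSketch:
    def initial_state(self):
        return {'iteration': 1}

    def get_params(self, state):
        add = 1.0 / state['iteration']
        return {'add': add}, {'add': add}     # encode_params, decode_params

    def encode(self, x, encode_params):
        return x + encode_params['add'], {}   # encoded value, state update tensors

    def decode(self, encoded_x, decode_params):
        return encoded_x - decode_params['add']

    def update_state(self, state, state_update_tensors):
        return {'iteration': state['iteration'] + 1}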
Code example #13
    def test_input_types(self, x_dtype, clip_value_min_dtype,
                         clip_value_max_dtype):
        # Tests combinations of input dtypes.
        stage = clipping.ClipByValueEncodingStage(
            tf.constant(-1.0, clip_value_min_dtype),
            tf.constant(1.0, clip_value_max_dtype))
        x = tf.constant([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=x_dtype)
        encode_params, decode_params = stage.get_params()
        encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                    decode_params)
        test_data = test_utils.TestData(x, encoded_x, decoded_x)
        test_data = self.evaluate_test_data(test_data)

        self.common_asserts_for_test_data(test_data)
        self.assertAllEqual([-2.0, -1.0, 0.0, 1.0, 2.0], test_data.x)
        self.assertAllClose([-1.0, -1.0, 0.0, 1.0, 1.0], test_data.decoded_x)
Code example #14
    def test_all_below_threshold_works(self):
        # Tests that encoding does not blow up on all-below-threshold input. In
        # this case, both encoded tensors (values and indices) will be empty arrays.
        stage = misc.SplitBySmallValueEncodingStage(threshold=0.1)
        x = tf.random.uniform([50], minval=-0.01, maxval=0.01)
        encode_params, decode_params = stage.get_params()
        encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                    decode_params)
        test_data = test_utils.TestData(x, encoded_x, decoded_x)
        test_data = self.evaluate_test_data(test_data)

        expected_encoded_indices = np.array([], dtype=np.int32).reshape([0, 1])
        self.assertAllEqual(test_data.encoded_x[stage.ENCODED_VALUES_KEY], [])
        self.assertAllEqual(test_data.encoded_x[stage.ENCODED_INDICES_KEY],
                            expected_encoded_indices)
        self.assertAllEqual(test_data.decoded_x,
                            np.zeros([50], dtype=x.dtype.as_numpy_dtype))
Code example #15
    def test_as_adaptive_encoding_stage(self):
        """Tests correctness of the wrapped encoding stage."""
        a_var = tf.compat.v1.get_variable('a', initializer=2.0)
        b_var = tf.compat.v1.get_variable('b', initializer=3.0)
        stage = test_utils.SimpleLinearEncodingStage(a_var, b_var)
        wrapped_stage = encoding_stage.as_adaptive_encoding_stage(stage)
        self.assertIsInstance(wrapped_stage,
                              encoding_stage.AdaptiveEncodingStageInterface)

        x = tf.constant(2.0)
        state = wrapped_stage.initial_state()
        encode_params, decode_params = wrapped_stage.get_params(state)
        encoded_x, state_update_tensors = wrapped_stage.encode(
            x, encode_params)
        updated_state = wrapped_stage.update_state(state, state_update_tensors)
        decoded_x = wrapped_stage.decode(encoded_x, decode_params)

        # Test that the added state functionality is empty.
        self.assertDictEqual({}, state)
        self.assertDictEqual({}, state_update_tensors)
        self.assertDictEqual({}, updated_state)
        self.assertDictEqual({}, wrapped_stage.state_update_aggregation_modes)
        # Test that __getattr__ retrieves attributes of the wrapped stage.
        self.assertIsInstance(wrapped_stage._a, tf.Variable)
        self.assertIs(wrapped_stage._a, a_var)
        self.assertIsInstance(wrapped_stage._b, tf.Variable)
        self.assertIs(wrapped_stage._b, b_var)

        # Test that the functionality remains unchanged.
        self.assertEqual(stage.name, wrapped_stage.name)
        self.assertEqual(stage.compressible_tensors_keys,
                         wrapped_stage.compressible_tensors_keys)
        self.assertEqual(stage.commutes_with_sum,
                         wrapped_stage.commutes_with_sum)
        self.assertEqual(stage.decode_needs_input_shape,
                         wrapped_stage.decode_needs_input_shape)

        self.evaluate(tf.compat.v1.global_variables_initializer())
        test_data = test_utils.TestData(
            *self.evaluate([x, encoded_x, decoded_x]))
        self.assertEqual(2.0, test_data.x)
        self.assertEqual(
            7.0, test_data.encoded_x[
                test_utils.SimpleLinearEncodingStage.ENCODED_VALUES_KEY])
        self.assertEqual(2.0, test_data.decoded_x)
Code example #16
    def test_empty_input_dynamic(self):
        # Tests that the encoding works when the input shape is [0], but not
        # statically known.
        y = tf.zeros((10, ))
        indices = tf.compat.v2.where(tf.abs(y) > 1e-8)
        x = tf.gather_nd(y, indices)
        x = tf.cast(x, tf.int32)  # Empty tensor.
        assert x.shape.as_list() == [None]
        stage = self.default_encoding_stage()
        encode_params, decode_params = stage.get_params()
        encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                    decode_params)

        test_data = self.evaluate_test_data(
            test_utils.TestData(x, encoded_x, decoded_x))
        assert test_data.x.shape == (0, )
        assert test_data.encoded_x[stage.ENCODED_VALUES_KEY].shape == (0, )
        assert test_data.decoded_x.shape == (0, )
Code example #17
  def test_dynamic_input_shape(self):
    # Tests that encoding works when the input shape is not statically known.
    stage = quantization.PRNGUniformQuantizationEncodingStage(bits=8)
    shape = [10, 5, 7]
    prob = [0.5, 0.8, 0.6]
    original_x = tf.random.uniform(shape, dtype=tf.float32)
    rand = [tf.random.uniform([shape[i],]) for i in range(3)]
    sample_indices = [
        tf.reshape(tf.where(rand[i] < prob[i]), [-1]) for i in range(3)
    ]
    x = tf.gather(original_x, sample_indices[0], axis=0)
    x = tf.gather(x, sample_indices[1], axis=1)
    x = tf.gather(x, sample_indices[2], axis=2)

    encode_params, decode_params = stage.get_params()
    encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                decode_params)
    test_data = test_utils.TestData(x, encoded_x, decoded_x)
    test_data = self.evaluate_test_data(test_data)
Code example #18
  def test_quantization_empirically_unbiased(self):
    # Tests that the quantization "seems" to be unbiased.
    # When executing the encoding and decoding many times, the average error
    # should be much larger than the error of the average decoded value.
    x = tf.constant(np.random.rand(50).astype(np.float32))
    stage = quantization.PRNGUniformQuantizationEncodingStage(bits=2)
    encode_params, decode_params = stage.get_params()
    encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                decode_params)
    test_data = test_utils.TestData(x, encoded_x, decoded_x)
    test_data_list = [self.evaluate_test_data(test_data) for _ in range(200)]

    norm_errors = []
    errors = []
    for data in test_data_list:
      norm_errors.append(np.linalg.norm(data.x - data.decoded_x))
      errors.append(data.x - data.decoded_x)
    mean_of_errors = np.mean(norm_errors)
    error_of_mean = np.linalg.norm(np.mean(errors, axis=0))
    self.assertGreater(mean_of_errors, error_of_mean * 10)
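The property checked here is that of unbiased (stochastic) quantization: a value is rounded up with probability equal to its fractional position between the two neighbouring quantization levels, so the decoded value equals the input in expectation. A small NumPy sketch of that property in isolation (illustrative, not the stage's implementation):

import numpy as np

def stochastic_round(x, rng):
    # Round up with probability equal to the fractional part, down otherwise,
    # so the expected rounded value equals x.
    floor = np.floor(x)
    return floor + (rng.random(x.shape) < (x - floor))

rng = np.random.default_rng(0)
x = np.full(10000, 0.3)
print(stochastic_round(x, rng).mean())   # close to 0.3, although each sample is 0.0 or 1.0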
Code example #19
  def test_approximately_unbiased_in_expectation(self):
    """Tests that average of encodings is more accurate than a single one."""
    # Use a constant input value.
    x = self.evaluate(self.default_input())
    stage = self.default_encoding_stage()
    encode_params, decode_params = stage.get_params()
    encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                decode_params)
    test_data = []
    for _ in range(100):
      test_data.append(
          test_utils.TestData(
              *self.evaluate_tf_py_list([x, encoded_x, decoded_x])))

    # Check that the average error created by encoding is significantly larger
    # than the error of the average of encodings. This is a simple (imperfect)
    # empirical check that the encoding is unbiased.
    mean_error = np.mean([np.linalg.norm(x - d.decoded_x) for d in test_data])
    error_of_mean = np.linalg.norm(
        x - np.mean([d.decoded_x for d in test_data], axis=0))
    self.assertGreater(mean_error, error_of_mean * 5)
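The factor-of-5 bound follows from the usual averaging argument: if the per-encoding error has zero mean, averaging n independent decodings shrinks the error norm by roughly sqrt(n), so 100 repetitions comfortably exceed the asserted ratio. A quick NumPy illustration with synthetic zero-mean noise:

import numpy as np

rng = np.random.default_rng(0)
x = np.ones(50)
decodings = [x + rng.normal(scale=0.1, size=x.shape) for _ in range(100)]

mean_error = np.mean([np.linalg.norm(x - d) for d in decodings])
error_of_mean = np.linalg.norm(x - np.mean(decodings, axis=0))
print(mean_error, error_of_mean)   # the first is roughly 10x the second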