Example #1
    def test_dorefa_quantize(self, k_bit, mode):
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x],
                                      [lq.quantizers.DoReFa(k_bit, mode)(x)])
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        n = 2**k_bit - 1
        if mode == "weights":
            # Create the preprocessed and scaled stimulus, which is then ready
            # to go through the same test as the activation quantizer.
            divider = np.amax(np.abs(np.tanh(real_values)))
            real_values = np.tanh(real_values) / divider
            real_values = (real_values / 2.0) + 0.5
            # The results, which are currently in the [-1, 1] range, get the
            # same scaling, so they behave as if they were produced on the
            # activation range and can be tested the same way.
            result = result / 2.0 + 0.5
        assert not np.any(result > 1)
        assert not np.any(result < 0)
        for i in range(n + 1):
            np.testing.assert_allclose(
                result[(real_values > (2 * i - 1) / (2 * n))
                       & (real_values < (2 * i + 1) / (2 * n))],
                i / n,
                atol=1e-6,
            )
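
For reference, here is a minimal NumPy sketch of the forward pass this test exercises, assuming the standard DoReFa formulation (Zhou et al., 2016): quantize_k(x) = round((2^k - 1) * x) / (2^k - 1) on [0, 1], with weights first squashed by tanh and rescaled into that range. This is an illustration, not larq's implementation.

import numpy as np

def quantize_k(x, k_bit):
    # Uniform quantization of [0, 1] onto 2**k_bit evenly spaced levels.
    n = 2**k_bit - 1
    return np.round(x * n) / n

def dorefa_forward(x, k_bit, mode):
    if mode == "weights":
        # Squash into [-1, 1], shift to [0, 1], quantize, shift back.
        w = np.tanh(x) / np.amax(np.abs(np.tanh(x)))
        return 2.0 * quantize_k(w / 2.0 + 0.5, k_bit) - 1.0
    # Activations mode: inputs are clipped to [0, 1] before quantizing.
    return quantize_k(np.clip(x, 0.0, 1.0), k_bit)
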
Example #2
    def test_identity_ste_grad(self, eager_mode, fn):
        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = fn(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), np.ones_like(x))
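
The identity straight-through estimator verified above can be written directly with tf.custom_gradient. A minimal sketch with illustrative names (not larq's code); the forward pass binarizes, the backward pass is the identity:

import tensorflow as tf

@tf.custom_gradient
def sign_with_identity_ste(x):
    def grad(dy):
        # Identity STE: pass the incoming gradient through unchanged.
        return dy
    # tf.sign(tf.sign(x) + 0.1) maps zeros to +1 instead of 0.
    return tf.sign(tf.sign(x) + 0.1), grad
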
Example #3
def test_heaviside(fn):
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [fn(x)])
    binarized_values = np.random.choice([0, 1], size=(2, 5))
    result = f([binarized_values])[0]
    np.testing.assert_allclose(result, binarized_values)

    real_values = generate_real_values_with_zeros()
    result = f([real_values])[0]
    assert np.all(result[real_values <= 0] == 0)
    assert np.all(result[real_values > 0] == 1)
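
The behaviour asserted above has a one-line NumPy reference (a sketch, not the library code):

import numpy as np

def heaviside_reference(x):
    # 0 for x <= 0, 1 for x > 0; binary inputs {0, 1} are fixed points.
    return np.where(x > 0, 1.0, 0.0)
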
Example #4
    def test_ternarization_with_default_threshold(self, fn):
        x = tf.keras.backend.placeholder(ndim=2)
        test_threshold = 0.05  # This is the default
        f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])

        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert np.all(result[real_values > test_threshold] == 1)
        assert np.all(result[real_values < -test_threshold] == -1)
        assert np.all(result[np.abs(real_values) < test_threshold] == 0)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
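
A NumPy reference for the ternarization rule these assertions encode, assuming larq's default threshold of 0.05 as noted in the test (a sketch, not the implementation):

import numpy as np

def ste_tern_reference(x, threshold=0.05):
    # +1 above the threshold, -1 below its negative, 0 in between.
    return np.where(x > threshold, 1.0,
                    np.where(x < -threshold, -1.0, 0.0))
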
Example #5
    def test_and_binarization(self, fn):
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])

        binarized_values = np.random.choice([0, 1], size=(2, 5))
        result = f([binarized_values])[0]
        np.testing.assert_allclose(result, binarized_values)

        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert np.all(result[real_values <= 0] == 0)
        assert np.all(result[real_values > 0] == 1)
Example #6
    def test_ste_grad(self, eager_mode, fn):
        @np.vectorize
        def ste_grad(x):
            if np.abs(x) <= 1:
                return 1.0
            return 0.0

        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = fn(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), ste_grad(x))
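
The clipped straight-through estimator checked here (gradient 1 where |x| <= 1, 0 elsewhere) pairs a sign forward pass with a clipped identity backward pass. A sketch using tf.custom_gradient, with illustrative names:

import tensorflow as tf

@tf.custom_gradient
def sign_with_clipped_ste(x):
    def grad(dy):
        # Block gradients outside [-1, 1], pass them through inside.
        return dy * tf.cast(tf.abs(x) <= 1, dy.dtype)
    return tf.sign(tf.sign(x) + 0.1), grad
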
Example #7
    def test_ternarization_with_custom_threshold(self):
        x = tf.keras.backend.placeholder(ndim=2)
        test_threshold = np.random.uniform(0.01, 0.8)
        fn = lq.quantizers.SteTern(threshold_value=test_threshold)
        f = tf.keras.backend.function([x], [fn(x)])

        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert np.all(result[real_values > test_threshold] == 1)
        assert np.all(result[real_values < -test_threshold] == -1)
        assert np.all(result[np.abs(real_values) < test_threshold] == 0)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
Example #8
    def test_dorefa_quantize(self, fn):
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x], [fn(x)])
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        k_bit = 2
        n = 2**k_bit - 1
        assert not np.any(result > 1)
        assert not np.any(result < 0)
        for i in range(n + 1):
            assert np.all(result[(real_values > (2 * i - 1) / (2 * n))
                                 & (real_values < (2 * i + 1) / (2 * n))]
                          == i / n)
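
The bucket loop above is equivalent to checking the closed form quantize_k(x) = round((2^k - 1) * x) / (2^k - 1): every input strictly inside ((2i - 1) / (2n), (2i + 1) / (2n)) rounds to the level i / n. A quick NumPy check of that equivalence:

import numpy as np

n = 2**2 - 1  # k_bit = 2
x = np.random.uniform(0, 1, size=(4, 10))
for i in range(n + 1):
    bucket = x[(x > (2 * i - 1) / (2 * n)) & (x < (2 * i + 1) / (2 * n))]
    # Every value in the i-th bucket rounds to exactly i / n.
    assert np.all(np.round(bucket * n) / n == i / n)
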
Example #9
    def test_approx_sign_grad(self, eager_mode):
        @np.vectorize
        def approx_sign_grad(x):
            if np.abs(x) <= 1:
                return 2 - 2 * np.abs(x)
            return 0.0

        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.ApproxSign()(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), approx_sign_grad(x))
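
ApproxSign (introduced in Bi-Real Net) swaps the flat STE for the derivative of a piecewise-quadratic approximation of sign, so gradients fade out towards |x| = 1. A sketch of that pairing, with illustrative names:

import tensorflow as tf

@tf.custom_gradient
def approx_sign(x):
    def grad(dy):
        # 2 - 2|x| inside [-1, 1], zero outside.
        return dy * tf.maximum(2.0 - 2.0 * tf.abs(x), 0.0)
    return tf.sign(tf.sign(x) + 0.1), grad
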
Example #10
    def test_dorefa_ste_grad(self, eager_mode):
        @np.vectorize
        def ste_grad(x):
            if 0 <= x <= 1:
                return 1.0
            return 0.0

        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.DoReFaQuantizer(2)(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), ste_grad(x))
Example #11
    def test_ternarization_with_ternary_weight_networks(self):
        x = tf.keras.backend.placeholder(ndim=2)
        real_values = testing_utils.generate_real_values_with_zeros()
        test_threshold = 0.7 * np.sum(np.abs(real_values)) / np.size(real_values)
        fn = lq.quantizers.SteTern(ternary_weight_networks=True)
        f = tf.keras.backend.function([x], [fn(x)])

        result = f([real_values])[0]
        assert np.all(result[real_values > test_threshold] == 1)
        assert np.all(result[real_values < -test_threshold] == -1)
        assert np.all(result[np.abs(real_values) < test_threshold] == 0)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
Example #12
def test_leaky_tanh():
    @np.vectorize
    def leaky_tanh(x, alpha):
        if x <= -1:
            return -1 + alpha * (x + 1)
        elif x <= 1:
            return x
        else:
            return 1 + alpha * (x - 1)

    real_values = generate_real_values_with_zeros()
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [lq.activations.leaky_tanh(x)])
    result = f([real_values])[0]
    np.testing.assert_allclose(result, leaky_tanh(real_values, alpha=0.2))
Example #13
    def test_swish_grad(self, eager_mode):
        def swish_grad(x, beta):
            return (beta * (2 - beta * x * np.tanh(beta * x / 2)) /
                    (1 + np.cosh(beta * x)))

        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.SwishSign()(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=5.0))

        with tf.GradientTape() as tape:
            activation = lq.quantizers.SwishSign(beta=10.0)(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=10.0))
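
The expression in swish_grad is the derivative of the sign-swish surrogate SS_beta(x) = 2 * sigmoid(beta * x) * (1 + beta * x * (1 - sigmoid(beta * x))) - 1 from the BNN+ paper. A NumPy sketch verifying that identity numerically, assuming that formulation:

import numpy as np

def sign_swish(x, beta):
    s = 1 / (1 + np.exp(-beta * x))  # logistic sigmoid
    return 2 * s * (1 + beta * x * (1 - s)) - 1

beta = 5.0
x = np.linspace(-3, 3, 6001)
numeric = np.gradient(sign_swish(x, beta), x)  # finite differences
analytic = beta * (2 - beta * x * np.tanh(beta * x / 2)) / (1 + np.cosh(beta * x))
assert np.allclose(numeric, analytic, atol=1e-3)
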
Example #14
def test_sign(fn):
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [fn(x)])
    binarized_values = np.random.choice([-1, 1],
                                        size=(2, 5)).astype(np.float32)
    result = f(binarized_values)[0]
    np.testing.assert_allclose(result, binarized_values)

    real_values = generate_real_values_with_zeros()
    result = f(real_values)[0]
    assert not np.any(result == 0)
    assert np.all(result[real_values < 0] == -1)
    assert np.all(result[real_values >= 0] == 1)

    zero_values = np.zeros((2, 5))
    result = f(zero_values)[0]
    assert np.all(result == 1)
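
The convention asserted above (zeros map to +1, so the output never contains 0) matches this one-line NumPy reference:

import numpy as np

def sign_reference(x):
    # Hard sign with sign(0) defined as +1.
    return np.where(x >= 0, 1.0, -1.0)
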
Example #15
    def test_ternarization_basic(self, fn):
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])

        ternarized_values = np.random.choice([-1, 0, 1], size=(4, 10))
        result = f([ternarized_values])[0]
        np.testing.assert_allclose(result, ternarized_values)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
        assert np.any(result == -1)
        assert np.any(result == 1)
        assert np.any(result == 0)

        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert not np.any(result > 1)
        assert not np.any(result < -1)
        assert np.any(result == -1)
        assert np.any(result == 1)
        assert np.any(result == 0)
Example #16
    def test_dorefa_ste_grad(self, mode):
        @np.vectorize
        def ste_grad(x):
            if 0 <= x <= 1:
                return 1.0
            return 0.0

        def tanh_grad(x):
            # 1 / cosh(x)**2 is the derivative of tanh. The gradients of the
            # scaling operations cancel each other out, and the STE gradient
            # of the quantize_k function is 1 everywhere it is evaluated,
            # because the tanh preprocessing keeps its input inside the
            # linear region of the STE; tanh does all the limiting.
            divisor = np.amax(np.abs(np.tanh(x)))
            return 1 / (np.cosh(x)**2.0) / divisor

        expected_gradient = ste_grad if mode == "activations" else tanh_grad

        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.DoReFa(2, mode)(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), expected_gradient(x))
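
To spell out the chain rule behind tanh_grad: under the assumed DoReFa weight pipeline x -> tanh(x) / max|tanh(x)| -> / 2 + 0.5 -> quantize_k (STE gradient 1) -> * 2 - 1, the * 2 and / 2 factors cancel, so only the tanh preprocessing contributes. A NumPy sketch that, like the test, treats max|tanh(x)| as a constant:

import numpy as np

def dorefa_weight_grad(x):
    # tanh'(x) / max|tanh(x)|; quantize_k contributes a straight-through
    # factor of 1 and the 2x / 0.5x scalings cancel each other.
    divisor = np.amax(np.abs(np.tanh(x)))
    return (1 / np.cosh(x)**2) / divisor
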
Example #17
def test_clip():
    real_values = generate_real_values_with_zeros()
    clip_instance = lq.constraints.weight_clip(clip_value=0.75)
    result = clip_instance(tf.keras.backend.variable(real_values))
    result = tf.keras.backend.eval(result)
    np.testing.assert_allclose(result, np.clip(real_values, -0.75, 0.75))
Example #18
def test_hard_tanh():
    real_values = generate_real_values_with_zeros()
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [lq.activations.hard_tanh(x)])
    result = f([real_values])[0]
    np.testing.assert_allclose(result, np.clip(real_values, -1, 1))