Example 1
0
class Precision_16_32Test(unittest.TestCase):
    """Tests for mixed precision ('16.32'): float16 compute, float32 updates."""

    def setUp(self):
        # '16.32' selects float16 compute precision with float32 weight
        # updates (the standard Keras mixed-precision configuration).
        self.precision_instance = Precision('16.32')

    def test_half_precision_instanciation(self):
        # Use unittest assertions rather than bare `assert`: bare asserts are
        # stripped under `python -O` and give no diagnostic on failure.
        self.assertEqual(self.precision_instance.compute_precision, tf.float16)
        self.assertEqual(self.precision_instance.weight_update_precision,
                         tf.float32)

    def test_set_precision(self):
        # apply() is expected to install the global dtype policy so that
        # layers created afterwards pick it up.
        self.precision_instance.apply()

        input_layer = tf.keras.layers.Input(shape=(None, 1))
        layer = tf.keras.layers.Dense(1)
        self.assertEqual(layer.compute_dtype, tf.float16)
        output = layer(input_layer)
        self.assertEqual(output.dtype, tf.float16)

        model = tf.keras.Model(inputs=input_layer, outputs=output)

        with tf.GradientTape() as tape:
            input_data = np.array([[1]])
            output_data = model(input_data)

        # Compute gradients outside the tape context: only the forward pass
        # needs to be recorded.
        grad = tape.gradient(output_data, model.trainable_variables)
        # Gradients are tf.float32 because weight updates stay in full
        # precision under the '16.32' policy.
        self.assertTrue(all(g.dtype == tf.float32 for g in grad))
        # Model weights are likewise kept in tf.float32.
        self.assertTrue(
            all(var.dtype == tf.float32 for var in model.trainable_variables))
Example 2
0
class Precision32_32Test(unittest.TestCase):
    """Tests for full precision ('32.32'): float32 compute and updates."""

    def setUp(self):
        # '32.32' selects float32 for both compute and weight updates.
        self.precision_instance = Precision('32.32')

    def test_full_precision_instantiation(self):
        # unittest assertions instead of bare `assert` (stripped under -O,
        # and assertEqual reports both values on failure).
        self.assertEqual(self.precision_instance.compute_precision, tf.float32)
        self.assertEqual(self.precision_instance.weight_update_precision,
                         tf.float32)

    def test_set_precision(self):
        self.precision_instance.apply()

        # shape must be a tuple: `(1)` is just the int 1.
        input_layer = tf.keras.layers.Input(shape=(1,))
        layer = tf.keras.layers.Dense(1)
        # Check compute_dtype — the same property the 16.32 test checks —
        # rather than `dtype` (the variable dtype); both are float32 here.
        self.assertEqual(layer.compute_dtype, tf.float32)
        output = layer(input_layer)
        self.assertEqual(output.dtype, tf.float32)

        model = tf.keras.Model(inputs=input_layer, outputs=output)

        with tf.GradientTape() as tape:
            input_data = np.array([[1]])
            output_data = model(input_data)

        # Gradients are computed outside the tape context; only the forward
        # pass needs recording.
        grads = tape.gradient(output_data, model.trainable_variables)
        self.assertTrue(all(g.dtype == tf.float32 for g in grads))
        # Weights are float32 too (parity with the 16.32 test's final check).
        self.assertTrue(
            all(var.dtype == tf.float32 for var in model.trainable_variables))
Example 3
0
    def test_mixed_precision(self):
        """Mixed (16.32) precision should underflow where float32 would not."""
        Precision('16.32').apply()

        computed = self.get_model()(self.input_value).numpy()

        # float16 compute cannot represent the tiny expected value, so the
        # result must differ from the reference at 10 decimal places.
        self.assertNotAlmostEqual(self.expected_value[0][0],
                                  computed[0][0],
                                  places=10)
Example 4
0
    def test_full_precision(self):
        """Full (32.32) precision should NOT underflow on the tiny value."""
        Precision('32.32').apply()

        computed = self.get_model()(self.input_value).numpy()

        # With float32 compute the small expected value is representable, so
        # the result matches the reference to 10 decimal places.
        self.assertAlmostEqual(self.expected_value[0][0],
                               computed[0][0],
                               places=10)