# Example 1
    def test_saved_model_tf(self, adapted):
        """Round-trip a Normalization model through tf.saved_model and check
        the restored serving signature reproduces the original outputs."""
        samples = [[0.0], [2.0], [0.0], [2.0]]
        expected = [[-1.0], [1.0], [-1.0], [1.0]]

        model_input = keras.Input(shape=(1,), dtype=tf.float32)
        if adapted:
            norm_layer = normalization.Normalization(axis=-1)
            norm_layer.adapt(samples)
        else:
            norm_layer = normalization.Normalization(mean=1.0, variance=1.0)
        model = keras.Model(inputs=model_input,
                            outputs=norm_layer(model_input))

        self.assertAllClose(model.predict(samples), expected)

        # Persist and restore through the low-level SavedModel API.
        save_dir = os.path.join(self.get_temp_dir(), "tf_saved_model")
        tf.saved_model.save(model, save_dir)
        restored = tf.saved_model.load(save_dir)
        serving_fn = restored.signatures["serving_default"]

        # The restored object must be distinct, proving a real save/load.
        self.assertIsNot(model, restored)

        # The restored signature must produce the same normalized values.
        restored_output = serving_fn(tf.constant(samples))["normalization"]
        self.assertAllClose(restored_output, expected)
# Example 2
    def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
                               expected):
        """Check that adapt()-derived statistics give the same results as a
        layer constructed with the same mean/variance set directly."""
        feature_shape = tuple(test_data.shape[1:])
        if use_dataset:
            # Keras APIs expect batched datasets.
            half_batch = test_data.shape[0] // 2
            adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
                half_batch)
            test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(
                half_batch)

        adaptive_layer = normalization.Normalization(axis=axis)
        adaptive_layer.adapt(adapt_data)

        def _predict_through(layer):
            # Wrap `layer` in a one-layer model and run prediction.
            inp = keras.Input(shape=feature_shape)
            model = keras.Model(inp, layer(inp))
            model._run_eagerly = testing_utils.should_run_eagerly()
            return model.predict(test_data)

        self.assertAllClose(expected, _predict_through(adaptive_layer))

        # A second layer built from the adapted statistics directly must
        # behave identically.
        layer_weights = adaptive_layer.get_weights()
        direct_set_layer = normalization.Normalization(
            axis=axis, mean=layer_weights[0], variance=layer_weights[1])
        self.assertAllClose(expected, _predict_through(direct_set_layer))
# Example 3
  def test_merge_state(self):
    """Adapting on shards and merging must equal adapting on all the data."""
    if not tf.executing_eagerly():
      self.skipTest("`merge_state` only supported in TF2")

    data = np.random.rand(30, 10, 2)
    dataset = tf.data.Dataset.from_tensor_slices(data).batch(2)
    reference = normalization.Normalization(axis=(1, 2))
    reference.adapt(dataset)

    # Adapt one layer per disjoint third of the dataset.
    shard_layers = []
    for shard_index in range(3):
      shard_layer = normalization.Normalization(axis=(1, 2))
      shard_layer.adapt(dataset.shard(3, shard_index))
      shard_layers.append(shard_layer)

    # Fold the second and third layers' statistics into the first.
    merged_norm = shard_layers[0]
    merged_norm.merge_state(shard_layers[1:])

    self.assertAllClose(reference(data), merged_norm(data))
# Example 4
 def test_invert(self):
     """An inverting layer must undo the forward normalization exactly."""
     data = np.array([0.0, 2.0, 0.0, 2.0])
     forward = normalization.Normalization(mean=1.0, variance=1.0)
     backward = normalization.Normalization(mean=1.0, variance=1.0,
                                            invert=True)
     # Normalize then invert; the composition should be the identity.
     round_trip = backward(forward(data))
     self.assertListEqual(round_trip.shape.as_list(), [4])
     self.assertAllClose(round_trip, [0.0, 2.0, 0.0, 2.0])
# Example 5
 def test_output_dtype(self):
   """Output dtype honors an explicit dtype, else follows the global policy."""
   if not tf.__internal__.tf2.enabled():
     self.skipTest("set_global_policy only supported in TF2.")
   policy.set_global_policy("float64")
   inputs = keras.Input(batch_size=16, shape=(1,))
   # An explicit dtype argument wins over the global policy.
   explicit_layer = normalization.Normalization(mean=1.0, variance=1.0,
                                                dtype="float16")
   self.assertAllEqual(explicit_layer(inputs).dtype, tf.float16)
   # Without an explicit dtype, the global float64 policy applies.
   default_layer = normalization.Normalization(mean=1.0, variance=1.0)
   self.assertAllEqual(default_layer(inputs).dtype, tf.float64)
    def run_dataset_implementation(self, num_elements, batch_size):
        """Benchmark computing mean/variance with a raw tf.data reduction.

        Returns the average wall time (seconds) over several repeats.
        """
        input_t = keras.Input(shape=(1,))
        layer = normalization.Normalization()
        _ = layer(input_t)

        num_repeats = 5
        durations = []
        for _ in range(num_repeats):
            ds = tf.data.Dataset.range(num_elements)
            ds = ds.map(
                lambda x: tf.compat.v1.expand_dims(tf.cast(x, tf.float32), -1))
            ds = ds.batch(batch_size)

            started = time.time()
            # Benchmarked code begins here.
            # NOTE(review): `reduce_fn` is defined elsewhere in this file;
            # from the formulas below it presumably accumulates
            # (shift K, count n, sum Ex, sum-of-squares Ex2) — confirm.
            k, n, ex, ex2 = ds.reduce((0.0, 0, 0.0, 0.0), reduce_fn)
            # Shifted-data formulas: mean = K + Ex/n; variance uses
            # Bessel's correction (n - 1).
            mean = k.numpy() + ex.numpy() / n.numpy()
            var = (ex2.numpy() -
                   (ex.numpy() * ex.numpy()) / n.numpy()) / (n.numpy() - 1)
            layer.set_weights([mean, var])
            # Benchmarked code ends here.
            durations.append(time.time() - started)

        return np.mean(durations)
    def bm_adapt_implementation(self, num_elements, batch_size):
        """Test the KPL adapt implementation.

        Benchmarks `Normalization.adapt` and reports it against the raw
        tf.data baseline from `run_dataset_implementation`.
        """
        input_t = keras.Input(shape=(1,), dtype=tf.float32)
        layer = normalization.Normalization()
        _ = layer(input_t)

        num_repeats = 5
        durations = []
        for _ in range(num_repeats):
            ds = tf.data.Dataset.range(num_elements)
            ds = ds.map(
                lambda x: tf.compat.v1.expand_dims(tf.cast(x, tf.float32), -1))
            ds = ds.batch(batch_size)

            started = time.time()
            # Benchmarked code begins here.
            layer.adapt(ds)
            # Benchmarked code ends here.
            durations.append(time.time() - started)

        avg_time = np.mean(durations)
        name = "normalization_adapt|%s_elements|batch_%s" % (num_elements,
                                                             batch_size)
        # Compare against the hand-rolled tf.data implementation.
        baseline = self.run_dataset_implementation(num_elements, batch_size)
        extras = {
            "tf.data implementation baseline": baseline,
            "delta seconds": (baseline - avg_time),
            "delta percent": ((baseline - avg_time) / baseline) * 100
        }
        self.report_benchmark(iters=num_repeats,
                              wall_time=avg_time,
                              extras=extras,
                              name=name)
# Example 8
 def test_0d_unbatched_adapt(self):
     """Adapt on an unbatched scalar dataset, then map the layer over it."""
     scalars = tf.data.Dataset.from_tensor_slices([2., 0., 2., 0.])
     layer = normalization.Normalization(axis=None)
     layer.adapt(scalars)
     normalized = scalars.map(layer)
     self.assertAllClose(list(normalized.as_numpy_iterator()),
                         [1., -1., 1., -1.])
# Example 9
 def test_list_input(self):
     """Calling the layer on a plain Python list must raise a ValueError."""
     expected_message = (
         "Normalization only accepts a single input. If you are "
         "passing a python list or tuple as a single input, "
         "please convert to a numpy array or `tf.Tensor`.")
     with self.assertRaisesRegex(ValueError, expected_message):
         normalization.Normalization()([1, 2, 3])
# Example 10
 def test_1d_data(self):
     """A 1-D input with axis=-1 comes back with shape (4, 1)."""
     values = [0, 2, 0, 2]
     layer = normalization.Normalization(axis=-1)
     layer.adapt(values)
     result = layer(values)
     self.assertListEqual(result.shape.as_list(), [4, 1])
     if tf.executing_eagerly():
         self.assertAllClose(result.numpy(), [[-1], [1], [-1], [1]])
# Example 11
 def test_broadcasting_during_direct_setting(self):
     """Length-1 mean/variance lists broadcast across the last axis."""
     layer = normalization.Normalization(axis=-1, mean=[1.0],
                                         variance=[1.0])
     result = layer(np.array([[1., 2.]]))
     self.assertAllClose(result, [[0., 1.]])
     # Directly-set statistics leave the layer with no weights.
     self.assertAllClose(layer.get_weights(), [])
# Example 12
 def test_broadcasting_during_direct_setting_with_tensors(self):
     """Length-1 tensor statistics broadcast to the built feature width."""
     layer = normalization.Normalization(axis=-1,
                                         mean=tf.constant([1.0]),
                                         variance=tf.constant([2.0]))
     layer.build((None, 2))
     built_weights = layer.get_weights()
     self.assertAllClose([1.0, 1.0], built_weights[0])
     self.assertAllClose([2.0, 2.0], built_weights[1])
# Example 13
 def test_axis_permutations(self, axis):
     """Adapted statistics normalize this data to -1/+1 along the batch."""
     layer = normalization.Normalization(axis=axis)
     # data.shape = [2, 2, 3]
     samples = np.array([[[0., 1., 2.], [0., 2., 6.]],
                         [[2., 3., 4.], [3., 6., 10.]]])
     expected = np.array([[[-1., -1., -1.], [-1., -1., -1.]],
                          [[1., 1., 1.], [1., 1., 1.]]])
     layer.adapt(samples)
     self.assertAllClose(expected, layer(samples))
# Example 14
 def test_model_summary_after_layer_adapt(self):
    """summary() must work on a Sequential that contains an adapted layer."""
    samples = np.array([[[0., 1., 2.], [0., 2., 6.]],
                        [[2., 3., 4.], [3., 6., 10.]]])
    layer = normalization.Normalization(axis=-1)
    layer.adapt(samples)
    model = keras.Sequential([
        layer,
        keras.layers.Dense(64, activation="relu"),
        keras.layers.Dense(1),
    ])
    # Smoke test: producing the summary must not raise.
    model.summary()
# Example 15
    def test_0d_data(self):
        """A scalar input to an adapted axis=-1 layer yields shape [1, 1]."""
        if not tf.executing_eagerly():
            self.skipTest("Only supported in TF2.")

        layer = normalization.Normalization(axis=-1)
        layer.adapt([0, 2, 0, 2])
        result = layer(0.)
        self.assertListEqual(result.shape.as_list(), [1, 1])
        self.assertAllClose(result.numpy(), [[-1]])
# Example 16
    def test_broadcasting_during_direct_setting_with_tensors(self):
        """Length-1 tensor statistics broadcast over a wider eager input."""
        if not tf.executing_eagerly():
            self.skipTest("Only supported in TF2.")

        layer = normalization.Normalization(axis=-1,
                                            mean=tf.constant([1.0]),
                                            variance=tf.constant([1.0]))
        result = layer(np.array([[1., 2.]]))
        self.assertAllClose(result, [[0., 1.]])
        # No weights are created when statistics are supplied directly.
        self.assertAllClose(layer.get_weights(), [])
# Example 17
 def test_mixing_preprocessing_and_regular_layers(self):
     """A PreprocessingStage mixing KPLs with regular layers runs the full
     adapt / call / fit / evaluate / predict cycle."""
     stage = preprocessing_stage.PreprocessingStage([
         image_preprocessing.CenterCrop(16, 16),
         normalization.Normalization(),
         convolutional.Conv2D(4, 3),
     ])
     images = np.ones((16, 20, 20, 3), dtype='float32')
     targets = np.ones((16, 14, 14, 4))
     stage.adapt(images)
     _ = stage(images)
     stage.compile('rmsprop', 'mse')
     stage.fit(images, targets)
     _ = stage.evaluate(images, targets)
     _ = stage.predict(images)
# Example 18
  def test_merge_state(self):
    """Merging per-shard adapted states must match adapting on all data."""
    data = np.random.rand(30, 10, 2)
    dataset = tf.data.Dataset.from_tensor_slices(data).batch(2)
    reference = normalization.Normalization(axis=(1, 2))
    reference.adapt(dataset)

    # Adapt one layer per disjoint third of the dataset.
    shard_layers = []
    for shard_index in range(3):
      shard_layer = normalization.Normalization(axis=(1, 2))
      shard_layer.adapt(dataset.shard(3, shard_index))
      shard_layers.append(shard_layer)

    # Fold the remaining layers' statistics into the first one.
    merged_norm = shard_layers[0]
    merged_norm.merge_state(shard_layers[1:])

    self.assertAllClose(reference(data), merged_norm(data))
# Example 19
 def test_1d_unbatched_adapt(self):
     """Adapt on an unbatched dataset of 1-D rows, then map the layer."""
     rows = tf.data.Dataset.from_tensor_slices([
         [2.0, 0.0, 2.0, 0.0],
         [0.0, 2.0, 0.0, 2.0],
     ])
     layer = normalization.Normalization(axis=-1)
     layer.adapt(rows)
     normalized = rows.map(layer)
     self.assertAllClose(
         list(normalized.as_numpy_iterator()),
         [
             [1.0, -1.0, 1.0, -1.0],
             [-1.0, 1.0, -1.0, 1.0],
         ],
     )
# Example 20
    def test_layer_computation(self, strategy, adapt_data, axis, test_data,
                               use_dataset, expected):
        """Adapt and apply a Normalization layer inside a distribution
        strategy scope and verify the predicted output."""
        # All feature dimensions are left unknown in the Input spec.
        input_shape = (None,) * (test_data.ndim - 1)
        if use_dataset:
            # Keras APIs expect batched datasets.
            adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
                2)
            test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(2)

        with strategy.scope():
            inputs = keras.Input(shape=input_shape)
            layer = normalization.Normalization(axis=axis)
            layer.adapt(adapt_data)
            model = keras.Model(inputs, layer(inputs))
        self.assertAllClose(expected, model.predict(test_data))
    def test_mixing_preprocessing_and_regular_layers(self):
        """A functional preprocessing stage mixing KPLs and regular layers
        adapts, evaluates and predicts, but rejects fit() with a
        'Preprocessing stage' error."""
        x0 = Input(shape=(10, 10, 3))
        x1 = Input(shape=(10, 10, 3))
        x2 = Input(shape=(10, 10, 3))

        y0 = merge.Add()([x0, x1])
        y1 = image_preprocessing.CenterCrop(8, 8)(x2)
        y1 = convolutional.ZeroPadding2D(padding=1)(y1)

        z = merge.Add()([y0, y1])
        z = normalization.Normalization()(z)
        z = convolutional.Conv2D(4, 3)(z)

        stage = preprocessing_stage.FunctionalPreprocessingStage(
            [x0, x1, x2], z)

        data = [np.ones((12, 10, 10, 3), dtype='float32') for _ in range(3)]

        stage.adapt(data)
        _ = stage(data)
        stage.compile('rmsprop', 'mse')
        # Training the stage directly is not supported.
        with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
            stage.fit(data, np.ones((12, 8, 8, 4)))

        # The same restriction applies when feeding a tf.data pipeline.
        ds_x = tf.data.Dataset.zip(tuple(
            tf.data.Dataset.from_tensor_slices(np.ones((12, 10, 10, 3)))
            for _ in range(3)))
        ds_y = tf.data.Dataset.from_tensor_slices(np.ones((12, 8, 8, 4)))
        dataset = tf.data.Dataset.zip((ds_x, ds_y)).batch(4)

        with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
            stage.fit(dataset)
        # Evaluation and prediction remain supported.
        _ = stage.evaluate(data, np.ones((12, 8, 8, 4)))
        _ = stage.predict(data)
# Example 22
  def test_multiple_adapts(self):
    """Re-adapting a layer must update model state after recompilation."""
    first_adapt = [[0], [2], [0], [2]]
    second_adapt = [[2], [4], [2], [4]]
    predict_input = [[2], [2]]

    inputs = keras.Input(shape=(1,), dtype=tf.int32)
    layer = normalization.Normalization(axis=-1)
    layer.adapt(first_adapt)
    model = keras.Model(inputs=inputs, outputs=layer(inputs))

    # Under the first statistics, 2 normalizes to +1.
    self.assertAllClose(model.predict(predict_input), [[1], [1]])

    # Re-adapt the layer on new inputs.
    layer.adapt(second_adapt)
    # Re-compile so `predict` picks up the updated state.
    model.compile()
    # Under the second statistics, 2 normalizes to -1.
    self.assertAllClose(model.predict(predict_input), [[-1], [-1]])
# Example 23
def _create_normalization_layer_without_adapt():
    """Build a Normalization layer with randomly drawn fixed statistics."""
    random_mean = np.random.normal(size=(4,))
    random_variance = np.random.uniform(0.5, 2., size=(4,))
    return normalization.Normalization(mean=random_mean,
                                       variance=random_variance)
# Example 24
def _create_normalization_layer_with_adapt():
    """Build a Normalization layer adapted to random Gaussian data."""
    adapted_layer = normalization.Normalization()
    adapted_layer.adapt(np.random.normal(size=(10, 4)))
    return adapted_layer
# Example 25
 def test_0d_data(self):
   """With axis=None and fixed statistics, a scalar maps to a scalar."""
   layer = normalization.Normalization(axis=None, mean=1.0, variance=1.0)
   result = layer(0.)
   self.assertListEqual(result.shape.as_list(), [])
   self.assertAllClose(result, -1)
# Example 26
 def test_keeping_an_unknown_axis_fails(self):
   """Building with an unknown size on a kept axis must raise."""
   layer = normalization.Normalization(axis=-1)
   with self.assertRaisesRegex(ValueError, "axis.*must have known shape"):
     layer.build([None])
# Example 27
 def test_scalar_input(self):
   """A scalar input has no axes, so the default axis is out of range."""
   with self.assertRaisesRegex(ValueError,
                               "axis.*values must be in the range"):
     normalization.Normalization()(1)
# Example 28
 def test_broadcasting_during_direct_setting_with_variables_fails(self):
     """tf.Variable statistics are rejected at construction time."""
     with self.assertRaisesRegex(ValueError, "passing a Variable"):
         _ = normalization.Normalization(axis=-1,
                                         mean=tf.Variable([1.0]),
                                         variance=tf.Variable([2.0]))
# Example 29
 def test_bad_axis_fail_build(self, axis):
     """Out-of-range axes are rejected when the layer is built."""
     layer = normalization.Normalization(axis=axis)
     with self.assertRaisesRegex(ValueError, r"in the range"):
         layer.build([None, 2, 3])
# Example 30
 def test_zeros_fail_init(self, axis):
     """axis=0 (the batch axis) is rejected at construction time."""
     with self.assertRaisesRegex(ValueError,
                                 "The argument 'axis' may not be 0."):
         normalization.Normalization(axis=axis)