Ejemplo n.º 1
0
    def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
                               expected):
        """Checks adapt()-based output, then checks that a layer built
        directly from the adapted mean/variance produces the same output."""
        # Per-sample input shape: drop the leading batch dimension.
        input_shape = tuple(test_data.shape[1:])
        if use_dataset:
            # Keras APIs expect batched datasets.
            half_batch = test_data.shape[0] // 2
            adapt_data = dataset_ops.Dataset.from_tensor_slices(
                adapt_data).batch(half_batch)
            test_data = dataset_ops.Dataset.from_tensor_slices(
                test_data).batch(half_batch)

        layer = normalization.Normalization(axis=axis)
        layer.adapt(adapt_data)

        def predict_through_model(norm_layer):
            # Wrap the layer in a one-layer functional model and predict.
            inp = keras.Input(shape=input_shape)
            model = keras.Model(inp, norm_layer(inp))
            model._run_eagerly = testing_utils.should_run_eagerly()
            return model.predict(test_data)

        self.assertAllClose(expected, predict_through_model(layer))

        # Re-create the layer from the adapted weights (mean, variance).
        mean, var = layer.get_weights()[:2]
        direct_set_layer = normalization.Normalization(axis=axis,
                                                       mean=mean,
                                                       variance=var)
        self.assertAllClose(expected, predict_through_model(direct_set_layer))
    def bm_adapt_implementation(self, num_elements, batch_size):
        """Test the KPL adapt implementation."""
        input_t = keras.Input(shape=(1, ), dtype=dtypes.float32)
        layer = normalization.Normalization()
        _ = layer(input_t)

        num_repeats = 5
        durations = []
        for _ in range(num_repeats):
            # Fresh float dataset of `num_elements` scalars per repeat.
            ds = dataset_ops.Dataset.range(num_elements)
            ds = ds.map(lambda x: array_ops.expand_dims(
                math_ops.cast(x, dtypes.float32), -1))
            ds = ds.batch(batch_size)

            t0 = time.time()
            # Benchmarked code begins here.
            layer.adapt(ds)
            # Benchmarked code ends here.
            durations.append(time.time() - t0)

        avg_time = np.mean(durations)
        name = "normalization_adapt|%s_elements|batch_%s" % (num_elements,
                                                             batch_size)
        # Compare against the manual tf.data reduce implementation.
        baseline = self.run_dataset_implementation(num_elements, batch_size)
        extras = {
            "tf.data implementation baseline": baseline,
            "delta seconds": (baseline - avg_time),
            "delta percent": ((baseline - avg_time) / baseline) * 100
        }
        self.report_benchmark(iters=num_repeats,
                              wall_time=avg_time,
                              extras=extras,
                              name=name)
    def run_dataset_implementation(self, num_elements, batch_size):
        """Times a hand-rolled tf.data reduction that sets the layer state."""
        input_t = keras.Input(shape=(1, ))
        layer = normalization.Normalization()
        _ = layer(input_t)

        num_repeats = 5
        durations = []
        for _ in range(num_repeats):
            ds = dataset_ops.Dataset.range(num_elements)
            ds = ds.map(lambda x: array_ops.expand_dims(
                math_ops.cast(x, dtypes.float32), -1))
            ds = ds.batch(batch_size)

            t0 = time.time()
            # Benchmarked code begins here.
            # NOTE(review): presumably (k, n, ex, ex2) is the shifted-data
            # accumulator from `reduce_fn` (defined elsewhere) — the mean and
            # sample-variance formulas below assume that layout.
            k, n, ex, ex2 = ds.reduce((0.0, 0, 0.0, 0.0), reduce_fn)
            mean = k.numpy() + ex.numpy() / n.numpy()
            var = (ex2.numpy() -
                   (ex.numpy() * ex.numpy()) / n.numpy()) / (n.numpy() - 1)
            layer.set_weights([mean, var])
            # Benchmarked code ends here.
            durations.append(time.time() - t0)

        return np.mean(durations)
Ejemplo n.º 4
0
    def test_saved_model_tf(self):
        """Round-trips an adapted Normalization model through SavedModel."""
        input_data = [[0], [2], [0], [2]]
        expected_output = [[-1], [1], [-1], [1]]

        inputs = keras.Input(shape=(1, ), dtype=dtypes.int32)
        layer = normalization.Normalization(axis=-1)
        layer.adapt(input_data)
        model = keras.Model(inputs=inputs, outputs=layer(inputs))

        self.assertAllClose(model.predict(input_data), expected_output)

        # Save the model to disk, then reload it.
        output_path = os.path.join(self.get_temp_dir(), "tf_saved_model")
        save.save(model, output_path)
        loaded_model = load.load(output_path)
        f = loaded_model.signatures["serving_default"]

        # Ensure that the loaded model is unique (so that the save/load is real)
        self.assertIsNot(model, loaded_model)

        # Validate correctness of the new model.
        new_output_data = f(constant_op.constant(input_data))["normalization"]
        self.assertAllClose(new_output_data, expected_output)
Ejemplo n.º 5
0
 def test_broadcasting_during_direct_setting(self):
     """Length-1 mean/variance broadcast across a 2-feature input at build."""
     layer = normalization.Normalization(axis=-1,
                                         mean=[1.0],
                                         variance=[2.0])
     layer.build((None, 2))
     mean_weight, var_weight = layer.get_weights()[:2]
     self.assertAllClose([1.0, 1.0], mean_weight)
     self.assertAllClose([2.0, 2.0], var_weight)
Ejemplo n.º 6
0
 def test_1d_data(self):
     """1-D input comes back with an explicit trailing feature dim [4, 1]."""
     samples = [0, 2, 0, 2]
     layer = normalization.Normalization(axis=-1)
     layer.adapt(samples)
     result = layer(samples)
     self.assertListEqual([4, 1], result.shape.as_list())
     if context.executing_eagerly():
         self.assertAllClose([[-1], [1], [-1], [1]], result.numpy())
Ejemplo n.º 7
0
 def test_broadcasting_during_direct_setting(self):
     """Length-1 mean/variance broadcast over a 2-feature input at call."""
     layer = normalization.Normalization(axis=-1,
                                         mean=[1.0],
                                         variance=[1.0])
     result = layer(np.array([[1., 2.]]))
     self.assertAllClose(result, [[0., 1.]])
     # A directly-set layer holds no weights.
     self.assertAllClose(layer.get_weights(), [])
Ejemplo n.º 8
0
 def test_axis_permutations(self, axis):
     """Adapting over various axis specs normalizes a [2, 2, 3] input."""
     layer = normalization.Normalization(axis=axis)
     # data.shape = [2, 2, 3]
     data = np.array([[[0., 1., 2.], [0., 2., 6.]],
                      [[2., 3., 4.], [3., 6., 10.]]])
     # First batch element normalizes to all -1, second to all +1.
     expect = np.array([-np.ones((2, 3)), np.ones((2, 3))])
     layer.adapt(data)
     self.assertAllClose(expect, layer(data))
Ejemplo n.º 9
0
 def test_broadcasting_during_direct_setting_with_tensors(self):
     """Length-1 tensor mean/variance broadcast across two features."""
     layer = normalization.Normalization(
         axis=-1,
         mean=constant_op.constant([1.0]),
         variance=constant_op.constant([2.0]))
     layer.build((None, 2))
     mean_weight, var_weight = layer.get_weights()[:2]
     self.assertAllClose([1.0, 1.0], mean_weight)
     self.assertAllClose([2.0, 2.0], var_weight)
Ejemplo n.º 10
0
    def test_0d_data(self):
        """A scalar input produces a [1, 1]-shaped output (TF2 only)."""
        if not context.executing_eagerly():
            self.skipTest("Only supported in TF2.")

        layer = normalization.Normalization(axis=-1)
        layer.adapt([0, 2, 0, 2])
        result = layer(0.)
        self.assertListEqual([1, 1], result.shape.as_list())
        self.assertAllClose([[-1]], result.numpy())
Ejemplo n.º 11
0
 def test_model_summary_after_layer_adapt(self):
     """summary() must not fail on a model containing an adapted layer."""
     layer = normalization.Normalization(axis=-1)
     layer.adapt(np.array([[[0., 1., 2.], [0., 2., 6.]],
                           [[2., 3., 4.], [3., 6., 10.]]]))
     model = keras.Sequential(
         [layer,
          keras.layers.Dense(64, activation="relu"),
          keras.layers.Dense(1)])
     model.summary()
Ejemplo n.º 12
0
    def test_broadcasting_during_direct_setting_with_tensors(self):
        """Length-1 tensor mean/variance broadcast at call time (TF2 only)."""
        if not context.executing_eagerly():
            self.skipTest("Only supported in TF2.")

        layer = normalization.Normalization(
            axis=-1,
            mean=constant_op.constant([1.0]),
            variance=constant_op.constant([1.0]))
        result = layer(np.array([[1., 2.]]))
        self.assertAllClose(result, [[0., 1.]])
        # A directly-set layer holds no weights.
        self.assertAllClose(layer.get_weights(), [])
Ejemplo n.º 13
0
    def test_merge_state(self):
        """Merging per-shard adapted layers matches adapting the full data."""
        data = np.random.rand(30, 10, 2)
        ds = dataset_ops.Dataset.from_tensor_slices(data).batch(2)
        norm = normalization.Normalization(axis=(1, 2))
        norm.adapt(ds)

        # Adapt one independent layer per dataset shard.
        shard_layers = []
        for shard_index in range(3):
            shard_layer = normalization.Normalization(axis=(1, 2))
            shard_layer.adapt(ds.shard(3, shard_index))
            shard_layers.append(shard_layer)

        # Fold the other shards' state into the first layer.
        merged_norm = shard_layers[0]
        merged_norm.merge_state(shard_layers[1:])

        self.assertAllClose(norm(data), merged_norm(data))
Ejemplo n.º 14
0
 def test_mixing_preprocessing_and_regular_layers(self):
     """A sequential stage mixing KPLs and Conv2D supports the Model API."""
     stage = preprocessing_stage.PreprocessingStage([
         image_preprocessing.CenterCrop(16, 16),
         normalization.Normalization(),
         convolutional.Conv2D(4, 3)
     ])
     features = np.ones((16, 20, 20, 3), dtype='float32')
     labels = np.ones((16, 14, 14, 4))
     stage.adapt(features)
     _ = stage(features)
     stage.compile('rmsprop', 'mse')
     stage.fit(features, labels)
     _ = stage.evaluate(features, labels)
     _ = stage.predict(features)
Ejemplo n.º 15
0
    def test_layer_computation(self, distribution, adapt_data, axis, test_data,
                               use_dataset, expected):
        """Adapt and predict under a distribution strategy scope."""
        # Leave every non-batch dim unspecified in the input signature.
        input_shape = (None,) * (test_data.ndim - 1)
        if use_dataset:
            # Keras APIs expect batched datasets.
            half_batch = test_data.shape[0] // 2
            adapt_data = dataset_ops.Dataset.from_tensor_slices(
                adapt_data).batch(half_batch)
            test_data = dataset_ops.Dataset.from_tensor_slices(
                test_data).batch(half_batch)

        with distribution.scope():
            input_data = keras.Input(shape=input_shape)
            layer = normalization.Normalization(axis=axis)
            layer.adapt(adapt_data)
            model = keras.Model(input_data, layer(input_data))
            output_data = model.predict(test_data)
        self.assertAllClose(expected, output_data)
Ejemplo n.º 16
0
  def test_mixing_preprocessing_and_regular_layers(self):
    """A functional stage mixing KPLs and regular layers: adapt, evaluate and
    predict work, while fit() raises for both array and dataset inputs."""
    x0 = Input(shape=(10, 10, 3))
    x1 = Input(shape=(10, 10, 3))
    x2 = Input(shape=(10, 10, 3))

    summed = merge.Add()([x0, x1])
    cropped = image_preprocessing.CenterCrop(8, 8)(x2)
    padded = convolutional.ZeroPadding2D(padding=1)(cropped)

    z = merge.Add()([summed, padded])
    z = normalization.Normalization()(z)
    z = convolutional.Conv2D(4, 3)(z)

    stage = preprocessing_stage.FunctionalPreprocessingStage([x0, x1, x2], z)

    data = [np.ones((12, 10, 10, 3), dtype='float32') for _ in range(3)]

    stage.adapt(data)
    _ = stage(data)
    stage.compile('rmsprop', 'mse')
    # Training a preprocessing stage is rejected.
    with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
      stage.fit(data, np.ones((12, 8, 8, 4)))

    # Same rejection when the inputs arrive as a zipped, batched dataset.
    ds_x = dataset_ops.Dataset.zip(tuple(
        dataset_ops.Dataset.from_tensor_slices(np.ones((12, 10, 10, 3)))
        for _ in range(3)))
    ds_y = dataset_ops.Dataset.from_tensor_slices(np.ones((12, 8, 8, 4)))
    dataset = dataset_ops.Dataset.zip((ds_x, ds_y)).batch(4)

    with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
      stage.fit(dataset)
    _ = stage.evaluate(data, np.ones((12, 8, 8, 4)))
    _ = stage.predict(data)
Ejemplo n.º 17
0
    def test_multiple_adapts(self):
        """Re-adapting a layer changes an already-built model's predictions."""
        first_adapt = [[0], [2], [0], [2]]
        second_adapt = [[2], [4], [2], [4]]
        predict_input = [[2], [2]]

        inputs = keras.Input(shape=(1, ), dtype=dtypes.int32)
        layer = normalization.Normalization(axis=-1)
        layer.adapt(first_adapt)
        model = keras.Model(inputs=inputs, outputs=layer(inputs))

        self.assertAllClose(model.predict(predict_input), [[1], [1]])

        # Re-adapt the layer on new inputs, then re-compile the model.
        layer.adapt(second_adapt)
        model.compile()
        # `predict` should now use the new model state.
        self.assertAllClose(model.predict(predict_input), [[-1], [-1]])
Ejemplo n.º 18
0
def _create_normalization_layer_with_adapt():
    """Returns a Normalization layer adapted to random (10, 4) data."""
    adapted_layer = normalization.Normalization()
    adapted_layer.adapt(np.random.normal(size=(10, 4)))
    return adapted_layer
Ejemplo n.º 19
0
def _create_normalization_layer_without_adapt():
    """Returns a Normalization layer built from random mean/variance."""
    random_mean = np.random.normal(size=(4, ))
    random_variance = np.random.uniform(0.5, 2., size=(4, ))
    return normalization.Normalization(mean=random_mean,
                                       variance=random_variance)
Ejemplo n.º 20
0
 def test_zeros_fail_init(self, axis):
     """Constructing with a zero in `axis` must raise ValueError."""
     expected_message = "The argument 'axis' may not be 0."
     with self.assertRaisesRegex(ValueError, expected_message):
         normalization.Normalization(axis=axis)
Ejemplo n.º 21
0
 def test_broadcasting_during_direct_setting_with_variables_fails(self):
     """Variables are not accepted as direct mean/variance values."""
     with self.assertRaisesRegex(ValueError, "passing a Variable"):
         _ = normalization.Normalization(
             axis=-1,
             mean=variables.Variable([1.0]),
             variance=variables.Variable([2.0]))
Ejemplo n.º 22
0
 def test_bad_axis_fail_build(self, axis):
     """Out-of-range `axis` values are rejected at build() time."""
     norm_layer = normalization.Normalization(axis=axis)
     with self.assertRaisesRegex(ValueError, r"in the range"):
         norm_layer.build([None, 2, 3])