def test_distribution_strategy_output_with_adapt(self):
  vocab_data = [[
      "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
      "and", "fire"
  ]]
  vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
  input_array = np.array([["earth", "wind", "and", "fire"],
                          ["fire", "and", "earth", "michigan"]])
  input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
      2, drop_remainder=True)

  # After adapting, tokens are indexed by frequency starting at 2 (0 is
  # reserved for padding, 1 for OOV), so "michigan" maps to 1.
  expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

  config.set_soft_device_placement(True)
  strategy = tpu_strategy_test_utils.get_tpu_strategy()

  with strategy.scope():
    input_data = keras.Input(shape=(None,), dtype=dtypes.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT)
    layer.adapt(vocab_dataset)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
  output_dataset = model.predict(input_dataset)
  self.assertAllEqual(expected_output, output_dataset)
def test_tpu_distribution(self):
  input_data = np.asarray([["omar"], ["stringer"], ["marlo"], ["wire"]])
  input_dataset = dataset_ops.Dataset.from_tensor_slices(input_data).batch(
      2, drop_remainder=True)
  # Each string hashes into one of `num_bins=2` buckets.
  expected_output = [[0], [0], [1], [0]]

  config.set_soft_device_placement(True)
  strategy = tpu_strategy_test_utils.get_tpu_strategy()

  with strategy.scope():
    input_data = keras.Input(shape=(None,), dtype=dtypes.string)
    layer = hashing.Hashing(num_bins=2)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
  output_dataset = model.predict(input_dataset)
  self.assertAllEqual(expected_output, output_dataset)
def test_tpu_distribution(self):
  input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

  # Multi-hot (binary) encoding over `max_tokens` categories.
  # pyformat: disable
  expected_output = [[0, 1, 1, 1, 0, 0],
                     [1, 1, 0, 1, 0, 0]]
  # pyformat: enable
  max_tokens = 6

  strategy = tpu_strategy_test_utils.get_tpu_strategy()
  with strategy.scope():
    input_data = keras.Input(shape=(4,), dtype=dtypes.int32)
    layer = categorical_encoding.CategoricalEncoding(
        max_tokens=max_tokens, output_mode=categorical_encoding.BINARY)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
  output_dataset = model.predict(input_array)
  self.assertAllEqual(expected_output, output_dataset)
def test_tpu_distribution(self):
  input_array = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
  # With bin boundaries [0., 1., 2.] the bucket indices are:
  # (-inf, 0) -> 0, [0, 1) -> 1, [1, 2) -> 2, [2, inf) -> 3.
  expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]]
  expected_output_shape = [None, None]

  strategy = tpu_strategy_test_utils.get_tpu_strategy()
  with strategy.scope():
    input_data = keras.Input(shape=(None,))
    layer = discretization.Discretization(
        bins=[0., 1., 2.], output_mode=discretization.INTEGER)
    bucket_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=bucket_data)
  output_dataset = model.predict(input_array)
  self.assertAllEqual(expected_output, output_dataset)
def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
                           expected):
  input_shape = tuple([None for _ in range(test_data.ndim - 1)])
  if use_dataset:
    # Keras APIs expect batched datasets.
    adapt_data = dataset_ops.Dataset.from_tensor_slices(adapt_data).batch(
        test_data.shape[0] // 2)
    test_data = dataset_ops.Dataset.from_tensor_slices(test_data).batch(
        test_data.shape[0] // 2)

  strategy = tpu_strategy_test_utils.get_tpu_strategy()
  with strategy.scope():
    input_data = keras.Input(shape=input_shape)
    layer = normalization.Normalization(axis=axis)
    layer.adapt(adapt_data)
    output = layer(input_data)
    model = keras.Model(input_data, output)
    output_data = model.predict(test_data)
  self.assertAllClose(expected, output_data)
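# A minimal sketch of how `test_layer_computation` above might be driven with
# absl's `parameterized.named_parameters`; the case name and fixture values
# here are illustrative assumptions, not the original test's data. For adapt
# data [1, 2, 3, 4, 5] (mean 3, stddev sqrt(2)), the inputs [1, 2, 3]
# normalize to roughly [-1.4142, -0.7071, 0.].
_LAYER_COMPUTATION_TEST_CASES = (
    {
        "testcase_name": "2d_single_feature",
        "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]],
                               dtype=np.float32),
        "axis": -1,
        "test_data": np.array([[1.], [2.], [3.]], dtype=np.float32),
        "use_dataset": True,
        "expected": np.array([[-1.4142135], [-0.70710677], [0.]],
                             dtype=np.float32),
    },
)
# Applied to the test method as:
#   @parameterized.named_parameters(*_LAYER_COMPUTATION_TEST_CASES)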