Example #1
def test_CrossNet(layer_num):
    with CustomObjectScope({'CrossNet': layers.CrossNet}):
        layer_test(layers.CrossNet,
                   kwargs={
                       'layer_num': layer_num,
                   },
                   input_shape=(2, 3))
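Every snippet on this page follows the same pattern: `CustomObjectScope` temporarily registers a custom class under its serialized name so that Keras can resolve it when a config or saved model is deserialized (which is what `layer_test` does internally). A minimal self-contained sketch of the mechanism; the `Double` layer here is made up purely for illustration:

import tensorflow as tf
from tensorflow.keras.utils import CustomObjectScope

class Double(tf.keras.layers.Layer):
    """Toy custom layer: doubles its input."""
    def call(self, inputs):
        return inputs * 2

model = tf.keras.Sequential([tf.keras.Input(shape=(4,)), Double()])
config = model.get_config()

# Outside a scope, from_config cannot resolve the name 'Double';
# inside it, deserialization succeeds.
with CustomObjectScope({'Double': Double}):
    restored = tf.keras.Sequential.from_config(config)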
Example #2
 def test_random_contrast_amplitude(self, amplitude):
   with CustomObjectScope(
       {'RandomContrast': image_preprocessing.RandomContrast}):
     input_images = np.random.random((2, 5, 8, 3))
     with tf_test_util.use_gpu():
       layer = image_preprocessing.RandomContrast(amplitude)
       layer(input_images)
Example #3
    def test_fit_embed(self):
        model = keras.models.Sequential()
        model.add(
            keras.layers.Embedding(
                input_shape=(None, ),
                input_dim=5,
                output_dim=16,
                mask_zero=True,
            ))
        model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=8)))
        model.add(keras.layers.Dense(units=2, activation='softmax'))
        model.compile(AdamWarmup(
            decay_steps=10000,
            warmup_steps=5000,
            learning_rate=1e-3,
            min_lr=1e-4,
            amsgrad=True,
            weight_decay=1e-3,
        ),
                      loss='sparse_categorical_crossentropy')

        x = np.random.randint(0, 5, (1024, 15))
        y = (x[:, 1] > 2).astype('int32')
        model.fit(x, y, epochs=10, verbose=1)

        model_path = os.path.join(tempfile.gettempdir(),
                                  'test_warmup_%f.h5' % np.random.random())
        model.save(model_path)
        from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
        with CustomObjectScope({
                'AdamWarmup': AdamWarmup
        }):  # Workaround for incorrect global variable used in keras
            keras.models.load_model(model_path,
                                    custom_objects={'AdamWarmup': AdamWarmup})
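The inline comment explains the doubled registration: in some older Keras versions the `custom_objects` mapping was apparently kept in a global that the loader did not reliably consult, so these tests wrap `load_model` in a scope *and* pass the mapping explicitly. In current tf.keras either mechanism alone is normally sufficient; a sketch of the two equivalent forms, reusing the names from the test above:

# Form 1: pass the mapping directly to the loader.
model = keras.models.load_model(model_path, custom_objects={'AdamWarmup': AdamWarmup})

# Form 2: register the class via a scope for the duration of the load.
with CustomObjectScope({'AdamWarmup': AdamWarmup}):
    model = keras.models.load_model(model_path)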
Example #4
 def test_random_contrast_int_dtype(self):
   with CustomObjectScope(
       {'RandomContrast': image_preprocessing.RandomContrast}):
     input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3))
     with tf_test_util.use_gpu():
       layer = image_preprocessing.RandomContrast((0.1, 0.2))
       layer(input_images)
Example #5
def test_CIN(layer_size, split_half):
    with CustomObjectScope({'CIN': layers.CIN}):
        layer_test(layers.CIN,
                   kwargs={
                       "layer_size": layer_size,
                       "split_half": split_half
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #6
def test_FwFM(reg_strength):
    with CustomObjectScope({'FwFMLayer': layers.FwFMLayer}):
        layer_test(layers.FwFMLayer,
                   kwargs={
                       'num_fields': FIELD_SIZE,
                       'regularizer': reg_strength
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #7
 def test_random_flip_inference(self):
   with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
     input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
     expected_output = input_images
     with tf_test_util.use_gpu():
       layer = image_preprocessing.RandomFlip()
       actual_output = layer(input_images, training=0)
       self.assertAllClose(expected_output, actual_output)
Example #8
 def test_random_crop_output_shape(self, expected_height, expected_width):
     if test.is_built_with_rocm():
         # TODO(rocm):
         # re-enable this test once ROCm adds support for
         # the StatefulUniformFullInt Op (on the GPU)
         self.skipTest('Feature not supported on ROCm')
     with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
         self._run_test(expected_height, expected_width)
Example #9
def test_LocalActivationUnit(hidden_units, activation):
    if version.parse(tf.__version__) >= version.parse('1.13.0') and activation != 'sigmoid':
        return

    with CustomObjectScope({'LocalActivationUnit': layers.LocalActivationUnit}):
        layer_test(layers.LocalActivationUnit,
                   kwargs={'hidden_units': hidden_units, 'activation': activation, 'dropout_rate': 0.5},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE), (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE)])
Example #10
def test_KMaxPooling():
    with CustomObjectScope({'KMaxPooling': sequence.KMaxPooling}):
        layer_test(sequence.KMaxPooling,
                   kwargs={
                       'k': 3,
                       'axis': 1
                   },
                   input_shape=(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE, 2))
Example #11
def test_Linear():
    with CustomObjectScope({'Linear': Linear}):
        layer_test(Linear,
                   kwargs={
                       'mode': 1,
                       'use_bias': True
                   },
                   input_shape=(BATCH_SIZE, EMBEDDING_SIZE))
Example #12
 def test_random_flip_vertical_half(self):
   with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
     np.random.seed(1337)
     mock_random = [1, 0]
     mock_random = np.reshape(mock_random, [2, 1, 1, 1])
     input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
     expected_output = input_images.copy()
     expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0)
     self._run_test('vertical', expected_output, mock_random)
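The mocked draw in this test (and in Example #19 below) stands in for the layer's per-image coin flip. A rough numpy model of the selection logic, assuming the layer flips an image whenever its draw comes up 1:

import numpy as np

imgs = np.random.random((2, 5, 8, 3)).astype(np.float32)
coin = np.reshape([1, 0], [2, 1, 1, 1])    # one draw per image, broadcastable over HWC
flipped = np.flip(imgs, axis=1)            # axis=1 is height, i.e. a vertical flip
out = np.where(coin > 0.5, flipped, imgs)  # image 0 flipped, image 1 untouched
assert (out[0] == flipped[0]).all() and (out[1] == imgs[1]).all()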
Example #13
def test_AttentionSequencePoolingLayer(weight_normalization):
    with CustomObjectScope({
            'AttentionSequencePoolingLayer':
            sequence.AttentionSequencePoolingLayer
    }):
        layer_test(sequence.AttentionSequencePoolingLayer,
                   kwargs={'weight_normalization': weight_normalization},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE),
                                (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE),
                                (BATCH_SIZE, 1)])
Example #14
    def _test_fit(self, optimizer, atol=1e-2):
        x, y, w = self.gen_linear_data()
        model = self.gen_linear_model(optimizer)

        callbacks = [
            keras.callbacks.EarlyStopping(monitor='loss',
                                          patience=3,
                                          min_delta=1e-8)
        ]
        if isinstance(optimizer, RAdam):
            model_path = os.path.join(
                tempfile.gettempdir(),
                'test_accumulation_%f.h5' % np.random.random())
            model.save(model_path)
            from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
            with CustomObjectScope({
                    'RAdam': RAdam
            }):  # Workaround for incorrect global variable used in keras
                model = keras.models.load_model(
                    model_path, custom_objects={'RAdam': RAdam})
            callbacks.append(
                keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                  min_lr=1e-8,
                                                  patience=2,
                                                  verbose=True))

        model.fit(x, y, epochs=100, batch_size=32, callbacks=callbacks)

        model_path = os.path.join(
            tempfile.gettempdir(),
            'test_accumulation_%f.h5' % np.random.random())
        model.save(model_path)
        from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
        with CustomObjectScope({'RAdam': RAdam}):  # Workaround for incorrect global variable used in keras
            model = keras.models.load_model(model_path,
                                            custom_objects={'RAdam': RAdam})

        x, y, w = self.gen_linear_data(w)
        predicted = model.predict(x)
        self.assertLess(np.max(np.abs(predicted - y)), atol)
Example #15
def test_PositionEncoding(pos_embedding_trainable, zero_pad):
    with CustomObjectScope({
            'PositionEncoding': sequence.PositionEncoding,
            "tf": tf
    }):
        layer_test(sequence.PositionEncoding,
                   kwargs={
                       'pos_embedding_trainable': pos_embedding_trainable,
                       'zero_pad': zero_pad
                   },
                   input_shape=(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE))
Example #16
def test_InteractingLayer(
    head_num,
    use_res,
):
    with CustomObjectScope({'InteractingLayer': layers.InteractingLayer}):
        layer_test(layers.InteractingLayer,
                   kwargs={
                       "head_num": head_num,
                       "use_res": use_res,
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #17
def test_BiLSTM(merge_mode):
    with CustomObjectScope({'BiLSTM': sequence.BiLSTM}):
        layer_test(
            sequence.BiLSTM,
            kwargs={
                'merge_mode': merge_mode,
                'units': EMBEDDING_SIZE,
                'dropout_rate': 0.0
            },
            # todo 0.5
            input_shape=(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE))
Example #18
 def test_layer_api_compatibility(self):
   cls = get_layer_class()
   with CustomObjectScope({"Normalization": cls}):
     output_data = testing_utils.layer_test(
         cls,
         kwargs={"axis": -1},
         input_shape=(None, 3),
         input_data=np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32),
         validate_training=False,
         adapt_data=np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]]))
   expected = np.array([[3., -3., -0.33333333], [9., 5., 1.]])
   self.assertAllClose(expected, output_data)
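The expected values in this test can be checked by hand: `Normalization` standardizes each feature using the mean and variance computed from `adapt_data`. A quick numpy verification:

import numpy as np

adapt_data = np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]], dtype=np.float32)
x = np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32)

mean = adapt_data.mean(axis=0)   # [1.5, 2.5, 2.5]
std = adapt_data.std(axis=0)     # [0.5, 0.5, 1.5]
print((x - mean) / std)          # [[3., -3., -0.3333], [9., 5., 1.]]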
Example #19
 def test_random_flip_default(self):
   with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
     input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
     expected_output = np.flip(np.flip(input_images, axis=1), axis=2)
     mock_random = [1, 1]
     mock_random = np.reshape(mock_random, [2, 1, 1, 1])
     with test.mock.patch.object(
         random_ops, 'random_uniform', return_value=mock_random):
       with self.cached_session(use_gpu=True):
         layer = image_preprocessing.RandomFlip()
         actual_output = layer(input_images, training=1)
         self.assertAllClose(expected_output, actual_output)
Example #20
def test_SequencePoolingLayer(mode, supports_masking, input_shape):
    if version.parse(tf.__version__) >= version.parse(
            '1.14.0') and mode != 'sum':  # todo check further version
        return
    with CustomObjectScope(
        {'SequencePoolingLayer': sequence.SequencePoolingLayer}):
        layer_test(sequence.SequencePoolingLayer,
                   kwargs={
                       'mode': mode,
                       'supports_masking': supports_masking
                   },
                   input_shape=input_shape,
                   supports_masking=supports_masking)
Example #21
def test_FGCNNLayer():
    with CustomObjectScope({'FGCNNLayer': layers.FGCNNLayer}):
        layer_test(layers.FGCNNLayer,
                   kwargs={
                       'filters': (4, 6),
                       'kernel_width': (7, 7)
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #22
def test_Transformer():
    with CustomObjectScope({'Transformer': sequence.Transformer}):
        layer_test(sequence.Transformer,
                   kwargs={
                       'att_embedding_size': 1,
                       'head_num': 8,
                       'use_layer_norm': True,
                       'supports_masking': False,
                       'attention_type': 'additive',
                       'dropout_rate': 0.5,
                       'output_type': 'sum'
                   },
                   input_shape=[(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE),
                                (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE),
                                (BATCH_SIZE, 1), (BATCH_SIZE, 1)])
Example #23
    def test_int_output_explicit_vocab_from_config(self):
        vocab_data = ["earth", "wind", "and", "fire"]
        input_array = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "michigan"]])
        expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

        input_data = keras.Input(shape=(None, ), dtype=dtypes.string)
        layer = get_layer_class()(vocabulary=vocab_data)
        int_data = layer(input_data)
        model = keras.Model(inputs=input_data, outputs=int_data)

        with CustomObjectScope({"IndexLookup": get_layer_class()}):
            new_model = keras.Model.from_config(model.get_config())
        output_dataset = new_model.predict(input_array)
        self.assertAllEqual(expected_output, output_dataset)
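The expected indices follow from the default index layout assumed by these lookup layers: 0 is reserved for padding/masking and 1 for out-of-vocabulary tokens, so vocabulary terms start at 2 and the unseen "michigan" maps to 1:

vocab_data = ["earth", "wind", "and", "fire"]
table = {term: idx for idx, term in enumerate(vocab_data, start=2)}
print(table)   # {'earth': 2, 'wind': 3, 'and': 4, 'fire': 5}; OOV -> 1, padding -> 0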
Example #24
def test_Hash(num_buckets, mask_zero, vocabulary_path, input_data,
              expected_output):
    if not hasattr(tf, 'version') or tf.version.VERSION < '2.0.0':
        return

    with CustomObjectScope({'Hash': Hash}):
        layer_test(Hash,
                   kwargs={
                       'num_buckets': num_buckets,
                       'mask_zero': mask_zero,
                       'vocabulary_path': vocabulary_path
                   },
                   input_dtype=tf.string,
                   input_data=np.array(input_data, dtype='str'),
                   expected_output_dtype=tf.int64,
                   expected_output=expected_output)
Example #25
 def test_sample(self):
     model = get_model(
         token_num=200,
         head_num=3,
         transformer_num=2,
     )
     model_path = os.path.join(tempfile.gettempdir(),
                               'keras_bert_%f.h5' % np.random.random())
     model.save(model_path)
     from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
     with CustomObjectScope(get_custom_objects()):  # Workaround for incorrect global variable used in keras
         model = keras.models.load_model(
             model_path,
             custom_objects=get_custom_objects(),
         )
     model.summary(line_length=200)
Example #26
    def test_fit_embed(self):
        optimizers = [RAdam]
        for optimizer in optimizers:
            for amsgrad in [False, True]:
                model = keras.models.Sequential()
                model.add(
                    keras.layers.Embedding(
                        input_shape=(None, ),
                        input_dim=5,
                        output_dim=16,
                        mask_zero=True,
                    ))
                model.add(
                    keras.layers.Bidirectional(keras.layers.LSTM(units=8)))
                model.add(keras.layers.Dense(units=2, activation='softmax'))
                model.compile(optimizer(
                    total_steps=38400,
                    warmup_proportion=0.1,
                    min_lr=1e-6,
                    weight_decay=1e-6,
                    amsgrad=amsgrad,
                ),
                              loss='sparse_categorical_crossentropy')

                x = np.random.randint(0, 5, (64, 3))
                y = []
                for i in range(x.shape[0]):
                    if 2 in x[i]:
                        y.append(1)
                    else:
                        y.append(0)
                y = np.array(y)
                model.fit(x, y, epochs=10)

                model_path = os.path.join(
                    tempfile.gettempdir(),
                    'test_accumulation_%f.h5' % np.random.random())
                model.save(model_path)
                from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
                with CustomObjectScope({
                        'RAdam': RAdam
                }):  # Workaround for incorrect global variable used in keras
                    keras.models.load_model(model_path,
                                            custom_objects={'RAdam': RAdam})
Example #27
  def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
                                       use_dataset, expected_output,
                                       input_dtype):
    cls = get_layer_class()
    if "invert" in kwargs and kwargs["invert"]:
      expected_output_dtype = kwargs["dtype"]
    else:
      expected_output_dtype = dtypes.int64

    input_shape = input_data.shape

    if use_dataset:
      # Keras APIs expect batched datasets.
      # TODO(rachelim): `model.predict` predicts the result on each
      # dataset batch separately, then tries to concatenate the results
      # together. When the results have different shapes on the non-concat
      # axis (which can happen in the output_mode = INT case for
      # IndexLookup), the concatenation fails. In real use cases, this may
      # not be an issue because users are likely to pipe the preprocessing layer
      # into other keras layers instead of predicting it directly. A workaround
      # for these unit tests is to have the dataset only contain one batch, so
      # no concatenation needs to happen with the result. For consistency with
      # numpy input, we should make `predict` join differently shaped results
      # together sensibly, with 0 padding.
      input_data = dataset_ops.Dataset.from_tensor_slices(input_data).batch(
          input_shape[0])
      vocab_data = dataset_ops.Dataset.from_tensor_slices(vocab_data).batch(
          input_shape[0])

    with CustomObjectScope({"IndexLookup": cls}):
      output_data = testing_utils.layer_test(
          cls,
          kwargs=kwargs,
          input_shape=input_shape,
          input_data=input_data,
          input_dtype=input_dtype,
          expected_output_dtype=expected_output_dtype,
          validate_training=False,
          adapt_data=vocab_data)
    if "invert" in kwargs and kwargs["invert"]:
      self.assertAllEqual(expected_output, output_data)
    else:
      self.assertAllClose(expected_output, output_data)
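Outside the `layer_test` harness, the `adapt_data` plumbing used here (and in Examples #18 and #28) reduces to the standard adapt-then-call flow for preprocessing layers; schematically:

layer = cls(**kwargs)       # e.g. an IndexLookup or Normalization instance
layer.adapt(vocab_data)     # build the vocabulary / statistics from data
output = layer(input_data)  # afterwards the layer is used like any other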
Example #28
    def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
                                         expected_output):
        cls = get_layer_class()
        if kwargs.get("output_mode") == text_vectorization.TFIDF:
            expected_output_dtype = dtypes.float32
        else:
            expected_output_dtype = dtypes.int64

        with CustomObjectScope({"TextVectorization": cls}):
            output_data = testing_utils.layer_test(
                cls,
                kwargs=kwargs,
                input_shape=(None),
                input_data=input_data,
                input_dtype=dtypes.string,
                expected_output_dtype=expected_output_dtype,
                validate_training=False,
                adapt_data=vocab_data)
        self.assertAllClose(expected_output, output_data)
Example #29
    def test_layer_api_compatibility(self):
        input_array = tf.constant([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0],
                                   [2.0, 3.0, 5.0]])

        cls = affine_transform.AffineTransform
        with CustomObjectScope({cls.__name__: cls}):
            output = testing_utils.layer_test(
                cls,
                kwargs={
                    'output_size': 1,
                    'initializer': tf.keras.initializers.TruncatedNormal(stddev=0.02)
                },
                input_shape=(None),
                input_data=input_array)

        expected_values = tf.constant([[0.01368301], [0.01368301],
                                       [0.0314441]])
        self.assertAllClose(expected_values, output)
Example #30
    def _test_fit(self, optimizer):
        x = np.random.standard_normal((1000, 5))
        y = np.dot(x, np.random.standard_normal((5, 2))).argmax(axis=-1)
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(
                units=2,
                input_shape=(5, ),
                kernel_constraint=keras.constraints.MaxNorm(1000.0),
                activation='softmax',
            ))
        model.compile(
            optimizer=optimizer,
            loss='sparse_categorical_crossentropy',
        )
        model.fit(
            x,
            y,
            batch_size=10,
            epochs=110,
            callbacks=[
                keras.callbacks.EarlyStopping(monitor='loss',
                                              min_delta=1e-4,
                                              patience=3)
            ],
        )

        model_path = os.path.join(tempfile.gettempdir(),
                                  'keras_warmup_%f.h5' % np.random.random())
        model.save(model_path)

        from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
        with CustomObjectScope({
                'AdamWarmup': AdamWarmup
        }):  # Workaround for incorrect global variable used in keras
            model = keras.models.load_model(
                model_path, custom_objects={'AdamWarmup': AdamWarmup})

        results = model.predict(x).argmax(axis=-1)
        diff = np.sum(np.abs(y - results))
        self.assertLess(diff, 100)
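Since the labels are 0/1, `np.abs(y - results)` is 1 exactly on the mislabeled samples, so the assertion demands fewer than 100 errors on 1000 points, i.e. better than 90% accuracy:

error_rate = np.abs(y - results).mean()   # fraction of mislabeled samples
assert error_rate < 0.1                   # equivalent to the assertLess above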