Example #1
  def __init__(self,
               num_features,
               input_window_size,
               output_window_size,
               num_units=128):
    """Construct the LSTM prediction model.

    Args:
      num_features: Number of input features per time step.
      input_window_size: Number of past time steps of data to look at when doing
        the regression.
      output_window_size: Number of future time steps to predict. Note that
        setting it to > 1 empirically seems to give a better fit.
      num_units: The number of units in the encoder and decoder LSTM cells.
    """
    super(LSTMPredictionModel, self).__init__()
    # TODO(scottzhu): Change to recurrent_v2.LSTM once the change reaches HEAD.
    self._encoder = recurrent.LSTM(
        num_units, name="encoder", dtype=self.dtype, return_state=True)
    self._decoder = recurrent.LSTM(
        num_units, name="decoder", dtype=self.dtype, return_sequences=True)
    self._mean_transform = core.Dense(num_features,
                                      name="mean_transform")
    self._covariance_transform = core.Dense(num_features,
                                            name="covariance_transform")
Example #2
    def test_explicit_device_with_go_backward_and_mask(self):
        batch_size = 8
        timestep = 7
        masksteps = 5
        units = 4

        inputs = np.random.randn(batch_size, timestep,
                                 units).astype(np.float32)
        mask = np.ones((batch_size, timestep)).astype(bool)
        mask[:, masksteps:] = 0

        # Test for V1 behavior.
        lstm_v1 = rnn_v1.LSTM(units, return_sequences=True, go_backwards=True)
        with test_util.device(use_gpu=True):
            outputs_masked_v1 = lstm_v1(inputs,
                                        mask=constant_op.constant(mask))
            outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps])
        self.assertAllClose(outputs_masked_v1[:, -masksteps:],
                            outputs_trimmed_v1)

        # Test for V2 behavior.
        lstm = rnn.LSTM(units, return_sequences=True, go_backwards=True)
        with test_util.device(use_gpu=True):
            outputs_masked = lstm(inputs, mask=constant_op.constant(mask))
            outputs_trimmed = lstm(inputs[:, :masksteps])
        self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
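
For readers without access to the internal test helpers (`test_util.device`, `constant_op`), a minimal standalone version of the same masking/go_backwards check can be written against the public tf.keras API; device placement is left to TensorFlow's defaults here, and the tolerance values are illustrative.

import numpy as np
import tensorflow as tf

batch_size, timestep, masksteps, units = 8, 7, 5, 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
mask = np.ones((batch_size, timestep), dtype=bool)
mask[:, masksteps:] = False

lstm = tf.keras.layers.LSTM(units, return_sequences=True, go_backwards=True)
outputs_masked = lstm(inputs, mask=tf.constant(mask))
outputs_trimmed = lstm(inputs[:, :masksteps])
# With go_backwards=True the valid (unmasked) steps end up at the tail of the
# output sequence, so they should match a run on the trimmed input.
np.testing.assert_allclose(outputs_masked[:, -masksteps:], outputs_trimmed,
                           rtol=1e-5, atol=1e-5)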
Example #3
    def test_explicit_device_with_go_backward_and_mask(self):
        if test.is_built_with_rocm():
            self.skipTest('Skipping the test as ROCm MIOpen does not '
                          'support padded input yet.')

        batch_size = 8
        timestep = 7
        masksteps = 5
        units = 4

        inputs = np.random.randn(batch_size, timestep,
                                 units).astype(np.float32)
        mask = np.ones((batch_size, timestep)).astype(bool)
        mask[:, masksteps:] = 0

        # Test for V1 behavior.
        lstm_v1 = rnn_v1.LSTM(units, return_sequences=True, go_backwards=True)
        with testing_utils.device(should_use_gpu=True):
            outputs_masked_v1 = lstm_v1(inputs,
                                        mask=constant_op.constant(mask))
            outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps])
        self.assertAllClose(outputs_masked_v1[:, -masksteps:],
                            outputs_trimmed_v1)

        # Test for V2 behavior.
        lstm = rnn.LSTM(units, return_sequences=True, go_backwards=True)
        with testing_utils.device(should_use_gpu=True):
            outputs_masked = lstm(inputs, mask=constant_op.constant(mask))
            outputs_trimmed = lstm(inputs[:, :masksteps])
        self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
Example #4
    def test_lstm_v2_feature_parity_with_canonical_lstm(self):
        if test.is_built_with_rocm():
            self.skipTest('Skipping the test as ROCm MIOpen does not '
                          'support padded input yet.')
        input_shape = 10
        rnn_state_size = 8
        timestep = 4
        batch = 20

        (x_train,
         y_train), _ = testing_utils.get_test_data(train_samples=batch,
                                                   test_samples=0,
                                                   input_shape=(timestep,
                                                                input_shape),
                                                   num_classes=rnn_state_size,
                                                   random_seed=87654321)
        y_train = np_utils.to_categorical(y_train, rnn_state_size)
        # For the last two batch items of the test data, we zero out the last
        # timestep to simulate variable-length sequences for the masking test.
        x_train[-2:, -1, :] = 0.0
        y_train[-2:] = 0

        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=dtypes.float32)
        masked_input = keras.layers.Masking()(inputs)
        lstm_layer = rnn_v1.LSTM(rnn_state_size,
                                 recurrent_activation='sigmoid')
        output = lstm_layer(masked_input)
        lstm_model = keras.models.Model(inputs, output)
        weights = lstm_model.get_weights()
        y_1 = lstm_model.predict(x_train)
        lstm_model.compile('rmsprop', 'mse')
        lstm_model.fit(x_train, y_train)
        y_2 = lstm_model.predict(x_train)

        with testing_utils.device(should_use_gpu=True):
            cudnn_layer = rnn.LSTM(rnn_state_size)
            cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
        cudnn_model.set_weights(weights)
        y_3 = cudnn_model.predict(x_train)
        cudnn_model.compile('rmsprop', 'mse')
        cudnn_model.fit(x_train, y_train)
        y_4 = cudnn_model.predict(x_train)

        self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
        self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
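
The heart of this parity test is the weight hand-off between the two layer implementations: they share the same (kernel, recurrent_kernel, bias) layout, so get_weights()/set_weights() round-trips. Below is a stripped-down sketch of that pattern using only the public tf.keras API; both layers here are the V2 LSTM, whereas the original test pairs the canonical V1 layer with the cuDNN-compatible V2 one, and the tolerances are the ones used above.

import numpy as np
import tensorflow as tf

timestep, input_dim, units = 4, 10, 8
inputs = tf.keras.Input(shape=(timestep, input_dim))
masked = tf.keras.layers.Masking()(inputs)

model_a = tf.keras.Model(inputs, tf.keras.layers.LSTM(units)(masked))
model_b = tf.keras.Model(inputs, tf.keras.layers.LSTM(units)(masked))
model_b.set_weights(model_a.get_weights())  # identical weight layout

x = np.random.random((2, timestep, input_dim)).astype(np.float32)
x[-1, -1, :] = 0.0  # zeroed step is skipped by Masking(), as in the test above
np.testing.assert_allclose(model_a.predict(x), model_b.predict(x),
                           rtol=1e-5, atol=2e-5)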
Example #5
    def test_unified_lstm_feature_parity_with_canonical_lstm(self):
        with context.eager_mode():
            # Run this test under eager only due to b/120160788 for model.set_weights.
            input_shape = 10
            rnn_state_size = 8
            timestep = 4
            batch = 20

            (x_train, y_train), _ = testing_utils.get_test_data(
                train_samples=batch,
                test_samples=0,
                input_shape=(timestep, input_shape),
                num_classes=rnn_state_size)
            y_train = keras.utils.to_categorical(y_train, rnn_state_size)
            # For the last two batch items of the test data, we zero out the last
            # timestep to simulate variable-length sequences for the masking test.
            x_train[-2:, -1, :] = 0.0
            y_train[-2:] = 0

            inputs = keras.layers.Input(shape=[timestep, input_shape],
                                        dtype=dtypes.float32)
            masked_input = keras.layers.Masking()(inputs)
            lstm_layer = rnn_v1.LSTM(rnn_state_size,
                                     recurrent_activation='sigmoid')
            output = lstm_layer(masked_input)
            lstm_model = keras.models.Model(inputs, output)
            weights = lstm_model.get_weights()
            y_1 = lstm_model.predict(x_train)
            lstm_model.compile('rmsprop', 'mse')
            lstm_model.fit(x_train, y_train)
            y_2 = lstm_model.predict(x_train)

            with test_util.device(use_gpu=True):
                cudnn_layer = rnn.LSTM(rnn_state_size)
                cudnn_model = keras.models.Model(inputs,
                                                 cudnn_layer(masked_input))
            cudnn_model.set_weights(weights)
            y_3 = cudnn_model.predict(x_train)
            cudnn_model.compile('rmsprop', 'mse')
            cudnn_model.fit(x_train, y_train)
            y_4 = cudnn_model.predict(x_train)

            self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
            self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
Example #6
    def _time_performance_run_normal_lstm(self, test_config, x_train, y_train):
        # Get performance number for standard LSTM on GPU.
        input_shape = test_config['input_shape']
        rnn_state_size = test_config['rnn_state_size']
        timestep = test_config['timestep']

        layer = rnn_v1.LSTM(rnn_state_size)
        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=dtypes.float32)

        outputs = layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile('sgd', 'mse')

        sec_per_epoch = self._measure_performance(test_config, model, x_train,
                                                  y_train)
        logging.info('Average performance for %s per epoch is: %s',
                     'Normal LSTM', sec_per_epoch)
        return sec_per_epoch
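
The `_measure_performance` helper is internal to this benchmark. A comparable per-epoch timing can be collected with an ordinary Keras callback; the sketch below is an assumption about one reasonable way to do it, not the benchmark's actual implementation.

import time
import tensorflow as tf

class EpochTimer(tf.keras.callbacks.Callback):
  """Records how long each epoch of model.fit() takes, in seconds."""

  def on_train_begin(self, logs=None):
    self.epoch_seconds = []

  def on_epoch_begin(self, epoch, logs=None):
    self._start = time.time()

  def on_epoch_end(self, epoch, logs=None):
    self.epoch_seconds.append(time.time() - self._start)

Passing an instance via `callbacks=[timer]` to `model.fit()` and averaging `timer.epoch_seconds` gives a seconds-per-epoch figure comparable to the one logged above.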
Example #7
    def test_lstm_output_on_multiple_kernel(self):
        input_shape = 10
        rnn_state_size = 8
        timestep = 4
        batch = 100

        x_train = np.random.random((batch, timestep, input_shape))

        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=dtypes.float32)
        with test_util.device(use_gpu=False):
            layer = rnn.LSTM(rnn_state_size)
            output = layer(inputs)
            cpu_model = keras.models.Model(inputs, output)
            weights = cpu_model.get_weights()
        y_1 = cpu_model.predict(x_train)

        with test_util.device(use_gpu=True):
            layer = rnn.LSTM(rnn_state_size)
            output = layer(inputs)
            gpu_model = keras.models.Model(inputs, output)
            gpu_model.set_weights(weights)
        y_2 = gpu_model.predict(x_train)

        # Note that cuDNN uses 'sigmoid' as the recurrent activation, so LSTM V2
        # defaults to 'sigmoid' as well. Construct the canonical LSTM with the
        # same recurrent activation to reproduce its output.
        with test_util.device(use_gpu=True):
            layer = rnn_v1.LSTM(rnn_state_size, recurrent_activation='sigmoid')
            output = layer(inputs)
            canonical_model = keras.models.Model(inputs, output)
            # Remove the extra cudnn bias since canonical lstm will not use it.
            canonical_model.set_weights(weights[:3])
        y_3 = canonical_model.predict(x_train)

        self.assertAllClose(y_1, y_2)
        self.assertAllClose(y_2, y_3)
Example #8
class LayerCorrectnessTest(keras_parameterized.TestCase):

  def setUp(self):
    super(LayerCorrectnessTest, self).setUp()
    # Set two virtual CPUs to test MirroredStrategy with multiple devices
    cpus = config_module.list_physical_devices('CPU')
    config_module.set_logical_device_configuration(cpus[0], [
        context.LogicalDeviceConfiguration(),
        context.LogicalDeviceConfiguration(),
    ])

  def _create_model_from_layer(self, layer, input_shapes):
    inputs = [layers.Input(batch_input_shape=s) for s in input_shapes]
    if len(inputs) == 1:
      inputs = inputs[0]
    y = layer(inputs)
    model = models.Model(inputs, y)
    model.compile('sgd', 'mse')
    return model

  @parameterized.named_parameters(
      ('LeakyReLU', advanced_activations.LeakyReLU, (2, 2)),
      ('PReLU', advanced_activations.PReLU, (2, 2)),
      ('ELU', advanced_activations.ELU, (2, 2)),
      ('ThresholdedReLU', advanced_activations.ThresholdedReLU, (2, 2)),
      ('Softmax', advanced_activations.Softmax, (2, 2)),
      ('ReLU', advanced_activations.ReLU, (2, 2)),
      ('Conv1D', lambda: convolutional.Conv1D(2, 2), (2, 2, 1)),
      ('Conv2D', lambda: convolutional.Conv2D(2, 2), (2, 2, 2, 1)),
      ('Conv3D', lambda: convolutional.Conv3D(2, 2), (2, 2, 2, 2, 1)),
      ('Conv2DTranspose', lambda: convolutional.Conv2DTranspose(2, 2),
       (2, 2, 2, 2)),
      ('SeparableConv2D', lambda: convolutional.SeparableConv2D(2, 2),
       (2, 2, 2, 1)),
      ('DepthwiseConv2D', lambda: convolutional.DepthwiseConv2D(2, 2),
       (2, 2, 2, 1)),
      ('UpSampling2D', convolutional.UpSampling2D, (2, 2, 2, 1)),
      ('ZeroPadding2D', convolutional.ZeroPadding2D, (2, 2, 2, 1)),
      ('Cropping2D', convolutional.Cropping2D, (2, 3, 3, 1)),
      ('ConvLSTM2D',
       lambda: convolutional_recurrent.ConvLSTM2D(4, kernel_size=(2, 2)),
       (4, 4, 4, 4, 4)),
      ('Dense', lambda: core.Dense(2), (2, 2)),
      ('Dropout', lambda: core.Dropout(0.5), (2, 2)),
      ('SpatialDropout2D', lambda: core.SpatialDropout2D(0.5), (2, 2, 2, 2)),
      ('Activation', lambda: core.Activation('sigmoid'), (2, 2)),
      ('Reshape', lambda: core.Reshape((1, 4, 1)), (2, 2, 2)),
      ('Permute', lambda: core.Permute((2, 1)), (2, 2, 2)),
      ('Attention', dense_attention.Attention, [(2, 2, 3), (2, 3, 3),
                                                (2, 3, 3)]),
      ('AdditiveAttention', dense_attention.AdditiveAttention, [(2, 2, 3),
                                                                (2, 3, 3),
                                                                (2, 3, 3)]),
      ('Embedding', lambda: embeddings.Embedding(4, 4),
       (2, 4), 2e-3, 2e-3, np.random.randint(4, size=(2, 4))),
      ('LocallyConnected1D', lambda: local.LocallyConnected1D(2, 2), (2, 2, 1)),
      ('LocallyConnected2D', lambda: local.LocallyConnected2D(2, 2),
       (2, 2, 2, 1)),
      ('Add', merge.Add, [(2, 2), (2, 2)]),
      ('Subtract', merge.Subtract, [(2, 2), (2, 2)]),
      ('Multiply', merge.Multiply, [(2, 2), (2, 2)]),
      ('Average', merge.Average, [(2, 2), (2, 2)]),
      ('Maximum', merge.Maximum, [(2, 2), (2, 2)]),
      ('Minimum', merge.Minimum, [(2, 2), (2, 2)]),
      ('Concatenate', merge.Concatenate, [(2, 2), (2, 2)]),
      ('Dot', lambda: merge.Dot(1), [(2, 2), (2, 2)]),
      ('GaussianNoise', lambda: noise.GaussianNoise(0.5), (2, 2)),
      ('GaussianDropout', lambda: noise.GaussianDropout(0.5), (2, 2)),
      ('AlphaDropout', lambda: noise.AlphaDropout(0.5), (2, 2)),
      ('BatchNormalization', normalization_v2.BatchNormalization,
       (2, 2), 1e-2, 1e-2),
      ('LayerNormalization', normalization.LayerNormalization, (2, 2)),
      ('LayerNormalizationUnfused',
       lambda: normalization.LayerNormalization(axis=1), (2, 2, 2)),
      ('MaxPooling2D', pooling.MaxPooling2D, (2, 2, 2, 1)),
      ('AveragePooling2D', pooling.AveragePooling2D, (2, 2, 2, 1)),
      ('GlobalMaxPooling2D', pooling.GlobalMaxPooling2D, (2, 2, 2, 1)),
      ('GlobalAveragePooling2D', pooling.GlobalAveragePooling2D, (2, 2, 2, 1)),
      ('SimpleRNN', lambda: recurrent.SimpleRNN(units=4),
       (4, 4, 4), 1e-2, 1e-2),
      ('GRU', lambda: recurrent.GRU(units=4), (4, 4, 4)),
      ('LSTM', lambda: recurrent.LSTM(units=4), (4, 4, 4)),
      ('GRUV2', lambda: recurrent_v2.GRU(units=4), (4, 4, 4)),
      ('LSTMV2', lambda: recurrent_v2.LSTM(units=4), (4, 4, 4)),
      ('TimeDistributed', lambda: wrappers.TimeDistributed(core.Dense(2)),
       (2, 2, 2)),
      ('Bidirectional',
       lambda: wrappers.Bidirectional(recurrent.SimpleRNN(units=4)), (2, 2, 2)),
      ('AttentionLayerCausal', lambda: dense_attention.Attention(causal=True), [
          (2, 2, 3), (2, 3, 3), (2, 3, 3)
      ]),
      ('AdditiveAttentionLayerCausal',
       lambda: dense_attention.AdditiveAttention(causal=True), [(2, 3, 4),
                                                                (2, 3, 4),
                                                                (2, 3, 4)]),
  )
  def test_layer(self, f32_layer_fn, input_shape, rtol=2e-3, atol=2e-3,
                 input_data=None):
    """Tests a layer by comparing the float32 and mixed precision weights.

    A float32 layer, a mixed precision layer, and a distributed mixed precision
    layer are run. The three layers are identical other than their dtypes and
    distribution strategies. The outputs after predict() and weights after fit()
    are asserted to be close.

    Args:
      f32_layer_fn: A function returning a float32 layer. The other two layers
        will automatically be created from this.
      input_shape: The shape of the input to the layer, including the batch
        dimension. Or a list of shapes if the layer takes multiple inputs.
      rtol: The relative tolerance to be asserted.
      atol: The absolute tolerance to be asserted.
      input_data: A Numpy array with the data of the input. If None, input data
        will be randomly generated.
    """

    # Skip ZeroPadding2D on ROCm builds.
    if f32_layer_fn == convolutional.ZeroPadding2D and \
       test.is_built_with_rocm():
      return
    if isinstance(input_shape[0], int):
      input_shapes = [input_shape]
    else:
      input_shapes = input_shape
    strategy = create_mirrored_strategy()
    f32_layer = f32_layer_fn()

    # Create the layers
    assert f32_layer.dtype == f32_layer._compute_dtype == 'float32'
    config = f32_layer.get_config()
    config['dtype'] = policy.Policy('mixed_float16')
    mp_layer = f32_layer.__class__.from_config(config)
    distributed_mp_layer = f32_layer.__class__.from_config(config)

    # Compute per_replica_input_shapes for the distributed model
    global_batch_size = input_shapes[0][0]
    assert global_batch_size % strategy.num_replicas_in_sync == 0, (
        'The number of replicas, %d, does not divide the global batch size of '
        '%d' % (strategy.num_replicas_in_sync, global_batch_size))
    per_replica_batch_size = (
        global_batch_size // strategy.num_replicas_in_sync)
    per_replica_input_shapes = [(per_replica_batch_size,) + s[1:]
                                for s in input_shapes]

    # Create the models
    f32_model = self._create_model_from_layer(f32_layer, input_shapes)
    mp_model = self._create_model_from_layer(mp_layer, input_shapes)
    with strategy.scope():
      distributed_mp_model = self._create_model_from_layer(
          distributed_mp_layer, per_replica_input_shapes)

    # Set all model weights to the same values
    f32_weights = f32_model.get_weights()
    mp_model.set_weights(f32_weights)
    distributed_mp_model.set_weights(f32_weights)

    # Generate input data
    if input_data is None:
      # Generate the inputs directly in float16 so the comparison does not pick
      # up the error introduced when the mixed precision layers cast float32
      # inputs to float16.
      input_data = [np.random.normal(size=s).astype('float16')
                    for s in input_shapes]
      if len(input_data) == 1:
        input_data = input_data[0]

    # Assert all models have close outputs.
    f32_output = f32_model.predict(input_data)
    mp_output = mp_model.predict(input_data)
    self.assertAllClose(
        mp_output, f32_output, rtol=rtol, atol=atol)
    self.assertAllClose(
        distributed_mp_model.predict(input_data), f32_output, rtol=rtol,
        atol=atol)

    # Run fit() on models
    output = np.random.normal(size=f32_model.outputs[0].shape).astype('float16')
    for model in f32_model, mp_model, distributed_mp_model:
      model.fit(input_data, output, batch_size=global_batch_size)

    # Assert all models have close weights
    f32_weights = f32_model.get_weights()
    self.assertAllClose(
        mp_model.get_weights(), f32_weights, rtol=rtol, atol=atol)
    self.assertAllClose(
        distributed_mp_model.get_weights(), f32_weights, rtol=rtol, atol=atol)
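
The dtype swap at the core of this test (rebuilding a float32 layer under a mixed_float16 policy from its config) can be reproduced with the public mixed-precision API available since TF 2.4. The sketch below is a minimal illustration with an arbitrary Dense layer; the tolerances mirror the defaults used in the test and are not prescriptive.

import numpy as np
import tensorflow as tf

f32_layer = tf.keras.layers.Dense(2)
f32_layer.build((None, 2))

# Rebuild the same layer under a mixed_float16 policy via its config.
config = f32_layer.get_config()
config['dtype'] = tf.keras.mixed_precision.Policy('mixed_float16')
mp_layer = f32_layer.__class__.from_config(config)
mp_layer.build((None, 2))
mp_layer.set_weights(f32_layer.get_weights())

# Variables stay float32 under mixed_float16; only the compute dtype changes,
# so outputs should agree within a loose tolerance.
x = np.random.normal(size=(4, 2)).astype('float16')
np.testing.assert_allclose(mp_layer(x), f32_layer(x), rtol=2e-3, atol=2e-3)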