Example #1
    def call(self, inputs, training=None, mask=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        uses_learning_phase = False

        if not isinstance(inputs, list):
            inputs = [inputs]

        reshaped_inputs = []
        input_length = None
        for x in inputs:  # avoid shadowing the built-in `input`
            input_shape = K.int_shape(x)

            # No batch size specified, therefore the layer will be able
            # to process batches of any size.
            # We can go with reshape-based implementation for performance.
            if input_length is None:
                # The time dimension comes from the first (main) input;
                # any remaining entries are initial states.
                input_length = input_shape[1]
                if not input_length:
                    input_length = K.shape(x)[1]
            # Shape: (num_samples * timesteps, ...). And track the
            # transformation in self._input_map.
            input_uid = _object_list_uid(x)
            x = K.reshape(x, (-1,) + input_shape[2:])
            self._input_map[input_uid] = x

            reshaped_inputs.append(x)

        # (num_samples * timesteps, ...)
        reshaped_initial_states = reshaped_inputs[1:]
        reshaped_inputs = reshaped_inputs[0]
        if reshaped_initial_states:
            kwargs['initial_state'] = reshaped_initial_states

        y = self.layer.call(reshaped_inputs, **kwargs)

        if hasattr(y, '_uses_learning_phase'):
            uses_learning_phase = y._uses_learning_phase
        # Shape: (num_samples, timesteps, ...)
        input_shape = [K.int_shape(x) for x in inputs]
        output_shape = self.compute_output_shape(input_shape)
        y = K.reshape(y, (-1, input_length) + output_shape[2:])

        # Apply activity regularizer if any:
        if (hasattr(self.layer, 'activity_regularizer')
                and self.layer.activity_regularizer is not None):
            regularization_loss = self.layer.activity_regularizer(y)
            self.add_loss(regularization_loss, inputs)

        if uses_learning_phase:
            y._uses_learning_phase = True
        return y
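
Example #1 merges the batch and time axes before calling the wrapped layer, so the inner layer runs once over a 2-D batch instead of once per timestep. The NumPy sketch below illustrates just that reshape trick; the shapes and the weight matrix w are illustrative stand-ins, not part of the example above.

import numpy as np

x = np.random.random((8, 3, 4))        # (num_samples, timesteps, features)
flat = x.reshape((-1, 4))              # (num_samples * timesteps, features)
w = np.random.random((4, 2))           # stand-in for a Dense(2) kernel
y = flat.dot(w).reshape((8, 3, 2))     # restore the time axis afterwards
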
Example #2
    def call(self, inputs, training=None, mask=None):
        # Copied from https://github.com/fchollet/keras/blob/master/keras/layers/wrappers.py#L100
        # (tag 2.1.0) with our modifications marked in comments
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        # Our modification
        if has_arg(self.layer.call, 'mask'):
            kwargs['mask'] = mask
        # end our modification
        uses_learning_phase = False

        input_shape = K.int_shape(inputs)

        input_length = input_shape[1]
        if not input_length:
            input_length = K.shape(inputs)[1]
        # Shape: (num_samples * timesteps, ...). And track the
        # transformation in self._input_map.
        input_uid = _object_list_uid(inputs)
        inputs = K.reshape(inputs, (-1, ) + input_shape[2:])
        self._input_map[input_uid] = inputs
        # (num_samples * timesteps, ...)
        # Our modification
        if kwargs.get('mask', None) is not None:
            mask_shape = K.int_shape(mask)
            kwargs['mask'] = K.reshape(kwargs['mask'], (-1, ) + mask_shape[2:])
        # end our modification
        y = self.layer.call(inputs, **kwargs)
        if hasattr(y, '_uses_learning_phase'):
            uses_learning_phase = y._uses_learning_phase
        # Shape: (num_samples, timesteps, ...)
        output_shape = self.compute_output_shape(input_shape)
        y = K.reshape(y, (-1, input_length) + output_shape[2:])

        # Apply activity regularizer if any:
        if (hasattr(self.layer, 'activity_regularizer')
                and self.layer.activity_regularizer is not None):
            regularization_loss = self.layer.activity_regularizer(y)
            self.add_loss(regularization_loss, inputs)

        if uses_learning_phase:
            y._uses_learning_phase = True
        return y
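
The modification in Example #2 flattens the mask with the same axis merge as the inputs, so the wrapped layer receives one mask entry per (sample, timestep) row. A small sketch of what `(-1, ) + mask_shape[2:]` does to a 2-D boolean mask (shapes illustrative):

import numpy as np

mask = np.array([[True, True, False],
                 [True, False, False]])            # (num_samples=2, timesteps=3)
flat_mask = mask.reshape((-1,) + mask.shape[2:])   # -> shape (6,), one entry per row
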
Example #3
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              epochs=1,
              batch_size=10)

    # test config
    model.get_config()

    # test when specifying a batch_input_shape
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(layers.Dense(2), batch_input_shape=(1, 3, 4)))
    reference.add(layers.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Embedding
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.Embedding(5, 6),
                                 batch_input_shape=(10, 3, 4),
                                 dtype='int32'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'),
              np.random.random((10, 3, 4, 6)),
              epochs=1,
              batch_size=10)

    # compare to not using batch_input_shape
    test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(layers.Embedding(5, 6),
                                 input_shape=(3, 4),
                                 dtype='int32'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Conv2D
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.Conv2D(5, (2, 2), padding='same'),
                                 input_shape=(2, 4, 4, 3)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
                         np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(layers.Dense(3)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 3)),
              epochs=1,
              batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(layers.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with BatchNormalization
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.BatchNormalization(center=True,
                                                           scale=True),
                                 name='bn',
                                 input_shape=(10, 2)))
    model.compile(optimizer='rmsprop', loss='mse')
    # Assert that mean and variance are 0 and 1.
    td = model.layers[0]
    assert np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Train
    model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                         np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
    # Assert that mean and variance changed.
    assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Verify input_map has one mapping from inputs to reshaped inputs.
    uid = _object_list_uid(model.inputs)
    assert len(td._input_map.keys()) == 1
    assert uid in td._input_map
    assert K.int_shape(td._input_map[uid]) == (None, 2)
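
The test body above is shown without its import preamble. A plausible preamble that makes it self-contained, assuming a Keras 2.1-era checkout (the module housing the private helper _object_list_uid is an assumption):

import numpy as np
from numpy.testing import assert_allclose

from keras import backend as K
from keras import layers
from keras.layers import wrappers, Input
from keras.models import Model, Sequential, model_from_json
# Assumption: the private helper lived in keras.engine.topology in 2.1.x.
from keras.engine.topology import _object_list_uid
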
Example #4
    def call(self, inputs, training=None, mask=None):
        kwargs = {}
        if K.has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        uses_learning_phase = False

        # Track the transformation in self._input_map.
        input_uid = _object_list_uid(inputs)

        actual_input_shape = K.shape(inputs)

        # Collapse the batch axis and all distributed axes into a single
        # leading axis.
        reshaped_axis_size = actual_input_shape[0]
        for i in range(self.n_distribution_axes):
            reshaped_axis_size *= actual_input_shape[i + 1]

        reshaped_input_shape = [reshaped_axis_size]

        # Keep the remaining (non-distributed) axes unchanged.
        in_ndims = K.ndim(inputs)
        for i in range(1 + self.n_distribution_axes, in_ndims):
            reshaped_input_shape.append(actual_input_shape[i])
        reshaped_input_shape = tuple(reshaped_input_shape)

        inputs = K.reshape(inputs, reshaped_input_shape)

        self._input_map[input_uid] = inputs
        # (num_samples * timesteps, ...)
        y = self.layer.call(inputs, **kwargs)
        if hasattr(y, '_uses_learning_phase'):
            uses_learning_phase = y._uses_learning_phase
        # Shape: (num_samples, *distributed_axes, ...)
        actual_output_shape = K.shape(y)

        # Restore the batch axis and the distributed axes from the input,
        # then keep the trailing axes produced by the inner layer.
        reshaped_output_shape = []
        for i in range(self.n_distribution_axes + 1):
            reshaped_output_shape.append(actual_input_shape[i])

        out_ndims = K.ndim(y)
        for i in range(1, out_ndims):
            reshaped_output_shape.append(actual_output_shape[i])
        reshaped_output_shape = tuple(reshaped_output_shape)

        y = K.reshape(y, reshaped_output_shape)

        # Apply activity regularizer if any:
        if (hasattr(self.layer, 'activity_regularizer')
                and self.layer.activity_regularizer is not None):
            regularization_loss = self.layer.activity_regularizer(y)
            self.add_loss(regularization_loss, inputs)

        if uses_learning_phase:
            y._uses_learning_phase = True
        return y
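
Example #4 generalizes the same trick to an arbitrary number of distributed axes; n_distribution_axes is this variant's own attribute, not stock Keras. A NumPy sketch of the axis arithmetic for a hypothetical n_distribution_axes = 2 (all shapes illustrative):

import numpy as np

x = np.random.random((8, 3, 5, 4))     # (num_samples, t1, t2, features)
n_distribution_axes = 2                # distribute over the t1 and t2 axes

# Collapse the batch axis plus the distributed axes, as call() does.
lead = int(np.prod(x.shape[:n_distribution_axes + 1]))      # 8 * 3 * 5
flat = x.reshape((lead,) + x.shape[n_distribution_axes + 1:])

w = np.random.random((4, 2))           # stand-in for the inner layer
y = flat.dot(w)                        # applied once to all rows

# Restore the leading axes from the input shape, keep the new trailing axis.
y = y.reshape(x.shape[:n_distribution_axes + 1] + y.shape[1:])
assert y.shape == (8, 3, 5, 2)
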