Example no. 1
    def _forward(self, steps, outputs, features, batch, return_seq):

        activation = 'tanh'

        inpt = np.random.uniform(size=(batch, features))
        inpt_keras, _ = data_to_timesteps(inpt, steps=steps)

        assert inpt_keras.shape == (batch, steps, features)

        # weights init
        kernel = np.random.uniform(low=-1, high=1, size=(features, outputs))
        recurrent_kernel = np.random.uniform(low=-1,
                                             high=1,
                                             size=(outputs, outputs))
        bias = np.random.uniform(low=-1, high=1, size=(outputs, ))

        # create keras model
        inp = Input(shape=inpt_keras.shape[1:])
        rnn = SimpleRNN(units=outputs,
                        activation=activation,
                        return_sequences=return_seq)(inp)
        model = Model(inputs=inp, outputs=rnn)

        # set weights for the keras model
        model.set_weights([kernel, recurrent_kernel, bias])

        # create NumPyNet layer
        layer = SimpleRNN_layer(outputs=outputs,
                                steps=steps,
                                input_shape=(batch, 1, 1, features),
                                activation=activation,
                                return_sequence=return_seq)

        # set NumPyNet weights
        layer.load_weights(
            np.concatenate(
                [bias.ravel(),
                 kernel.ravel(),
                 recurrent_kernel.ravel()]))

        # FORWARD

        # forward for keras
        forward_out_keras = model.predict(inpt_keras)

        # forward NumPyNet
        layer.forward(inpt)
        forward_out_numpynet = layer.output.reshape(forward_out_keras.shape)

        assert np.allclose(forward_out_numpynet,
                           forward_out_keras,
                           atol=1e-4,
                           rtol=1e-3)
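Both implementations above should realize the same recurrence. As a reference, a minimal NumPy sketch of the many-to-one SimpleRNN pass being compared (simple_rnn_reference is a hypothetical helper; the loop body is an assumption about the internals, with a zero initial state):

import numpy as np

def simple_rnn_reference(inpt_keras, kernel, recurrent_kernel, bias):
    # inpt_keras: (batch, steps, features) -> h: (batch, outputs)
    batch, steps, _ = inpt_keras.shape
    h = np.zeros(shape=(batch, kernel.shape[1]))
    for t in range(steps):
        # h_t = tanh(x_t @ W + h_{t-1} @ U + b)
        h = np.tanh(inpt_keras[:, t, :] @ kernel + h @ recurrent_kernel + bias)
    return h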
Example no. 2
  def test_forward (self):

    batch     = 11
    timesteps = 5
    features  = 3
    outputs   = 5

    np.random.seed(123)

    data = np.random.uniform(size=(batch, features))

    inpt_keras, _ = data_to_timesteps(data, timesteps)

    assert inpt_keras.shape == (batch - timesteps, timesteps, features)

    weights = [np.random.uniform(size=(features, outputs)),
               np.random.uniform(size=(outputs, outputs))]
    bias    = [np.zeros(shape=(outputs,), dtype=float),
               np.zeros(shape=(outputs,), dtype=float)]

    # assign the same weights to all four keras gate kernels as for NumPyNet;
    # the LSTM below is built with use_bias=False, so no bias vector is packed
    keras_weights1 = np.concatenate([weights[0] for i in range(4)], axis=1)
    keras_weights2 = np.concatenate([weights[1] for i in range(4)], axis=1)

    for i in range(4):
      np.testing.assert_allclose(keras_weights1[:,outputs*i:outputs*(i+1)], weights[0], rtol=1e-5, atol=1e-8)

    for i in range(4):
      np.testing.assert_allclose(keras_weights2[:,outputs*i:outputs*(i+1)], weights[1], rtol=1e-5, atol=1e-8)

    inp   = Input(shape=(inpt_keras.shape[1:]))
    lstm  = LSTM(units=outputs, implementation=1, use_bias=False)(inp)
    model = Model(inputs=[inp], outputs=[lstm])

    model.set_weights([keras_weights1, keras_weights2])

    inpt_numpynet = data.reshape(batch, 1, 1, features)
    layer = LSTM_layer(outputs=outputs, steps=timesteps, weights=weights, bias=bias, input_shape=inpt_numpynet.shape)

    np.testing.assert_allclose(layer.uf.weights, model.get_weights()[0][:, :outputs], rtol=1e-5, atol=1e-8)
    np.testing.assert_allclose(layer.ui.weights, model.get_weights()[0][:, outputs:2*outputs], rtol=1e-5, atol=1e-8)
    np.testing.assert_allclose(layer.ug.weights, model.get_weights()[0][:, 2*outputs:3*outputs], rtol=1e-5, atol=1e-8)
    np.testing.assert_allclose(layer.uo.weights, model.get_weights()[0][:, 3*outputs:4*outputs], rtol=1e-5, atol=1e-8)

    np.testing.assert_allclose(layer.wf.weights, model.get_weights()[1][:, :outputs], rtol=1e-5, atol=1e-8)
    np.testing.assert_allclose(layer.wi.weights, model.get_weights()[1][:, outputs:2*outputs], rtol=1e-5, atol=1e-8)
    np.testing.assert_allclose(layer.wg.weights, model.get_weights()[1][:, 2*outputs:3*outputs], rtol=1e-5, atol=1e-8)
    np.testing.assert_allclose(layer.wo.weights, model.get_weights()[1][:, 3*outputs:4*outputs], rtol=1e-5, atol=1e-8)

    forward_out_keras = model.predict(inpt_keras)

    layer.forward(inpt=inpt_numpynet)
    forward_out_numpynet = layer.output.reshape(batch, outputs)
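The slice checks above rely on how Keras packs the four LSTM gate kernels column-wise into a single (features, 4 * outputs) matrix. Keras orders the blocks input, forget, cell, output, so strictly the first slice is the input gate rather than uf; the assertions still pass here only because all four blocks are identical copies of weights[0]. A minimal sketch of unpacking such a kernel (split_lstm_kernel is a hypothetical helper; gate names follow the Keras convention):

import numpy as np

def split_lstm_kernel(keras_kernel):
    # keras_kernel: (features, 4 * units), packed as [ i | f | c | o ]
    w_i, w_f, w_c, w_o = np.split(keras_kernel, 4, axis=1)
    return w_i, w_f, w_c, w_o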
Example no. 3
    def _forward(self):
        outputs = 30
        steps = 1
        features = 10
        batch = 16

        data = np.random.uniform(size=(batch, features))

        weights = [
            np.random.uniform(size=(features, outputs)),
            np.random.uniform(size=(features, outputs)),
            np.random.uniform(size=(features, outputs)),
            np.random.uniform(size=(outputs, outputs)),
            np.random.uniform(size=(outputs, outputs)),
            np.random.uniform(size=(outputs, outputs))
        ]

        # assign the same weights to all three keras gate kernels as for NumPyNet;
        # the GRU below is built with use_bias=False, so no bias vector is packed
        keras_weights1 = np.concatenate([weights[i] for i in range(3)], axis=1)
        keras_weights2 = np.concatenate([weights[i] for i in range(3, 6)],
                                        axis=1)

        inpt_keras, _ = data_to_timesteps(data, steps)

        assert inpt_keras.shape == (batch - steps, steps, features)

        inp = Input(shape=(steps, features))
        gru = GRU(units=outputs, use_bias=False)(inp)
        model = Model(inputs=inp, outputs=gru)

        model.set_weights([keras_weights1, keras_weights2])

        layer = GRU_layer(outputs=outputs,
                          steps=steps,
                          weights=weights,
                          bias=[0, 0, 0])

        layer.forward(data)

        forward_out_keras = model.predict(inpt_keras)

        forward_out_numpynet = layer.output

        assert np.allclose(forward_out_keras, forward_out_numpynet)
Example no. 4
    def test_forward(self, steps, outputs, features, batch, return_seq):

        activation = 'tanh'

        inpt = np.random.uniform(size=(batch, features))
        inpt_keras, _ = data_to_timesteps(inpt, steps=steps)

        assert inpt_keras.shape == (batch - steps, steps, features)

        # INITIALIZE
        model, layer = TestSimpleRNNLayer.initialize_step(
            steps, outputs, features, batch, return_seq, activation,
            inpt_keras)

        # FORWARD
        forward_out_numpynet, forward_out_keras = TestSimpleRNNLayer.forward_step(
            model, layer, inpt_keras, inpt)

        assert np.allclose(forward_out_numpynet,
                           forward_out_keras,
                           atol=1e-4,
                           rtol=1e-3)
Example no. 5
__email__ = ['*****@*****.**', '*****@*****.**']

np.random.seed(42)

if __name__ == '__main__':

  Npoints = 1000
  train_size = 800

  time = np.arange(0, Npoints)
  noisy_signal = np.sin(0.02 * time) + 2 * np.random.rand(Npoints)

  steps = 4
  window_size = steps

  X, _ = data_to_timesteps(noisy_signal, steps=steps)
  y = np.concatenate([X[1:, 0, :], X[-1:, 0, :]], axis=0)

  # Reshape the data according to a 4D tensor
  num_samples, size, _ = X.shape

  if size != steps:
    raise ValueError('Something went wrong with the stride trick: wrong window length!')

  if X.max() > noisy_signal.max() or X.min() < noisy_signal.min():
    raise ValueError('Something went wrong with the stride trick: window values out of range!')

  X = X.reshape(num_samples, 1, 1, size)

  X_train, X_test = X[:train_size, ...], X[train_size:train_size+180, ...]
  y_train, y_test = y[:train_size, ...], y[train_size:train_size+180, ...]
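The two ValueError guards above check the sliding-window construction: every window must be steps samples long and must contain only values taken from the original signal. A minimal sketch of what a stride-trick data_to_timesteps can look like for a 1-D signal (data_to_timesteps_sketch is hypothetical, a unit shift is assumed, and the real NumPyNet helper may differ):

import numpy as np

def data_to_timesteps_sketch(signal, steps):
    # (Npoints,) -> (Npoints - steps + 1, steps, 1): one window per start index,
    # built as a view on the signal, so no value can fall outside its range
    windows = np.lib.stride_tricks.sliding_window_view(signal, steps)
    return windows[..., np.newaxis]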
Example no. 6
    def test_backprop(self, outputs, steps, features, batch, return_seq):

        activation = 'tanh'
        return_seq = False  # fixed to "many_to_one" for now

        with tf.Graph().as_default():
            inpt = np.random.uniform(size=(batch, features))
            inpt_keras, _ = data_to_timesteps(inpt, steps=steps)

            # INITIALIZE
            model, layer = TestSimpleRNNLayer.initialize_step(
                steps, outputs, features, batch, return_seq, activation,
                inpt_keras)

            # FORWARD
            TestSimpleRNNLayer.forward_step(model, layer, inpt_keras, inpt)

            lr = .1
            momentum = .9
            decay = 0.
            layer.optimizer = Momentum(lr=lr, momentum=momentum, decay=decay)
            model.optimizer = SGD(learning_rate=lr,
                                  momentum=momentum,
                                  nesterov=False,
                                  decay=decay)

            # BACKWARD
            print(f'outputs={outputs} steps={steps} features={features} '
                  f'batch={batch} return_seq={return_seq}')
            # an all-ones delta corresponds to differentiating sum(output),
            # which is what K.gradients computes below
            layer.delta = np.ones(shape=layer.out_shape, dtype=float)
            a, b, c = layer.X.shape
            delta = np.ones(shape=(b, a, c), dtype=float)
            layer.backward(delta=delta, copy=True)

            tf.compat.v1.disable_eager_execution()
            # Compute the gradient of output w.r.t the weights
            gradient = K.gradients(model.output, model.trainable_weights)

            # Define a function to evaluate the gradient
            func = K.function(
                model.inputs + model.trainable_weights + model.outputs,
                gradient)

            updates = func([inpt_keras])
            weights_update_keras = updates[0]
            recurrent_weights_update_keras = updates[1]
            bias_update_keras = updates[2]

            np.testing.assert_allclose(layer.bias_update,
                                       bias_update_keras,
                                       atol=1e-4,
                                       rtol=1e-4)
            np.testing.assert_allclose(layer.weights_update,
                                       weights_update_keras,
                                       atol=1e-4,
                                       rtol=1e-4)
            np.testing.assert_allclose(layer.recurrent_weights_update,
                                       recurrent_weights_update_keras,
                                       atol=1e-4,
                                       rtol=1e-4)
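The K.gradients/K.function pair above is TF1-style graph code, which is why eager execution has to be disabled. Under eager TF2 the same three weight gradients could be read off with a GradientTape; a sketch of the equivalent (keras_weight_grads is hypothetical, not the code this test runs):

import tensorflow as tf

def keras_weight_grads(model, inpt_keras):
    # gradient of sum(output) w.r.t the trainable weights, matching the
    # implicit sum over outputs performed by K.gradients
    with tf.GradientTape() as tape:
        output = model(inpt_keras)
    return tape.gradient(output, model.trainable_weights)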
Example no. 7
    def _backward(self, steps, outputs, features, batch, return_seq):

        return_seq = False  # fixed to "many_to_one" for now
        activation = 'tanh'

        inpt = np.random.uniform(size=(batch, features))
        inpt_keras, _ = data_to_timesteps(inpt, steps=steps)

        assert inpt_keras.shape == (batch, steps, features)

        # weights init
        kernel = np.random.uniform(low=-1, high=1, size=(features, outputs))
        recurrent_kernel = np.random.uniform(low=-1,
                                             high=1,
                                             size=(outputs, outputs))
        bias = np.random.uniform(low=-1, high=1, size=(outputs, ))

        # create keras model
        inp = Input(shape=inpt_keras.shape[1:])
        rnn = SimpleRNN(units=outputs,
                        activation=activation,
                        return_sequences=return_seq)(inp)
        model = Model(inputs=inp, outputs=rnn)

        # set weights for the keras model
        model.set_weights([kernel, recurrent_kernel, bias])

        # create NumPyNet layer
        layer = SimpleRNN_layer(outputs=outputs,
                                steps=steps,
                                input_shape=(batch, 1, 1, features),
                                activation=activation,
                                return_sequence=return_seq)

        # set NumPyNet weights
        layer.load_weights(
            np.concatenate(
                [bias.ravel(),
                 kernel.ravel(),
                 recurrent_kernel.ravel()]))

        np.testing.assert_allclose(layer.weights,
                                   model.get_weights()[0],
                                   rtol=1e-5,
                                   atol=1e-8)
        np.testing.assert_allclose(layer.recurrent_weights,
                                   model.get_weights()[1],
                                   rtol=1e-5,
                                   atol=1e-8)
        np.testing.assert_allclose(layer.bias,
                                   model.get_weights()[2],
                                   rtol=1e-5,
                                   atol=1e-8)

        # FORWARD

        # forward for keras
        forward_out_keras = model.predict(inpt_keras)

        # forward NumPyNet
        layer.forward(inpt)
        forward_out_numpynet = layer.output.reshape(forward_out_keras.shape)

        np.testing.assert_allclose(forward_out_numpynet,
                                   forward_out_keras,
                                   atol=1e-4,
                                   rtol=1e-3)

        # BACKWARD

        # Compute the gradients of the output w.r.t the input and the weights
        gradient1 = K.gradients(model.output, [model.input])
        gradient2 = K.gradients(model.output, model.trainable_weights)

        # Define functions to evaluate the gradients
        func1 = K.function(model.inputs + [model.output], gradient1)
        func2 = K.function(
            model.inputs + model.trainable_weights + model.outputs, gradient2)

        # Compute delta for Keras
        delta_keras = func1([inpt_keras])[0]
        updates = func2([inpt_keras])

        weights_update_keras = updates[0]
        recurrent_weights_update_keras = updates[1]
        bias_update_keras = updates[2]

        # backward pass for NumPyNet
        delta = np.zeros(shape=inpt_keras.shape, dtype=float)
        layer.delta = np.ones(shape=layer.output.shape, dtype=float)
        layer.backward(inpt, delta, copy=True)

        np.testing.assert_allclose(layer.bias_update,
                                   bias_update_keras,
                                   atol=1e-8,
                                   rtol=1e-5)
        np.testing.assert_allclose(layer.weights_update,
                                   weights_update_keras,
                                   atol=1e-8,
                                   rtol=1e-5)
        np.testing.assert_allclose(delta, delta_keras, atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(layer.recurrent_weights_update,
                                   recurrent_weights_update_keras,
                                   atol=1e-8,
                                   rtol=1e-5)
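When the Keras and NumPyNet updates disagree, a finite-difference probe on a single weight entry is a quick way to tell which side is wrong. A generic sketch, not part of the test suite (finite_diff_grad is hypothetical; f maps a weight array to a scalar loss):

import numpy as np

def finite_diff_grad(f, w, i, eps=1e-5):
    # central difference of the scalar f w.r.t the i-th (flat) entry of w
    w_plus, w_minus = w.copy(), w.copy()
    w_plus.flat[i] += eps
    w_minus.flat[i] -= eps
    return (f(w_plus) - f(w_minus)) / (2 * eps)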