Example #1
import numpy as np

import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense

# NumPyNet import paths below assume the upstream package layout
from NumPyNet.activations import Relu, Logistic, Tanh, Linear
from NumPyNet.layers.connected_layer import Connected_layer


def test_connected_layer():
    '''
    Tests:
      that the forward pass is coherent with Keras;
      that the updates (weights, bias) and the delta computed by the
      backward pass are correct.

    Still to be tested:
      the update function (the Keras update rule is not clear).
    '''
    np.random.seed(123)

    keras_activ = ['relu', 'sigmoid', 'tanh', 'linear']
    numpynet_activ = [Relu, Logistic, Tanh, Linear]

    # Useful variables initialization
    outputs = 10
    batch, w, h, c = (5, 10, 10, 3)
    inpt = np.random.uniform(low=0., high=1., size=(batch, w, h, c))

    weights = np.random.uniform(low=0., high=1., size=(w * h * c, outputs))
    bias = np.random.uniform(low=0., high=1., size=(outputs,))

    for keras_act, numpynet_act in zip(keras_activ, numpynet_activ):

        # NumPyNet model
        numpynet_layer = Connected_layer(inpt.shape,
                                         outputs,
                                         activation=numpynet_act,
                                         weights=weights,
                                         bias=bias)
        # Keras model
        inp = Input(batch_shape=(batch, w * h * c))
        x = Dense(outputs, activation=keras_act)(inp)
        model = Model(inputs=[inp], outputs=x)

        # Set weights in the Keras model; Dense stores its variables as
        # [kernel, bias], matching the order passed below
        model.set_weights([weights, bias])

        # FORWARD

        # Keras forward output
        forward_out_keras = model.predict(inpt.reshape(batch, -1))

        # NumPyNet forward output
        numpynet_layer.forward(inpt)
        forward_out_numpynet = numpynet_layer.output

        # Forward output test: NumPyNet keeps a 4-D output of shape
        # (batch, 1, 1, outputs), so only the trailing axis is compared
        # against Keras' 2-D output
        assert np.allclose(forward_out_numpynet[:, 0, 0, :],
                           forward_out_keras,
                           atol=1e-8)

        # BACKWARD

        # Derivative of the output with respect to the input
        grad = K.gradients(model.output, [model.input])

        # Derivative of the output with respect to the trainable weights
        # (kernel and bias)
        gradients = K.gradients(model.output, model.trainable_weights)

        # Define the functions that evaluate those gradients
        func = K.function(model.inputs, grad)
        func2 = K.function(model.inputs, gradients)

        # Evaluate delta, weights_update and bias_update for Keras
        delta_keras = func([inpt.reshape(batch, -1)])
        updates = func2([inpt.reshape(batch, -1)])
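
        # Note: K.gradients differentiates the sum of model.output, i.e.
        # backpropagation seeded with a delta of ones; hence the
        # initialization below.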

        # Initialize the NumPyNet starting delta to ones
        numpynet_layer.delta = np.ones(shape=numpynet_layer.out_shape,
                                       dtype=float)

        # Initialization of the global delta
        delta = np.zeros(shape=(batch, w, h, c), dtype=float)

        # Compute delta, weights_update and bias_update for NumPyNet
        numpynet_layer.backward(inpt, delta=delta)

        # Now the global variable delta has been updated in place

        assert np.allclose(delta_keras[0].reshape(batch, w, h, c),
                           delta,
                           atol=1e-8)
        assert np.allclose(updates[0],
                           numpynet_layer.weights_update,
                           atol=1e-8)
        assert np.allclose(updates[1], numpynet_layer.bias_update, atol=1e-8)
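
For reference, the algebra this test checks can be reproduced in a few lines
of plain NumPy. This is a minimal sketch assuming a linear activation (so the
activation gradient is one); the names are illustrative and not part of
NumPyNet:

import numpy as np

np.random.seed(123)
batch, features, outputs = 5, 300, 10           # 300 = 10 * 10 * 3 flattened

x = np.random.uniform(size=(batch, features))   # flattened input
W = np.random.uniform(size=(features, outputs)) # kernel
b = np.random.uniform(size=(outputs,))          # bias

# forward: z = x @ W + b (identity activation, so output == z)
z = x @ W + b

# backward seeded with delta_out = ones, as in the test above
delta_out = np.ones_like(z)
weights_update = x.T @ delta_out     # gradient w.r.t. the kernel
bias_update = delta_out.sum(axis=0)  # gradient w.r.t. the bias
delta_in = delta_out @ W.T           # gradient propagated back to the input

assert delta_in.shape == x.shape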
Example #2
    # Excerpt from a test class. Module-level requirements (import paths
    # assume the NumPyNet package layout): numpy as np, tensorflow as tf,
    # pytest, NumPyNet.exception.NotFittedError and
    # NumPyNet.layers.connected_layer.Connected_layer, plus the
    # nn_activation / tf_activation lists that map idx_act to the NumPyNet
    # and TensorFlow activations.
    def test_backward(self, outputs, b, w, h, c, idx_act):

        weights = np.random.uniform(low=0., high=1.,
                                    size=(w * h * c, outputs)).astype(float)
        bias = np.random.uniform(low=0., high=1., size=(outputs,)).astype(float)

        inpt = np.random.uniform(low=-1., high=1.,
                                 size=(b, w, h, c)).astype(float)
        tf_input = tf.Variable(inpt.reshape(b, -1))

        # NumPyNet model
        layer = Connected_layer(outputs,
                                input_shape=inpt.shape,
                                activation=nn_activation[idx_act],
                                weights=weights,
                                bias=bias)
        # Tensorflow layer
        model = tf.keras.layers.Dense(
            outputs,
            activation=tf_activation[idx_act],
            kernel_initializer=lambda shape, dtype=None: weights,
            bias_initializer=lambda shape, dtype=None: bias)
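        # Returning the fixed arrays from the initializer callables pins the
        # TensorFlow weights to the same values used by the NumPyNet layer.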

        # Calling backward before any forward pass must raise NotFittedError
        with pytest.raises(NotFittedError):
            delta = np.empty(shape=inpt.shape, dtype=float)
            layer.backward(inpt=inpt, delta=delta)

        # FORWARD

        # TensorFlow forward and backward; persistent=True is needed because
        # tape.gradient is called twice
        with tf.GradientTape(persistent=True) as tape:
            preds = model(tf_input)

        forward_out_keras = preds.numpy()
        # tape.gradient differentiates the summed output, i.e.
        # backpropagation seeded with ones
        delta_keras = tape.gradient(preds, tf_input).numpy()
        updates = tape.gradient(preds, model.trainable_weights)

        # NumPyNet forward output
        layer.forward(inpt=inpt)
        forward_out_numpynet = layer.output

        # Forward output test: NumPyNet keeps a 4-D output of shape
        # (b, 1, 1, outputs), so only the trailing axis is compared
        np.testing.assert_allclose(forward_out_numpynet[:, 0, 0, :],
                                   forward_out_keras,
                                   rtol=1e-5,
                                   atol=1e-2)

        # BACKWARD

        # Initialize the NumPyNet starting delta to ones, matching the
        # summed-output gradient computed by the tape above
        layer.delta = np.ones(shape=layer.out_shape, dtype=float)

        # Initialization of the global delta
        delta = np.zeros(shape=(b, w, h, c), dtype=float)

        # Compute delta, weights_update and bias_update for NumPyNet
        layer.backward(inpt=inpt, delta=delta)

        # Now the global variable delta has been updated in place
        np.testing.assert_allclose(delta_keras.reshape(b, w, h, c),
                                   delta,
                                   rtol=1e-5,
                                   atol=1e-6)
        np.testing.assert_allclose(updates[0],
                                   layer.weights_update,
                                   rtol=1e-5,
                                   atol=1e-6)
        np.testing.assert_allclose(updates[1],
                                   layer.bias_update,
                                   rtol=1e-4,
                                   atol=1e-7)
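
The excerpt above is a method, so running it needs an enclosing class and a
source of parameters. A plausible driver, assuming pytest is available (the
class name and parameter values are illustrative, not from the original
module):

import pytest

class TestConnectedLayer:

    @pytest.mark.parametrize('idx_act', range(4))
    @pytest.mark.parametrize('outputs, b, w, h, c', [(10, 5, 10, 10, 3)])
    def test_backward(self, outputs, b, w, h, c, idx_act):
        ...  # body as in Example #2 above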