Example #1
    def test_printer(self, outputs, b, w, h, c):

        layer = Connected_layer(outputs=outputs, activation=Linear)

        # Printing a layer whose input shape is still unknown must fail
        with pytest.raises(TypeError):
            print(layer)

        # With a complete input shape the summary prints fine
        layer = Connected_layer(outputs=outputs,
                                activation=Linear,
                                input_shape=(b, w, h, c))

        print(layer)
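
The snippets on this page are trimmed from the NumPyNet test suite and example scripts, so their imports are elided. A minimal preamble they appear to assume for the Connected_layer tests is sketched below; the module paths follow the NumPyNet repository layout and are an assumption here, and later snippets additionally need tensorflow/keras and the NumPyNet Network, layer and optimizer modules:

    # Assumed imports for the test snippets (module paths per the NumPyNet repo layout)
    import numpy as np
    import pytest
    from random import choice

    from NumPyNet.layers.connected_layer import Connected_layer
    from NumPyNet.activations import Relu, Logistic, Tanh, Linear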
Example #2
    def test_constructor(self, outputs, b, w, h, c, act_fun):

        if outputs > 0:
            # Valid layer: weights and bias may be given explicitly or left to default
            weights_choice = [
                np.random.uniform(low=-1., high=1., size=(w * h * c, outputs)),
                None
            ]
            bias_choice = [
                np.random.uniform(low=-1., high=1., size=(outputs,)), None
            ]

        else:
            # A non-positive number of outputs must be rejected by the constructor
            with pytest.raises(ValueError):
                layer = Connected_layer(outputs=outputs)

            # Fall back to a valid number of outputs for the rest of the test
            outputs += 10
            weights_choice = [
                np.random.uniform(low=-1., high=1., size=(w * h * c, outputs)),
                None
            ]
            bias_choice = [
                np.random.uniform(low=-1., high=1., size=(outputs,)), None
            ]

        weights = choice(weights_choice)
        bias = choice(bias_choice)

        layer = Connected_layer(outputs=outputs,
                                activation=act_fun,
                                input_shape=(b, w, h, c),
                                weights=weights,
                                bias=bias)

        if weights is not None:
            assert np.allclose(layer.weights, weights)

        if bias is not None:
            assert np.allclose(layer.bias, bias)
        else:
            assert np.allclose(layer.bias, np.zeros(shape=(outputs, )))

        assert layer.output is None
        assert layer.weights_update is None
        assert layer.bias_update is None
        assert layer.optimizer is None

        assert layer.activation == act_fun.activate
        assert layer.gradient == act_fun.gradient
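
The `outputs, b, w, h, c, act_fun` arguments of these test methods come from a parametrization layer that the snippets elide. A hypothetical pytest driver that would exercise both branches of the constructor test (all names and values here are illustrative, not the suite's actual setup):

    # Hypothetical parametrization for the constructor test above
    @pytest.mark.parametrize('outputs, b, w, h, c, act_fun',
                             [(10, 5, 10, 10, 3, Linear),  # valid layer
                              (-3, 2, 4, 4, 1, Relu)])     # hits the ValueError branch
    def test_constructor(self, outputs, b, w, h, c, act_fun):
        ...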
Example #3
    def test_forward(self, outputs, b, w, h, c, idx_act):

        weights = np.random.uniform(low=-1.,
                                    high=1.,
                                    size=(w * h * c, outputs)).astype(float)
        bias = np.random.uniform(low=-1., high=1.,
                                 size=(outputs,)).astype(float)

        inpt = np.random.uniform(low=-1., high=1.,
                                 size=(b, w, h, c)).astype(float)

        # NumPyNet model
        layer = Connected_layer(outputs,
                                input_shape=inpt.shape,
                                activation=nn_activation[idx_act],
                                weights=weights,
                                bias=bias)

        # Tensorflow Layer
        model = tf.keras.layers.Dense(
            outputs,
            activation=tf_activation[idx_act],
            kernel_initializer=lambda shape, dtype=None: weights,
            bias_initializer=lambda shape, dtype=None: bias)

        # FORWARD

        # Keras forward output
        forward_out_keras = model(inpt.reshape(b, -1))

        # NumPyNet forward output
        layer.forward(inpt=inpt)
        forward_out_numpynet = layer.output

        assert forward_out_numpynet.shape == (b, 1, 1, outputs)
        np.testing.assert_allclose(forward_out_numpynet[:, 0, 0, :],
                                   forward_out_keras,
                                   rtol=1e-5,
                                   atol=1e-2)
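
What the assertion verifies is just the dense computation below; a minimal NumPy sketch, where `act` stands for the activation selected by `idx_act` (an illustrative name):

    # Reference forward pass: flatten, affine map, activation
    x = inpt.reshape(b, -1)        # (b, w * h * c)
    z = x @ weights + bias         # (b, outputs)
    out = act(z)                   # equals layer.output[:, 0, 0, :]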
Example #4
    # Create and compile the model
    model = Network(batch=batch, input_shape=X_train.shape[1:])

    model.add(
        Convolutional_layer(size=3,
                            filters=32,
                            stride=1,
                            pad=True,
                            activation='Relu'))

    model.add(BatchNorm_layer())

    model.add(Maxpool_layer(size=2, stride=1, padding=True))

    model.add(Connected_layer(outputs=100, activation='Relu'))

    model.add(BatchNorm_layer())

    model.add(Connected_layer(outputs=num_classes, activation='Linear'))

    model.add(Softmax_layer(spatial=True, groups=1, temperature=1.))
    # model.add(Cost_layer(cost_type=cost_type.mse))

    # model.compile(optimizer=SGD(lr=0.01, decay=0., lr_min=0., lr_max=np.inf))
    model.compile(optimizer=Adam(), metrics=[accuracy])

    print('*************************************')
    print('\n Total input dimension: {}'.format(X_train.shape), '\n')
    print('**************MODEL SUMMARY***********')
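
The snippet is cut before the summary and training calls; a typical continuation, mirroring Example #6 below (`y_train` and the `max_iter` value are assumptions here):

    model.summary()

    print('\n***********START TRAINING***********\n')

    # Fit the model on the training set (iteration count is illustrative)
    model.fit(X=X_train, y=y_train, max_iter=10)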
Example #5
def test_connected_layer():
    '''
    Tests:
      if the forward pass is coherent with Keras
      if the updates (weights, bias) and the delta computed by the backward pass are correct

    Still to be tested:
      the update function (the Keras update rule is not clear)
    '''
    np.random.seed(123)

    keras_activ = ['relu', 'sigmoid', 'tanh', 'linear']
    numpynet_activ = [Relu, Logistic, Tanh, Linear]

    # Useful variables initialization
    outputs = 10
    batch, w, h, c = (5, 10, 10, 3)
    inpt = np.random.uniform(low=0., high=1., size=(batch, w, h, c))

    weights = np.random.uniform(low=0., high=1., size=(w * h * c, outputs))
    bias = np.random.uniform(low=0., high=1., size=(outputs,))

    for keras_act, numpynet_act in zip(keras_activ, numpynet_activ):

        # NumPyNet model
        numpynet_layer = Connected_layer(input_shape=inpt.shape,
                                         outputs=outputs,
                                         activation=numpynet_act,
                                         weights=weights,
                                         bias=bias)
        # Keras Model
        inp = Input(batch_shape=(batch, w * h * c))
        x = Dense(outputs, activation=keras_act)(inp)
        model = Model(inputs=[inp], outputs=x)

        # Set weights in Keras Model.
        model.set_weights([weights, bias])

        # FORWARD

        # Keras forward output
        forward_out_keras = model.predict(inpt.reshape(batch, -1))

        # NumPyNet forward output
        numpynet_layer.forward(inpt)
        forward_out_numpynet = numpynet_layer.output

        # Forward output test
        assert np.allclose(forward_out_numpynet[:, 0, 0, :],
                           forward_out_keras,
                           atol=1e-8)

        # BACKWARD

        # Output derivative with respect to the input
        grad = K.gradients(model.output, [model.input])

        # Output derivative with respect to trainable_weights (weights and biases)
        gradients = K.gradients(model.output, model.trainable_weights)

        # Defining functions to compute those gradients
        func = K.function(model.inputs, grad)
        func2 = K.function(model.inputs, gradients)

        # Evaluation of Delta, weights_updates and bias_updates for Keras
        delta_keras = func([inpt.reshape(batch, -1)])
        updates = func2([inpt.reshape(batch, -1)])

        # Initialization of NumPyNet starting delta to ones
        numpynet_layer.delta = np.ones(shape=numpynet_layer.out_shape,
                                       dtype=float)

        # Initialization of the global delta
        delta = np.zeros(shape=(batch, w, h, c), dtype=float)

        # Computation of delta, weights_update and bias_update for NumPyNet
        numpynet_layer.backward(inpt, delta=delta)

        # Now the global variable delta is updated

        assert np.allclose(delta_keras[0].reshape(batch, w, h, c),
                           delta,
                           atol=1e-8)
        assert np.allclose(updates[0],
                           numpynet_layer.weights_update,
                           atol=1e-8)
        assert np.allclose(updates[1], numpynet_layer.bias_update, atol=1e-8)
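
The three assertions reduce to the textbook dense-layer gradients. A NumPy sketch of the quantities being compared, under the same setup (`act_prime` stands for the elementwise derivative of the chosen activation at the pre-activation; it is an illustrative name, and NumPyNet's own `gradient` functions may use a different convention):

    # Reference backward pass, with the upstream delta seeded to ones
    x = inpt.reshape(batch, -1)                        # (batch, w * h * c)
    z = x @ weights + bias                             # pre-activation, (batch, outputs)
    dz = np.ones((batch, outputs)) * act_prime(z)      # delta through the activation
    weights_update = x.T @ dz                          # gradient w.r.t. the weights
    bias_update = dz.sum(axis=0)                       # gradient w.r.t. the bias
    delta = (dz @ weights.T).reshape(batch, w, h, c)   # gradient w.r.t. the input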
Example #6
  X = X.reshape(num_samples, 1, 1, size)

  X_train, X_test = X[:train_size, ...], X[train_size:train_size+180, ...]
  y_train, y_test = y[:train_size, ...], y[train_size:train_size+180, ...]

  batch = 20
  step = batch

  y_train = y_train.reshape(-1, 1, 1, 1)
  y_test = y_test.reshape(-1, 1, 1, 1)

  # Create and train the model
  model = Network(batch=batch, input_shape=X_train.shape[1:])

  model.add(RNN_layer(outputs=32, steps=step, activation='linear'))
  model.add(Connected_layer(outputs=8, activation='relu'))
  model.add(Connected_layer(outputs=1, activation='linear'))
  model.add(Cost_layer(cost_type='mse'))
  # RMSprop configured with the Keras standard arguments
  model.compile(optimizer=RMSprop(lr=0.001, epsilon=1e-7))  # , metrics=[mean_absolute_error]

  print('*************************************')
  print('\n Total input dimension: {}'.format(X_train.shape), '\n')
  print('**************MODEL SUMMARY***********')

  model.summary()

  print('\n***********START TRAINING***********\n')

  # Fit the model on the training set
  model.fit(X=X_train, y=y_train, max_iter=10)
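
The preparation of X and y is elided above. A hypothetical stand-in that yields compatible shapes (a noisy sine series cut into `size`-step windows with a one-step-ahead target; every name and value in this sketch is illustrative):

  # Hypothetical data prep: sliding windows over a noisy sine wave
  num_samples, size, train_size = 1000, 20, 800
  t = np.linspace(0., 100., num_samples + size)
  series = np.sin(t) + np.random.normal(scale=0.1, size=t.shape)
  X = np.array([series[i:i + size] for i in range(num_samples)])  # (num_samples, size)
  y = series[size:size + num_samples]                             # next value after each window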
Example #7
    model.add(
        Convolutional_layer(size=3,
                            filters=32,
                            stride=1,
                            pad=True,
                            activation='Relu'))
    model.add(
        Convolutional_layer(input_shape=(batch, 8, 8, 32),
                            size=3,
                            filters=64,
                            stride=1,
                            pad=True,
                            activation='Relu'))
    model.add(Maxpool_layer(size=2, stride=1, padding=True))
    model.add(Dropout_layer(prob=0.25))
    model.add(
        Connected_layer(input_shape=(batch, 8, 8, 64),
                        outputs=128,
                        activation='Relu'))
    model.add(Dropout_layer(prob=0.5))
    model.add(
        Connected_layer(input_shape=(batch, 1, 1, 128),
                        outputs=num_classes,
                        activation='Linear'))
    model.add(Softmax_layer(spatial=True))

    print('*************************************')
    print('\n Total input dimension: {}'.format(X_train.shape), '\n')
    print('*************************************')

    model.compile(optimizer=Adam())
    model.summary()
Example #8
    def test_backward(self, outputs, b, w, h, c, idx_act):

        weights = np.random.uniform(low=0., high=1.,
                                    size=(w * h * c, outputs)).astype(float)
        bias = np.random.uniform(low=0., high=1., size=(outputs,)).astype(float)

        inpt = np.random.uniform(low=-1., high=1.,
                                 size=(b, w, h, c)).astype(float)
        tf_input = tf.Variable(inpt.reshape(b, -1))

        # NumPyNet model
        layer = Connected_layer(outputs,
                                input_shape=inpt.shape,
                                activation=nn_activation[idx_act],
                                weights=weights,
                                bias=bias)
        # Tensorflow layer
        model = tf.keras.layers.Dense(
            outputs,
            activation=tf_activation[idx_act],
            kernel_initializer=lambda shape, dtype=None: weights,
            bias_initializer=lambda shape, dtype=None: bias)

        # Backward before a forward pass must raise NotFittedError
        with pytest.raises(NotFittedError):
            delta = np.empty(shape=inpt.shape, dtype=float)
            layer.backward(inpt=inpt, delta=delta)

        # FORWARD

        # Tensorflow forward and backward
        with tf.GradientTape(persistent=True) as tape:
            preds = model(tf_input)

        # Gradients are evaluated outside the recording context
        grad1 = tape.gradient(preds, tf_input)
        grad2 = tape.gradient(preds, model.trainable_weights)

        forward_out_keras = preds.numpy()
        delta_keras = grad1.numpy()
        updates = grad2

        # NumPyNet forward output
        layer.forward(inpt=inpt)
        forward_out_numpynet = layer.output

        # Forward output Test
        np.testing.assert_allclose(forward_out_numpynet[:, 0, 0, :],
                                   forward_out_keras,
                                   rtol=1e-5,
                                   atol=1e-2)

        # BACKWARD

        # Initialization of NumPyNet starting delta to ones
        layer.delta = np.ones(shape=layer.out_shape, dtype=float)

        # Initialization of global delta
        delta = np.zeros(shape=(b, w, h, c), dtype=float)

        # Computation of delta, weights_update and bias_update for NumPyNet
        layer.backward(inpt=inpt, delta=delta)

        # Now the global variable delta is updated
        np.testing.assert_allclose(delta_keras.reshape(b, w, h, c),
                                   delta,
                                   rtol=1e-5,
                                   atol=1e-6)
        np.testing.assert_allclose(updates[0],
                                   layer.weights_update,
                                   rtol=1e-5,
                                   atol=1e-6)
        np.testing.assert_allclose(updates[1],
                                   layer.bias_update,
                                   rtol=1e-4,
                                   atol=1e-7)
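
A note on the delta seeded with ones: `tape.gradient` on a non-scalar target differentiates the sum of its entries, so filling `layer.delta` with ones on the NumPyNet side reproduces exactly that convention and makes the two backward passes comparable element-wise.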