Example #1
def test_route_layer():

  np.random.seed(123)

  batch, w, h, c = (1, 5, 5, 3)
  input = np.random.uniform(low=-10., high=10., size=(batch, w, h, c))  # range [-10, 10] to exercise both the ReLU and the TanH activations

  # init keras model
  inp    = Input(batch_shape=(batch, w, h, c))
  x      = Activation(activation='relu')(inp)
  y      = Activation(activation='tanh')(x)
  Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
  model  = Model(inputs=[inp], outputs=Concat)

  # init NumPyNet model
  net = Network(batch=batch, input_shape=(w, h, c))

  net.add(Activation_layer(activation='relu')) # layer 1
  net.add(Activation_layer(activation='tanh')) # layer 2
  net.add(Route_layer(input_layers=(1,2), by_channels=True))

  net._fitted = True  # bypass the "not fitted" check so that predict can run

  # FORWARDS

  fwd_out_numpynet = net.predict(X=input)
  fwd_out_keras    = model.predict(x=input, batch_size=batch)

  assert np.allclose(fwd_out_keras, fwd_out_numpynet) # ok

  net._fitted = False  # restore the correct (not yet fitted) state of the network

  # BACKWARD

  # try some derivatives
  gradient    = K.gradients(model.output, model.inputs)
  func        = K.function(model.inputs + model.outputs, gradient)
  delta_keras = func([input])[0]

  net._net[3].delta = np.ones(shape=fwd_out_numpynet.shape)
  net._backward(X=input)

  delta_numpynet = net._net[0].delta
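
The snippet above is shown without its import header. A minimal set of imports that would make it self-contained is sketched below; the NumPyNet module paths are assumptions based on the package's usual layout, and the K.gradients call additionally assumes a TensorFlow 1.x-style graph mode.

# Assumed import header for the snippet above; the NumPyNet module
# paths are a guess and may differ across versions.
import numpy as np

from tensorflow.keras.layers import Input, Activation, Concatenate
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K

from NumPyNet.network import Network
from NumPyNet.layers.activation_layer import Activation_layer
from NumPyNet.layers.route_layer import Route_layer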
Example #2
    def test_backward(self, b, w, h, c):

        # TODO: test backward correctly

        input = np.random.uniform(low=-10, high=10., size=(b, w, h, c))
        tf_input = tf.Variable(input)

        # init keras model
        inp = Input(batch_shape=(b, w, h, c))
        x = Activation(activation='relu')(inp)
        y = Activation(activation='tanh')(x)
        Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
        model = Model(inputs=[inp], outputs=Concat)
        model.compile(optimizer='sgd', loss='mse')

        # init NumPyNet model
        net = Network(batch=b, input_shape=(w, h, c))
        net.add(Activation_layer(activation='relu'))  # layer 1
        net.add(Activation_layer(activation='tanh'))  # layer 2
        net.add(Route_layer(input_layers=(1, 2), by_channels=True))
        net.add(
            Cost_layer(cost_type='mse',
                       scale=1.,
                       ratio=0.,
                       noobject_scale=1.,
                       threshold=0.,
                       smoothing=0.))
        net.compile(optimizer=SGD())

        net._fitted = True

        # FORWARDS

        fwd_out_numpynet = net.predict(X=input)

        with tf.GradientTape() as tape:
            preds = model(tf_input)

        # evaluate the gradient outside the tape context
        grads = tape.gradient(preds, tf_input)

        fwd_out_keras = preds.numpy()
        delta_keras = grads.numpy()

        np.testing.assert_allclose(fwd_out_keras,
                                   fwd_out_numpynet,
                                   rtol=1e-5,
                                   atol=1e-8)

        net._fitted = False

        # BACKWARD

        net._net[3].delta = np.ones(shape=fwd_out_numpynet.shape, dtype=float)
        net._backward(X=input)

        delta_numpynet = net._net[0].delta

        assert delta_numpynet.shape == delta_keras.shape
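
The test above stops at a shape check (see the TODO). Once the two backward passes agree, a value-level comparison would presumably complete it; the tolerances below simply mirror the forward check and are an assumption.

        # hypothetical completion of the backward test: compare values,
        # not only shapes, with the same tolerances as the forward pass
        np.testing.assert_allclose(delta_numpynet,
                                   delta_keras,
                                   rtol=1e-5,
                                   atol=1e-8)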
Example #3
    def test_printer(self, b, w, h, c):

        net = Network(batch=b, input_shape=(w, h, c))
        net.add(Activation_layer(activation='relu'))  # layer 1
        net.add(Activation_layer(activation='tanh'))  # layer 2
        net.add(Route_layer(input_layers=(1, 2), by_channels=True))
        net.add(
            Cost_layer(cost_type='mse',
                       scale=1.,
                       ratio=0.,
                       noobject_scale=1.,
                       threshold=0.,
                       smoothing=0.))
        net.compile(optimizer=SGD())

        net.summary()
Example #4
    def test_forward(self, b, w, h, c):

        input = np.random.uniform(low=-10, high=10.,
                                  size=(b, w, h, c)).astype(float)

        # init keras model
        inp = Input(batch_shape=(b, w, h, c))
        x = Activation(activation='relu')(inp)
        y = Activation(activation='tanh')(x)
        Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
        model = Model(inputs=[inp], outputs=Concat)
        model.compile(optimizer='sgd', loss='mse')

        # init NumPyNet model
        net = Network(batch=b, input_shape=(w, h, c))
        net.add(Activation_layer(activation='relu'))  # layer 1
        net.add(Activation_layer(activation='tanh'))  # layer 2
        net.add(Route_layer(input_layers=(1, 2), by_channels=True))
        net.add(
            Cost_layer(cost_type='mse',
                       scale=1.,
                       ratio=0.,
                       noobject_scale=1.,
                       threshold=0.,
                       smoothing=0.))
        net.compile(optimizer=SGD())

        net.summary()

        assert not net._fitted
        net._fitted = True  # bypass the "not fitted" check so that predict can run

        # FORWARDS

        fwd_out_numpynet = net.predict(X=input)
        fwd_out_keras = model.predict(x=input, batch_size=b)

        np.testing.assert_allclose(fwd_out_keras,
                                   fwd_out_numpynet,
                                   rtol=1e-5,
                                   atol=1e-8)
Example #5
    ############################################

    n_train = X_train.shape[0]
    n_test = X_test.shape[0]

    # one-hot encode y (10 classes) and reshape it to a 4-dimensional array
    y_train = to_categorical(y_train).reshape(n_train, 1, 1, -1)
    y_test = to_categorical(y_test).reshape(n_test, 1, 1, -1)

    # Create the model
    model = Network(batch=batch, input_shape=X_train.shape[1:])

    model.add(
        Convolutional_layer(size=3,
                            filters=32,
                            stride=1,
                            pad=True,
                            activation='Relu'))

    model.add(BatchNorm_layer())

    model.add(Maxpool_layer(size=2, stride=1, padding=True))

    model.add(Connected_layer(outputs=100, activation='Relu'))

    model.add(BatchNorm_layer())

    model.add(Connected_layer(outputs=num_classes, activation='Linear'))

    model.add(Softmax_layer(spatial=True, groups=1, temperature=1.))
    # model.add(Cost_layer(cost_type=cost_type.mse))
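
The excerpt stops right after the output layer. Compiling the network would presumably follow the same pattern as the other examples; the SGD import path below is an assumption, since none of the snippets show their imports.

    # Assumed continuation: compile with an optimizer and inspect the model.
    # The optimizer import path is a guess based on the package layout.
    from NumPyNet.optimizer import SGD

    model.compile(optimizer=SGD())
    model.summary()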
Example #6
  X = X.reshape(num_samples, 1, 1, size)

  X_train, X_test = X[:train_size, ...], X[train_size:train_size+180, ...]
  y_train, y_test = y[:train_size, ...], y[train_size:train_size+180, ...]

  batch = 20
  step = batch

  y_train = y_train.reshape(-1, 1, 1, 1)
  y_test = y_test.reshape(-1, 1, 1, 1)

  # Create the model
  model = Network(batch=batch, input_shape=X_train.shape[1:])

  model.add(RNN_layer(outputs=32, steps=step, activation='linear'))
  model.add(Connected_layer(outputs=8, activation='relu'))
  model.add(Connected_layer(outputs=1, activation='linear'))
  model.add(Cost_layer(cost_type='mse'))
  # RMSprop takes the standard keras arguments
  model.compile(optimizer=RMSprop(lr=0.001, epsilon=1e-7))  # metrics=[mean_absolute_error]

  print('*************************************')
  print('\n Total input dimension: {}'.format(X_train.shape), '\n')
  print('**************MODEL SUMMARY***********')

  model.summary()

  print('\n***********START TRAINING***********\n')

  # Fit the model on the training set
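
The excerpt is cut right before the training call. A training/evaluation step would presumably look like the sketch below; the Network.fit signature (max_iter) is an assumption and the iteration count is purely illustrative.

  # Assumed training step: the fit signature and the iteration count
  # are illustrative guesses, not taken from the source.
  model.fit(X=X_train, y=y_train, max_iter=10)

  # out-of-sample predictions on the held-out set
  predictions = model.predict(X=X_test)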
Example #7
    n_train = X_train.shape[0]
    n_test = X_test.shape[0]

        # one-hot encode y (10 classes) and reshape it to a 4-dimensional array
    y_train = to_categorical(y_train).reshape(n_train, 1, 1, -1)
    y_test = to_categorical(y_test).reshape(n_test, 1, 1, -1)

    # Create the model
    model = Network(batch=batch, input_shape=X_train.shape[1:])

    # model.add(Input_layer(input_shape=(batch, 32, 32, 3))) # not necessary if input_shape is given to Network
    model.add(
        Convolutional_layer(input_shape=(batch, 8, 8, 3),
                            size=3,
                            filters=32,
                            stride=1,
                            pad=True,
                            activation='Relu'))
    model.add(
        Convolutional_layer(input_shape=(batch, 8, 8, 32),
                            size=3,
                            filters=64,
                            stride=1,
                            pad=True,
                            activation='Relu'))
    model.add(Maxpool_layer(size=2, stride=1, padding=True))
    model.add(Dropout_layer(prob=0.25))
    model.add(
        Connected_layer(input_shape=(batch, 8, 8, 64),
                        outputs=128,