Example #1
    def test_printer(self, cost):

        layer = Cost_layer(cost_type=cost)

        # printing before the input shape is set should raise a TypeError
        with pytest.raises(TypeError):
            print(layer)

        layer.input_shape = (1, 2, 3, 4)

        print(layer)
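
A standalone sketch of how the cost argument could be supplied through pytest
parametrization (the decorator, the imports and the parameter list are
assumptions, not shown in the excerpt):

    import pytest
    from NumPyNet.layers.cost_layer import Cost_layer, cost_type

    @pytest.mark.parametrize('cost', [cost_type.mse, cost_type.mae])
    def test_printer_standalone(cost):
        layer = Cost_layer(cost_type=cost)
        # printing before the input shape is set should raise a TypeError
        with pytest.raises(TypeError):
            print(layer)
        layer.input_shape = (1, 2, 3, 4)
        print(layer)  # now the layer can report its shapes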
Example #2
    def test_backward(self, b, w, h, c):

        # TODO: test backward correctly

        input = np.random.uniform(low=-10, high=10., size=(b, w, h, c))
        tf_input = tf.Variable(input)

        # init keras model
        inp = Input(batch_shape=(b, w, h, c))
        x = Activation(activation='relu')(inp)
        y = Activation(activation='tanh')(x)
        Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
        model = Model(inputs=[inp], outputs=Concat)
        model.compile(optimizer='sgd', loss='mse')

        # init NumPyNet model
        net = Network(batch=b, input_shape=(w, h, c))
        net.add(Activation_layer(activation='relu'))  # layer 1
        net.add(Activation_layer(activation='tanh'))  # layer 2
        net.add(Route_layer(input_layers=(1, 2), by_channels=True))
        net.add(
            Cost_layer(cost_type='mse',
                       scale=1.,
                       ratio=0.,
                       noobject_scale=1.,
                       threshold=0.,
                       smoothing=0.))
        net.compile(optimizer=SGD())

        net._fitted = True  # bypass the fit check so predict can run

        # FORWARDS

        fwd_out_numpynet = net.predict(X=input)

        with tf.GradientTape() as tape:
            preds = model(tf_input)

        # gradient of the outputs with respect to the inputs
        grads = tape.gradient(preds, tf_input)

        fwd_out_keras = preds.numpy()
        delta_keras = grads.numpy()

        np.testing.assert_allclose(fwd_out_keras,
                                   fwd_out_numpynet,
                                   rtol=1e-5,
                                   atol=1e-8)

        net._fitted = False

        # BACKWARD

        # seed the output delta to start the backward pass
        net._net[3].delta = np.ones(shape=fwd_out_numpynet.shape, dtype=float)
        net._backward(X=input)

        delta_numpynet = net._net[0].delta

        assert delta_numpynet.shape == delta_keras.shape
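
The tape-based gradient pattern used above, isolated as a minimal sketch
(shapes and names are illustrative):

    import numpy as np
    import tensorflow as tf

    x = tf.Variable(np.random.uniform(-1., 1., size=(2, 4)).astype(np.float32))

    with tf.GradientTape() as tape:
        y = tf.nn.tanh(tf.nn.relu(x))  # same relu -> tanh chain as the branches above

    # gradient of the (implicitly summed) outputs with respect to the inputs
    dy_dx = tape.gradient(y, x)
    assert dy_dx.shape == x.shape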
Example #3
    def test_backward(self, outputs, cost_idx):
        # testing only default values since the backward is really simple

        nn_cost = nn_losses[cost_idx]
        tf_cost = tf_losses[cost_idx]

        # cast to float32: TF requires it in this case
        truth = np.random.uniform(low=0., high=1.,
                                  size=(outputs, )).astype(np.float32)
        inpt = np.random.uniform(low=0., high=1.,
                                 size=(outputs, )).astype(np.float32)

        truth_tf = tf.Variable(truth)
        inpt_tf = tf.Variable(inpt)

        layer = Cost_layer(input_shape=inpt.shape,
                           cost_type=nn_cost,
                           scale=1.,
                           ratio=0.,
                           noobject_scale=1.,
                           threshold=0.,
                           smoothing=0.)

        with tf.GradientTape() as tape:
            preds = tf_cost(truth_tf, inpt_tf)

        # gradient of the loss with respect to the predictions
        grads = tape.gradient(preds, inpt_tf)

        keras_loss = preds.numpy()
        delta_keras = grads.numpy()

        layer.forward(inpt=inpt, truth=truth)
        loss = layer.cost

        assert np.isclose(keras_loss, loss, atol=1e-7)

        # BACKWARD

        numpynet_delta = layer.delta

        np.testing.assert_allclose(delta_keras,
                                   numpynet_delta,
                                   rtol=1e-4,
                                   atol=1e-8)
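
For the mse entry the delta being validated has a simple closed form: with
loss = mean((inpt - truth)**2) over n outputs, the gradient with respect to
inpt is 2 * (inpt - truth) / n. A sketch of that reference value (not the
library code):

    import numpy as np

    def mse_delta(inpt, truth):
        # analytic gradient of mean((inpt - truth)**2) w.r.t. inpt
        return 2. * (inpt - truth) / inpt.size

    # should match delta_keras from the tape above (mse case only):
    # np.testing.assert_allclose(mse_delta(inpt, truth), delta_keras, rtol=1e-4)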
Example #4
    def test_forward(self, outputs, scale, nbj_scale, threshold, smoothing,
                     cost_idx):

        ratio = 0.
        nn_cost = nn_losses[cost_idx]
        tf_cost = tf_losses[cost_idx]

        # cast to float32: TF requires it in this case
        truth = np.random.uniform(low=0., high=10.,
                                  size=(outputs, )).astype(np.float32)
        inpt = np.random.uniform(low=0., high=10.,
                                 size=(outputs, )).astype(np.float32)

        truth_tf = tf.Variable(truth)
        inpt_tf = tf.Variable(inpt)

        layer = Cost_layer(input_shape=inpt.shape,
                           cost_type=nn_cost,
                           scale=scale,
                           ratio=ratio,
                           noobject_scale=nbj_scale,
                           threshold=threshold,
                           smoothing=smoothing)

        keras_loss_tf = tf_cost(truth_tf, inpt_tf)

        keras_loss = keras_loss_tf.numpy()

        layer.forward(inpt=inpt, truth=truth)

        assert layer.out_shape == inpt.shape
        assert layer.output is not None
        assert layer.delta is not None
        assert layer.cost is not None

        # recreate the cost layer with default values for testing against keras
        layer = Cost_layer(input_shape=inpt.shape,
                           cost_type=nn_cost,
                           scale=1.,
                           ratio=0.,
                           noobject_scale=1.,
                           threshold=0.,
                           smoothing=0.)
        layer.forward(inpt=inpt, truth=truth)
        loss = layer.cost

        assert np.isclose(keras_loss, loss, atol=1e-3)
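
A side note as a sketch: in eager TF the loss can be evaluated directly on
numpy arrays, so the tf.Variable wrapping above is only needed when gradients
are recorded (using mean_squared_error here is an assumption about tf_losses):

    import numpy as np
    import tensorflow as tf

    tf_cost = tf.keras.losses.mean_squared_error
    truth = np.random.uniform(0., 10., size=(30,)).astype(np.float32)
    inpt = np.random.uniform(0., 10., size=(30,)).astype(np.float32)

    keras_loss = tf_cost(truth, inpt).numpy()  # no tf.Variable required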
Example #5
    def test_printer(self, b, w, h, c):

        net = Network(batch=b, input_shape=(w, h, c))
        net.add(Activation_layer(activation='relu'))  # layer 1
        net.add(Activation_layer(activation='tanh'))  # layer 2
        net.add(Route_layer(input_layers=(1, 2), by_channels=True))
        net.add(
            Cost_layer(cost_type='mse',
                       scale=1.,
                       ratio=0.,
                       noobject_scale=1.,
                       threshold=0.,
                       smoothing=0.))
        net.compile(optimizer=SGD())

        net.summary()
Example #6
    def test_forward(self, b, w, h, c):

        input = np.random.uniform(low=-10, high=10.,
                                  size=(b, w, h, c)).astype(float)

        # init keras model
        inp = Input(batch_shape=(b, w, h, c))
        x = Activation(activation='relu')(inp)
        y = Activation(activation='tanh')(x)
        Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
        model = Model(inputs=[inp], outputs=Concat)
        model.compile(optimizer='sgd', loss='mse')

        # init NumPyNet model
        net = Network(batch=b, input_shape=(w, h, c))
        net.add(Activation_layer(activation='relu'))  # layer 1
        net.add(Activation_layer(activation='tanh'))  # layer 2
        net.add(Route_layer(input_layers=(1, 2), by_channels=True))
        net.add(
            Cost_layer(cost_type='mse',
                       scale=1.,
                       ratio=0.,
                       noobject_scale=1.,
                       threshold=0.,
                       smoothing=0.))
        net.compile(optimizer=SGD())

        net.summary()

        assert net._fitted is False
        net._fitted = True  # bypass the fit check so predict can run

        # FORWARDS

        fwd_out_numpynet = net.predict(X=input)
        fwd_out_keras = model.predict(x=input, batch_size=b)

        np.testing.assert_allclose(fwd_out_keras,
                                   fwd_out_numpynet,
                                   rtol=1e-5,
                                   atol=1e-8)
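
The Route_layer with by_channels=True mirrors Keras' Concatenate(axis=-1); a
tiny numpy sketch of the shape bookkeeping being tested:

    import numpy as np

    x = np.ones((2, 5, 5, 3))              # output of the relu branch
    y = np.ones((2, 5, 5, 3))              # output of the tanh branch
    out = np.concatenate([x, y], axis=-1)  # channel-wise concatenation
    assert out.shape == (2, 5, 5, 6)       # channels add up: 3 + 3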
Example #7
    def test_constructor(self, b, w, h, c, scale, ratio, nbj_scale, threshold,
                         smoothing, cost):

        input_shape = choice([None, (b, w, h, c)])

        layer = Cost_layer(cost_type=cost,
                           input_shape=input_shape,
                           scale=scale,
                           ratio=ratio,
                           noobject_scale=nbj_scale,
                           threshold=threshold,
                           smoothing=smoothing)

        assert layer.cost_type == cost
        assert layer.scale == scale
        assert layer.ratio == ratio
        assert layer.noobject_scale == nbj_scale
        assert layer.threshold == threshold
        assert layer.smoothing == smoothing

        assert layer.out_shape == input_shape
        assert layer.output is None
        assert layer.delta is None
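
Note that choice here is presumably the standard-library random.choice (an
assumption based on usage), so the constructor is exercised both with a
deferred shape and with a concrete one:

    from random import choice

    input_shape = choice([None, (2, 10, 10, 3)])  # deferred or fixed shape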
Example #8
  X_train, X_test = X[:train_size, ...], X[train_size:train_size+180, ...]
  y_train, y_test = y[:train_size, ...], y[train_size:train_size+180, ...]

  batch = 20
  step = batch

  y_train = y_train.reshape(-1, 1, 1, 1)
  y_test = y_test.reshape(-1, 1, 1, 1)

  # Create the model and training
  model = Network(batch=batch, input_shape=X_train.shape[1:])

  model.add(RNN_layer(outputs=32, steps=step, activation='linear'))
  model.add(Connected_layer(outputs=8, activation='relu'))
  model.add(Connected_layer(outputs=1, activation='linear'))
  model.add(Cost_layer(cost_type='mse'))
  # keras standard arguments (metrics=[mean_absolute_error] could also be passed)
  model.compile(optimizer=RMSprop(lr=0.001, epsilon=1e-7))

  print('*************************************')
  print('\n Total input dimension: {}'.format(X_train.shape), '\n')
  print('**************MODEL SUMMARY***********')

  model.summary()

  print('\n***********START TRAINING***********\n')

  # Fit the model on the training set
  model.fit(X=X_train, y=y_train, max_iter=10)  # y_train was already reshaped above

  print('\n***********START TESTING**************\n')
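
A minimal sketch of the testing step the banner announces (assumed; the
excerpt ends here), reusing the predict API shown in the other examples:

  # run the fitted model on the held-out window and score it
  predictions = model.predict(X=X_test)
  mae = np.abs(predictions.reshape(-1) - y_test.reshape(-1)).mean()
  print('Test MAE: {:.4f}'.format(mae))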
Example #9
def test_cost_layer():
  '''
  Tests:
        the forward function of the cost layer.
        if the cost is the same for every cost_type (mse and mae)
        if the delta is correctly computed
  To be tested:
        _smoothing
        _threshold
        _ratio
        noobject_scale
        masked
        _seg
        _wgan
  '''
  from NumPyNet.layers import cost_layer as cl
  from NumPyNet.layers.cost_layer import Cost_layer

  from keras.losses import mean_squared_error
  from keras.losses import mean_absolute_error

  from math import isclose

  np.random.seed(123)

  losses = [mean_absolute_error, mean_squared_error]

  for keras_loss_type in losses:

    outputs = 100
    truth = np.random.uniform(low=0., high=1., size=(outputs,))
    inpt = np.random.uniform(low=0., high=1., size=(outputs,))

    inp = Input(shape=(inpt.size, ))
    x = Activation(activation='linear')(inp)
    model = Model(inputs=[inp], outputs=x)

    # a backend variable to hold the labels
    truth_tf = K.variable(truth)

    if   keras_loss_type is mean_squared_error:  cost = cl.cost_type.mse
    elif keras_loss_type is mean_absolute_error: cost = cl.cost_type.mae

    numpynet_layer = Cost_layer(input_shape=inpt.shape, cost_type=cost,
                                scale=1., ratio=0., noobject_scale=1.,
                                threshold=0., smoothing=0.)

    keras_loss = K.eval(keras_loss_type(truth_tf, inpt))
    numpynet_layer.forward(inpt, truth)
    numpynet_loss = numpynet_layer.cost

    assert isclose(keras_loss, numpynet_loss, abs_tol=1e-7)

    # BACKWARD

    # compute loss based on model's output and true labels
    if   keras_loss_type is mean_squared_error:
      loss = K.mean( K.square(truth_tf - model.output) )
    elif keras_loss_type is mean_absolute_error:
      loss = K.mean( K.abs(truth_tf - model.output) )

    # compute gradient of loss with respect to inputs
    grad_loss = K.gradients(loss, [model.input])

    # create a function to be able to run this computation graph
    func = K.function(model.inputs + [truth_tf], grad_loss)
    keras_delta = func([np.expand_dims(inpt, axis=0), truth])

    numpynet_delta = numpynet_layer.delta

    assert np.allclose(keras_delta, numpynet_delta)
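
The same gradient check rewritten against the tf.GradientTape API used in
Examples #2 and #3, as a sketch for the mse case:

    import numpy as np
    import tensorflow as tf

    inpt_tf = tf.Variable(inpt.astype(np.float32))
    truth_tf = tf.Variable(truth.astype(np.float32))

    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(truth_tf - inpt_tf))

    keras_delta = tape.gradient(loss, inpt_tf).numpy()
    # np.allclose(keras_delta, numpynet_delta) as above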