Example no. 1
    def test_printer(self, b, w, h, c):

        layer = Softmax_layer(input_shape=(b, w, h, c))

        print(layer)

        # replace the batch dimension with a non-integer value: printing must now fail
        layer.input_shape = (3.14, w, h, c)

        with pytest.raises(ValueError):
            print(layer)
Example no. 2
import numpy as np

from NumPyNet.layers.softmax_layer import Softmax_layer

from keras.models import Model
from keras.layers import Input, Softmax
from keras.losses import categorical_crossentropy
from keras import backend as K


def test_softmax_layer():

    np.random.seed(123)

    spatials = [False, True]

    for spatial in spatials:
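        # spatial=True  -> softmax over the channel axis only
        # spatial=False -> softmax over the whole (w, h, c) feature volume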

        if spatial:
            axis = -1
        else:
            axis = (1, 2, 3)

        batch, w, h, c = (1, 3, 3, 3)

        np.random.seed(123)
        inpt = np.random.uniform(low=0., high=1., size=(batch, w, h, c))

        batch, w, h, c = inpt.shape

        truth = np.random.choice([0., 1.], p=[.5, .5], size=(batch, w, h, c))
        truth = np.ones(shape=(batch, w, h, c))
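        # NOTE: the all-ones truth above overrides the random draw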

        numpynet = Softmax_layer(groups=1, temperature=1., spatial=spatial)

        inp = Input(shape=(w, h, c), batch_shape=inpt.shape)
        x = Softmax(axis=axis)(inp)
        model = Model(inputs=[inp], outputs=x)
        model.compile(optimizer='sgd', loss='categorical_crossentropy')

        forward_out_keras = model.predict(inpt)

        # definition of tensorflow variable
        truth_tf = K.variable(truth.ravel())
        forward_out_keras_tf = K.variable(forward_out_keras.ravel())

        loss = categorical_crossentropy(truth_tf, forward_out_keras_tf)

        keras_loss = K.eval(loss)
        numpynet.forward(inpt, truth)
        numpynet_loss = numpynet.cost

        assert np.isclose(keras_loss, numpynet_loss, atol=1e-7)

        forward_out_numpynet = numpynet.output

        assert np.allclose(forward_out_keras, forward_out_numpynet, atol=1e-8)

        # helper: gradient of the total loss w.r.t. the model output, via Keras' (private) feed API
        def get_loss_grad(model, inputs, outputs):
            x, y, sample_weight = model._standardize_user_data(inputs, outputs)
            grad_ce = K.gradients(model.total_loss, model.output)
            func = K.function((model._feed_inputs + model._feed_targets +
                               model._feed_sample_weights), grad_ce)
            return func(x + y + sample_weight)
Example no. 3
    def test_constructor(self, s, t):

        if t > 0:
            layer = Softmax_layer(spatial=s, temperature=t)

            assert layer.output is None
            assert layer.delta is None

            assert layer.spatial == s

            # the constructor stores the reciprocal of the supplied temperature
            assert layer.temperature == 1. / t

        else:
            with pytest.raises(ValueError):
                layer = Softmax_layer(spatial=s, temperature=t)
Example no. 4
    def test_forward(self, b, w, h, c, spatial):

        inpt = np.random.uniform(low=0., high=1.,
                                 size=(b, w, h, c)).astype(float)
        truth = np.random.choice([0., 1.], p=[.5, .5],
                                 size=(b, w, h, c)).astype(float)

        if spatial:
            inpt_tf = tf.Variable(inpt.copy())
            truth_tf = tf.Variable(truth.copy())

        else:
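            # non-spatial mode: flatten each sample so softmax and crossentropy run over all w*h*c features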
            inpt_tf = tf.Variable(inpt.copy().reshape(b, -1))
            truth_tf = tf.Variable(truth.copy().reshape(b, -1))

        # NumPyNet layer
        layer = Softmax_layer(input_shape=inpt.shape,
                              temperature=1.,
                              spatial=spatial)

        # Tensorflow layer
        model = tf.keras.layers.Softmax(axis=-1)
        loss = tf.keras.losses.CategoricalCrossentropy(
            reduction=tf.keras.losses.Reduction.SUM)

        # Tensorflow softmax
        preds = model(inpt_tf)
        # Computing loss for tensorflow
        keras_loss = loss(truth_tf, preds).numpy()

        forward_out_keras = preds.numpy().reshape(b, w, h, c)

        # Softmax + crossentropy NumPyNet
        layer.forward(inpt=inpt, truth=truth)
        forward_out_numpynet = layer.output
        numpynet_loss = layer.cost

        # testing softmax
        np.testing.assert_allclose(forward_out_keras,
                                   forward_out_numpynet,
                                   rtol=1e-5,
                                   atol=1e-8)

        # testing crossentropy
        np.testing.assert_allclose(keras_loss,
                                   numpynet_loss,
                                   rtol=1e-5,
                                   atol=1e-6)
Example no. 5
def test_softmax_layer():

  from NumPyNet.layers.softmax_layer import Softmax_layer

  import numpy as np

  from keras.losses import categorical_crossentropy
  from keras.layers import Input, Softmax
  from keras.models import Model
  from keras import backend as K

  spatials = [False, True]

  for spatial in spatials:

    if spatial:
      axis = -1
    else:
      axis = (1, 2, 3)

    np.random.seed(123)
    inpt = np.random.uniform(low = 0., high = 1., size = (2,10,10,3))

    batch, w, h, c = inpt.shape

    truth = np.random.choice([0., 1.], p = [.5,.5], size=(batch,w,h,c))

    numpynet = Softmax_layer(groups = 1, temperature = 1., spatial = spatial)

    inp = Input(shape=(w,h,c), batch_shape = inpt.shape)
    x = Softmax(axis = axis)(inp)
    model = Model(inputs=[inp], outputs=x)

    forward_out_keras = model.predict(inpt)

    # definition of tensorflow variable
    truth_tf             = K.variable(truth.ravel())
    forward_out_keras_tf = K.variable(forward_out_keras.ravel())

    loss = categorical_crossentropy( truth_tf, forward_out_keras_tf)

    keras_loss = K.eval(loss)
    numpynet.forward(inpt, truth)
    numpynet_loss = numpynet.cost

    assert np.allclose(numpynet_loss, keras_loss)

    forward_out_numpynet = numpynet.output

    assert np.allclose(forward_out_keras, forward_out_numpynet, atol = 1e-8)
Example no. 6
    def test_backward(self, b, w, h, c, spatial):

        w, h = (1, 1)  # the backward comparison works only with w = h = 1 when spatial=False

        inpt = np.random.uniform(low=0., high=1.,
                                 size=(b, w, h, c)).astype(float)
        truth = np.random.choice([0., 1.], p=[.5, .5],
                                 size=(b, w, h, c)).astype(float)

        if spatial:
            inpt_tf = tf.Variable(inpt)
            truth_tf = tf.Variable(truth)

        else:
            inpt_tf = tf.Variable(inpt.copy().reshape(b, -1))
            truth_tf = tf.Variable(truth.copy().reshape(b, -1))

        # NumPyNet layer
        layer = Softmax_layer(input_shape=inpt.shape,
                              temperature=1.,
                              spatial=spatial)

        # Tensorflow layer
        model = tf.keras.layers.Softmax(axis=-1)
        loss = tf.keras.losses.CategoricalCrossentropy(
            from_logits=False, reduction=tf.keras.losses.Reduction.SUM)

        with tf.GradientTape() as tape:
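            # forward pass and loss under the tape, then gradient of the loss w.r.t. the input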
            preds = model(inpt_tf)
            cost = loss(truth_tf, preds)
            grads = tape.gradient(cost, inpt_tf)

            forward_out_keras = preds.numpy().reshape(b, w, h, c)
            keras_loss = cost.numpy()
            delta_keras = grads.numpy().reshape(b, w, h, c)

        layer.forward(inpt=inpt, truth=truth)
        forward_out_numpynet = layer.output
        numpynet_loss = layer.cost

        delta = np.zeros(shape=inpt.shape, dtype=float)
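        # backward fills delta in-place with the gradient of the loss w.r.t. the input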
        layer.backward(delta=delta)

        np.testing.assert_allclose(forward_out_keras,
                                   forward_out_numpynet,
                                   rtol=1e-5,
                                   atol=1e-8)
        np.testing.assert_allclose(keras_loss,
                                   numpynet_loss,
                                   rtol=1e-5,
                                   atol=1e-6)
        np.testing.assert_allclose(delta, delta_keras, rtol=1e-5, atol=1e-8)
Example no. 7
                            filters=32,
                            stride=1,
                            pad=True,
                            activation='Relu'))

    model.add(BatchNorm_layer())

    model.add(Maxpool_layer(size=2, stride=1, padding=True))

    model.add(Connected_layer(outputs=100, activation='Relu'))

    model.add(BatchNorm_layer())

    model.add(Connected_layer(outputs=num_classes, activation='Linear'))

    model.add(Softmax_layer(spatial=True, groups=1, temperature=1.))
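    # Softmax_layer acts as the output layer and also provides the cross-entropy cost used in training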
    # model.add(Cost_layer(cost_type=cost_type.mse))

    # model.compile(optimizer=SGD(lr=0.01, decay=0., lr_min=0., lr_max=np.inf))
    model.compile(optimizer=Adam(), metrics=[accuracy])

    print('*************************************')
    print('\n Total input dimension: {}'.format(X_train.shape), '\n')
    print('**************MODEL SUMMARY***********')

    model.summary()

    print('\n***********START TRAINING***********\n')

    # Fit the model on the training set
    model.fit(X=X_train, y=y_train, max_iter=10, verbose=True)
Example no. 8
                            filters=64,
                            stride=1,
                            pad=True,
                            activation='Relu'))
    model.add(Maxpool_layer(size=2, stride=1, padding=True))
    model.add(Dropout_layer(prob=0.25))
    model.add(
        Connected_layer(input_shape=(batch, 8, 8, 64),
                        outputs=128,
                        activation='Relu'))
    model.add(Dropout_layer(prob=0.5))
    model.add(
        Connected_layer(input_shape=(batch, 1, 1, 128),
                        outputs=num_classes,
                        activation='Linear'))
    model.add(Softmax_layer(spatial=True))

    print('*************************************')
    print('\n Total input dimension: {}'.format(X_train.shape), '\n')
    print('*************************************')

    model.compile(optimizer=Adam)
    model.summary()

    print('\n***********START TRAINING***********\n')

    # Fit the model on the training set

    model.fit(X=X_train, y=y_train, max_iter=5)

    print('\n***********END TRAINING**************\n')