# Example 1 (score: 0)
def test_set_weights_without_biases():
    """Round-trip test for set_weights_without_biases().

    Builds a CNN with a random number of conv layers followed by a random
    number of dense layers, overwrites each layer's kernel with a constant
    array, and checks that get_weights_without_biases() returns it
    unchanged (shape AND values).
    """
    my_cnn = CNN()
    # Random H x W x C input so the test is not tied to one geometry.
    image_size = (np.random.randint(32, 100), np.random.randint(20, 100), np.random.randint(3, 10))
    number_of_conv_layers = np.random.randint(2, 10)
    my_cnn.add_input_layer(shape=image_size, name="input")
    for k in range(number_of_conv_layers):
        number_of_filters = np.random.randint(3, 100)
        kernel_size = np.random.randint(3, 9)
        my_cnn.append_conv2d_layer(num_of_filters=number_of_filters,
                                   kernel_size=(kernel_size, kernel_size),
                                   padding="same", activation='linear')
        # Overwrite the fresh layer's kernel with a constant and read it back.
        w = my_cnn.get_weights_without_biases(layer_number=k + 1)
        w_set = np.full_like(w, 0.2)
        my_cnn.set_weights_without_biases(w_set, layer_number=k + 1)
        w_get = my_cnn.get_weights_without_biases(layer_number=k + 1)
        assert w_get.shape == w_set.shape
        # Stronger than the original shape-only check: values must round-trip.
        assert np.allclose(w_get, w_set)
    pool_size = np.random.randint(2, 5)
    my_cnn.append_maxpooling2d_layer(pool_size=pool_size, padding="same",
                                     strides=2, name="pool1")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10)
    number_of_dense_layers = np.random.randint(2, 10)
    for k in range(number_of_dense_layers):
        number_of_nodes = np.random.randint(3, 100)
        my_cnn.append_dense_layer(num_nodes=number_of_nodes)
        # Offset: input + conv layers + pool + flatten + first dense layer
        # precede this loop's layers, hence k + number_of_conv_layers + 4.
        layer_number = k + number_of_conv_layers + 4
        w = my_cnn.get_weights_without_biases(layer_number=layer_number)
        w_set = np.full_like(w, 0.8)
        my_cnn.set_weights_without_biases(w_set, layer_number=layer_number)
        w_get = my_cnn.get_weights_without_biases(layer_number=layer_number)
        assert w_get.shape == w_set.shape
        assert np.allclose(w_get, w_set)
# Example 2 (score: 0)
def test_remove_last_layer():
    """remove_last_layer() must drop the final dense layer: predictions go
    from the 2-node "dense2" output back to the 10-node "dense1" output.

    (The original declared eight locals -- batch size, epochs, save paths,
    the y_train slice, etc. -- that were never used; they are removed.)
    """
    from tensorflow.keras.datasets import cifar10
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # Only a small prediction batch is needed; no training happens here.
    number_of_train_samples_to_use = 100
    X_train = X_train[0:number_of_train_samples_to_use, :]
    my_cnn = CNN()
    my_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    my_cnn.append_conv2d_layer(num_of_filters=16,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='linear',
                               name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=2,
                                     padding="same",
                                     strides=2,
                                     name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=8,
                               kernel_size=3,
                               activation='relu',
                               name="conv2")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10, activation="relu", name="dense1")
    my_cnn.append_dense_layer(num_nodes=2, activation="relu", name="dense2")
    out = my_cnn.predict(X_train)
    assert out.shape == (number_of_train_samples_to_use, 2)
    my_cnn.remove_last_layer()
    out = my_cnn.predict(X_train)
    assert out.shape == (number_of_train_samples_to_use, 10)
# Example 3 (score: 0)
def test_get_weights_without_biases_3():
    """get_weights_without_biases() must return kernels of the expected
    shape for conv layers and None for layers without weights (input,
    max-pooling, flatten), addressed both by layer number and by name."""
    my_cnn = CNN()
    image_size = (np.random.randint(32, 100), np.random.randint(20, 100), np.random.randint(3, 10))
    number_of_conv_layers = np.random.randint(2, 10)
    my_cnn.add_input_layer(shape=image_size, name="input")
    previous_depth = image_size[2]
    for k in range(number_of_conv_layers):
        number_of_filters = np.random.randint(3, 100)
        kernel_size = np.random.randint(3, 9)
        my_cnn.append_conv2d_layer(num_of_filters=number_of_filters,
                                   kernel_size=(kernel_size, kernel_size),
                                   padding="same", activation='linear')
        # Conv kernels are (kh, kw, in_channels, out_channels).
        actual = my_cnn.get_weights_without_biases(layer_number=k + 1)
        assert actual.shape == (kernel_size, kernel_size, previous_depth, number_of_filters)
        previous_depth = number_of_filters
    # The input layer (layer_number=0) carries no weights.
    actual = my_cnn.get_weights_without_biases(layer_number=0)
    assert actual is None
    pool_size = np.random.randint(2, 5)
    my_cnn.append_maxpooling2d_layer(pool_size=pool_size, padding="same",
                                     strides=2, name="pool1")
    actual = my_cnn.get_weights_without_biases(layer_name="pool1")
    assert actual is None
    my_cnn.append_flatten_layer(name="flat1")
    actual = my_cnn.get_weights_without_biases(layer_name="flat1")
    assert actual is None
    my_cnn.append_dense_layer(num_nodes=10)
    number_of_dense_layers = np.random.randint(2, 10)
    previous_nodes = 10
    for k in range(number_of_dense_layers):
        number_of_nodes = np.random.randint(3, 100)
        my_cnn.append_dense_layer(num_nodes=number_of_nodes)
        actual = my_cnn.get_weights_without_biases(layer_number=k + number_of_conv_layers + 4)
        # TODO(review): shape assertion was disabled in the original --
        # presumably it failed on a layer-number offset; confirm before
        # re-enabling.
        # assert actual.shape == (previous_nodes, number_of_nodes)
        previous_nodes = number_of_nodes
# Example 4 (score: 0)
def test_evaluate():
    """Train a hand-built keras model and a CNN-wrapper model on the same
    CIFAR-10 subset, then check evaluate() results agree within a loose
    tolerance.

    NOTE(review): the two architectures differ (filter counts, extra pool
    layer in the wrapper), so only rough agreement is checked.
    """
    cnn = CNN()
    classes = 10
    bs = 50
    epochs = 50
    train_samples = 500

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train[0:train_samples, :]
    y_train = y_train[0:train_samples, :]
    x_test = x_test[0:train_samples, :]
    y_test = y_test[0:train_samples, :]
    y_train = keras.utils.to_categorical(y_train, classes)
    y_test = keras.utils.to_categorical(y_test, classes)
    # Scale pixels to [0, 1].
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255

    # Reference model built directly with keras.
    model = keras.Sequential()
    model.add(Conv2D(45, (1, 1), padding='same', activation='sigmoid'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(15, (3, 3), padding='same', activation='sigmoid'))
    model.add(Flatten())
    model.add(Dense(100, activation='sigmoid'))
    model.add(Dense(classes, activation='softmax'))
    o = keras.optimizers.RMSprop(learning_rate=0.002)
    model.compile(loss='categorical_crossentropy',
                  optimizer=o,
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=bs, epochs=epochs)
    evaluate1 = model.evaluate(x_test, y_test)

    # Equivalent-ish model built through the CNN wrapper.
    cnn.add_input_layer(x_train.shape[1:], name="input")
    cnn.append_conv2d_layer(num_of_filters=32,
                            kernel_size=1,
                            padding='same',
                            strides=1,
                            activation='sigmoid',
                            name="c1")
    cnn.append_maxpooling2d_layer(pool_size=(2, 2), name='p1')
    cnn.append_conv2d_layer(num_of_filters=25,
                            kernel_size=3,
                            padding='same',
                            strides=1,
                            activation='sigmoid',
                            name="c2")
    cnn.append_maxpooling2d_layer(pool_size=(3, 3), name='p2')
    cnn.append_flatten_layer(name='flat')
    cnn.append_dense_layer(num_nodes=150,
                           activation='sigmoid',
                           trainable=True,
                           name='d1')
    cnn.append_dense_layer(num_nodes=classes,
                           activation='softmax',
                           trainable=True,
                           name='d2')
    # Bug fix: the original reused optimizer `o`, whose state is already
    # bound to the first model's variables after training; each model
    # needs its own optimizer instance.
    o2 = keras.optimizers.RMSprop(learning_rate=0.002)
    cnn.model.compile(loss='categorical_crossentropy',
                      optimizer=o2,
                      metrics=['accuracy'])
    cnn.model.fit(x_train, y_train, batch_size=bs, epochs=epochs)
    evaluate2 = cnn.model.evaluate(x_test, y_test)
    assert np.allclose(evaluate1, evaluate2, rtol=1e-1, atol=1e-1 * 6)
# Example 5 (score: 0)
# Build a VGG-style stack through the CNN wrapper: two conv blocks (a pair
# of 3x3 same-padded ReLU convs followed by 2x2 max-pooling, at 32 then 64
# filters), a flatten, a 512-unit ReLU dense layer, and a softmax head.
# NOTE(review): `model`, `x_train`, `x_test`, `y_train`, `y_test`,
# `num_classes`, and `epochs` are defined outside this fragment.
model.add_input_layer(x_train.shape[1:])
model.append_conv2d_layer(num_of_filters=32,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_conv2d_layer(num_of_filters=32,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_maxpooling2d_layer(pool_size=2)
model.append_conv2d_layer(num_of_filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_conv2d_layer(num_of_filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_maxpooling2d_layer(pool_size=2)
model.append_flatten_layer()
model.append_dense_layer(num_nodes=512, activation='relu')
model.append_dense_layer(num_nodes=num_classes, activation='softmax')
# Configure training: RMSprop with a small learning rate, cross-entropy
# loss over one-hot labels, accuracy as the reported metric.
model.set_optimizer(optimizer='RMSprop', learning_rate=0.0001)
model.set_loss_function('categorical_crossentropy')
model.set_metric(['accuracy'])
# Scale pixel values to [0, 1] before training.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
model.train(x_train, y_train, num_classes, epochs)
model.evaluate(x_test, y_test)
# Example 6 (score: 0)
def test_evaluate():
    """Train the same architecture with plain keras and through the CNN
    wrapper, then check their evaluate() results agree within a loose
    tolerance."""
    my_cnn = CNN()
    classes = 10
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    y_train = keras.utils.to_categorical(y_train, classes)
    y_test = keras.utils.to_categorical(y_test, classes)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    # Bug fix: the original wrote `X /= 255 - 0.5`, which divides by 254.5.
    # The intent was clearly to center pixels into [-0.5, 0.5].
    X_train = X_train / 255 - 0.5
    X_test = X_test / 255 - 0.5
    number_of_train_samples_to_use = 500
    X_train = X_train[0:number_of_train_samples_to_use, :]
    y_train = y_train[0:number_of_train_samples_to_use]
    # Reference model built directly with keras.
    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               input_shape=(32, 32, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(classes, activation='softmax'))
    # `learning_rate=` replaces the deprecated `lr=` alias (consistent with
    # the other tests in this file).
    opt = SGD(learning_rate=0.01, momentum=0.0)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=32, epochs=50)
    evaluate = model.evaluate(X_test, y_test)

    # Same architecture built through the CNN wrapper.
    my_cnn.add_input_layer(shape=X_train.shape[1:], name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu',
                               name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name='pool1')
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu',
                               name="conv2")
    my_cnn.append_flatten_layer(name='flat')
    my_cnn.append_dense_layer(num_nodes=512,
                              activation='relu',
                              trainable=True,
                              name='dense1')
    my_cnn.append_dense_layer(num_nodes=classes,
                              activation='softmax',
                              trainable=True,
                              name='dense2')
    # A fresh optimizer instance, so the two trainings are independent.
    my_opt = SGD(learning_rate=0.01, momentum=0.0)
    my_cnn.model.compile(optimizer=my_opt,
                         loss='categorical_crossentropy',
                         metrics=['accuracy'])
    my_cnn.model.fit(X_train, y_train, batch_size=32, epochs=50)
    my_eval = my_cnn.model.evaluate(X_test, y_test)
    assert np.allclose(evaluate, my_eval, rtol=1e-1, atol=1e-1 * 6)
def test_eval():
    """Smoke-test: build a plain keras CNN and a CNN-wrapper model on a
    CIFAR-10 subset and check that keras evaluate() produces a finite loss.

    NOTE(review): training and the wrapper's own evaluate() were disabled
    (commented out) in the original; that is preserved here.
    """
    my_cnn = CNN()
    train_s = 100
    (X_train, Y_train), (X_test, Y_test) = datasets.cifar10.load_data()
    X_train = X_train[0:train_s, :]
    Y_train = Y_train[0:train_s, :]
    X_test = X_test[0:train_s, :]
    Y_test = Y_test[0:train_s, :]
    # Labels stay as integer class ids: the model is compiled with
    # SparseCategoricalCrossentropy, which expects sparse labels.
    # (Bug fix: the original one-hot-encoded Y_train, then accidentally
    # overwrote it with the one-hot TEST labels and divided the result by
    # 255 -- all dead or wrong, so it is removed.)
    X_train = X_train.astype('float32') / 255
    # Bug fix: normalize the test images (the original normalized Y_train a
    # second time instead of X_test).
    X_test = X_test.astype('float32') / 255

    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    # Raw logits: the loss below is configured with from_logits=True.
    model.add(layers.Dense(10))

    # A single compile is sufficient (the original compiled three times
    # with identical settings).
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    # history = model.fit(X_train, Y_train, batch_size=20, epochs=100)

    my_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='linear',
                               name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=2,
                                     padding="same",
                                     strides=2,
                                     name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=8,
                               kernel_size=3,
                               activation='relu',
                               name="conv2")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10, activation="relu", name="dense1")
    my_cnn.append_dense_layer(num_nodes=2, activation="relu", name="dense2")

    test = model.evaluate(X_test, Y_test)
    # Replaces the original's vacuous `x == 1` / `test1` scaffolding with a
    # meaningful check: evaluation must yield finite numbers.
    assert all(np.isfinite(v) for v in np.atleast_1d(test))
    # history = my_cnn.train(X_train, Y_train, batch_size=100, num_epochs=200)
    # test2 = my_cnn.evaluate(X_test, Y_test)
# Example 8 (score: 0)
def test_model_train():
    """Train the same architecture with plain keras and through the CNN
    wrapper, then check that the per-epoch training-loss histories match
    within tolerance."""
    my_cnn = CNN()
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)

    # Small subsets keep the test fast; comment out to use the full dataset.
    x_train = x_train[0:600, :]
    x_test = x_test[0:100]
    y_train = y_train[0:600, :]
    y_test = y_test[0:100]
    x_train = x_train.astype('float32') / 255.0
    x_test = x_test.astype('float32') / 255.0

    # Reference model built directly with keras.
    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               input_shape=(32, 32, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    # `learning_rate=` replaces the deprecated `lr=` alias (consistent with
    # the other tests in this file).
    optimizer = SGD(learning_rate=0.01, momentum=0.0)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    correct_loss_history = model.fit(x_train,
                                     y_train,
                                     batch_size=32,
                                     epochs=10,
                                     validation_data=(x_test, y_test))

    # Same architecture built through the CNN wrapper.
    my_cnn.add_input_layer(shape=x_train.shape[1:], name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu')
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name='pool1')
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu')
    my_cnn.append_flatten_layer(name='flat')
    my_cnn.append_dense_layer(num_nodes=512, activation='relu', trainable=True)
    my_cnn.append_dense_layer(num_nodes=10,
                              activation='softmax',
                              trainable=True)
    # Bug fix: the original reused the SAME optimizer instance for both
    # models; its state (e.g. iteration count) from training the first model
    # would carry into the second fit. Use a fresh instance.
    my_optimizer = SGD(learning_rate=0.01, momentum=0.0)
    my_cnn.model.compile(loss='categorical_crossentropy',
                         optimizer=my_optimizer,
                         metrics=['accuracy'])
    my_cnn_loss_history = my_cnn.model.fit(x_train,
                                           y_train,
                                           batch_size=32,
                                           epochs=10)

    print("\ncorrect_loss_history.history['loss']: ",
          correct_loss_history.history['loss'])
    print("\nmy_cnn_loss_history.history['loss']: ",
          my_cnn_loss_history.history['loss'])

    assert np.allclose(correct_loss_history.history['loss'],
                       my_cnn_loss_history.history['loss'],
                       atol=1e-1 * 6,
                       rtol=1e-3)