Example no. 1
0
def test_remove_last_layer():
    """Check that remove_last_layer() drops the final dense layer so the
    network's output width falls back from 2 units ("dense2") to 10
    units ("dense1")."""
    from tensorflow.keras.datasets import cifar10

    # Only a small slice of CIFAR-10 is needed to exercise predict().
    number_of_train_samples_to_use = 100
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = X_train[0:number_of_train_samples_to_use, :]
    y_train = y_train[0:number_of_train_samples_to_use]

    my_cnn = CNN()
    my_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    my_cnn.append_conv2d_layer(num_of_filters=16, kernel_size=(3, 3),
                               padding="same", activation='linear', name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=2, padding="same", strides=2, name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=8, kernel_size=3, activation='relu', name="conv2")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10, activation="relu", name="dense1")
    my_cnn.append_dense_layer(num_nodes=2, activation="relu", name="dense2")

    # Before removal the head is the 2-unit "dense2" layer.
    out = my_cnn.predict(X_train)
    assert out.shape == (number_of_train_samples_to_use, 2)

    # After removal the 10-unit "dense1" layer becomes the output.
    my_cnn.remove_last_layer()
    out = my_cnn.predict(X_train)
    assert out.shape == (number_of_train_samples_to_use, 10)
Example no. 2
0
def test_append_maxpooling2d_layer():
    """A 2x2, stride-2, 'same'-padded max-pool must halve the spatial
    dimensions of a 256x256x3 input."""
    model = CNN()
    model.add_input_layer(shape=(256, 256, 3), name="input0")
    model.append_maxpooling2d_layer(pool_size=(2, 2), padding="same", strides=2, name='pooling')
    # Renamed from `input` to avoid shadowing the builtin.
    batch = np.zeros((10, 256, 256, 3))
    out = model.predict(batch)
    assert out.shape == (10, 128, 128, 3)
def test_train():
    """Train a small MNIST CNN for one epoch and compare evaluate() output
    against previously recorded reference values (2-decimal tolerance)."""
    from tensorflow.keras.datasets import mnist
    import tensorflow.keras as keras

    bsize_entity = 128
    no_of_cls = 10
    epochs = 1

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Append the trailing channel axis; -1 avoids hard-coding sample counts.
    x_train = x_train.reshape(-1, 28, 28, 1)
    x_test = x_test.reshape(-1, 28, 28, 1)
    y_train = keras.utils.to_categorical(y_train, no_of_cls)
    y_test = keras.utils.to_categorical(y_test, no_of_cls)

    model = CNN()
    model.add_input_layer(shape=(28, 28, 1), name="Input")
    model.append_conv2d_layer(32, kernel_size=(3, 3))
    model.append_conv2d_layer(64, kernel_size=(3, 3))
    model.append_maxpooling2d_layer(pool_size=(2, 2))
    model.append_flatten_layer()
    model.append_dense_layer(128, activation="relu")
    model.append_dense_layer(no_of_cls, activation="softmax")
    model.set_loss_function("categorical_crossentropy")
    model.set_metric("accuracy")
    model.set_optimizer("Adagrad")
    model.train(x_train, y_train, batch_size=bsize_entity, num_epochs=epochs)

    mk = model.evaluate(x_test, y_test)
    # Reference [loss, accuracy] from a known-good run.
    correct = np.array([0.06997422293154523, 0.9907])

    np.testing.assert_almost_equal(correct[0], mk[0], decimal=2)
    np.testing.assert_almost_equal(correct[1], mk[1], decimal=2)
Example no. 4
0
def test_evaluate():
    """Compare evaluate() of the project CNN wrapper against a reference
    Keras Sequential model with the same layer stack, trained on a small
    CIFAR-10 subset, using a 1e-2 tolerance.

    NOTE(review): only the reference model uses the zero initializer; the
    CNN wrapper layers use their default initializers — confirm the
    comparison is still expected to hold within tolerance.
    NOTE(review): this name is redefined later in the file, so under pytest
    only the last definition is collected — confirm that is intended.
    """
    from tensorflow.keras.datasets import cifar10
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # Small subsets keep the test reasonably fast.
    X_train = X_train[:100, :]
    y_train = y_train[:100]
    X_test=X_test[:100,:]
    y_test = y_test[:100]
    # np.random.seed(100)
    # Zero initialization makes the reference model's training deterministic.
    initilizer = tensorflow.keras.initializers.Zeros()
    # initilizer = tensorflow.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=20)
    model_testing = CNN()
    # Reference implementation: plain Keras Sequential model.
    model = Sequential()
    model.add(Conv2D(filters=64, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     input_shape=(32, 32, 3), kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Conv2D(filters=70, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Conv2D(filters=75, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Conv2D(filters=90, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(MaxPool2D(pool_size=2, padding='same', strides=1))
    model.add(Flatten())
    model.add(
        Dense(units=256, activation='relu', trainable=True, kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(
        Dense(units=256, activation='relu', trainable=True, kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Dense(units=256, activation='sigmoid', trainable=True, kernel_initializer=initilizer,
                    bias_initializer=initilizer))
    model.compile(optimizer='Adagrad', loss='hinge', metrics=['mse'])
    history = model.fit(x=X_train, y=y_train, batch_size=32, epochs=5, shuffle=False)




    # Mirror of the reference architecture built through the CNN wrapper.
    model_testing.add_input_layer(shape=(32, 32, 3), name="")
    model_testing.append_conv2d_layer(num_of_filters=64, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="1")
    model_testing.append_conv2d_layer(num_of_filters=70, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="2")
    model_testing.append_conv2d_layer(num_of_filters=75, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="3")
    model_testing.append_conv2d_layer(num_of_filters=90, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="4")
    model_testing.append_maxpooling2d_layer(pool_size=2, padding='same', strides=1, name="5")
    model_testing.append_flatten_layer(name='6')
    model_testing.append_dense_layer(num_nodes=256, activation='relu', name='7')
    model_testing.append_dense_layer(num_nodes=256, activation='relu', name='8')
    model_testing.append_dense_layer(num_nodes=256, activation='sigmoid', name='9')
    model_testing.set_optimizer(optimizer='adagrad')
    model_testing.set_loss_function(loss='hinge')
    model_testing.set_metric(metric='mse')
    loss = model_testing.train(X_train=X_train, y_train=y_train, batch_size=32, num_epochs=5)

    model_evaluate = model.evaluate(X_test,y_test)

    model_testing_evaluate = model_testing.evaluate(X_test,y_test)

    #assert model_testing_evaluate == model_evaluate
    # Allow small numerical differences between the two implementations.
    assert np.allclose(model_testing_evaluate,model_evaluate,rtol=1e-2,atol=1e-2)
def test_evaluate():
    """Train the CNN wrapper on a CIFAR-10 subset with seeded random weights,
    then check evaluate() against a recorded value.

    NOTE(review): the positional call `my_cnn.train(X_train, y_train, 60, 10)`
    presumably means batch_size=60, num_epochs=10 — confirm against the
    train() signature.
    NOTE(review): `acc == de` is an exact float comparison against 0.07;
    training results rarely reproduce bit-exactly — consider a tolerance.
    NOTE(review): this name also appears earlier in the file; pytest only
    collects the last definition.
    """
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    number_of_train_samples_to_use = 500
    number_of_test_samples_to_use = 200
    X_train = X_train[0:number_of_train_samples_to_use, :]
    y_train = y_train[0:number_of_train_samples_to_use]
    X_test = X_test[0:number_of_test_samples_to_use, :]
    y_test = y_test[0:number_of_test_samples_to_use]
    my_cnn = CNN()
    my_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='relu',
                               name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               activation='relu',
                               name="conv2")
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name="pool2")
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               activation='relu',
                               name="conv3")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=64, activation="relu", name="dense1")
    my_cnn.append_dense_layer(num_nodes=10,
                              activation="softmax",
                              name="dense2")
    # Re-seed before each layer so every layer draws the same random stream,
    # making the starting weights reproducible.
    np.random.seed(seed=1)
    weigh = my_cnn.get_weights_without_biases(layer_name="conv1")
    w_set = np.random.rand(*weigh.shape)
    my_cnn.set_weights_without_biases(w_set, layer_name="conv1")
    np.random.seed(seed=1)
    weigh = my_cnn.get_weights_without_biases(layer_name="conv2")
    w_set = np.random.rand(*weigh.shape)
    my_cnn.set_weights_without_biases(w_set, layer_name="conv2")
    np.random.seed(seed=1)
    weigh = my_cnn.get_weights_without_biases(layer_name="conv3")
    w_set = np.random.rand(*weigh.shape)
    my_cnn.set_weights_without_biases(w_set, layer_name="conv3")
    np.random.seed(seed=1)
    weigh = my_cnn.get_weights_without_biases(layer_name="dense1")
    w_set = np.random.rand(*weigh.shape)
    my_cnn.set_weights_without_biases(w_set, layer_name="dense1")
    np.random.seed(seed=1)
    weigh = my_cnn.get_weights_without_biases(layer_name="dense2")
    w_set = np.random.rand(*weigh.shape)
    my_cnn.set_weights_without_biases(w_set, layer_name="dense2")
    my_cnn.set_loss_function()
    my_cnn.set_optimizer(optimizer="SGD", learning_rate=0.01, momentum=0.0)
    my_cnn.set_metric(metric="accuracy")
    # los = np.array([2.30277, 2.30264, 2.30242, 2.30225, 2.30207, 2.30190, 2.30171, 2.30154, 2.30138])
    # los = np.around(los,decimals=2)
    my_cnn.train(X_train, y_train, 60, 10)
    acc = my_cnn.evaluate(X_test, y_test)
    de = np.float32(0.07)
    assert (acc == de)
Example no. 6
0
def test_train_and_evaluate():
    """Train a small CIFAR-10 CNN for one epoch, save it to disk, and check
    that evaluate() returns a plausible loss/accuracy pair."""
    from tensorflow.keras.datasets import cifar10
    batch_size = 32
    num_classes = 10
    epochs = 1
    model_name = 'keras_cifar10_trained_model.h5'
    (train_images, train_labels), (test_images,
                                   test_labels) = cifar10.load_data()
    # One-hot encode the integer class labels.
    train_labels = keras.utils.to_categorical(train_labels, num_classes)
    test_labels = keras.utils.to_categorical(test_labels, num_classes)

    my_cnn = CNN()

    # The first conv layer doubles as the input layer via input_shape.
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='relu',
                               name="conv1",
                               input_shape=train_images.shape[1:])
    my_cnn.append_maxpooling2d_layer(pool_size=2,
                                     padding="same",
                                     strides=2,
                                     name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='relu',
                               name="conv4")
    my_cnn.append_maxpooling2d_layer(pool_size=2,
                                     padding="same",
                                     strides=2,
                                     name="pool2")

    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10,
                              activation="softmax",
                              name="dense2")

    my_cnn.set_metric('accuracy')
    my_cnn.set_optimizer('RMSprop')
    my_cnn.set_loss_function('categorical_crossentropy')
    loss = my_cnn.train(train_images, train_labels, batch_size, epochs)

    # Persist the trained model in the current working directory.
    file_path = os.path.join(os.getcwd(), model_name)
    my_cnn.save_model(model_file_name=file_path)

    print("loss :{0}".format(loss))
    # One epoch -> one recorded loss value.
    assert len(loss) == 1

    test_loss, test_acc = my_cnn.evaluate(test_images, test_labels)
    assert test_loss < 5
    assert test_acc < 1
Example no. 7
0
def test_evaluate():
    """Train an MNIST CNN for three epochs and compare evaluate() output
    against recorded reference values (2-decimal tolerance)."""
    from tensorflow.keras.datasets import mnist
    import tensorflow.keras as keras

    batch_size = 128
    num_classes = 10
    epochs = 3

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Append the channel axis; -1 avoids hard-coding the sample counts.
    x_train = x_train.reshape(-1, 28, 28, 1)
    x_test = x_test.reshape(-1, 28, 28, 1)

    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = CNN()
    model.add_input_layer(shape=(28, 28, 1), name="Input")
    model.append_conv2d_layer(32, kernel_size=(3, 3))
    model.append_conv2d_layer(64, kernel_size=(3, 3))
    model.append_maxpooling2d_layer(pool_size=(2, 2))
    model.append_flatten_layer()
    model.append_dense_layer(128, activation="relu")
    model.append_dense_layer(num_classes, activation="softmax")

    model.set_loss_function("categorical_crossentropy")
    model.set_metric("accuracy")
    model.set_optimizer("Adagrad")

    model.train(x_train, y_train, batch_size=batch_size, num_epochs=epochs)

    score = model.evaluate(x_test, y_test)

    # Reference [loss, accuracy] from a known-good run.
    actual = np.array([0.05093684684933396, 0.9907])

    # loss
    np.testing.assert_almost_equal(actual[0], score[0], decimal=2)

    # accuracy
    np.testing.assert_almost_equal(actual[1], score[1], decimal=2)
Example no. 8
0
def test_set_weights_without_biases():
    """Round-trip weights through set/get for randomly sized conv and dense
    stacks and verify the shapes are preserved."""
    my_cnn = CNN()
    # Random input: height in [32,100), width in [20,100), depth in [3,10).
    image_size = (np.random.randint(32, 100), np.random.randint(20, 100),
                  np.random.randint(3, 10))
    number_of_conv_layers = np.random.randint(2, 10)
    my_cnn.add_input_layer(shape=image_size, name="input")
    for k in range(number_of_conv_layers):
        number_of_filters = np.random.randint(3, 100)
        kernel_size = np.random.randint(3, 9)
        my_cnn.append_conv2d_layer(num_of_filters=number_of_filters,
                                   kernel_size=(kernel_size, kernel_size),
                                   padding="same",
                                   activation='linear')

        # Write constant weights, read them back, compare shapes.
        w = my_cnn.get_weights_without_biases(layer_number=k + 1)
        w_set = np.full_like(w, 0.2)
        my_cnn.set_weights_without_biases(w_set, layer_number=k + 1)
        w_get = my_cnn.get_weights_without_biases(layer_number=k + 1)
        print("here 1", w_get.shape)
        print("here 2", w_set.shape)

        assert w_get.shape == w_set.shape
    pool_size = np.random.randint(2, 5)
    my_cnn.append_maxpooling2d_layer(pool_size=pool_size,
                                     padding="same",
                                     strides=2,
                                     name="pool1")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10)
    number_of_dense_layers = np.random.randint(2, 10)
    for k in range(number_of_dense_layers):
        number_of_nodes = np.random.randint(3, 100)
        my_cnn.append_dense_layer(num_nodes=number_of_nodes)

        # Dense layers in this loop sit at index number_of_conv_layers + 4
        # onward (input + convs + pool + flatten + first dense precede them).
        layer_number = k + number_of_conv_layers + 4
        w = my_cnn.get_weights_without_biases(layer_number=layer_number)
        w_set = np.full_like(w, 0.8)
        my_cnn.set_weights_without_biases(w_set, layer_number=layer_number)
        w_get = my_cnn.get_weights_without_biases(layer_number=layer_number)
        assert w_get.shape == w_set.shape
Example no. 9
0
def test_get_weights_without_biases_3():
    """get_weights_without_biases() must return correctly shaped kernels for
    conv layers and None for weight-less layers (input, pool, flatten)."""
    my_cnn = CNN()
    image_size = (np.random.randint(32, 100), np.random.randint(20, 100),
                  np.random.randint(3, 10))
    number_of_conv_layers = np.random.randint(2, 10)
    my_cnn.add_input_layer(shape=image_size, name="input")
    previous_depth = image_size[2]
    for k in range(number_of_conv_layers):
        number_of_filters = np.random.randint(3, 100)
        kernel_size = np.random.randint(3, 9)
        my_cnn.append_conv2d_layer(num_of_filters=number_of_filters,
                                   kernel_size=(kernel_size, kernel_size),
                                   padding="same",
                                   activation='linear')

        # Conv kernels are shaped (kh, kw, in_depth, out_filters).
        actual = my_cnn.get_weights_without_biases(layer_number=k + 1)
        assert actual.shape == (kernel_size, kernel_size, previous_depth,
                                number_of_filters)
        previous_depth = number_of_filters
    # The input layer carries no weights.
    actual = my_cnn.get_weights_without_biases(layer_number=0)
    assert actual is None
    pool_size = np.random.randint(2, 5)
    my_cnn.append_maxpooling2d_layer(pool_size=pool_size,
                                     padding="same",
                                     strides=2,
                                     name="pool1")
    actual = my_cnn.get_weights_without_biases(layer_name="pool1")
    assert actual is None
    my_cnn.append_flatten_layer(name="flat1")
    actual = my_cnn.get_weights_without_biases(layer_name="flat1")
    assert actual is None
    my_cnn.append_dense_layer(num_nodes=10)
    number_of_dense_layers = np.random.randint(2, 10)
    previous_nodes = 10
    for k in range(number_of_dense_layers):
        number_of_nodes = np.random.randint(3, 100)
        my_cnn.append_dense_layer(num_nodes=number_of_nodes)
        actual = my_cnn.get_weights_without_biases(layer_number=k +
                                                   number_of_conv_layers + 4)
        # assert actual.shape == (previous_nodes, number_of_nodes)
        previous_nodes = number_of_nodes
Example no. 10
0
def test_train_and_evaluate():
    """Train a small CIFAR-10 CNN and sanity-check the loss-history length
    and the evaluate() output ranges.

    NOTE(review): this name also appears earlier in the file; pytest collects
    only the last definition — confirm that is intended.
    """
    # Initializing and adding layers
    print("*********** PLEASE WAIT FOR DATA TO LOAD ***********")
    (train_images, train_labels), (test_images,
                                   test_labels) = cifar10.load_data()
    # Scale pixel values to [0, 1].
    train_images, test_images = train_images / 255.0, test_images / 255.0
    new_cnn = CNN()
    new_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    # NOTE(review): strides=3 with the default kernel size looks like it may
    # have been meant as kernel_size=3 — confirm against the CNN API.
    new_cnn.append_conv2d_layer(32,
                                strides=3,
                                activation="relu",
                                name="conv2d_1")
    new_cnn.append_maxpooling2d_layer(pool_size=2, name="maxpool_1")
    new_cnn.append_conv2d_layer(64,
                                strides=3,
                                activation="relu",
                                name="conv2d_2")
    new_cnn.append_maxpooling2d_layer(pool_size=2, name="maxpool_2")
    new_cnn.append_conv2d_layer(64,
                                strides=3,
                                activation="relu",
                                name="conv2d_3")
    new_cnn.append_flatten_layer(name="flatten")
    new_cnn.append_dense_layer(64, activation="relu", name="dense_1")
    new_cnn.append_dense_layer(10, activation="softmax", name="dense_2")
    # Setting Compiler values
    new_cnn.set_loss_function(loss="SparseCategoricalCrossentropy")
    new_cnn.set_optimizer(optimizer="SGD")
    new_cnn.set_metric('accuracy')
    # Entering Num Epoch
    batch_size = 1000
    num_epoch = 10
    history = new_cnn.train(train_images,
                            train_labels,
                            batch_size=batch_size,
                            num_epochs=num_epoch)
    # Assumes train() records one loss entry per batch per epoch —
    # TODO confirm against the train() implementation.
    assert len(history) == len(train_images) / batch_size * num_epoch
    evaluate = new_cnn.evaluate(train_images, train_labels)
    assert evaluate[1] <= 1
    assert evaluate[0] <= 3
Example no. 11
0
def test_evaluate():
    """Fit a reference Keras model and the CNN wrapper on a CIFAR-10 subset
    and assert their evaluate() results are close.

    NOTE(review): the two architectures differ (filter counts, extra pooling
    layer, dense widths), so the comparison relies on the loose tolerance —
    confirm the mismatch is intentional.
    """
    cnn = CNN()
    classes = 10
    bs = 50
    epochs = 50
    train_samples = 500

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train[0:train_samples, :]
    y_train = y_train[0:train_samples, :]
    x_test = x_test[0:train_samples, :]
    y_test = y_test[0:train_samples, :]
    y_train = keras.utils.to_categorical(y_train, classes)
    y_test = keras.utils.to_categorical(y_test, classes)
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255

    # Reference model.
    model = keras.Sequential()
    model.add(Conv2D(45, (1, 1), padding='same', activation='sigmoid'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(15, (3, 3), padding='same', activation='sigmoid'))
    model.add(Flatten())
    model.add(Dense(100, activation='sigmoid'))
    model.add(Dense(classes, activation='softmax'))
    o = keras.optimizers.RMSprop(learning_rate=0.002)
    model.compile(loss='categorical_crossentropy',
                  optimizer=o,
                  metrics=['accuracy'])
    model1 = model.fit(x_train, y_train, batch_size=bs, epochs=epochs)
    evaluate1 = model.evaluate(x_test, y_test)

    cnn.add_input_layer(x_train.shape[1:], name="input")
    cnn.append_conv2d_layer(num_of_filters=32,
                            kernel_size=1,
                            padding='same',
                            strides=1,
                            activation='sigmoid',
                            name="c1")
    cnn.append_maxpooling2d_layer(pool_size=(2, 2), name='p1')
    cnn.append_conv2d_layer(num_of_filters=25,
                            kernel_size=3,
                            padding='same',
                            strides=1,
                            activation='sigmoid',
                            name="c2")
    cnn.append_maxpooling2d_layer(pool_size=(3, 3), name='p2')
    cnn.append_flatten_layer(name='flat')
    cnn.append_dense_layer(num_nodes=150,
                           activation='sigmoid',
                           trainable=True,
                           name='d1')
    cnn.append_dense_layer(num_nodes=classes,
                           activation='softmax',
                           trainable=True,
                           name='d2')
    # Use a fresh optimizer: `o` is already bound to the reference model's
    # variables, and Keras optimizer instances must not be shared between
    # models.
    o2 = keras.optimizers.RMSprop(learning_rate=0.002)
    cnn.model.compile(loss='categorical_crossentropy',
                      optimizer=o2,
                      metrics=['accuracy'])
    model2 = cnn.model.fit(x_train, y_train, batch_size=bs, epochs=epochs)
    evaluate2 = cnn.model.evaluate(x_test, y_test)
    assert np.allclose(evaluate1, evaluate2, rtol=1e-1, atol=1e-1 * 6)
Example no. 12
0
# Hyper-parameters for the CIFAR-10 demo network.
num_classes = 10  # was referenced below but never defined (NameError)
num_predictions = 20

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
# One-hot encode the integer class labels.
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
model = CNN()
model.add_input_layer(x_train.shape[1:])
model.append_conv2d_layer(num_of_filters=32,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_conv2d_layer(num_of_filters=32,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_maxpooling2d_layer(pool_size=2)
model.append_conv2d_layer(num_of_filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_conv2d_layer(num_of_filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_maxpooling2d_layer(pool_size=2)
model.append_flatten_layer()
model.append_dense_layer(num_nodes=512, activation='relu')
model.append_dense_layer(num_nodes=num_classes, activation='softmax')
model.set_optimizer(optimizer='RMSprop', learning_rate=0.0001)
model.set_loss_function('categorical_crossentropy')
model.set_metric(['accuracy'])
Example no. 13
0
def test_evaluate():
    """Train the project CNN wrapper and a matching Keras Sequential model
    with identical SGD settings and check their evaluate() results agree."""
    my_cnn = CNN()
    classes = 10
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    y_train = keras.utils.to_categorical(y_train, classes)
    y_test = keras.utils.to_categorical(y_test, classes)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    # Normalize pixels to [-0.5, 0.5]. The original `X /= 255 - 0.5`
    # divided by 254.5 due to operator precedence; scale first, then shift.
    X_train = X_train / 255 - 0.5
    X_test = X_test / 255 - 0.5
    number_of_train_samples_to_use = 500
    X_train = X_train[0:number_of_train_samples_to_use, :]
    y_train = y_train[0:number_of_train_samples_to_use]
    # Reference Keras model.
    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               input_shape=(32, 32, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(classes, activation='softmax'))
    # `learning_rate` replaces the deprecated `lr` argument.
    opt = SGD(learning_rate=0.01, momentum=0.0)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=32, epochs=50)
    evaluate = model.evaluate(X_test, y_test)

    # Same architecture via the CNN wrapper.
    my_cnn.add_input_layer(shape=X_train.shape[1:], name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu',
                               name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name='pool1')
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu',
                               name="conv2")
    my_cnn.append_flatten_layer(name='flat')
    my_cnn.append_dense_layer(num_nodes=512,
                              activation='relu',
                              trainable=True,
                              name='dense1')
    my_cnn.append_dense_layer(num_nodes=classes,
                              activation='softmax',
                              trainable=True,
                              name='dense2')
    # A separate optimizer instance per model: Keras optimizers hold
    # per-model slot state and must not be shared.
    my_opt = SGD(learning_rate=0.01, momentum=0.0)
    my_cnn.model.compile(optimizer=my_opt,
                         loss='categorical_crossentropy',
                         metrics=['accuracy'])
    my_cnn.model.fit(X_train, y_train, batch_size=32, epochs=50)
    my_eval = my_cnn.model.evaluate(X_test, y_test)
    assert np.allclose(evaluate, my_eval, rtol=1e-1, atol=1e-1 * 6)
def test_eval():
    """Smoke-test building a reference Keras model and the CNN wrapper side
    by side; the substantive train/evaluate comparison is commented out and
    only trivial bookkeeping assertions remain.

    NOTE(review): `Y_train = keras.utils.to_categorical(Y_test)` overwrites
    the training labels with encoded *test* labels — looks like a typo for
    `Y_test = ...`. Note that `Y_test` staying as raw integer labels is what
    makes the later evaluate() with SparseCategoricalCrossentropy work, so
    confirm intent before "fixing" it.
    NOTE(review): `Y_train = Y_train.astype('float32') / 255` divides labels
    by 255 — presumably `X_test` was meant.
    NOTE(review): the reference model is compiled three times with the same
    settings; the later calls are redundant.
    """
    my_cnn = CNN()
    n_c = 5
    train_s = 100
    (X_train, Y_train), (X_test, Y_test) = datasets.cifar10.load_data()
    # Small subsets keep the test fast.
    X_train = X_train[0:train_s, :]
    Y_train = Y_train[0:train_s, :]
    X_test = X_test[0:train_s, :]
    Y_test = Y_test[0:train_s, :]
    Y_train = tf.keras.utils.to_categorical(Y_train)
    Y_train = keras.utils.to_categorical(Y_test)
    X_train = X_train.astype('float32') / 255
    Y_train = Y_train.astype('float32') / 255

    # Reference Keras model.
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(10))

    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    #history = model.fit(X_train, Y_train,batch_size=20, epocs=100)

    x = 1

    # Same topology through the CNN wrapper.
    my_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='linear',
                               name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=2,
                                     padding="same",
                                     strides=2,
                                     name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=8,
                               kernel_size=3,
                               activation='relu',
                               name="conv2")
    test1 = [0] * 3
    my_cnn.append_flatten_layer(name="flat1")
    test1[1] = 3
    my_cnn.append_dense_layer(num_nodes=10, activation="relu", name="dense1")
    my_cnn.append_dense_layer(num_nodes=2, activation="relu", name="dense2")
    assert x == 1

    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    model.compile(
        optimizer='adam',
        metrics=['accuracy'],
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
    test = model.evaluate(X_test, Y_test)
    #history = model.fit(x=X_train,y=Y_train,batch_size=100,epochs=200,shuffle=False)
    test1[0] = 2
    #history=my_cnn.train(X_train,Y_train,batch_size=100,num_epochs=200)

    #test2=my_cnn.evaluate(X_test,Y_test)
    assert test1[0] < test1[1]
Example no. 15
0
def test_model_train():
    """Train the CNN wrapper and an equivalent Keras Sequential model with
    identical SGD settings and assert their loss histories match closely."""
    my_cnn = CNN()
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)

    # Small subsets keep the test fast; drop the slicing to use everything.
    x_train = x_train[0:600, :]
    x_test = x_test[0:100]
    y_train = y_train[0:600, :]
    y_test = y_test[0:100]
    x_train = x_train.astype('float32') / 255.0
    x_test = x_test.astype('float32') / 255.0

    # Reference Keras model.
    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               input_shape=(32, 32, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    # `learning_rate` replaces the deprecated `lr` argument.
    reference_optimizer = SGD(learning_rate=0.01, momentum=0.0)
    model.compile(optimizer=reference_optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    Correct_Loss_History = model.fit(x_train,
                                     y_train,
                                     batch_size=32,
                                     epochs=10,
                                     validation_data=(x_test, y_test))

    # Same architecture via the CNN wrapper.
    my_cnn.add_input_layer(shape=x_train.shape[1:], name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu')
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name='pool1')
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               padding='same',
                               strides=1,
                               activation='relu')
    my_cnn.append_flatten_layer(name='flat')
    my_cnn.append_dense_layer(num_nodes=512, activation='relu', trainable=True)
    my_cnn.append_dense_layer(num_nodes=10,
                              activation='softmax',
                              trainable=True)
    # Fresh optimizer instance: Keras optimizers keep per-model slot
    # variables, so sharing one across two models is unsafe.
    my_optimizer = SGD(learning_rate=0.01, momentum=0.0)
    my_cnn.model.compile(loss='categorical_crossentropy',
                         optimizer=my_optimizer,
                         metrics=['accuracy'])
    my_cnn_Loss_History = my_cnn.model.fit(x_train,
                                           y_train,
                                           batch_size=32,
                                           epochs=10)

    print("\nCorrect_Loss_History.history['loss']: ",
          Correct_Loss_History.history['loss'])
    print("\nmy_cnn_Loss_History.history['loss']: ",
          my_cnn_Loss_History.history['loss'])

    assert np.allclose(Correct_Loss_History.history['loss'],
                       my_cnn_Loss_History.history['loss'],
                       atol=1e-1 * 6,
                       rtol=1e-3)