def test_append_dense_layer():
    """A dense layer appended to a flat input should yield (batch, num_nodes) output."""
    model = CNN()
    model.add_input_layer(shape=(256 * 256 * 3), name="input0")
    model.append_dense_layer(num_nodes=100, activation='relu')
    # Named 'x' rather than 'input' to avoid shadowing the builtin.
    x = np.zeros((10, 256 * 256 * 3))
    result = model.predict(x)
    assert result.shape == (10, 100)
def test_add_flatten_layer():
    """Flattening a (256, 256, 3) input should produce a (batch, 256*256*3) output."""
    model = CNN()
    model.add_input_layer(shape=(256, 256, 3), name="input0")
    model.append_flatten_layer(name='flatten')
    # Named 'x' rather than 'input' to avoid shadowing the builtin.
    x = np.zeros((10, 256, 256, 3))
    result = model.predict(x)
    assert result.shape == (10, 256 * 256 * 3)
def test_append_conv2d_layer():
    """A 10-filter 'same'-shaped conv on (256, 256, 3) input keeps spatial dims, channels=10."""
    model = CNN()
    model.add_input_layer(shape=(256, 256, 3), name="input0")
    model.append_conv2d_layer(10, (3, 3), activation='relu')
    # Named 'x' rather than 'input' to avoid shadowing the builtin.
    x = np.zeros((20, 256, 256, 3))
    result = model.predict(x)
    assert result.shape == (20, 256, 256, 10)
def test_append_maxpooling2d_layer():
    """2x2 max-pooling with stride 2 should halve each spatial dimension."""
    model = CNN()
    model.add_input_layer(shape=(256, 256, 3), name="input0")
    model.append_maxpooling2d_layer(pool_size=(2, 2), padding="same", strides=2, name='pooling')
    # Named 'x' rather than 'input' to avoid shadowing the builtin.
    x = np.zeros((10, 256, 256, 3))
    result = model.predict(x)
    assert result.shape == (10, 128, 128, 3)
def test_remove_last_layer():
    """remove_last_layer should drop the final dense layer, changing output width 2 -> 10."""
    from tensorflow.keras.datasets import cifar10

    number_of_train_samples_to_use = 100
    # Only a small slice of the training images is needed; labels and the
    # test split are irrelevant here, so they are discarded.
    (X_train, _), _ = cifar10.load_data()
    X_train = X_train[0:number_of_train_samples_to_use, :]

    my_cnn = CNN()
    my_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    my_cnn.append_conv2d_layer(num_of_filters=16, kernel_size=(3, 3), padding="same",
                               activation='linear', name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=2, padding="same", strides=2, name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=8, kernel_size=3, activation='relu', name="conv2")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10, activation="relu", name="dense1")
    my_cnn.append_dense_layer(num_nodes=2, activation="relu", name="dense2")

    out = my_cnn.predict(X_train)
    assert out.shape == (number_of_train_samples_to_use, 2)

    # Removing "dense2" should expose "dense1" (10 nodes) as the new output.
    my_cnn.remove_last_layer()
    out = my_cnn.predict(X_train)
    assert out.shape == (number_of_train_samples_to_use, 10)
def test_predict():
    """predict on linspace(0, 10) input with all-2 weights and biases [4, 2, 2, 2, 2].

    sum(linspace(0, 10, 10)) == 50, so each linear node computes 2*50 + bias:
    104 for the first node (bias doubled to 4) and 102 for the rest.
    """
    X = np.float32([np.linspace(0, 10, num=10)])

    my_cnn = CNN()
    my_cnn.add_input_layer(shape=(10,), name="input0")
    my_cnn.append_dense_layer(num_nodes=5, activation='linear', name="layer1")

    w = my_cnn.get_weights_without_biases(layer_name="layer1")
    my_cnn.set_weights_without_biases(np.full_like(w, 2), layer_name="layer1")

    b = my_cnn.get_biases(layer_name="layer1")
    b_set = np.full_like(b, 2)
    b_set[0] = b_set[0] * 2  # first bias doubled -> 4
    my_cnn.set_biases(b_set, layer_name="layer1")

    actual = my_cnn.predict(X)
    assert np.array_equal(actual, np.array([[104., 102., 102., 102., 102.]]))