def stack_discriminator_layers(init):
    """Assemble the discriminator network as a ``Sequential`` stack.

    Parameters
    ----------
    init : weight-initializer identifier, forwarded to ``Sequential`` as
        its ``init_method``.

    Returns
    -------
    The uncompiled ``Sequential`` model: two leaky-ReLU conv blocks with
    dropout, flattened into a 2-unit sigmoid classification head.

    NOTE(review): ``img_dims`` and the layer classes are expected to be
    defined elsewhere in this file/project.
    """
    model = Sequential(init_method=init)

    # Conv block 1 — 64 filters; only this layer declares the input shape.
    model.add(Conv2D(64, kernel_size=(5, 5), padding='same',
                     input_shape=img_dims))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))

    # Conv block 2 — 128 filters, same 5x5 kernel and dropout rate.
    model.add(Conv2D(128, kernel_size=(5, 5), padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))

    # Head — flatten and score with a 2-unit sigmoid output.
    model.add(Flatten())
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
# --- CNN classifier for the 1x8x8 digit images -----------------------------
# NOTE(review): `opt`, `one_hot`, `train_data`, `train_label`, `test_data`
# and `test_label` are expected to be defined earlier in this file.
model = Sequential(init_method='he_uniform')

# Conv block 1: 32 filters over the single-channel 8x8 input.
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
                 input_shape=(1, 8, 8), padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())

# Conv block 2: 64 filters, followed by 2x2 max-pooling.
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',
                 padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())

# Dense head.
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))  # 10 digit classes

model.compile(loss='categorical_crossentropy', optimizer=opt)

# Train for a fixed number of epochs; images are reshaped to NCHW (N,1,8,8)
# and labels one-hot encoded. `fit_stats` keeps the training history.
model_epochs = 12
fit_stats = model.fit(train_data.reshape(-1, 1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)