import os

import numpy as np
import keras
from keras.callbacks import ModelCheckpoint, CSVLogger, LambdaCallback
from keras.preprocessing.image import ImageDataGenerator

# Project-local helpers used below; the exact module layout (and the value
# of model_path) is an assumption, not part of the original file.
from helpers import (Models, Datasets, basic_model, dataset_preparation,
                     get_image, prepare_dataset, show_confusion_matrix,
                     show_samples)

model_path = './my_model'


def test_predict_once():
    x = get_image()
    model = Models.load_model(model_path)
    yy = Models.predict(model, x)
    # the model classifies into 10 classes, so predictions have shape (n, 10)
    assert yy.shape[1] == 10

def test_confusion_matrix_on_generator():
    # renamed from test_confusion_matrix to avoid shadowing the
    # same-named test further down in this module
    model = Models.load_model('../resources/model')
    input_shape = model.input_shape[1:]
    train, test = prepare_dataset(input_shape, batch_size=1)
    expected = []
    predictions = []
    errors = []
    for i in range(len(test)):
        x, y = next(test)
        y = np.argmax(y)
        z = np.argmax(model.predict(x))
        expected.append(y)
        predictions.append(z)
        if z != y:
            print('expected: {} actual: {}'.format(y, z))
            errors.append((x, z))
    cm = show_confusion_matrix(expected, predictions)
    print('confusion matrix:')
    print(cm)
    # display at most 100 misclassified samples on a 10x10 grid
    errors = errors[:100]
    show_samples(errors, 10, 10)

def train():
    input_shape = (96, 96, 3)
    model = basic_model(input_shape=input_shape, output_class_number=10)
    model.compile(
        optimizer=keras.optimizers.Adam(lr=1e-4),
        loss=keras.losses.categorical_crossentropy,
        metrics=['accuracy'])

    # write the model summary to a file
    with open('model_summary.txt', 'w') as f:
        model.summary(print_fn=lambda line: f.write(str(line) + '\n'))

    train_gen, test_gen = prepare_dataset(model.input_shape[1:])

    # training hyperparameters
    epochs = 10

    # train the model on the dataset
    history = model.fit_generator(
        train_gen,
        # steps_per_epoch=np.ceil(train_gen.n / train_gen.batch_size),
        epochs=epochs,
        validation_data=test_gen,
        # validation_steps=np.ceil(test_gen.n / test_gen.batch_size),
        verbose=1,
        callbacks=[
            ModelCheckpoint(
                filepath='model_epoch{epoch:02d}_loss{val_loss:.2f}.h5',
                monitor='val_loss',
                verbose=1,
                save_best_only=True),
            CSVLogger('training_logs.csv', append=False),
            LambdaCallback(
                on_train_end=lambda logs: Models.save_model(model, 'model_final')),
        ])

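# basic_model() is not defined in this file; below is a minimal sketch of
# what it could look like, assuming a plain Keras Sequential CNN. The real
# architecture may differ, and the name is suffixed to avoid clashing with
# the imported helper.
def basic_model_sketch(input_shape, output_class_number):
    model = keras.models.Sequential([
        keras.layers.Conv2D(32, (3, 3), activation='relu',
                            input_shape=input_shape),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Conv2D(64, (3, 3), activation='relu'),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Flatten(),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(output_class_number, activation='softmax'),
    ])
    return model
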
def test_confusion_matrix_mnist():
    # renamed from test_confusion_matrix: two tests with the same name would
    # shadow each other in a single module
    sample_provider = Datasets.mnist()
    x = np.array([_[0] for _ in sample_provider()])
    y = np.array([_[1] for _ in sample_provider()])
    train, validation = Datasets.split_dataset(x, y)
    expected = [np.argmax(_) for _ in validation[1]]
    dataset = ImageDataGenerator(rescale=1. / 255).flow(
        validation[0], shuffle=False, batch_size=1)
    model = Models.load_model(model_path)
    yy = model.predict_generator(dataset, steps=dataset.n)
    predictions = [np.argmax(_) for _ in yy]
    cm = show_confusion_matrix(expected, predictions)
    print('confusion matrix:')
    print(cm)
    # display at most 100 misclassified samples on a 10x10 grid;
    # predictions are indexed over the validation split, so the matching
    # image is validation[0][i], not x[i]
    errors = []
    for i in range(yy.shape[0]):
        if expected[i] != predictions[i]:
            print('expected: {} actual: {}'.format(expected[i], predictions[i]))
            errors.append((validation[0][i], predictions[i]))
    errors = errors[:100]
    show_samples(errors, 10, 10)

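# show_confusion_matrix() is assumed to compute (and possibly plot) the
# matrix from the two label lists; a minimal sketch using scikit-learn
# under that assumption:
def show_confusion_matrix_sketch(expected, predictions):
    from sklearn.metrics import confusion_matrix
    return confusion_matrix(expected, predictions)
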
def test_batch_normalization():
    train, validation = dataset_preparation()
    model = Models.load_model('./my_model')

    # evaluate the model on the unseen validation set
    scores = model.evaluate_generator(validation, steps=validation.n, verbose=0)
    accuracy_percentage = scores[1] * 100
    print("evaluation on unseen dataset: {} = {}".format(
        model.metrics_names[1], accuracy_percentage))

    # TODO: add batch normalization to Models.create() to gain some accuracy
    assert accuracy_percentage >= 65, "Bad accuracy ({}%): {}".format(
        accuracy_percentage,
        "add a batch normalization layer to your model and retrain it!")

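# Sketch of the batch-normalized block the TODO above asks for, assuming a
# Sequential builder; normalizing between the convolution and its activation
# is one common placement (the conv bias becomes redundant, hence
# use_bias=False):
def conv_bn_block_sketch(model, filters):
    model.add(keras.layers.Conv2D(filters, (3, 3), use_bias=False))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Activation('relu'))
    return model
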
def test_convolutional_network():
    train, validation = dataset_preparation()
    model = Models.load_model('./my_model')

    # evaluate the model on the unseen validation set
    scores = model.evaluate_generator(validation, steps=validation.n, verbose=0)
    accuracy_percentage = scores[1] * 100
    print("evaluation on unseen dataset: {} = {}".format(
        model.metrics_names[1], accuracy_percentage))

    # TODO: add convolutional layers to Models.create() to improve its accuracy
    # https://keras.io/layers/convolutional/
    assert accuracy_percentage >= 75, "Bad accuracy ({}%): {}".format(
        accuracy_percentage,
        "use a convolutional layer in your model and retrain it!")

def test_model_training():
    train, validation = dataset_preparation()

    # TODO: modify the model to improve performance!
    model = Models.create(input_shape=(28, 28, 1), num_classes=10)
    Models.print_model_summary(model)
    Models.train(model, train, validation)
    Models.save_model(model, './my_model')

    # evaluate the model on the unseen validation set
    scores = model.evaluate_generator(validation, steps=validation.n, verbose=0)
    accuracy_percentage = scores[1] * 100
    print("evaluation on unseen dataset: {} = {}".format(
        model.metrics_names[1], accuracy_percentage))

    # TODO: add batch normalization to Models.create() to gain some accuracy
    # https://keras.io/layers/normalization/
    assert accuracy_percentage >= 50, "Bad accuracy ({}%): {}".format(
        accuracy_percentage, "something is wrong :(")

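# dataset_preparation() is assumed to return two Keras generators over
# MNIST-shaped (28, 28, 1) data; a minimal sketch under that assumption.
# batch_size=1 keeps steps=validation.n in the tests above correct, since
# evaluate_generator counts batches, not samples.
def dataset_preparation_sketch(batch_size=1):
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    x_train = x_train.reshape(-1, 28, 28, 1)
    x_test = x_test.reshape(-1, 28, 28, 1)
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)
    generator = ImageDataGenerator(rescale=1. / 255)
    train = generator.flow(x_train, y_train, batch_size=batch_size)
    validation = generator.flow(x_test, y_test, batch_size=batch_size,
                                shuffle=False)
    return train, validation
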
def test_load_model():
    model = Models.load_model(model_path)
    assert isinstance(model, keras.models.Model)

def test_create_model():
    model = Models.create(input_shape=(28, 28, 1), num_classes=10)
    Models.save_model(model, model_path)
    assert os.path.exists(os.path.join(model_path, 'model.h5'))

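# Models.save_model()/Models.load_model() appear to serialize into a
# directory containing model.h5 (see the assertion above); a minimal sketch
# under that assumption:
def save_model_sketch(model, path):
    os.makedirs(path, exist_ok=True)
    model.save(os.path.join(path, 'model.h5'))


def load_model_sketch(path):
    return keras.models.load_model(os.path.join(path, 'model.h5'))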