def trainCNNModel(X_LL_train, X_LH_train, X_HL_train, X_HH_train, y_train,
                  X_LL_test, X_LH_test, X_HL_test, X_HH_test, y_test, num_epochs):
    batch_size = 32  # in each iteration, we consider 32 training examples at once
    num_train, height, width, depth = X_LL_train.shape
    num_classes = len(np.unique(y_train))
    Y_train = np_utils.to_categorical(y_train, num_classes)  # one-hot encode the labels
    Y_test = np_utils.to_categorical(y_test, num_classes)    # one-hot encode the labels

    # Save the weights with the lowest validation loss seen during training
    checkPointFolder = 'checkPoint'
    checkpoint_name = checkPointFolder + '/Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
    if not os.path.exists(checkPointFolder):
        os.makedirs(checkPointFolder)
    checkpoint = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='auto')
    callbacks_list = [checkpoint]

    model = createModel(height, width, depth, num_classes)
    model.compile(loss='binary_crossentropy',         # cross-entropy loss on the two classes
                  # loss='categorical_crossentropy',
                  optimizer='adam',                    # using the Adam optimiser
                  metrics=['accuracy'])                # reporting the accuracy

    # Train on the four wavelet sub-bands, holding out 10% of the data for validation
    model.fit([X_LL_train, X_LH_train, X_HL_train, X_HH_train], Y_train,
              batch_size=batch_size,
              epochs=num_epochs,
              verbose=1,
              validation_split=0.1,
              callbacks=callbacks_list)

    # Evaluate the trained model on the test set
    score, acc = model.evaluate([X_LL_test, X_LH_test, X_HL_test, X_HH_test], Y_test, verbose=1)
    print('Test loss: {:.5f}, test accuracy: {:.5f}'.format(score, acc))

    model.save('moirePattern3CNN_.h5')
    return model
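# Sketch (not part of the original pipeline): the ModelCheckpoint callback above writes
# weight files named 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5' into checkPoint/. A helper
# like the one below could reload the checkpoint with the lowest validation loss; the
# function name and folder default are illustrative assumptions, not from this repository.
def loadBestCheckpoint(model, checkPointFolder='checkPoint'):
    import glob
    candidates = glob.glob(os.path.join(checkPointFolder, 'Weights-*.hdf5'))
    if not candidates:
        return model  # nothing saved yet; keep the current weights
    # the val_loss is the last '--'-separated field of the file name; lower is better
    best = min(candidates, key=lambda f: float(os.path.splitext(f)[0].split('--')[-1]))
    model.load_weights(best)
    return model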
def main(args):
    weights_file = args.weightsFile
    positiveImagePath = args.positiveTestImages
    negativeImagePath = args.negativeTestImages

    # Run the preprocessing script over the test images before reading the wavelet data
    os.system("python3 createTrainingData.py {} {} {}".format(positiveImagePath, negativeImagePath, 1))

    # NOTE: the last two arguments are assumed to be the same test-image folders;
    # adjust if readWaveletData expects separate processed-data paths.
    X_LL, X_LH, X_HL, X_HH, X_index, Y, imageCount = readWaveletData(
        positiveImagePath, negativeImagePath, positiveImagePath, negativeImagePath)

    # height, width, depth and num_classes are module-level constants defined with the model
    X_LL = np.array(X_LL).reshape((imageCount, height, width, depth))
    X_LH = np.array(X_LH).reshape((imageCount, height, width, depth))
    X_HL = np.array(X_HL).reshape((imageCount, height, width, depth))
    X_HH = np.array(X_HH).reshape((imageCount, height, width, depth))

    CNN_model = createModel(height, width, depth, num_classes)
    CNN_model.load_weights(weights_file)
    evaluate(CNN_model, X_LL, X_LH, X_HL, X_HH, Y)
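# Minimal command-line entry point (a sketch; the original repository may define its own
# argument parser). The attribute names match those read in main() above.
def parse_arguments():
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate the moire-pattern CNN on a test set')
    parser.add_argument('weightsFile', help='path to the trained weights (.hdf5/.h5)')
    parser.add_argument('positiveTestImages', help='folder of positive (moire) test images')
    parser.add_argument('negativeTestImages', help='folder of negative (clean) test images')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_arguments())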