clf = initializeClassifier('ANN', x_train, ANNparams)
history = clf.fit(x_train, y_train_enc, epochs=ANNparams[5], verbose=1,
                  validation_data=(x_test, y_test_enc))
#history1 = clf.fit(x_train, y_train_enc, epochs=5000, verbose=1, validation_data=(x_test, y_test_enc))

# plot the learning curves; 'val_acc' is the history key used by older Keras
# versions, newer releases name it 'val_accuracy'
plt.plot(history.history['val_loss'], 'k', label='Test loss')
plt.plot(history.history['val_acc'], 'k:', label='Test accuracy')
plt.xlabel('number of epochs')
legend = plt.legend(loc='upper center', fontsize='x-large')
plt.ylim(0, 1.5)
plt.show()
y_pred = clf.predict_classes(x_test)

if alg == 'CNN':
    # add the trailing channel dimension expected by the convolutional network
    x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
    x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
    clf = initializeFCN(x_train, CNNparams)
    history = clf.fit(x_train, y_train_enc, epochs=CNNparams[7], verbose=0,
                      validation_data=(x_test, y_test_enc))
    plt.plot(history.history['val_loss'], 'k', label='Test loss')
    plt.plot(history.history['val_acc'], 'k:', label='Test accuracy')
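# initializeClassifier() and initializeFCN() are project helpers that are not shown
# in this listing. The sketch below only illustrates the kind of dense Keras model
# such a helper is assumed to return; the function name, layer sizes and the single
# hidden layer are assumptions, not the actual implementation.
from keras.models import Sequential
from keras.layers import Dense

def initializeClassifier_sketch(x_train, n_classes, n_hidden=64):
    # one hidden ReLU layer and a softmax output, compiled for one-hot labels
    model = Sequential()
    model.add(Dense(n_hidden, activation='relu', input_shape=(x_train.shape[1],)))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model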
                  verbose=0, validation_data=(x_val, y_val_enc))
# record the best validation accuracy/loss and the epochs at which they occurred
bestValAcc = np.max(history.history['val_acc'])
bestValLoss = np.min(history.history['val_loss'])
bestValAccEpoch = np.argmax(history.history['val_acc'])
bestValLossEpoch = np.argmin(history.history['val_loss'])
# plt.plot(history.history['val_loss'], 'k', label='Validation loss')
# plt.plot(history.history['val_acc'], 'k:', label='Validation accuracy')
# plt.xlabel('number of epochs')
# legend = plt.legend(loc='upper center', fontsize='x-large')
# plt.ylim(0, 1.5)
# plt.show()
y_pred = clf.predict_classes(x_val)

if alg == 'CNN':
    # add the trailing channel dimension expected by the convolutional network
    x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
    x_val = x_val.reshape((x_val.shape[0], x_val.shape[1], 1))
    clf = initializeFCN(x_train, params)
    history = clf.fit(x_train, y_train_enc, epochs=2000, verbose=0,
                      validation_data=(x_val, y_val_enc))
    bestValAcc = np.max(history.history['val_acc'])
    bestValLoss = np.min(history.history['val_loss'])
    bestValAccEpoch = np.argmax(history.history['val_acc'])
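# The argmax/argmin bookkeeping above recovers the best epoch only after the full
# 2000-epoch run has finished. An alternative (not used in this code) is to let
# Keras checkpoint and stop automatically; EarlyStopping and ModelCheckpoint are
# standard Keras callbacks, and the file name below is purely illustrative.
from keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping(monitor='val_acc', patience=100, restore_best_weights=True),
    ModelCheckpoint('best_model.h5', monitor='val_acc', save_best_only=True),
]
# history = clf.fit(x_train, y_train_enc, epochs=2000, verbose=0,
#                   validation_data=(x_val, y_val_enc), callbacks=callbacks)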
labels = np.array(labels)
print("[INFO] features matrix: {:.2f}MB".format(features.nbytes / (1024 * 1000.0)))

# partition the data into training and testing splits, using 80%
# of the data for training and the remaining 20% for testing
(trainFeat, testFeat, trainLabels, testLabels) = train_test_split(
    features, labels, test_size=0.2, random_state=42)

# train and evaluate a k-NN classifier on the histogram representations
print("[INFO] evaluating histogram accuracy...")
model = KNeighborsClassifier(n_neighbors=20, n_jobs=-1)
model.fit(trainFeat, trainLabels)
acc = model.score(testFeat, testLabels)
# scikit-learn's KNeighborsClassifier has no predict_classes(); predict() returns the labels
score_feat = model.predict(testFeat)
print("Detailed classification report")
print(classification_report(testLabels, score_feat))
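# The 'features' matrix is built from per-image histograms elsewhere in the code.
# A minimal sketch of such an extractor, assuming OpenCV and a flattened 3D HSV
# colour histogram (the function name and bin counts are illustrative choices):
import cv2

def extract_color_histogram_sketch(image, bins=(8, 8, 8)):
    # compute a 3D histogram over the H, S and V channels
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1, 2], None, bins, [0, 180, 0, 256, 0, 256])
    cv2.normalize(hist, hist)   # scale in place so features are comparable across images
    return hist.flatten()       # 8*8*8 = 512-dimensional feature vector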