# --- Naive Bayes evaluation on the face data set -------------------------
# Trains NaiveBayesPredictor on a random subset of the training images and
# measures accuracy/runtime on the test set; the accumulated per-fraction
# results are then plotted and their standard deviation printed.
# NOTE(review): relies on outer names (face_data, test_features, n, n_test,
# n_samples, percentage_list, time_list, start_time) defined elsewhere.

# Draw n_samples distinct training examples (indices sampled without
# replacement), then slice images and labels with the same index array so
# they stay aligned.
idx = np.random.choice(n_test, n_samples, replace=False)
train_img_sample = np.array(face_data.face_train_imgs)[idx].tolist()
train_labels_sample = np.array(face_data.face_train_labels)[idx].tolist()

# Binary classification: labels are 0 (not a face) or 1 (face).
nbd = NaiveBayesPredictor(train_img_sample, train_labels_sample, range(2))
predictions = [nbd.predict(feature) for feature in test_features]

# Tally predictions against the true test labels.
correct = sum(
    pred == actual
    for pred, actual in zip(predictions, face_data.face_test_labels)
)
wrong = len(predictions) - correct

print("Stats while using {} % of training data".format(n * 10))
print("No. of correct guesses = {}".format(correct))
print("No. of wrong guesses = {}".format(wrong))
percentage = (correct * 100) / (correct + wrong)
print("Percentage accuracy: {}".format(percentage))
percentage_list.append(percentage)

run_time = time.time() - start_time
print("The run_time in seconds is: {}".format(run_time))
time_list.append(run_time)

# Accuracy and runtime curves over the 10%..100% training-data fractions.
p3_utils.plot_line_graph(range(10, 101, 10), percentage_list,
                         "Naive Bayes Accuracy chart for faces",
                         "Percentage of training data used",
                         "Accuracy obtained on test data")
p3_utils.plot_line_graph(range(10, 101, 10), time_list,
                         "Naive Bayes Runtime chart for faces",
                         "Percentage of training data used",
                         "Run time in seconds")
print(statistics.stdev(percentage_list))
# --- KNN evaluation on the face data set ---------------------------------
# Runs the KNN face classifier at 10%, 20%, ..., 100% of the training data,
# parallelising the per-image predictions across a process pool, then plots
# accuracy and runtime curves.
# NOTE(review): relies on outer names (KnnFaces, start_time, p3_utils)
# defined elsewhere.
exec_time = []
accuracy = []
for fraction in np.arange(0.1, 1.1, 0.1):
    knnf = KnnFaces(5, fraction)

    # Fan the test set out to a pool of 10 worker processes,
    # 10 items per chunk.
    agents = 10
    chunksize = 10
    with Pool(processes=agents) as pool:
        predictions = pool.map(knnf.predict, knnf.testData, chunksize)

    # Tally predictions against the true test labels.
    correct = sum(
        pred == actual
        for pred, actual in zip(predictions, knnf.face_data.face_test_labels)
    )
    wrong = len(predictions) - correct

    print("The predictions are: ", predictions)
    print("The actual labels are:", knnf.face_data.face_test_labels)
    print("No. of correct guesses = {}".format(correct))
    print("No. of wrong guesses = {}".format(wrong))

    # Compute the accuracy once so the printed and stored values agree.
    percentage = (correct * 100) / (correct + wrong)
    print("Percentage accuracy: {}".format(percentage))
    accuracy.append(percentage)

    # Read the clock once: the original called time.time() twice, so the
    # printed elapsed time could differ from the stored one.
    elapsed = time.time() - start_time
    print('execution time', elapsed)
    exec_time.append(elapsed)

# Accuracy and runtime curves over the 10%..100% training-data fractions.
p3_utils.plot_line_graph(range(10, 101, 10), accuracy,
                         "Knn Accuracy chart for faces",
                         "Percentage of training data used",
                         "Accuracy obtained on test data")
p3_utils.plot_line_graph(range(10, 101, 10), exec_time,
                         "Knn Runtime chart for faces",
                         "Percentage of training data used",
                         "Run time in seconds")
print(statistics.stdev(accuracy))
# --- Perceptron validation/test reporting on the face data set -----------
# Scores the trained classifier on the validation and test splits, records
# accuracy and runtime, then plots the accumulated curves.
# NOTE(review): relies on outer names (errors, classifier, face_data,
# validationDataList, testDataList, percentages, runtimes, start_time,
# p3_utils) defined elsewhere.
print('errors over 3 iterations', errors)

print('Validating...')
validation_guesses = classifier.classify(validationDataList)
correct = sum(
    guess == label
    for guess, label in zip(validation_guesses,
                            face_data.face_validation_labels)
)
print(str(correct), 'correct out of ',
      str(len(face_data.face_validation_labels)))

print('Testing...')
test_guesses = classifier.classify(testDataList)
correct = sum(
    guess == label
    for guess, label in zip(test_guesses, face_data.face_test_labels)
)
percentage = (100.0 * correct / len(face_data.face_test_labels))
print(str(correct), 'correct out of ', str(len(face_data.face_test_labels)),
      'percentage ', percentage)
percentages.append(percentage)

runtime = time.time() - start_time
print("Runtime taken: ", runtime)
runtimes.append(runtime)

# Accuracy and runtime curves over the 10%..100% training-data fractions.
p3_utils.plot_line_graph(range(10, 101, 10), percentages,
                         "Accuracy of perceptron for face data",
                         "Percentage of training data used",
                         "Percentage accuracy obtained on test data")
# Fixed typo in the axis label: "trianing" -> "training".
p3_utils.plot_line_graph(range(10, 101, 10), runtimes,
                         "Runtime of perceptron for face data",
                         "Percentage of training data used",
                         "Runtime taken for training and testing")
print(statistics.stdev(percentages))