def test_max_sensitivity_specificity(self): filename = '../../data/emotiv/EEG_Data_filtered.csv' dataset = DataReader.read_data(filename, ',') training_set, test_set = ExperimentorService.split_dataset(dataset, ratio=0.1) artifact_size = 20 window_size = 20 threshold_max, threshold_avg, threshold_avg_max = ExperimentorService.calibrate(training_set, window_size) artifact_test_set, artifact_list = ExperimentorService.artifactify(test_set, artifact_size, randomly_add_artifacts=True) reconstructed_test_set_max, rejections_max = ExperimentorService.pca_reconstruction(artifact_test_set, window_size, threshold_max) reconstructed_test_set_avg, rejections_avg = ExperimentorService.pca_reconstruction(artifact_test_set, window_size, threshold_avg) reconstructed_test_set_avg_max, rejections_max_avg = ExperimentorService.pca_reconstruction(artifact_test_set, window_size, threshold_avg_max) sensitivity_max, specificity_max = ExperimentorService.sensitivity_specificity(rejections_max, artifact_list) sensitivity_avg, specificity_avg = ExperimentorService.sensitivity_specificity(rejections_avg, artifact_list) sensitivity_avg_max, specificity_avg_max = ExperimentorService.sensitivity_specificity(rejections_max_avg, artifact_list) print '--- MAX THRESHOLD ---' print 'Sensitivity: ', sensitivity_max print 'Specificity: ', specificity_max print '--- AVG THRESHOLD ---' print 'Sensitivity: ', sensitivity_avg print 'Specificity: ', specificity_avg print '--- AVG_MAX THRESHOLD ---' print 'Sensitivity: ', sensitivity_avg_max print 'Specificity: ', specificity_avg_max
def visualize_cross_validation_bars_percentage(training_set, test_set, artifact_dataset, window_sizes, name='figure_cross_validation_bars_difference'): """ :param artifact_dataset: :param thresholds: :param test_set: :param window_sizes: :param name: :return: """ # Do cross validation differences = {'max': [], 'avg': [], 'avg_max': []} for window_size in window_sizes: for i, threshold in enumerate(ExperimentorService.calibrate(training_set, window_size)): original_windows = ExperimentorService.windows(test_set.clone(), window_size) artifact_windows = ExperimentorService.windows(artifact_dataset.clone(), window_size) current_difference = [] for idx, original_window in enumerate(original_windows): reconstructed_window, rejected = ExperimentorService.pca_reconstruction(artifact_windows[idx], window_size, threshold) current_difference += ExperimentorService.difference(original_window, reconstructed_window) if i == 0: differences['max'] += [np.mean(current_difference)] elif i == 1: differences['avg'] += [np.mean(current_difference)] else: differences['avg_max'] += [np.mean(current_difference)] print 'threshold: ' + differences.keys()[i] + ' - window size: ' + str(window_size) + ' - difference: ' + str(np.mean(current_difference)) fig, ax = plt.subplots() indexs = np.arange(len(differences['max'])) width = 0.20 ax.bar(indexs, differences['max'], width, label='Max eigenvalue threshold', color='c', alpha=0.8) ax.bar(indexs + width, differences['avg'], width, label='Average eigenvalue threshold', color='b', alpha=0.8) ax.bar(indexs + width * 2, differences['avg_max'], width, label='Average of max eigenvalue threshold', color='m', alpha=0.8) ax.set_xticks(indexs + width * 1.5) ax.set_xticklabels([str(window_size) for window_size in window_sizes]) plt.xticks(rotation=70) ax.set_title('Difference cross validation') ax.set_ylabel('Difference %') ax.set_xlabel('Window size') plt.legend(loc='upper right') plt.savefig(name)
def test_compare_mse(self): filename = '../../data/emotiv/EEG_Data_filtered.csv' dataset = DataReader.read_data(filename, ',') training_set, test_set = ExperimentorService.split_dataset(dataset, ratio=0.2) artifact_size = 20 window_size = 40 threshold_max, threshold_avg, threshold_avg_max = ExperimentorService.calibrate(training_set, window_size) print threshold_max print threshold_avg print threshold_avg_max artifact_dataset, _ = ExperimentorService.artifactify(test_set, artifact_size, True) reconstructed_dataset_avg, rejections = ExperimentorService.pca_reconstruction(artifact_dataset, window_size, threshold_avg) reconstructed_dataset_max, rejections = ExperimentorService.pca_reconstruction(artifact_dataset, window_size, threshold_max) reconstructed_dataset_avg_max, rejections = ExperimentorService.pca_reconstruction(artifact_dataset, window_size, threshold_avg_max) Visualizer.visualize_mse_on_same(test_set, reconstructed_dataset_max, reconstructed_dataset_avg, reconstructed_dataset_avg_max, window_size)
def visualize_cross_validation_curves(training_set, test_set, artifact_dataset, window_sizes, name='figure_cross_validation_curves'):
    """
    Cross-validate reconstruction MSE across window sizes and draw one curve
    per threshold type (max, avg, avg_max).

    :param training_set: clean dataset used for threshold calibration
    :param artifact_dataset: test_set with injected artifacts
    :param test_set: clean reference dataset
    :param window_sizes: window sizes to evaluate
    :param name: filename of the saved figure
    :return: None (figure is saved to disk)
    """
    # calibrate() yields the thresholds in exactly this order.
    threshold_keys = ('max', 'avg', 'avg_max')
    mse = {'max': [], 'avg': [], 'avg_max': []}

    for window_size in window_sizes:
        thresholds = ExperimentorService.calibrate(training_set, window_size)
        for i, threshold in enumerate(thresholds):
            original_windows = ExperimentorService.windows(test_set.clone(), window_size)
            artifact_windows = ExperimentorService.windows(artifact_dataset.clone(), window_size)

            window_errors = []
            for idx, original_window in enumerate(original_windows):
                reconstructed_window, _ = ExperimentorService.pca_reconstruction(artifact_windows[idx], window_size, threshold)
                window_errors += ExperimentorService.mse(original_window, reconstructed_window)

            mse[threshold_keys[i]] += [np.mean(window_errors)]

    fig, ax = plt.subplots()
    ax.plot(mse['max'], label='Max eigenvalue threshold', color='c')
    ax.plot(mse['avg'], label='Average eigenvalue threshold', color='b')
    ax.plot(mse['avg_max'], label='Average of max eigenvalue threshold', color='m')

    ax.set_xticks(range(len(window_sizes)))
    ax.set_xticklabels([str(window_size) for window_size in window_sizes])

    ax.set_title('mse cross validation')
    ax.set_ylabel('Mean squared error')
    ax.set_xlabel('Window size')
    plt.legend(loc='upper right')
    plt.savefig(name)
def test_for_report(self): filename = '../../data/emotiv/EEG_Data_filtered.csv' dataset = DataReader.read_data(filename, ',') training_set, test_set = ExperimentorService.split_dataset(dataset, ratio=0.2) artifact_size = 20 window_size = 20 threshold_max, threshold_avg, threshold_avg_max = ExperimentorService.calibrate(training_set, window_size) print 'max: ' + str(threshold_max) print 'avg: ' + str(threshold_avg) print 'avg_max: ' + str(threshold_avg_max) artifact_dataset, _ = ExperimentorService.artifactify(test_set, artifact_size, True) reconstructed_dataset_max, rejections = ExperimentorService.pca_reconstruction(artifact_dataset, window_size, threshold_max) Visualizer.visualize_timeLine(dataset, test_set, artifact_dataset, reconstructed_dataset_max)
def test_speed(self): filename = '../../data/emotiv/EEG_Data_filtered.csv' dataset = DataReader.read_data(filename, ',') training_set, test_set = ExperimentorService.split_dataset(dataset, ratio=0.2) artifact_size = 20 window_size = 20 threshold_max, threshold_avg, threshold_avg_max = ExperimentorService.calibrate(training_set, window_size) artifact_dataset, _ = ExperimentorService.artifactify(test_set, artifact_size, randomly_add_artifacts=False) start_time_max = time.time() reconstructed_dataset_max, _ = ExperimentorService.pca_reconstruction(artifact_dataset, window_size, threshold_max) end_time_max = time.time() - start_time_max # reconstructed_dataset_avg, _ = ExperimentorService.pca_reconstruction(artifact_dataset, window_size, threshold_avg) # reconstructed_dataset_avg_max, _ = ExperimentorService.pca_reconstr print 'We were able to reconstruct the entire test set in ' + str(end_time_max) + ' seconds.' print 'There are ' + str(len(ExperimentorService.windows(test_set, window_size))) + ' windows in the test set.' print 'On average, we can reconstruct a window in ' + str(end_time_max / len(ExperimentorService.windows(test_set, window_size))) + ' seconds.' print 'We can do pca projection at a rate of ' + str(1/(end_time_max / len(ExperimentorService.windows(test_set, window_size)))) + 'Hz'
def visualize_cross_validation_bars(training_set, test_set, artifact_dataset, window_sizes, name='figure_cross_validation_bars'): """ :param training_set: :param artifact_dataset: :param test_set: :param window_sizes: :param name: :return: """ # Do cross validation mse = {'max': [], 'avg': [], 'avg_max': []} for window_size in window_sizes: for i, threshold in enumerate(ExperimentorService.calibrate(training_set, window_size)): original_windows = ExperimentorService.windows(test_set.clone(), window_size) artifact_windows = ExperimentorService.windows(artifact_dataset.clone(), window_size) current_mse = [] for idx, original_window in enumerate(original_windows): reconstructed_window, rejected = ExperimentorService.pca_reconstruction(artifact_windows[idx], window_size, threshold) current_mse += ExperimentorService.mse(original_window, reconstructed_window) if i == 0: mse['max'] += [np.mean(current_mse)] elif i == 1: mse['avg'] += [np.mean(current_mse)] else: mse['avg_max'] += [np.mean(current_mse)] best_index_max = mse['max'].index(min(mse['max'])) best_index_avg = mse['avg'].index(min(mse['avg'])) best_index_avg_max = mse['avg_max'].index(min(mse['avg_max'])) print 'Best window size for max threshold: ' + str(window_sizes[best_index_max]) print 'Best window size for avg threshold: ' + str(window_sizes[best_index_avg]) print 'Best window size for avg_max threshold: ' + str(window_sizes[best_index_avg_max]) fig, ax = plt.subplots() indexs = np.arange(len(mse['max'])) width = 0.20 ax.bar(indexs, mse['max'], width, label='Max eigenvalue threshold', color='c', alpha=0.8) ax.bar(indexs + width, mse['avg'], width, label='Average eigenvalue threshold', color='b', alpha=0.8) ax.bar(indexs + width*2, mse['avg_max'], width, label='Average of max eigenvalue threshold', color='m', alpha=0.8) ax.set_ylim([0,1500]) ax.set_xticks(indexs + width*1.5) ax.set_xticklabels([str(window_size) for window_size in window_sizes]) plt.xticks(rotation=70) ax.set_title('mse cross validation') 
ax.set_ylabel('Mean squared error') ax.set_xlabel('Window size') plt.legend(loc='upper right') plt.savefig(name)