def visualize_cross_validation_bars_percentage(training_set, test_set, artifact_dataset, window_sizes, name='figure_cross_validation_bars_difference'):
    """
    Cross-validate PCA reconstruction over several window sizes and plot the
    mean reconstruction difference per threshold strategy as grouped bars.

    :param training_set: dataset used to calibrate the eigenvalue thresholds
    :param test_set: clean dataset the reconstructions are compared against
    :param artifact_dataset: artifact-contaminated dataset to reconstruct
    :param window_sizes: iterable of window sizes to cross-validate over
    :param name: file name the figure is saved under
    :return: None (figure is written to disk via plt.savefig)
    """
    # Explicit strategy ordering, matching the order of thresholds yielded by
    # ExperimentorService.calibrate. The original indexed differences.keys()[i],
    # whose ordering is arbitrary in Python 2 — the printed label could be wrong.
    threshold_names = ['max', 'avg', 'avg_max']
    differences = {key: [] for key in threshold_names}

    # Cross validation: for each window size, evaluate each threshold strategy.
    for window_size in window_sizes:
        for i, threshold in enumerate(ExperimentorService.calibrate(training_set, window_size)):
            original_windows = ExperimentorService.windows(test_set.clone(), window_size)
            artifact_windows = ExperimentorService.windows(artifact_dataset.clone(), window_size)

            current_difference = []
            for idx, original_window in enumerate(original_windows):
                reconstructed_window, rejected = ExperimentorService.pca_reconstruction(artifact_windows[idx], window_size, threshold)
                current_difference += ExperimentorService.difference(original_window, reconstructed_window)

            # Hoisted: np.mean was previously computed twice per iteration.
            mean_difference = np.mean(current_difference)
            differences[threshold_names[i]].append(mean_difference)
            print('threshold: ' + threshold_names[i] + ' - window size: ' + str(window_size) + ' - difference: ' + str(mean_difference))

    fig, ax = plt.subplots()
    indexs = np.arange(len(differences['max']))
    width = 0.20

    # One bar group per window size, one bar per threshold strategy.
    ax.bar(indexs, differences['max'], width, label='Max eigenvalue threshold', color='c', alpha=0.8)
    ax.bar(indexs + width, differences['avg'], width, label='Average eigenvalue threshold', color='b', alpha=0.8)
    ax.bar(indexs + width * 2, differences['avg_max'], width, label='Average of max eigenvalue threshold', color='m', alpha=0.8)

    # Center the tick under the middle of each 3-bar group.
    ax.set_xticks(indexs + width * 1.5)
    ax.set_xticklabels([str(window_size) for window_size in window_sizes])
    plt.xticks(rotation=70)

    ax.set_title('Difference cross validation')
    ax.set_ylabel('Difference %')
    ax.set_xlabel('Window size')

    plt.legend(loc='upper right')
    plt.savefig(name)