def plotRelativeRMSEs(experiment_data):
    """Summarize and plot relative pose errors for every experiment.

    Aggregates each experiment's per-sequence relative translation/rotation
    errors, prints LaTeX tables with the RMSE and worst-case error, draws a
    comparative box plot of the translation errors, and plots the per-sequence
    translation RMSE for each experiment.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    project (sptam_parser, comparador, lh, pretty_boxplot, colors,
    extractSequenceLabels, plotSequenceDataForEachExperiment).
    """
    # Group the per-sequence error arrays under their experiment id.
    relative_translation_errors = sptam_parser.aggregateOverKey(
        experiment_data,
        lambda exp_id, seq_id: exp_id,
        lambda exp: exp.relative_translation_errors)
    relative_rotation_errors = sptam_parser.aggregateOverKey(
        experiment_data,
        lambda exp_id, seq_id: exp_id,
        lambda exp: exp.relative_rotation_errors)

    # Per-experiment summary statistics: RMSE and maximum (worst case) error.
    relative_translation_rmses = sptam_parser.mapDict(relative_translation_errors, comparador.computeRMSE)
    relative_rotation_rmses = sptam_parser.mapDict(relative_rotation_errors, comparador.computeRMSE)
    relative_translation_max = sptam_parser.mapDict(relative_translation_errors, max)
    relative_rotation_max = sptam_parser.mapDict(relative_rotation_errors, max)

    sequence_labels = extractSequenceLabels(experiment_data)

    # Translation RMSE of every sequence, per experiment, in label order.
    relative_translation_rmses_by_seq = {
        experiment_id: [
            comparador.computeRMSE(sequence_data[sequence_id].relative_translation_errors)
            for sequence_id in sequence_labels
        ]
        for experiment_id, sequence_data in experiment_data.items()
    }

    lh.printMultipleLatexTable([relative_translation_rmses, relative_translation_max], "Relative translation error")
    lh.printMultipleLatexTable([relative_rotation_rmses, relative_rotation_max], "Relative rotation error")

    # Materialize the dict views (Python 3) so the plotter can index them,
    # consistent with plotErrorsForEachExperiment(). values()/keys() of the
    # same dict are guaranteed to iterate in matching order.
    pretty_boxplot.boxplot(
        list(relative_translation_errors.values()),
        list(relative_translation_errors.keys()),
        colors,
        "Relative translation errors",
        "Euclidean distance (m)")

    plotSequenceDataForEachExperiment(relative_translation_rmses_by_seq, sequence_labels, "Relative translation RMSE")
def plotErrorsForEachSequence(experiment_id, sequence_data):
    """Box-plot the relative translation errors of each sequence of one experiment."""
    # Sort by sequence label so the boxes appear in a deterministic order.
    ordered = sorted(sequence_data.items())
    sequence_names = [name for name, _ in ordered]
    error_series = [entry.relative_translation_errors for _, entry in ordered]
    pretty_boxplot.boxplot(
        error_series,
        sequence_names,
        colors,
        "relative translation errors for " + experiment_id,
        "Euclidean distance (m)")
def plotErrorsForEachExperiment(experiment_data):
    """Box-plot relative translation and rotation errors, one box per experiment."""
    # Concatenate every sequence's error array under its experiment id.
    relative_translation_errors = {
        exp_id: concatOverAllSequences(seq_data, "relative_translation_errors")
        for exp_id, seq_data in experiment_data.items()
    }
    relative_rotation_errors = {
        exp_id: concatOverAllSequences(seq_data, "relative_rotation_errors")
        for exp_id, seq_data in experiment_data.items()
    }
    pretty_boxplot.boxplot(
        list(relative_translation_errors.values()),
        list(relative_translation_errors.keys()),
        colors, None, "Euclidean distance (m)")
    pretty_boxplot.boxplot(
        list(relative_rotation_errors.values()),
        list(relative_rotation_errors.keys()),
        colors, None, "Angular deviation (deg)")
args.sequence_name: { 'disparity': task_time_mean[TASK_DISPARITY], 'projection': task_time_mean[TASK_PROJECTION], 'refinement': task_time_mean[TASK_REFINEMENT], } } plot_phase_time(phase_time_data) labels = [""] * TASK_LEN labels[TASK_DISPARITY] = 'Disparity' labels[TASK_PROJECTION] = 'Map Fusion' labels[TASK_REFINEMENT] = 'Map Refinement' colors = [(0, 0.4470, 0.7410)] * TASK_LEN pretty_boxplot.boxplot(task_time, labels, colors, "", "Time (ms)") cloud_size_data = { args.sequence_name: { # Points that were created (triangulated) from a keyframe. 'created': points[POINT_TYPE_NEW], # Points discarded as outliers. 'outliers': points[POINT_TYPE_OUTLIER], # Final point cloud size (composed by hypothesis and validated points). 'total': int(args.hypothesis) + int(args.validated), # Number of hypothesis in final point cloud. 'hypothesis': int(args.hypothesis), # Number of hypothesis in final point cloud. 'validated': int(args.validated), # Number of matches during the sequence, i.e. number of fusions. 'matches': points[POINT_TYPE_MATCH],
experiments[experiment_id][sequence_id] = sptam_parser.ExperimentData( logfile, to_plot) #################################################################### # Plot comparative box plots #################################################################### #~ for task_label, task_id in to_plot.iteritems(): #~ data = aggregateOverKey( experiments, lambda det, desc: det+' / '+desc, lambda exp: getattr(exp, task_id)[:,1] ) #~ pretty_boxplot.boxplot(data.values(), data.keys(), colors, task_label, "required time (s)") data = sptam_parser.aggregateOverKey( experiments, lambda (det, desc), seq: det, lambda exp: exp.FeatureDetection[:, 1]) # title should be 'feature detection' pretty_boxplot.boxplot(data.values(), data.keys(), colors, None, "required time (s)") plt.tight_layout() for _, experiment_data in experiments.iteritems(): for _, sequence_data in experiment_data.iteritems(): assert (len(sequence_data.DescriptorExtraction[::2, 1] + sequence_data.DescriptorExtraction[1::2, 1]) == len( sequence_data.ExtractedPoints)) normalizeDescriptors = lambda exp: np.divide( exp.DescriptorExtraction[::2, 1] + exp.DescriptorExtraction[1::2, 1], exp.ExtractedPoints[:, 1]) data = sptam_parser.aggregateOverKey(experiments, lambda (det, desc), seq: desc, normalizeDescriptors) # title should be 'normalized descriptor extraction' pretty_boxplot.boxplot(data.values(), data.keys(), colors, None,
args.sequence_name: { 'disparity': task_time_mean[TASK_DISPARITY], 'projection': task_time_mean[TASK_PROJECTION], 'refinement': task_time_mean[TASK_REFINEMENT], } } plot_phase_time(phase_time_data) labels = [""] * TASK_LEN labels[TASK_DISPARITY] = 'Disparity' labels[TASK_PROJECTION] = 'Map Fusion' labels[TASK_REFINEMENT] = 'Map Refinement' colors = [(0, 0.4470, 0.7410)] * TASK_LEN pretty_boxplot.boxplot(task_time, labels, colors, "", "Time (ms)" ) cloud_size_data = { args.sequence_name: { # Points that were created (triangulated) from a keyframe. 'created': points[POINT_TYPE_NEW], # Points discarded as outliers. 'outliers': points[POINT_TYPE_OUTLIER], # Final point cloud size (composed by hypothesis and validated points). 'total': int(args.hypothesis) + int(args.validated), # Number of hypothesis in final point cloud. 'hypothesis': int(args.hypothesis), # Number of hypothesis in final point cloud. 'validated': int(args.validated), # Number of matches during the sequence, i.e. number of fusions. 'matches': points[POINT_TYPE_MATCH],