def test_display_hierarchy_label():
    """Hierarchy display with explicit level names and a legend."""
    plt.figure()

    # Read two hierarchy levels of labeled intervals
    intervals_0, labels_0 = load_labeled_intervals("tests/data/hierarchy/ref00.lab")
    intervals_1, labels_1 = load_labeled_intervals("tests/data/hierarchy/ref01.lab")

    # Plot both levels together, naming the levels explicitly
    mir_eval.display.hierarchy(
        [intervals_0, intervals_1],
        [labels_0, labels_1],
        levels=["Large", "Small"],
    )
    plt.legend()
def test_display_hierarchy_nolabel():
    """Hierarchy display without explicit level names."""
    plt.figure()

    # Read two hierarchy levels of labeled intervals
    intervals_top, labels_top = load_labeled_intervals('tests/data/hierarchy/ref00.lab')
    intervals_low, labels_low = load_labeled_intervals('tests/data/hierarchy/ref01.lab')

    # Plot both levels; level names are left to their defaults
    mir_eval.display.hierarchy([intervals_top, intervals_low],
                               [labels_top, labels_low])
    plt.legend()
def test_display_hierarchy_nolabel():
    """Hierarchy display without explicit level names."""
    plt.figure()

    # Read two hierarchy levels of labeled intervals
    first_intervals, first_labels = load_labeled_intervals('data/hierarchy/ref00.lab')
    second_intervals, second_labels = load_labeled_intervals('data/hierarchy/ref01.lab')

    # Plot both levels; level names are left to their defaults
    mir_eval.display.hierarchy([first_intervals, second_intervals],
                               [first_labels, second_labels])
    plt.legend()
def test_display_labeled_intervals_compare():
    """Overlay reference and estimate annotations, extending the label set."""
    plt.figure()

    # Read the reference and estimated chord annotations
    ref_intervals, ref_chords = load_labeled_intervals("tests/data/chord/ref01.lab")
    est_intervals, est_chords = load_labeled_intervals("tests/data/chord/est01.lab")

    # Draw both annotations; the second call extends the axis label set
    mir_eval.display.labeled_intervals(ref_intervals, ref_chords,
                                       alpha=0.5, label="Reference")
    mir_eval.display.labeled_intervals(est_intervals, est_chords,
                                       alpha=0.5, label="Estimate")
    plt.legend()
def test_display_labeled_intervals_compare_common():
    """Overlay reference and estimate annotations over one shared label set."""
    plt.figure()

    # Load some chord data
    ref_int, ref_labels = load_labeled_intervals("tests/data/chord/ref01.lab")
    est_int, est_labels = load_labeled_intervals("tests/data/chord/est01.lab")

    # sorted() already returns a list, so the extra list() wrapper is redundant
    label_set = sorted(set(ref_labels) | set(est_labels))

    # Plot reference and estimate with a common label set
    mir_eval.display.labeled_intervals(ref_int, ref_labels,
                                       label_set=label_set,
                                       alpha=0.5, label="Reference")
    mir_eval.display.labeled_intervals(est_int, est_labels,
                                       label_set=label_set,
                                       alpha=0.5, label="Estimate")
    plt.legend()
def test_display_labeled_intervals_compare():
    """Overlay reference and estimate annotations, extending the label set."""
    plt.figure()

    # Read the reference and estimated chord annotations
    reference = load_labeled_intervals('data/chord/ref01.lab')
    estimate = load_labeled_intervals('data/chord/est01.lab')

    # Draw both annotations; the second call extends the axis label set
    mir_eval.display.labeled_intervals(reference[0], reference[1],
                                       alpha=0.5, label='Reference')
    mir_eval.display.labeled_intervals(estimate[0], estimate[1],
                                       alpha=0.5, label='Estimate')
    plt.legend()
def test_display_labeled_intervals_compare_noextend():
    """Overlay annotations while restricting the axis to reference labels."""
    plt.figure()

    # Read the reference and estimated chord annotations
    ref_intervals, ref_chords = load_labeled_intervals('data/chord/ref01.lab')
    est_intervals, est_chords = load_labeled_intervals('data/chord/est01.lab')

    # The reference defines the label set; the estimate may not extend it
    mir_eval.display.labeled_intervals(ref_intervals, ref_chords,
                                       alpha=0.5, label='Reference')
    mir_eval.display.labeled_intervals(est_intervals, est_chords,
                                       extend_labels=False,
                                       alpha=0.5, label='Estimate')
    plt.legend()
def test_display_segment_text():
    """Segment display with in-plot text annotations enabled."""
    plt.figure()

    # Read a flat segmentation annotation
    seg_intervals, seg_labels = load_labeled_intervals('data/segment/ref00.lab')

    # Draw the segments, writing each label into the plot
    mir_eval.display.segments(seg_intervals, seg_labels, text=True)
def test_display_labeled_intervals():
    """Basic labeled-interval display with default options."""
    plt.figure()

    # Read a chord annotation
    chord_intervals, chord_labels = load_labeled_intervals('data/chord/ref01.lab')

    # Draw it with the default settings
    mir_eval.display.labeled_intervals(chord_intervals, chord_labels)
def test_display_labeled_intervals_compare_common():
    """Overlay reference and estimate annotations over one shared label set."""
    plt.figure()

    # Load some chord data
    ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
    est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')

    # sorted() already returns a list, so the extra list() wrapper is redundant
    label_set = sorted(set(ref_labels) | set(est_labels))

    # Plot reference and estimate with a common label set
    mir_eval.display.labeled_intervals(ref_int, ref_labels,
                                       label_set=label_set,
                                       alpha=0.5, label='Reference')
    mir_eval.display.labeled_intervals(est_int, est_labels,
                                       label_set=label_set,
                                       alpha=0.5, label='Estimate')
    plt.legend()
def test_display_labeled_intervals_noextend():
    """Labeled-interval display onto an axis with an empty label set."""
    plt.figure()

    # Read a chord annotation
    chord_intervals, chord_labels = load_labeled_intervals("tests/data/chord/ref01.lab")

    # Start from an axis with no tick labels and forbid label extension
    axis = plt.axes()
    axis.set_yticklabels([])
    mir_eval.display.labeled_intervals(chord_intervals, chord_labels,
                                       label_set=[],
                                       extend_labels=False,
                                       ax=axis)
def test_display_labeled_intervals_compare_noextend():
    """Overlay annotations while restricting the axis to reference labels."""
    plt.figure()

    # Read the reference and estimated chord annotations
    reference = load_labeled_intervals('tests/data/chord/ref01.lab')
    estimate = load_labeled_intervals('tests/data/chord/est01.lab')

    # The reference defines the label set; the estimate may not extend it
    mir_eval.display.labeled_intervals(reference[0], reference[1],
                                       alpha=0.5, label='Reference')
    mir_eval.display.labeled_intervals(estimate[0], estimate[1],
                                       extend_labels=False,
                                       alpha=0.5, label='Estimate')
    plt.legend()
def test_display_segment():
    """Flat segment display (no in-plot text) followed by a legend."""
    plt.figure()

    # Read a flat segmentation annotation
    seg_intervals, seg_labels = load_labeled_intervals('tests/data/segment/ref00.lab')

    # Draw the segments without any text labels
    mir_eval.display.segments(seg_intervals, seg_labels, text=False)

    # Draw a legend
    plt.legend()
def test_display_segment():
    """Flat segment display (no in-plot text) followed by a legend."""
    plt.figure()

    # Read a flat segmentation annotation
    segment_intervals, segment_labels = load_labeled_intervals("tests/data/segment/ref00.lab")

    # Draw the segments without any text labels
    mir_eval.display.segments(segment_intervals, segment_labels, text=False)

    # Draw a legend
    plt.legend()
def test_display_labeled_intervals_noextend():
    """Labeled-interval display onto an axis with an empty label set."""
    plt.figure()

    # Read a chord annotation
    chord_intervals, chord_labels = load_labeled_intervals('data/chord/ref01.lab')

    # Start from an axis with no tick labels and forbid label extension
    axis = plt.axes()
    axis.set_yticklabels([])
    mir_eval.display.labeled_intervals(chord_intervals, chord_labels,
                                       label_set=[],
                                       extend_labels=False,
                                       ax=axis)
action='store', help='path to the estimated annotation(s) in ' '.lab format, ordered from top to bottom of ' 'the hierarchy') return vars(parser.parse_args(sys.argv[1:])) if __name__ == '__main__': # Get the parameters parameters = process_arguments() # load the data ref_files = parameters['reference_file'] est_files = parameters['estimated_file'] ref_intervals = [load_labeled_intervals(_)[0] for _ in ref_files] est_intervals = [load_labeled_intervals(_)[0] for _ in est_files] # Compute all the scores scores = mir_eval.hierarchy.evaluate(ref_intervals, est_intervals, window=parameters['window']) print("{} [...] vs. {} [...]".format( basename(parameters['reference_file'][0]), basename(parameters['estimated_file'][0]))) eval_utilities.print_evaluation(scores) if parameters['output_file']: print('Saving results to: ', parameters['output_file']) eval_utilities.save_results(scores, parameters['output_file'])
help='path to the estimated annotation(s) in ' '.lab format, ordered from top to bottom of ' 'the hierarchy') return vars(parser.parse_args(sys.argv[1:])) if __name__ == '__main__': # Get the parameters parameters = process_arguments() # load the data ref_files = parameters['reference_file'] est_files = parameters['estimated_file'] ref = [load_labeled_intervals(_) for _ in ref_files] est = [load_labeled_intervals(_) for _ in est_files] ref_intervals = [seg[0] for seg in ref] ref_labels = [seg[1] for seg in ref] est_intervals = [seg[0] for seg in est] est_labels = [seg[1] for seg in est] # Compute all the scores scores = mir_eval.hierarchy.evaluate(ref_intervals, ref_labels, est_intervals, est_labels, window=parameters['window']) print("{} [...] vs. {} [...]".format( basename(parameters['reference_file'][0]), basename(parameters['estimated_file'][0]))) eval_utilities.print_evaluation(scores)
def loadGT(self, GTPath):
    """Read a ground-truth annotation file and return (intervals, labels)."""
    intervals, labels = load_labeled_intervals(GTPath, delimiter="\t")

    # Annotated times are in centiseconds; convert to seconds
    intervals = intervals / 100.0

    # Keep only the section name: drop surrounding quotes and any trailing
    # pitch-shift field, e.g. <"chorus A" (-10)>
    labels = [raw.split("\t")[0].strip('"') for raw in labels]

    return intervals, labels
def loadGT(self, GTPath):
    """Read a ground-truth annotation file and return (intervals, labels)."""
    intervals, labels = load_labeled_intervals(GTPath, delimiter="\t")

    # Annotated times are in centiseconds; convert to seconds
    intervals = intervals / 100.0

    # Strip surrounding double quotes from every label
    labels = np.array([raw.strip('"') for raw in labels])

    return intervals, labels
def loadGT(self, GTPath):
    """Parse a tab-delimited ground-truth file into intervals and labels."""
    segment_times, segment_labels = load_labeled_intervals(GTPath,
                                                           delimiter="\t")
    return segment_times, segment_labels
help='path to the estimated annotation(s) in ' '.lab format, ordered from top to bottom of ' 'the hierarchy') return vars(parser.parse_args(sys.argv[1:])) if __name__ == '__main__': # Get the parameters parameters = process_arguments() # load the data ref_files = parameters['reference_file'] est_files = parameters['estimated_file'] ref = [load_labeled_intervals(_) for _ in ref_files] est = [load_labeled_intervals(_) for _ in est_files] ref_intervals = [seg[0] for seg in ref] ref_labels = [seg[1] for seg in ref] est_intervals = [seg[0] for seg in est] est_labels = [seg[1] for seg in est] # Compute all the scores scores = mir_eval.hierarchy.evaluate(ref_intervals, ref_labels, est_intervals, est_labels, window=parameters['window']) print("{} [...] vs. {} [...]".format( basename(parameters['reference_file'][0]), basename(parameters['estimated_file'][0])))