def test_equals(self):
    """Two segments with the same span and equal ref labels compare equal."""
    first = alignment.Segment(
        0.0, 0.9, ref=[annotations.Label('a'), annotations.Label('a')])
    second = alignment.Segment(
        0.0, 0.9, ref=[annotations.Label('a'), annotations.Label('a')])

    assert first == second
def test_compare_multi_labels_returns_smaller_start_time(self):
    """With multi-label refs, the segment with the earlier start orders first."""
    earlier = alignment.Segment(
        0.0, 0.9, ref=[annotations.Label('a'), annotations.Label('a')])
    later = alignment.Segment(
        0.2, 0.8, ref=[annotations.Label('a'), annotations.Label('a')])

    assert earlier < later
def test_compare_multi_labels_with_same_times_returns_smaller_label_value(
        self):
    """When spans are identical, ordering falls back to the label values."""
    smaller_labels = alignment.Segment(
        0.0, 1.0, ref=[annotations.Label('a'), annotations.Label('a')])
    larger_labels = alignment.Segment(
        0.0, 1.0, ref=[annotations.Label('a'), annotations.Label('b')])

    assert smaller_labels < larger_labels
def test_create_from_segments():
    """create_from_segments tallies correct/ins/del/sub durations, overall and per label."""
    segments = [
        alignment.Segment(0, 4,
                          annotations.Label('music', start=0, end=5),
                          annotations.Label('music', start=0, end=4)),
        alignment.Segment(4, 5,
                          annotations.Label('music', start=0, end=5),
                          annotations.Label('speech', start=4, end=6)),
        alignment.Segment(5, 6,
                          annotations.Label('speech', start=5, end=11),
                          annotations.Label('speech', start=4, end=6)),
        alignment.Segment(6, 8,
                          annotations.Label('speech', start=5, end=11),
                          None),
        alignment.Segment(8, 11,
                          annotations.Label('speech', start=5, end=11),
                          annotations.Label('mix', start=8, end=16)),
        alignment.Segment(11, 14,
                          annotations.Label('mix', start=11, end=14),
                          annotations.Label('mix', start=8, end=16)),
        alignment.Segment(14, 16,
                          annotations.Label('speech', start=14, end=19),
                          annotations.Label('mix', start=8, end=16)),
        alignment.Segment(16, 19,
                          annotations.Label('speech', start=14, end=19),
                          annotations.Label('speech', start=16, end=21)),
        alignment.Segment(19, 21,
                          None,
                          annotations.Label('speech', start=16, end=21)),
    ]

    cnf = confusion.create_from_segments(segments)

    # Aggregate durations across all labels.
    for attr, expected in [('correct', 11), ('insertions', 2),
                           ('deletions', 2), ('substitutions', 6),
                           ('substitutions_out', 6), ('total', 19)]:
        assert getattr(cnf, attr) == pytest.approx(expected)

    # Per-label breakdown: (correct, ins, del, sub, sub_out, total).
    per_label = {
        'music': (4, 0, 0, 1, 0, 5),
        'speech': (4, 2, 2, 5, 1, 11),
        'mix': (3, 0, 0, 0, 5, 3),
    }
    assert len(cnf.instances) == len(per_label)
    for name, stats in per_label.items():
        correct, ins, dels, subs, subs_out, total = stats
        inst = cnf.instances[name]
        assert inst.correct == pytest.approx(correct)
        assert inst.insertions == pytest.approx(ins)
        assert inst.deletions == pytest.approx(dels)
        assert inst.substitutions == pytest.approx(subs)
        assert inst.substitutions_out == pytest.approx(subs_out)
        assert inst.total == pytest.approx(total)
def test_evaluate_with_two_label_lists(
        self, classification_ref_and_hyp_label_list):
    """SegmentEvaluator.evaluate aligns a ref/hyp label-list pair into segments."""
    ll_ref, ll_hyp = classification_ref_and_hyp_label_list

    result = evaluator.SegmentEvaluator().evaluate(ll_ref, ll_hyp)

    expected_segments = [
        alignment.Segment(0, 4,
                          annotations.Label('music', start=0, end=5),
                          annotations.Label('music', start=0, end=4)),
        alignment.Segment(4, 5,
                          annotations.Label('music', start=0, end=5),
                          annotations.Label('speech', start=4, end=6)),
        alignment.Segment(5, 6,
                          annotations.Label('speech', start=5, end=11),
                          annotations.Label('speech', start=4, end=6)),
        alignment.Segment(6, 8,
                          annotations.Label('speech', start=5, end=11),
                          None),
        alignment.Segment(8, 11,
                          annotations.Label('speech', start=5, end=11),
                          annotations.Label('mix', start=8, end=16)),
        alignment.Segment(11, 14,
                          annotations.Label('mix', start=11, end=14),
                          annotations.Label('mix', start=8, end=16)),
        alignment.Segment(14, 16,
                          annotations.Label('speech', start=14, end=19),
                          annotations.Label('mix', start=8, end=16)),
        alignment.Segment(16, 19,
                          annotations.Label('speech', start=14, end=19),
                          annotations.Label('speech', start=16, end=21)),
        alignment.Segment(19, 21,
                          None,
                          annotations.Label('speech', start=16, end=21)),
    ]

    assert isinstance(result, evaluator.SegmentEvaluation)
    # Order of the produced segments is not pinned down; compare sorted.
    actual_segments = result.utt_to_segments[
        evaluator.Evaluator.DEFAULT_UTT_IDX]
    assert sorted(actual_segments) == sorted(expected_segments)
def test_compare_single_labels_returns_smaller_start_time(self):
    """With single-label refs, the segment with the earlier start orders first."""
    earlier = alignment.Segment(0.0, 0.9, ref=annotations.Label('a'))
    later = alignment.Segment(0.2, 0.7, ref=annotations.Label('a'))

    assert earlier < later
def test_duration(self):
    """duration is end minus start (approx, since floats are involved)."""
    segment = alignment.Segment(0.8, 1.9)

    assert segment.duration == pytest.approx(1.1)
def sample_confusion():
    """Build a SegmentConfusion for label 'music' pre-filled with sample segments."""
    cnf = confusion.SegmentConfusion('music')

    cnf.correct_segments = [
        alignment.Segment(0, 3),
        alignment.Segment(88, 103),
        alignment.Segment(159.2, 193.1),
    ]
    cnf.insertion_segments = [
        alignment.Segment(8, 19),
        alignment.Segment(55, 67.2),
    ]
    cnf.deletion_segments = [
        alignment.Segment(22, 28.2),
        alignment.Segment(31, 38.2),
        alignment.Segment(115, 124.9),
    ]
    # Keyed by the other label involved in the substitution.
    # NOTE(review): assumed in/out direction ('music' mistaken for key vs.
    # key mistaken for 'music') — confirm against SegmentConfusion's docs.
    cnf.substitution_segments = {
        'speech': [
            alignment.Segment(44, 48.9),
            alignment.Segment(198, 204.9),
        ],
        'mix': [
            alignment.Segment(133.4, 141.2),
        ],
    }
    cnf.substitution_out_segments = {
        'speech': [
            alignment.Segment(208.9, 210.2),
        ],
        'mix': [
            alignment.Segment(70.2, 79.78),
            alignment.Segment(4, 7),
        ],
    }
    return cnf