# Example 1 (score: 0)
def eval_all_metrics_lab(refs_URI, detected_URI):
    """
    Run every evaluation metric on a single file.

    Parameters
    ----------
    refs_URI : path to the reference (ground-truth) labeled-interval file
    detected_URI : path to the detected labeled-interval file

    Returns
    -------
    tuple of (mean alignment error, percentage correct, percentage
    within tolerance)
    """
    ref_intervals, ref_labels = load_labeled_intervals(refs_URI)
    detected_intervals, detected_labels = load_labeled_intervals(detected_URI)

    # metric 1: alignment error (only the mean is propagated to the caller)
    errors = _eval_alignment_error(ref_intervals, detected_intervals,
                                   tierAliases.phrases, ref_labels)
    mean, st_dev, median = getMeanAndStDevError(errors)

    # metric 2: percentage correct over the annotated span
    first_onset = ref_intervals[0][0]
    last_offset = ref_intervals[-1][1]
    correct_duration, total_duration = _eval_percentage_correct(
        reference_token_list=ref_intervals,
        detected_token_List=detected_intervals,
        final_ts_anno=last_offset,
        initial_time_offset_refs=first_onset,
        reference_labels=ref_labels)
    percentage_correct = correct_duration / total_duration

    # metric 3: percentage within a fixed 0.3 s tolerance
    percentage_tolerance = _eval_percentage_tolerance(
        ref_intervals=ref_intervals,
        detected_intervals=detected_intervals,
        reference_labels=ref_labels,
        tolerance=0.3)

    return mean, percentage_correct, percentage_tolerance
def eval_error_textgrid_test():
    """Compute and print the alignment error for one TextGrid-annotated test recording."""
    audio_name = '05_Semahat_Ozdenses_-_Bir_Ihtimal_Daha_Var_0_zemin_from_69_5205_to_84_2'
    annotation_url = os.path.join(PATH_TEST_DATASET,
                                  audio_name + ANNOTATION_EXT)

    # take the full token sequence: no leading or trailing tokens skipped
    start_index, end_index = 0, -1

    detected_token_list = [[0.61, 0.94, u'Bir'], [1.02, 3.41, u'ihtimal'],
                           [3.42, 4.11, u'daha'], [4.12, 5.4, u'var'],
                           [8.03, 8.42, u'o'], [8.46, 8.83, u'da'],
                           [8.86, 10.65, u'\xf6lmek'], [10.66, 11.04, u'mi'],
                           [11.05, 14.39, u'dersin']]

    # drop non-lyrics tokens from both annotation and detection
    (annotation_token_list,
     detected_token_list,
     dummy, dummy) = strip_non_lyrics_tokens(annotation_url,
                                             detected_token_list,
                                             tierAliases.phrases,
                                             start_index,
                                             end_index)

    errors = _eval_alignment_error(annotation_token_list,
                                   detected_token_list,
                                   tierAliases.phrases)
    mean, std_dev, median = getMeanAndStDevError(errors)
    print("mean : ", mean, "st dev: ", std_dev)
# Example 3 (score: 0)
def eval_all_metrics_lab(refs_URI, detected_URI, tolerance=0.3):
    """
    Run every evaluation metric on a single file.

    Parameters
    ----------
    refs_URI : path to the reference (ground-truth) labeled-interval file
    detected_URI : path to the detected-interval file
    tolerance : tolerance window in seconds for metric 3 (default 0.3)

    Returns
    -------
    tuple of (mean alignment error, percentage correct, percentage
    within tolerance)
    """
    ref_intervals, ref_labels = load_labeled_intervals(refs_URI)

    detected_intervals, use_end_ts = load_detected_intervals(detected_URI)

    # metric 1: alignment error (only the mean is returned to the caller)
    errors = _eval_alignment_error(
        ref_intervals, detected_intervals, ref_labels, use_end_ts)
    mean, st_dev, median = getMeanAndStDevError(errors)

    # metric 2: percentage correct over the annotated span
    first_onset = ref_intervals[0][0]
    last_offset = ref_intervals[-1][1]
    correct_duration, total_duration = _eval_percentage_correct(
        reference_token_list=ref_intervals,
        detected_token_List=detected_intervals,
        final_ts_anno=last_offset,
        initial_time_offset_refs=first_onset,
        reference_labels=ref_labels)
    percentage_correct = correct_duration / total_duration

    # metric 3: percentage within the requested tolerance
    percentage_tolerance = _eval_percentage_tolerance(
        ref_intervals=ref_intervals,
        detected_intervals=detected_intervals,
        reference_labels=ref_labels,
        tolerance=tolerance)

    return mean, percentage_correct, percentage_tolerance
# Example 4 (score: 0)
def eval_error_textgrid_test():
    """Compute and print the mean alignment error for a fixed test recording."""
    audio_name = '05_Semahat_Ozdenses_-_Bir_Ihtimal_Daha_Var_0_zemin_from_69_5205_to_84_2'
    annotation_url = os.path.join(PATH_TEST_DATASET, audio_name + ANNOTATION_EXT)

    # use the whole token range
    start_index = 0
    end_index = -1

    # hand-crafted detections: [onset, offset, token]
    detected_token_list = [
        [0.61, 0.94, u'Bir'],
        [1.02, 3.41, u'ihtimal'],
        [3.42, 4.11, u'daha'],
        [4.12, 5.4, u'var'],
        [8.03, 8.42, u'o'],
        [8.46, 8.83, u'da'],
        [8.86, 10.65, u'\xf6lmek'],
        [10.66, 11.04, u'mi'],
        [11.05, 14.39, u'dersin'],
    ]

    stripped = strip_non_lyrics_tokens(annotation_url,
                                       detected_token_list,
                                       tierAliases.phrases,
                                       start_index,
                                       end_index)
    annotation_token_list, detected_token_list = stripped[0], stripped[1]

    errors = _eval_alignment_error(annotation_token_list,
                                   detected_token_list,
                                   tierAliases.phrases)
    mean, std_dev, median = getMeanAndStDevError(errors)
    print("mean : ", mean, "st dev: ", std_dev)
# Example 5 (score: 0)
def test_eval_error_lab_mauch():
    """
    Check the mean alignment error/displacement (in seconds) on the
    'mauch' dataset loaded from .lab files.
    """
    refs, detections, labels = load_ref_and_detections(dataset='mauch')
    errs = _eval_alignment_error(refs, detections, labels)
    mean_mauch, std_dev_mauch, median_mauch = getMeanAndStDevError(errs)
    # detections equal the references here, so error must be exactly zero
    assert mean_mauch == 0.0 and std_dev_mauch == 0.0
def test_eval_error_lab_mauch():
    """
    Check the mean alignment error/displacement (in seconds) on the
    'mauch' dataset loaded from .lab files.
    """
    ref_intervals, detected_intervals, ref_labels = \
        load_ref_and_detections(dataset='mauch')
    errors = _eval_alignment_error(
        ref_intervals, detected_intervals, ref_labels)
    mean_mauch, std_dev_mauch, median_mauch = \
        getMeanAndStDevError(errors)
    # a perfect alignment yields zero mean and zero spread
    assert mean_mauch == 0.0 and std_dev_mauch == 0.0
# Example 7 (score: 0)
def test_eval_error_lab_hansen():
    """
    Check the mean alignment error/displacement (in seconds) on the
    'hansen' dataset loaded from .lab files.
    """
    refs, detections, labels = load_ref_and_detections(dataset='hansen')
    errs = _eval_alignment_error(
        refs, detections, tierAliases.phrases, labels)
    mean_hansen, std_dev_hansen, median_hansen = getMeanAndStDevError(errs)
    # detections equal the references here, so error must be exactly zero
    assert mean_hansen == 0.0 and std_dev_hansen == 0.0