# Example no. 1
def eval_all_metrics_lab(refs_URI, detected_URI, tolerance=0.3):
    """
    Run every evaluation metric on one reference/detection file pair.

    Parameters
    ----------
    refs_URI : path to the reference (.lab) annotation file
    detected_URI : path to the detected-intervals file
    tolerance : tolerance window used by the tolerance metric

    Returns
    -------
    tuple of (mean alignment error, percentage correct, percentage tolerance)
    """
    ref_intervals, ref_labels = load_labeled_intervals(refs_URI)
    detected_intervals, use_end_ts = load_detected_intervals(detected_URI)

    # Metric 1: mean alignment error. Std-dev and median are computed by the
    # helper but deliberately not returned.
    errors = _eval_alignment_error(ref_intervals, detected_intervals, ref_labels, use_end_ts)
    mean_error, _std_dev, _median = getMeanAndStDevError(errors)

    # Metric 2: duration detected correctly, normalised by total duration.
    # The reference span runs from the first interval's start to the last
    # interval's end.
    start_ts = ref_intervals[0][0]
    end_ts = ref_intervals[-1][1]
    correct_duration, total_duration = _eval_percentage_correct(
        reference_token_list=ref_intervals,
        detected_token_List=detected_intervals,
        final_ts_anno=end_ts,
        initial_time_offset_refs=start_ts,
        reference_labels=ref_labels)
    percentage_correct = correct_duration / total_duration

    # Metric 3: fraction of tokens aligned within the tolerance window.
    percentage_tolerance = _eval_percentage_tolerance(
        ref_intervals=ref_intervals,
        detected_intervals=detected_intervals,
        reference_labels=ref_labels,
        tolerance=tolerance)

    return mean_error, percentage_correct, percentage_tolerance
# Example no. 2
def eval_all_metrics_lab(refs_URI, detected_URI, tolerance=0.3):
    """
    Run every evaluation metric on one reference/detection file pair.

    Both files are loaded as labeled intervals (.lab format).

    Parameters
    ----------
    refs_URI : path to the reference (.lab) annotation file
    detected_URI : path to the detected (.lab) annotation file
    tolerance : tolerance window for the tolerance metric.
        Defaults to 0.3, preserving the previous hard-coded value; exposed
        as a parameter for consistency with the sibling variant of this
        function.

    Returns
    -------
    tuple of (mean alignment error, percentage correct, percentage tolerance)
    """
    ref_intervals, ref_labels = load_labeled_intervals(refs_URI)
    detected_intervals, detected_labels = load_labeled_intervals(detected_URI)

    # Metric 1: mean alignment error at the phrase tier. Std-dev and median
    # are computed by the helper but deliberately not returned.
    alignmentErrors = _eval_alignment_error(ref_intervals, detected_intervals,
                                            tierAliases.phrases, ref_labels)
    mean, stDev, median = getMeanAndStDevError(alignmentErrors)

    # Metric 2: duration detected correctly, normalised by total duration.
    # The reference span runs from the first interval's start to the last
    # interval's end.
    initialTimeOffset_refs = ref_intervals[0][0]
    finalts_refs = ref_intervals[-1][1]
    durationCorrect, totalLength = _eval_percentage_correct(
        reference_token_list=ref_intervals,
        detected_token_List=detected_intervals,
        final_ts_anno=finalts_refs,
        initial_time_offset_refs=initialTimeOffset_refs,
        reference_labels=ref_labels)
    percentage_correct = durationCorrect / totalLength

    # Metric 3: fraction of tokens aligned within the tolerance window.
    percentage_tolerance = _eval_percentage_tolerance(
        ref_intervals=ref_intervals,
        detected_intervals=detected_intervals,
        reference_labels=ref_labels,
        tolerance=tolerance)

    return mean, percentage_correct, percentage_tolerance
def test_eval_percentage_tolerance_lab_generic():
    """
    Check token accuracy within a tolerance window tau, loading the .lab
    files of the generic dataset; expects zero accuracy on this data.
    """
    intervals_ref, intervals_det, labels_ref = load_ref_and_detections(dataset='generic')
    result = _eval_percentage_tolerance(ref_intervals=intervals_ref,
                                        detected_intervals=intervals_det,
                                        reference_labels=labels_ref,
                                        tolerance=0.3)
    assert result == 0.0
def test_eval_percentage_tolerance_lab_generic():
    """
    Check token accuracy within a tolerance window tau, loading the .lab
    files of the generic dataset; expects zero accuracy on this data.
    """
    loaded = load_ref_and_detections(dataset='generic')
    intervals_ref, intervals_det, labels_ref = loaded

    result = _eval_percentage_tolerance(ref_intervals=intervals_ref,
                                        detected_intervals=intervals_det,
                                        reference_labels=labels_ref,
                                        tolerance=0.3)

    assert result == 0.0