Example #1
from pyannote.metrics.diarization import DiarizationErrorRate


def get_DER(all_reference, all_hypothesis):
    """
    Calculates DER, CER, FA and MISS.

    Args:
        all_reference (list[Annotation]): reference annotations for score calculation
        all_hypothesis (list[Annotation]): hypothesis annotations for score calculation

    Returns:
        DER (float): Diarization Error Rate
        CER (float): Confusion Error Rate
        FA (float): False Alarm rate
        MISS (float): Missed Detection rate

    < Caveat >
    Unlike md-eval.pl, the "no score" collar in pyannote.metrics is the total collar
    length (left plus right side of each reference boundary). Therefore, if a 0.25s
    collar is applied in md-eval.pl, 0.5s should be passed to pyannote.metrics.

    """
    metric = DiarizationErrorRate(collar=0.5, skip_overlap=True)

    for reference, hypothesis in zip(all_reference, all_hypothesis):
        # Each call accumulates the per-file error components inside the metric object.
        metric(reference, hypothesis, detailed=True)

    # abs(metric) returns the DER accumulated over all files; the individual
    # components are normalized by the total reference speech duration.
    DER = abs(metric)
    CER = metric['confusion'] / metric['total']
    FA = metric['false alarm'] / metric['total']
    MISS = metric['missed detection'] / metric['total']

    # Clear accumulated statistics so the metric object can be reused.
    metric.reset()

    return DER, CER, FA, MISS
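
A minimal usage sketch (assumed setup: pyannote.core is installed; the file URI,
segment boundaries, and speaker labels below are illustrative and not from the
original source):

from pyannote.core import Annotation, Segment

# Reference: who actually spoke, and when.
reference = Annotation(uri='session_01')
reference[Segment(0.0, 10.0)] = 'speaker_A'
reference[Segment(10.0, 20.0)] = 'speaker_B'

# Hypothesis: what the diarization system predicted.
hypothesis = Annotation(uri='session_01')
hypothesis[Segment(0.0, 11.0)] = 'spk_0'
hypothesis[Segment(11.0, 20.0)] = 'spk_1'

DER, CER, FA, MISS = get_DER([reference], [hypothesis])
print(f"DER={DER:.3f} CER={CER:.3f} FA={FA:.3f} MISS={MISS:.3f}")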
Example #2
from pyannote.metrics.diarization import DiarizationErrorRate


def get_DER(all_reference, all_hypothesis):
    """
    Calculates DER, CER, FA and MISS.

    Args:
        all_reference (list[Annotation]): reference annotations for score calculation
        all_hypothesis (list[Annotation]): hypothesis annotations for score calculation

    Returns:
        DER (float): Diarization Error Rate
        CER (float): Confusion Error Rate
        FA (float): False Alarm rate
        MISS (float): Missed Detection rate

    """
    metric = DiarizationErrorRate(collar=0.25, skip_overlap=True)

    for reference, hypothesis in zip(all_reference, all_hypothesis):
        metric(reference, hypothesis, detailed=True)

    DER = abs(metric)
    CER = metric['confusion'] / metric['total']
    FA = metric['false alarm'] / metric['total']
    MISS = metric['missed detection'] / metric['total']

    metric.reset()

    return DER, CER, FA, MISS