Code Example #1
def validation(args, model, device, val_loader, val_scp, val_utt2label):
    """Compute the average loss and utterance-level EER on the validation set."""
    logger.info("Starting Validation")
    val_loss, val_scores = compute_loss(model, device, val_loader)
    val_preds, val_labels = utt_scores(val_scores, val_scp, val_utt2label)
    val_eer = compute_eer(val_labels, val_preds)

    logger.info('===> Validation set: Average loss: {:.4f}\tEER: {:.4f}\n'.format(
                val_loss, val_eer))
    return val_loss, val_eer
Code Example #2
def prediction(args, model, device, eval_loader, eval_scp, eval_utt2label):
    """Compute the average loss and utterance-level EER on the evaluation set."""
    logger.info("Starting evaluation")
    eval_loss, eval_scores = compute_loss(model, device, eval_loader)
    eval_preds, eval_labels = utt_scores(eval_scores, eval_scp, eval_utt2label)
    eval_eer = compute_eer(eval_labels, eval_preds)

    logger.info("===> Final predictions done. Here is a snippet")
    logger.info('===> Evaluation set: Average loss: {:.4f}\tEER: {:.4f}\n'.format(
                eval_loss, eval_eer))

    return eval_loss, eval_eer
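
Code Examples #1 and #2 both rely on a utt_scores helper that is not reproduced on this page. Below is a minimal, hypothetical sketch of what it might look like, assuming it averages frame-level scores into one score per utterance using the frame counts from the scp file, mirroring the aggregation in Code Example #4; the ako helpers and the return types are assumptions.

import numpy as np
import ako  # repo-local I/O helpers (import path assumed)

def utt_scores(scores, scp, utt2label):
    """Hypothetical sketch: collapse frame-level scores into one averaged
    score per utterance, paired with each utterance's true label."""
    utt2len = ako.read_key_len(scp)    # frames per utterance
    key2label = ako.read_key_label(utt2label)
    key_list = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        preds.append(np.average(scores[idx:idx + frames_per_utt]))
        idx += frames_per_utt
        labels.append(key2label[key])
    return np.array(preds), np.array(labels)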
Code Example #3
import numpy as np

def utt_eer(scores, utt2len, utt2label, key_list, threshold):
    """Return the EER using a per-utterance majority vote over frame-level decisions."""
    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        # majority vote
        num_total = frames_per_utt
        num_genuine = np.sum(scores[idx:idx + frames_per_utt] >= threshold)
        num_spoof = num_total - num_genuine
        idx = idx + frames_per_utt
        if num_genuine > num_spoof:
            preds.append(1)
        else:
            preds.append(0)
        labels.append(utt2label[key])

    return compute_eer(labels, preds)
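
compute_eer itself is defined elsewhere in the repo. Because utt_eer feeds it hard 0/1 decisions, a minimal stand-in (an assumption, not the repo's implementation) could report the mean of the false-acceptance and false-rejection rates, a common proxy for the EER when only hard decisions are available.

import numpy as np

def compute_eer(labels, preds):
    """Hypothetical stand-in for hard 0/1 decisions: average the
    false-acceptance rate (spoof accepted as genuine, label 0) and
    the false-rejection rate (genuine rejected, label 1)."""
    labels = np.asarray(labels)
    preds = np.asarray(preds)
    far = np.mean(preds[labels == 0] == 1)
    frr = np.mean(preds[labels == 1] == 0)
    return (far + frr) / 2.0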
Code Example #4
import numpy as np
import ako  # repo-local I/O helpers (import path assumed)

def compute_utt_eer(scores, scp, utt2label, threshold):
    """Utterance-based EER: average each utterance's frame scores, then threshold."""
    utt2len = ako.read_key_len(scp)            # frames per utterance
    utt2label = ako.read_key_label(utt2label)  # rebinds the argument: label-file path -> {utt: label}
    key_list = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx + frames_per_utt])
        idx = idx + frames_per_utt
        if avg_scores < threshold:
            preds.append(0)
        else:
            preds.append(1)
        labels.append(utt2label[key])

    eer = compute_eer(labels, preds)
    confuse_mat = compute_confuse(labels, preds)
    return eer, confuse_mat
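
compute_confuse is likewise defined elsewhere; a minimal stand-in built on scikit-learn (an assumption, the repo may compute the matrix by hand) could be:

from sklearn.metrics import confusion_matrix

def compute_confuse(labels, preds):
    """Hypothetical stand-in: 2x2 confusion matrix with rows as the true
    class and columns as the predicted class (0 = spoof, 1 = genuine)."""
    return confusion_matrix(labels, preds, labels=[0, 1])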
Code Example #5
def f_neg(threshold):
    # SciPy minimizes the objective, so return the EER directly
    # (true_labels and predictions come from the enclosing scope)
    return compute_eer(true_labels, predictions >= threshold)
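
The closure is meant to be handed to a SciPy optimizer, which searches for the threshold that minimizes the EER over the true_labels and predictions captured from the enclosing scope. A usage sketch; the 0.5 starting point is an assumption:

from scipy.optimize import fmin

# Nelder-Mead search for the EER-minimizing threshold;
# x0=0.5 is an assumed starting point, not taken from the repo
best_threshold = fmin(f_neg, x0=0.5, disp=False)[0]
print('optimal threshold: {:.4f}'.format(best_threshold))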