def test_minDCF():
    """Check minDCF on two toy score configurations.

    Case 1: every negative scores above every positive, so the best
    operating point rejects everything (p_miss=1, p_fa=0), giving
    minDCF == p_target (default 0.01) with the threshold above all scores.
    Case 2: scores are perfectly separable, so minDCF must be 0 and the
    decision threshold must lie strictly between the two score groups.
    """
    from speechbrain.utils.metric_stats import minDCF

    # Worst case: all negatives above all positives.
    positive_scores = torch.tensor([0.1, 0.2, 0.3])
    negative_scores = torch.tensor([0.4, 0.5, 0.6])
    min_dcf, threshold = minDCF(positive_scores, negative_scores)
    # Bug fix: the original `(0.01 - min_dcf) < 1e-4` passed for ANY
    # min_dcf >= 0.01 (the difference just went negative), so it could
    # never detect an overly large minDCF. Compare the absolute
    # deviation from the expected value instead.
    assert abs(min_dcf - 0.01) < 1e-4
    assert threshold >= 0.6

    # Perfectly separable case: zero cost, threshold between the groups.
    positive_scores = torch.tensor([0.4, 0.5, 0.6])
    negative_scores = torch.tensor([0.1, 0.2, 0.3])
    min_dcf, threshold = minDCF(positive_scores, negative_scores)
    assert min_dcf == 0
    assert threshold > 0.3 and threshold < 0.4
def verification_performance(scores_plda):
    """Compute EER and minDCF from PLDA verification scores.

    Reads the trial list from the module-level ``veri_file_path`` — one
    trial per line in the form ``<label> <enrol_utt> <test_utt>`` — looks
    up each trial's score in the PLDA score matrix, and evaluates the
    metrics with the module-level ``EER`` and ``minDCF`` helpers.

    Arguments
    ---------
    scores_plda : object
        PLDA scoring result exposing ``modelset``, ``segset`` and
        ``scoremat`` (model/segment id arrays plus the score matrix).

    Returns
    -------
    tuple
        ``(eer, min_dcf)`` as returned by ``EER`` and ``minDCF``.
    """
    positive_scores = []
    negative_scores = []
    # Bug fix: open the trial file with a context manager so the handle
    # is always closed (the original `for line in open(...)` leaked it).
    # Also dropped the `ids`/`labels` lists the original built but never
    # used.
    with open(veri_file_path) as trial_file:
        for line in trial_file:
            lab = int(line.split(" ")[0].rstrip().split(".")[0].strip())
            enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip()
            test_id = line.split(" ")[2].rstrip().split(".")[0].strip()

            # Assuming enrol_id and test_id are unique
            i = int(numpy.where(scores_plda.modelset == enrol_id)[0][0])
            j = int(numpy.where(scores_plda.segset == test_id)[0][0])

            s = float(scores_plda.scoremat[i, j])
            if lab == 1:
                positive_scores.append(s)
            else:
                negative_scores.append(s)

    # Drop the local reference to the (potentially large) score matrix
    # before the metric computation.
    del scores_plda

    # Final EER computation
    eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
    min_dcf, th = minDCF(torch.tensor(positive_scores),
                         torch.tensor(negative_scores))
    return eer, min_dcf
# Beispiel #3 (scraped-example separator)
# 0
    def verification_performance(self,):
        """Evaluate the model on the standard VoxCeleb verification split.

        Embeds the enrollment and test sets, scores the official trial
        list (``<data_folder>/meta/veri_test.txt``), and returns the EER
        (as a percentage) together with the minDCF.
        """
        # Embed every enrollment and test utterance.
        print("Computing enroll/test embeddings...")
        self.enrol_dict = self.compute_embeddings_loop(enrol_dataloader)
        self.test_dict = self.compute_embeddings_loop(test_dataloader)

        print("Computing EER..")
        # Load the official trial list shipped with the dataset metadata.
        trial_path = os.path.join(
            self.hparams.data_folder, "meta", "veri_test.txt"
        )
        with open(trial_path) as trial_file:
            trials = [row.rstrip() for row in trial_file]

        pos, neg = self.get_verification_scores(trials)
        # The embedding dicts are no longer needed once scored.
        del self.enrol_dict, self.test_dict

        eer, _th = EER(torch.tensor(pos), torch.tensor(neg))
        min_dcf, _th = minDCF(torch.tensor(pos), torch.tensor(neg))
        # EER is conventionally reported as a percentage.
        return eer * 100, min_dcf
# Beispiel #4 (scraped-example separator)
# 0
    # This fragment belongs to a larger evaluation routine (its enclosing
    # `def` is outside this view): it embeds the data, scores the standard
    # verification split, and logs EER / minDCF.

    # Computing  enrollment and test embeddings
    logger.info("Computing enroll/test embeddings...")

    # First run
    enrol_dict = compute_embedding_loop(enrol_dataloader)
    test_dict = compute_embedding_loop(test_dataloader)

    # Second run (normalization stats are more stable)
    # NOTE(review): the second pass overwrites the first; presumably the
    # first pass only warms up running normalization statistics inside
    # compute_embedding_loop — confirm.
    enrol_dict = compute_embedding_loop(enrol_dataloader)
    test_dict = compute_embedding_loop(test_dataloader)

    if "score_norm" in params:
        # Cohort embeddings for score normalization; train_dict is unused
        # below, so presumably get_verification_scores reaches it through
        # module scope — verify.
        train_dict = compute_embedding_loop(train_dataloader)

    # Compute the EER
    logger.info("Computing EER..")
    # Reading standard verification split
    with open(veri_file_path) as f:
        veri_test = [line.rstrip() for line in f]

    positive_scores, negative_scores = get_verification_scores(veri_test)
    # Embedding dicts can be large; free them before metric computation.
    del enrol_dict, test_dict

    eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
    logger.info("EER(%%)=%f", eer * 100)

    min_dcf, th = minDCF(torch.tensor(positive_scores),
                         torch.tensor(negative_scores))
    # NOTE(review): the logged value is min_dcf * 100 although the label
    # says "minDCF" — confirm the intended units (EER above is scaled the
    # same way, but minDCF is usually reported unscaled).
    logger.info("minDCF=%f", min_dcf * 100)