Example 1
    def score_plda(self):
        # Load the PLDA meta-parameters: global mean, eigenvoice matrix F,
        # eigenchannel matrix G and residual covariance Sigma
        self.plda_mean, self.plda_f, self.plda_g, self.plda_sigma = read_plda_hdf5(self.file_name)
        # Build an all-vs-all trial list over the same set of i-vectors
        ndx = Ndx(models=self.ivectors.modelset, testsegs=self.ivectors.modelset)

        self.scores = PLDA_scoring(self.ivectors, self.ivectors, ndx,
                                   self.plda_mean, self.plda_f, self.plda_g,
                                   self.plda_sigma, p_known=0.0)

        return self.scores
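
These snippets appear to use SIDEKIT's i-vector scoring API (Ndx, Scores, PLDA_scoring and friends). PLDA models an i-vector as mean + F h + G x + noise, and PLDA_scoring fills the Scores object with, for every trial in ndx, the log-likelihood ratio between the hypothesis that both trial sides share the latent speaker factor h and the hypothesis that they do not. Passing p_known=0.0 (the default) skips the open-set-identification normalisation, so the result is plain verification scoring. Marginally the model is Gaussian with between-class covariance F F' and within-class covariance G G' + Sigma, so the ratio has the same closed form as the two-covariance sketch shown after Example 2.
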
Example 2
    def score_two_covariance(self):
        # Load the within-class (W) and between-class (B) covariance
        # matrices estimated during training
        W = read_key_hdf5(self.model_filename, 'two_covariance/within_covariance')
        B = read_key_hdf5(self.model_filename, 'two_covariance/between_covariance')
        # All-vs-all trial list over the same set of i-vectors
        ndx = Ndx(models=self.ivectors.modelset, testsegs=self.ivectors.modelset)
        self.scores = two_covariance_scoring(self.ivectors, self.ivectors, ndx, W, B, check_missing=False)

        return self.scores
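
Under the two-covariance model, a speaker mean is drawn from N(mu, B) and each i-vector from N(speaker_mean, W); the verification score is the log-likelihood ratio between the shared-mean and independent-mean hypotheses. Below is a minimal per-trial sketch of that ratio (the function name is illustrative and inputs are assumed already mean-centred); it shows the model, not two_covariance_scoring's vectorised internals:

import numpy as np
from scipy.stats import multivariate_normal as mvn

def two_covariance_llr(x1, x2, W, B):
    # Same speaker: x1 and x2 share a latent mean y ~ N(0, B), making them
    # jointly Gaussian with marginal covariance B + W and cross-covariance B
    d = x1.shape[0]
    joint_cov = np.block([[B + W, B], [B, B + W]])
    log_p_same = mvn.logpdf(np.concatenate([x1, x2]), np.zeros(2 * d), joint_cov)
    # Different speakers: independent draws, each marginally N(0, B + W)
    log_p_diff = (mvn.logpdf(x1, np.zeros(d), B + W)
                  + mvn.logpdf(x2, np.zeros(d), B + W))
    return log_p_same - log_p_diff
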
Example 3
    def score_cosine(self, use_wccn=True):
        # Optionally load the Cholesky factor of the WCCN matrix, used to
        # whiten within-class variability before the cosine comparison
        wccn = None
        if use_wccn:
            wccn = read_key_hdf5(self.model_filename, 'wccn_choleski')
        # All-vs-all trial list over the same set of i-vectors
        ndx = Ndx(models=self.ivectors.modelset, testsegs=self.ivectors.modelset)
        self.scores = cosine_scoring(self.ivectors, self.ivectors, ndx, wccn=wccn, check_missing=False)

        return self.scores
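
The computation behind cosine_scoring is simple enough to sketch directly: after an optional projection through the WCCN Cholesky factor, every pair of i-vectors is scored by cosine similarity. A minimal NumPy version (array and function names are illustrative; rows are i-vectors):

import numpy as np

def cosine_scores(enroll, test, wccn=None):
    # Optional WCCN projection to whiten within-speaker variability
    if wccn is not None:
        enroll = enroll.dot(wccn)
        test = test.dot(wccn)
    # L2-normalise rows so a plain dot product equals the cosine similarity
    enroll = enroll / np.linalg.norm(enroll, axis=1, keepdims=True)
    test = test / np.linalg.norm(test, axis=1, keepdims=True)
    return enroll.dot(test.T)   # (n_enroll, n_test) score matrix
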
Example 4
    def score_mahalanobis(self, use_covariance=True):
        # Use the learned Mahalanobis metric; otherwise fall back to the
        # identity, which reduces the score to a plain Euclidean comparison
        if use_covariance:
            m = read_key_hdf5(self.model_filename, 'mahalanobis_matrix')
        else:
            m = numpy.identity(self.tv.shape[2])
        # All-vs-all trial list over the same set of i-vectors
        ndx = Ndx(models=self.ivectors.modelset, testsegs=self.ivectors.modelset)
        self.scores = mahalanobis_scoring(self.ivectors, self.ivectors, ndx, m, check_missing=False)

        return self.scores
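
Here each pair is scored by a negated squared Mahalanobis distance under the metric m, so larger scores mean closer i-vectors; with m set to the identity this degenerates to a Euclidean comparison. A rough sketch of the idea (illustrative, not SIDEKIT's exact routine; the -0.5 scaling is a common convention):

import numpy as np

def mahalanobis_scores(enroll, test, m):
    # Score every enroll/test pair by -0.5 * (e - t)' m (e - t)
    scores = np.empty((enroll.shape[0], test.shape[0]))
    for i, e in enumerate(enroll):
        diff = test - e                       # (n_test, dim)
        scores[i] = -0.5 * np.sum(diff.dot(m) * diff, axis=1)
    return scores
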
Example 5
    def score_plda_slow(self):
        self.plda_mean, self.plda_f, self.plda_g, self.plda_sigma = read_plda_hdf5(self.model_filename)
        local_ndx = Ndx(models=self.ivectors.modelset, testsegs=self.ivectors.modelset)

        enroll_copy = copy.deepcopy(self.ivectors)

        # Center the i-vectors around the PLDA mean
        enroll_copy.center_stat1(self.plda_mean)

        # Compute temporary matrices
        invSigma = np.linalg.inv(self.plda_sigma)
        I_iv = np.eye(self.plda_mean.shape[0], dtype='float')
        I_ch = np.eye(self.plda_g.shape[1], dtype='float')
        I_spk = np.eye(self.plda_f.shape[1], dtype='float')
        A = np.linalg.inv(self.plda_g.T.dot(invSigma).dot(self.plda_g) + I_ch)
        B = self.plda_f.T.dot(invSigma).dot(I_iv - self.plda_g.dot(A).dot(self.plda_g.T).dot(invSigma))
        K = B.dot(self.plda_f)
        K1 = np.linalg.inv(K + I_spk)
        K2 = np.linalg.inv(2 * K + I_spk)

        # Log-determinant constant of the likelihood ratio:
        # (1/2) log|K2| - log|K1|
        alpha1 = np.linalg.slogdet(K1)[1]
        alpha2 = np.linalg.slogdet(K2)[1]
        constant = alpha2 / 2.0 - alpha1

        # Prepare the all-vs-all score structure
        n_seg = enroll_copy.segset.shape[0]
        scores = Scores()
        scores.scoremat = np.zeros((n_seg, n_seg))
        scores.modelset = enroll_copy.modelset
        scores.segset = enroll_copy.modelset
        scores.scoremask = local_ndx.trialmask

        # Project the i-vectors into the speaker-factor space
        enroll_tmp = B.dot(enroll_copy.stat1.T)

        # Compute verification scores
        # Loop on the models
        for model_idx in range(enroll_copy.modelset.shape[0]):

            # Model-side quadratic term, reused for every test segment
            s2 = enroll_tmp[:, model_idx].dot(K1).dot(enroll_tmp[:, model_idx])

            # Sum of the model and every test projection, used by the
            # same-speaker term of the likelihood ratio
            mod_plus_test_seg = enroll_tmp + np.atleast_2d(enroll_tmp[:, model_idx]).T

            tmp1 = enroll_tmp.T.dot(K1)
            tmp2 = mod_plus_test_seg.T.dot(K2)

            for seg_idx in range(model_idx, enroll_copy.segset.shape[0]):
                s1 = tmp1[seg_idx, :].dot(enroll_tmp[:, seg_idx])
                s3 = tmp2[seg_idx, :].dot(mod_plus_test_seg[:, seg_idx])
                # The trial list is symmetric, so fill both triangles at once
                scores.scoremat[model_idx, seg_idx] = (s3 - s1 - s2) / 2. + constant
                scores.scoremat[seg_idx, model_idx] = (s3 - s1 - s2) / 2. + constant
        self.scores = scores
        return scores
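
Reading the math in this slow reference version: B projects a centred i-vector into the speaker-factor space, K1 = inv(K + I) acts as the posterior covariance of that factor given one recording, and K2 = inv(2K + I) given two recordings assumed to share it. The score (s3 - s1 - s2) / 2 + constant is therefore the same-vs-different-speaker log-likelihood ratio, and since the trial list is all-vs-all over a single i-vector set, the matrix can be filled symmetrically. Run against the same PLDA model file, this loop should agree with score_plda from Example 1 up to numerical precision, which is what makes it useful as a reference implementation.
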