Example #1
    def init_with_logreg(self, scores, same_spk, valid, ptar, std_for_mats=0):
        # Initialize the calibration parameters with an affine transform (a, b)
        # trained by linear logistic regression on the valid trials

        scores = scores[valid]
        labels = same_spk[valid]

        tar = scores[labels == 1]
        non = scores[labels == 0]

        a, b = calibration.logregCal(tar, non, ptar, return_params=True)

        if self.is_durdep:
            # Dur-dep calibration (might also have an si-dependent stage)
            self.durdep_alpha.init_with_constant(a, std_for_mats)
            self.durdep_beta.init_with_constant(b, std_for_mats)
            if self.is_sidep:
                self.sidep_alpha.init_with_constant(1.0, std_for_mats)
                self.sidep_beta.init_with_constant(0.0, std_for_mats)

        elif not self.is_sidep:
            # Global calibration
            utils.replace_state_dict(self, {'alpha': a, 'beta': b})

        else:
            # Only si-dependent cal
            self.sidep_alpha.init_with_constant(a, std_for_mats)
            self.sidep_beta.init_with_constant(b, std_for_mats)
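
The (a, b) pair learned above defines an affine map from raw scores to calibrated log-likelihood ratios. A minimal sketch of how such parameters are typically applied downstream (the function names below are illustrative, not part of the class above): for well-calibrated LLRs and equal error costs, the Bayes decision for a target prior ptar thresholds at -logit(ptar).

    import numpy as np

    def apply_calibration(scores, a, b):
        # Affine calibration: raw score -> calibrated log-likelihood ratio
        return a * scores + b

    def bayes_decisions(llrs, ptar):
        # With equal costs, the Bayes threshold on LLRs is -logit(ptar)
        threshold = -np.log(ptar / (1.0 - ptar))
        return llrs > threshold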
Example #2
    def init_with_lda(self, x, class_ids, init_params, complement=True, sec_ids=None, gaussian_backend=False):

        if self.has_bias is False:
            raise Exception("Cannot initialize this component with init_with_lda because it was created with bias=False")

        weights = compute_class_weights(class_ids, sec_ids, init_params.get('balance_by_domain'))

        BCov, WCov, GCov, mu, mu_per_class, _ = compute_lda_model(x, class_ids, weights)

        if gaussian_backend:
            # Linear scoring for a Gaussian backend with shared within-class
            # covariance: W = WCov^{-1} M^T and b_j = -0.5 * mu_j^T WCov^{-1} mu_j
            W = linalg.solve(WCov, mu_per_class.T, sym_pos=True)
            b = -0.5 * (W * mu_per_class.T).sum(0)

        else:
            # Solve the generalized eigenvalue problem BCov v = lambda WCov v and
            # sort the eigenvectors by decreasing eigenvalue (most discriminative
            # directions first)
            evals, evecs = linalg.eigh(BCov, WCov)
            evecs = evecs[:, np.argsort(evals)[::-1]]

            lda_dim = self.W.shape[1]
            if complement:
                W = evecs[:, :lda_dim]
            else:
                W = evecs[:, -lda_dim:]

            # Normalize each dimension so that output features will have variance 1.0
            if init_params.get('variance_norm_lda', True):
                W = W @ (np.diag(1. / np.sqrt(np.diag(W.T @ GCov @ W))))

            # Finally, estimate the shift so that output features have mean 0.0 
            mu = mu @ W
            b = -mu

        utils.replace_state_dict(self, {'W': W, 'b': b})
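
A quick sanity check for the LDA branch above, reusing x plus the W and b just computed (a sketch, assuming roughly uniform class weights): since b = -mu @ W, the projected data is approximately centered, and with variance_norm_lda enabled each output dimension has approximately unit variance.

    y = x @ W + b
    print(y.mean(axis=0))  # close to zero: the weighted mean maps to the origin
    print(y.var(axis=0))   # close to one when variance_norm_lda is True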
Example #3
    def init_with_constant(self, k, std_for_mats=0):
        # Initialize k with a given value and, optionally, the L, G and C
        # matrices with samples from a normal distribution

        utils.replace_state_dict(self, {'k': k})
        if std_for_mats > 0:
            nn.init.normal_(self.L, 0.0, std_for_mats)
            nn.init.normal_(self.G, 0.0, std_for_mats)
            nn.init.normal_(self.C, 0.0, std_for_mats)
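
A hypothetical usage sketch, where component stands for an instance of the class this method belongs to: set k deterministically and, optionally, give L, G and C a small random perturbation so they can move away from exactly zero during training.

    component.init_with_constant(0.0)                     # k = 0; L, G, C untouched
    component.init_with_constant(0.0, std_for_mats=1e-3)  # k = 0; L, G, C ~ N(0, 1e-3**2)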
Example #4
    def init_with_plda(self, x, speaker_ids, init_params, domain_ids=None):
        # Compute a PLDA model with the input data. By default (plda_em_its = 0),
        # the model is approximated with just the usual initialization, without
        # running EM iterations

        weights = compute_class_weights(speaker_ids, domain_ids,
                                        init_params.get('balance_by_domain'))

        # Debugging of weight usage in PLDA:
        # Repeat the data from the first 5000 speakers twice, either explicitly or through the weights
        # These two models should be identical (and they are!)
        #sela = speaker_ids<5000
        #selb = speaker_ids>=5000
        #x2 = np.concatenate([x[sela],x[sela],x[selb]])
        #speaker_ids2 = np.concatenate((speaker_ids[sela], speaker_ids[sela]+np.max(speaker_ids)+1, speaker_ids[selb]))
        #weights2 = np.ones(len(np.unique(speaker_ids2)))
        #BCov2, WCov2, mu2 = compute_2cov_plda_model(x2, speaker_ids2, weights2, 10)
        #weights3 = weights.copy()
        #weights3[0:5000] *= 2
        #BCov3, WCov3, mu3 = compute_2cov_plda_model(x, speaker_ids, weights3, 10)
        #assert np.allclose(BCov2,BCov3)
        #assert np.allclose(WCov2,WCov3)
        #assert np.allclose(mu2, mu3)

        # Bi and Wi are the between and within covariance matrices and mu is the global (weighted) mean
        Bi, Wi, mu = compute_2cov_plda_model(x, speaker_ids, weights,
                                             init_params.get('plda_em_its', 0))

        # Equations (14) and (16) in Cumani's paper
        # Note that the paper has an error in the formula for k (a 1/2 missing before k_tilde)
        # that is fixed in the equations below
        # To compute L_tilde and G_tilde we use the following equality:
        # inv( inv(C) + n*inv(D) ) == C @ inv(D + n*C) @ D == C @ solve(D + n*C, D)
        B = utils.CholInv(Bi)
        W = utils.CholInv(Wi)
        Bmu = B @ mu.T
        L_tilde = Bi @ np.linalg.solve(Wi + 2 * Bi, Wi)
        G_tilde = Bi @ np.linalg.solve(Wi + Bi, Wi)
        WtGL = W @ (L_tilde - G_tilde)
        logdet_L_tilde = np.linalg.slogdet(L_tilde)[1]
        logdet_G_tilde = np.linalg.slogdet(G_tilde)[1]
        logdet_B = B.logdet()
        k_tilde = -2.0 * logdet_G_tilde + logdet_L_tilde - logdet_B + mu @ Bmu

        k = 0.5 * k_tilde + 0.5 * Bmu.T @ (L_tilde - 2 * G_tilde) @ Bmu
        L = 0.5 * (W @ (W @ L_tilde)).T
        G = 0.5 * (W @ WtGL).T
        C = (WtGL @ Bmu)

        state_dict = {'L': L, 'G': G, 'C': C, 'k': k.squeeze()}

        utils.replace_state_dict(self, state_dict)
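
For reference, the L, G, C and k stored above parameterize the pairwise verification score as a quadratic form in the two embeddings. The sketch below assumes the functional form from Cumani's paper; the exact conventions of the surrounding class may differ.

    import numpy as np

    def plda_llr(x1, x2, L, G, C, k):
        # Quadratic scoring function defined by the state dict above:
        # s(x1, x2) = x1' L x1 + x2' L x2 + 2 x1' G x2 + C'(x1 + x2) + k
        return (x1 @ L @ x1 + x2 @ L @ x2
                + 2.0 * (x1 @ G @ x2)
                + np.ravel(C) @ (x1 + x2) + k)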
Example #5
    def init_with_plda(self, x, speaker_ids, init_params, domain_ids=None):
        # Compute a PLDA model with the input data, approximating the
        # model with just the usual initialization, without the EM iterations

        # Get the within- and between-class covariance matrices.
        # WCov is the covariance of the noise term in SPLDA and BCov = inverse(V V^t)

        weights = compute_sample_weights(speaker_ids, domain_ids, init_params)
                
        BCov, WCov, _, mu, _ = compute_lda_model(x, speaker_ids, weights)

        Binv = np.linalg.inv(BCov)
        Winv = np.linalg.inv(WCov)

        # Equations (14) and (16) in Cumani's paper
        # Note that the paper has a couple of errors in the derivations (in the
        # formulas for k_tilde and k) which are fixed in the equations below
        Bmu               = np.dot(Binv,mu)
        L_tilde           = np.linalg.inv(Binv+2*Winv)
        G_tilde           = np.linalg.inv(Binv+Winv)
        WtGL              = np.dot(Winv.T,L_tilde-G_tilde)
        _, logdet_L_tilde = np.linalg.slogdet(L_tilde)
        _, logdet_G_tilde = np.linalg.slogdet(G_tilde)
        _, logdet_Binv    = np.linalg.slogdet(Binv)
        k_tilde           = -2.0*logdet_G_tilde - logdet_Binv + logdet_L_tilde + np.dot(mu.T, Bmu) 

        k = 0.5 * k_tilde + 0.5 * np.dot(Bmu.T, np.dot(G_tilde - 2*L_tilde, Bmu))
        L = 0.5 * np.dot(Winv.T,np.dot(L_tilde,Winv))
        G = 0.5 * np.dot(WtGL,Winv)
        C = np.dot(WtGL,Bmu)[:,np.newaxis]

        if init_params.get('norm_plda_params'):
            # Divide all params by k so that we get rid of that scale at init
            # It will be compensated by the calibration params anyway. This is to make 
            # the params more comparable across different initializations so that
            # regularization works similarly.
            L /= k
            G /= k
            C /= k
            k = 1.0

        state_dict = {'L': L, 'G': G, 'C': C, 'k': k}

        utils.replace_state_dict(self, state_dict)
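
The norm_plda_params option works because the score is q + k, with q the quadratic-plus-linear part: dividing L, G and C by k and resetting k to 1 gives q/k + 1 = (q + k)/k, i.e. every score is scaled by exactly 1/k (for scalar nonzero k), which a downstream affine calibration absorbs. A sketch, reusing plda_llr from the note after Example #4 and any embedding pair x1, x2:

    s_raw  = plda_llr(x1, x2, L, G, C, k)
    s_norm = plda_llr(x1, x2, L / k, G / k, C / k, 1.0)
    assert np.isclose(s_norm, s_raw / k)  # identical scores up to a global scale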
Example #6
    def init_with_weighted_means(self, data, weights):
        # Each row of W is the weighted mean of the data under the
        # corresponding column of weights

        Wl = list()
        for i in np.arange(self.W.shape[0]):
            Wl.append(np.dot(data.T, weights[:, i]) / np.sum(weights[:, i]))
        utils.replace_state_dict(self, {'W': np.c_[Wl]})
Example #7
    def init_with_constant(self, k, std_for_mats=0):
        # Initialize k with a given value and, optionally, the L, G and C
        # matrices with samples from a normal distribution
        if std_for_mats > 0:
            self.init_random(std_for_mats)
        utils.replace_state_dict(self, {'k': k})
Example #8
    def init_with_weighted_means(self, data, weights):
        # Each column of W is the weighted mean of the data under the
        # corresponding column of weights

        Wl = list()
        for w in weights.T:
            Wl.append(np.dot(data.T, w) / np.sum(w))
        utils.replace_state_dict(self, {'W': np.c_[Wl].T})
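
Both variants of init_with_weighted_means (Examples #6 and #8) compute the same weighted means and differ only in the orientation of W. A self-contained numpy sketch of the underlying computation, with illustrative shapes:

    import numpy as np

    data = np.random.randn(100, 8)    # (n_samples, feature_dim)
    weights = np.random.rand(100, 3)  # (n_samples, n_components), soft counts

    # Weighted mean of the data for each weight column, stored as columns of W
    W = (data.T @ weights) / weights.sum(axis=0)  # (feature_dim, n_components)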