Code example #1
 def computeMeanCovar(self):
     weights = np.exp(self.logweights - logsumexp(self.logweights))
     mu = np.dot(self.mus, weights).reshape((self.dim, 1))
     Ex2 = np.reshape(np.dot(
         self.Sigmas, weights), (self.dim, self.dim)) + np.asarray(
             np.dot(np.multiply(self.mus, weights), self.mus.T))
     Sigma = Ex2 - np.dot(mu, mu.T)
     return (mu, Sigma)
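
Code example #1 is the standard moment-matching computation for a Gaussian mixture: the mixture mean is the weighted sum of the component means, and the mixture covariance is the weighted sum of (component covariance plus outer product of the component mean) minus the outer product of the mixture mean. Below is a minimal standalone sketch of the same computation using made-up arrays rather than the class above.

import numpy as np
from scipy.special import logsumexp

# Hypothetical standalone inputs: a 2-D mixture with 3 components.
dim, N = 2, 3
logweights = np.log(np.array([0.5, 0.3, 0.2]))
mus = np.random.randn(dim, N)                  # column i is the mean of component i
Sigmas = np.stack([np.eye(dim)] * N, axis=2)   # Sigmas[:, :, i] is component i's covariance

# mu    = sum_i w_i mu_i
# Sigma = sum_i w_i (Sigma_i + mu_i mu_i^T) - mu mu^T
weights = np.exp(logweights - logsumexp(logweights))
mu = mus @ weights
Ex2 = (Sigmas * weights).sum(axis=2) + (mus * weights) @ mus.T
Sigma = Ex2 - np.outer(mu, mu)
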
Code example #2
 def logProb(self, x):
     N = self.numComps()
     if len(x.shape) == 1:
         x = x.reshape((self.dim, -1))
     lps = np.empty((N, x.shape[1]))
     for i in range(N):
         lps[i, :] = gaussian_logprob(
             x, self.getMu(i), self.Sigmas[:, :, i]) + self.logweights[i]
     return logsumexp(lps, 0)
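
gaussian_logprob is not defined in these excerpts. Judging from the call sites (columns of x are data points, and a precomputed Cholesky factor may be passed), a plausible Cholesky-based sketch is the following; this is an assumption about the missing helper, not the original implementation.

import numpy as np

def gaussian_logprob(x, mu, Sigma, cholSigma=None):
    # Hypothetical helper: log N(x; mu, Sigma) evaluated at each column of x.
    dim, n = x.shape
    if cholSigma is None:
        cholSigma = np.linalg.cholesky(Sigma)
    # Solve L z = (x - mu) so that sum(z * z) gives the Mahalanobis term.
    z = np.linalg.solve(cholSigma, x - mu.reshape((dim, 1)))
    logdet = 2.0 * np.sum(np.log(np.diag(cholSigma)))
    return -0.5 * (dim * np.log(2.0 * np.pi) + logdet + np.sum(z * z, axis=0))
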
Code example #3
 def normalizeWeights(self):
     N = self.numComps()
     if N == 0:
         return -np.inf
     elif N == 1:
         normFactor = self.logweights[0]
         self.logweights[0] = 0
         #            print 'Normalizing single component mixture, normFactor = {0}, lw = {1}'.format(normFactor,self.logweights[0,0])
         return normFactor
     else:
         normFactor = logsumexp(self.logweights)
         assert (np.isfinite(normFactor))
         self.logweights -= normFactor
         normFactor2 = logsumexp(self.logweights)
         if np.abs(normFactor2) > 1e-8:
             normFactor += normFactor2
             self.logweights -= normFactor2
         return normFactor
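
The second logsumexp pass in code example #3 compensates for the tiny floating-point residual that can remain after subtracting the first normalization constant. A standalone illustration of the same guard, with made-up weights:

import numpy as np
from scipy.special import logsumexp

logweights = np.random.randn(5)
normFactor = logsumexp(logweights)
logweights -= normFactor
residual = logsumexp(logweights)   # ideally 0.0, in practice on the order of 1e-16
if np.abs(residual) > 1e-8:        # same tolerance as normalizeWeights
    normFactor += residual
    logweights -= residual
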
Code example #4
def klDiv_varit(Phi,
                logPhi,
                logPsi,
                logweights_ref,
                logweights_other,
                KLdivs,
                upd_other_logweights=False):
    # Update phi with psi held fixed:
    #   phi[b, a] is proportional to psi[b, a] * exp(-KLdivs[b, a]),
    #   rescaled over b so that sum_b phi[b, a] = exp(logweights_ref[a]).
    tmp = logPsi - KLdivs
    lsetmp = logsumexp(tmp, 0)
    lsetmp[np.logical_not(np.isfinite(lsetmp))] = 0
    logPhi = logweights_ref.reshape((1, -1)) + (tmp - lsetmp)
    Phi = np.exp(logPhi)
    if upd_other_logweights:
        logweights_other = logsumexp(logPhi, 1)
    # Update psi with phi held fixed:
    #   psi[b, a] = exp(logweights_other[b]) * phi[b, a] / sum_a' phi[b, a'].
    logPsi = logweights_other.reshape(
        (-1, 1)) + logPhi - logsumexp(logPhi, 1).reshape((-1, 1))

    D_upper = klDiv_var(Phi, logPhi, logPsi, KLdivs)

    if upd_other_logweights:
        return (D_upper, Phi, logPhi, logPsi, logweights_other)
    else:
        return (D_upper, Phi, logPhi, logPsi)
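
klDiv_var itself does not appear in these excerpts. From the way klDiv_varit updates Phi (columns sum to the reference mixture's weights) and Psi (rows sum to the other mixture's weights), it appears to evaluate a variational upper bound on the KL divergence between two mixtures in the style of Hershey and Olsen. A plausible reconstruction, offered as an assumption rather than the original code:

import numpy as np

def klDiv_var(Phi, logPhi, logPsi, KLdivs):
    # Hypothetical reconstruction of the variational upper bound:
    #   D_upper = sum_{b, a} Phi[b, a] * (logPhi[b, a] - logPsi[b, a] + KLdivs[b, a])
    # Entries with Phi == 0 contribute nothing; mask them to avoid 0 * (-inf) = nan.
    terms = logPhi - logPsi + KLdivs
    mask = Phi > 0
    return np.sum(Phi[mask] * terms[mask])
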
Code example #5
    def eval(self, X):
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('the shape of X  is not compatible with self')

        lpr = (log_multivariate_normal_density(
                X, self.means_, self.covars_, self.covariance_type)
               + np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
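
Code example #5 appears to follow the eval method of the old scikit-learn GMM class (log_multivariate_normal_density was a helper in sklearn.mixture). With the current scikit-learn API the same two outputs are available from GaussianMixture; a brief usage sketch, assuming a model fitted on made-up data:

import numpy as np
from sklearn.mixture import GaussianMixture

X = np.random.randn(200, 2)
gmm = GaussianMixture(n_components=3).fit(X)

logprob = gmm.score_samples(X)           # per-sample log density, like `logprob` above
responsibilities = gmm.predict_proba(X)  # per-sample component posteriors
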
Code example #6
 def dlogProb(self, x):
     N = self.numComps()
     origxshape = x.shape
     if len(x.shape) == 1:
         x = x.reshape((self.dim, 1))
     lps = np.empty((N, 1))
     grads = np.empty((N, self.dim))
     for i in range(N):
         cmu = self.getMu(i)
         cSigma = self.Sigmas[:, :, i]
         cholSigma = np.linalg.cholesky(cSigma)
         lps[i, 0] = gaussian_logprob(x, cmu, cSigma,
                                      cholSigma) + self.logweights[i]
         grads[i, :] = gaussian_logprob_grad(x, cmu, cSigma, cholSigma)
     return np.sum(np.multiply(np.exp(lps - logsumexp(lps, 0)), grads),
                   0).reshape(origxshape)
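
gaussian_logprob_grad is also not shown. For a Gaussian, the gradient of the log density with respect to x is -Sigma^{-1} (x - mu), so a plausible Cholesky-based sketch of the missing helper (again an assumption) is:

import numpy as np
from scipy.linalg import cho_solve

def gaussian_logprob_grad(x, mu, Sigma, cholSigma=None):
    # Hypothetical helper: d/dx log N(x; mu, Sigma) = -Sigma^{-1} (x - mu).
    dim = x.shape[0]
    if cholSigma is None:
        cholSigma = np.linalg.cholesky(Sigma)
    diff = x.reshape((dim, 1)) - mu.reshape((dim, 1))
    return -cho_solve((cholSigma, True), diff).ravel()
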
Code example #7
    def klDivergence_varupper(self, other, KLdivs=None):
        """ Returns an (upper bound) approximation to KL(self||other).

        KLdivs, if given, holds precomputed pairwise component divergences with
        KLdivs[b, a] = KL(self component a || other component b); otherwise they
        are computed here. """
        D = self.dim
        Nref = self.numComps()
        Nother = other.numComps()

        if KLdivs is None:
            cholSigmaSelf = np.empty((D, D, Nref))
            for b in range(Nref):
                cholSigmaSelf[:, :, b] = np.linalg.cholesky(self.getSigma(b))

            KLdivs = np.empty((Nother, Nref))
            for b in range(Nother):
                mub = other.getMu(b)
                Sigmab = other.getSigma(b)
                cholSigmab = np.linalg.cholesky(other.getSigma(b))

                KLdivs[b, :] = gaussian_kl_vec(self.mus,
                                               self.Sigmas,
                                               mub,
                                               Sigmab,
                                               cholSigma1=cholSigmaSelf,
                                               cholSigma2=cholSigmab)

        logPhi = other.logweights.reshape(Nother, 1) + self.logweights.reshape(
            1, Nref)
        # Rows of logPhi/logPsi index other's components (b), columns index self's (a).
        logPsi = other.logweights.reshape(Nother, 1) + logPhi - logsumexp(
            logPhi, 1).reshape((Nother, 1))
        Phi = np.exp(logPhi)

        done = False
        itNum = 0
        D_upper = np.inf
        while not done:
            itNum += 1
            last_D_upper = D_upper

            (D_upper, Phi, logPhi,
             logPsi) = klDiv_varit(Phi, logPhi, logPsi, self.logweights,
                                   other.logweights, KLdivs)

            assert (np.isfinite(D_upper))
            print('{0}: D_upper = {1}'.format(itNum, D_upper))
            # The bound should decrease monotonically; stop once it no longer improves.
            if last_D_upper - D_upper < 1e-6:
                break

        return D_upper
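
gaussian_kl_vec is the remaining undefined helper. From its call sites it evaluates the closed-form Gaussian KL divergence KL(N(mus1[:, i], Sigmas1[:, :, i]) || N(mu2, Sigma2)) for every component i, optionally reusing precomputed Cholesky factors. A straightforward, loop-based, hypothetical sketch:

import numpy as np

def gaussian_kl_vec(mus1, Sigmas1, mu2, Sigma2, cholSigma1=None, cholSigma2=None):
    # Hypothetical helper: KL(N(mus1[:, i], Sigmas1[:, :, i]) || N(mu2, Sigma2)) per i.
    D, N = mus1.shape
    if cholSigma2 is None:
        cholSigma2 = np.linalg.cholesky(Sigma2)
    logdet2 = 2.0 * np.sum(np.log(np.diag(cholSigma2)))
    kls = np.empty(N)
    for i in range(N):
        L1 = (cholSigma1[:, :, i] if cholSigma1 is not None
              else np.linalg.cholesky(Sigmas1[:, :, i]))
        logdet1 = 2.0 * np.sum(np.log(np.diag(L1)))
        # trace(Sigma2^{-1} Sigma1_i) computed as the squared Frobenius norm of L2^{-1} L1.
        A = np.linalg.solve(cholSigma2, L1)
        diff = mu2.ravel() - mus1[:, i]
        z = np.linalg.solve(cholSigma2, diff)
        kls[i] = 0.5 * (np.sum(A * A) + z @ z - D + logdet2 - logdet1)
    return kls
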
Code example #8
 def drawSamples(self, N):
     Ws = np.exp(self.logweights - logsumexp(self.logweights))
     sampleComps = np.random.multinomial(N, Ws)
     samples = np.empty((self.dim, N))
     startInd = 0
     for i in range(len(sampleComps)):
         cN = sampleComps[i]
         if cN < 0:
             print(cN)
             print(sampleComps)
             print(Ws)
             print(self.logweights)
         assert (cN >= 0)
         assert (startInd >= 0)
         samples[:, startInd:(startInd + cN)] = self.getMu(i) + np.dot(
             np.linalg.cholesky(self.Sigmas[:, :, i]),
             np.random.randn(self.dim, cN))
         startInd += cN
     return samples
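
The sampling pattern in code example #8 (a multinomial draw of per-component counts, then mu + L * z with L the Cholesky factor of the covariance) can be exercised on its own; a small sketch with made-up one-dimensional parameters:

import numpy as np

rng = np.random.default_rng(0)
weights = np.array([0.7, 0.3])
mus = np.array([[0.0, 3.0]])           # 1-D mixture: means 0 and 3
Sigmas = np.array([[[1.0, 0.25]]])     # variances 1 and 0.25, shape (1, 1, 2)

N = 10000
counts = rng.multinomial(N, weights)
samples = np.empty((1, N))
start = 0
for i, cN in enumerate(counts):
    L = np.linalg.cholesky(Sigmas[:, :, i])
    samples[:, start:start + cN] = mus[:, i:i + 1] + L @ rng.standard_normal((1, cN))
    start += cN

# The sample mean should be close to the mixture mean 0.7 * 0 + 0.3 * 3 = 0.9.
print(samples.mean(), weights @ mus.ravel())
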
Code example #9
 def computeMean(self):
     weights = np.exp(self.logweights - logsumexp(self.logweights))
     mu = np.dot(self.mus, weights).reshape((self.dim, 1))
     return mu
Code example #10
    def simplify(self, ref=None, natThresh=None, minNumComps=1):
        """ Remove components and adjust the mixture until just before the KL divergence exceeds natThresh """
        maxNumSubVarIts = 5
        maxNumVarIts = 20
        maxNumDelPerIt = 1

        D = self.dim
        Nself = self.numComps()
        Nnew = Nself

        # Compute Cholesky decompositions
        cholSigmaSelf = np.empty((D, D, Nself))
        for b in range(Nself):
            cholSigmaSelf[:, :, b] = np.linalg.cholesky(self.getSigma(b))

        if ref is None:
            ref = deepcopy(self)
            cholSigmaRef = deepcopy(cholSigmaSelf)
            Nref = Nself
            # Initialize the variational parameters
            Phi = np.diag(np.exp(self.logweights.reshape(Nnew)))
            #            logPhi = np.log(Phi)
            logPhi = np.empty_like(Phi)
            logPhi[Phi > 0] = np.log(Phi[Phi > 0])
            logPhi[Phi <= 0] = -np.inf
            #            logPsi = np.log(Phi)
            logPsi = np.copy(logPhi)
        else:
            Nref = ref.numComps()
            cholSigmaRef = np.empty((D, D, Nref))
            for a in range(Nref):
                cholSigmaRef[:, :, a] = np.linalg.cholesky(ref.getSigma(a))
            # Initialize the variational parameters
            logPhi = self.logweights.reshape(Nnew, 1) + ref.logweights.reshape(
                1, Nref)
            logPsi = self.logweights.reshape(Nnew, 1) + logPhi - logsumexp(
                logPhi, 1).reshape((Nnew, 1))
            Phi = np.exp(logPhi)

        # Compute KL divergence between components
        KLdivs = np.empty((Nself, Nref))
        for b in range(Nself):
            mub = self.getMu(b)
            Sigmab = self.getSigma(b)
            cholSigmab = cholSigmaSelf[:, :, b]

            KLdivs[b, :] = gaussian_kl_vec(ref.mus,
                                           ref.Sigmas,
                                           mub,
                                           Sigmab,
                                           cholSigma1=cholSigmaRef,
                                           cholSigma2=cholSigmab)
        del cholSigmaSelf

        new_D_upper = klDiv_var(Phi, logPhi, logPsi, KLdivs)

        newActiveComps = [i for i in range(0, Nself)]
        newActiveComps.sort(key=lambda x: self.logweights[x])

        globalVarItCnt = 0
        itNum = 0
        while Nnew - maxNumDelPerIt >= minNumComps:
            itNum += 1

            currNumDel = 1
            #            delLogW = logsumexp(self.logweights[newActiveComps[0:2]])
            #            while delLogW < np.log(1e-4) and currNumDel < maxNumDelPerIt:
            #                currNumDel += 1
            #                delLogW = logsumexp(self.logweights[newActiveComps[0:(currNumDel+1)]])
            #            print '{0}: numDel = {1}'.format(itNum,currNumDel)
            delCompI = newActiveComps[0:currNumDel]
            propComps = newActiveComps[currNumDel:]
            numDel = len(delCompI)
            Nprop = Nnew - numDel

            propLogWeights = np.logaddexp(
                self.logweights[propComps],
                logsumexp(self.logweights[delCompI]) - np.log(Nprop))
            propMus = self.mus[:, propComps].reshape((D, Nprop))
            propSigmas = self.Sigmas[:, :, propComps].reshape((D, D, Nprop))

            propLogPhi = np.logaddexp(
                logPhi[propComps, :],
                logsumexp(logsumexp(logPhi[delCompI, :], 1), 0) -
                np.log(Nprop))
            propPhi = np.exp(propLogPhi)
            prevNormPropPhi = None
            propKLdivs = KLdivs[propComps, :]
            propLogPsi = propLogWeights.reshape(
                Nprop, 1) + propLogPhi - logsumexp(propLogPhi, 1).reshape(
                    (Nprop, 1))

            D_upper = klDiv_var(propPhi, propLogPhi, propLogPsi, propKLdivs)

            varItNum = 0
            #            while varItNum < maxNumVarIts and (itNum == 0 or D_upper >= natThresh):
            while varItNum < maxNumVarIts and (varItNum == 0
                                               or D_upper >= natThresh):
                varItNum += 1
                globalVarItCnt += 1

                last_propLogWeights = propLogWeights
                lastVarIt_D_upper = D_upper

                # Minimize the upper bound wrt the variational parameters and log weights
                subVarItNum = 0
                while subVarItNum < maxNumSubVarIts and D_upper >= natThresh:
                    subVarItNum += 1
                    lastsub_propLogWeights = propLogWeights
                    lastSubVarIt_D_upper = D_upper
                    (D_upper, propPhi, propLogPhi, propLogPsi,
                     propLogWeights) = klDiv_varit(propPhi, propLogPhi,
                                                   propLogPsi, ref.logweights,
                                                   propLogWeights, propKLdivs,
                                                   True)
                    if np.abs(D_upper - lastSubVarIt_D_upper) < 1e-6:
                        break

                if D_upper >= natThresh:
                    normPropPhi = np.exp(
                        propLogPhi -
                        logsumexp(propLogPhi, 1).reshape(Nprop, 1))
                    if prevNormPropPhi is not None:
                        #                        updPropComps = np.nonzero(np.sum(np.abs(normPropPhi - prevNormPropPhi),1).ravel() > 1e-2)
                        #                        updPropComps = updPropComps[0]
                        updPropComps = np.array([
                            np.argmax(
                                np.sum(np.abs(normPropPhi - prevNormPropPhi),
                                       1).ravel())
                        ])
                    else:
                        updPropComps = range(Nprop)
#                    updPropComps = range(Nprop)
#                    updPropComps = np.array([np.argmax(logsumexp(propLogPhi,1).ravel())])
                else:
                    updPropComps = []

                if len(updPropComps) > 0:
                    if prevNormPropPhi is None:
                        prevNormPropPhi = normPropPhi
                    else:
                        prevNormPropPhi[updPropComps, :] = normPropPhi[
                            updPropComps, :]
                    propMus[:, updPropComps] = np.dot(
                        ref.mus, normPropPhi[updPropComps, :].T)
                    dmus = propMus[:, updPropComps].reshape(
                        D, -1, 1) - ref.mus.reshape(D, 1, Nref)
                    dmuCovs = np.multiply(dmus.reshape(D, 1, -1, Nref),
                                          dmus.reshape(1, D, -1, Nref))
                    #updPropSigmas = np.sum(np.multiply((ref.Sigmas.reshape(D,D,1,Nref) + dmuCovs).reshape((D,D,-1,Nref)),normPropPhi[updPropComps,:].reshape((1,1,-1,Nref))),3)
                    updPropSigmas = np.sum(
                        np.multiply(
                            ref.Sigmas.reshape(D, D, 1, Nref),
                            normPropPhi[updPropComps, :].reshape(
                                (1, 1, -1, Nref))), 3) + np.sum(
                                    np.multiply(
                                        dmuCovs.reshape((D, D, -1, Nref)),
                                        normPropPhi[updPropComps, :].reshape(
                                            (1, 1, -1, Nref))), 3)
                    propSigmas[:, :, updPropComps] = updPropSigmas

                    prev___D_upper = D_upper
                    prev___KLDivs = propKLdivs[updPropComps, :]
                    for (tInd, propInd) in enumerate(updPropComps):
                        mub = propMus[:, propInd].reshape((D, 1))
                        Sigmab = propSigmas[:, :, propInd]
                        propKLdivs[propInd, :] = gaussian_kl_vec(
                            ref.mus,
                            ref.Sigmas,
                            mub,
                            Sigmab,
                            cholSigma1=cholSigmaRef)

#                    (D_upper,propPhi,propLogPhi,propLogPsi,propLogWeights) = klDiv_varit(propPhi,propLogPhi,propLogPsi,ref.logweights,propLogWeights,propKLdivs,True)
                    D_upper = klDiv_var(propPhi, propLogPhi, propLogPsi,
                                        propKLdivs)
                    if False and D_upper - prev___D_upper >= 1e-6:
                        print('{0} -> {1}'.format(prev___D_upper, D_upper))
                        print('prevKLDivs = {0}'.format(prev___KLDivs))
                        print('currKLDivs = {0}'.format(
                            propKLdivs[updPropComps, :]))
                        assert (D_upper - prev___D_upper < 1e-6)
#                    print '{0} -> {1}'.format(prev___D_upper,D_upper)
                    if prev___D_upper >= D_upper + 1e-8:
                        break

#                (D_upper,propPhi,propLogPhi,propLogPsi,propLogWeights) = klDiv_varit(propPhi,propLogPhi,propLogPsi,ref.logweights,propLogWeights,propKLdivs,True)
#                print '  {1}: D_upper = {0}, subVarItNums = {2}'.format(D_upper,varItNum,subVarItNum)

                if np.abs(D_upper - lastVarIt_D_upper) < 1e-6 and np.all(
                        np.abs(last_propLogWeights - propLogWeights) < 1e-6):
                    break

#            print '{2}: D_upper = {0}, varItNums = {1}'.format(D_upper,varItNum,itNum)

            if D_upper > natThresh:
                break
            else:
                Nnew = Nprop
                new_D_upper = D_upper

                KLdivs[propComps, :] = propKLdivs
                logPhi[propComps, :] = propLogPhi
                Phi[propComps, :] = propPhi
                logPsi[propComps, :] = propLogPsi

                self.logweights[delCompI] = -np.inf
                self.logweights[propComps] = propLogWeights
                self.mus[:, propComps] = propMus
                self.Sigmas[:, :, propComps] = propSigmas

                while numDel > 0:
                    newActiveComps.pop(0)
                    numDel += -1
                newActiveComps.sort(key=lambda x: self.logweights[x])

        if Nnew < Nself:
            self.logweights = self.logweights[newActiveComps]
            self.mus = self.mus[:, newActiveComps].reshape(D, Nnew)
            self.Sigmas = self.Sigmas[:, :, newActiveComps].reshape(D, D, Nnew)

        return new_D_upper
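
The dense block around propMus and updPropSigmas in code example #10 is a moment-preserving merge: each proposal component's mean and covariance are the responsibility-weighted mean and covariance (including the spread of the reference means around the merged mean) of the reference components assigned to it. The same computation written out for a single merged component, with hypothetical inputs:

import numpy as np

def merge_components(ref_mus, ref_Sigmas, resp):
    # Moment-preserving merge of reference components under responsibilities `resp`
    # (non-negative, summing to one over the reference components):
    #   mean = sum_a resp[a] * mu_a
    #   cov  = sum_a resp[a] * (Sigma_a + (mu_a - mean)(mu_a - mean)^T)
    mean = ref_mus @ resp
    diffs = ref_mus - mean[:, np.newaxis]
    cov = (ref_Sigmas * resp).sum(axis=2) + (diffs * resp) @ diffs.T
    return mean, cov
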