Code example #1
File: particle_lds.py Project: viveksck/pgmult
    def predictive_log_likelihood(self, Xtest, data_index=0, Npred=100):
        """
        Hacky way of computing the predictive log likelihood
        :param X_pred:
        :param data_index:
        :param M:
        :return:
        """
        Tpred = Xtest.shape[0]

        # Sample particle trajectories
        preds = self.states_list[data_index].sample_predictions(Tpred, Npred)
        preds = np.transpose(preds, [2, 0, 1])
        assert preds.shape == (Npred, Tpred, self.n)

        psis = np.array([pred.dot(self.C.T) + self.mu for pred in preds])
        pis = np.array([ln_psi_to_pi(psi) for psi in psis])

        # TODO: Generalize for multinomial
        lls = np.zeros(Npred)
        for m in range(Npred):
            # lls[m] = np.sum(
            #     [Multinomial(weights=pis[m,t,:], K=self.p).log_likelihood(Xtest[t][None,:])
            #      for t in range(Tpred)])
            lls[m] = np.nansum(Xtest * np.log(pis[m]))

        # Average the sampled likelihoods in log space (log-mean-exp)
        hll = logsumexp(lls) - np.log(Npred)

        # Use bootstrap to compute error bars
        samples = np.random.choice(lls, size=(100, Npred), replace=True)
        hll_samples = logsumexp(samples, axis=1) - np.log(Npred)
        std_hll = hll_samples.std()

        return hll, std_hll
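Every example on this page funnels its activations through `ln_psi_to_pi`, whose definition is not included in these excerpts. As a point of reference, a minimal sketch of a compatible implementation, assuming the standard softmax (multi-class logistic) mapping from real-valued psi to a probability vector pi, could look like the following; the actual pgmult definition may differ (its stick-breaking models use a separate `psi_to_pi`, as in example #13 below):

import numpy as np

def ln_psi_to_pi(psi):
    # Softmax along the last axis: pi_k = exp(psi_k) / sum_j exp(psi_j).
    # Subtracting the row-wise max keeps the exponentials from overflowing.
    shifted = psi - psi.max(axis=-1, keepdims=True)
    expd = np.exp(shifted)
    return expd / expd.sum(axis=-1, keepdims=True)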
Code example #2
File: particle_lds.py Project: fivejjs/pgmult
    def log_likelihood(self):
        ll = 0
        for states in self.states_list:
            psi = states.stateseq.dot(self.C.T) + self.mu
            pi = ln_psi_to_pi(psi)
            ll += np.sum(states.data * np.log(pi))
        return ll
Code example #3
File: gp.py Project: yinsenm/pgmult
    def generate(self, keep=True, Z=None, N=None, full_output=True):
        assert Z is not None and Z.ndim == 2 and Z.shape[1] == self.D
        M = Z.shape[0]

        assert N.ndim == 1 and N.shape[0] == M and np.all(N >= 1)
        assert N.dtype in (np.int32, np.int64)
        N = N.astype(np.int32)

        # Compute the covariance of the Z's
        C = self.kernel.K(Z)

        # Sample from a zero mean GP, N(0, C) for each output, k
        psis = np.zeros((M, self.K))
        for k in range(self.K):
            # TODO: Reuse the Cholesky
            psis[:,k] = np.random.multivariate_normal(np.zeros(M), C)

        # Add the mean vector
        psis += self.mu[None,:]

        # Sample from the multinomial distribution
        pis = np.array([ln_psi_to_pi(psi) for psi in psis])
        X = np.array([np.random.multinomial(N[m], pis[m]) for m in range(M)])

        if keep:
            self.add_data(Z, X)

        if full_output:
            return X, psis
        else:
            return X
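The TODO above (reusing the Cholesky) can be addressed by factoring C once and drawing all K outputs from that single factor; a minimal sketch, assuming the shapes used in `generate` and a small diagonal jitter for numerical stability:

# If L = chol(C), then mu + L @ eps with eps ~ N(0, I) is distributed
# as N(mu, C), so one factorization serves all K outputs.
L = np.linalg.cholesky(C + 1e-6 * np.eye(M))
psis = self.mu[None, :] + L.dot(np.random.randn(M, self.K))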
Code example #4
File: particle_lds.py Project: viveksck/pgmult
    def log_likelihood(self):
        ll = 0
        for states in self.states_list:
            psi = states.stateseq.dot(self.C.T) + self.mu
            pi = ln_psi_to_pi(psi)
            ll += np.sum(states.data * np.log(pi))
        return ll
Code example #5
File: ctm.py Project: fivejjs/pgmult
def lda_initializer(model):
    T, V = model.T, model.V
    model.beta = np.exp(np.loadtxt('ctm-out/000-log-beta.dat')
                            .reshape((-1,V))).T
    lmbda = np.loadtxt('ctm-out/000-lambda.dat').reshape((-1,T))
    nonempty_docs = np.asarray(model.data.sum(1) > 0).ravel()
    model.theta[nonempty_docs] = ln_psi_to_pi(lmbda)
    model.resample_z()
    return model
Code example #6
File: gp.py Project: yinsenm/pgmult
    def predict(self, Z_new, full_output=True, full_cov=False):
        """
        Predict the multinomial probability vector at a grid of points, Z
        :param Z_new:
        :return:
        """
        assert len(self.data_list) == 1, "Must have one data list in order to predict."
        data = self.data_list[0]
        M = data["M"]
        Z = data["Z"]

        assert Z_new is not None and Z_new.ndim == 2 and Z_new.shape[1] == self.D
        M_new = Z_new.shape[0]

        # Compute the kernel for Z_news
        C   = self.kernel.K(Z, Z)
        Cvv = C + np.diag(1e-6 * np.ones(M))
        Lvv = np.linalg.cholesky(Cvv)

        Cnn = self.kernel.K(Z_new, Z_new)

        # Compute the kernel between the new and valid points
        Cnv = self.kernel.K(Z_new, Z)

        # Predict the psis
        mu_psis_new = np.zeros((self.K, M_new))
        Sig_psis_new = np.zeros((self.K, M_new, M_new))
        for k in range(self.K):
            sys.stdout.write(".")
            sys.stdout.flush()

            psik = data["psi"][:,k]

            # Compute the predictive parameters
            y = solve_triangular(Lvv, psik, lower=True)
            x = solve_triangular(Lvv.T, y, lower=False)
            psik_pred = Cnv.dot(x)

            # Save these into the combined arrays
            mu_psis_new[k] = psik_pred + self.mu[k]

            if full_cov:
                # Sig_pred = Cnn - Cnv.dot(np.linalg.solve(Cvv, Cnv.T))
                Sig_psis_new[k] = Cnn - Cnv.dot(dpotrs(Lvv, Cnv.T, lower=True)[0])

        sys.stdout.write("\n")
        sys.stdout.flush()

        # Convert these to pis
        pis_new = np.array([ln_psi_to_pi(psi) for psi in mu_psis_new])

        if full_output:
            return pis_new, mu_psis_new, Sig_psis_new
        else:
            return pis_new
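For reference, the pair of `solve_triangular` calls above applies Cvv^-1 to psik via the Cholesky factor Lvv (Cvv = Lvv Lvv^T), so `psik_pred = Cnv.dot(x)` is the usual GP conditional mean. A hypothetical usage sketch, assuming a fitted model with 2-D inputs (`model` and the grid are illustrative, not from the source):

# Predict at a 10x10 grid of new 2-D input locations.
g = np.linspace(0, 1, 10)
Z_grid = np.column_stack([a.ravel() for a in np.meshgrid(g, g)])
pis, mu_psis, Sig_psis = model.predict(Z_grid, full_output=True, full_cov=True)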
Code example #7
File: particle_lds.py Project: viveksck/pgmult
        def log_joint_C(C):
            ll = 0
            for states in self.states_list:
                z = states.stateseq
                psi = z.dot(C.T) + self.mu
                pi = ln_psi_to_pi(psi)

                # TODO: Generalize for multinomial
                ll += np.nansum(states.data * np.log(pi))

            ll += (-0.5 * C**2 / self.sigma_C).sum()

            return ll
Code example #8
File: particle_lds.py Project: fivejjs/pgmult
        def log_joint_C(C):
            ll = 0
            for states in self.states_list:
                z = states.stateseq
                psi = z.dot(C.T) + self.mu
                pi = ln_psi_to_pi(psi)

                # TODO: Generalize for multinomial
                ll += np.nansum(states.data * np.log(pi))

            ll += (-0.5*C**2/self.sigma_C).sum()

            return ll
Code example #9
File: gp.py Project: yinsenm/pgmult
    def predictive_log_likelihood(self, Z_pred, X_pred):
        """
        Predict the GP value at the inputs Z_pred and evaluate the likelihood of X_pred
        """
        _, mu_pred, Sig_pred = self.predict(Z_pred, full_output=True)

        psis = np.array([np.random.multivariate_normal(mu, Sig) for mu,Sig in zip(mu_pred, Sig_pred)])
        pis = ln_psi_to_pi(psis.T)

        pll = 0
        pll += gammaln(X_pred.sum(axis=1)+1).sum() - gammaln(X_pred+1).sum()
        pll += np.nansum(X_pred * np.log(pis))

        return pll, pis
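The `gammaln` line above contributes the log multinomial coefficient log(N! / (x_1! ... x_K!)) for each row of counts, a term the particle-LDS estimator in example #1 omits. As a standalone sketch of that term:

import numpy as np
from scipy.special import gammaln

def log_multinomial_coef(X):
    # log [ N! / (x_1! * ... * x_K!) ] per row of counts X,
    # using log-gamma: log n! == gammaln(n + 1).
    X = np.asarray(X)
    return gammaln(X.sum(axis=1) + 1) - gammaln(X + 1).sum(axis=1)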
Code example #10
File: particle_lds.py Project: fivejjs/pgmult
    def predictive_log_likelihood(self, Xtest, data_index=0, Npred=100):
        """
        Hacky way of computing the predictive log likelihood
        :param X_pred:
        :param data_index:
        :param M:
        :return:
        """
        Tpred = Xtest.shape[0]

        # Sample particle trajectories
        preds = self.states_list[data_index].sample_predictions(Tpred, Npred)
        preds = np.transpose(preds, [2,0,1])
        assert preds.shape == (Npred, Tpred, self.n)

        psis = np.array([pred.dot(self.C.T) + self.mu for pred in preds])
        pis = np.array([ln_psi_to_pi(psi) for psi in psis])

        # TODO: Generalize for multinomial
        lls = np.zeros(Npred)
        for m in range(Npred):
            # lls[m] = np.sum(
            #     [Multinomial(weights=pis[m,t,:], K=self.p).log_likelihood(Xtest[t][None,:])
            #      for t in range(Tpred)])
            lls[m] = np.nansum(Xtest * np.log(pis[m]))

        # Average the sampled likelihoods in log space (log-mean-exp)
        hll = logsumexp(lls) - np.log(Npred)

        # Use bootstrap to compute error bars
        samples = np.random.choice(lls, size=(100, Npred), replace=True)
        hll_samples = logsumexp(samples, axis=1) - np.log(Npred)
        std_hll = hll_samples.std()

        return hll, std_hll
Code example #11
File: distributions.py Project: yinsenm/pgmult
    def pi(self):
        return ln_psi_to_pi(self.psi)
Code example #12
File: particle_lds.py Project: viveksck/pgmult
    def pi(self):
        psi = self.stateseq.dot(self.C.T)
        return ln_psi_to_pi(psi)
Code example #13
File: ctm.py Project: fivejjs/pgmult
    nonempty_docs = np.asarray(model.data.sum(1) > 0).ravel()
    model.theta[nonempty_docs] = ln_psi_to_pi(lmbda)
    model.resample_z()
    return model


fit_lda_gibbs = sampler_fitter(
    'fit_lda_gibbs', StandardLDA, 'resample', lda_initializer)
fit_lda_collapsed = sampler_fitter(
    'fit_lda_collapsed', StandardLDA, 'resample_collapsed', lda_initializer)
fit_lnctm_gibbs = sampler_fitter(
    'fit_lnctm_gibbs', LogisticNormalCorrelatedLDA, 'resample',
    make_ctm_initializer(lambda lmbda: lmbda))
fit_sbctm_gibbs = sampler_fitter(
    'fit_sbctm_gibbs', StickbreakingCorrelatedLDA, 'resample',
    make_ctm_initializer(lambda lmbda: pi_to_psi(ln_psi_to_pi(lmbda))))


########################
#  inspecting results  #
########################

def plot_sb_interpretable_results(sb_results, words):
    nwords = 5
    Sigma = sb_results[-1][-1]
    T = Sigma.shape[0]

    def get_topwords(topic):
        return words[np.argsort(sb_results[-1][0][:,topic])[-nwords:]]

    lim = np.abs(Sigma).max()
Code example #14
File: gp.py Project: yinsenm/pgmult
    def pi(self, augmented_data):
        psi = self.psi(augmented_data)
        return ln_psi_to_pi(psi)
Code example #15
File: ctm_wrapper.py Project: viveksck/pgmult
def wordprobs(data, val):
    lmbda, beta = val['lmbda'], np.exp(val['log_beta'])
    assert np.allclose(ln_psi_to_pi(lmbda).dot(beta).sum(1), 1.)
    return ln_psi_to_pi(lmbda).dot(beta)[csr_nonzero(data)]
Code example #16
File: particle_lds.py Project: fivejjs/pgmult
 def pi(self):
     psi = self.stateseq.dot(self.C.T)
     return ln_psi_to_pi(psi)
Code example #17
File: lda.py Project: fivejjs/pgmult
    def theta(self):
        return ln_psi_to_pi(self.psi)