コード例 #1
0
ファイル: network.py プロジェクト: cultivateai/pyhawkes
    def get_vlb(self):
        """
        Variational lower bound contribution of the weight scale v:
        E[LN p(v | alpha, beta)] - E[LN q(v | mf_alpha, mf_beta)].

        :return: scalar lower bound contribution
        """
        # Cross entropy term: expectations of v and ln v under q(v),
        # where q(v) = Gamma(mf_alpha, mf_beta).
        E_v = self.mf_alpha / self.mf_beta
        E_ln_v = psi(self.mf_alpha) - np.log(self.mf_beta)
        cross_entropy = Gamma(self.alpha, self.beta).negentropy(
            E_lambda=E_v, E_ln_lambda=E_ln_v).sum()

        # Subtract the negative entropy of q(v)
        q_negentropy = Gamma(self.mf_alpha, self.mf_beta).negentropy().sum()

        return cross_entropy - q_negentropy
コード例 #2
0
ファイル: weights.py プロジェクト: cultivateai/pyhawkes
    def get_vlb(self):
        """
        Variational lower bound for A_kk' and W_kk'
        E[LN p(A_kk', W_kk' | p, kappa, v)] -
        E[LN q(A_kk', W_kk' | mf_p, mf_kappa, mf_v)]
        :return: scalar lower bound contribution
        """
        # Expected adjacency under the variational posterior
        E_A = self.expected_A()
        E_notA = 1.0 - E_A

        # --- Cross entropy terms ---
        # E[LN p(A | p)]
        vlb = Bernoulli().negentropy(
            E_x=E_A,
            E_notx=E_notA,
            E_ln_p=self.network.expected_log_p(),
            E_ln_notp=self.network.expected_log_notp()).sum()

        # E[LN p(W | A=1, kappa, v)], weighted by the probability A=1
        vlb += (E_A * Gamma(self.network.kappa).negentropy(
            E_beta=self.network.expected_v(),
            E_ln_beta=self.network.expected_log_v(),
            E_lambda=self.expected_W_given_A(A=1),
            E_ln_lambda=self.expected_log_W_given_A(A=1))).sum()

        # E[LN p(W | A=0, kappa0, v0)], weighted by the probability A=0
        vlb += (E_notA * Gamma(self.kappa_0, self.nu_0).negentropy(
            E_lambda=self.expected_W_given_A(A=0),
            E_ln_lambda=self.expected_log_W_given_A(A=0))).sum()

        # --- Entropy terms ---
        # E[LN q(A)]
        vlb -= Bernoulli(self.mf_p).negentropy().sum()

        # E[LN q(W | A=1)] and E[LN q(W | A=0)]
        vlb -= (E_A * Gamma(self.mf_kappa_1, self.mf_v_1).negentropy()).sum()
        vlb -= (E_notA * Gamma(self.mf_kappa_0, self.mf_v_0).negentropy()).sum()

        return vlb
コード例 #3
0
ファイル: network.py プロジェクト: twistedmove/pyhawkes
    def get_vlb(self,
                vlb_c=True,
                vlb_p=True,
                vlb_v=True,
                vlb_m=True):
        """
        Compute the variational lower bound, optionally including each
        factor's contribution.

        :param vlb_c: include the class-assignment terms for q(c)
        :param vlb_p: include the connection-probability terms for q(p)
        :param vlb_v: include the weight-scale terms for q(v)
        :param vlb_m: include the block-probability terms for q(m)
        :return: scalar lower bound
        """
        vlb = 0

        # Get the VLB of the expected class assignments
        if vlb_c:
            E_ln_m = self.expected_log_m()
            # FIX: `xrange` is Python 2 only and raises NameError on
            # Python 3; the rest of this file uses `range`.
            for k in range(self.K):
                # Add the cross entropy of p(c | m)
                vlb += Discrete().negentropy(E_x=self.mf_m[k,:], E_ln_p=E_ln_m)

                # Subtract the negative entropy of q(c)
                vlb -= Discrete(self.mf_m[k,:]).negentropy()

        # Get the VLB of the connection probability matrix
        if vlb_p:
            # Add the cross entropy of p(p | tau1, tau0); E[ln p] and
            # E[ln (1-p)] follow from q(p) = Beta(mf_tau1, mf_tau0)
            vlb += Beta(self.tau1, self.tau0).\
                negentropy(E_ln_p=(psi(self.mf_tau1) - psi(self.mf_tau0 + self.mf_tau1)),
                           E_ln_notp=(psi(self.mf_tau0) - psi(self.mf_tau0 + self.mf_tau1))).sum()

            # Subtract the negative entropy of q(p)
            vlb -= Beta(self.mf_tau1, self.mf_tau0).negentropy().sum()

        # Get the VLB of the weight scale matrix, v
        if vlb_v:
            # Add the cross entropy of p(v | alpha, beta); E[v] and
            # E[ln v] follow from q(v) = Gamma(mf_alpha, mf_beta)
            vlb += Gamma(self.alpha, self.beta).\
                negentropy(E_lambda=self.mf_alpha/self.mf_beta,
                           E_ln_lambda=psi(self.mf_alpha) - np.log(self.mf_beta)).sum()

            # Subtract the negative entropy of q(v)
            vlb -= Gamma(self.mf_alpha, self.mf_beta).negentropy().sum()

        # Get the VLB of the block probability vector, m
        if vlb_m:
            # Add the cross entropy of p(m | pi)
            vlb += Dirichlet(self.pi).negentropy(E_ln_g=self.expected_log_m())

            # Subtract the negative entropy of q(m)
            vlb -= Dirichlet(self.mf_pi).negentropy()

        return vlb
コード例 #4
0
ファイル: network.py プロジェクト: cultivateai/pyhawkes
    def resample_c(self, A, W):
        """
        Resample block assignments given the weighted adjacency matrix
        and the impulse response fits (if used).

        :param A: binary adjacency matrix, indexed A[k, k']
        :param W: weight matrix, indexed W[k, k']
        """
        # With a single block there is nothing to sample
        if self.C == 1:
            return

        # Sample each assignment in order
        for k in range(self.K):
            # Compute unnormalized log probs of each block assignment
            lp = np.zeros(self.C)

            # Prior from m
            lp += np.log(self.m)

            # Likelihood from network
            for ck in range(self.C):
                # Hypothetically assign node k to block ck.
                # FIX: `np.int` was deprecated in NumPy 1.20 and removed
                # in 1.24; the builtin `int` is the documented replacement.
                c_temp = self.c.copy().astype(int)
                c_temp[k] = ck

                # p(A[k,k'] | c): outgoing connections
                lp[ck] += Bernoulli(self.p[ck, c_temp])\
                                .log_probability(A[k,:]).sum()

                # p(A[k',k] | c): incoming connections
                lp[ck] += Bernoulli(self.p[c_temp, ck])\
                                .log_probability(A[:, k]).sum()

                # p(W[k,k'] | c): outgoing weights, scored only where A=1
                lp[ck] += (A[k,:] * Gamma(self.kappa, self.v[ck, c_temp])\
                                .log_probability(W[k,:])).sum()

                # p(W[k',k] | c): incoming weights, scored only where A=1
                lp[ck] += (A[:, k] * Gamma(self.kappa, self.v[c_temp, ck])\
                                .log_probability(W[:, k])).sum()

                # TODO: Subtract off the self connection since we double counted

                # TODO: Get probability of impulse responses g

            # Resample from lp
            self.c[k] = sample_discrete_from_log(lp)
コード例 #5
0
ファイル: network.py プロジェクト: cultivateai/pyhawkes
    def log_likelihood(self, x):
        """
        Compute the log likelihood of a set of SBM parameters.

        :param x:   (m,p,v) tuple (unused here; only self.v is scored
                    under its Gamma prior)
        :return:    scalar log probability
        """
        return Gamma(self.alpha, self.beta).log_probability(self.v).sum()
コード例 #6
0
    def get_vlb(self):
        r"""
        Variational lower bound for \lambda_k^0
        E[LN p(\lambda_k^0 | \alpha, \beta)] -
        E[LN q(\lambda_k^0 | \tilde{\alpha}, \tilde{\beta})]
        :return: scalar lower bound contribution
        """
        # First term: cross entropy E[LN p(\lambda_k^0 | \alpha, \beta)]
        # using expectations of lambda0 under the variational posterior
        cross_entropy = Gamma(self.alpha, self.beta).negentropy(
            E_lambda=self.expected_lambda0(),
            E_ln_lambda=self.expected_log_lambda0()).sum()

        # Second term: negative entropy of q(\lambda_k^0)
        q_negentropy = Gamma(self.mf_alpha, self.mf_beta).negentropy().sum()

        return cross_entropy - q_negentropy
コード例 #7
0
ファイル: network.py プロジェクト: twistedmove/pyhawkes
    def log_likelihood(self, x):
        """
        Compute the log likelihood of a set of SBM parameters.

        :param x:   (m, p, v, c) tuple of block probabilities, connection
                    probabilities, weight scales, and block assignments
        :return:    scalar log probability
        """
        m, p, v, c = x

        ones_CC = np.ones((self.C, self.C))
        lp = Dirichlet(self.pi).log_probability(m)
        # Elementwise Beta prior over the C x C connection probabilities
        lp += Beta(self.tau1 * ones_CC,
                   self.tau0 * ones_CC).log_probability(p).sum()
        lp += Gamma(self.alpha, self.beta).log_probability(v).sum()
        # Log probability of each node's block assignment under m
        lp += np.log(m)[c].sum()
        return lp
コード例 #8
0
ファイル: network.py プロジェクト: cultivateai/pyhawkes
    def mf_update_c(self,
                    E_A,
                    E_notA,
                    E_W_given_A,
                    E_ln_W_given_A,
                    stepsize=1.0):
        """
        Update the block assignment probabilities one at a time.
        This one involves a number of not-so-friendly expectations.

        :param E_A:             expected adjacency, indexed [k, k']
        :param E_notA:          expected complement of the adjacency
        :param E_W_given_A:     expected weights given a connection
        :param E_ln_W_given_A:  expected log weights given a connection
        :param stepsize:        convex-combination step for the natural-
                                gradient-style update of mf_m (1.0 replaces
                                the old value entirely)
        :return:
        """
        # Sample each assignment in order
        for k in range(self.K):
            # Indices of all nodes other than k
            notk = np.concatenate((np.arange(k), np.arange(k + 1, self.K)))

            # Compute unnormalized log probs of each connection
            lp = np.zeros(self.C)

            # Prior from m
            lp += self.expected_log_m()

            # Likelihood from network
            for ck in range(self.C):

                # Compute expectations with respect to other block assignments, c_{\neg k}
                # Initialize vectors for expected parameters, one entry per
                # other node, accumulated over its possible blocks below
                E_ln_p_ck_to_cnotk = np.zeros(self.K - 1)
                E_ln_notp_ck_to_cnotk = np.zeros(self.K - 1)
                E_ln_p_cnotk_to_ck = np.zeros(self.K - 1)
                E_ln_notp_cnotk_to_ck = np.zeros(self.K - 1)
                E_v_ck_to_cnotk = np.zeros(self.K - 1)
                E_ln_v_ck_to_cnotk = np.zeros(self.K - 1)
                E_v_cnotk_to_ck = np.zeros(self.K - 1)
                E_ln_v_cnotk_to_ck = np.zeros(self.K - 1)

                for cnotk in range(self.C):
                    # Get the (K-1)-vector of other class assignment probabilities
                    p_cnotk = self.mf_m[notk, cnotk]

                    # Expected log probability of a connection from ck to cnotk
                    # (digamma identities for E[ln p] under Beta(tau1, tau0))
                    E_ln_p_ck_to_cnotk += p_cnotk * (
                        psi(self.mf_tau1[ck, cnotk]) -
                        psi(self.mf_tau0[ck, cnotk] + self.mf_tau1[ck, cnotk]))
                    E_ln_notp_ck_to_cnotk += p_cnotk * (
                        psi(self.mf_tau0[ck, cnotk]) -
                        psi(self.mf_tau0[ck, cnotk] + self.mf_tau1[ck, cnotk]))

                    # Expected log probability of a connection from cnotk to ck
                    E_ln_p_cnotk_to_ck += p_cnotk * (
                        psi(self.mf_tau1[cnotk, ck]) -
                        psi(self.mf_tau0[cnotk, ck] + self.mf_tau1[cnotk, ck]))
                    E_ln_notp_cnotk_to_ck += p_cnotk * (
                        psi(self.mf_tau0[cnotk, ck]) -
                        psi(self.mf_tau0[cnotk, ck] + self.mf_tau1[cnotk, ck]))

                    # Expected log scale of connections from ck to cnotk
                    # (E[v] and E[ln v] under Gamma(mf_alpha, mf_beta))
                    E_v_ck_to_cnotk += p_cnotk * (self.mf_alpha[ck, cnotk] /
                                                  self.mf_beta[ck, cnotk])
                    E_ln_v_ck_to_cnotk += p_cnotk * (
                        psi(self.mf_alpha[ck, cnotk]) -
                        np.log(self.mf_beta[ck, cnotk]))

                    # Expected log scale of connections from cnotk to ck
                    E_v_cnotk_to_ck += p_cnotk * (self.mf_alpha[cnotk, ck] /
                                                  self.mf_beta[cnotk, ck])
                    E_ln_v_cnotk_to_ck += p_cnotk * (
                        psi(self.mf_alpha[cnotk, ck]) -
                        np.log(self.mf_beta[cnotk, ck]))

                # Compute E[ln p(A | c, p)] for outgoing edges of k
                lp[ck] += Bernoulli().negentropy(
                    E_x=E_A[k, notk],
                    E_notx=E_notA[k, notk],
                    E_ln_p=E_ln_p_ck_to_cnotk,
                    E_ln_notp=E_ln_notp_ck_to_cnotk).sum()

                # ... and for incoming edges of k
                lp[ck] += Bernoulli().negentropy(
                    E_x=E_A[notk, k],
                    E_notx=E_notA[notk, k],
                    E_ln_p=E_ln_p_cnotk_to_ck,
                    E_ln_notp=E_ln_notp_cnotk_to_ck).sum()

                # Compute E[ln p(W | A=1, c, v)] for outgoing edges,
                # weighted by the probability each edge exists
                lp[ck] += (E_A[k, notk] * Gamma(self.kappa).negentropy(
                    E_ln_lambda=E_ln_W_given_A[k, notk],
                    E_lambda=E_W_given_A[k, notk],
                    E_beta=E_v_ck_to_cnotk,
                    E_ln_beta=E_ln_v_ck_to_cnotk)).sum()

                # ... and for incoming edges
                lp[ck] += (E_A[notk, k] * Gamma(self.kappa).negentropy(
                    E_ln_lambda=E_ln_W_given_A[notk, k],
                    E_lambda=E_W_given_A[notk, k],
                    E_beta=E_v_cnotk_to_ck,
                    E_ln_beta=E_ln_v_cnotk_to_ck)).sum()

                # Compute expected log prob of self connection, handled
                # separately since the notk loops above exclude it
                if self.allow_self_connections:
                    E_ln_p_ck_to_ck = psi(
                        self.mf_tau1[ck, ck]) - psi(self.mf_tau0[ck, ck] +
                                                    self.mf_tau1[ck, ck])
                    E_ln_notp_ck_to_ck = psi(
                        self.mf_tau0[ck, ck]) - psi(self.mf_tau0[ck, ck] +
                                                    self.mf_tau1[ck, ck])
                    lp[ck] += Bernoulli().negentropy(
                        E_x=E_A[k, k],
                        E_notx=E_notA[k, k],
                        E_ln_p=E_ln_p_ck_to_ck,
                        E_ln_notp=E_ln_notp_ck_to_ck)
                    E_v_ck_to_ck = self.mf_alpha[ck, ck] / self.mf_beta[ck, ck]
                    E_ln_v_ck_to_ck = psi(self.mf_alpha[ck, ck]) - np.log(
                        self.mf_beta[ck, ck])
                    lp[ck] += (E_A[k, k] * Gamma(self.kappa).negentropy(
                        E_ln_lambda=E_ln_W_given_A[k, k],
                        E_lambda=E_W_given_A[k, k],
                        E_beta=E_v_ck_to_ck,
                        E_ln_beta=E_ln_v_ck_to_ck))

                # TODO: Get probability of impulse responses g

            # Normalize the log probabilities to update mf_m
            Z = logsumexp(lp)
            mk_hat = np.exp(lp - Z)

            # Convex combination of the old value and the new estimate
            self.mf_m[
                k, :] = (1.0 - stepsize) * self.mf_m[k, :] + stepsize * mk_hat