Example #1
    def get_vlb(self):
        """
        Variational lower bound for A_kk' and W_kk'
        E[LN p(A_kk', W_kk' | p, kappa, v)] -
        E[LN q(A_kk', W_kk' | mf_p, mf_kappa, mf_v)]
        :return: vlb, the scalar lower bound contribution
        """
        vlb = 0

        # First term:
        # E[LN p(A | p)]
        E_A = self.expected_A()
        E_notA = 1.0 - E_A
        E_ln_p = self.network.expected_log_p()
        E_ln_notp = self.network.expected_log_notp()
        vlb += Bernoulli().negentropy(E_x=E_A,
                                      E_notx=E_notA,
                                      E_ln_p=E_ln_p,
                                      E_ln_notp=E_ln_notp).sum()

        # E[LN p(W | A=1, kappa, v)]
        kappa = self.network.kappa
        E_v = self.network.expected_v()
        E_ln_v = self.network.expected_log_v()
        E_W1 = self.expected_W_given_A(A=1)
        E_ln_W1 = self.expected_log_W_given_A(A=1)
        vlb += (E_A * Gamma(kappa).negentropy(
            E_beta=E_v, E_ln_beta=E_ln_v, E_lambda=E_W1,
            E_ln_lambda=E_ln_W1)).sum()

        # E[LN p(W | A=0, kappa0, v0)]
        kappa0 = self.kappa_0
        v0 = self.nu_0
        E_W0 = self.expected_W_given_A(A=0)
        E_ln_W0 = self.expected_log_W_given_A(A=0)
        vlb += (E_notA * Gamma(kappa0, v0).negentropy(
            E_lambda=E_W0, E_ln_lambda=E_ln_W0)).sum()

        # Second term
        # E[LN q(A)]
        vlb -= Bernoulli(self.mf_p).negentropy().sum()
        # print "mf_p: %s, negent: %s" % (self.mf_p, Bernoulli(self.mf_p).negentropy().sum())

        # E[LN q(W | A=1)]
        vlb -= (E_A * Gamma(self.mf_kappa_1, self.mf_v_1).negentropy()).sum()

        # E[LN q(W | A=0)]
        vlb -= (E_notA *
                Gamma(self.mf_kappa_0, self.mf_v_0).negentropy()).sum()

        return vlb
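
The Bernoulli(...).negentropy(...) and Gamma(...).negentropy(...) calls above evaluate expected log-densities under the mean-field posterior. A minimal sketch of those two quantities, using hypothetical stand-in functions and assuming the factors for the rate and the weight are independent under q:

    import numpy as np
    from scipy.special import gammaln

    def bernoulli_negentropy(E_x, E_notx, E_ln_p, E_ln_notp):
        # E_q[ln Bern(x | p)] = E[x] E[ln p] + E[1 - x] E[ln(1 - p)]
        return E_x * E_ln_p + E_notx * E_ln_notp

    def gamma_negentropy(kappa, E_beta, E_ln_beta, E_lambda, E_ln_lambda):
        # E_q[ln Gamma(lam | kappa, beta)] for a rate-parameterized Gamma,
        # with beta and lam independent under q
        return (kappa * E_ln_beta - gammaln(kappa)
                + (kappa - 1.0) * E_ln_lambda - E_beta * E_lambda)
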
Example #2
    def resample_c(self, A, W):
        """
        Resample block assignments given the weighted adjacency matrix
        and the impulse response fits (if used)
        """
        if self.C == 1:
            return

        # Sample each assignment in order
        for k in range(self.K):
            # Compute unnormalized log probs of each connection
            lp = np.zeros(self.C)

            # Prior from m
            lp += np.log(self.m)

            # Likelihood from network
            for ck in range(self.C):
                c_temp = self.c.copy().astype(int)
                c_temp[k] = ck

                # p(A[k,k'] | c)
                lp[ck] += Bernoulli(self.p[ck, c_temp])\
                                .log_probability(A[k,:]).sum()

                # p(A[k',k] | c)
                lp[ck] += Bernoulli(self.p[c_temp, ck])\
                                .log_probability(A[:, k]).sum()

                # p(W[k,k'] | c)
                lp[ck] += (A[k,:] * Gamma(self.kappa, self.v[ck, c_temp])\
                                .log_probability(W[k,:])).sum()

                # p(W[k',k] | c)
                lp[ck] += (A[:, k] * Gamma(self.kappa, self.v[c_temp, ck])\
                                .log_probability(W[:, k])).sum()

                # TODO: Subtract off the self connection since it was double counted

                # TODO: Get probability of impulse responses g

            # Resample from lp
            self.c[k] = sample_discrete_from_log(lp)
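
The helper sample_discrete_from_log is not shown in this example; a minimal, hypothetical stand-in that draws an index with probability proportional to exp(lp) from a vector of unnormalized log probabilities could look like this:

    import numpy as np

    def sample_discrete_from_log(lp):
        # Shift by the max for numerical stability, exponentiate,
        # normalize, and draw a single categorical sample.
        lp = np.asarray(lp, dtype=float)
        p = np.exp(lp - lp.max())
        p /= p.sum()
        return np.random.choice(len(p), p=p)
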
Example #3
    def mf_update_c(self,
                    E_A,
                    E_notA,
                    E_W_given_A,
                    E_ln_W_given_A,
                    stepsize=1.0):
        """
        Update the block assignment probabilitlies one at a time.
        This one involves a number of not-so-friendly expectations.
        :return:
        """
        # Sample each assignment in order
        for k in range(self.K):
            notk = np.concatenate((np.arange(k), np.arange(k + 1, self.K)))

            # Compute unnormalized log probs of each connection
            lp = np.zeros(self.C)

            # Prior from m
            lp += self.expected_log_m()

            # Likelihood from network
            for ck in range(self.C):

                # Compute expectations with respect to other block assignments, c_{\neg k}
                # Initialize vectors for expected parameters
                E_ln_p_ck_to_cnotk = np.zeros(self.K - 1)
                E_ln_notp_ck_to_cnotk = np.zeros(self.K - 1)
                E_ln_p_cnotk_to_ck = np.zeros(self.K - 1)
                E_ln_notp_cnotk_to_ck = np.zeros(self.K - 1)
                E_v_ck_to_cnotk = np.zeros(self.K - 1)
                E_ln_v_ck_to_cnotk = np.zeros(self.K - 1)
                E_v_cnotk_to_ck = np.zeros(self.K - 1)
                E_ln_v_cnotk_to_ck = np.zeros(self.K - 1)

                for cnotk in range(self.C):
                    # Get the (K-1)-vector of other class assignment probabilities
                    p_cnotk = self.mf_m[notk, cnotk]

                    # Expected log probability of a connection from ck to cnotk
                    E_ln_p_ck_to_cnotk += p_cnotk * (
                        psi(self.mf_tau1[ck, cnotk]) -
                        psi(self.mf_tau0[ck, cnotk] + self.mf_tau1[ck, cnotk]))
                    E_ln_notp_ck_to_cnotk += p_cnotk * (
                        psi(self.mf_tau0[ck, cnotk]) -
                        psi(self.mf_tau0[ck, cnotk] + self.mf_tau1[ck, cnotk]))

                    # Expected log probability of a connection from cnotk to ck
                    E_ln_p_cnotk_to_ck += p_cnotk * (
                        psi(self.mf_tau1[cnotk, ck]) -
                        psi(self.mf_tau0[cnotk, ck] + self.mf_tau1[cnotk, ck]))
                    E_ln_notp_cnotk_to_ck += p_cnotk * (
                        psi(self.mf_tau0[cnotk, ck]) -
                        psi(self.mf_tau0[cnotk, ck] + self.mf_tau1[cnotk, ck]))

                    # Expected scale and log scale of connections from ck to cnotk
                    E_v_ck_to_cnotk += p_cnotk * (self.mf_alpha[ck, cnotk] /
                                                  self.mf_beta[ck, cnotk])
                    E_ln_v_ck_to_cnotk += p_cnotk * (
                        psi(self.mf_alpha[ck, cnotk]) -
                        np.log(self.mf_beta[ck, cnotk]))

                    # Expected scale and log scale of connections from cnotk to ck
                    E_v_cnotk_to_ck += p_cnotk * (self.mf_alpha[cnotk, ck] /
                                                  self.mf_beta[cnotk, ck])
                    E_ln_v_cnotk_to_ck += p_cnotk * (
                        psi(self.mf_alpha[cnotk, ck]) -
                        np.log(self.mf_beta[cnotk, ck]))

                # Compute E[ln p(A | c, p)]
                lp[ck] += Bernoulli().negentropy(
                    E_x=E_A[k, notk],
                    E_notx=E_notA[k, notk],
                    E_ln_p=E_ln_p_ck_to_cnotk,
                    E_ln_notp=E_ln_notp_ck_to_cnotk).sum()

                lp[ck] += Bernoulli().negentropy(
                    E_x=E_A[notk, k],
                    E_notx=E_notA[notk, k],
                    E_ln_p=E_ln_p_cnotk_to_ck,
                    E_ln_notp=E_ln_notp_cnotk_to_ck).sum()

                # Compute E[ln p(W | A=1, c, v)]
                lp[ck] += (E_A[k, notk] * Gamma(self.kappa).negentropy(
                    E_ln_lambda=E_ln_W_given_A[k, notk],
                    E_lambda=E_W_given_A[k, notk],
                    E_beta=E_v_ck_to_cnotk,
                    E_ln_beta=E_ln_v_ck_to_cnotk)).sum()

                lp[ck] += (E_A[notk, k] * Gamma(self.kappa).negentropy(
                    E_ln_lambda=E_ln_W_given_A[notk, k],
                    E_lambda=E_W_given_A[notk, k],
                    E_beta=E_v_cnotk_to_ck,
                    E_ln_beta=E_ln_v_cnotk_to_ck)).sum()

                # Compute expected log prob of self connection
                if self.allow_self_connections:
                    E_ln_p_ck_to_ck = psi(
                        self.mf_tau1[ck, ck]) - psi(self.mf_tau0[ck, ck] +
                                                    self.mf_tau1[ck, ck])
                    E_ln_notp_ck_to_ck = psi(
                        self.mf_tau0[ck, ck]) - psi(self.mf_tau0[ck, ck] +
                                                    self.mf_tau1[ck, ck])
                    lp[ck] += Bernoulli().negentropy(
                        E_x=E_A[k, k],
                        E_notx=E_notA[k, k],
                        E_ln_p=E_ln_p_ck_to_ck,
                        E_ln_notp=E_ln_notp_ck_to_ck)
                    E_v_ck_to_ck = self.mf_alpha[ck, ck] / self.mf_beta[ck, ck]
                    E_ln_v_ck_to_ck = psi(self.mf_alpha[ck, ck]) - np.log(
                        self.mf_beta[ck, ck])
                    lp[ck] += (E_A[k, k] * Gamma(self.kappa).negentropy(
                        E_ln_lambda=E_ln_W_given_A[k, k],
                        E_lambda=E_W_given_A[k, k],
                        E_beta=E_v_ck_to_ck,
                        E_ln_beta=E_ln_v_ck_to_ck))

                # TODO: Get probability of impulse responses g

            # Normalize the log probabilities to update mf_m
            Z = logsumexp(lp)
            mk_hat = np.exp(lp - Z)

            self.mf_m[k, :] = ((1.0 - stepsize) * self.mf_m[k, :]
                               + stepsize * mk_hat)
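
The last two statements normalize the unnormalized log probabilities with logsumexp and take a damped step toward the new assignment probabilities. A small standalone sketch of that update (the function name is illustrative; stepsize=1.0 simply replaces the old parameters):

    import numpy as np
    from scipy.special import logsumexp

    def damped_assignment_update(mf_m_k, lp, stepsize=1.0):
        # Convert unnormalized log probs to a normalized categorical
        # distribution and blend it with the previous parameters.
        mk_hat = np.exp(lp - logsumexp(lp))
        return (1.0 - stepsize) * mf_m_k + stepsize * mk_hat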