Example #1
File: hmm.py Project: yahmadian/ssm
def expected_states(self, data, input=None, mask=None, tag=None):
    log_pi0 = self.init_state_distn.log_initial_state_distn(
        data, input, mask, tag)
    log_Ps = self.transitions.log_transition_matrices(
        data, input, mask, tag)
    log_likes = self.observations.log_likelihoods(data, input, mask, tag)
    return hmm_expected_states(log_pi0, log_Ps, log_likes)
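
All of these examples build three arrays of log-potentials and delegate to `hmm_expected_states` (presumably ssm's message-passing routine). Below is a minimal, self-contained numpy sketch of the contract it appears to implement, via the standard forward-backward algorithm; the function name `hmm_expected_states_np`, the shapes, and the implementation are illustrative assumptions, not the library's code.

import numpy as np
from scipy.special import logsumexp

def hmm_expected_states_np(log_pi0, log_Ps, log_likes):
    # log_pi0: (K,) initial log probs; log_Ps: (K, K) or (T-1, K, K)
    # transition log probs; log_likes: (T, K) per-state log-likelihoods.
    T, K = log_likes.shape
    log_Ps = np.broadcast_to(log_Ps, (T - 1, K, K))

    # Forward pass: alpha[t, k] = log p(y_{1:t}, z_t = k)
    alpha = np.zeros((T, K))
    alpha[0] = log_pi0 + log_likes[0]
    for t in range(1, T):
        alpha[t] = logsumexp(alpha[t - 1][:, None] + log_Ps[t - 1],
                             axis=0) + log_likes[t]
    normalizer = logsumexp(alpha[-1])  # log p(y_{1:T})

    # Backward pass: beta[t, k] = log p(y_{t+1:T} | z_t = k)
    beta = np.zeros((T, K))
    for t in range(T - 2, -1, -1):
        beta[t] = logsumexp(log_Ps[t] + (log_likes[t + 1] + beta[t + 1]),
                            axis=1)

    # Posterior unary marginals Ez (T, K) and pairwise Ezzp1 (T-1, K, K)
    Ez = np.exp(alpha + beta - normalizer)
    Ezzp1 = np.exp(alpha[:-1, :, None] + log_Ps
                   + (log_likes[1:] + beta[1:])[:, None, :] - normalizer)
    return Ez, Ezzp1, normalizer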
Example #2
File: hmm.py Project: chingf/ssm
def expected_states(self, data, input=None, mask=None, tag=None):
    m = self.state_map
    log_pi0 = self.init_state_distn.log_initial_state_distn(data, input, mask, tag)
    log_Ps = self.transitions.log_transition_matrices(data, input, mask, tag)
    log_likes = self.observations.log_likelihoods(data, input, mask, tag)
    Ez, Ezzp1, normalizer = hmm_expected_states(replicate(log_pi0, m), log_Ps,
                                                replicate(log_likes, m))

    # Collapse the expected states
    Ez = collapse(Ez, m)
    Ezzp1 = collapse(collapse(Ezzp1, m, axis=2), m, axis=1)
    return Ez, Ezzp1, normalizer
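
Example #2 replicates each underlying discrete state before message passing and then collapses the posterior back onto the original states (the state_map trick used, e.g., for duration modeling). The helpers below sketch what ssm's `replicate` and `collapse` plausibly do; these implementations are assumptions written for illustration, not the library's code.

import numpy as np

def replicate(x, state_map, axis=-1):
    # Expand an array over the K underlying states to the K_total
    # replicated states by indexing with the map (assumed semantics).
    return np.take(x, state_map, axis=axis)

def collapse(x, state_map, axis=-1):
    # Sum the replicated states' mass back onto the underlying states.
    K = state_map.max() + 1
    return np.stack([np.take(x, np.where(state_map == k)[0],
                             axis=axis).sum(axis=axis)
                     for k in range(K)], axis=axis)

state_map = np.array([0, 0, 1, 1, 1])              # 2 true states, 5 replicas
Ez_rep = np.random.dirichlet(np.ones(5), size=10)  # (T=10, K_total=5)
Ez = collapse(Ez_rep, state_map)                   # (10, 2); rows still sum to 1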
Example #3
    def entropy(self, sample=None):
        """
        Compute the entropy of the variational posterior distribution.

        Recall that under the structured mean field approximation

        H[q(z)q(x)] = -E_{q(z)q(x)}[log q(z) + log q(x)]
                    = -E_q(z)[log q(z)] - E_q(x)[log q(x)]
                    = H[q(z)] + H[q(x)].

        That is, the entropy separates into the sum of entropies for the
        discrete and continuous states.

        For each one, we have

        E_q(u)[log q(u)] = E_q(u)[log q(u_1) + sum_t log q(u_t | u_{t-1}) + sum_t log q(u_t) - log Z]
                         = E_q(u_1)[log q(u_1)] + sum_t E_{q(u_t, u_{t-1})}[log q(u_t | u_{t-1})]
                             + sum_t E_q(u_t)[log q(u_t)] - log Z

        where u \in {z, x} and log Z is the log normalizer.  This shows that we just need the
        posterior expectations and potentials, and the log normalizer of the distribution.

        Note
        ----
        We haven't implemented the exact calculations for the continuous states yet,
        so for now we're approximating the continuous state entropy via samples.
        """

        # Sample the continuous states
        if sample is None:
            sample = self.sample_continuous_states()
        else:
            assert isinstance(sample, list) and len(sample) == len(self.datas)

        negentropy = 0
        for s, prms in zip(sample, self.params):

            # 1. Compute log q(x) of samples of x
            negentropy += block_tridiagonal_log_probability(
                s, prms["J_diag"], prms["J_lower_diag"], prms["h"])

            # 2. Compute E_{q(z)}[ log q(z) ]
            (Ez, Ezzp1,
             normalizer) = hmm_expected_states(prms["log_pi0"], prms["log_Ps"],
                                               prms["log_likes"])
            negentropy -= normalizer  # -log Z
            negentropy += np.sum(Ez[0] * prms["log_pi0"])  # initial factor
            negentropy += np.sum(Ez * prms["log_likes"])  # unary factors
            negentropy += np.sum(Ezzp1 * prms["log_Ps"])  # pairwise factors

        return -negentropy
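
As a sanity check on the identity in this docstring, here is a self-contained brute-force sketch (sizes and numbers are made up): enumerate every discrete state path of a tiny chain, compute the exact entropy of q(z), and compare it with the negentropy accumulation used above.

import itertools
import numpy as np
from scipy.special import logsumexp

K, T = 2, 4                                         # tiny, so K**T paths is cheap
rng = np.random.default_rng(0)
log_pi0 = np.log(rng.dirichlet(np.ones(K)))
log_Ps = np.log(rng.dirichlet(np.ones(K), size=K))  # (K, K), rows normalized
log_likes = rng.normal(size=(T, K))

# Exact posterior over all K**T discrete state paths
paths = list(itertools.product(range(K), repeat=T))
log_joint = np.array([log_pi0[z[0]]
                      + sum(log_Ps[z[t], z[t + 1]] for t in range(T - 1))
                      + sum(log_likes[t, z[t]] for t in range(T))
                      for z in paths])
log_Z = logsumexp(log_joint)
q = np.exp(log_joint - log_Z)
H_exact = -np.sum(q * (log_joint - log_Z))

# The same entropy from posterior marginals, mirroring the code above
Ez = np.zeros((T, K))
Ezzp1 = np.zeros((T - 1, K, K))
for p, z in zip(q, paths):
    z = np.array(z)
    Ez[np.arange(T), z] += p
    Ezzp1[np.arange(T - 1), z[:-1], z[1:]] += p
negentropy = (-log_Z + Ez[0] @ log_pi0
              + np.sum(Ez * log_likes) + np.sum(Ezzp1 * log_Ps))
print(np.allclose(H_exact, -negentropy))            # True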
Example #4
def expected_states(self,
                    variational_mean,
                    data,
                    input=None,
                    mask=None,
                    tag=None):
    log_pi0 = self.init_state_distn.log_initial_state_distn(
        variational_mean, input, mask, tag)
    log_Ps = self.transitions.log_transition_matrices(
        variational_mean, input, mask, tag)
    log_likes = self.dynamics.log_likelihoods(
        variational_mean, input,
        np.ones_like(variational_mean, dtype=bool), tag)
    log_likes += self.emissions.log_likelihoods(data, input, mask, tag,
                                                variational_mean)
    return hmm_expected_states(log_pi0, log_Ps, log_likes)
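
This variant comes from the SLDS code path: the discrete-state expectations condition on a variational estimate of the continuous states, so the (T, K) log-likelihoods add a dynamics term and an emissions term before message passing. A rough usage sketch, assuming ssm's published SLDS interface (the class name, fit keywords, and posterior attribute are assumptions taken from the library's examples, not from this snippet):

import numpy as np
import ssm

data = np.random.randn(200, 10)                  # (T, N) toy observations
slds = ssm.SLDS(10, 3, 2, emissions="gaussian")  # N=10, K=3, D=2
elbos, posterior = slds.fit(data, method="laplace_em",
                            variational_posterior="structured_meanfield",
                            num_iters=25)

# Discrete-state expectations given the posterior mean of x
x_mean = posterior.mean_continuous_states[0]
Ez, Ezzp1, normalizer = slds.expected_states(x_mean, data)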
Example #5
def mean_discrete_states(self):
    # Now compute the posterior expectations of z under q(z)
    return [
        hmm_expected_states(prms["log_pi0"], prms["log_Ps"],
                            prms["log_likes"]) for prms in self.params
    ]
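
The method returns one (Ez, Ezzp1, normalizer) triple per dataset in self.params. A hypothetical way to consume the result (the posterior variable here is illustrative):

import numpy as np

for Ez, Ezzp1, log_Z in posterior.mean_discrete_states():
    # Ez: (T, K) per-timestep marginals; argmax gives the marginal MAP path
    z_map = np.argmax(Ez, axis=1)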