Code Example #1
    def expected_states(self, variational_mean, data, input=None, mask=None, tag=None):
        # The variational mean of the continuous states has no missing entries,
        # so use an all-True mask for the dynamics and transitions
        x_mask = np.ones_like(variational_mean, dtype=bool)
        pi0 = self.init_state_distn.log_initial_state_distn
        Ps = self.transitions.transition_matrices(variational_mean, input, x_mask, tag)
        log_likes = self.dynamics.log_likelihoods(variational_mean, input, x_mask, tag)
        log_likes += self.emissions.log_likelihoods(data, input, mask, tag, variational_mean)
        return hmm_expected_states(pi0, Ps, log_likes)
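All of the snippets on this page funnel into hmm_expected_states(pi0, Ps, log_likes). As a rough orientation, here is a minimal NumPy sketch of the quantities a function with that signature computes via forward-backward message passing; the assumed shapes (pi0 of length K, a single stationary K x K transition matrix Ps, and a T x K array of log likelihoods) and the helper name toy_hmm_expected_states are illustrative, not taken from the ssm library.

import numpy as np
from scipy.special import logsumexp

def toy_hmm_expected_states(pi0, Ps, log_likes):
    """Reference forward-backward sketch: marginals, pairwise marginals, log Z.

    Assumed shapes (not taken from ssm): pi0 (K,), Ps (K, K), log_likes (T, K).
    """
    T, K = log_likes.shape
    log_pi0, log_Ps = np.log(pi0), np.log(Ps)

    # Forward messages: alphas[t, k] = log p(y_{1:t}, z_t = k)
    alphas = np.zeros((T, K))
    alphas[0] = log_pi0 + log_likes[0]
    for t in range(1, T):
        alphas[t] = logsumexp(alphas[t - 1][:, None] + log_Ps, axis=0) + log_likes[t]
    log_Z = logsumexp(alphas[-1])

    # Backward messages: betas[t, k] = log p(y_{t+1:T} | z_t = k)
    betas = np.zeros((T, K))
    for t in range(T - 2, -1, -1):
        betas[t] = logsumexp(log_Ps + (log_likes[t + 1] + betas[t + 1])[None, :], axis=1)

    # Posterior expectations
    Ez = np.exp(alphas + betas - log_Z)                    # (T, K) marginals q(z_t = k)
    log_joint = (alphas[:-1, :, None] + log_Ps[None]       # (T-1, K, K) pairwise marginals
                 + (log_likes[1:] + betas[1:])[:, None, :])
    Ezzp1 = np.exp(log_joint - log_Z)
    return Ez, Ezzp1, log_Z

# Example: K = 2 states, T = 4 time steps
pi0 = np.array([0.6, 0.4])
Ps = np.array([[0.9, 0.1], [0.2, 0.8]])
log_likes = np.log(np.array([[0.7, 0.3], [0.6, 0.4], [0.2, 0.8], [0.1, 0.9]]))
Ez, Ezzp1, log_Z = toy_hmm_expected_states(pi0, Ps, log_likes)
print(Ez.shape, Ezzp1.shape)  # (4, 2) (3, 2, 2)

Here Ez holds the per-time-step marginals q(z_t = k), Ezzp1 the pairwise marginals q(z_t = j, z_{t+1} = k), and log_Z the log normalizer used in the ELBO.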
Code Example #2
File: variational.py  Project: pankajkarman/ssm
    def discrete_state_params(self, value):
        assert isinstance(value, list) and len(value) == len(self.datas)
        for prms in value:
            for key in ["pi0", "Ps", "log_likes"]:
                assert key in prms
        self._discrete_state_params = value

        # Rerun the HMM smoother with the updated parameters
        self._discrete_expectations = \
            [hmm_expected_states(prms["pi0"], prms["Ps"], prms["log_likes"])
             for prms in self._discrete_state_params]
Code Example #3
    def entropy(self, sample=None):
        """
        Compute the entropy of the variational posterior distribution.

        Recall that under the structured mean field approximation

        H[q(z)q(x)] = -E_{q(z)q(x)}[log q(z) + log q(x)]
                    = -E_q(z)[log q(z)] - E_q(x)[log q(x)]
                    = H[q(z)] + H[q(x)].

        That is, the entropy separates into the sum of entropies for the
        discrete and continuous states.

        For each one, we have

        E_q(u)[log q(u)] = E_q(u)[log q(u_1) + sum_t log q(u_t | u_{t-1}) + sum_t log q(u_t) - log Z]
                         = E_{q(u_1)}[log q(u_1)] + sum_t E_{q(u_t, u_{t-1})}[log q(u_t | u_{t-1})]
                             + sum_t E_{q(u_t)}[log q(u_t)] - log Z

        where u \in {z, x} and log Z is the log normalizer.  This shows that we just need the
        posterior expectations and potentials, and the log normalizer of the distribution.

        Note
        ----
        We haven't implemented the exact calculations for the continuous states yet,
        so for now we're approximating the continuous state entropy via samples.
        """

        # Sample the continuous states
        if sample is None:
            sample = self.sample_continuous_states()
        else:
            assert isinstance(sample, list) and len(sample) == len(self.datas)

        negentropy = 0
        for s, prms in zip(sample, self.params):

            # 1. Compute log q(x) of samples of x
            negentropy += block_tridiagonal_log_probability(
                s, prms["J_diag"], prms["J_lower_diag"], prms["h"])

            # 2. Compute E_{q(z)}[ log q(z) ]
            # Normalize the initial and transition potentials in log space
            log_pi0 = np.log(prms["pi0"] + 1e-16)
            log_pi0 -= logsumexp(log_pi0)
            log_Ps = np.log(prms["Ps"] + 1e-16)
            log_Ps -= logsumexp(log_Ps, axis=-1, keepdims=True)
            (Ez, Ezzp1,
             normalizer) = hmm_expected_states(prms["pi0"], prms["Ps"],
                                               prms["log_likes"])
            negentropy -= normalizer  # -log Z
            negentropy += np.sum(Ez[0] * log_pi0)  # initial factor
            negentropy += np.sum(Ez * prms["log_likes"])  # unary factors
            negentropy += np.sum(Ezzp1 * log_Ps)  # pairwise factors

        return -negentropy
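The docstring above reduces the discrete-state entropy to posterior expectations, potentials, and the log normalizer. The following self-contained check (a toy example, not part of ssm) enumerates every path of a short chain and confirms that log Z minus the expected log potentials, the same bookkeeping done in the loop above, equals the entropy computed directly.

import itertools
import numpy as np

# Toy chain: K = 2 states, T = 3 steps, with arbitrary normalized potentials
K, T = 2, 3
rng = np.random.default_rng(0)
pi0 = rng.dirichlet(np.ones(K))
Ps = rng.dirichlet(np.ones(K), size=K)   # each row sums to one
log_likes = rng.normal(size=(T, K))

# Enumerate q(z_{1:T}) proportional to pi0[z_1] * prod_t Ps[z_{t-1}, z_t] * exp(log_likes[t, z_t])
paths = list(itertools.product(range(K), repeat=T))
log_w = np.array([
    np.log(pi0[z[0]]) + log_likes[0, z[0]]
    + sum(np.log(Ps[z[t - 1], z[t]]) + log_likes[t, z[t]] for t in range(1, T))
    for z in paths])
log_Z = np.logaddexp.reduce(log_w)
q = np.exp(log_w - log_Z)

# Direct entropy: H = -sum_z q(z) log q(z)
H_direct = -np.sum(q * (log_w - log_Z))

# Entropy via posterior expectations, as in entropy() above
Ez = np.zeros((T, K))
Ezzp1 = np.zeros((T - 1, K, K))
for z, qz in zip(paths, q):
    for t in range(T):
        Ez[t, z[t]] += qz
    for t in range(T - 1):
        Ezzp1[t, z[t], z[t + 1]] += qz
negentropy = -log_Z
negentropy += np.sum(Ez[0] * np.log(pi0))      # initial factor
negentropy += np.sum(Ez * log_likes)           # unary factors
negentropy += np.sum(Ezzp1 * np.log(Ps))       # pairwise factors
assert np.isclose(H_direct, -negentropy)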
Code Example #4
File: hmm.py  Project: vishalbelsare/ssm
    def expected_states(self, data, input=None, mask=None, tag=None):
        m = self.state_map
        pi0 = self.init_state_distn.initial_state_distn
        Ps = self.transitions.transition_matrices(data, input, mask, tag)
        log_likes = self.observations.log_likelihoods(data, input, mask, tag)
        Ez, Ezzp1, normalizer = hmm_expected_states(replicate(pi0, m), Ps,
                                                    replicate(log_likes, m))

        # Collapse the expected states
        Ez = collapse(Ez, m)
        Ezzp1 = collapse(collapse(Ezzp1, m, axis=2), m, axis=1)
        return Ez, Ezzp1, normalizer
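The replicate and collapse helpers here convert between the K collapsed states and the augmented state space described by self.state_map. Their implementations are not shown on this page, so the toy stand-ins below (replicate_cols / collapse_cols are illustrative names, and the semantics are an assumption, not ssm's actual ssm.util code) only sketch the kind of bookkeeping involved: copy columns out to the augmented space, then sum columns that share a collapsed state back together.

import numpy as np

def replicate_cols(x, state_map):
    # Expand the last axis from K collapsed states to len(state_map) augmented states
    # by copying the column of the collapsed state each augmented state maps to.
    return x[..., state_map]

def collapse_cols(x, state_map, axis=-1):
    # Sum augmented-state entries that share the same collapsed state.
    K = int(np.max(state_map)) + 1
    out = [np.sum(np.take(x, np.where(state_map == k)[0], axis=axis),
                  axis=axis, keepdims=True)
           for k in range(K)]
    return np.concatenate(out, axis=axis)

# Two collapsed states, where collapsed state 1 is split into two augmented states
state_map = np.array([0, 1, 1])
p = np.array([0.5, 0.3, 0.2])                           # augmented-state marginal
print(collapse_cols(p, state_map))                      # [0.5, 0.5]
print(replicate_cols(np.array([2.0, 7.0]), state_map))  # [2., 7., 7.]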
Code Example #5
File: hmm.py  Project: vishalbelsare/ssm
    def expected_states(self, data, input=None, mask=None, tag=None):
        pi0 = self.init_state_distn.initial_state_distn
        Ps = self.transitions.transition_matrices(data, input, mask, tag)
        log_likes = self.observations.log_likelihoods(data, input, mask, tag)
        return hmm_expected_states(pi0, Ps, log_likes)
Code Example #6
    def mean_discrete_states(self):
        # Now compute the posterior expectations of z under q(z)
        return [
            hmm_expected_states(prms["pi0"], prms["Ps"], prms["log_likes"])
            for prms in self.params
        ]
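Each element of the list returned above is the full (Ez, Ezzp1, normalizer) tuple from hmm_expected_states. Downstream code that only wants the per-time-step marginals or a hard segmentation would therefore look roughly like the sketch below; hard_segmentation is an illustrative helper, not an ssm function.

import numpy as np

def hard_segmentation(discrete_expectations):
    # discrete_expectations: list of (Ez, Ezzp1, normalizer) tuples as returned above.
    # Ez[t, k] = q(z_t = k), so the argmax gives a hard state assignment per time step.
    return [np.argmax(Ez, axis=1) for Ez, _, _ in discrete_expectations]

# e.g. hard_segmentation(posterior.mean_discrete_states()) for a posterior like the one above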