Code example #1
Score: 0
File: hmm.py — Project: vishalbelsare/ssm
 def posterior_sample(self, data, input=None, mask=None, tag=None):
     """Draw one sample of the discrete state sequence from the posterior.

     Sampling is done in the replicated state space and the result is
     mapped back through ``self.state_map`` to underlying model states.
     """
     state_map = self.state_map
     init_distn = self.init_state_distn.initial_state_distn
     trans_mats = self.transitions.transition_matrices(data, input, mask, tag)
     lls = self.observations.log_likelihoods(data, input, mask, tag)
     sampled = hmm_sample(replicate(init_distn, state_map),
                          trans_mats,
                          replicate(lls, state_map))
     return state_map[sampled]
Code example #2
Score: 0
File: hmm.py — Project: vishalbelsare/ssm
 def filter(self, data, input=None, mask=None, tag=None):
     """Run the forward (filtering) pass and return per-timestep state
     probabilities, collapsed from the replicated state space back onto
     the underlying model states."""
     state_map = self.state_map
     filtered = hmm_filter(
         replicate(self.init_state_distn.initial_state_distn, state_map),
         self.transitions.transition_matrices(data, input, mask, tag),
         replicate(self.observations.log_likelihoods(data, input, mask, tag),
                   state_map),
     )
     # Sum probability mass over replicated copies of each state.
     return collapse(filtered, state_map)
Code example #3
Score: 0
File: hmm.py — Project: vishalbelsare/ssm
 def most_likely_states(self, data, input=None, mask=None, tag=None):
     """Return the Viterbi (most likely) discrete state sequence for ``data``,
     mapped from the replicated state space back to model states."""
     state_map = self.state_map
     init_distn = self.init_state_distn.initial_state_distn
     trans_mats = self.transitions.transition_matrices(data, input, mask, tag)
     lls = self.observations.log_likelihoods(data, input, mask, tag)
     best_path = viterbi(replicate(init_distn, state_map),
                         trans_mats,
                         replicate(lls, state_map))
     return state_map[best_path]
Code example #4
Score: 0
File: hmm.py — Project: chingf/ssm
    def expected_states(self, data, input=None, mask=None, tag=None):
        """Compute posterior expectations of the discrete states.

        Runs forward-backward message passing in the replicated state space,
        then collapses the expectations back onto the underlying model states.

        Returns a tuple ``(Ez, Ezzp1, normalizer)``: single-timestep
        expectations, pairwise transition expectations, and the log
        normalizer of the posterior.
        """
        state_map = self.state_map
        log_init = self.init_state_distn.log_initial_state_distn(data, input, mask, tag)
        log_trans = self.transitions.log_transition_matrices(data, input, mask, tag)
        lls = self.observations.log_likelihoods(data, input, mask, tag)
        expectations = hmm_expected_states(replicate(log_init, state_map),
                                           log_trans,
                                           replicate(lls, state_map))
        Ez_rep, Ezzp1_rep, normalizer = expectations

        # Collapse replicated states: pairwise expectations are collapsed
        # along both state axes.
        Ezzp1 = collapse(collapse(Ezzp1_rep, state_map, axis=2), state_map, axis=1)
        return collapse(Ez_rep, state_map), Ezzp1, normalizer
Code example #5
Score: 0
File: hmm.py — Project: chingf/ssm
    def log_likelihood(self, datas, inputs=None, masks=None, tags=None):
        """
        Compute the log probability of the data under the current
        model parameters.

        :param datas: list of data arrays, one per sequence.
        :param inputs: optional list of input arrays (``None`` entries allowed).
        :param masks: optional list of masks (``None`` entries allowed).
        :param tags: optional list of tags (``None`` entries allowed).
        :return: total log probability of the data.
        """
        m = self.state_map
        # Broadcast the None defaults to per-sequence lists so zip() below
        # does not fail when callers rely on the defaults. (A decorator may
        # already normalize these upstream — this is a harmless safeguard.)
        n = len(datas)
        inputs = [None] * n if inputs is None else inputs
        masks = [None] * n if masks is None else masks
        tags = [None] * n if tags is None else tags

        ll = 0
        for data, input, mask, tag in zip(datas, inputs, masks, tags):
            log_pi0 = self.init_state_distn.log_initial_state_distn(data, input, mask, tag)
            log_Ps = self.transitions.log_transition_matrices(data, input, mask, tag)
            log_likes = self.observations.log_likelihoods(data, input, mask, tag)
            # Accumulate the HMM normalizer (marginal log likelihood) for
            # each sequence, in the replicated state space.
            ll += hmm_normalizer(replicate(log_pi0, m), log_Ps, replicate(log_likes, m))
            # Sanity check: message passing must not produce NaN/inf.
            assert np.isfinite(ll)
        return ll