def map(self, obs, log=True):
    """Return the best component score, its index, and its MAP states.

    Combines each component's MAP score for ``obs`` with the mixture's
    prior log-weight, then picks the single best component.
    """
    # Prior log-weights of the components, from the projected counts.
    scores = np.log(simplex.project(np.array(self.counts)))
    per_component_states = []
    for idx, component in enumerate(self.components):
        component_score, component_states = component.map(obs, log=True)
        scores[idx] += component_score
        per_component_states.append(component_states)
    best = scores.argmax()
    top_score = scores[best]
    return (top_score if log else np.exp(top_score)), best, per_component_states[best]
def expected_params(self):
    """Expected mixture weights plus each component's expected parameters."""
    weights = simplex.project(self.totals)
    mix = {'log': np.log(weights), 'original': weights}
    return {
        'mix': mix,
        'components': [c.expected_params() for c in self.components],
    }
def fbsample(self, obs, log=True, frozen_params=None):
    """Sample a sequence of states given parameters.

    Parameters
    ----------
    obs: np.ndarray
        Observations
    log: bool
        If True return log probability of the sample
    frozen_params: dict (optional)
        Frozen parameters.

    Returns
    -------
    score: float
        P(S=states | X=obs)
    states: np.ndarray
        sampled states

    Notes
    -----
    Backward sampling from the forward matrices (forward-backward
    recursion), following:

    @article{scott2002bayesian,
        title={Bayesian methods for hidden Markov models},
        author={Scott, Steven L},
        journal={Journal of the American Statistical Association},
        volume={97},
        number={457},
        year={2002}
    }
    """
    # Forward matrices in probability (not log) space, one per time step.
    Pmats = self.forward_matrices(obs, log=False, frozen_params=frozen_params)
    T = len(Pmats)
    lp = 0.0
    states = []
    # Draw the final state from the time-(T-1) marginal obtained by
    # summing the last forward matrix over axis 1.
    # NOTE(review): p is not renormalized here (unlike the loop below) —
    # presumably forward_matrices already returns a normalized posterior;
    # confirm, otherwise np.log(p[i]) misstates the sample's probability.
    # p = np.exp(Pmats[T-1]).sum(1)
    p = Pmats[T-1].sum(1)
    # Inverse-CDF draw: index of the first cumulative weight >= uniform.
    i = p.cumsum().searchsorted(np.random.random())
    lp += np.log(p[i])
    states.append(i)
    # Walk backwards in time, sampling each earlier state conditioned on
    # the most recently drawn one; each row is renormalized first.
    # NOTE(review): this conditions on row Pmats[t][i] — assumes the row
    # index of the forward matrix is the later state; verify orientation
    # against forward_matrices.
    for t in range(T - 1, 0, -1):
        i = states[-1]
        p = simplex.project(Pmats[t][i])
        j = p.cumsum().searchsorted(np.random.random())
        lp += np.log(p[j])
        states.append(j)
    # States were collected in reverse time order; flip before returning.
    return lp if log else np.exp(lp), np.array(states[::-1])
def expected_params(self):
    """Normalized initial-state and transition distributions from totals."""
    initial = simplex.project(self.initial_totals)
    transitions = simplex.project(self.transition_totals)
    return {
        'log': (np.log(initial), np.log(transitions)),
        'original': (initial, transitions),
    }
def params(self):
    """Normalized initial-state and transition distributions from counts."""
    initial = simplex.project(self.initial_counts)
    transitions = simplex.project(self.transition_counts)
    return {
        'log': (np.log(initial), np.log(transitions)),
        'original': (initial, transitions),
    }
def proposal_prob(self, k, states, obs, log=True):
    """Proposal probability of component ``k`` with ``states`` given ``obs``.

    Sum of the mixture's log-weight for ``k`` and the component's own
    proposal log-probability; exponentiated when ``log`` is False.
    """
    mix_lp = np.log(simplex.project(self.counts)[k])
    component_lp = self.components[k].proposal_prob(states, obs, log=True)
    total = mix_lp + component_lp
    return total if log else np.exp(total)
def propose(self, obs, log=True):
    """Draw a component index and a proposed state sequence for ``obs``."""
    weights = simplex.project(self.counts)
    # Inverse-CDF draw of a component from the mixture weights.
    k = weights.cumsum().searchsorted(np.random.random())
    states_lp, states = self.components[k].propose(obs)
    total = np.log(weights[k]) + states_lp
    return (total if log else np.exp(total)), k, states
def loglikelihood_vector(self, obs):
    """Per-component joint log-likelihoods: log weight_k + log p(obs | k)."""
    log_weights = np.log(simplex.project(self.component_counts()))
    return np.array([
        lw + component.likelihood(obs, log=True)
        for lw, component in zip(log_weights, self.components)
    ])
def likelihood(self, obs, log=True):
    """Marginal likelihood of ``obs`` under the mixture.

    Computes log sum_k pi_k p(obs | k) with a numerically stable
    log-sum-exp over the per-component joint scores.

    Fix: the previous implementation returned ``max(scores)`` — the score
    of the single best component (a MAP-style lower bound, as in ``map``)
    — rather than the mixture marginal that the name promises.

    Parameters
    ----------
    obs:
        Observations, forwarded to each component's ``likelihood``.
    log: bool
        If True return the log-likelihood, otherwise the likelihood.

    Returns
    -------
    float
        log p(obs) if ``log`` else p(obs).
    """
    mix = simplex.project(self.component_counts())
    scores = np.array([
        np.log(p) + component.likelihood(obs, log=True)
        for p, component in zip(mix, self.components)
    ])
    # Stable log-sum-exp across component scores.
    ll = np.logaddexp.reduce(scores)
    return ll if log else np.exp(ll)