def _do_forward_pass(self, framelogprob):
    n_samples, n_components = framelogprob.shape
    fwdlattice = np.zeros((n_samples, n_components))
    _hmmc._forward(n_samples, n_components,
                   log_mask_zero(self.startprob_),
                   log_mask_zero(self.transmat_),
                   framelogprob, fwdlattice)
    with np.errstate(under="ignore"):
        return logsumexp(fwdlattice[-1]), fwdlattice
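# This variant, like the others below, delegates the recursion to the Cython
# routine _hmmc._forward, which fills fwdlattice in place. For reference, a
# minimal pure-NumPy sketch of the same log-space forward recursion (the
# name _forward_ref is mine, not from any of the snippets):
import numpy as np
from scipy.special import logsumexp

def _forward_ref(log_startprob, log_transmat, framelogprob):
    # alpha[0, j] = log pi_j + log b_j(x_0)
    # alpha[t, j] = logsumexp_i(alpha[t-1, i] + log A[i, j]) + log b_j(x_t)
    n_samples, n_components = framelogprob.shape
    fwdlattice = np.zeros((n_samples, n_components))
    fwdlattice[0] = log_startprob + framelogprob[0]
    for t in range(1, n_samples):
        # broadcast the previous column over destination states j and
        # reduce over source states i
        fwdlattice[t] = logsumexp(
            fwdlattice[t - 1][:, None] + log_transmat, axis=0
        ) + framelogprob[t]
    # the total log-likelihood is the logsumexp over the final row
    return logsumexp(fwdlattice[-1]), fwdlattice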
def _compute_logprob(self, X, startprob, transmat, **kwargs):
    logprobX = self._logprob_X(X, **kwargs)
    n_samples, n_components = logprobX.shape
    fwdlattice = np.zeros((n_samples, n_components))
    _forward(n_samples, n_components, np.log(startprob),
             np.log(transmat), logprobX, fwdlattice)
    return logsumexp(fwdlattice[-1])
def _do_forward_pass(log_startprob, log_transmat, framelogprob):
    n_samples, n_components = framelogprob.shape
    fwdlattice = np.zeros((n_samples, n_components))
    _hmmc._forward(
        n_samples, n_components, log_startprob, log_transmat,
        framelogprob, fwdlattice
    )
    return logsumexp(fwdlattice[-1]), fwdlattice
def _do_forward_pass(self, framelogprob):
    # Based on hmmlearn's _BaseHMM
    safe_startmat = self.startprob_ + np.finfo(float).eps
    safe_transmat = self.transmat_ + np.finfo(float).eps
    n_samples, n_components = framelogprob.shape
    fwdlattice = np.zeros((n_samples, n_components))
    _hmmc._forward(n_samples, n_components,
                   np.log(safe_startmat),
                   np.log(safe_transmat),
                   framelogprob, fwdlattice)
    return logsumexp(fwdlattice[-1]), fwdlattice
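# The eps-smoothing above is one way to avoid log(0) for zero-probability
# entries; the other snippets use hmmlearn's log_mask_zero instead, which
# lets zeros map to -inf while silencing the divide-by-zero warning. A
# sketch consistent with hmmlearn.utils.log_mask_zero:
import numpy as np

def log_mask_zero(a):
    # zeros become -inf (an impossible state/transition in log space)
    # rather than raising a RuntimeWarning
    a = np.asarray(a)
    with np.errstate(divide="ignore"):
        return np.log(a)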
def _do_forward_pass(self, framelogprob):
    n_samples, n_components = framelogprob.shape
    # archived numpy version
    # fwdlattice = _routines._forward(n_samples, n_components,
    #                                 log_mask_zero(self.startprob_),
    #                                 log_mask_zero(self.transmat_),
    #                                 framelogprob)
    fwdlattice = np.zeros((n_samples, n_components))
    _hmmc._forward(n_samples, n_components,
                   log_mask_zero(self.startprob_),
                   log_mask_zero(self.transmat_),
                   framelogprob, fwdlattice)
    with np.errstate(under="ignore"):
        return special.logsumexp(fwdlattice[-1]), fwdlattice
print("hmm transition probability: \n{}".format(transmat)) data, mask = batch_data(n_components) print("batch_framelogprob is: \n{}".format(data)) print("batch_mask is: \n{}".format(mask)) # 2. forward comparison # 2.1 forward by hmmlearn method: hmmlearn_fwdlattice = [] hmmlearn_logprob = [] for idx, framelogprob in enumerate(data): fwdlattice = np.zeros((mask[idx].sum(), n_components)) _hmmc._forward(int(mask[idx].sum()), \ int(n_components), \ np.log(startprob), \ np.log(transmat), \ framelogprob[mask[idx]>0], \ fwdlattice) hmmlearn_fwdlattice.append(fwdlattice) hmmlearn_logprob.append(logsumexp(fwdlattice[-1])) print("hmmlearn forward lattice: \n {}\n".format(hmmlearn_fwdlattice)) print("hmmlearn logporb: {}\n".format(hmmlearn_logprob)) # 2.2 batch forward in PyTorch torch_logprob, torch_fwdlattice = _forward(int(data.shape[1]), \ int(n_components), \ torch.from_numpy(np.log(startprob)), \ torch.from_numpy(np.log(transmat)), \ torch.from_numpy(data), \ torch.from_numpy(mask)) print("torch batch forward: \n {}\n".format(torch_fwdlattice))
def _do_forward_pass(self, framelogprob):
    n_observations, n_components = framelogprob.shape
    fwdlattice = np.zeros((n_observations, n_components))
    _hmmc._forward(n_observations, n_components,
                   self._log_startprob,
                   self._log_transmat,
                   framelogprob, fwdlattice)
    return logsumexp(fwdlattice[-1]), fwdlattice
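# A toy two-state sanity check that can be run through the _forward_ref
# sketch above to compare against any of the variants (all numbers are
# illustrative):
import numpy as np

startprob = np.array([0.6, 0.4])
transmat = np.array([[0.7, 0.3],
                     [0.4, 0.6]])
# framelogprob[t, j] = log b_j(x_t) for three observations
framelogprob = np.log(np.array([[0.5, 0.1],
                                [0.2, 0.3],
                                [0.1, 0.4]]))
logprob, fwdlattice = _forward_ref(np.log(startprob), np.log(transmat),
                                   framelogprob)
print(logprob)  # total log-likelihood of the observation sequence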