def test_train_sequences_of_different_length(self, tr_params="stmc"):
    """Training must cope with observation sequences of unequal length."""
    model = HeterogeneousHMM(
        n_states=self.n_states,
        n_g_emissions=self.n_g_emissions,
        n_d_emissions=self.n_d_emissions,
        n_d_features=self.n_d_features,
        covariance_type=self.covariance_type,
    )
    model.A = self.A
    model.pi = self.pi
    model.B = self.B
    model.means = self.means
    model.covars = self.covars
    model.tr_params = tr_params

    # Draw one observation sequence per requested length.
    sequences = []
    for length in (30, 40, 50):
        sequences.append(model.sample(n_sequences=1, n_samples=length)[0])

    model, log_likelihoods = model._train(
        sequences, n_iter=10, conv_thresh=0.01, return_log_likelihoods=True
    )

    # We consider learning successful if the log-likelihood is non-decreasing.
    # The first iteration is discarded because the value occasionally dips
    # right after initialization and then increases for the rest of training.
    assert np.all(np.round(np.diff(log_likelihoods[1:]), 10) >= 0)
def test_non_trainable_emission(self, n_samples=100, n_sequences=30, tr_params="ste"):
    """Training must leave a user-fixed discrete emission matrix unchanged.

    With ``nr_no_train_de=1`` the last discrete emission matrix is excluded
    from re-estimation: training without supplying it must raise, and once
    supplied it must survive training byte-for-byte.

    Parameters
    ----------
    n_samples : int
        Length of each sampled observation sequence.
    n_sequences : int
        Number of observation sequences sampled from the reference model.
    tr_params : str
        Which parameter groups to train (``s``tartprob, ``t``ransitions,
        ``e``missions).
    """
    h = HeterogeneousHMM(
        n_states=self.n_states,
        n_g_emissions=self.n_g_emissions,
        n_d_emissions=self.n_d_emissions,
        n_d_features=self.n_d_features,
        covariance_type=self.covariance_type,
    )
    h.A = self.A
    h.pi = self.pi
    h.B = self.B
    h.means = self.means
    h.covars = self.covars
    h.tr_params = tr_params

    # Generate observation sequences from the fully-specified reference model.
    X = h.sample(n_sequences=n_sequences, n_samples=n_samples)

    h_tst = HeterogeneousHMM(
        n_states=self.n_states,
        n_g_emissions=self.n_g_emissions,
        n_d_emissions=self.n_d_emissions,
        n_d_features=self.n_d_features,
        covariance_type=self.covariance_type,
        nr_no_train_de=1,
    )

    # The fixed (non-trainable) emission matrix for the last discrete feature.
    B_fix = np.eye(self.n_states, self.n_d_features[-1])

    # Training before the user supplies the non-trainable emissions must fail:
    # the fixed matrix cannot be initialized automatically.  Note the result is
    # deliberately NOT unpacked here — the call raises, so no assignment happens.
    with pytest.raises(AttributeError):
        h_tst._train(
            X,
            n_iter=100,
            conv_thresh=0.01,
            return_log_likelihoods=True,
            no_init=False,
        )

    # BUG FIX: previously the emissions were never assigned and the assertions
    # below referenced ``log_likelihoods``, which was unbound because the
    # training call above raised inside ``pytest.raises``.  Supply uniform
    # row-stochastic matrices for the trainable discrete features and B_fix
    # for the fixed one, then train for real.
    # NOTE(review): assumes initialization with no_init=False leaves the last
    # ``nr_no_train_de`` matrices as provided — confirm against _init.
    h_tst.B = [
        np.full((self.n_states, n_feats), 1.0 / n_feats)
        for n_feats in self.n_d_features[:-1]
    ] + [B_fix]
    h_tst, log_likelihoods = h_tst._train(
        X,
        n_iter=100,
        conv_thresh=0.01,
        return_log_likelihoods=True,
        no_init=False,
    )

    # we consider learning if the log_likelihood increases
    assert np.all(np.round(np.diff(log_likelihoods), 10) >= 0)
    # the non-trainable emission matrix must be exactly what we set
    assert np.allclose(B_fix, h_tst.B[-1])