def test_external_file_model_compatibility(self):
    """
    Test StringIO streams for dynamic programming.
    """
    # Dishonest casino: a fair die and a loaded die as hidden states.
    die_fair = HMM.HiddenDieState(1 / 6.0)
    die_loaded = HMM.HiddenDieState(0.5)
    matrix = np.array([[0.95, 0.05], [0.1, 0.9]])
    transition = TransitionMatrix.MatrixTransitionObject(matrix)
    states = [die_fair, die_loaded]
    # A fixed sequence of observed die rolls.
    rolls = [1, 2, 6, 6, 1, 2, 3, 4, 5, 6]
    # Serialize the observations into a line-oriented StringIO stream.
    converter = lineario.IntConverter()
    stream = lineario.SequentialStringIO(converter)
    stream.open_write()
    for roll in rolls:
        stream.write(roll)
    stream.close()
    # Reference model kept fully in memory.
    reference = HMM.TrainedModel(matrix, states)
    # Model under test, backed by temporary files for the DP tables.
    tmp_names = ('tmp_f.tmp', 'tmp_s.tmp', 'tmp_b.tmp')
    candidate = ExternalModel(transition, states, tmp_names)
    # Both implementations must produce the same posterior distributions.
    expected = reference.scaled_posterior_durbin(rolls)
    candidate.init_dp(stream)
    observed = list(candidate.posterior())
    self.assertTrue(np.allclose(expected, observed))
def test_scaled_ntransitions_expected_compatibility(self):
    """
    Compare expected transition counts from the old and new HMM APIs.
    """
    die_fair = HMM.HiddenDieState(1 / 6.0)
    die_loaded = HMM.HiddenDieState(0.5)
    states = [die_fair, die_loaded]
    prandom = 0.1
    # Old-style model built from an explicit transition matrix.
    matrix = TransitionMatrix.get_uniform_transition_matrix(
        prandom, len(states))
    reference = HMM.TrainedModel(matrix, states)
    # New-style model built from a transition object with a cache.
    cache_size = 100
    transition = TransitionMatrix.UniformTransitionObject(
        prandom, len(states))
    candidate = Model(transition, [die_fair, die_loaded], cache_size)
    # A fixed observation sequence with degenerate unit distances.
    rolls = [1, 2, 6, 6, 1, 2, 3, 4, 5, 6]
    gaps = [1] * (len(rolls) - 1)
    # Old algorithm: sum the off-diagonal expected transition counts.
    e_initial, A = reference.scaled_transition_expectations_durbin(rolls)
    expected_count = np.sum(A) - np.sum(np.diag(A))
    # New algorithm: ask the model directly via its DP info.
    dp_info = candidate.get_dp_info(rolls, gaps)
    observed_count = candidate.scaled_ntransitions_expected(dp_info)
    # The two estimates must agree to within floating point tolerance.
    self.assertAlmostEqual(expected_count, observed_count)
def test_model_compatibility(self):
    """
    Check that the in-memory model matches the reference posterior.
    """
    # Dishonest casino: a fair die and a loaded die as hidden states.
    die_fair = HMM.HiddenDieState(1 / 6.0)
    die_loaded = HMM.HiddenDieState(0.5)
    matrix = np.array([[0.95, 0.05], [0.1, 0.9]])
    transition = TransitionMatrix.MatrixTransitionObject(matrix)
    states = [die_fair, die_loaded]
    # A fixed sequence of observed die rolls.
    rolls = [1, 2, 6, 6, 1, 2, 3, 4, 5, 6]
    # Reference model and the internal model under test.
    reference = HMM.TrainedModel(matrix, states)
    candidate = InternalModel(transition, states)
    # Posterior distributions from both implementations must agree.
    expected = reference.scaled_posterior_durbin(rolls)
    observed = candidate.posterior(candidate.get_dp_info(rolls))
    self.assertTrue(np.allclose(expected, observed))