def test_log(self):
    # Declare class object
    test = hmm(states, possible_observation, start_probability,
               transition_probability, emission_probability)
    # Log-probability function
    prob = test.log_prob(observation_tuple, quantities_observations)
    prob = round(prob, 3)
    self.assertEqual(-67.920, prob)
def test_viterbi(self):
    # Declare class object
    test = hmm(states, possible_observation, start_probability,
               transition_probability, emission_probability)
    # Viterbi algorithm
    vit_out = test.viterbi(observations)
    self.assertEqual(['t', 't', 't', 't'], vit_out)
def generate_model(input_dict, input_layout):
    resource_path = config['config']['resources']
    layout_file = resource_path + input_layout + '.json'

    print("Generating Transition Matrix...")
    transition_mtx = transition_matrix.create_transition_matrix(input_dict)
    # Normalize the transition matrix:
    # 1 selects the sklearn-based normalization, 0 the other implementation
    header, normalized_transition_matrix = normalize_matrix.normalize_dataframe(transition_mtx, 1)
    transition_dataframe = pd.DataFrame(normalized_transition_matrix, index=header, columns=header)

    print("Generating Emission Matrix...")
    obs, emission_mtx = emission_matrix.generate_emission_matrix(layout_file)
    emission_dataframe = pd.DataFrame(emission_mtx, index=obs, columns=obs)

    start_prob = np.asmatrix(transition_dataframe.values[0])
    states = list(transition_dataframe.index.values)
    observation = list(transition_dataframe.index.values)
    transition = np.asmatrix(transition_dataframe.values)
    emission = np.asmatrix(emission_dataframe.values)

    # Generate the HMM model from the states, observations, start
    # probabilities, transition matrix and emission matrix
    model = hidden_markov.hmm(states, observation, start_prob, transition, emission)
    return model, states, observation, start_prob, transition_dataframe, emission_dataframe
def test_train_hmm(self):
    # Declare class object
    test = hmm(states, possible_observation, start_probability,
               transition_probability, emission_probability)
    # Baum-Welch algorithm
    num_iter = 1000
    e, t, s = test.train_hmm(observation_tuple, num_iter, quantities_observations)
def test_forward(self):
    # Declare class object
    test = hmm(states, possible_observation, start_probability,
               transition_probability, emission_probability)
    # Forward algorithm
    forw_prob = test.forward_algo(observations)
    forw_prob = round(forw_prob, 5)
    self.assertEqual(0.05153, forw_prob)
def test_scale():
    states = ('s', 't')
    # List of possible observations
    possible_observation = ('A', 'B')
    state_map = {0: 's', 1: 't'}

    # The observation sequences that we feed to the model
    observations = ('A', 'B', 'B', 'A')
    obs4 = ('B', 'A', 'B')
    # obs3 = ('R', 'W', 'W', 'W')
    # obs2 = ('W', 'W', 'R', 'R')
    observation_tuple = []
    observation_tuple.extend([observations, obs4])
    quantities_observations = [10, 20]

    # NumPy matrices of the model parameters
    start_probability = np.matrix('0.5 0.5')
    transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7')
    emission_probability = np.matrix('0.3 0.7 ; 0.4 0.6')

    test = hmm(states, possible_observation, start_probability,
               transition_probability, emission_probability)

    # Forward algorithm
    print(test.forward_algo(observations))

    # Viterbi algorithm
    print("")
    print(test.viterbi(observations))

    prob = test.log_prob(observation_tuple, quantities_observations)
    print("probability of sequence with original parameters : %f" % (prob))
    print("")

    num_iter = 1000
    print("applied Baum-Welch on")
    print(observation_tuple)
    e, t, s = test.train_hmm(observation_tuple, num_iter, quantities_observations)
    print("parameters emission, transition and start")
    print(e)
    print("")
    print(t)
    print("")
    print(s)

    prob = test.log_prob(observation_tuple, quantities_observations)
    print("probability of sequence after %d iterations : %f" % (num_iter, prob))
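# A hedged follow-up sketch (an addition, not part of the original test): in
# test_scale, train_hmm updates the model in place, which is why the second
# log_prob call reports the re-estimated likelihood. The helper below (the
# name check_training_improves is hypothetical) captures the before/after
# values for any model built the same way.
def check_training_improves(model, sequences, quantities, iterations=1000):
    # Baum-Welch (EM) should not decrease the weighted log-likelihood,
    # up to numerical error from scaling
    before = model.log_prob(sequences, quantities)
    model.train_hmm(sequences, iterations, quantities)
    after = model.log_prob(sequences, quantities)
    print("log-likelihood before: %f, after: %f" % (before, after))
    return before, after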
def calibrate_sensor_and_transition_matrices(
        self,
        observation_sequences: "a list of tuples of observation sequences",
        update_models=False,
        debug=False):
    '''Given a list of observation sequences, calibrate the sensor and
    transition matrices to the most likely probabilities with the Baum-Welch
    algorithm.

    Currently does not give consistent results - using EM should always
    converge to the same distribution parameters for 'reasonable' initial
    conditions.

    Assumes that subsequent readings are sampled at uniformly spaced
    timesteps of x seconds, which means 1.5 updates have to be performed if
    the RAV moves 1.5 'units'.
    '''
    states = [i for i in range(self.no_battery_levels)]
    possible_observations = [i for i in range(self.no_battery_levels)]
    # Debug prints of the parameter matrix shapes
    print(np.matrix(self.transition_model("move")).shape)
    print(np.matrix(self.sensor_matrix).shape)
    markov_hmm = hmm(states, possible_observations,
                     self.initial_distribution.transpose(),
                     self.transition_model("move"), self.sensor_matrix)
    num_iterations = 10000000
    # Hard-code the quantity of each observation sequence as 1
    (self.trained_sensor_model, self.trained_transition_model,
     self.trained_initial_distribution) = markov_hmm.train_hmm(
         observation_sequences, num_iterations,
         [1 for _ in range(len(observation_sequences))])
    # The returned values are the new emission, transition and start probabilities

    if debug:
        print("Transition model Frobenius error: ",
              np.linalg.norm(self.trained_transition_model -
                             np.matrix(self.transition_model("move")), 'fro'))
        print("Sensor model Frobenius error: ",
              np.linalg.norm(self.trained_sensor_model -
                             np.matrix(self.sensor_matrix), 'fro'))
        print("Initial distribution l2 error: ",
              np.linalg.norm(self.trained_initial_distribution -
                             self.initial_distribution, 2))

    if update_models:
        self.__update_transition_model(self.trained_transition_model)
        self.__update_sensor_model(self.trained_sensor_model)
        if debug:
            print("The battery transition model has now been set to {}".format(
                self.trained_transition_model))
            print("The sensor model has now been set to {}".format(
                self.trained_sensor_model))
    else:
        if debug:
            print("Using user-specified transition model: {}".format(
                self.transition_model('move')))
            print("Using user-specified sensor model: {}".format(
                self.sensor_matrix))
def generate_model_with_input(states, observation, start_prob, transition, emission):
    # Convert the inputs to the format expected by the HMM library
    start_prob = np.asmatrix(start_prob)
    emission = np.asmatrix(emission.values)
    transition = np.asmatrix(transition.values)
    # Generate the HMM model from the states, observations, start
    # probabilities, transition matrix and emission matrix
    model = hidden_markov.hmm(states, observation, start_prob, transition, emission)
    return model
def init_model(possible_states, possible_obs, possible_states_array,
               possible_obs_array, train_states_value_seq, train_obs_seq):
    start_matrix = create_start_matrix(len(possible_states))
    trans_matrix = create_trans_matrix(train_states_value_seq, len(possible_states))
    em_matrix = create_em_matrix(train_states_value_seq, train_obs_seq,
                                 len(possible_states), len(possible_obs))
    smarthouse_model = hmm(possible_states_array, possible_obs_array,
                           start_matrix, trans_matrix, em_matrix)
    return smarthouse_model
def __init__(self, states, evidence, probability, transition, emission):
    # Hidden states of the net
    self.states = states
    # Evidence (the observables)
    self.evidence = evidence
    # Starting probabilities for the initialization
    self.probability = probability
    # Matrix of transition probabilities
    self.transition = transition
    # Matrix of emission probabilities
    self.emission = emission
    # Call the hidden_markov library and initialize the model
    self.model = hidden_markov.hmm(states, evidence, probability, transition, emission)
def __init__(self, n_states, n_possible_observations):
    # Number of states
    self.n_states = n_states
    # Number of possible observations
    self.n_possible_observations = n_possible_observations
    # Create the state and observation names
    self.states, self.possible_observations = self.__init_names()
    # Create the start-probability, transition and emission matrices
    self.pi_prob, self.transition_prob, self.emission_prob = self.__init_probabilities()
    # Create the model
    self.__model = hmm(states=list(self.states),
                       observations=list(self.possible_observations),
                       start_prob=np.matrix(self.pi_prob),
                       trans_prob=np.matrix(self.transition_prob),
                       em_prob=np.matrix(self.emission_prob))
            c += 1
            temp1[i + 1] = -1
        for k in range(0, len(temp1)):
            if temp1[k] == -1:
                temp1[k] = 1 / c
            elif temp1[k] is None:
                temp1[k] = 0
        temp.append(temp1)
    return np.array(temp)  # Left, Right, Up, Down


# start_probability = inpro(grid)
# transition_probability = trans(grid)

# Each row is one cell
states = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# Possible observations are the strings '0.0' through '11.7'
possible_observation = [str(i / 10) for i in range(0, 118)]

start_probability = np.matrix(inpro(grid)[0])
transition_probability = np.matrix(trans(grid))
emission_probability = np.matrix(emm(l1, u1))

observation = ('6.3', '5.6', '7.6', '9.5', '6.0', '9.3', '8.0', '6.4',
               '5.0', '3.8', '3.3')
test = hmm(states, possible_observation, start_probability,
           transition_probability, emission_probability)
print(test.viterbi(observation))
    for i in range(1, t):
        for j in range(0, q):
            delt[i, j] = np.max(np.multiply(delt[i - 1, :], T[:, j])) * M[j, int(Z[i] - 1)]
            pre[i, j] = np.argmax(np.multiply(delt[i - 1, :], T[:, j]))

    s_t = np.argmax(delt[-1, :]) + 1
    path = np.zeros(t)
    path[-1] = s_t
    for k in range(t - 2, -1, -1):
        path[k] = pre[k + 1, int(path[k + 1] - 1)] + 1
    # s_t is 1-based, so the last row must be indexed with s_t - 1
    p = delt[-1, s_t - 1]
    return path, p


if __name__ == "__main__":
    print("whack a mole")
    M = np.array([[0.5, 0.5], [0.9, 0.1], [0.1, 0.9]])
    Z = np.array([1, 1, 1, 2, 2, 2, 1, 2, 2, 1])
    T = np.array([[0.1, 0.4, 0.5], [0.4, 0, 0.6], [0, 0.6, 0.4]])
    s0 = np.array([0, 0, 1])
    path, p = Viterbi(M, Z, T, s0)
    print("Most Likely Path:")
    print(path)
    print("Joint probability")
    print(p)

    # Cross-check against the hidden_markov library; the two possible
    # observation symbols are 1 and 2 (M has one column per symbol)
    states = [1, 2, 3]
    obs = [1, 2]
    h = hm.hmm(states, obs, np.asmatrix(s0), np.asmatrix(T), np.asmatrix(M))
    print(h.viterbi(Z.tolist()))
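    # A hand-rolled forward pass as a cross-check (a sketch added for
    # illustration, not part of the original script): it reuses M, Z, T and
    # s0 from above and should agree with h.forward_algo(Z.tolist()) up to
    # floating-point error.
    alpha = s0 * M[:, int(Z[0] - 1)]            # initialise: prior * emission
    for z in Z[1:]:
        alpha = (alpha @ T) * M[:, int(z - 1)]  # propagate, then apply emission
    print("Forward sequence probability:", alpha.sum())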
import nltk

parser = argparse.ArgumentParser()
parser.add_argument('-t', '--train', help='Training or not', action='store_true')
args = parser.parse_args()

if args.train:
    print("With Training")
    prior = hmm_init.prior_probability()
    transitions = hmm_init.transition_model()
    states = hmm_init.states()
    possible_obs = hmm_init.observation()
    emissions = hmm_init.emission_probability(utility.adjacents_bigrams())
    model = hmlib.hmm(states, possible_obs, prior, transitions, emissions)
    # Pickle the trained model for later use
    afile = open('training/model', 'wb')
    pickle.dump(model, afile)
    afile.close()
    print("End of training\n")
else:
    print("Without Training\n")
    afile = open('training/model', 'rb')
    model = pickle.load(afile)
    afile.close()
    # print(len(states), len(emissions))
    states = model.states

for filename in glob.glob('test/Pontifex_test_of_remains.txt'):
        _, frame = cap.read()
        mask = segment(frame, lower, upper)
        _, thresh = cv2.threshold(mask, 127, 255, 0)
        hand = get_my_hand(thresh)
        features = extract_features(hand, grid)
        pred = classifier.predict([features])
        print('%5d' % (frame_no), end=': ')
        print(pred)
        pred = pred.tolist()
        obs.append(pred)
        frame_no += 1
    except:
        break

cap.release()
print(obs)

states = ('gaf0', 'gaf1')
observations = ('0', '1')
start_prob = np.matrix('0.5 0.5')
transition_prob = np.matrix('1.0 0.0 ; 0.0 1.0')
emission_prob = np.matrix('0.7 0.3 ; 1.0 0')
good_afternoon = hm.hmm(states, observations, start_prob, transition_prob, emission_prob)

observed = [('0', '0', '0', '0', '1', '1')]
# append keeps the second sequence as a separate tuple; extend would have
# flattened it into individual symbols
observed.append(('0', '0', '1'))
# Train on the two recorded sequences, weighted as seen 10 and 20 times
e, t, s = good_afternoon.train_hmm(observed, 30, [10, 20])
print(e)
print(t)
print(s)
        nod[i] = noonSum[h] / totalNoon
    if findpoint(h, eve_max_ptns):
        ed[i] = eveningSum[h] / totalEvening

emit_p = [0 for x in range(len(states))]
emit_p[0] = md
emit_p[1] = nod
emit_p[2] = ed
emit_p[3] = nd
emit_p = np.asmatrix(emit_p)
trans_p = np.asmatrix(trans_p)
start_p = np.asmatrix(start_p)

# Create an HMM class instance
t = hmm(states, obs, start_p, trans_p, emit_p)


# Calculate alpha
def calc_alpha(a, b):
    # Cells appear to be encoded as x * 100 + y; recover the coordinates
    x1 = math.floor(a / 100)
    x2 = math.floor(b / 100)
    y1 = a % 100
    y2 = b % 100
    # Euclidean distance between the two cells
    d = math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))
    if d == 0:
        return 1
    else:
import numpy as np
from hidden_markov import hmm

ob_types = ('W', 'N')
states = ('L', 'M')
observations = ('W', 'W', 'W', 'N')
start = np.matrix('0.1 0.9')
transition = np.matrix('0.7 0.3 ; 0.1 0.9')
emission = np.matrix('0.2 0.8 ; 0.4 0.6')

_hmm = hmm(states, ob_types, start, transition, emission)
print("Forward algorithm: ")
print(_hmm.forward_algo(observations))
print("\nViterbi algorithm: ")
print(_hmm.viterbi(observations))
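# A short follow-up sketch (an addition, not in the original snippet): score
# the sequence and re-estimate the parameters with the same log_prob and
# train_hmm calls used in the other examples above.
sequences = [observations]
quantities = [1]  # each sequence observed once
print("\nLog probability: ")
print(_hmm.log_prob(sequences, quantities))
e, t, s = _hmm.train_hmm(sequences, 100, quantities)  # 100 Baum-Welch iterations
print("\nRe-estimated emission, transition and start matrices:")
print(e)
print(t)
print(s)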
emis_prob = np.random.random((len(states), num_observable))
emis_prob = np.asmatrix(emis_prob)

# Normalize each row of the start, transition and emission matrices so that
# every row sums to 1
for i, n in enumerate(start_prob):
    for j, m in enumerate(n):
        start_prob[i][j] = start_prob[i][j] / np.sum(n)
for i, n in enumerate(trans_prob):
    for j, m in enumerate(n):
        trans_prob[i][j] = trans_prob[i][j] / np.sum(n)
for i, n in enumerate(emis_prob):
    for j, m in enumerate(n):
        emis_prob[i][j] = emis_prob[i][j] / np.sum(n)

print(len(states))
print(num_observable)
print(start_prob.shape)
print(trans_prob.shape)
print(emis_prob.shape)

print("Starting with hmm")
test = hmm(states, list_observables, start_prob, trans_prob, emis_prob)
print("Done with hmm")

iterations = 1
print("Starting with Baum-Welch")
e, t, s = test.train_hmm(observations, iterations, quantities)
print("Done with Baum-Welch")
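# A vectorized alternative to the row-normalization loops above (a sketch with
# the same intent: make every row a probability distribution; normalize_rows
# is a hypothetical helper, not part of the original script).
def normalize_rows(m):
    # Divide each row by its sum so the row sums to 1
    m = np.asarray(m, dtype=float)
    return np.asmatrix(m / m.sum(axis=1, keepdims=True))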