Example #1
import numpy as np

from underflow_normalize import normalize, mk_stochastic
# compute_ess_dhmm (E-step expected sufficient statistics) and em_converged
# (convergence test) are defined elsewhere in the project.

def dhmm_em(observation_i, prior, transition_matrix, emission_matrix, max_iter, thresh):
    # EM (Baum-Welch) training of a discrete HMM.
    previous_loglik = -np.inf
    loglik = 0
    converged = False
    num_iter = 1
    LL = []

    while (num_iter <= max_iter) and not converged:

        # E step: expected sufficient statistics under the current parameters
        [loglik, exp_num_visits1, exp_num_visitsT, exp_num_trans, exp_num_emit] = \
            compute_ess_dhmm(observation_i, prior, transition_matrix, emission_matrix, 0)

        # M step: re-estimate the parameters by normalizing the expected counts
        prior = normalize(exp_num_visits1)[0]
        transition_matrix = mk_stochastic(exp_num_trans)
        emission_matrix = mk_stochastic(exp_num_emit)

        num_iter = num_iter + 1
        [converged, decrease] = em_converged(loglik, previous_loglik, thresh, False)
        previous_loglik = loglik
        LL.append(loglik)

    return [LL, prior, transition_matrix, emission_matrix, num_iter]
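
The function leans on helpers whose definitions are not shown here: normalize and mk_stochastic (imported from underflow_normalize in Example #2 below) and em_converged. The following is only a minimal sketch of what they might look like, assuming the usual conventions (normalize rescales a vector to sum to 1 and also returns the normalizer, mk_stochastic row-normalizes a matrix, and em_converged tests the relative change in log-likelihood); the actual project code may differ.

import numpy as np

def normalize(v):
    # Rescale v so its entries sum to 1; also return the normalizing constant.
    v = np.asarray(v, dtype=float).flatten()
    z = v.sum()
    z = 1.0 if z == 0 else z
    return [v / z, z]

def mk_stochastic(M):
    # Row-normalize M so that every row sums to 1 (a stochastic matrix).
    M = np.asarray(M, dtype=float)
    row_sums = M.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1.0
    return M / row_sums

def em_converged(loglik, previous_loglik, thresh, check_increase=True):
    # Converged when the relative change in log-likelihood drops below thresh;
    # also report whether the likelihood decreased between iterations.
    decrease = (loglik - previous_loglik) < -1e-3
    if check_increase and decrease:
        print("likelihood decreased from %f to %f" % (previous_loglik, loglik))
    if previous_loglik == -np.inf:
        return [False, decrease]
    avg = (abs(loglik) + abs(previous_loglik) + np.finfo(float).eps) / 2.0
    converged = abs(loglik - previous_loglik) / avg < thresh
    return [converged, decrease]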
Example #2
import numpy as np
from underflow_normalize import normalize,mk_stochastic
from sample import *
from dhmm_em import dhmm_em
O = 3    # number of observation symbols
Q = 2    # number of hidden states

#"true" parameters
prior0 = normalize(np.random.rand(Q,1))[0]
prior0 =np.array([.5,.5])

transmat0 = mk_stochastic(np.random.rand(Q,Q))
transmat0=np.array([[.3 ,.7],[.7,.3]])
obsmat0 = mk_stochastic(np.random.rand(Q,O))
obsmat0=np.array([[.333,.333,.333],[.333,.333,.333]])
print "True Parameters"
print "-"*80
print "Prior:",prior0
print "Observation",obsmat0
print "Transition:",transmat0



# training data
T = 1      # length of each observation sequence
nex = 5    # number of example sequences
[obs, hidden] = sample_dhmm(prior0, transmat0, obsmat0, T, nex)

# initial guess of parameters
prior1 = normalize(np.random.rand(Q, 1))[0]
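
The listing breaks off after the first initial guess. A possible continuation is sketched below; transmat1, obsmat1, the max_iter and thresh values, and the call passing obs as the observations are assumptions that follow the pattern of the true parameters and the dhmm_em signature from Example #1, not part of the original code.

# illustrative continuation: complete the initial guess and run EM
transmat1 = mk_stochastic(np.random.rand(Q, Q))
obsmat1 = mk_stochastic(np.random.rand(Q, O))

max_iter = 10
thresh = 1e-4
[LL, prior2, transmat2, obsmat2, num_iter] = dhmm_em(obs, prior1, transmat1, obsmat1, max_iter, thresh)

print("Estimated Parameters")
print("-" * 80)
print("Prior:", prior2)
print("Observation:", obsmat2)
print("Transition:", transmat2)
print("Log-likelihood per iteration:", LL)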