Example #1
    V, prev = h.viterbi(observations_index)
    label = ""
    for i in observations_index:
        label += "%10s" % observations_index_label[i]
    print(" " * 7 + label)
    for s in range(0, 2):
        print("%7s: " % states_index_label[s] + " ".join("%10s" % ("%f" % v)
                                                         for v in V[s]))
    print("\nThe most possible states and probability are:")
    p, ss = h.state_path(observations_index)
    for s in ss:
        print(states_index_label[s])
    print("%5f" % p)
    # Test the Baum-Welch algorithm below
    print("---------Baum-Welch---------")
    observations_data = np.array([1, 0, 0, 1, 0, 1, 0, 2, 1, 2])
    states_data = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0])
    guess = HMM(np.array([[0.5, 0.5], [0.5, 0.5]]),
                np.array([[0.3, 0.3, 0.3], [0.3, 0.3, 0.3]]),
                np.array([0.5, 0.5]))
    newA, newB, newpi = guess.baum_welch(observations_data)
    # Fraction of time steps where the decoded state matches the true one
    states_out = iter(guess.state_path(observations_data)[1])
    p = 0.0
    for s in states_data:
        if next(states_out) == s:
            p += 1
    print(p / len(states_data))
    print("new A:\n", newA)
    print("new B:\n", newB)
    print("new pi:\n", newpi)
Example #2
import numpy as np

M, N, pi, A, B = read_hmm(hmmfile)
T, obs = read_sequence(seqfile)

hmm_object = HMM(pi, A, B)

# test forward algorithm
prob, alpha = hmm_object.forward(obs)
print("log forward probability is %f" % np.log(prob))
prob, alpha, scale = hmm_object.forward_with_scale(obs)
print("forward probability with scale is %f" % prob)

# test backward algorithm
prob, beta = hmm_object.backward(obs)
print("backward probability is %f" % prob)
beta = hmm_object.backward_with_scale(obs, scale)

# test Baum-Welch algorithm
logprobinit, logprobfinal = hmm_object.baum_welch(obs)
print("------------------------------------------------")
print("estimated parameters are:")
print("pi is:")
print(hmm_object.pi)
print("A is:")
print(hmm_object.A)
print("B is:")
print(hmm_object.B)
print("------------------------------------------------")
print("initial log probability is:")
print(logprobinit)
print("final log probability is:")
print(logprobfinal)
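For reference, the scaling trick behind forward_with_scale normalizes alpha at each step and recovers the log likelihood as the sum of the log scale factors, which is why the scaled result above is printed directly while the plain forward probability goes through np.log. A minimal sketch under those assumptions (forward_with_scale_sketch is an illustrative name, not the module's function):

import numpy as np

def forward_with_scale_sketch(pi, A, B, obs):
    """Scaled forward pass: returns (log P(obs), alpha, scale)."""
    N, T = len(pi), len(obs)
    alpha = np.zeros((T, N))
    scale = np.zeros(T)
    alpha[0] = pi * B[:, obs[0]]
    scale[0] = alpha[0].sum()
    alpha[0] /= scale[0]                              # keep each row summing to 1
    for t in range(1, T):
        alpha[t] = (alpha[t - 1] @ A) * B[:, obs[t]]  # propagate, then weight by emission
        scale[t] = alpha[t].sum()
        alpha[t] /= scale[t]
    return np.log(scale).sum(), alpha, scale          # log likelihood without underflow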
Example #3
# -*- coding:utf-8 -*-
import numpy as np
from hmm import HMM

A = np.array([[0.7, 0.3], [0.4, 0.6]])
B = np.array([[0.5, 0.4, 0.1], [0.1, 0.3, 0.6]])
hmm = HMM(A, B, np.array([0.3, 0.7]))
# print(hmm.generate_data(5, seed=2018))
observations, states = hmm.generate_data(T=10, seed=2019)
print('observations: {}'.format(observations))
print('hidden states: {}'.format(states))
# Evaluation problem: compute the observation probability
print('backward prob: {}'.format(hmm.backward(observations)[1]))
print('forward prob: {}'.format(hmm.forward(observations)[1]))

# Learning problem: estimate parameters with Baum-Welch
model = HMM(np.array([[0.5, 0.5], [0.5, 0.5]]),
            np.array([[0.4, 0.4, 0.2], [0.2, 0.3, 0.5]]), np.array([0.5, 0.5]))
a, b, pi, count = model.baum_welch(observations, threshold=0.1)
print('EM iteration: {}'.format(count))
print('a: {}'.format(a))
print('b: {}'.format(b))
print('pi: {}'.format(pi))

# Prediction problem: decode the most likely state path
print("predict: {}".format(hmm.viterbi(observations)))
Example #4
# for i in range(len(obs)):
#     print(str(obs[i]) + ' ' + path[i])


# Learning Test
hmmLearn = HMM(a2, b2, pi2)
# Defines the influence that new observations have on previous probabilities
hmmLearn.influence = (3, 14)
# hmmLearn.generateMatrix(states, alphabet)


## Supervised Learning
# for x in range(50):
#     hmmLearn.supertrain(hmm.generate(2000))
#     print("-----")
#     print(hmmLearn.pi)
#     print(hmmLearn.a)
#     print(hmmLearn.b)

## Unsupervised Learning (EM/Baum-Welch)
for x in range(100):
    hmmLearn.baum_welch(hmm.generate(100, True))

print(pi)
print(a)
print(b)
print("---")
print(hmmLearn.pi)
print(hmmLearn.a)
print(hmmLearn.b)
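The commented-out supertrain path corresponds to supervised maximum-likelihood estimation: count initial states, transitions and emissions in labelled sequences, then normalize. A minimal sketch, assuming the training data comes as (states, observations) index pairs (the real supertrain input format is not shown here):

import numpy as np

def supervised_train_sketch(seqs, N, M, smooth=1e-6):
    """Count-based MLE for pi, A, B from labelled sequences.

    seqs: iterable of (states, observations) index sequences; smooth is a
    small additive constant that keeps unseen events from becoming zeros.
    """
    pi = np.full(N, smooth)
    A = np.full((N, N), smooth)
    B = np.full((N, M), smooth)
    for states, obs in seqs:
        pi[states[0]] += 1                    # initial state count
        for s, o in zip(states, obs):
            B[s, o] += 1                      # emission counts
        for s0, s1 in zip(states[:-1], states[1:]):
            A[s0, s1] += 1                    # transition counts
    pi /= pi.sum()
    A /= A.sum(axis=1, keepdims=True)         # normalize each row
    B /= B.sum(axis=1, keepdims=True)
    return pi, A, B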