Example #1
    def test_dishonest_casino_larger_transition_p(self):
        '''Dishonest Casino Example.'''
        # Create transition probability matrix
        A = np.array([[0.9, 0.1],
                      [0.1, 0.9]])
        # Create observable probability distribution matrix. Casino biased toward "6" in state "1"
        B = statutil.scale_row_sums(np.array([[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],
                                              [ 1.0, 1.0, 1.0, 1.0, 1.0, 5.0 ]]))
        # Create set of all observable symbols
        V = [1, 2, 3, 4, 5, 6]
    
        # Instantiate an HMM, note Pi is uniform probability distribution by default
        m = hmm.HMM(2, A=A, B=B, V=V)
        
        Obs = [ 1, 2, 3, 4, 5, 2, 1, 6, 6, 6, 5, 6 ]
        log_prob_Obs, Alpha, c = hmm.forward(m, Obs, scaling=1)
        assert_almost_equal(log_prob_Obs, -20.124, decimal=3, err_msg='Wrong observation probability')
        
        Q_star, _, _ = hmm.viterbi(m, Obs, scaling=1)
        assert_equal(Q_star, [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], err_msg='Wrong Viterbi path')

        Beta = hmm.backward(m, Obs, c)
        Gamma, Q_star = hmm.individually_optimal_states(Alpha, Beta)
        assert_almost_equal(Gamma,
                            [[0.8189770516168013, 0.8482906260695058, 0.8525027084764197, 0.8329611652077556, 0.7834127024175411, 0.6880018120129073, 0.5161970090643716, 0.2130207566284025, 0.12024202874950358, 0.10797060639721641, 0.15902649827833876, 0.14930464162738483], [0.18102294838319855, 0.15170937393049422, 0.14749729152358024, 0.16703883479224435, 0.21658729758245884, 0.31199818798709256, 0.4838029909356284, 0.7869792433715975, 0.8797579712504964, 0.8920293936027837, 0.8409735017216613, 0.8506953583726152]],
                            decimal=5, err_msg='Wrong state probabilities')        
        assert_equal(Q_star, [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], 'Wrong individually-optimal states')
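For reference, the scaled forward recursion that hmm.forward performs above fits in a few lines of NumPy. This is a self-contained sketch, not the hmm module's implementation; the matrices are the same as in the test, and the observation symbols are shifted to 0-based indices by hand (the module does this via V).

import numpy as np

def forward_scaled(A, B, pi, obs):
    """Scaled forward pass: returns log P(obs) and the scaled alpha matrix."""
    T, N = len(obs), A.shape[0]
    alpha = np.zeros((T, N))
    c = np.zeros(T)                       # per-step scaling coefficients
    alpha[0] = pi * B[:, obs[0]]
    c[0] = 1.0 / alpha[0].sum()
    alpha[0] *= c[0]
    for t in range(1, T):
        alpha[t] = (alpha[t - 1] @ A) * B[:, obs[t]]
        c[t] = 1.0 / alpha[t].sum()
        alpha[t] *= c[t]
    return -np.log(c).sum(), alpha        # log P(obs) = -sum_t log c_t

A = np.array([[0.9, 0.1], [0.1, 0.9]])
B = np.array([[1.0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 5.0]])
B /= B.sum(axis=1, keepdims=True)         # same effect as statutil.scale_row_sums
pi = np.array([0.5, 0.5])                 # uniform initial distribution, as in the test
obs = np.array([1, 2, 3, 4, 5, 2, 1, 6, 6, 6, 5, 6]) - 1
log_p, _ = forward_scaled(A, B, pi, obs)
print(round(log_p, 3))                    # should agree with the asserted -20.124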
Example #2
    def test_dishonest_casino(self):
        '''Dishonest Casino Example.'''
        # Create transition probability matrix
        A = np.array([[0.99, 0.01],
                      [0.01, 0.99]])
        # Create observable probability distribution matrix. Casino biased toward "6" in state "1".        
        B = statutil.scale_row_sums(np.array([[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],
                                              [ 1.0, 1.0, 1.0, 1.0, 1.0, 5.0 ]]))
        # Create set of all observable symbols
        V = [1, 2, 3, 4, 5, 6]
    
        # Instantiate an HMM, note Pi is uniform probability distribution by default
        m = hmm.HMM(2, A=A, B=B, V=V)
        
        Obs = [ 1, 2, 3, 4, 5, 2, 1, 6, 6, 6, 5, 6 ]
        log_prob_Obs, Alpha, c = hmm.forward(m, Obs, scaling=1)
        assert_almost_equal(log_prob_Obs, -20.9468006, decimal=5, err_msg='Wrong observation probability')
        
        Q_star, _, _ = hmm.viterbi(m, Obs, scaling=1)
        assert_equal(Q_star, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'Wrong Viterbi path')

        Beta = hmm.backward(m, Obs, c)
        Gamma, Q_star = hmm.individually_optimal_states(Alpha, Beta)
        assert_almost_equal(Gamma,
                            [[0.63711364302936, 0.6348934929050587, 0.6271179131667495, 0.6117100305977996, 0.5845543683193845, 0.5383975935172204, 0.46091113744414974, 0.3313982095474306, 0.28864618346708165, 0.27562909135388625, 0.27498372625848855, 0.26932891011973825], [0.36288635697064003, 0.3651065070949412, 0.3728820868332506, 0.38828996940220045, 0.4154456316806155, 0.4616024064827796, 0.5390888625558502, 0.6686017904525694, 0.7113538165329184, 0.7243709086461138, 0.7250162737415115, 0.7306710898802617]],
                            decimal=5, err_msg='Wrong state probabilities')        
        assert_equal(Q_star, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], 'Wrong individually-optimal states')
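The individually_optimal_states step used in both tests is posterior (gamma) decoding. Under the usual matched-scaling convention, the per-position state posteriors are just the elementwise product of the forward and backward matrices, renormalised over states at each position. A minimal sketch under that assumption (not the module's actual code):

import numpy as np

def posterior_decode(alpha, beta):
    """gamma[t, i] = P(q_t = i | O) and the individually most likely state per position."""
    gamma = alpha * beta
    gamma /= gamma.sum(axis=1, keepdims=True)
    return gamma, gamma.argmax(axis=1)

Note that gamma decoding picks the best state independently at each position, while Viterbi maximises the probability of the whole state sequence jointly; that is why this second test asserts an all-"loaded" Viterbi path but a mixed individually-optimal path.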
Example #3
def get_alignment_posteriors(src_tokens, trg_tokens, transition_model,
                             translation_model):
    "Compute the posterior alignment probability p(a_j=i | f, e) for each target token f_j."
    if isinstance(transition_model, TransitionModel):
        initial, transition = transition_model.get_parameters_for_sentence_pair(
            len(src_tokens))
        translation = translation_model.get_parameters_for_sentence_pair(
            src_tokens, trg_tokens)

        posteriors = np.zeros(
            (len(trg_tokens) - 1, len(src_tokens), len(src_tokens)))
        single_posteriors = np.zeros((len(trg_tokens), len(src_tokens)))

        params = (initial, transition, translation)
        observations = np.arange(len(trg_tokens))
        alpha = forward(params, observations)
        beta = backward(params, observations)
        answers = viterby(*params)

        for t in range(len(trg_tokens) - 1):
            numerator = (alpha[t, :] * transition.T
                         ).T * translation[:, t + 1] * beta[t + 1, :]
            posteriors[t] = numerator / np.sum(numerator)

        numerator = alpha * beta
        single_posteriors = (numerator.T / np.sum(numerator, axis=1)).T

        log_likelihood = (
            np.log(initial[answers[0]]) +
            np.sum(np.log(transition[answers[:-1], answers[1:]])) +
            np.sum(np.log(translation[answers,
                                      np.arange(len(trg_tokens))])))
        return (posteriors, single_posteriors), log_likelihood, answers
    else:
        # here transition_model is a prior_model
        prior = transition_model.get_parameters_for_sentence_pair(
            len(src_tokens), len(trg_tokens))
        translation = translation_model.get_parameters_for_sentence_pair(
            src_tokens, trg_tokens)

        numerator = prior * translation
        denominator = np.sum(numerator, axis=0)
        alignment_posteriors = numerator / denominator

        answers = np.argmax(alignment_posteriors, axis=0)
        arange = np.arange(len(trg_tokens))
        log_likelihood = (np.log(prior[answers, arange]).sum() +
                          np.log(translation[answers, arange]).sum())

        return [len(trg_tokens),
                alignment_posteriors.T], log_likelihood, answers
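For orientation, the quantity built inside the for-loop over t is the standard pairwise posterior xi_t(i, j) = P(a_t = i, a_{t+1} = j | f, e) of an HMM alignment model: alpha_t(i) * a_ij * p(f_{t+1} | e_j) * beta_{t+1}(j), renormalised. The same computation for a generic discrete HMM, as a standalone sketch (hypothetical helper, dense NumPy arrays; any consistent scaling of alpha/beta cancels in the normalisation):

import numpy as np

def pairwise_posteriors(alpha, beta, transition, emission, obs):
    """xi[t, i, j] = P(state_t = i, state_{t+1} = j | obs)."""
    T, N = alpha.shape
    xi = np.zeros((T - 1, N, N))
    for t in range(T - 1):
        # alpha into state i at t, transition i -> j, emission of obs[t+1] in j, beta out of j
        xi[t] = (alpha[t][:, None] * transition *
                 emission[:, obs[t + 1]][None, :] * beta[t + 1][None, :])
        xi[t] /= xi[t].sum()
    return xi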
Example #4
cov_bis3 = 3.18 * np.eye(2)
covariances_init_bis = np.asarray([cov_bis0, cov_bis1, cov_bis2, cov_bis3])

states = [0, 1, 2, 3]
start_proba_init = np.ones(4)/4
transition_proba_init = np.asarray([[1/2, 1/6, 1/6, 1/6],
                                    [1/6, 1/2, 1/6, 1/6],
                                    [1/6, 1/6, 1/2, 1/6],
                                    [1/6, 1/6, 1/6, 1/2]])

# 2
alpha_scaled, scale_alpha = hmm.forward(data_test, states,
                                        start_proba_init,
                                        transition_proba_init,
                                        means_init, covariances_init)
beta_scaled = hmm.backward(data_test, states, transition_proba_init,
                           means_init, covariances_init, scale_alpha)

gamma = hmm.gammas(data_test, states, alpha_scaled,
                   beta_scaled, scale_alpha)

for i in states:
    y = np.zeros(100)
    for t in range(100):
        y[t] = gamma[t][i]
    plt.figure()
    plt.plot(y)
    plt.title("State %i" % (i+1))

# 4
(start_proba1, transition_proba1, means1, covariances1,
 logllh1, iteration1) = hmm.baum_welch(data_train, states, start_proba_init,
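The hmm.baum_welch call above re-estimates all of the parameters by EM. As a reminder of what the M-step of that procedure typically looks like for a Gaussian-emission HMM, here is a sketch under the assumption that the E-step has produced per-position posteriors gamma of shape (T, N) and pairwise posteriors xi of shape (T-1, N, N); the actual signature and internals of this hmm module may differ.

import numpy as np

def m_step(data, gamma, xi):
    """One Baum-Welch M-step for a Gaussian-emission HMM.

    data: (T, d) observations, gamma: (T, N) state posteriors,
    xi: (T - 1, N, N) pairwise posteriors from the E-step.
    """
    start_proba = gamma[0] / gamma[0].sum()
    transition = xi.sum(axis=0)
    transition /= transition.sum(axis=1, keepdims=True)
    weights = gamma / gamma.sum(axis=0)               # responsibilities, columns sum to 1
    means = weights.T @ data                          # (N, d) weighted means
    covariances = np.stack([
        (weights[:, i, None] * (data - means[i])).T @ (data - means[i])
        for i in range(gamma.shape[1])
    ])                                                # (N, d, d) weighted covariances
    return start_proba, transition, means, covariances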
Example #5
    A=np.array([[0.9, 0.1], [0.1, 0.9]]),
    B=np.array([
        [1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6],
        [1 / 10, 1 / 10, 1 / 10, 1 / 10, 1 / 10, 1 / 2],
    ]),
    state_names=["fair", "loaded"],
    obs_names=["1", "2", "3", "4", "5", "6"],
)

hmm.visualize()

obs = np.array([1, 4, 3, 6, 6, 4]) - 1  # -1: die faces 1..6 become 0-based observation indices

p, alpha = forward(obs, hmm)
q, beta = backward(obs, hmm)

print("p = %f, q = %f" % (p, q))

print("alpha")
for l in alpha:
    print("%f %f" % (l[0], l[1]))
print()

print("beta")
for l in beta:
    print("%f %f" % (l[0], l[1]))
print()

states, delta = viterbi(obs, hmm)
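The viterbi(obs, hmm) call returns the decoded state sequence together with the delta table. A generic log-space sketch of that recursion, as a standalone illustration (this script's viterbi takes the model object and may keep delta in probability space):

import numpy as np

def viterbi_path(A, B, pi, obs):
    """Log-space Viterbi: most likely state sequence for a discrete HMM."""
    T, N = len(obs), A.shape[0]
    logA, logB, logpi = np.log(A), np.log(B), np.log(pi)
    delta = np.zeros((T, N))              # best log-prob of any path ending in state j at time t
    psi = np.zeros((T, N), dtype=int)     # backpointers
    delta[0] = logpi + logB[:, obs[0]]
    for t in range(1, T):
        scores = delta[t - 1][:, None] + logA   # scores[i, j]: best path into i, then step i -> j
        psi[t] = scores.argmax(axis=0)
        delta[t] = scores.max(axis=0) + logB[:, obs[t]]
    states = np.zeros(T, dtype=int)
    states[-1] = delta[-1].argmax()
    for t in range(T - 2, -1, -1):        # follow the backpointers
        states[t] = psi[t + 1, states[t + 1]]
    return states, delta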
Example #6
# HMM
####################
(a, b, pi) = datasets.getHMMData()
hmm.viterbi(array([0, 1, 1, 2]), a, b, pi)
#array([0, 0, 0, 1])

hmm.viterbi(array([0, 2, 1, 2]), a, b, pi)
#array([0, 1, 1, 1])

###WU 8
# example 1
hmm.viterbi(array([0, 1, 1, 1]), a, b, pi)  # 0 0 0 0
hmm.viterbi(array([0, 1, 2, 1]), a, b, pi)  # 0 0 1 1

al = hmm.forward(array([0, 1, 1, 2]), a, b, pi)
be = hmm.backward(array([0, 1, 1, 2]), a, b, pi)
hmm.sanityCheck(al, be)

##########
# parameter re-estimation
al = hmm.forward(array([0, 1, 1, 2]), a, b, pi)
be = hmm.backward(array([0, 1, 1, 2]), a, b, pi)
(a_new, b_new, pi_new) = hmm.reestimate(array([0, 1, 1, 2]), al, be, a, b, pi)

# >>> a_new
# array([[ 0.53662942,  0.46337058],
#        [ 0.39886289,  0.60113711]])
# >>> b_new
# array([[ 0.35001693,  0.55333559,  0.09664748],
#        [ 0.14235731,  0.44259786,  0.41504483]])
# >>> pi_new
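hmm.sanityCheck(al, be) presumably verifies the forward/backward consistency identity: for unscaled matrices, sum_i alpha[t, i] * beta[t, i] equals P(observations) at every position t, so the value must not depend on t. A hypothetical check along those lines (an assumption about what the helper does, not its actual code):

import numpy as np

def sanity_check(alpha, beta):
    """All row-wise sums of alpha * beta should agree; each equals P(obs)."""
    totals = (alpha * beta).sum(axis=1)
    assert np.allclose(totals, totals[0]), totals
    return totals[0]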