Example #1
import numpy as np

def fit_coin():
    """
    example from https://github.com/lazyprogrammer/machine_learning_examples/blob/master/hmm_class/hmmd.py
    :return:
    """
    X = []
    for line in open('./data/coin_data.txt'):
        # 1 for H, 0 for T
        x = [1 if e == 'H' else 0 for e in line.rstrip()]
        X.append(x)
    X = np.array(X)
    print(X.shape)

    hmm = HMM(2, 2)  # 2 hidden states, 2 observable symbols (H/T)
    hmm.fit(X, max_iter=50)
    L = hmm.log_likelihood(X).sum()
    print("LL with fitted params:", L)

    # try true values
    hmm.pi = np.array([0.5, 0.5])
    hmm.A = np.array([[0.1, 0.9], [0.8, 0.2]])
    hmm.B = np.array([[0.6, 0.4], [0.3, 0.7]])
    L = hmm.log_likelihood(X).sum()
    print("LL with true params:", L)

    # try viterbi
    print("Best state sequence for:", X[0])
    print(hmm.viterbi(X[0]))
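
The script reads ./data/coin_data.txt, one H/T sequence per line (the loader above maps H to 1 and T to 0, so column 1 of B is the probability of heads). The file itself is not shown here; the following is only a minimal sketch of a compatible generator that samples from the two-state "true" model quoted above. The name simulate_coin_data and the sequence sizes are made up for illustration.

import numpy as np

def simulate_coin_data(n_sequences=50, seq_length=30, seed=0):
    # Sample independent H/T strings from the two-state HMM with the
    # "true" parameters used above; one sequence per output line.
    rng = np.random.default_rng(seed)
    pi = np.array([0.5, 0.5])
    A = np.array([[0.1, 0.9], [0.8, 0.2]])
    B = np.array([[0.6, 0.4], [0.3, 0.7]])  # B[state, symbol], symbol 1 == 'H'
    lines = []
    for _ in range(n_sequences):
        s = rng.choice(2, p=pi)          # initial state
        chars = []
        for _ in range(seq_length):
            chars.append('H' if rng.random() < B[s, 1] else 'T')
            s = rng.choice(2, p=A[s])    # transition to the next state
        lines.append(''.join(chars))
    return lines

with open('./data/coin_data.txt', 'w') as f:
    f.write('\n'.join(simulate_coin_data()) + '\n')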
Example #2
File: first_test.py  Project: Zalasyu/HMM
        print("MAX ITERATIONS DONE! (", max_iteration, ")")
    if iteration % 10 == 0:
        print("ITERATION", iteration)
    # alpha pass
    alpha, c = alpha_pass(model, obs)

    # beta pass
    beta = beta_pass(model, obs, c)

    # gamma-pass
    start_probs, trans, emis, gamma, di_gamma = \
        Forward_Backward(model.get_N(), alpha, beta, obs.obs, sequence_syms, model.A, model.B)

    # update model
    model.A = trans
    model.B = emis
    model.pi = start_probs

    # compute log probability of the observations from the scaling factors c
    new_log_prob = compute_logprob(obs, c)

    # compare
    log_prob_history = np.append(log_prob_history, new_log_prob)
    if new_log_prob > old_log_prob:
        old_log_prob = new_log_prob
        continue
    else:
        print("STOPPED AT ITERATION:", iteration + 1)
        break
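
The helpers used above (alpha_pass, compute_logprob) are not shown in this excerpt. They appear to follow the standard Rabiner-style scaling scheme, in which the forward variables are renormalised at every step and the scaling factors are kept so the likelihood can be recovered later. A minimal sketch, assuming c_t = 1 / sum_i alpha_t(i); the signatures are simplified and are not the project's actual ones.

import numpy as np

def alpha_pass_sketch(pi, A, B, obs):
    # Scaled forward recursion: alpha[t] is renormalised at each step and the
    # scaling factor c[t] is stored for the log-likelihood computation.
    N, T = A.shape[0], len(obs)
    alpha = np.zeros((T, N))
    c = np.zeros(T)
    alpha[0] = pi * B[:, obs[0]]
    c[0] = 1.0 / alpha[0].sum()
    alpha[0] *= c[0]
    for t in range(1, T):
        alpha[t] = (alpha[t - 1] @ A) * B[:, obs[t]]
        c[t] = 1.0 / alpha[t].sum()
        alpha[t] *= c[t]
    return alpha, c

def compute_logprob_sketch(c):
    # Under this convention P(O | model) = 1 / prod_t c_t,
    # so log P(O | model) = -sum_t log(c_t).
    return -np.sum(np.log(c))

Because Baum-Welch never decreases the likelihood of the training data, comparing new_log_prob against old_log_prob as done above is a standard convergence check: training stops once the log probability stops improving (or the iteration cap is hit).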
"""view some results:"""
# nice pandas display
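
The "nice pandas display" itself is not included in this excerpt. One way to present the fitted parameters, assuming pandas is available (names and layout here are illustrative, not the project's actual output):

import pandas as pd

def show_model(model):
    # Pretty-print the fitted HMM parameters as small DataFrames.
    print("Transition matrix A:")
    print(pd.DataFrame(model.A))
    print("Emission matrix B:")
    print(pd.DataFrame(model.B))
    print("Initial distribution pi:")
    print(pd.DataFrame([model.pi]))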