Example 1
def test():
    vocab_path = '../../data/people_char_vocab.pkl'
    vocabs = load_vocab(vocab_path)
    train_data_path = '../../data/people.txt'

    gen = train_generator(train_data_path, vocabs=vocabs)
    states = ['B', 'M', 'E', 'S']
    hmm = HMM(vocabs=vocabs, states=states)
    # Training and saving are commented out; a previously saved model is loaded instead.
    #hmm.train(train_generator=gen)
    model_dir = '../../models/hmm'
    #hmm.save_model(model_dir=model_dir)
    hmm.load_model(model_dir=model_dir)

    sentence = "我是中国人,我爱我的祖国"
    decode_states = {0: 'B', 1: 'M', 2: 'E', 3: 'S'}
    hiddens = hmm.decode(outputs=sentence, decode_states=decode_states)

    words = hmm.format_hiddens(hiddens, sentence)

    print(hiddens)
    print('/ '.join(words))

    sentence = '4月29日,雄浑悠长的钟声响起,关闭了近百日的武汉黄鹤楼重新开门迎客。这钟声,传递出中华民族从磨难中奋起的昂扬斗志,彰显出伟大民族精神在新时代焕发出的熠熠光辉。'
    hiddens = hmm.decode(outputs=sentence, decode_states=decode_states)
    words = hmm.format_hiddens(hiddens, sentence)
    print('/ '.join(words))
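
Both calls segment a sentence by first decoding one BMES tag per character and then grouping characters into words with format_hiddens. The helper below is a minimal, hypothetical sketch of that grouping step (the repo's actual method may differ): a word is closed whenever the tag is 'E' (end of a multi-character word) or 'S' (single-character word).

# Hypothetical sketch of the BMES-to-words grouping that format_hiddens()
# appears to perform; the repo's actual implementation may differ.
def bmes_to_words(tags, sentence):
    """Group the characters of `sentence` into words according to BMES tags."""
    words, current = [], []
    for char, tag in zip(sentence, tags):
        current.append(char)
        if tag in ('E', 'S'):        # 'E' closes a multi-char word, 'S' is a one-char word
            words.append(''.join(current))
            current = []
    if current:                      # flush a dangling 'B'/'M' run at the end
        words.append(''.join(current))
    return words

print('/ '.join(bmes_to_words(['S', 'S', 'B', 'M', 'E'], "我是中国人")))  # 我/ 是/ 中国人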
Example 2
def test_hmm():
    vocab_path = '../../data/people_char_vocab.pkl'
    model_dir = '../../models/hmm'
    states = ['B', 'M', 'E', 'S']
    decode_states = {0: 'B', 1: 'M', 2: 'E', 3: 'S'}

    vocabs = load_vocab(vocab_path)
    hmm = HMM(vocabs=vocabs, states=states)
    hmm.load_model(model_dir=model_dir)
    sentence = "我是中国人,我爱我的祖国"

    hiddens = hmm.decode(outputs=sentence, decode_states=decode_states)
    words = hmm.format_hiddens(hiddens, sentence)

    print(hiddens)
    print('/ '.join(words))

    sentence = '4月29日,雄浑悠长的钟声响起,关闭了近百日的武汉黄鹤楼重新开门迎客。' \
               '这钟声,传递出中华民族从磨难中奋起的昂扬斗志,彰显出伟大民族精神在新时代焕发出的熠熠光辉。'
    # Note: the assignment below overrides the Chinese sentence, so only the English one is decoded.
    sentence = 'I love you china'
    hiddens = hmm.decode(outputs=sentence, decode_states=decode_states)
    words = hmm.format_hiddens(hiddens, sentence)
    print('/ '.join(words))
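
In these tests hmm.decode presumably performs a Viterbi-style search over the four BMES states. As a reference point, the self-contained sketch below shows one common way such a decoder is written in log space; the dense parameter layout (pi, trans_p, emit_p as NumPy arrays indexed by state and character id) is an assumption for illustration and need not match how the HMM class stores its parameters.

import numpy as np

# Illustrative log-space Viterbi decoder for BMES tagging.  The dense parameter
# layout (pi, trans_p, emit_p as NumPy arrays) is an assumption for the sketch,
# not the repo's actual storage format.
def viterbi_decode(obs_ids, pi, trans_p, emit_p):
    """Return the most probable state index sequence for the observed ids."""
    n_states = len(pi)
    n_obs = len(obs_ids)
    log_pi = np.log(pi + 1e-12)
    log_trans = np.log(trans_p + 1e-12)
    log_emit = np.log(emit_p + 1e-12)

    delta = np.zeros((n_obs, n_states))           # best log-prob ending in each state
    psi = np.zeros((n_obs, n_states), dtype=int)  # backpointers

    delta[0] = log_pi + log_emit[:, obs_ids[0]]
    for t in range(1, n_obs):
        scores = delta[t - 1][:, None] + log_trans       # scores[i, j]: prev state i -> current state j
        psi[t] = np.argmax(scores, axis=0)
        delta[t] = scores[psi[t], np.arange(n_states)] + log_emit[:, obs_ids[t]]

    path = [int(np.argmax(delta[-1]))]            # backtrack from the best final state
    for t in range(n_obs - 1, 0, -1):
        path.append(int(psi[t][path[-1]]))
    return path[::-1]
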
def lihang_example():
    T = np.array([[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]])
    E = np.array([[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]])
    pi = np.array([0.2, 0.4, 0.4])

    #states = [0, 1, 2]
    states = {'a': 0, 'b': 1, 'c': 2}
    vocabs = {'red': 0, 'white': 1}

    hmm = HMM(states=states, vocabs=vocabs, pi=pi, trans_p=T, emit_p=E)

    O = ['red', 'white', 'red']

    f_prob = hmm.forward_evaluate(O)
    print('forward prob', f_prob)

    b_prob = hmm.backward_evaluate(O)
    print('backward prob', b_prob)

    decode_states = {0: 'a', 1: 'b', 2: 'c'}
    hiddens = hmm.decode(O, decode_states=decode_states)
    print('optimal hiddens', hiddens)
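
The parameters in lihang_example are the classic three-state, two-symbol example from Li Hang's Statistical Learning Methods, so the printed results can be checked by hand: both the forward and backward evaluations of O = (red, white, red) should give P(O|λ) ≈ 0.130218, and the optimal hidden path should come out as ('c', 'c', 'c') with path probability 0.0147. The standalone check below recomputes the forward probability with plain NumPy, independently of the HMM class.

import numpy as np

# Standalone forward-algorithm check for the example above, independent of the
# HMM class; the expected value is approximately 0.130218.
def forward_prob(obs_ids, pi, trans_p, emit_p):
    """alpha recursion of the forward algorithm, summed over final states."""
    alpha = pi * emit_p[:, obs_ids[0]]               # initialization
    for o in obs_ids[1:]:
        alpha = (alpha @ trans_p) * emit_p[:, o]     # induction step
    return alpha.sum()                               # termination

trans_p = np.array([[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]])
emit_p = np.array([[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]])
pi = np.array([0.2, 0.4, 0.4])
print(forward_prob([0, 1, 0], pi, trans_p, emit_p))  # red, white, red -> 0.130218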