Example #1
0
def classify(hmm):
    unclassified_data = gen.right_turn() + gen.left_turn()

    for time_t in xrange(1, len(unclassified_data)):
        obs_interval = vq.dataset_to_alphabet(unclassified_data[:time_t][-observation_size:] )
        obs_sequence = EmissionSequence( emit_domain, obs_interval ) 
        state = hmm.viterbi( obs_sequence )
#        print state
        print state_to_string( state[0][-1]) , unclassified_data[time_t] , state[1]
Example #2
0
def train_hmm(hmm, emit_domain):
    """Fit ``hmm`` on labelled generated data, then on its own samples.

    Phase one runs one Baum-Welch pass per labelled sequence produced by
    the generator; phase two re-estimates the model on sequences sampled
    from the model itself.  Returns the trained model.
    """
    # Labelled phase: one Baum-Welch pass per generated, labelled sequence.
    for raw_seq, seq_label in gen.gen_data_set(size_generated_training_set):
        alphabet_seq = vq.dataset_to_alphabet(raw_seq)
        emission_seq = EmissionSequence(emit_domain, alphabet_seq)
        emission_seq.setSeqLabel(seq_label)
        hmm.baumWelch(emission_seq, loglikelihoodCutoff=0.000001, nrSteps=10)

    # Self-training phase: refit on sequences drawn from the model itself.
    for _ in xrange(size_sampled_training_set):
        sampled_seq = hmm.sample(100, 500)
        hmm.baumWelch(sampled_seq)

    return hmm
Example #3
0
def generate_emit_p():
    """Estimate per-state emission probabilities from generated trajectories.

    Builds symbol-frequency tables for the three motion classes (straight,
    left turn, right turn), seeds every emission cell with a small 0.01
    pseudo-count so no symbol ends up with zero probability, overwrites
    the seeds with observed counts, and returns the normalized matrix.
    """
    class_samples = [
        [vq.dataset_to_alphabet(gen.straight()) for _ in range(50)],
        [vq.dataset_to_alphabet(gen.left_turn()) for _ in range(100)],
        [vq.dataset_to_alphabet(gen.right_turn()) for _ in range(100)],
    ]

    # Per-class symbol frequency tables: {class index: {symbol: count}}.
    symbol_counts = {0: {}, 1: {}, 2: {}}
    for class_idx, samples in enumerate(class_samples):
        for sample in samples:
            for symbol in sample:
                symbol_counts[class_idx][symbol] = \
                    symbol_counts[class_idx].get(symbol, 0) + 1

    # Start every cell at 0.01 so unseen symbols keep a tiny probability.
    alphabet_size = len(vq.gen_alphabet())
    emit_p = [[0.01] * alphabet_size for _ in range(3)]

    # Observed raw counts replace the pseudo-counts where available.
    for class_idx, counts in symbol_counts.items():
        for symbol, count in counts.items():
            emit_p[class_idx][symbol] = count

    return normalize_emit_probabilites(emit_p)