# Example 1
def inference(observations):
    """
    Run Viterbi decoding on the robot HMM for a sequence of observations.

    Parameters
    ----------
    observations : list
        One observed state per time step (entries may be None for a
        missing observation, if Viterbi supports that — verify against
        the Viterbi implementation).

    Returns
    -------
    The estimated hidden-state sequence produced by Viterbi.
    """
    # NOTE(review): removed `use_graphics = False` — it was assigned but
    # never read anywhere in this function.
    all_possible_hidden_states = robot.get_all_hidden_states()
    all_possible_observed_states = robot.get_all_observed_states()
    prior_distribution = robot.initial_distribution()

    print('Running Viterbi...')
    estimated_states = Viterbi(all_possible_hidden_states,
                               all_possible_observed_states,
                               prior_distribution, robot.transition_model,
                               robot.observation_model, observations)
    print('Request handled.')
    print('Ready to infer robot behavior.')
    return estimated_states
# Example 2
            need_to_generate_data = False
            num_time_steps        = len(hidden_states)

    # if no data is loaded, then generate new data
    if need_to_generate_data:
        num_time_steps = 100
        hidden_states, observations = \
            generate_data(robot.initial_distribution,
                          robot.transition_model,
                          robot.observation_model,
                          num_time_steps,
                          make_some_observations_missing)

    all_possible_hidden_states   = robot.get_all_hidden_states()
    all_possible_observed_states = robot.get_all_observed_states()
    prior_distribution           = robot.initial_distribution()

    print 'Running forward-backward...'
    marginals = forward_backward(all_possible_hidden_states,
                                 all_possible_observed_states,
                                 prior_distribution,
                                 robot.transition_model,
                                 robot.observation_model,
                                 observations)
    print

    print "Marginal at time %d:" % (num_time_steps - 1)
    if marginals[-1] is not None:
        print marginals[-1]
    else:
        print '*No marginal computed*'
    for key in phiLast.keys():
        val2 = msgHat[key]
        if val2 == 0:
            val2 = np.inf
        finNode[key] = neglog(phiLast[key]) + val2
    minVal, minKey = myDictMin(finNode)
    mHat = minVal
    tBack = minKey
    return mHat, tBack

# %% main program

# project
# Bind the HMM components from the robot localization model:
# state spaces, prior, and the two conditional models.
all_possible_hidden_states = robot.get_all_hidden_states()
all_possible_observed_states = robot.get_all_observed_states()
prior_distribution = robot.initial_distribution()
transition_model = robot.transition_model
observation_model = robot.observation_model
# Alternative hard-coded observation sequence, kept for quick experiments:
#observations = [(2, 0), (2, 0), (3, 0), (4, 0), (4, 0),
#                (6, 0), (6, 1), (5, 0), (6, 0), (6, 2)]
# Hard-coded observation sequence; None marks a missing observation.
# NOTE(review): tuples are presumably (row, col) grid positions — verify
# against the robot module's observation model.
observations = [(1, 6), (4, 6), (4, 7), None, (5, 6),
                (6, 5), (6, 6), None, (5, 5), (4, 4)]


## example with class coins
# Swap in these bindings to run the same pipeline on the coin-flip example.
#import classCoinsExample as cc
#all_possible_hidden_states = cc.get_all_hidden_states()
#all_possible_observed_states = cc.get_all_observed_states()
#prior_distribution = cc.initial_distribution()
#transition_model = cc.transition_model
#observation_model = cc.observation_model
# Example 4
def baum_welch(all_possible_hidden_states,
               all_possible_observed_states,
               observations):
    """
    Estimate HMM parameters with the Baum-Welch (EM) algorithm.

    Inputs
    ------
    all_possible_hidden_states: a list of possible hidden states

    all_possible_observed_states: a list of possible observed states

    observations: a list of observations, one per hidden state
                  (in this problem, we don't have any missing observations)


    Output
    ------
    A transition model and an observation model
    """
    ### Initialize with uninformative models; EM refines them each pass.
    transition_model = robot.uniform_transition_model
    observation_model = robot.spread_observation_model
    initial_state_distribution = robot.initial_distribution()

    num_time_steps = len(observations)

    while True:
        # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
        ### YOUR CODE HERE: Estimate marginals & pairwise marginals (E-step)
        pairwise_marginals = [None] * num_time_steps
        marginals = [None] * num_time_steps
        # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

        ### Learning: estimate model parameters (M-step)
        # maps states to distributions over possible next states
        transition_model_dict = {}

        # maps states to distributions over possible observations
        observation_model_dict = {}

        # fresh (empty) initial-state distribution for this iteration
        initial_state_distribution = robot.Distribution()

        # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
        # YOUR CODE HERE: Use the estimated marginals & pairwise marginals to
        # estimate the parameters.

        # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        next_transition_model = robot.make_transition_model(transition_model_dict)
        next_observation_model = robot.make_observation_model(observation_model_dict)

        ### Check for convergence against the previous transition model,
        ### then commit the new models either way.
        converged = check_convergence(all_possible_hidden_states,
                                      transition_model,
                                      next_transition_model)
        transition_model = next_transition_model
        observation_model = next_observation_model
        if converged:
            break

    return (transition_model,
            observation_model,
            initial_state_distribution,
            marginals)