Ejemplo n.º 1
0
def test_hmm_decode_aima_umbrella_example():
    """Check Viterbi decoding against the umbrella-world example.

    Taken from AI a Modern Approach: the expected state sequence and the
    normalized path likelihoods come from figure 15.5(b) on page 577 of the
    third edition (correct values were compared manually to the figure).
    """
    state_names = {0: 'No Rain', 1: 'Rain'}
    # obs = {0: 'No Umbrella', 1: 'Umbrella'}

    prior = torch.tensor([0.5, 0.5])
    transitions = torch.tensor([[0.7, 0.3], [0.3, 0.7]])
    emit_dry = CategoricalModel(probs=torch.tensor([0.8, 0.2]))
    emit_rain = CategoricalModel(probs=torch.tensor([0.1, 0.9]))
    hmm = HiddenMarkovModel([emit_dry, emit_rain], T0=prior, T=transitions)

    packed_obs = pack_list([torch.tensor([1, 1, 0, 1, 1])])
    decoded, path_ll = hmm.decode(packed_obs)
    padded_states, _ = pad_packed_sequence(decoded, batch_first=True)
    padded_ll, _ = pad_packed_sequence(path_ll, batch_first=True)

    decoded_names = [state_names[idx.item()] for idx in padded_states[0]]
    assert decoded_names == ['Rain', 'Rain', 'No Rain', 'Rain', 'Rain']

    # Normalize the per-step path log-likelihoods so they are comparable
    # to the probabilities shown in the figure.
    normalized = padded_ll[0].softmax(1)

    expected = torch.tensor([[0.18181818, 0.81818182], [0.08695652, 0.91304348],
                             [0.77419355, 0.22580645], [0.34146341, 0.65853659],
                             [0.10332103, 0.89667897]])

    assert torch.allclose(normalized, expected)
Ejemplo n.º 2
0
def test_hmm_fit_viterbi_diagnormal():
    """Fit an HMM with diagonal-Gaussian emissions to sampled data.

    Data is drawn from a ground-truth model; a second model is initialized
    with deliberately wrong emission means and fit via Viterbi training.
    The test checks convergence and that decoded states (mod a possible
    label permutation) agree with the generating states.
    """
    # Ground-truth model, used only to generate observations.
    generator = HiddenMarkovModel(
        [DiagNormalModel(means=torch.tensor([0.0, 0.0, 0.0]),
                         precs=torch.tensor([1.0, 1.0, 1.0])),
         DiagNormalModel(means=torch.tensor([10.0, 10.0, 10.0]),
                         precs=torch.tensor([1.0, 1.0, 1.0]))],
        T0=torch.tensor([0.75, 0.25]),
        T=torch.tensor([[0.85, 0.15], [0.12, 0.88]]))
    obs_seq, states = generator.sample(50, 100)

    # Learner: same transition structure, but emission means started at
    # the wrong values (3 and 6 instead of 0 and 10).
    model = HiddenMarkovModel(
        [DiagNormalModel(means=torch.tensor([3.0, 3.0, 3.0]),
                         precs=torch.tensor([1.0, 1.0, 1.0])),
         DiagNormalModel(means=torch.tensor([6.0, 6.0, 6.0]),
                         precs=torch.tensor([1.0, 1.0, 1.0]))],
        T0=torch.tensor([0.75, 0.25]),
        T=torch.tensor([[0.85, 0.15], [0.12, 0.88]]))

    converge = model.fit(obs_seq, max_steps=500, epsilon=1e-2, restarts=3)

    # Not enough samples (only 1 sequence start) to assert on T0 numerically.
    print("Pi Matrix: ")
    print(model.T0)

    print("Transition Matrix: ")
    print(model.T)
    print()
    print("Emission: ")
    for state in model.states:
        params = list(state.parameters())
        print("Means", params[0])
        print("Cov", params[1].abs())
    print()
    print("Reached Convergence: ")
    print(converge)

    assert converge

    states_seq, _ = model.decode(obs_seq)

    # Mean absolute disagreement between decoded and true state labels.
    # State indices may be permuted relative to the generator, so both a
    # near-0 and a near-1 disagreement rate count as success.
    pred = states_seq
    true = states
    accuracy = (pred.data - true.data).abs().float().mean()
    print("Accuracy: ", accuracy)
    assert accuracy >= 0.9 or accuracy <= 0.1
Ejemplo n.º 3
0
def test_hmm_fit_viterbi():
    """Fit an HMM with categorical emissions to sampled data.

    Observations are sampled from a sharply-peaked ground-truth model;
    a second model starting from near-uniform parameters is fit via
    Viterbi training. The test checks convergence and that the decoded
    states (mod a possible label permutation) match the true states.
    """
    # Ground-truth model, used only to generate observations.
    generator = HiddenMarkovModel(
        [CategoricalModel(probs=torch.tensor([0.99, 0.01])),
         CategoricalModel(probs=torch.tensor([0.05, 0.95]))],
        T0=torch.tensor([0.75, 0.25]),
        T=torch.tensor([[0.85, 0.15], [0.12, 0.88]]))
    obs_seq, states = generator.sample(50, 100)

    # Learner: initialized close to uniform so fitting has real work to do.
    model = HiddenMarkovModel(
        [CategoricalModel(probs=torch.tensor([0.6, 0.4])),
         CategoricalModel(probs=torch.tensor([0.5, 0.5]))],
        T0=torch.tensor([0.5, 0.5]),
        T=torch.tensor([[0.6, 0.4], [0.5, 0.5]]))

    converge = model.fit(obs_seq, max_steps=500, epsilon=1e-2, restarts=3)

    # Not enough samples (only 1 sequence start) to assert on T0 numerically.
    print("Pi Matrix: ")
    print(model.T0)

    print("Transition Matrix: ")
    print(model.T)
    print()
    print("Emission Matrix: ")
    for state in model.states:
        print([param.softmax(0) for param in state.parameters()])
    print()
    print("Reached Convergence: ")
    print(converge)

    assert converge

    states_seq, _ = model.decode(obs_seq)

    # Mean absolute disagreement between decoded and true state labels.
    # State indices may be permuted relative to the generator, so both a
    # near-0 and a near-1 disagreement rate count as success.
    pred = states_seq
    true = states
    accuracy = (pred.data - true.data).abs().float().mean()
    print("Accuracy: ", accuracy)
    assert accuracy >= 0.9 or accuracy <= 0.1
Ejemplo n.º 4
0
def test_hmm_decode():
    """Viterbi-decode the classic Healthy/Fever diagnosis example."""
    label = {0: 'Healthy', 1: 'Fever'}
    # obs = {0: 'normal', 1: 'cold', 2: 'dizzy'}

    prior = torch.tensor([0.6, 0.4])
    transitions = torch.tensor([[0.7, 0.3], [0.4, 0.6]])
    emit_healthy = CategoricalModel(probs=torch.tensor([0.5, 0.4, 0.1]))
    emit_fever = CategoricalModel(probs=torch.tensor([0.1, 0.3, 0.6]))
    hmm = HiddenMarkovModel([emit_healthy, emit_fever], T0=prior, T=transitions)

    packed_obs = pack_list([torch.tensor([1, 0, 1, 2, 2])])
    decoded, _ = hmm.decode(packed_obs)
    padded_states, _ = pad_packed_sequence(decoded, batch_first=True)

    decoded_labels = [label[idx.item()] for idx in padded_states[0]]
    assert decoded_labels == [
        'Healthy', 'Healthy', 'Healthy', 'Fever', 'Fever'
    ]
Ejemplo n.º 5
0
    # NOTE(review): fragment of a fit/decode test whose `def` header is
    # outside this view — `model`, `converge`, `obs_seq`, and `states`
    # are defined above the visible span; left byte-identical.

    # Dump the fitted model parameters for manual inspection.
    print("Pi Matrix: ")
    print(model.T0)
    print()

    print("Transition Matrix: ")
    print(model.T)
    print()

    print("Emission Matrix: ")
    for s in model.states:
        print([p for p in s.parameters()])
    print()

    print("Reached Convergence: ")
    print(converge)
    print()

    states_seq, _ = model.decode(obs_seq)

    # state_summary = np.array([model.prob_state_1[i].cpu().numpy() for i in
    #                           range(len(model.prob_state_1))])

    # pred = (1 - state_summary[-2]) > 0.5
    # pred = torch.cat(states_seq, 0).data.numpy()
    # true = np.concatenate(states, 0)
    # Stack per-sequence tensors into one batch; assumes states_seq and
    # states are sequences of equal-length tensors — TODO confirm against
    # model.decode/sample (sibling tests in this file receive packed
    # sequences instead and use `.data`).
    pred = torch.stack(states_seq)
    true = torch.stack(states)
    # Mean absolute disagreement between decoded and true state labels;
    # labels may be permuted, so both ~0 and ~1 disagreement pass.
    error = torch.mean(torch.abs(pred - true).float())
    print("Error: ", error)
    assert error >= 0.9 or error <= 0.1