Example 1
def test_hmm_fit_viterbi_diagnormal():

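    # Ground-truth model: two hidden states with 3-D diagonal-Gaussian
    # emissions centred at 0 and 10, used only to sample training data.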
    T0 = torch.tensor([0.75, 0.25])
    T = torch.tensor([[0.85, 0.15], [0.12, 0.88]])
    s1_means = torch.tensor([0.0, 0.0, 0.0])
    s1_precs = torch.tensor([1.0, 1.0, 1.0])
    s2_means = torch.tensor([10.0, 10.0, 10.0])
    s2_precs = torch.tensor([1.0, 1.0, 1.0])
    s1 = DiagNormalModel(means=s1_means, precs=s1_precs)
    s2 = DiagNormalModel(means=s2_means, precs=s2_precs)
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)
    obs_seq, states = model.sample(50, 100)

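    # Learner model: same two-state structure, but the emission means are
    # initialised between the two true clusters.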
    T0 = torch.tensor([0.75, 0.25])
    T = torch.tensor([[0.85, 0.15], [0.12, 0.88]])
    s1_means = torch.tensor([3.0, 3.0, 3.0])
    s1_precs = torch.tensor([1.0, 1.0, 1.0])
    s2_means = torch.tensor([6.0, 6.0, 6.0])
    s2_precs = torch.tensor([1.0, 1.0, 1.0])
    s1 = DiagNormalModel(means=s1_means, precs=s1_precs)
    s2 = DiagNormalModel(means=s2_means, precs=s2_precs)
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)

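    # Fit the learner to the sampled observations; fit returns a
    # convergence flag, which is asserted below.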
    converge = model.fit(obs_seq, max_steps=500, epsilon=1e-2, restarts=3)

    # Exact parameter estimates are not asserted here; the recovered values
    # are printed for inspection instead, e.g.:
    # assert np.allclose(trans0.data.numpy(), True_pi)
    print("Pi Matrix: ")
    print(model.T0)

    print("Transition Matrix: ")
    print(model.T)
    # assert np.allclose(transition.exp().data.numpy(), True_T, atol=0.1)
    print()
    print("Emission: ")
    for s in model.states:
        p = list(s.parameters())
        print("Means", p[0])
        print("Cov", p[1].abs())
    # assert np.allclose(emission.exp().data.numpy(), True_E, atol=0.1)
    print()
    print("Reached Convergence: ")
    print(converge)

    assert converge

    states_seq, _ = model.decode(obs_seq)

    # state_summary = np.array([model.prob_state_1[i].cpu().numpy() for i in
    #                           range(len(model.prob_state_1))])

    # pred = (1 - state_summary[-2]) > 0.5
    # pred = torch.cat(states_seq, 0).data.numpy()
    # true = np.concatenate(states, 0)
    pred = states_seq
    true = states
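    # Hidden-state labels are not identifiable, so the mean absolute
    # difference between decoded and true states should be near 0
    # (labels match) or near 1 (labels swapped).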
    accuracy = torch.mean(torch.abs(pred.data - true.data).float())
    print("Accuracy: ", accuracy)
    assert accuracy >= 0.9 or accuracy <= 0.1
Example 2
def test_hmm_sample():

    # create categorical models for the states
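    # Deterministic case: the chain always starts in state 0 and never
    # leaves it, and each state emits a single symbol, so neither state 1
    # nor symbol 1 should appear in the samples.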
    T0 = torch.tensor([1.0, 0.0])
    T = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    s1 = CategoricalModel(probs=torch.tensor([1.0, 0.0]))
    s2 = CategoricalModel(probs=torch.tensor([0.0, 1.0]))
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)
    obs_seq, states = model.sample(3, 10)

    assert isinstance(obs_seq, PackedSequence)
    assert obs_seq.data.shape[0] == 30

    assert isinstance(states, PackedSequence)
    assert states.data.shape[0] == 30
    assert 1 not in obs_seq.data
    assert 1 not in states.data

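    # Uniform start and transition probabilities: both states, and hence
    # both symbols, are expected to appear in the samples.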
    T0 = torch.tensor([0.5, 0.5])
    T = torch.tensor([[0.5, 0.5], [0.5, 0.5]])
    s1 = CategoricalModel(probs=torch.tensor([1.0, 0.0]))
    s2 = CategoricalModel(probs=torch.tensor([0.0, 1.0]))
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)
    obs_seq, states = model.sample(4, 20)

    assert isinstance(obs_seq, PackedSequence)
    assert obs_seq.data.shape[0] == 80

    assert isinstance(states, PackedSequence)
    assert states.data.shape[0] == 80
    assert 1 in obs_seq.data and 0 in obs_seq.data
    assert 1 in states.data and 0 in states.data

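    # Chain biased towards state 0, so state 0 should be sampled more
    # often than state 1.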
    T0 = torch.tensor([0.9, 0.1])
    T = torch.tensor([[0.9, 0.1], [0.5, 0.5]])
    s1 = CategoricalModel(probs=torch.tensor([1.0, 0.0]))
    s2 = CategoricalModel(probs=torch.tensor([0.0, 1.0]))
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)
    obs_seq, states = model.sample(1, 20)

    assert isinstance(obs_seq, PackedSequence)
    assert obs_seq.data.shape[0] == 20

    assert isinstance(states, PackedSequence)
    assert states.data.shape[0] == 20
    assert (states.data == 0).sum() > (states.data == 1).sum()
Example 3
def test_hmm_fit_viterbi():

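    # Ground-truth model: two hidden states with near-deterministic
    # categorical emissions, used only to sample training data.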
    T0 = torch.tensor([0.75, 0.25])
    T = torch.tensor([[0.85, 0.15], [0.12, 0.88]])
    s1_orig = torch.tensor([0.99, 0.01])
    s2_orig = torch.tensor([0.05, 0.95])
    s1 = CategoricalModel(probs=s1_orig)
    s2 = CategoricalModel(probs=s2_orig)
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)
    obs_seq, states = model.sample(50, 100)

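    # Learner model: initialised with nearly uniform parameters.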
    T0 = torch.tensor([0.5, 0.5])
    T = torch.tensor([[0.6, 0.4], [0.5, 0.5]])
    s1_orig = torch.tensor([0.6, 0.4])
    s2_orig = torch.tensor([0.5, 0.5])
    s1 = CategoricalModel(probs=s1_orig)
    s2 = CategoricalModel(probs=s2_orig)
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)

    converge = model.fit(obs_seq, max_steps=500, epsilon=1e-2, restarts=3)

    # As in the DiagNormal test, exact parameter values are printed for
    # inspection rather than asserted, e.g.:
    # assert np.allclose(trans0.data.numpy(), True_pi)
    print("Pi Matrix: ")
    print(model.T0)

    print("Transition Matrix: ")
    print(model.T)
    # assert np.allclose(transition.exp().data.numpy(), True_T, atol=0.1)
    print()
    print("Emission Matrix: ")
    for s in model.states:
        print([p.softmax(0) for p in s.parameters()])
    # assert np.allclose(emission.exp().data.numpy(), True_E, atol=0.1)
    print()
    print("Reached Convergence: ")
    print(converge)

    assert converge

    states_seq, _ = model.decode(obs_seq)

    # state_summary = np.array([model.prob_state_1[i].cpu().numpy() for i in
    #                           range(len(model.prob_state_1))])

    # pred = (1 - state_summary[-2]) > 0.5
    # pred = torch.cat(states_seq, 0).data.numpy()
    # true = np.concatenate(states, 0)
    pred = states_seq
    true = states
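    # As above, state labels may be permuted, so the mismatch rate should
    # be near 0 or near 1.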
    accuracy = torch.mean(torch.abs(pred.data - true.data).float())
    print("Accuracy: ", accuracy)
    assert accuracy >= 0.9 or accuracy <= 0.1
Example 4
if __name__ == "__main__":
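    # Demo: sample from a hand-specified HMM (with zero priors), then fit a
    # second model that starts from rough initial guesses.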
    T0 = torch.tensor([0.75, 0.25])
    T = torch.tensor([[0.85, 0.15], [0.12, 0.88]])
    s1_orig = torch.tensor([1.0, 0.0])
    s2_orig = torch.tensor([0.0, 1.0])
    s1 = CategoricalModel(probs=s1_orig, prior=torch.zeros_like(s1_orig))
    s2 = CategoricalModel(probs=s2_orig, prior=torch.zeros_like(s2_orig))
    model = HiddenMarkovModel([s1, s2],
                              T0=T0,
                              T=T,
                              T0_prior=torch.tensor([0., 0.]),
                              T_prior=torch.tensor([[0., 0.], [0., 0.]]))
    obs_seq, states = model.sample(10, 10)

    print("First 5 Obersvations of seq 0:  ", obs_seq[0][:5])
    print("First 5 Hidden States of seq 0: ", states[0][:5])
    print()

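    # Learner model with initial guesses close to uniform.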
    T0 = torch.tensor([0.49, 0.51])
    T = torch.tensor([[0.6, 0.4], [0.48, 0.52]])
    s1_orig = torch.tensor([0.9, 0.1])
    s2_orig = torch.tensor([0.2, 0.8])
    s1 = CategoricalModel(probs=s1_orig)
    s2 = CategoricalModel(probs=s2_orig)
    model = HiddenMarkovModel([s1, s2], T0=T0, T=T)

    converge = model.fit(obs_seq, max_steps=1, epsilon=1e-4)