# In[7]: N_iters = 50 hmm = HMM(K, D, observations="gaussian") hmm_lls = hmm.fit(y, method="em", num_em_iters=N_iters, verbose=True) plt.plot(hmm_lls, label="EM") plt.plot([0, N_iters], true_ll * np.ones(2), ':k', label="True") plt.xlabel("EM Iteration") plt.ylabel("Log Probability") plt.legend(loc="lower right") # In[8]: # Find a permutation of the states that best matches the true and inferred states hmm.permute(find_permutation(z, hmm.most_likely_states(y))) # In[11]: # Plot the true and inferred discrete states hmm_z = hmm.most_likely_states(y) plt.figure(figsize=(8, 4)) plt.subplot(211) plt.imshow(z[None, :], aspect="auto", cmap=cmap, vmin=0, vmax=len(colors) - 1) plt.xlim(0, T) plt.ylabel("$z_{\\mathrm{true}}$") plt.yticks([]) plt.subplot(212) plt.imshow(hmm_z[None, :],
]  # closes a list literal (presumably `observations`) opened in an earlier, unseen chunk

# Fit with both SGD and EM
# For each observation model, fit an HMM with each optimizer and record the
# fitted model, training log-likelihoods, held-out log-likelihood, inferred
# (permutation-aligned) states, and smoothed observations.
# NOTE(review): `HMM`, `find_permutation`, `plt`, and the data (`y`, `y_test`,
# `z`, `K`, `D`, `observations`) come from earlier cells not shown — confirm.
methods = ["sgd", "em"]
results = {}
for obs in observations:
    for method in methods:
        print("Fitting {} HMM with {}".format(obs, method))
        model = HMM(K, D, observations=obs)
        train_lls = model.fit(y, method=method)
        test_ll = model.log_likelihood(y_test)   # held-out likelihood
        smoothed_y = model.smooth(y)             # posterior-mean reconstruction
        # Permute to match the true states
        # (state labels are identifiable only up to permutation)
        model.permute(find_permutation(z, model.most_likely_states(y)))
        smoothed_z = model.most_likely_states(y)
        results[(obs, method)] = (model, train_lls, test_ll, smoothed_z, smoothed_y)

# Plot the inferred states
# One row per observation model, plus a top row for the ground truth.
fig, axs = plt.subplots(len(observations) + 1, 1, figsize=(12, 8))

# Plot the true states
plt.sca(axs[0])
plt.imshow(z[None, :], aspect="auto", cmap="jet")
plt.title("true")
# NOTE(review): `plt.xticks()` with no arguments only queries the current
# ticks (a no-op for the figure); this was probably meant to be
# `plt.xticks([])` to hide the tick labels — confirm intent.
plt.xticks()

# Plot the inferred states
# NOTE(review): this loop header is cut off here — its body continues
# beyond this chunk of the file.
for i, obs in enumerate(observations):
# Now create a new HMM and fit it to the data with EM N_iters = 50 hmm = HMM(K, D, M, observations="categorical", observation_kwargs=dict(C=C), transitions="inputdriven") # Fit hmm_lps = hmm.fit(y, inputs=inpt, method="em", num_em_iters=N_iters) # In[5]: # Find a permutation of the states that best matches the true and inferred states hmm.permute(find_permutation(z, hmm.most_likely_states(y, input=inpt))) z_inf = hmm.most_likely_states(y, input=inpt) # In[6]: # Plot the log probabilities of the true and fit models plt.plot(hmm_lps, label="EM") plt.plot([0, N_iters], true_lp * np.ones(2), ':k', label="True") plt.legend(loc="lower right") plt.xlabel("EM Iteration") plt.xlim(0, N_iters) plt.ylabel("Log Probability")