# diagonal Gaussian as a special case.
observations = [
    "diagonal_gaussian", "gaussian",
    "diagonal_t", "studentst",
    "diagonal_ar", "ar",
    "diagonal_robust_ar", "robust_ar"
]

# Fit with both SGD and EM
methods = ["sgd", "em"]

results = {}
for obs in observations:
    for method in methods:
        print("Fitting {} HMM with {}".format(obs, method))
        model = HMM(K, D, observations=obs)
        train_lls = model.fit(y, method=method)
        test_ll = model.log_likelihood(y_test)
        smoothed_y = model.smooth(y)

        # Permute to match the true states
        model.permute(find_permutation(z, model.most_likely_states(y)))
        smoothed_z = model.most_likely_states(y)
        results[(obs, method)] = (model, train_lls, test_ll, smoothed_z, smoothed_y)

# Plot the inferred states
fig, axs = plt.subplots(len(observations) + 1, 1, figsize=(12, 8))

# Plot the true states
plt.sca(axs[0])
plt.imshow(z[None, :], aspect="auto", cmap="jet")
plt.title("true")
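
# A minimal sketch of how the remaining axes might be filled in: one row per
# observation model, showing the inferred state sequence. The choice to display
# the "em" fits and the colormap are illustrative assumptions, not part of the
# original script.
for i, obs in enumerate(observations):
    _, _, _, smoothed_z, _ = results[(obs, "em")]
    plt.sca(axs[i + 1])
    plt.imshow(smoothed_z[None, :], aspect="auto", cmap="jet")
    plt.title(obs)
plt.tight_layout()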
print("Fitting Gaussian HMM with EM") hmm = HMM(K, D, observations="gaussian") hmm_em_lls = hmm.fit(y, method="em", num_em_iters=N_em_iters) # Plot log likelihoods (fit model is typically better) plt.figure() plt.plot(hsmm_em_lls, ls='-', label="HSMM (EM)") plt.plot(hmm_em_lls, ls='-', label="HMM (EM)") plt.plot(true_ll * np.ones(N_em_iters), ':', label="true") plt.legend(loc="lower right") # Print the test likelihoods (true model is typically better) print("Test log likelihood") print("True HSMM: ", true_hsmm.log_likelihood(y_test)) print("Fit HSMM: ", hsmm.log_likelihood(y_test)) print("Fit HMM: ", hmm.log_likelihood(y_test)) # Plot the true and inferred states hsmm.permute(find_permutation(z, hsmm.most_likely_states(y))) hsmm_z = hsmm.most_likely_states(y) hmm.permute(find_permutation(z, hmm.most_likely_states(y))) hmm_z = hsmm.most_likely_states(y) # Plot the true and inferred discrete states plt.figure(figsize=(8, 6)) plt.subplot(311) plt.imshow(z[None, :1000], aspect="auto", cmap="cubehelix", vmin=0, vmax=K-1) plt.xlim(0, 1000) plt.ylabel("True $z") plt.yticks([])