def test_KernelExpFiniteGaussian_fit_equals_update_fit():
    """Batch ``fit`` and incremental ``update_fit`` must yield identical state.

    Two estimators are built from the same RNG seed (so any random feature
    draws in the constructor coincide), then one is fitted in batch mode and
    the other via a single online update; all learned quantities must match.
    """
    sigma, lmbda, m = 1., 2., 2
    N, D = 1, 2

    # Seed the global RNG identically for both constructions, then restore
    # the caller's RNG state so the test has no lasting side effect on it.
    saved_state = np.random.get_state()
    np.random.seed(0)
    est_batch = KernelExpFiniteGaussian(sigma, lmbda, m, D)
    np.random.seed(0)
    est_update = KernelExpFiniteGaussian(sigma, lmbda, m, D)
    np.random.set_state(saved_state)

    # Freshly constructed estimators carry no fitted statistics yet.
    assert_equal(est_batch.b, None)
    assert_equal(est_update.b, None)
    assert_equal(est_batch.L_C, None)
    assert_allclose(est_batch.n, est_update.n)
    assert_allclose(est_batch.theta, np.zeros(m))
    assert_allclose(est_update.theta, np.zeros(m))

    X = np.random.randn(N, D)
    est_batch.fit(X)
    est_update.update_fit(X)

    # After seeing the same single batch, both code paths must agree.
    assert_allclose(est_batch.b, est_update.b)
    assert_allclose(est_batch.L_C, est_update.L_C)
    assert_allclose(est_batch.n, est_update.n)
    assert_allclose(est_batch.theta, est_update.theta)
plt.title("Gradient norm, ground truth")

# Plot the estimator's fit before it has been shown any data.
fig_count += 1
plt.subplot(3, 3, fig_count)
D, G = pdf_grid(Xs, Ys, est)
visualise_array(Xs, Ys, G)
plt.title("Gradient norm, no data")
plt.tight_layout()

# Feed the model a sequence of growing mini-batches and re-plot after each
# online update to show how the fit evolves.
for batch_idx in range(7):
    X = np.random.randn(3 * (batch_idx + 1), est.D)

    # Accumulated purely so every point seen so far can be overlaid on the plot.
    all_data.append(X)

    # API call for incrementally updating the estimator with new data.
    est.update_fit(X)

    # Visualise the current fit in the next subplot slot.
    fig_count += 1
    plt.subplot(3, 3, fig_count)
    D, G = pdf_grid(Xs, Ys, est)
    visualise_array(Xs, Ys, G, np.vstack(all_data))
    plt.title("Gradient norm, N=%d" % est.n)
    plt.tight_layout()

plt.show()