# Example 1
    def test_parameter_packing(self):
        """Check that each packing operation round-trips: unpack(pack(x)) == x.

        Covers the individual pack/unpack pairs for tau, phi_mu, phi_var
        and nu, then the combined pack_params/unpack_params pair.
        """
        num_samples = 10
        d = 4
        k_approx = 3

        tau, nu, phi_mu, phi_var = \
            initialize_parameters(num_samples, d, k_approx)

        # Consistency fix: the original mixed bare pack_tau/pack_phi_mu calls
        # with packing.* calls; route everything through the packing module,
        # matching the pack_phi_var/pack_nu lines below.
        self.assert_allclose(
            packing.unpack_tau(packing.pack_tau(tau), k_approx), tau)
        self.assert_allclose(packing.unpack_phi_mu(
            packing.pack_phi_mu(phi_mu), k_approx, d), phi_mu)
        self.assert_allclose(
            packing.unpack_phi_var(packing.pack_phi_var(phi_var)), phi_var)
        self.assert_allclose(
            packing.unpack_nu(packing.pack_nu(nu), num_samples, k_approx), nu)

        # Deep copies guard against pack_params possibly mutating its
        # arguments in place — the originals are compared against below.
        params = packing.pack_params(
            deepcopy(tau), deepcopy(phi_mu), deepcopy(phi_var), deepcopy(nu))
        tau0, phi_mu0, phi_var0, nu0 = packing.unpack_params(
            params, k_approx, d, num_samples)

        self.assert_allclose(tau0, tau)
        self.assert_allclose(phi_mu0, phi_mu)
        self.assert_allclose(phi_var0, phi_var)
        self.assert_allclose(nu0, nu)
# Example 2
    def test_moment_packing(self):
        """Round-trip e_log_pi and e_mu through pack_moments/unpack_moments."""
        x_dim = 4
        k_approx = 3

        tau, nu, phi_mu, phi_var = \
            initialize_parameters(5, x_dim, k_approx)
        # Only the first and third moments are exercised here; the rest of
        # the tuple (e_log_pi2, phi_moment2, nu_moment) is unused.
        e_log_pi, e_log_pi2, e_mu, phi_moment2, nu_moment = \
            get_moments(tau, nu, phi_mu, phi_var)

        packed = packing.pack_moments(e_log_pi, e_mu)
        unpacked_log_pi, unpacked_mu = \
            packing.unpack_moments(packed, k_approx, x_dim)

        self.assert_allclose(e_log_pi, unpacked_log_pi)
        self.assert_allclose(e_mu, unpacked_mu)
    def test_e_log_lik(self):
        """Check exp_log_likelihood against a Monte Carlo estimate.

        For several random variational parameter settings, draw samples
        from the variational distribution, average the sampled log
        likelihoods, and verify that the analytic expected log likelihood
        tracks the sample average up to Monte Carlo error (3 standard
        errors of the mean).

        NOTE(review): relies on names defined outside this block
        (num_samples, x_dim, k_approx, vb_params2, x, sigma_eps, sigma_a,
        alpha) — presumably module- or class-level fixtures; confirm
        against the full file.
        """
        n_test_samples = 10000

        # Our expected log likelihood should only differ from a sample average
        # of the generated log likelihood by a constant as the parameters
        # vary.  Check this using num_param different random parameters.
        num_params = 5
        ell_by_param = np.full(num_params, float('nan'))
        sample_ell_by_param = np.full(num_params, float('nan'))
        standard_error = 0.
        for i in range(num_params):
            tau, nu, phi_mu, phi_var = \
                vi.initialize_parameters(num_samples, x_dim, k_approx)
            # Replicate phi_var once per dimension — presumably
            # generate_parameter_draws expects a per-dimension variance;
            # verify against vi.generate_parameter_draws.
            phi_var_expanded = np.array([phi_var for d in range(x_dim)])

            # set vb parameters
            vb_params2['phi'].set_vector(
                np.hstack([np.ravel(phi_mu.T), phi_var]))
            vb_params2['pi'].set_vector(np.ravel(tau))
            vb_params2['nu'].set_vector(np.ravel(nu))

            z_sample, a_sample, pi_sample = \
                vi.generate_parameter_draws(nu, phi_mu, phi_var_expanded, \
                                            tau, n_test_samples)

            # Log likelihood of the data at each sampled draw of the
            # latent variables.
            sample_e_log_lik = [
                vi.log_lik(x, z_sample[n, :, :], a_sample[n, :, :],
                           pi_sample[n, :], sigma_eps, sigma_a, alpha,
                           k_approx) \
                for n in range(n_test_samples) ]

            sample_ell_by_param[i] = np.mean(sample_e_log_lik)
            # Track the largest Monte Carlo standard error of the mean
            # seen across parameter settings; used as the test tolerance.
            standard_error = \
                np.max([ standard_error,
                         np.std(sample_e_log_lik) / np.sqrt(n_test_samples) ])

            # get moments
            e_log_pi1, e_log_pi2, phi_moment1, phi_moment2, nu_moment =\
                            vi.get_moments_VB(vb_params2)

            # Analytic expected log likelihood under the variational
            # distribution, for comparison with the sample average above.
            ell_by_param[i] = vi.exp_log_likelihood(nu_moment, phi_moment1,
                                                    phi_moment2, e_log_pi1,
                                                    e_log_pi2, sigma_a,
                                                    sigma_eps, x, alpha)

        print('Mean log likelihood standard error: %0.5f' % standard_error)
        # The analytic and sampled values may differ by a constant, so
        # compare their spread across parameter settings, not their means.
        self.assertTrue(np.std(ell_by_param - sample_ell_by_param) < \
                        3. * standard_error)
# Example 4
    def test_parameter_packing(self):
        """Flatten the variational parameters and check they unflatten intact."""
        num_samples = 10
        x_dim = 4
        k_approx = 3

        tau, nu, phi_mu, phi_var = \
            initialize_parameters(num_samples, x_dim, k_approx)
        flat = packing.flatten_params(tau, nu, phi_mu, phi_var)

        tau0, phi_mu0, phi_var0, nu0 = \
            packing.unflatten_params(flat, k_approx, x_dim, num_samples)

        # Each recovered parameter must match its original.
        for original, recovered in ((tau, tau0), (nu, nu0),
                                    (phi_mu, phi_mu0), (phi_var, phi_var0)):
            self.assert_allclose(original, recovered)
    def test_basics(self):
        """Smoke test the full pipeline: generate data, pack parameters,
        and run every DataSet entry point (KL, gradient, Hessian,
        Hessian-vector product, sensitivity Hessian, CAVI, Newton trust
        region, prediction).
        """
        alpha = 10
        num_samples = 5
        x_dim = 3
        sigma_a = 3.0**2
        sigma_eps = 1.0**2
        k_inf = 4

        pi, Z, A, X = vi.generate_data(
            num_samples, x_dim, k_inf, sigma_a, sigma_eps, alpha)

        # Truncate the variational approximation at the true K.
        k_approx = k_inf

        tau_init, nu_init, phi_mu_init, phi_var_init = \
            vi.initialize_parameters(num_samples, x_dim, k_approx)

        # Deep copies guard against pack_params possibly mutating its
        # arguments — the originals are compared against below.
        params_init = packing.pack_params(deepcopy(tau_init),
                                          deepcopy(phi_mu_init),
                                          deepcopy(phi_var_init),
                                          deepcopy(nu_init))

        hyper_params = packing.pack_hyperparameters(alpha, sigma_a, sigma_eps)

        data_set = DataSet(X, k_approx, alpha, sigma_eps, sigma_a)
        tau, phi_mu, phi_var, nu = data_set.unpack_params(params_init)
        for recovered, expected in ((tau, tau_init), (nu, nu_init),
                                    (phi_mu, phi_mu_init),
                                    (phi_var, phi_var_init)):
            self.assert_allclose(recovered, expected)

        # These are exercised mostly just to check they run; the
        # Hessian-vector product additionally must agree with an explicit
        # Hessian-times-gradient product.
        kl = data_set.wrapped_kl(params_init)
        grad = data_set.get_kl_grad(params_init)
        hess = data_set.get_kl_hessian(params_init)
        hvp = data_set.get_kl_hvp(params_init, grad)
        self.assert_allclose(hvp, np.matmul(hess, grad))
        kl_sens_hess = data_set.get_kl_sens_hess(params_init, hyper_params)

        # Just check that these run.
        data_set.run_cavi(tau, nu, phi_mu, phi_var, max_iter=2)
        data_set.run_newton_tr(params_init, maxiter=2)
        data_set.get_prediction(params_init)
# Fix the RNG seed so every run draws the same data and parameters.
np.random.seed(534)

num_samples = 50  # sample size
x_d = 2  # dimension

alpha = 10
sigma_a = 3.0**2
sigma_eps = 1.0**2  # variance of noise
k_approx = 3

# Generate synthetic data; only the observations x are needed here.
_, _, _, x = vi.generate_data(
    num_samples, x_d, k_approx, sigma_a, sigma_eps, alpha)

# VI parameters
tau_init, nu_init, phi_mu_init, phi_var_init = \
    vi.initialize_parameters(num_samples, x_d, k_approx)

vb_model = vi.set_ibp_vb_model(num_samples, x_d, k_approx)

# Load the initial values into the VB model containers.
vb_model['phi'].set_vector(np.hstack([np.ravel(phi_mu_init.T), phi_var_init]))
# The integration apparently is better for larger tau, so pi is set from a
# uniform draw on [20, 40] rather than from tau_init.
vb_model['pi'].set_vector(
    np.ravel(np.random.uniform(20, 40, (k_approx, 2))))
vb_model['nu'].set_vector(np.ravel(nu_init))

# Consolidate the hyperparameters into one container.
hyper_params = ModelParamsDict('hyper_params')
hyper_params.push_param(ScalarParam('alpha', lb=0.0))
hyper_params.push_param(ScalarParam('var_a', lb=0.0))
hyper_params.push_param(ScalarParam('var_eps', lb=0.0))