Example #1
    # Assumes module-level imports of numpy as np, scipy.stats.norm and, from
    # pyapprox, IndependentMultivariateRandomVariable, LinearModel,
    # GaussianLogLike, PYMC3LogLikeWrapper and
    # run_bayesian_inference_gaussian_error_model.
    def test_linear_gaussian_inference(self):
        # set the random seed so the synthetic data is reproducible
        np.random.seed(1)

        nobs = 10  # number of observations
        noise_stdev = .1  # standard deviation of noise
        x = np.linspace(0., 9., nobs)
        Amatrix = np.hstack([np.ones((nobs, 1)), x[:, np.newaxis]])

        # Gaussian priors on the intercept and slope; scipy's norm takes
        # (loc, scale), so the prior standard deviations are 1 and 4
        univariate_variables = [norm(1, 1), norm(0, 4)]
        variables = IndependentMultivariateRandomVariable(univariate_variables)

        mtrue = 0.4  # true slope
        ctrue = 2.  # true y-intercept
        true_sample = np.array([[ctrue, mtrue]]).T

        model = LinearModel(Amatrix)

        # generate noisy observations of the true linear model
        data = noise_stdev * np.random.randn(nobs) + model(true_sample)[0, :]
        loglike = GaussianLogLike(model, data, noise_stdev**2)
        loglike = PYMC3LogLikeWrapper(loglike)

        # number of draws from the distribution
        ndraws = 5000
        # number of "burn-in points" (which we'll discard)
        nburn = min(1000, int(ndraws * 0.1))
        # number of parallel chains
        njobs = 4

        # algorithm = 'nuts'
        algorithm = 'metropolis'
        samples, effective_sample_size, map_sample = \
            run_bayesian_inference_gaussian_error_model(
                loglike, variables, ndraws, nburn, njobs,
                algorithm=algorithm, get_map=True, print_summary=False)

        # prior moments and inverse noise covariance needed by the
        # closed-form posterior for linear models with Gaussian priors
        prior_mean = np.asarray(
            [rv.mean() for rv in variables.all_variables()])
        prior_hessian = np.diag(
            [1. / rv.var() for rv in variables.all_variables()])
        noise_covariance_inv = 1. / noise_stdev**2 * np.eye(nobs)

        from pyapprox.bayesian_inference.laplace import \
            laplace_posterior_approximation_for_linear_models
        exact_mean, exact_covariance = \
            laplace_posterior_approximation_for_linear_models(
                Amatrix, prior_mean, prior_hessian,
                noise_covariance_inv, data)

        print('mcmc mean error', samples.mean(axis=1) - exact_mean)
        print('mcmc cov error', np.cov(samples) - exact_covariance)
        print('MAP sample', map_sample)
        print('exact mean', exact_mean.squeeze())
        print('exact cov', exact_covariance)
        assert np.allclose(map_sample, exact_mean)
        assert np.allclose(exact_mean.squeeze(),
                           samples.mean(axis=1),
                           atol=1e-2)
        assert np.allclose(exact_covariance, np.cov(samples), atol=1e-2)
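
For reference, the exact posterior that the assertions above compare against is the standard Bayesian linear-regression formula. A minimal NumPy sketch, independent of pyapprox (the function name and signature here are illustrative, not part of its API):

import numpy as np

def linear_gaussian_posterior(Amatrix, prior_mean, prior_hessian,
                              noise_covariance_inv, data):
    # posterior precision = prior precision + A^T C_noise^{-1} A
    post_precision = prior_hessian + \
        Amatrix.T.dot(noise_covariance_inv).dot(Amatrix)
    post_covariance = np.linalg.inv(post_precision)
    # posterior mean solves: precision @ mean = A^T C^{-1} d + H_prior m_prior
    rhs = Amatrix.T.dot(noise_covariance_inv).dot(data) + \
        prior_hessian.dot(prior_mean)
    return post_covariance.dot(rhs), post_covariance

With the quantities built in the test, this should reproduce exact_mean and exact_covariance up to the handling of column vectors.
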
    def test_marginalize_polynomial_chaos_expansions(self):
        univariate_variables = [uniform(-1, 2), norm(0, 1), uniform(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        degree = 2
        indices = compute_hyperbolic_indices(num_vars, degree, 1)
        poly.set_indices(indices)
        poly.set_coefficients(np.ones((indices.shape[1], 1)))

        for ii in range(num_vars):
            # Marginalize out the two variables other than variable ii
            xx = np.linspace(-1, 1, 101)
            inactive_idx = np.hstack(
                (np.arange(ii), np.arange(ii + 1, num_vars)))
            marginalized_pce = marginalize_polynomial_chaos_expansion(
                poly, inactive_idx)
            mvals = marginalized_pce(xx[None, :])
            variable_ii = variable.all_variables()[ii:ii + 1]
            var_trans_ii = AffineRandomVariableTransformation(variable_ii)
            poly_ii = PolynomialChaosExpansion()
            poly_opts_ii = define_poly_options_from_variable_transformation(
                var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = compute_hyperbolic_indices(1, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx[None, :])
            # import matplotlib.pyplot as plt
            # plt.plot(xx, pvals)
            # plt.plot(xx, mvals, '--')
            # plt.show()
            assert np.allclose(mvals, pvals)

            # Marginalize out variable ii only, leaving a 2D expansion
            xx = cartesian_product([xx] * 2)  # tensor grid from the 1D points
            inactive_idx = np.array([ii])
            marginalized_pce = marginalize_polynomial_chaos_expansion(
                poly, inactive_idx)
            mvals = marginalized_pce(xx)
            variable_ii = (variable.all_variables()[:ii] +
                           variable.all_variables()[ii + 1:])
            var_trans_ii = AffineRandomVariableTransformation(variable_ii)
            poly_ii = PolynomialChaosExpansion()
            poly_opts_ii = define_poly_options_from_variable_transformation(
                var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = compute_hyperbolic_indices(2, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx)
            assert np.allclose(mvals, pvals)
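
The property being exercised is that, for an orthonormal basis, integrating out a variable simply drops every term whose multi-index is nonzero in that variable. A self-contained NumPy illustration with Legendre polynomials and a uniform density on [-1, 1] (this sketch does not use pyapprox):

import numpy as np
from numpy.polynomial.legendre import legval, leggauss

# 2D expansion f(x, y) = sum_{i,j<=2} c[i,j] P_i(x) P_j(y), all c[i,j] = 1
coef = np.ones((3, 3))
x = np.linspace(-1, 1, 11)

# marginalize over y ~ U[-1, 1] by Gauss-Legendre quadrature
# (the uniform density is 1/2, hence the division by 2)
yq, wq = leggauss(10)
fvals = np.array([legval(x, legval(yy, coef.T)) for yy in yq])
marginal = wq.dot(fvals) / 2.

# E[P_j(Y)] = 0 for j >= 1, so only the j = 0 column of c survives
assert np.allclose(marginal, legval(x, coef[:, 0]))
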
Example #3
# Fragment from inside a loop over the unique variables of re_variable
# (rv, ii and cnt are set by the enclosing loop, roughly
# for ii, rv in enumerate(re_variable.unique_variables)):
    name, scales, shapes = get_distribution_info(rv)
    # variables other than 0 and 2 keep their standard orthonormal basis
    if ii not in [0, 2]:
        opts = {'rv_type': name, 'shapes': shapes,
                'var_nums': re_variable.unique_variable_indices[ii]}
        basis_opts['basis%d' % ii] = opts
        continue

    # the indices must be appended as a list, not as a raw index array:
    # identity_map_indices += re_variable.unique_variable_indices[ii]  # wrong
    identity_map_indices += list(re_variable.unique_variable_indices[ii])  # right
    
    quad_rules = []
    inds = index_product[cnt]
    nquad_samples_1d = 50

    for jj in inds:
        a, b = variable.all_variables()[jj].interval(1)
        # Gauss-Jacobi with both parameters zero, i.e. a Gauss-Legendre rule
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, 0, 0)
        x = (x + 1) / 2      # map points from [-1, 1] to [0, 1]
        x = (b - a) * x + a  # map points to the variable's support [a, b]
        quad_rules.append((x, w))
    funs = [identity_fun]*len(inds)
    basis_opts['basis%d' % ii] = {'poly_type': 'product_indpnt_vars',
                                  'var_nums': [ii], 'funs': funs,
                                  'quad_rules': quad_rules}
    cnt += 1

poly_opts = {'var_trans': re_var_trans}
poly_opts['poly_types'] = basis_opts
# var_trans.set_identity_maps(identity_map_indices)  # wrong
re_var_trans.set_identity_maps(identity_map_indices)  # right
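
The point mapping in the quadrature loop is the usual affine change of variables for a quadrature rule. A standalone check with NumPy's Gauss-Legendre rule (note: pyapprox's gauss_jacobi_pts_wts_1D appears to return weights already normalized to sum to one, which is why the loop above rescales only the points; leggauss's raw weights sum to 2, hence the division below):

import numpy as np

a, b = 2.0, 5.0
x, w = np.polynomial.legendre.leggauss(50)  # rule on [-1, 1]
x = (x + 1) / 2      # map points to [0, 1]
x = (b - a) * x + a  # map points to [a, b]
w = w / 2            # probability weights for the uniform density on [a, b]

# E[X^3] for X ~ U(a, b) is (b**4 - a**4) / (4 * (b - a))
assert np.allclose(w.dot(x**3), (b**4 - a**4) / (4 * (b - a)))
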