Example #1
    def test_multiply_pce(self):
        np.random.seed(1)
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 1, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))

        coef1 = np.arange(poly1.indices.shape[1])[:, np.newaxis]
        coef2 = np.arange(poly2.indices.shape[1])[:, np.newaxis]
        poly1.set_coefficients(coef1)
        poly2.set_coefficients(coef2)

        poly3 = poly1 * poly2
        samples = generate_independent_random_samples(variable, 10)
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))

        for order in range(4):
            poly = poly1**order
            assert np.allclose(poly(samples), poly1(samples)**order)
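
The overloaded * and ** operators rely on re-expanding the product of two orthogonal expansions in a higher-degree basis of the same family. A minimal one-dimensional analogue, using numpy's Legendre module rather than pyapprox (the normalization differs, but the product logic is the same):

import numpy as np
from numpy.polynomial import legendre as leg

# Two 1D Legendre expansions; legmul re-expands their product in the
# Legendre basis, which is the 1D analogue of poly1 * poly2 above.
c1 = np.arange(1.0, 4.0)   # degree-2 expansion
c2 = np.arange(1.0, 3.0)   # degree-1 expansion
c3 = leg.legmul(c1, c2)    # degree-3 product coefficients
x = np.random.uniform(-1, 1, 10)
assert np.allclose(leg.legval(x, c3), leg.legval(x, c1)*leg.legval(x, c2))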
Example #2
    def test_multiply_multivariate_orthonormal_polynomial_expansions(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        degree1, degree2 = 3, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        max_degrees1 = poly1.indices.max(axis=1)
        max_degrees2 = poly2.indices.max(axis=1)
        product_coefs_1d = compute_product_coeffs_1d_for_each_variable(
            poly1, max_degrees1, max_degrees2)

        indices, coefs = multiply_multivariate_orthonormal_polynomial_expansions(
            product_coefs_1d, poly1.get_indices(), poly1.get_coefficients(),
            poly2.get_indices(), poly2.get_coefficients())

        poly3 = get_polynomial_from_variable(variable)
        poly3.set_indices(indices)
        poly3.set_coefficients(coefs)

        samples = generate_independent_random_samples(variable, 10)
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))
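
For orthonormal families, the 1D product coefficients computed by compute_product_coeffs_1d_for_each_variable are the triple products c_k = E[phi_m*phi_n*phi_k] that linearize a product of basis functions. A sketch for orthonormal Legendre polynomials (uniform density on [-1, 1]), assuming only this standard identity:

import numpy as np
from numpy.polynomial.legendre import leggauss, legval

def onorm_legval(x, n):
    # Legendre polynomial normalized to unit variance under the
    # uniform density on [-1, 1]: phi_n = sqrt(2n+1)*P_n
    c = np.zeros(n + 1)
    c[n] = np.sqrt(2*n + 1)
    return legval(x, c)

x, w = leggauss(20)
w = w/2  # quadrature weights for the uniform density on [-1, 1]
m, n = 2, 3
# triple products c_k = E[phi_m*phi_n*phi_k] linearize the product:
# phi_m*phi_n = sum_k c_k*phi_k, with k ranging up to m+n
coefs = [np.sum(w*onorm_legval(x, m)*onorm_legval(x, n)*onorm_legval(x, k))
         for k in range(m + n + 1)]
product = sum(c*onorm_legval(x, k) for k, c in enumerate(coefs))
assert np.allclose(product, onorm_legval(x, m)*onorm_legval(x, n))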
Example #3
    def test_add_pce(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 2, 3
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        poly3 = poly1 + poly2 + poly2
        samples = generate_independent_random_samples(variable, 10)
        assert np.allclose(poly3(samples), poly1(samples) + 2 * poly2(samples))

        poly4 = poly1 - poly2
        samples = generate_independent_random_samples(variable, 10)
        assert np.allclose(poly4(samples), poly1(samples) - poly2(samples))
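
Addition and subtraction are simpler: coefficients of matching basis terms are added on the union of the two index sets. The 1D analogue with numpy's Legendre module:

import numpy as np
from numpy.polynomial import legendre as leg

c1, c2 = np.arange(3.0), np.arange(5.0)
# legadd pads the shorter coefficient vector with zeros, i.e. it adds
# coefficients on the union of the two (1D) index sets
c3 = leg.legadd(c1, c2)
x = np.random.uniform(-1, 1, 10)
assert np.allclose(leg.legval(x, c3), leg.legval(x, c1) + leg.legval(x, c2))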
Example #4
    for jj in inds:
        a, b = variable.all_variables()[jj].interval(1)
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, 0, 0)
        x = (x+1)/2 # map to [0, 1]
        x = (b-a)*x+a # map to [a,b]
        quad_rules.append((x, w))
    funs = [identity_fun]*len(inds)
    basis_opts['basis%d' % ii] = {
        'poly_type': 'product_indpnt_vars', 'var_nums': [ii],
        'funs': funs, 'quad_rules': quad_rules}
    cnt += 1

poly_opts = {'var_trans': re_var_trans}
poly_opts['poly_types'] = basis_opts
re_var_trans.set_identity_maps(identity_map_indices)

indices = compute_hyperbolic_indices(re_variable.num_vars(), degree)
nterms = total_degree_space_dimension(samples_adjust.shape[0], degree)
options = {'basis_type': 'fixed', 'variable': re_variable,
           'poly_opts': poly_opts,
           'options': {'linear_solver_options': dict(),
                       'indices': indices, 'solver_type': 'lstsq'}}

# fit on the first 2*nterms samples, test on the remainder
approx_res = approximate(
    samples_adjust[:, :2*nterms], values[:2*nterms],
    'polynomial_chaos', options).approx
y_hat = approx_res(samples_adjust[:, 2*nterms:])
print(f'Mean test residual: {(y_hat - values[2*nterms:]).mean()}')
print(f'Mean of samples: {values.mean()}')
print(f'Mean of pce: {approx_res.mean()}')
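
total_degree_space_dimension(num_vars, degree) returns the number of terms in the total-degree polynomial space, C(num_vars + degree, degree); the snippet above then trains on twice that many samples and tests on the rest. A quick check of the count, assuming the standard binomial formula:

from math import comb

num_vars, degree = 2, 3
# number of multivariate monomials with total degree <= degree
nterms = comb(num_vars + degree, degree)
assert nterms == 10  # 1 + 2 + 3 + 4 terms of degree 0, 1, 2, 3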
Example #5
def genz_example(max_num_samples, precond_type):
    error_tol = 1e-12

    univariate_variables = [uniform(), beta(3, 3)]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)

    c = np.array([10, 0.00])
    model = GenzFunction("oscillatory",
                         variable.num_vars(),
                         c=c,
                         w=np.zeros_like(c))
    # model.set_coefficients(4,'exponential-decay')

    validation_samples = generate_independent_random_samples(
        var_trans.variable, int(1e3))
    validation_values = model(validation_samples)

    errors = []
    num_samples = []

    def callback(pce):
        error = compute_l2_error(validation_samples, validation_values, pce)
        errors.append(error)
        num_samples.append(pce.samples.shape[1])

    candidate_samples = -np.cos(
        np.random.uniform(0, np.pi, (var_trans.num_vars(), int(1e4))))
    pce = AdaptiveLejaPCE(var_trans.num_vars(),
                          candidate_samples,
                          factorization_type='fast')
    if precond_type == 'density':

        def precond_function(basis_matrix, samples):
            trans_samples = var_trans.map_from_canonical_space(samples)
            vals = np.ones(samples.shape[1])
            for ii in range(len(univariate_variables)):
                rv = univariate_variables[ii]
                vals *= np.sqrt(rv.pdf(trans_samples[ii, :]))
            return vals
    elif precond_type == 'christoffel':
        precond_function = chistoffel_preconditioning_function
    else:
        raise ValueError(f'Preconditioner: {precond_type} not supported')
    pce.set_preconditioning_function(precond_function)

    max_level = np.inf
    max_level_1d = [max_level] * pce.num_vars

    admissibility_function = partial(max_level_admissibility_function,
                                     max_level, max_level_1d, max_num_samples,
                                     error_tol)

    growth_rule = partial(constant_increment_growth_rule, 2)
    #growth_rule = clenshaw_curtis_rule_growth
    pce.set_function(model, var_trans)
    pce.set_refinement_functions(variance_pce_refinement_indicator,
                                 admissibility_function, growth_rule)

    while (not pce.active_subspace_queue.empty()
           or pce.subspace_indices.shape[1] == 0):
        pce.refine()
        pce.recompute_active_subspace_priorities()
        if callback is not None:
            callback(pce)

    from pyapprox.sparse_grid import plot_sparse_grid_2d
    plot_sparse_grid_2d(pce.samples, np.ones(pce.samples.shape[1]),
                        pce.pce.indices, pce.subspace_indices)

    plt.figure()
    plt.loglog(num_samples, errors, 'o-')
    plt.show()
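
The density preconditioner above returns the square root of the joint density at each sample; in a least-squares context such weights are applied row-wise to the basis matrix and the values. A generic sketch of that weighting (an assumption about how the preconditioner is consumed, not pyapprox's internal code):

import numpy as np

def preconditioned_lstsq(basis_matrix, values, precond_weights):
    # scale each row (sample) by its preconditioning weight before
    # solving the least-squares problem
    coef, *_ = np.linalg.lstsq(
        precond_weights[:, None]*basis_matrix,
        precond_weights*values, rcond=None)
    return coef

A = np.random.normal(0, 1, (20, 5))
y = A @ np.arange(5.0)
w = np.ones(20)  # uniform weights recover ordinary least squares
assert np.allclose(preconditioned_lstsq(A, y, w), np.arange(5.0))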
Example #6
    def test_exponential_quartic(self):
        # set random seed, so the data is reproducible each time
        np.random.seed(2)

        univariate_variables = [uniform(-2, 4), uniform(-2, 4)]
        plot_range = np.asarray([-1, 1, -1, 1])*2
        variables = IndependentMultivariateRandomVariable(
            univariate_variables)

        loglike = ExponentialQuarticLogLikelihoodModel()
        loglike = PYMC3LogLikeWrapper(loglike, loglike.gradient)

        # number of draws from the distribution
        ndraws = 500
        # number of "burn-in points" (which we'll discard)
        nburn = min(1000, int(ndraws*0.1))
        # number of parallel chains
        njobs = 4

        def unnormalized_posterior(x):
            # avoid use of pymc3 wrapper which only evaluates samples 1 at
            # a time
            vals = np.exp(loglike.loglike(x))
            rvs = variables.all_variables()
            for ii in range(variables.num_vars()):
                vals[:, 0] *= rvs[ii].pdf(x[ii, :])
            return vals

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x *= 2  # map nodes from [-1, 1] to [-2, 2]
            return x, w
        x, w = get_tensor_product_quadrature_rule(
            100, variables.num_vars(), univariate_quadrature_rule)
        evidence = unnormalized_posterior(x)[:, 0].dot(w)

        exact_mean = (x*unnormalized_posterior(x)[:, 0]).dot(w)/evidence

        algorithm = 'nuts'
        # algorithm = 'smc'
        samples, effective_sample_size, map_sample = \
            run_bayesian_inference_gaussian_error_model(
                loglike, variables, ndraws, nburn, njobs,
                algorithm=algorithm, get_map=True, print_summary=True,
                loglike_grad=loglike.gradient, seed=2)

        # from pyapprox.visualization import get_meshgrid_function_data
        # import matplotlib
        # X,Y,Z = get_meshgrid_function_data(
        #     lambda x: unnormalized_posterior(x)/evidence, plot_range, 50)
        # plt.contourf(
        #     X, Y, Z, levels=np.linspace(Z.min(),Z.max(),30),
        #     cmap=matplotlib.cm.coolwarm)
        # plt.plot(samples[0,:],samples[1,:],'ko')
        # plt.show()
        
        print('MCMC mean error', samples.mean(axis=1) - exact_mean)
        print('MAP sample', map_sample)
        print('exact mean', exact_mean.squeeze())
        print('MCMC mean', samples.mean(axis=1))
        assert np.allclose(map_sample, np.zeros((variables.num_vars(), 1)))
        # tolerance 3e-2 can be exceeded for certain random runs
        assert np.allclose(
            exact_mean.squeeze(), samples.mean(axis=1), atol=3e-2)
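
The exact_mean reference above is computed by quadrature: integrate the unnormalized posterior to get the evidence, then divide the first moment by the evidence. A one-dimensional sketch with a stand-in quartic log-likelihood (exp(-x**4) is a hypothetical choice, not necessarily what ExponentialQuarticLogLikelihoodModel evaluates; scipy's uniform(-2, 4) is the uniform distribution on [-2, 2], density 1/4):

import numpy as np
from numpy.polynomial.legendre import leggauss

x, w = leggauss(100)
x, w = 2*x, 2*w  # map the rule from [-1, 1] to [-2, 2]
# unnormalized posterior: stand-in quartic likelihood times the
# uniform prior density 1/4 on [-2, 2]
post = np.exp(-x**4)*0.25
evidence = post.dot(w)
posterior_mean = (x*post).dot(w)/evidence
assert abs(posterior_mean) < 1e-10  # symmetric posterior has zero mean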