Example #1
    def test_modified_chebyshev(self):
        nterms = 10
        alpha_stat, beta_stat = 2, 2
        probability_measure = True
        # using scipy to compute moments is extremely slow
        # moments = [stats.beta.moment(n,alpha_stat,beta_stat,loc=-1,scale=2)
        #           for n in range(2*nterms)]
        quad_x, quad_w = gauss_jacobi_pts_wts_1D(4 * nterms, beta_stat - 1,
                                                 alpha_stat - 1)

        true_ab = jacobi_recurrence(nterms,
                                    alpha=beta_stat - 1,
                                    beta=alpha_stat - 1,
                                    probability=probability_measure)

        ab = modified_chebyshev_orthonormal(nterms, [quad_x, quad_w],
                                            get_input_coefs=None,
                                            probability=True)
        assert np.allclose(true_ab, ab)

        get_input_coefs = partial(jacobi_recurrence,
                                  alpha=beta_stat - 2,
                                  beta=alpha_stat - 2)
        ab = modified_chebyshev_orthonormal(nterms, [quad_x, quad_w],
                                            get_input_coefs=get_input_coefs,
                                            probability=True)
        assert np.allclose(true_ab, ab)
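The commented-out lines above note that computing Beta moments with scipy is extremely slow; the same moments can be recovered from the Gauss-Jacobi rule itself. A minimal sketch, assuming (as the examples imply) that the returned weights are normalized to the Beta probability measure on [-1, 1]; the exact import path varies between pyapprox versions:

import numpy as np
from scipy import stats
# import path may differ across pyapprox versions
from pyapprox.univariate_quadrature import gauss_jacobi_pts_wts_1D

alpha_stat, beta_stat = 2, 2
# nodes/weights of the Beta(2, 2) probability measure on [-1, 1]
x, w = gauss_jacobi_pts_wts_1D(30, beta_stat - 1, alpha_stat - 1)
for n in range(1, 6):
    quad_moment = (x**n).dot(w)
    true_moment = stats.beta.moment(n, alpha_stat, beta_stat, loc=-1, scale=2)
    assert np.allclose(quad_moment, true_moment)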
Example #2
def oscillatory_genz_pdf(c, w1, values):
    nvars = c.shape[0]
    x, w = gauss_jacobi_pts_wts_1D(100, 0, 0)
    x = (x + 1) / 2  # scale from [-1, 1] to [0, 1]
    pdf1 = partial(uniform.pdf, loc=2 * np.pi * w1, scale=c[0])
    # scale the nodes by c[ii] so each rule represents c[ii]*X_ii
    quad_rules = [[c[ii] * x, w] for ii in range(1, nvars)]
    conv_pdf = partial(sum_of_independent_random_variables_pdf,
                       pdf1, quad_rules)

    # samples = np.random.uniform(0,1,(nvars,10000))
    # Y = np.sum(c[:,np.newaxis]*samples,axis=0)+w1*np.pi*2
    # plt.hist(Y,bins=100,density=True)
    # zz = np.linspace(Y.min(),Y.max(),100)
    # plt.plot(zz,conv_pdf(zz))
    # plt.show()

    # approximate cos(x) by its truncated Taylor series
    # cos(x) = sum_n (-1)^n x^(2n) / (2n)!
    N = 20
    lb, ub = 2 * np.pi * w1, c.sum() + 2 * np.pi * w1
    nonzero_coef = [1] + [
        (-1)**n / factorial(2 * n) for n in range(1, N + 1)]
    coef = np.zeros(2 * N + 2)
    coef[::2] = nonzero_coef
    z_pdf_vals = get_pdf_from_monomial_expansion(
        coef, lb, ub, conv_pdf, values[:, 0])
    return z_pdf_vals
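A hypothetical usage sketch (the coefficient vector c, shift w1, and evaluation points are made up; numpy and the helpers used inside the function must be in scope):

c = np.array([0.5, 0.3, 0.2])  # hypothetical Genz coefficients
w1 = 0.25                      # hypothetical shift parameter
# points at which to evaluate the density of the oscillatory Genz output
values = np.linspace(-0.99, 0.99, 51)[:, None]
pdf_vals = oscillatory_genz_pdf(c, w1, values)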
Example #3
    def test_sum_of_independent_uniform_and_gaussian_variables(self):
        lb,ub=1,3
        mu,sigma=0.,0.25
        uniform_dist = stats.uniform(loc=lb, scale=ub-lb)
        normal_dist = stats.norm(loc=mu, scale=sigma)  

        pdf1 = uniform_dist.pdf
        pdf2 = normal_dist.pdf

        zz = np.linspace(-3,3,100)
        # using Gauss-Hermite quadrature does not work because it applies
        # polynomial quadrature to a discontinuous integrand (the uniform PDF)
        # x, w = gauss_hermite_pts_wts_1D(100)
        # x = x*sigma + mu  # scale from standard normal
        # conv_pdf = sum_of_independent_random_variables_pdf(
        #     pdf1, [[x, w]], zz)
        
        # but since the normal PDF is smooth, integrating in the reverse
        # order works well
        x,w = gauss_jacobi_pts_wts_1D(100,0,0)
        x = x+2 #scale from [-1,1] to [1,3]
        conv_pdf = sum_of_independent_random_variables_pdf(
            pdf2,[[x,w]],zz)

        plt.plot(zz, pdf1(zz), label='Uniform')
        plt.plot(zz, pdf2(zz), label='Gaussian')
        plt.plot(zz,conv_pdf, label='Sum')
        plt.legend(loc='best')
        plt.suptitle('PDFs')
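The idea being tested is a convolution evaluated by quadrature: the density of Z = X + Y is E_X[pdf_Y(z - X)], and because the normal pdf is smooth the quadrature converges quickly. A self-contained numpy sketch of the same computation (not pyapprox's implementation; numpy's Gauss-Legendre rule is renormalized so the weights represent the uniform probability measure on [1, 3]):

import numpy as np
from scipy import stats

lb, ub = 1, 3
mu, sigma = 0., 0.25
x, w = np.polynomial.legendre.leggauss(100)
x = (x + 1) / 2 * (ub - lb) + lb  # map nodes from [-1, 1] to [lb, ub]
w = w / w.sum()                   # weights of the uniform measure on [lb, ub]

zz = np.linspace(-3, 6, 200)
# density of Z = X + Y via E_X[pdf_Y(z - X)]
conv_pdf = np.array([stats.norm(mu, sigma).pdf(z - x).dot(w) for z in zz])
assert np.allclose(np.trapz(conv_pdf, zz), 1, atol=1e-2)  # integrates to one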
Example #4
    def test_christoffel_function(self):
        num_vars = 1
        degree = 2
        alpha_poly = 0
        beta_poly = 0
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1,2),num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars,degree,1.0)
        poly.set_indices(indices)

        num_samples = 11
        samples = np.linspace(-1.,1.,num_samples)[np.newaxis,:]
        basis_matrix = poly.basis_matrix(samples)
        true_weights = 1./np.linalg.norm(basis_matrix, axis=1)**2
        weights = 1./christoffel_function(samples, poly.basis_matrix)
        assert weights.shape[0] == num_samples
        assert np.allclose(true_weights, weights)

        # For a p-point Gaussian quadrature rule, which exactly integrates
        # all polynomials up to and including degree 2p-1, the quadrature
        # weights are the reciprocal of the Christoffel function evaluated
        # at the quadrature samples
        quad_samples, quad_weights = gauss_jacobi_pts_wts_1D(
            degree, alpha_poly, beta_poly)
        quad_samples = quad_samples[np.newaxis, :]
        weights = 1./christoffel_function(quad_samples, poly.basis_matrix)
        assert np.allclose(weights, quad_weights)
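The identity behind the second assertion is classical: the weights of a p-point Gaussian rule equal the reciprocal Christoffel function built from the orthonormal polynomials of degree < p (the test's basis also includes degree p, but that polynomial vanishes at the quadrature nodes, so the weights are unchanged). A standalone numpy check using Legendre polynomials orthonormal with respect to the uniform probability measure on [-1, 1]:

import numpy as np
from numpy.polynomial import legendre

p = 5  # number of quadrature points
x, w = legendre.leggauss(p)
w = w / 2  # renormalize to the uniform probability measure on [-1, 1]

# Christoffel function lambda(x) = sum_n (sqrt(2n+1) P_n(x))**2, n < p
lam = np.zeros_like(x)
for n in range(p):
    cn = np.zeros(n + 1)
    cn[n] = 1
    lam += (2*n + 1) * legendre.legval(x, cn)**2

assert np.allclose(1/lam, w)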
Example #5
    def test_predictor_corrector_function_of_independent_variables(self):
        """
        Test 1: Sum of Gaussians is a Gaussian

        Test 2: Product of uniforms on [0,1]
        """
        nvars, nterms = 2, 5
        variables = [stats.norm(0, 1)] * nvars

        nquad_samples_1d = 50
        quad_rules = [gauss_hermite_pts_wts_1D(nquad_samples_1d)] * nvars

        def fun(x):
            return x.sum(axis=0)

        ab = predictor_corrector_function_of_independent_variables(
            nterms, quad_rules, fun)

        rv = stats.norm(0, np.sqrt(nvars))
        lb, ub = rv.interval(1)
        interval_size = rv.interval(0.99)[1] - rv.interval(0.99)[0]
        ab_full = predictor_corrector(nterms, rv.pdf, lb, ub, interval_size)
        assert np.allclose(ab_full, ab)

        nvars = 2

        def measure(x):
            return ((-1)**(nvars - 1) * np.log(x)**(nvars - 1)
                    / factorial(nvars - 1))

        def fun(x):
            return x.prod(axis=0)

        quad_opts = {'verbose': 0, 'atol': 1e-6, 'rtol': 1e-6}
        ab_full = predictor_corrector(nterms, measure, 0, 1, 1, quad_opts)
        xx, ww = gauss_jacobi_pts_wts_1D(nquad_samples_1d, 0, 0)
        xx = (xx + 1) / 2
        quad_rules = [(xx, ww)] * nvars
        ab = predictor_corrector_function_of_independent_variables(
            nterms, quad_rules, fun)
        assert np.allclose(ab_full, ab)
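Test 1's reference distribution can be sanity-checked directly; the sum of nvars independent standard normals is N(0, sqrt(nvars)). A quick Monte Carlo sketch:

import numpy as np

nvars = 2
samples = np.random.normal(0, 1, (nvars, 100000)).sum(axis=0)
assert abs(samples.std() - np.sqrt(nvars)) < 0.02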
Example #6
    def test_sum_of_independent_uniform_variables(self):
        lb1, ub1 = [0, 2]
        lb2, ub2 = [10, 13]
        pdfs = [stats.uniform(lb1, ub1 - lb1).pdf]

        zz = np.linspace(lb1 + lb2, ub1 + ub2, 100)
        x, w = gauss_jacobi_pts_wts_1D(200, 0, 0)
        x = (x + 1) / 2 * (ub2 - lb2) + lb2  # map to [lb2,ub2]
        quad_rules = [[x, w]]
        product_pdf = sum_of_independent_random_variables_pdf(
            pdfs[0], quad_rules, zz)

        true_pdf = partial(sum_two_uniform_variables, [lb1, ub1, lb2, ub2])
        # plt.plot(zz, true_pdf(zz), label='True PDF')
        # plt.plot(zz, product_pdf, '--', label='Approx PDF')
        # nsamples = 10000
        # vals = np.random.uniform(lb1, ub1, nsamples) + np.random.uniform(
        #     lb2, ub2, nsamples)
        # plt.hist(vals, bins=100, density=True)
        # plt.legend(); plt.show()
        # print(np.linalg.norm(true_pdf(zz) - product_pdf, ord=np.inf))
        assert np.linalg.norm(true_pdf(zz) - product_pdf, ord=np.inf) < 0.03
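The commented-out Monte Carlo check can be made self-contained: the sum of two independent uniforms has a trapezoidal density (the convolution of two boxes), so no pyapprox helpers are needed. A sketch, assuming the first interval is the narrower one:

import numpy as np

lb1, ub1, lb2, ub2 = 0, 2, 10, 13
nsamples = 200000
vals = (np.random.uniform(lb1, ub1, nsamples)
        + np.random.uniform(lb2, ub2, nsamples))
hist, edges = np.histogram(vals, bins=50, density=True)
zz = (edges[:-1] + edges[1:]) / 2
# trapezoidal density: overlap length of the two boxes divided by the
# product of their widths
wmin = min(ub1 - lb1, ub2 - lb2)
true_pdf = np.clip(np.minimum(np.minimum(zz - (lb1 + lb2), wmin),
                              (ub1 + ub2) - zz), 0, None) \
    / ((ub1 - lb1) * (ub2 - lb2))
assert np.abs(hist - true_pdf).max() < 0.05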
Example #7
    def test_predictor_corrector_product_of_functions_of_independent_variables(
            self):
        nvars, nterms = 3, 4

        def measure(x):
            return ((-1)**(nvars - 1) * np.log(x)**(nvars - 1)
                    / factorial(nvars - 1))

        def fun(x):
            return x.prod(axis=0)

        nquad_samples_1d = 20
        xx, ww = gauss_jacobi_pts_wts_1D(nquad_samples_1d, 0, 0)
        xx = (xx + 1) / 2
        quad_rules = [(xx, ww)] * nvars
        funs = [lambda x: x] * nvars
        ab = predictor_corrector_product_of_functions_of_independent_variables(
            nterms, quad_rules, funs)

        quad_opts = {'verbose': 3, 'atol': 1e-5, 'rtol': 1e-5}
        ab_full = predictor_corrector(nterms, measure, 0, 1, 1, quad_opts)

        assert np.allclose(ab, ab_full, atol=1e-5, rtol=1e-5)
Example #8
    def test_product_of_independent_uniform_variables(self):
        nvars = 3
        pdfs = [stats.uniform(0, 1).pdf] * nvars

        # transformation not defined at 0
        zz = np.linspace(1e-1, 1, 100)
        x, w = gauss_jacobi_pts_wts_1D(200, 0, 0)
        x = (x + 1) / 2  # map to [0,1]
        quad_rules = [[x, w]] * (nvars - 1)
        product_pdf = product_of_independent_random_variables_pdf(
            pdfs[0], quad_rules, zz)

        if nvars == 2:
            true_pdf = -np.log(zz)
        if nvars == 3:
            true_pdf = 0.5 * np.log(zz)**2
        # for ii in range(nvars):
        #     plt.plot(zz, pdfs[ii](zz), label=r'$X_%d$' % ii)
        # plt.plot(zz, true_pdf, label='True Product')
        # plt.plot(zz, product_pdf, '--', label='Approx Product')
        # plt.legend()
        # plt.show()
        # print(np.linalg.norm(true_pdf - product_pdf, ord=np.inf))
        assert np.linalg.norm(true_pdf - product_pdf, ord=np.inf) < 0.03
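The closed-form densities used above follow from the standard result that a product of n independent U(0, 1) variables has density (-log z)^(n-1)/(n-1)!. A Monte Carlo sketch confirming the nvars = 3 case away from the singularity at zero:

import numpy as np
from scipy.special import factorial

nvars = 3
samples = np.random.uniform(0, 1, (nvars, 500000)).prod(axis=0)
hist, edges = np.histogram(samples, bins=100, density=True)
zz = (edges[:-1] + edges[1:]) / 2
true_pdf = (-np.log(zz))**(nvars - 1) / factorial(nvars - 1)
mask = zz > 0.1  # the density blows up as z -> 0
assert np.abs(hist[mask] - true_pdf[mask]).max() < 0.15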
Example #9
def univariate_quadrature_rule(n):
    x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
    x = random_var_trans.map_from_canonical_space(
        x[np.newaxis, :])[0, :]
    return x, w
Example #10
def univariate_quadrature_rule(n):
    x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
    x = (x + 1) / 2.  # map from [-1, 1] to [0, 1]
    return x, w
Example #11
def uniform_univariate_quadrature_rule(n):
    x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
    x = (x + 1.) / 2.  # map from [-1, 1] to [0, 1]
    return x, w
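Each of these helpers affinely maps the canonical rule on [-1, 1] and leaves the weights untouched, which is valid because the weights already represent a probability measure. A quick numpy check of the mapped rule against the moments of U(0, 1) (numpy's leggauss weights are renormalized to sum to one, an assumption made to match the normalization the examples imply):

import numpy as np

x, w = np.polynomial.legendre.leggauss(20)
w = w / 2        # probability weights on [-1, 1]
x = (x + 1) / 2  # map nodes to [0, 1]

assert np.allclose(w.sum(), 1)
assert np.allclose(x.dot(w), 0.5)        # E[X] of U(0, 1)
assert np.allclose((x**2).dot(w), 1/3)   # E[X^2] of U(0, 1)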
Example #12
def univariate_quadrature_rule(n):
    x, w = gauss_jacobi_pts_wts_1D(n, alpha_poly, beta_poly)
    x = (x + 1) / 2  # map from [-1, 1] to [0, 1]
    return x, w
Example #13
    if ii not in [0, 2]:
        opts = {'rv_type': name, 'shapes': shapes,
                'var_nums': re_variable.unique_variable_indices[ii]}
        basis_opts['basis%d' % ii] = opts
        continue

    # identity_map_indices += re_variable.unique_variable_indices[ii]  # wrong
    identity_map_indices += list(re_variable.unique_variable_indices[ii])  # right

    quad_rules = []
    inds = index_product[cnt]
    nquad_samples_1d = 50

    for jj in inds:
        a, b = variable.all_variables()[jj].interval(1)
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, 0, 0)
        x = (x+1)/2 # map to [0, 1]
        x = (b-a)*x+a # map to [a,b]
        quad_rules.append((x, w))
    funs = [identity_fun]*len(inds)
    basis_opts['basis%d' % ii] = {'poly_type': 'product_indpnt_vars',
                                  'var_nums': [ii], 'funs': funs,
                                  'quad_rules': quad_rules}
    cnt += 1
        
poly_opts = {'var_trans': re_var_trans}
poly_opts['poly_types'] = basis_opts
#var_trans.set_identity_maps(identity_map_indices) #wrong
re_var_trans.set_identity_maps(identity_map_indices) #right

indices = compute_hyperbolic_indices(re_variable.num_vars(), degree)
Example #14
    def test_pce_product_of_beta_variables(self):
        def fun(x):
            return np.sqrt(x.prod(axis=0))[:, None]

        dist_alpha1, dist_beta1 = 1, 1
        dist_alpha2, dist_beta2 = dist_alpha1 + 0.5, dist_beta1
        nvars = 2

        x_1d, w_1d = [], []
        nquad_samples_1d = 100
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, dist_beta1 - 1,
                                       dist_alpha1 - 1)
        x = (x + 1) / 2
        x_1d.append(x)
        w_1d.append(w)
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, dist_beta2 - 1,
                                       dist_alpha2 - 1)
        x = (x + 1) / 2
        x_1d.append(x)
        w_1d.append(w)

        quad_samples = cartesian_product(x_1d)
        quad_weights = outer_product(w_1d)

        mean = fun(quad_samples)[:, 0].dot(quad_weights)
        variance = (fun(quad_samples)[:, 0]**2).dot(quad_weights) - mean**2
        assert np.allclose(mean, beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(variance,
                           beta(dist_alpha1 * 2, dist_beta1 * 2).var())

        degree = 10
        poly = PolynomialChaosExpansion()
        # the distributions and ranges of the univariate variables are
        # ignored when var_trans.set_identity_maps([0]) is used
        univariate_variables = [uniform(0, 1)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        # the following means samples are not mapped
        var_trans.set_identity_maps([0])
        quad_rules = [(x, w) for x, w in zip(x_1d, w_1d)]
        poly.configure({
            'poly_types': {
                0: {
                    'poly_type': 'function_indpnt_vars',
                    'var_nums': [0],
                    'fun': fun,
                    'quad_rules': quad_rules
                }
            },
            'var_trans': var_trans
        })
        from pyapprox.indexing import tensor_product_indices
        poly.set_indices(tensor_product_indices([degree]))

        train_samples = (np.linspace(0, np.pi, 101)[None, :] + 1) / 2
        train_vals = train_samples.T
        coef = np.linalg.lstsq(poly.basis_matrix(train_samples),
                               train_vals,
                               rcond=None)[0]
        poly.set_coefficients(coef)
        assert np.allclose(poly.mean(),
                           beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(poly.variance(),
                           beta(dist_alpha1 * 2, dist_beta1 * 2).var())

        poly = PolynomialChaosExpansion()
        # the distributions and ranges of the univariate variables are
        # ignored when var_trans.set_identity_maps([0]) is used
        univariate_variables = [uniform(0, 1)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        # the following means samples are not mapped
        var_trans.set_identity_maps([0])
        funs = [lambda x: np.sqrt(x)] * nvars
        quad_rules = [(x, w) for x, w in zip(x_1d, w_1d)]
        poly.configure({
            'poly_types': {
                0: {
                    'poly_type': 'product_indpnt_vars',
                    'var_nums': [0],
                    'funs': funs,
                    'quad_rules': quad_rules
                }
            },
            'var_trans': var_trans
        })
        poly.set_indices(tensor_product_indices([degree]))

        train_samples = (np.linspace(0, np.pi, 101)[None, :] + 1) / 2
        train_vals = train_samples.T
        coef = np.linalg.lstsq(poly.basis_matrix(train_samples),
                               train_vals,
                               rcond=None)[0]
        poly.set_coefficients(coef)
        assert np.allclose(poly.mean(),
                           beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(poly.variance(),
                           beta(dist_alpha1 * 2, dist_beta1 * 2).var())
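The identity the test leans on is that sqrt(X1*X2), with X1 ~ Beta(1, 1) and X2 ~ Beta(1.5, 1), has the mean and variance of Beta(2, 2); the moments factor across the independent variables. A Monte Carlo sketch of that claim:

import numpy as np
from scipy import stats

x1 = stats.beta(1, 1).rvs(500000)
x2 = stats.beta(1.5, 1).rvs(500000)
y = np.sqrt(x1 * x2)
ref = stats.beta(2, 2)
assert abs(y.mean() - ref.mean()) < 1e-2
assert abs(y.var() - ref.var()) < 1e-2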
Example #15
def univariate_quadrature_rule(n):
    x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
    x *= 2  # scale from [-1, 1] to [-2, 2]
    return x, w
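An equivalent construction with numpy, checked against the second moment of U(-2, 2), i.e. (ub - lb)**2/12 = 4/3 (again assuming probability-normalized weights):

import numpy as np

x, w = np.polynomial.legendre.leggauss(10)
w = w / 2   # probability weights on [-1, 1]
x *= 2      # stretch [-1, 1] to [-2, 2]
assert np.allclose((x**2).dot(w), 4/3)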