def test_christoffel_function(self):
    """Check the christoffel function of a degree-2 Legendre expansion.

    The reciprocal christoffel function must equal the inverse squared
    row norms of the basis matrix, and evaluated at Gauss quadrature
    points it must reproduce the quadrature weights.
    """
    num_vars = 1
    degree = 2
    # Jacobi parameters of the Legendre weight on [-1, 1]
    alpha_poly = 0
    beta_poly = 0
    poly = PolynomialChaosExpansion()
    var_trans = define_iid_random_variable_transformation(
        stats.uniform(-1, 2), num_vars)
    opts = define_poly_options_from_variable_transformation(var_trans)
    poly.configure(opts)
    indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
    poly.set_indices(indices)

    num_samples = 11
    samples = np.linspace(-1., 1., num_samples)[np.newaxis, :]
    basis_matrix = poly.basis_matrix(samples)
    # reciprocal christoffel function = 1 / sum of squared basis values
    true_weights = 1./np.linalg.norm(basis_matrix, axis=1)**2
    weights = 1./christoffel_function(samples, poly.basis_matrix)
    assert weights.shape[0] == num_samples
    assert np.allclose(true_weights, weights)

    # For a Gaussian quadrature rule of degree p that exactly
    # integrates all polynomials up to and including degree 2p-1
    # the quadrature weights are the christoffel function
    # evaluated at the quadrature samples
    quad_samples, quad_weights = gauss_jacobi_pts_wts_1D(
        degree, alpha_poly, beta_poly)
    quad_samples = quad_samples[np.newaxis, :]
    weights = 1./christoffel_function(quad_samples, poly.basis_matrix)
    assert np.allclose(weights, quad_weights)
# Example #2
def random_induced_measure_sampling(num_samples, num_vars,
                                    basis_matrix_generator,
                                    probability_density, proposal_density,
                                    generate_proposal_samples,
                                    envelope_factor):
    """
    Draw independent samples from the induced measure.

    Parameters
    ----------
    num_samples : integer
        The number of samples to draw.

    num_vars : integer
        The number of random variables.

    basis_matrix_generator : callable
        Function returning the PCE basis evaluated at a set of samples;
        passed through to :func:`christoffel_function`.

    probability_density : callable
        Density of the base probability measure evaluated at samples.

    proposal_density : callable
        Density of the proposal distribution used for rejection sampling.

    generate_proposal_samples : callable
        Function drawing a requested number of samples from the proposal
        distribution.

    envelope_factor : float
        Scaling so the proposal density dominates the target density
        everywhere (required by rejection sampling).

    Returns
    -------
    samples : np.ndarray (num_vars, num_samples)
        Samples from the induced measure.

    Notes
    -----
    Unlike fekete sampling, leja sampling, discrete_induced_sampling, and
    generate_induced_sampling this function should use
    basis_matrix_generator=pce.basis_matrix here. If using
    pce.canonical_basis_matrix then densities must be mapped to this
    space also which can be difficult.
    """
    # PEP 8 (E731): named def instead of assigning a lambda
    def target_density(x):
        # induced density: base density times normalized christoffel function
        return probability_density(x)*christoffel_function(
            x, basis_matrix_generator, normalize=True)

    samples = rejection_sampling(target_density,
                                 proposal_density,
                                 generate_proposal_samples,
                                 envelope_factor,
                                 num_vars,
                                 num_samples,
                                 verbose=False)

    return samples
    def test_random_christoffel_sampling(self):
        """Sample the induced measure of a total-degree PCE of two IID
        Beta variables and compare the sample mean with a tensor-product
        quadrature estimate of the induced mean."""
        nvars, degree = 2, 10

        alpha_poly, beta_poly = 1, 1
        # scipy Beta shape parameters matching the Jacobi weight
        alpha_stat, beta_stat = beta_poly + 1, alpha_poly + 1

        nsamples = int(1e4)
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), nvars)
        poly.configure(
            define_poly_options_from_variable_transformation(var_trans))
        poly.set_indices(compute_hyperbolic_indices(nvars, degree, 1.0))

        marginal_pdf = partial(stats.beta.pdf, a=alpha_stat, b=beta_stat)
        joint_density = partial(tensor_product_pdf,
                                univariate_pdfs=marginal_pdf)

        envelope_factor = 10

        def generate_proposal_samples(n):
            # uniform proposal on the unit hypercube
            return np.random.uniform(0., 1., size=(nvars, n))

        def proposal_density(x):
            return np.ones(x.shape[1])

        # unlike fekete and leja sampling can and should use
        # pce.basis_matrix here. If use canonical_basis_matrix then
        # densities must be mapped to this space also which can be difficult
        samples = random_induced_measure_sampling(
            nsamples, nvars, poly.basis_matrix, joint_density,
            proposal_density, generate_proposal_samples, envelope_factor)

        def univariate_quadrature_rule(nn):
            # Gauss-Jacobi rule mapped from [-1, 1] to [0, 1]
            pts, wts = gauss_jacobi_pts_wts_1D(nn, alpha_poly, beta_poly)
            return (pts + 1) / 2, wts

        x, w = get_tensor_product_quadrature_rule(
            2 * degree + 1, nvars, univariate_quadrature_rule)
        # the normalized christoffel function must integrate to one
        assert np.allclose(
            christoffel_function(x, poly.basis_matrix, True).dot(w), 1.0)
        assert np.allclose(x.dot(w), samples.mean(axis=1), atol=1e-2)
# Example #4
    def test_discrete_induced_sampling(self):
        """Compare three ways of sampling the induced measure of a PCE of
        two discrete variables: generate_induced_samples,
        discrete_induced_sampling, and rejection sampling. The sample
        means of all three must match the induced mean computed with
        discrete quadrature.

        NOTE(review): np.random.seed(1) is reset before each sampler, so
        statement order here is significant for reproducibility.
        """
        degree = 3

        # first variable: equal masses at geometrically spaced locations
        nmasses1 = 10
        mass_locations1 = np.geomspace(1.0, 512.0, num=nmasses1)
        #mass_locations1 = np.arange(0,nmasses1)
        masses1 = np.ones(nmasses1, dtype=float) / nmasses1
        var1 = float_rv_discrete(name='float_rv_discrete',
                                 values=(mass_locations1, masses1))()

        # second variable: geometrically increasing masses at 0..nmasses2-1
        nmasses2 = 10
        mass_locations2 = np.arange(0, nmasses2)
        # if increased beyond 16 the unmodified basis becomes ill conditioned
        masses2 = np.geomspace(1.0, 16.0, num=nmasses2)
        #masses2  = np.ones(nmasses2,dtype=float)/nmasses2

        masses2 /= masses2.sum()
        var2 = float_rv_discrete(name='float_rv_discrete',
                                 values=(mass_locations2, masses2))()

        var_trans = AffineRandomVariableTransformation([var1, var2])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(pce.num_vars(), degree, 1.0)
        pce.set_indices(indices)

        # method 1: sample the induced measure directly in canonical space
        num_samples = int(1e4)
        np.random.seed(1)
        canonical_samples = generate_induced_samples(pce, num_samples)
        samples = var_trans.map_from_canonical_space(canonical_samples)

        # method 2: discrete induced sampling on canonically mapped masses
        np.random.seed(1)
        canonical_xk = [
            2 * get_distribution_info(var1)[2]['xk'] - 1,
            2 * get_distribution_info(var2)[2]['xk'] - 1
        ]
        basis_matrix_generator = partial(basis_matrix_generator_1d, pce,
                                         degree)
        canonical_samples1 = discrete_induced_sampling(
            basis_matrix_generator, pce.indices, canonical_xk,
            [var1.dist.pk, var2.dist.pk], num_samples)
        samples1 = var_trans.map_from_canonical_space(canonical_samples1)

        def density(x):
            # joint pmf of the two independent discrete variables
            return var1.pdf(x[0, :]) * var2.pdf(x[1, :])

        envelope_factor = 30

        def generate_proposal_samples(n):
            samples = np.vstack([var1.rvs(n), var2.rvs(n)])
            return samples

        proposal_density = density

        # unlike fekete and leja sampling can and should use
        # pce.basis_matrix here. If use canonical_basis_matrix then
        # densities must be mapped to this space also which can be difficult
        samples2 = random_induced_measure_sampling(num_samples, pce.num_vars(),
                                                   pce.basis_matrix, density,
                                                   proposal_density,
                                                   generate_proposal_samples,
                                                   envelope_factor)

        def induced_density(x):
            # base density weighted by the normalized christoffel function
            vals = density(x) * christoffel_function(x, pce.basis_matrix, True)
            return vals

        from pyapprox.utilities import cartesian_product, outer_product
        from pyapprox.polynomial_sampling import christoffel_function
        # tensor grid over the discrete supports with product weights
        quad_samples = cartesian_product([var1.dist.xk, var2.dist.xk])
        quad_weights = outer_product([var1.dist.pk, var2.dist.pk])

        #print(canonical_samples.min(axis=1),canonical_samples.max(axis=1))
        #print(samples.min(axis=1),samples.max(axis=1))
        #print(canonical_samples1.min(axis=1),canonical_samples1.max(axis=1))
        #print(samples1.min(axis=1),samples1.max(axis=1))
        # import matplotlib.pyplot as plt
        # plt.plot(quad_samples[0,:],quad_samples[1,:],'s')
        # plt.plot(samples[0,:],samples[1,:],'o')
        # plt.plot(samples1[0,:],samples1[1,:],'*')
        # plt.show()

        rtol = 1e-2
        assert np.allclose(quad_weights, density(quad_samples))
        assert np.allclose(density(quad_samples).sum(), 1)
        # normalized christoffel function must integrate to one
        assert np.allclose(
            christoffel_function(quad_samples, pce.basis_matrix,
                                 True).dot(quad_weights), 1.0)
        true_induced_mean = quad_samples.dot(induced_density(quad_samples))
        print(true_induced_mean)
        print(samples.mean(axis=1))
        print(samples1.mean(axis=1))
        print(samples2.mean(axis=1))
        print(
            samples1.mean(axis=1) - true_induced_mean,
            true_induced_mean * rtol)
        #print(samples2.mean(axis=1))
        assert np.allclose(samples.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples1.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples2.mean(axis=1), true_induced_mean, rtol=rtol)
# Example #5
 def induced_density(x):
     """Evaluate the induced density: the base density multiplied by the
     normalized christoffel function of the PCE basis."""
     return density(x) * christoffel_function(x, pce.basis_matrix, True)
# Example #6
    def test_least_interpolation_lu_equivalence_in_1d(self):
        """Verify orthogonal least interpolation selects the same points
        and factors as a preconditioned truncated pivoted LU
        factorization for a 1D Beta variable."""
        num_vars = 1
        alpha_stat = 2
        beta_stat = 5
        max_num_pts = 100

        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat), num_vars)
        # Jacobi parameters corresponding to the Beta(alpha_stat, beta_stat)
        # density
        pce_opts = {'alpha_poly': beta_stat-1, 'beta_poly': alpha_stat-1,
                    'var_trans': var_trans, 'poly_type': 'jacobi'}

        # Set oli options
        oli_opts = {'verbosity': 0,
                    'assume_non_degeneracy': False}

        def basis_generator(num_vars, degree):
            # number of new basis terms and their multi-indices at degree
            return (degree+1, compute_hyperbolic_level_indices(
                num_vars, degree, 1.0))

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)

        from pyapprox.indexing import get_total_degree
        max_degree = get_total_degree(num_vars, max_num_pts)
        indices = compute_hyperbolic_indices(num_vars, max_degree, 1.)
        pce.set_indices(indices)

        from pyapprox.polynomial_sampling import christoffel_function

        def preconditioning_function(samples):
            # weights are the reciprocal of the christoffel function
            return 1./christoffel_function(samples, pce.basis_matrix)

        oli_solver.set_preconditioning_function(preconditioning_function)
        oli_solver.set_basis_generator(basis_generator)

        initial_pts = None
        candidate_samples = np.linspace(0., 1., 1000)[np.newaxis, :]

        oli_solver.factorize(
            candidate_samples, initial_pts,
            num_selected_pts=max_num_pts)

        oli_samples = oli_solver.get_current_points()

        from pyapprox.utilities import truncated_pivoted_lu_factorization
        pce.set_indices(oli_solver.selected_basis_indices)
        basis_matrix = pce.basis_matrix(candidate_samples)
        weights = np.sqrt(preconditioning_function(candidate_samples))
        basis_matrix = np.dot(np.diag(weights), basis_matrix)
        L, U, p = truncated_pivoted_lu_factorization(
            basis_matrix, max_num_pts)
        assert p.shape[0] == max_num_pts
        lu_samples = candidate_samples[:, p]

        # both methods must select identical points
        assert np.allclose(lu_samples, oli_samples)

        L1, U1, H1 = oli_solver.get_current_LUH_factors()

        # both factorizations must reproduce the permuted weighted matrix
        true_permuted_matrix = (pce.basis_matrix(lu_samples).T*weights[p]).T
        assert np.allclose(np.dot(L, U), true_permuted_matrix)
        assert np.allclose(np.dot(L1, np.dot(U1, H1)), true_permuted_matrix)
    def help_discrete_induced_sampling(self, var1, var2, envelope_factor):
        """Compare three ways of sampling the induced measure of a
        degree-3 PCE of two discrete variables: generate_induced_samples,
        discrete_induced_sampling, and rejection sampling. The sample
        means of all three must match the induced mean computed with
        discrete quadrature.

        NOTE(review): np.random.seed(1) is reset before each sampler, so
        statement order here is significant for reproducibility.
        """
        degree = 3

        var_trans = AffineRandomVariableTransformation([var1, var2])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(pce.num_vars(), degree, 1.0)
        pce.set_indices(indices)

        # method 1: sample the induced measure directly in canonical space
        num_samples = int(3e4)
        np.random.seed(1)
        canonical_samples = generate_induced_samples(pce, num_samples)
        samples = var_trans.map_from_canonical_space(canonical_samples)

        # method 2: discrete induced sampling on canonically mapped masses
        np.random.seed(1)
        xk = np.array([
            get_probability_masses(var)[0]
            for var in var_trans.variable.all_variables()
        ])
        pk = np.array([
            get_probability_masses(var)[1]
            for var in var_trans.variable.all_variables()
        ])
        canonical_xk = var_trans.map_to_canonical_space(xk)
        basis_matrix_generator = partial(basis_matrix_generator_1d, pce,
                                         degree)
        canonical_samples1 = discrete_induced_sampling(basis_matrix_generator,
                                                       pce.indices,
                                                       canonical_xk, pk,
                                                       num_samples)
        samples1 = var_trans.map_from_canonical_space(canonical_samples1)

        def univariate_pdf(var, x):
            # use the continuous pdf when available, otherwise the pmf.
            # (the original contained an unreachable manual pmf lookup
            # after the return; it has been removed)
            if hasattr(var.dist, 'pdf'):
                return var.pdf(x)
            return var.pmf(x)

        def density(x):
            # some issue with native scipy.pmf
            #assert np.allclose(var1.pdf(x[0, :]),var1.pmf(x[0, :]))
            return univariate_pdf(var1, x[0, :]) * univariate_pdf(
                var2, x[1, :])

        def generate_proposal_samples(n):
            samples = np.vstack([var1.rvs(n), var2.rvs(n)])
            return samples

        proposal_density = density

        # unlike fekete and leja sampling can and should use
        # pce.basis_matrix here. If use canonical_basis_matrix then
        # densities must be mapped to this space also which can be difficult
        samples2 = random_induced_measure_sampling(num_samples, pce.num_vars(),
                                                   pce.basis_matrix, density,
                                                   proposal_density,
                                                   generate_proposal_samples,
                                                   envelope_factor)

        def induced_density(x):
            # base density weighted by the normalized christoffel function
            vals = density(x) * christoffel_function(x, pce.basis_matrix, True)
            return vals

        from pyapprox.utilities import cartesian_product, outer_product
        from pyapprox.polynomial_sampling import christoffel_function
        # tensor grid over the discrete supports with product weights
        quad_samples = cartesian_product([xk[0], xk[1]])
        quad_weights = outer_product([pk[0], pk[1]])

        rtol = 1e-2
        assert np.allclose(quad_weights, density(quad_samples))
        assert np.allclose(density(quad_samples).sum(), 1)
        # normalized christoffel function must integrate to one
        assert np.allclose(
            christoffel_function(quad_samples, pce.basis_matrix,
                                 True).dot(quad_weights), 1.0)
        true_induced_mean = quad_samples.dot(induced_density(quad_samples))
        assert np.allclose(samples.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples1.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples2.mean(axis=1), true_induced_mean, rtol=rtol)
# Example #8
    # Target for rejection sampling: the base probability density weighted
    # by the normalized christoffel function (the induced-measure density).
    def target_density(x): return probability_density(x) *\
        christoffel_function(x, basis_matrix_generator, normalize=True)

    samples = rejection_sampling(
# Example #9
 def preconditioning_function(samples):
     # weights are the reciprocal of the christoffel function of pce
     return 1. / christoffel_function(samples, pce.basis_matrix)
 # same reciprocal-christoffel weights, for a different poly object
 def precond_func(samples): return 1./christoffel_function(
     samples, poly.basis_matrix)
 samples, data_structures = get_oli_leja_samples(