Example #1
    def test_fekete_gauss_lobatto(self):
        num_vars = 1
        degree = 3
        num_candidate_samples = 10000
        generate_candidate_samples = lambda n: np.linspace(
            -1., 1., n)[np.newaxis, :]

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        precond_func = lambda matrix, samples: 0.25*np.ones(matrix.shape[0])
        samples, _ = get_fekete_samples(
            poly.basis_matrix, generate_candidate_samples,
            num_candidate_samples, preconditioning_function=precond_func)
        assert samples.shape[1] == degree+1

        # The samples should be close to the Gauss-Lobatto samples
        gauss_lobatto_samples = np.asarray(
            [-1.0, -0.447213595499957939281834733746,
             0.447213595499957939281834733746, 1.0])
        assert np.allclose(np.sort(samples), gauss_lobatto_samples, atol=1e-1)
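
As a side note, the interior Gauss-Lobatto nodes quoted above are exactly +/-1/sqrt(5): the four-point Gauss-Lobatto rule consists of the interval endpoints plus the roots of P_3'(x). A quick standalone check with plain NumPy (not part of the original test):

import numpy as np

# P_3(x) = (5x**3 - 3x)/2, so P_3'(x) = (15x**2 - 3)/2 vanishes at
# x = +/-1/sqrt(5)
assert np.allclose(1/np.sqrt(5), 0.447213595499957939281834733746)
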
Example #2
    def setup(self, num_vars, alpha_stat, beta_stat):
        def univariate_weight_function(x):
            return beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)

        def univariate_weight_function_deriv(x):
            return beta_pdf_derivative(alpha_stat, beta_stat, (x + 1) / 2) / 4

        weight_function = partial(evaluate_tensor_product_function,
                                  [univariate_weight_function] * num_vars)

        weight_function_deriv = partial(
            gradient_of_tensor_product_function,
            [univariate_weight_function] * num_vars,
            [univariate_weight_function_deriv] * num_vars)

        assert np.allclose((univariate_weight_function(0.5 + 1e-6) -
                            univariate_weight_function(0.5)) / 1e-6,
                           univariate_weight_function_deriv(0.5),
                           atol=1e-6)

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-2, 1), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        return weight_function, weight_function_deriv, poly
Example #3
    def test_christoffel_function(self):
        num_vars = 1
        degree = 2
        alpha_poly = 0
        beta_poly = 0
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_samples = 11
        samples = np.linspace(-1., 1., num_samples)[np.newaxis, :]
        basis_matrix = poly.basis_matrix(samples)
        true_weights = 1./np.linalg.norm(basis_matrix, axis=1)**2
        weights = 1./christoffel_function(samples, poly.basis_matrix)
        assert weights.shape[0] == num_samples
        assert np.allclose(true_weights, weights)

        # For a p-point Gauss quadrature rule, which exactly integrates
        # all polynomials up to and including degree 2p-1, the quadrature
        # weights are the Christoffel function evaluated at the quadrature
        # samples
        quad_samples, quad_weights = gauss_jacobi_pts_wts_1D(
            degree, alpha_poly, beta_poly)
        quad_samples = quad_samples[np.newaxis, :]
        basis_matrix = poly.basis_matrix(quad_samples)
        weights = 1./christoffel_function(quad_samples, poly.basis_matrix)
        assert np.allclose(weights, quad_weights)
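
The identity asserted above can also be verified without pyapprox: for an n-point Gauss-Legendre rule, the weights (normalized to the uniform probability measure on [-1, 1]) equal the reciprocal of the sum of squares of the first n orthonormal Legendre polynomials at the nodes. A minimal sketch using only NumPy:

import numpy as np
from numpy.polynomial.legendre import Legendre, leggauss

n = 3
x, w = leggauss(n)
w = w/2  # normalize: the uniform probability density on [-1, 1] is 1/2
# orthonormal Legendre basis wrt U[-1, 1] is sqrt(2k+1)*P_k, so the
# Christoffel-type sum is sum_k (2k+1)*P_k(x)**2
basis_sq = sum((2*k+1)*Legendre.basis(k)(x)**2 for k in range(n))
assert np.allclose(w, 1/basis_sq)
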
Example #4
    def test_compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
            self):
        num_vars = 2
        degree = 3
        # rv_params = [[6,2],[2,6]]
        rv_params = [[1, 1]]
        leja_basename = None
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, leja_basename)

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-1, 2), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.configure(poly_opts)
        poly.set_indices(indices)

        num_mixtures = len(rv_params)
        mixture_univariate_growth_rules = [leja_growth_rule] * num_mixtures
        grammian_matrix = \
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
                poly.basis_matrix, indices,
                mixture_univariate_quadrature_rules,
                mixture_univariate_growth_rules, num_vars)

        assert (np.all(np.isfinite(grammian_matrix)))

        if num_mixtures == 1:
            II = np.where(abs(grammian_matrix) > 1e-8)
            # check that the only non-zero inner products are on the
            # diagonal, i.e. integrals of basis functions with themselves
            assert np.allclose(II, np.tile(np.arange(indices.shape[1]),
                                           (2, 1)))
Example #5
    def test_uniform_2d_subset_of_points(self):
        # ------------------------------------------------------- #
        # x in U[0,1]^2                                           #
        # no initial pts, no candidate basis, no preconditioning, #
        # no pivot weights, YES return subset of points           #
        # ------------------------------------------------------- #

        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}
        basis_generator = \
            lambda num_vars, degree: (degree+1, compute_hyperbolic_level_indices(
                num_vars, degree, 1.0))

        # define target function
        def model(x):
            return np.asarray([x[0]**2 + x[1]**2 + x[0] * x[1]]).T

        # define points to interpolate
        pts = get_tensor_product_points(1, var_trans, 'CC')
        helper_least_factorization(pts,
                                   model,
                                   var_trans,
                                   pce_opts,
                                   oli_opts,
                                   basis_generator,
                                   max_num_pts=6,
                                   exact_mean=11. / 12.)
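
The exact_mean passed above follows directly from E[x_i^2] = 1/3 and E[x_0*x_1] = E[x_0]E[x_1] = 1/4 for independent U[0, 1] inputs, giving 1/3 + 1/3 + 1/4 = 11/12. A quick Monte Carlo sanity check of that constant:

import numpy as np

x = np.random.uniform(0., 1., (2, 100000))
vals = x[0]**2 + x[1]**2 + x[0]*x[1]
# sample mean agrees with 11/12 up to roughly 1/sqrt(100000)
print(vals.mean(), 11./12.)
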
Example #6
    def test_multivariate_sampling_jacobi(self):

        num_vars = 2
        degree = 2
        alph = 1
        bet = 1.
        univ_inv = partial(idistinv_jacobi, alph=alph, bet=bet)
        num_samples = 10
        indices = np.ones((2, num_samples), dtype=int) * degree
        indices[1, :] = degree - 1
        xx = np.tile(
            np.linspace(0.01, 0.99, num_samples)[np.newaxis, :],
            (num_vars, 1))
        samples = univ_inv(xx, indices)

        var_trans = AffineRandomVariableTransformation(
            [beta(bet + 1, alph + 1, -1, 2),
             beta(bet + 1, alph + 1, -1, 2)])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)

        reference_samples = inverse_transform_sampling_1d(
            pce.var_trans.variable.unique_variables[0],
            pce.recursion_coeffs[0], degree, xx[0, :])
        # differences are just caused by different tolerances in the
        # optimizers used to find the roots of the CDF
        assert np.allclose(reference_samples, samples[0, :], atol=1e-7)
        reference_samples = inverse_transform_sampling_1d(
            pce.var_trans.variable.unique_variables[0],
            pce.recursion_coeffs[0], degree - 1, xx[0, :])
        assert np.allclose(reference_samples, samples[1, :], atol=1e-7)
Example #7
    def test_sample_based_apc_orthonormality(self):
        num_vars = 1
        alpha_stat = 2
        beta_stat = 5
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(0, 1), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        num_moment_samples = 10000
        moment_matrix_samples = generate_independent_random_samples(
            random_var_trans.variable, num_moment_samples)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_from_samples, samples=moment_matrix_samples)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)

        num_samples = 10000
        samples = generate_independent_random_samples(
            random_var_trans.variable, num_samples)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)
        basis_matrix = pce.basis_matrix(samples)
        assert np.allclose(np.dot(basis_matrix.T, basis_matrix) / num_samples,
                           np.eye(basis_matrix.shape[1]),
                           atol=1e-1)
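
The final assert is a Monte Carlo orthonormality check: for samples drawn from the basis' own measure, B^T B / nsamples converges to the identity. The same idea can be illustrated with a plain Legendre basis and NumPy only:

import numpy as np
from numpy.polynomial.legendre import Legendre

n, nsamples = 3, 100000
x = np.random.uniform(-1, 1, nsamples)
# orthonormal Legendre basis wrt U[-1, 1]
B = np.stack([np.sqrt(2*k+1)*Legendre.basis(k)(x) for k in range(n)],
             axis=1)
# approaches the identity with O(1/sqrt(nsamples)) error
print(B.T.dot(B)/nsamples)
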
Example #8
def analyze_sensitivity_sparse_grid(
        fun,
        univariate_variables,
        max_nsamples=100,
        tol=0,
        verbose=False,
        max_order=2,
        univariate_quad_rule_info=None,
        refinement_indicator=variance_refinement_indicator):
    sparse_grid = approximate_sparse_grid(
        fun,
        univariate_variables,
        max_nsamples=max_nsamples,
        tol=tol,
        verbose=verbose,
        univariate_quad_rule_info=univariate_quad_rule_info,
        refinement_indicator=refinement_indicator)
    pce_opts = define_poly_options_from_variable_transformation(
        sparse_grid.variable_transformation)
    pce = convert_sparse_grid_to_polynomial_chaos_expansion(
        sparse_grid, pce_opts)
    pce_main_effects, pce_total_effects = \
        get_main_and_total_effect_indices_from_pce(
            pce.get_coefficients(), pce.get_indices())

    interaction_terms, pce_sobol_indices = get_sobol_indices(
        pce.get_coefficients(), pce.get_indices(), max_order=max_order)

    return SensitivityResult({
        'main_effects': pce_main_effects,
        'total_effects': pce_total_effects,
        'sobol_indices': pce_sobol_indices,
        'sobol_interaction_indices': interaction_terms
    })
Example #9
def preconditioned_barycentric_weights():
    nmasses = 20
    xk = np.array(range(nmasses), dtype='float')
    pk = np.ones(nmasses) / nmasses
    var1 = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))()
    univariate_variables = [var1]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)
    growth_rule = partial(constant_increment_growth_rule, 2)
    quad_rule = get_univariate_leja_quadrature_rule(var1, growth_rule)
    samples = quad_rule(3)[0]
    num_samples = samples.shape[0]
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-5
    poly.configure(poly_opts)
    poly.set_indices(np.arange(num_samples))

    # precond_weights = np.sqrt(
    #    (poly.basis_matrix(samples[np.newaxis,:])**2).mean(axis=1))
    precond_weights = np.ones(num_samples)

    bary_weights = compute_barycentric_weights_1d(
        samples, interval_length=samples.max() - samples.min())

    def barysum(x, y, w, f):
        # numerator/denominator sums of the second barycentric formula:
        # sum_j w_j*f_j/(x - y_j)
        x = x[:, np.newaxis]
        y = y[np.newaxis, :]
        temp = w * f / (x - y)
        return np.sum(temp, axis=1)

    def function(x):
        return np.cos(2 * np.pi * x)

    y = samples
    print(samples)
    w = precond_weights * bary_weights
    # x = np.linspace(-3,3,301)
    x = np.linspace(-1, 1, 301)
    f = function(y) / precond_weights

    # the barycentric formula cannot be evaluated at the interpolation
    # nodes themselves (division by zero), so remove them
    II = []
    for ii, xx in enumerate(x):
        if xx in samples:
            II.append(ii)
    x = np.delete(x, II)

    r1 = barysum(x, y, w, f)
    r2 = barysum(x, y, w, 1 / precond_weights)
    interp_vals = r1 / r2
    # import matplotlib.pyplot as plt
    # plt.plot(x, interp_vals, 'k')
    # plt.plot(samples, function(samples), 'ro')
    # plt.plot(x, function(x), 'r--')
    # plt.plot(samples,function(samples),'ro')
    # print(num_samples)
    # print(precond_weights)
    print(np.linalg.norm(interp_vals - function(x)))
Example #10
    def set_polynomial_chaos_expansion(self, pce=None):
        if pce is None:
            poly_opts = define_poly_options_from_variable_transformation(
                self.variable_transformation)
            self.pce = PolynomialChaosExpansion()
            self.pce.configure(poly_opts)
        else:
            self.pce = pce
Example #11
    def test_compute_moment_matrix_using_tensor_product_quadrature(self):
        """
        Test use of density_function in
        compute_moment_matrix_using_tensor_product_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        def univariate_pdf(x):
            return stats.beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        def uniform_univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x = (x + 1.) / 2.
            return x, w

        true_univariate_quadrature_rule = partial(gauss_jacobi_pts_wts_1D,
                                                  alpha_poly=beta_stat - 1,
                                                  beta_poly=alpha_stat - 1)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=uniform_univariate_quadrature_rule,
            density_function=density_function)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1,
            num_vars,
            true_univariate_quadrature_rule,
            transform_samples=random_var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        apc = APC(compute_moment_matrix_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)

        apc_basis_matrix = apc.basis_matrix(samples)

        # print(np.dot(apc_basis_matrix.T*weights,apc_basis_matrix))
        assert np.allclose(
            np.dot(apc_basis_matrix.T * weights, apc_basis_matrix),
            np.eye(apc_basis_matrix.shape[1]))
Example #12
def analyze_sensitivity_sparse_grid(sparse_grid, max_order=2):
    r"""
    Compute sensitivity indices from a sparse grid
    by converting it to a polynomial chaos expansion

    Parameters
    ----------
    sparse_grid : :class:`pyapprox.adaptive_sparse_grid.CombinationSparseGrid`
       The sparse grid

    max_order : integer
        The maximum interaction order of the Sobol indices to compute. A
        value of 2 will compute all pairwise interactions and a value of 3
        will compute indices for all interactions involving 3 variables.
        The number of indices returned will be
        nchoosek(nvars+max_order, nvars). Warning: when nvars is large the
        number of indices will increase rapidly with max_order.

    Returns
    -------
    result : :class:`pyapprox.sensitivity_analysis.SensitivityResult`
         Result object with the following attributes

    main_effects : np.ndarray (nvars)
        The variance based main effect sensitivity indices

    total_effects : np.ndarray (nvars)
        The variance based total effect sensitivity indices

    sobol_indices : np.ndarray (nchoosek(nvars+max_order,nvars),nqoi)
        The variance based Sobol sensitivity indices

    sobol_interaction_indices : np.ndarray(nvars,nchoosek(nvars+max_order,nvars))
        Indices specifying the variables in each interaction in
        ``sobol_indices``

    pce : :class:`multivariate_polynomials.PolynomialChaosExpansion`
       The PCE representation of the sparse grid ``sparse_grid``
    """
    pce_opts = define_poly_options_from_variable_transformation(
        sparse_grid.variable_transformation)
    pce = convert_sparse_grid_to_polynomial_chaos_expansion(
        sparse_grid, pce_opts)
    pce_main_effects, pce_total_effects =\
        get_main_and_total_effect_indices_from_pce(
            pce.get_coefficients(), pce.get_indices())

    interaction_terms, pce_sobol_indices = get_sobol_indices(
        pce.get_coefficients(), pce.get_indices(), max_order=max_order)

    return SensitivityResult(
        {'main_effects': pce_main_effects,
         'total_effects': pce_total_effects,
         'sobol_indices': pce_sobol_indices,
         'sobol_interaction_indices': interaction_terms,
         'pce': pce})
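
A minimal usage sketch, assuming sparse_grid is an already-adapted CombinationSparseGrid (e.g. built with approximate_sparse_grid as in Example #8):

# hypothetical: `sparse_grid` built elsewhere for a function of nvars inputs
result = analyze_sensitivity_sparse_grid(sparse_grid, max_order=2)
# per the docstring, `result` carries 'main_effects', 'total_effects',
# 'sobol_indices', 'sobol_interaction_indices' and the underlying 'pce'
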
Example #13
    def test_beta_2d_preconditioning(self):
        """
        Interpolate a set of points using preconditioing. First select
        all initial points then adding a subset of the remaining points.

        x in Beta(2,5)[0,1]^2
        """

        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat, -1, 2), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        basis_generator = \
            lambda num_vars, degree: (degree+1, compute_hyperbolic_level_indices(
                num_vars, degree, 1.0))

        # from scipy.special import beta as beta_fn
        # def beta_pdf(x,alpha_poly,beta_poly):
        #     values = (1.-x)**(alpha_poly) * (1.+x)**(beta_poly)
        #     values /= 2.**(beta_poly+alpha_poly+1)*beta_fn(
        #         beta_poly+1,alpha_poly+1)
        #     return values
        # univariate_pdf = partial(beta_pdf,alpha_poly=beta_stat-1,beta_poly=alpha_stat-1)

        univariate_beta_pdf = partial(beta.pdf, a=alpha_stat, b=beta_stat)

        def univariate_pdf(x):
            return univariate_beta_pdf((x + 1.) / 2.) / 2.

        preconditioning_function = partial(tensor_product_pdf,
                                           univariate_pdfs=univariate_pdf)

        # define target function
        def model(x):
            return np.asarray([(x[0]**2 - 1) + (x[1]**2 - 1) + x[0] * x[1]]).T

        # define points to interpolate
        pts = generate_independent_random_samples(var_trans.variable, 12)
        initial_pts = np.array([pts[:, 0]]).T

        helper_least_factorization(
            pts,
            model,
            var_trans,
            pce_opts,
            oli_opts,
            basis_generator,
            initial_pts=initial_pts,
            max_num_pts=12,
            preconditioning_function=preconditioning_function)
Example #14
    def test_lu_leja_interpolation(self):
        num_vars = 2
        degree = 15

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        # candidates must be generated in canonical PCE space
        num_candidate_samples = 10000
        def generate_candidate_samples(n): return np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))

        # must use canonical_basis_matrix to generate basis matrix
        num_leja_samples = indices.shape[1]-1
        def precond_func(matrix, samples): return christoffel_weights(matrix)
        samples, data_structures = get_lu_leja_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(samples)

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure the coefficients produce an interpolant
        coef = interpolate_lu_leja_samples(samples, values, data_structures)

        # Ignore basis functions (columns) that were not considered during the
        # incomplete LU factorization
        poly.set_indices(poly.indices[:, :num_leja_samples])
        poly.set_coefficients(coef)

        assert np.allclose(poly(samples), values)

        quad_w = get_quadrature_weights_from_lu_leja_samples(
            samples, data_structures)
        values_at_quad_x = values[:, 0]

        # will get closer if degree is increased
        # print (np.dot(values_at_quad_x,quad_w),genz_function.integrate())
        assert np.allclose(
            np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
            atol=1e-4)
Example #15
    def test_analytical_moment_based_apc_orthonormality_identity(self):
        """
        Test that when the correct orthonormal basis is used and integrated
        using quadrature that the rotation matrix is the identity. Test sets
        user domain to be different to canonical domain
        """
        num_vars = 1
        alpha_stat = 1
        beta_stat = 1
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = random_var_trans.map_from_canonical_space(
                x[np.newaxis, :])[0, :]
            return x, w

        # Test qr factorization to compute rotation matrix
        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        assert np.allclose(pce.R_inv, np.eye(pce.R_inv.shape[0]))

        # Test cholesky factorization to compute rotation matrix
        def compute_grammian_function(basis_matrix_function, indices):
            num_samples = 10 * degree
            basis_matrix = \
                compute_moment_matrix_using_tensor_product_quadrature(
                    basis_matrix_function, num_samples, num_vars,
                    univariate_quadrature_rule=univariate_quadrature_rule)
            return basis_matrix.T.dot(basis_matrix)

        pce_chol = APC(compute_grammian_function=compute_grammian_function)
        pce_chol.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce_chol.set_indices(indices)

        assert np.allclose(pce_chol.R_inv, np.eye(pce_chol.R_inv.shape[0]))
Example #16
    def test_get_unrotated_basis_coefficients(self):
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        def univariate_pdf(x):
            return stats.beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        def uniform_univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x = (x + 1.) / 2.
            return x, w

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = (x + 1.) / 2.
            return x, w

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=uniform_univariate_quadrature_rule,
            density_function=density_function)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # set pce coefficients randomly
        pce.coefficients = np.random.normal(0., 1., (indices.shape[1], 1))

        unrotated_basis_coefficients = compute_coefficients_of_unrotated_basis(
            pce.coefficients, pce.R_inv)

        num_samples = 10
        samples = np.random.uniform(0., 1., (num_vars, num_samples))
        true_values = pce(samples)
        values = np.dot(pce.unrotated_basis_matrix(samples),
                        unrotated_basis_coefficients)
        assert np.allclose(values, true_values)
Example #17
    def test_random_christoffel_sampling(self):
        num_vars = 2
        degree = 10

        alpha_poly = 1
        beta_poly = 1

        alpha_stat = beta_poly + 1
        beta_stat = alpha_poly + 1

        num_samples = int(1e4)
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_pdf = partial(stats.beta.pdf, a=alpha_stat, b=beta_stat)
        probability_density = partial(tensor_product_pdf,
                                      univariate_pdfs=univariate_pdf)

        envelope_factor = 10

        def generate_proposal_samples(n):
            return np.random.uniform(0., 1., size=(num_vars, n))

        def proposal_density(x):
            return np.ones(x.shape[1])

        # Unlike Fekete and Leja sampling, we can and should use
        # poly.basis_matrix here. If canonical_basis_matrix were used, the
        # densities would also have to be mapped to the canonical space,
        # which can be difficult
        samples = random_induced_measure_sampling(
            num_samples, num_vars, poly.basis_matrix, probability_density,
            proposal_density, generate_proposal_samples, envelope_factor)

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, alpha_poly, beta_poly)
            x = (x + 1) / 2
            return x, w

        x, w = get_tensor_product_quadrature_rule(degree * 2 + 1, num_vars,
                                                  univariate_quadrature_rule)
        # print(samples.mean(axis=1),x.dot(w))
        assert np.allclose(
            christoffel_function(x, poly.basis_matrix, True).dot(w), 1.0)
        assert np.allclose(x.dot(w), samples.mean(axis=1), atol=1e-2)
Example #18
    def test_uniform_2d_degenerate_initial_and_subset_points(self):
        """
        Interpolate a set of points by first selecting all initial points,
        which are degenerate, then adding a subset of the remaining points.

        CHECK: Orthogonal least interpolation produces an interpolant but does
        not approximate the function exactly.

        x in U[0,1]^2
        """

        num_vars = 2
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {
            'verbosity': 0,
            'assume_non_degeneracy': False,
            'enforce_all_initial_points_used': True,
            'enforce_ordering_of_initial_points': True
        }

        basis_generator = \
            lambda num_vars, degree: (degree+1, compute_hyperbolic_level_indices(
                num_vars, degree, 1.0))

        # define target function
        def model(x):
            return np.asarray([
                0.5 * (3 * x[0]**2 - 1) + 0.5 * (3 * x[1]**2 - 1) + x[0] * x[1]
            ]).T

        # define points to interpolate
        pts = get_tensor_product_points(2, var_trans, 'CC')
        initial_pts = get_tensor_product_points(1, var_trans, 'CC')
        self.assertRaises(Exception,
                          helper_least_factorization,
                          pts,
                          model,
                          var_trans,
                          pce_opts,
                          oli_opts,
                          basis_generator,
                          initial_pts=initial_pts,
                          max_num_pts=12,
                          use_preconditioning=1)
Example #19
def get_total_degree_polynomials(univariate_variables, degrees):
    assert type(univariate_variables[0]) == list
    assert len(univariate_variables) == len(degrees)
    polys, nparams = [], []
    for ii in range(len(degrees)):
        poly = PolynomialChaosExpansion()
        var_trans = AffineRandomVariableTransformation(
            univariate_variables[ii])
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        indices = compute_hyperbolic_indices(var_trans.num_vars(), degrees[ii],
                                             1.0)
        poly.set_indices(indices)
        polys.append(poly)
        nparams.append(indices.shape[1])
    return polys, np.array(nparams)
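
A hypothetical call, constructing two total-degree Legendre expansions in two uniform variables (degrees 2 and 3). The scipy variable list mirrors the other examples on this page:

from scipy import stats

polys, nparams = get_total_degree_polynomials(
    [[stats.uniform(0, 1), stats.uniform(0, 1)]]*2, [2, 3])
# total-degree bases have nchoosek(nvars+degree, nvars) terms
print(nparams)  # [6, 10]
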
Example #20
    def test_oli_leja_interpolation(self):
        num_vars = 2
        degree = 5

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_candidate_samples = 10000

        # oli_leja requires candidates in the user space, so map the
        # canonical Chebyshev candidates from [-1, 1] to [0, 1]
        def generate_candidate_samples(n): return (np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))+1)/2.

        num_leja_samples = indices.shape[1]-3
        def precond_func(samples): return 1./christoffel_function(
            samples, poly.basis_matrix)
        samples, data_structures = get_oli_leja_samples(
            poly, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)

        assert samples.max() <= 1 and samples.min() >= 0.

        # c = np.random.uniform(0., 1., num_vars)
        # c *= 20/c.sum()
        # w = np.zeros_like(c)
        # w[0] = np.random.uniform(0., 1., 1)
        # genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        # values = genz_function(samples)
        # exact_integral = genz_function.integrate()

        values = np.sum(samples**2, axis=0)[:, None]
        # exact_integral = num_vars/3

        # Ensure we have produced an interpolant
        oli_solver = data_structures[0]
        poly = oli_solver.get_current_interpolant(samples, values)
        assert np.allclose(poly(samples), values)
Example #21
    def test_fekete_interpolation(self):
        num_vars = 2
        degree = 15

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        # candidates must be generated in canonical PCE space
        num_candidate_samples = 10000
        generate_candidate_samples = lambda n: np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))

        # must use canonical_basis_matrix to generate basis matrix
        precond_func = lambda matrix, samples: christoffel_weights(matrix)
        samples, data_structures = get_fekete_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(samples)

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure the coefficients produce an interpolant
        coef = interpolate_fekete_samples(samples, values, data_structures)
        poly.set_coefficients(coef)
        assert np.allclose(poly(samples), values)

        quad_w = get_quadrature_weights_from_fekete_samples(
            samples, data_structures)
        values_at_quad_x = values[:, 0]
        # increase degree if a smaller atol is desired
        assert np.allclose(
            np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
            atol=1e-4)
Example #22
def compute_mean_and_variance_sparse_grid(sparse_grid, max_order=2):
    """
    Compute the mean and variance of a sparse_grid by converting it to 
    a polynomial chaos expansion

    Parameters
    ----------
    sparse_grid : :class:`pyapprox.adaptive_sparse_grid.CombinationSparseGrid`
       The sparse grid

    Returns
    -------
    result : :class:`pyapprox.quadrature.QuadratureResult`
        Result object with the following attributes

    mean : np.ndarray (nqoi)
        The mean of each quantity of interest

    variance : np.ndarray (nqoi)
        The variance of each quantity of interest

    pce : :class:`multivariate_polynomials.PolynomialChaosExpansion`
       The PCE representation of the sparse grid ``sparse_grid``
    """
    from pyapprox.multivariate_polynomials import \
        define_poly_options_from_variable_transformation
    from pyapprox.adaptive_sparse_grid import \
        convert_sparse_grid_to_polynomial_chaos_expansion
    pce_opts = define_poly_options_from_variable_transformation(
        sparse_grid.variable_transformation)
    pce = convert_sparse_grid_to_polynomial_chaos_expansion(
        sparse_grid, pce_opts)

    return QuadratureResult({
        'mean': pce.mean(),
        'variance': pce.variance(),
        'pce': pce
    })
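
A minimal usage sketch, again assuming an already-built CombinationSparseGrid:

# hypothetical: `sparse_grid` built elsewhere
result = compute_mean_and_variance_sparse_grid(sparse_grid)
# per the docstring, result holds 'mean', 'variance' and the 'pce' itself
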
Example #23
    def test_compute_grammian_using_sparse_grid_quadrature(self):
        """
        Test compute_grammian_of_mixture_models_using_sparse_grid_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        rv_params = [[alpha_stat, beta_stat]]
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, None)

        compute_grammian_function = partial(
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature,
            mixture_univariate_quadrature_rules=(
                mixture_univariate_quadrature_rules),
            mixture_univariate_growth_rules=[leja_growth_rule],
            num_vars=num_vars)

        pce = APC(compute_grammian_function=compute_grammian_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # use Gauss quadrature for true distribution to integrate APC basis
        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            return x, w

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rule)

        basis_matrix = pce.basis_matrix(samples)
        # print (np.dot(basis_matrix.T*weights,basis_matrix))
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           np.eye(basis_matrix.shape[1]))
Example #24
    def test_solve_linear_system_method(self):
        num_vars = 1
        alpha_stat = 2
        beta_stat = 2
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = (x + 1) / 2.
            return x, w

        poly_moments = \
            compute_polynomial_moments_using_tensor_product_quadrature(
                pce.basis_matrix, 2*degree, num_vars,
                univariate_quadrature_rule)

        R_inv = compute_rotation_from_moments_linear_system(poly_moments)

        R_inv_gs = compute_rotation_from_moments_gram_schmidt(poly_moments)
        assert np.allclose(R_inv, R_inv_gs)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        apc = APC(compute_moment_matrix_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)
        assert np.allclose(R_inv, apc.R_inv)
Example #25
    def test_oli_leja_interpolation(self):
        num_vars = 2
        degree = 5

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        # oli_leja requires candidates in the user space, so map the
        # canonical Chebyshev candidates from [-1, 1] to [0, 1]
        num_candidate_samples = 10000
        generate_candidate_samples = lambda n: (np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))+1)/2.

        num_leja_samples = indices.shape[1]-1
        precond_func = lambda samples: 1./christoffel_function(
            samples, poly.basis_matrix)
        samples, data_structures = get_oli_leja_samples(
            poly, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure we have produced an interpolant
        oli_solver = data_structures[0]
        poly = oli_solver.get_current_interpolant(samples, values)
        assert np.allclose(poly(samples), values)
Example #26
    def test_compute_moment_matrix_using_tensor_product_quadrature(self):
        """
        Test use of density_function in
        compute_moment_matrix_using_tensor_product_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)
        pce_opts["truncation_tol"] = 1e-5

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = (x + 1) / 2.
            return x, w

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        pce = FPC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rule)

        basis_matrix = pce.basis_matrix(samples)
        # print(np.dot(basis_matrix.T*weights, basis_matrix))
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           np.eye(basis_matrix.shape[1]))
Example #27
    def test_adaptive_multivariate_sampling_jacobi(self):

        num_vars = 2
        degree = 6
        alph = 5
        bet = 5.

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, -1, 3)],
                                                  [np.arange(num_vars)]))
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, 1, 1.0)
        pce.set_indices(indices)
        cond_tol = 1e2
        samples = generate_induced_samples_migliorati_tolerance(pce, cond_tol)

        for dd in range(2, degree):
            num_prev_samples = samples.shape[1]
            new_indices = compute_hyperbolic_level_indices(num_vars, dd, 1.)
            samples = increment_induced_samples_migliorati(
                pce, cond_tol, samples, indices, new_indices)
            indices = np.hstack((indices, new_indices))
            pce.set_indices(indices)
            new_samples = samples[:, num_prev_samples:]
            prev_samples = samples[:, :num_prev_samples]
            #fig,axs = plt.subplots(1,2,figsize=(2*8,6))
            #from pyapprox.visualization import plot_2d_indices
            #axs[0].plot(prev_samples[0,:],prev_samples[1,:],'ko');
            #axs[0].plot(new_samples[0,:],new_samples[1,:],'ro');
            #plot_2d_indices(indices,other_indices=new_indices,ax=axs[1]);
            #plt.show()

        samples = var_trans.map_from_canonical_space(samples)
        cond = compute_preconditioned_basis_matrix_condition_number(
            pce.basis_matrix, samples)
        assert cond < cond_tol
Example #28
    def test_multivariate_migliorati_sampling_jacobi(self):

        num_vars = 1
        degree = 20
        alph = 5
        bet = 5.
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, -1, 2)],
                                                  [np.arange(num_vars)]))
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)

        cond_tol = 1e1
        samples = generate_induced_samples_migliorati_tolerance(pce, cond_tol)
        cond = compute_preconditioned_basis_matrix_condition_number(
            pce.canonical_basis_matrix, samples)
        assert cond < cond_tol
Example #29
    def test_uniform_3d_user_domain(self):
        # ------------------------------------------------------- #
        # x in U[0,1]^3                                           #
        # no initial pts, no candidate basis, no preconditioning, #
        # no pivot weights, no return subset of points            #
        # ------------------------------------------------------- #

        # Set PCE options
        num_vars = 3
        var_trans = define_iid_random_variable_transformation(
            uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        basis_generator = \
            lambda num_vars, degree: (degree+1, compute_hyperbolic_level_indices(
                num_vars, degree, 1.0))

        # define target function
        def model(x):
            return np.array([
                np.sum(x**2, axis=0) + x[0] * x[1] + x[1] * x[2] +
                x[0] * x[1] * x[2]
            ]).T

        # define points to interpolate
        pts = get_tensor_product_points(2, var_trans, 'CC')
        helper_least_factorization(pts,
                                   model,
                                   var_trans,
                                   pce_opts,
                                   oli_opts,
                                   basis_generator,
                                   exact_mean=13. / 8.)
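
Here exact_mean = 13/8 because, for independent U[0, 1] inputs, E[sum_i x_i^2] = 3*(1/3) = 1, each pairwise product contributes 1/4, and the triple product contributes 1/8: 1 + 1/4 + 1/4 + 1/8 = 13/8. A quick Monte Carlo check:

import numpy as np

x = np.random.uniform(0., 1., (3, 100000))
vals = np.sum(x**2, axis=0) + x[0]*x[1] + x[1]*x[2] + x[0]*x[1]*x[2]
print(vals.mean(), 13./8.)  # agree up to sampling error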
Example #30
def approximate_polynomial_chaos(train_samples,
                                 train_vals,
                                 verbosity=0,
                                 basis_type='expanding_basis',
                                 variable=None,
                                 options=None):
    r"""
    Compute a Polynomial Chaos Expansion of a function from a fixed data set.

    Parameters
    ----------
    train_samples : np.ndarray (nvars,nsamples)
        The inputs of the function used to train the approximation

    train_vals : np.ndarray (nsamples,nqoi)
        The values of the function at ``train_samples``

    basis_type : string
        Type of approximation. Should be one of

        - 'expanding_basis' see :func:`pyapprox.approximate.expanding_basis_omp_pce`
        - 'hyperbolic_cross' see :func:`pyapprox.approximate.cross_validate_pce_degree`

    variable : pya.IndependentMultivariateRandomVariable
        Object containing information on the joint density of the inputs z.
        This is used to generate random samples from this joint density

    verbosity : integer
        Controls the amount of information printed to screen


    Returns
    -------
    pce : :class:`pyapprox.multivariate_polynomials.PolynomialChaosExpansion`
        The PCE approximation
    """
    funcs = {
        'expanding_basis': expanding_basis_omp_pce,
        'hyperbolic_cross': cross_validate_pce_degree
    }
    if variable is None:
        msg = 'pce requires that variable be defined'
        raise Exception(msg)
    if basis_type not in funcs:
        msg = f'Basis type {basis_type} not found.\n Available types are:\n'
        for key in funcs.keys():
            msg += f"\t{key}\n"
        raise Exception(msg)

    from pyapprox.multivariate_polynomials import PolynomialChaosExpansion, \
        define_poly_options_from_variable_transformation
    var_trans = AffineRandomVariableTransformation(variable)
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly.configure(poly_opts)

    if options is None:
        options = {}

    res = funcs[basis_type](poly, train_samples, train_vals, **options)[0]
    return res
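
A hypothetical end-to-end call, fitting a PCE to data from a simple quadratic. The variable construction mirrors the other examples on this page; the pyapprox names used (IndependentMultivariateRandomVariable, approximate_polynomial_chaos) are assumed to be imported as above:

import numpy as np
from scipy import stats

nvars, nsamples = 2, 100
variable = IndependentMultivariateRandomVariable(
    [stats.uniform(0, 1)]*nvars)
train_samples = np.random.uniform(0., 1., (nvars, nsamples))
train_vals = np.sum(train_samples**2, axis=0)[:, None]
pce = approximate_polynomial_chaos(
    train_samples, train_vals, variable=variable,
    basis_type='expanding_basis')
# the returned PCE is callable, as in the tests above
print(np.abs(pce(train_samples) - train_vals).max())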