def inverse_rosenblatt_transformation(samples,
                                      joint_density,
                                      limits,
                                      num_quad_samples_1d=100,
                                      tol=1e-12,
                                      num_bins=101):
    assert samples.ndim == 2
    num_vars, num_samples = samples.shape
    quad_x, quad_w = get_tensor_product_quadrature_rule(
        num_quad_samples_1d, num_vars, np.polynomial.legendre.leggauss)

    def cdffun(x):
        if np.isscalar(x):
            x = np.asarray([x])
        assert x.ndim == 1
        return marginalized_cumulative_distribution_function(
            joint_density, limits, np.arange(1), x[np.newaxis, :],
            np.arange(1, num_vars), np.empty((0, x.shape[0])),
            num_quad_samples_1d, (quad_x, quad_w))

    trans_samples = np.empty((num_vars, num_samples), dtype=float)
    for jj in range(num_samples):
        trans_samples[0, jj] = invert_cdf(samples[0, jj:jj + 1], cdffun,
                                          limits[:2], tol, num_bins)

    for ii in range(1, num_vars):
        active_vars = np.arange(ii)

        quad_x, quad_w = get_tensor_product_quadrature_rule(
            num_quad_samples_1d, 1 + (num_vars - ii - 1),
            np.polynomial.legendre.leggauss)

        # Even though invert_cdf can be applied to multiple samples at once,
        # the following cdf implicitly uses the inactive samples, which
        # number num_samples, whereas invert_cdf solves the problem one
        # point at a time, so the active and inactive samples would be
        # inconsistent if all points were inverted together
        for jj in range(num_samples):

            def cdffun(x):
                if np.isscalar(x):
                    x = np.asarray([x])
                assert x.ndim == 1
                cdf_val = marginalized_cumulative_distribution_function(
                    joint_density, limits, np.asarray([ii]), x[np.newaxis, :],
                    np.arange(ii + 1, num_vars), trans_samples[:ii, jj:jj + 1],
                    num_quad_samples_1d, (quad_x, quad_w))
                pdf_val = marginal_pdf(joint_density,
                                       active_vars,
                                       limits,
                                       trans_samples[:ii, jj:jj + 1],
                                       num_quad_samples_1d,
                                       quad_rule=(quad_x, quad_w))
                return cdf_val / pdf_val

            icdf_val = invert_cdf(samples[ii, jj:jj + 1], cdffun,
                                  limits[2 * ii:2 * ii + 2], tol, num_bins)
            trans_samples[ii, jj] = icdf_val
    return trans_samples
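
A minimal sanity check (a sketch, not part of the original listing; it assumes the pyapprox helpers referenced above, such as invert_cdf and get_tensor_product_quadrature_rule, are importable): for an independent uniform density every conditional CDF is the identity, so the transformation should return its input.

import numpy as np

def uniform_joint_density(x):
    # constant density on the unit square
    return np.ones(x.shape[1])

limits = np.array([0., 1., 0., 1.])
samples = np.random.uniform(0., 1., (2, 3))
trans_samples = inverse_rosenblatt_transformation(
    samples, uniform_joint_density, limits, num_quad_samples_1d=20)
# the uniform density has identity conditionals, so the map is the identity
assert np.allclose(trans_samples, samples, atol=1e-6)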
Example #2
def compute_moment_matrix_using_tensor_product_quadrature(
        basis_matrix_func,
        num_samples,
        num_vars,
        univariate_quadrature_rule,
        density_function=None):
    """
    Parameters
    ----------
    num_samples : integer
       The number of samples in the 1D quadrature rule

    univariate_quadrature_rule : tuple (x,w)
       x : np.ndarray (num_samples) the quadrature points in the user space
       w : np.ndarray (num_samples) the quadrature weights

    density_function : callable
       v = density_function(x)
       A probability density function. If not None then the quadrature rule
       should be for the Lebesgue measure and the weights will be multiplied
       by the value of the density at the quadrature points
    """
    samples, weights = get_tensor_product_quadrature_rule(
        num_samples, num_vars, univariate_quadrature_rule, None,
        density_function)
    basis_matrix = basis_matrix_func(samples)
    moment_matrix = np.dot(np.diag(np.sqrt(weights)), basis_matrix)
    return moment_matrix
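
A minimal usage sketch (not from the original listing; it assumes pyapprox's get_tensor_product_quadrature_rule used above is importable): build the moment matrix of the monomial basis {1, x1, x2} under a constant density on [0, 1]^2. The Gram matrix of the basis is then moment_matrix.T.dot(moment_matrix).

import numpy as np

def monomial_basis_matrix(samples):
    # columns are 1, x1, x2 evaluated at each column of samples
    return np.vstack((np.ones(samples.shape[1]),
                      samples[0, :], samples[1, :])).T

def unit_interval_legendre_rule(n):
    # Gauss-Legendre rule mapped from [-1, 1] to [0, 1] (Lebesgue measure)
    x, w = np.polynomial.legendre.leggauss(n)
    return (x + 1)/2., w/2.

def uniform_density(x):
    return np.ones(x.shape[1])

moment_matrix = compute_moment_matrix_using_tensor_product_quadrature(
    monomial_basis_matrix, 10, 2, unit_interval_legendre_rule,
    density_function=uniform_density)
gram_matrix = moment_matrix.T.dot(moment_matrix)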
Example #3
    def test_evaluate_multivariate_mixed_basis_pce_moments(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [beta(alpha_stat, beta_stat, 0, 1), norm(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_quadrature_rules = [
            partial(gauss_jacobi_pts_wts_1D,
                    alpha_poly=beta_stat - 1,
                    beta_poly=alpha_stat - 1), gauss_hermite_pts_wts_1D
        ]
        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rules,
            var_trans.map_from_canonical_space)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)
        basis_matrix = poly.basis_matrix(samples)
        values = basis_matrix.dot(coef)
        true_mean = values.T.dot(weights)
        true_variance = (values.T**2).dot(weights) - true_mean**2

        assert np.allclose(poly.mean(), true_mean)
        assert np.allclose(poly.variance(), true_variance)
Example #4
def get_tensor_product_quadrature_rule_from_pce(pce, degrees):
    num_vars = pce.num_vars()
    degrees = np.atleast_1d(degrees)
    if degrees.shape[0] == 1 and num_vars > 1:
        degrees = np.array([degrees[0]] * num_vars)
    if np.any(pce.max_degree < degrees):
        pce.update_recursion_coefficients(degrees, pce.config_opts)
    if len(pce.recursion_coeffs) == 1:
        # update_recursion_coefficients may not return coefficients
        # up to degree specified if using recursion for polynomial
        # orthogonal to a discrete variable with finite non-zero
        # probability measures
        assert pce.recursion_coeffs[0].shape[0] >= degrees.max() + 1
        univariate_quadrature_rules = [
            partial(gauss_quadrature, pce.recursion_coeffs[0])
        ] * num_vars
    else:
        univariate_quadrature_rules = []
        for dd in range(num_vars):
            # update_recursion_coefficients may not return coefficients
            # up to degree specified if using recursion for polynomial
            # orthogonal to a discrete variable with finite non-zero
            # probability measures
            basis_index = pce.basis_type_index_map[dd]
            assert (pce.recursion_coeffs[basis_index].shape[0] >=
                    degrees[dd] + 1)
            univariate_quadrature_rules.append(
                partial(gauss_quadrature, pce.recursion_coeffs[basis_index]))

    canonical_samples, weights = get_tensor_product_quadrature_rule(
        degrees + 1, num_vars, univariate_quadrature_rules)
    samples = pce.var_trans.map_from_canonical_space(canonical_samples)
    return samples, weights
Example #5
def get_tensor_product_quadrature_rule_from_pce(pce, degrees):
    univariate_quadrature_rules = get_univariate_quadrature_rules_from_pce(
        pce, degrees)
    canonical_samples, weights = \
        get_tensor_product_quadrature_rule(
            degrees+1, pce.num_vars(), univariate_quadrature_rules)
    samples = pce.var_trans.map_from_canonical_space(canonical_samples)
    return samples, weights
Example #6
    def test_compute_moment_matrix_using_tensor_product_quadrature(self):
        """
        Test use of density_function in
        compute_moment_matrix_using_tensor_product_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        def univariate_pdf(x):
            return stats.beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        def uniform_univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x = (x + 1.) / 2.
            return x, w

        true_univariate_quadrature_rule = partial(gauss_jacobi_pts_wts_1D,
                                                  alpha_poly=beta_stat - 1,
                                                  beta_poly=alpha_stat - 1)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=uniform_univariate_quadrature_rule,
            density_function=density_function)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1,
            num_vars,
            true_univariate_quadrature_rule,
            transform_samples=random_var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        apc = APC(compute_moment_matrix_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)

        apc_basis_matrix = apc.basis_matrix(samples)

        # print(np.dot(apc_basis_matrix.T*weights,apc_basis_matrix))
        assert np.allclose(
            np.dot(apc_basis_matrix.T * weights, apc_basis_matrix),
            np.eye(apc_basis_matrix.shape[1]))
Example #7
    def test_random_christoffel_sampling(self):
        num_vars = 2
        degree = 10

        alpha_poly = 1
        beta_poly = 1

        alpha_stat = beta_poly + 1
        beta_stat = alpha_poly + 1

        num_samples = int(1e4)
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_pdf = partial(stats.beta.pdf, a=alpha_stat, b=beta_stat)
        probability_density = partial(tensor_product_pdf,
                                      univariate_pdfs=univariate_pdf)

        envelope_factor = 10

        def generate_proposal_samples(n):
            return np.random.uniform(0., 1., size=(num_vars, n))

        def proposal_density(x):
            return np.ones(x.shape[1])

        # unlike Fekete and Leja sampling, we can and should use
        # poly.basis_matrix here. If we used canonical_basis_matrix then
        # the densities would also have to be mapped to the canonical
        # space, which can be difficult
        samples = random_induced_measure_sampling(
            num_samples, num_vars, poly.basis_matrix, probability_density,
            proposal_density, generate_proposal_samples, envelope_factor)

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, alpha_poly, beta_poly)
            x = (x + 1) / 2
            return x, w

        x, w = get_tensor_product_quadrature_rule(degree * 2 + 1, num_vars,
                                                  univariate_quadrature_rule)
        # print(samples.mean(axis=1),x.dot(w))
        assert np.allclose(
            christoffel_function(x, poly.basis_matrix, True).dot(w), 1.0)
        assert np.allclose(x.dot(w), samples.mean(axis=1), atol=1e-2)
Example #8
def marginal_pdf(joint_density,
                 active_vars,
                 limits,
                 samples,
                 num_quad_samples_1d=100,
                 quad_rule=None):
    """
    Parameters
    ----------

    num_quad_samples_1d : integer
        The number of quadrature samples in the univariate quadrature rule
        used to construct tensor product quadrature rule
    """
    assert samples.ndim == 2
    assert active_vars.shape[0] == samples.shape[0]
    diff = np.diff(limits)[::2]
    assert np.all(diff > 0)

    num_vars = limits.shape[0] // 2
    mask = np.ones((num_vars), dtype=bool)
    mask[active_vars] = False
    marginalized_vars = np.arange(num_vars)[mask]

    num_marginalized_vars = num_vars - samples.shape[0]
    if num_marginalized_vars > 0:
        if quad_rule is None:
            quad_x, quad_w = get_tensor_product_quadrature_rule(
                num_quad_samples_1d, num_marginalized_vars,
                np.polynomial.legendre.leggauss)
        else:
            quad_x, quad_w = quad_rule[0].copy(), quad_rule[1].copy()
            assert quad_x.min() >= -1. and quad_x.max() <= 1.
            assert quad_x.shape[0] == num_marginalized_vars

        for ii in range(num_marginalized_vars):
            lb = limits[2 * marginalized_vars[ii]]
            ub = limits[2 * marginalized_vars[ii] + 1]
            quad_x[ii, :] = (quad_x[ii, :] + 1.) / 2 * (ub - lb) + lb
            quad_w *= (ub - lb) / 2.0

    num_samples = samples.shape[1]
    values = np.empty((num_samples), dtype=float)
    for jj in range(num_samples):
        fixed_data = samples[:, jj]
        xx = combine_samples_with_fixed_data(fixed_data, active_vars, quad_x)
        density_vals = joint_density(xx)
        values[jj] = np.dot(density_vals, quad_w)
    return values
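
A worked sketch (illustrative, assuming marginal_pdf and its pyapprox dependencies are importable): for the separable density f(x1, x2) = 4*x1*x2 on [0, 1]^2, integrating out x2 gives the exact marginal 2*x1.

import numpy as np

def separable_density(x):
    return 4.*x[0, :]*x[1, :]

limits = np.array([0., 1., 0., 1.])
active_samples = np.array([[0.25, 0.5, 0.75]])  # values of the active x1
vals = marginal_pdf(separable_density, np.array([0]), limits,
                    active_samples, num_quad_samples_1d=20)
# int_0^1 4*x1*x2 dx2 = 2*x1
assert np.allclose(vals, 2*active_samples[0, :])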
Example #9
def get_mixture_tensor_product_gauss_quadrature(
        mixture_univariate_quadrature_rules, nquad_samples_1d, num_vars):
    """
    Assumes each mixture component is a tensor product of a single
    univariate density
    """
    num_mixtures = len(mixture_univariate_quadrature_rules)

    samples = np.empty((num_vars, 0), dtype=float)
    weights = np.empty((0), dtype=float)
    for ii in range(num_mixtures):
        samples_ii, weights_ii = get_tensor_product_quadrature_rule(
            nquad_samples_1d, num_vars,
            mixture_univariate_quadrature_rules[ii])
        samples = np.hstack((samples, samples_ii))
        weights = np.hstack((weights, weights_ii))
    return samples, weights/num_mixtures
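
A small illustrative sketch of the averaging (assuming get_tensor_product_quadrature_rule is importable as above): with two mixture components whose univariate rules each have unit mass, the combined tensor-product weights still sum to one after division by the number of mixtures.

import numpy as np

def rule_on_unit_interval(n):
    # Gauss-Legendre rule mapped from [-1, 1] to [0, 1]
    x, w = np.polynomial.legendre.leggauss(n)
    return (x + 1)/2., w/2.

def rule_on_canonical_interval(n):
    # Gauss-Legendre rule on [-1, 1] rescaled to unit mass
    x, w = np.polynomial.legendre.leggauss(n)
    return x, w/2.

samples, weights = get_mixture_tensor_product_gauss_quadrature(
    [rule_on_unit_interval, rule_on_canonical_interval], 5, 2)
assert np.allclose(weights.sum(), 1.)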
Example #10
    def test_compute_grammian_using_sparse_grid_quadrature(self):
        """
        Test compute_grammian_of_mixture_models_using_sparse_grid_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        rv_params = [[alpha_stat, beta_stat]]
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, None)

        compute_grammian_function = partial(
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature,
            mixture_univariate_quadrature_rules=
            mixture_univariate_quadrature_rules,
            mixture_univariate_growth_rules=[leja_growth_rule],
            num_vars=num_vars)

        pce = APC(compute_grammian_function=compute_grammian_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        # use Gauss quadrature for true distribution to integrate APC basis
        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            return x, w

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rule)

        basis_matrix = pce.basis_matrix(samples)
        # print (np.dot(basis_matrix.T*weights,basis_matrix))
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           np.eye(basis_matrix.shape[1]))
Example #11
def compute_polynomial_moments_using_tensor_product_quadrature(
        basis_matrix_function,
        num_samples,
        num_vars,
        univariate_quadrature_rule,
        density_function=None):
    """
    Compute the moments of a polynomial basis using tensor product quadrature

    Parameters
    ----------
    num_samples : integer
       The number of samples in the 1D quadrature rule

    univariate_quadrature_rule : tuple (x,w)
       x : np.ndarray (num_samples) the quadrature points in the user space
       w : np.ndarray (num_samples) the quadrature weights

    density_function : callable
       v = density_function(x)
       A probability density function. If not None then the quadrature rule
       should be for the Lebesgue measure and the weights will be multiplied
       by the value of the density at the quadrature points

    Returns
    -------
    poly_moments : np.ndarray (num_terms, num_terms)
       The symmetric matrix containing the inner product of each polynomial
       basis with every polynomial basis (including itself - diagonal entries)
    """
    samples, weights = get_tensor_product_quadrature_rule(
        num_samples, num_vars, univariate_quadrature_rule, None,
        density_function)
    basis_matrix = basis_matrix_function(samples)

    poly_moments = np.empty((basis_matrix.shape[1], basis_matrix.shape[1]),
                            dtype=float)
    for ii in range(basis_matrix.shape[1]):
        for jj in range(ii, basis_matrix.shape[1]):
            poly_moments[ii, jj] = np.dot(
                basis_matrix[:, ii] * basis_matrix[:, jj], weights)
            poly_moments[jj, ii] = poly_moments[ii, jj]
    return poly_moments
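
The nested loop fills the symmetric matrix entry by entry; inside the function the same result could be obtained with one vectorized line (a sketch of an equivalent computation, not the original code):

# equivalent to the double loop above, using the same samples and weights
poly_moments = (basis_matrix.T*weights).dot(basis_matrix)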
Example #12
    def test_conditional_moments_of_polynomial_chaos_expansion(self):
        num_vars = 3
        degree = 2
        inactive_idx = [0, 2]
        np.random.seed(1)
        # keep variables on canonical domain to make constructing
        # tensor product quadrature rule, used for testing, easier
        var = [uniform(-1, 2), beta(2, 2, -1, 2), norm(0, 1)]
        quad_rules = [
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0),
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=1, beta_poly=1),
            partial(gauss_hermite_pts_wts_1D)
        ]
        var_trans = AffineRandomVariableTransformation(var)
        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))
        poly.set_coefficients(
            np.arange(poly.indices.shape[1], dtype=float)[:, np.newaxis])

        fixed_samples = np.array(
            [[vv.rvs() for vv in np.array(var)[inactive_idx]]]).T
        mean, variance = conditional_moments_of_polynomial_chaos_expansion(
            poly, fixed_samples, inactive_idx, True)

        from pyapprox.utilities import get_all_sample_combinations
        from pyapprox.probability_measure_sampling import \
            generate_independent_random_samples
        active_idx = np.setdiff1d(np.arange(num_vars), inactive_idx)
        random_samples, weights = get_tensor_product_quadrature_rule(
            [2 * degree] * len(active_idx), len(active_idx),
            [quad_rules[ii] for ii in range(num_vars) if ii in active_idx])
        samples = get_all_sample_combinations(fixed_samples, random_samples)
        temp = samples[len(inactive_idx):].copy()
        samples[inactive_idx] = samples[:len(inactive_idx)]
        samples[active_idx] = temp

        true_mean = (poly(samples).T.dot(weights).T)
        true_variance = ((poly(samples)**2).T.dot(weights).T) - true_mean**2
        assert np.allclose(true_mean, mean)
        assert np.allclose(true_variance, variance)
Example #13
    def test_compute_moment_matrix_using_tensor_product_quadrature(self):
        """
        Test use of density_function in
        compute_moment_matrix_using_tensor_product_quadrature()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 3

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)
        pce_opts["truncation_tol"] = 1e-5

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = (x + 1) / 2.
            return x, w

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        pce = FPC(compute_moment_matrix_function)
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rule)

        basis_matrix = pce.basis_matrix(samples)
        # print(np.dot(basis_matrix.T*weights, basis_matrix))
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           np.eye(basis_matrix.shape[1]))
Example #14
    def test_evaluate_multivariate_monomial_pce(self):
        num_vars = 2
        alpha = 0.
        beta = 0.
        degree = 2
        deriv_order = 1
        probability_measure = True

        poly = PolynomialChaosExpansion()
        var_trans = IdentityTransformation(num_vars)
        poly.configure({'poly_type': 'monomial', 'var_trans': var_trans})

        def univariate_quadrature_rule(nn):
            x, w = gauss_jacobi_pts_wts_1D(nn, 0, 0)
            x = (x + 1) / 2.
            return x, w

        samples, weights = get_tensor_product_quadrature_rule(
            degree, num_vars, univariate_quadrature_rule)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        I = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, I]
        poly.set_indices(indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(np.asarray([1 + 0. * x, x, x**2]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 2. * x]).T)

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        assert np.allclose(exact_basis_matrix, basis_matrix)
Example #15
def marginalized_cumulative_distribution_function(joint_density,
                                                  limits,
                                                  active_vars,
                                                  active_var_samples,
                                                  inactive_vars,
                                                  fixed_var_samples,
                                                  num_quad_samples_1d=100,
                                                  quad_rule=None):
    """
    Given a set of fixed values for variables, indexed by I, and a set of
    variables to be marginalized, indexed by J, compute the CDF
    at samples of the values indexed by K, where 

    I\\intersect J\\intersect K=\\emptyset and
    I\\union J\\union = {1,...,num_vars}.

    Parameters
    ----------
    joint_density : callable vals = joint_density(samples)
        The joint density f(x) of the random variables x

    limits : np.ndarray (2*num_vars)
        The bounds of the random variables

    active_vars : np.ndarray (num_active_vars)
        The indices (K) of the variables at which we will be evaluating
        the CDF.

    active_var_samples : np.ndarray (num_active_vars, num_samples)
        The points at which to evaluate the CDF

    inactive_vars : np.ndarray (num_inactive_vars)
        The indices (J) of the variables that will be marginalized out.

    fixed_var_samples : np.ndarray (num_fixed_vars, num_samples)
        The values at which the joint density will be fixed before the CDF
        is computed and marginalized.

    num_quad_samples_1d : integer
        The number of quadrature samples in the univariate quadrature rule
        used to construct tensor product quadrature rule

    Returns
    -------
    values : np.ndarray (num_samples)
       The values of the CDF at the samples
    """
    num_samples = active_var_samples.shape[1]
    if (fixed_var_samples.shape[1] == 1
            and fixed_var_samples.shape[1] != num_samples):
        fixed_var_samples = np.tile(fixed_var_samples, (1, num_samples))

    num_vars = limits.shape[0] // 2
    num_active_vars = active_vars.shape[0]
    num_inactive_vars = inactive_vars.shape[0]
    num_fixed_vars = num_vars - (num_active_vars + num_inactive_vars)

    diff = np.diff(limits)[::2]
    assert np.all(diff > 0)
    assert active_var_samples.ndim == 2
    assert active_var_samples.shape[0] == num_active_vars
    assert num_fixed_vars == fixed_var_samples.shape[0]
    assert fixed_var_samples.shape[1] == num_samples

    mask = np.ones((num_vars), dtype=bool)
    mask[active_vars] = False
    mask[inactive_vars] = False
    fixed_vars = np.arange(num_vars)[mask]

    if quad_rule is None:
        quad_x, quad_w = get_tensor_product_quadrature_rule(
            num_quad_samples_1d, num_active_vars + num_inactive_vars,
            np.polynomial.legendre.leggauss)
    else:
        quad_x, quad_w = quad_rule
        assert quad_x.shape[0] == num_active_vars + num_inactive_vars

    values = np.empty((num_samples), dtype=float)
    integration_vars = np.hstack((active_vars, inactive_vars))
    inactive_ubs = limits[2 * inactive_vars + 1]
    for jj in range(num_samples):
        # limits of integration
        w = quad_w.copy()
        active_x = np.empty_like(quad_x)
        ubs = np.hstack((active_var_samples[:, jj], inactive_ubs))
        for ii in range(integration_vars.shape[0]):
            lb = limits[2 * integration_vars[ii]]
            ub = ubs[ii]
            # assert (ub - lb) > 0
            active_x[ii, :] = (quad_x[ii, :] + 1.) / 2. * (ub - lb) + lb
            w *= (ub - lb) / 2.0

        fixed_data = fixed_var_samples[:, jj]
        xx = combine_samples_with_fixed_data(fixed_data, fixed_vars, active_x)
        density_vals = joint_density(xx)
        values[jj] = np.dot(density_vals.T, w)
    return values
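
A worked sketch (illustrative, assuming the pyapprox dependencies above are importable): for the uniform density on [0, 1]^2, fixing no variables and marginalizing x2 out leaves the CDF of x1, which is F(x) = x.

import numpy as np

def uniform_density(x):
    return np.ones(x.shape[1])

limits = np.array([0., 1., 0., 1.])
active_var_samples = np.array([[0.2, 0.6, 0.9]])
vals = marginalized_cumulative_distribution_function(
    uniform_density, limits, np.array([0]), active_var_samples,
    np.array([1]), np.empty((0, 3)), num_quad_samples_1d=20)
assert np.allclose(vals, active_var_samples[0, :])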
Example #16
    def test_exponential_quartic(self):
        # set random seed, so the data is reproducible each time
        np.random.seed(2)

        univariate_variables = [uniform(-2, 4), uniform(-2, 4)]
        plot_range = np.asarray([-1, 1, -1, 1])*2
        variables = IndependentMultivariateRandomVariable(
            univariate_variables)

        loglike = ExponentialQuarticLogLikelihoodModel()
        loglike = PYMC3LogLikeWrapper(loglike, loglike.gradient)

        # number of draws from the distribution
        ndraws = 500
        # number of "burn-in points" (which we'll discard)
        nburn = min(1000, int(ndraws*0.1))
        # number of parallel chains
        njobs = 4

        def unnormalized_posterior(x):
            # avoid use of the pymc3 wrapper, which only evaluates samples
            # one at a time
            vals = np.exp(loglike.loglike(x))
            rvs = variables.all_variables()
            for ii in range(variables.num_vars()):
                vals[:, 0] *= rvs[ii].pdf(x[ii, :])
            return vals

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, 0, 0)
            x *= 2
            return x, w

        x, w = get_tensor_product_quadrature_rule(
            100, variables.num_vars(), univariate_quadrature_rule)
        evidence = unnormalized_posterior(x)[:, 0].dot(w)
        # print('evidence', evidence)

        exact_mean = (x*unnormalized_posterior(x)[:, 0]).dot(w)/evidence
        # print(exact_mean)

        algorithm = 'nuts'
        # algorithm = 'smc'
        samples, effective_sample_size, map_sample = \
            run_bayesian_inference_gaussian_error_model(
                loglike, variables, ndraws, nburn, njobs,
                algorithm=algorithm, get_map=True, print_summary=True,
                loglike_grad=loglike.gradient, seed=2)

        # from pyapprox.visualization import get_meshgrid_function_data
        # import matplotlib
        # X,Y,Z = get_meshgrid_function_data(
        #     lambda x: unnormalized_posterior(x)/evidence, plot_range, 50)
        # plt.contourf(
        #     X, Y, Z, levels=np.linspace(Z.min(),Z.max(),30),
        #     cmap=matplotlib.cm.coolwarm)
        # plt.plot(samples[0,:],samples[1,:],'ko')
        # plt.show()
        
        print('mcmc mean error', samples.mean(axis=1) - exact_mean)
        print('MAP sample', map_sample)
        print('exact mean', exact_mean.squeeze())
        print('MCMC mean', samples.mean(axis=1))
        assert np.allclose(map_sample, np.zeros((variables.num_vars(), 1)))
        # tolerance 3e-2 can be exceeded for certain random runs
        assert np.allclose(
            exact_mean.squeeze(), samples.mean(axis=1), atol=3e-2)
Example #17
    def test_correlated_beta(self):

        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        bisection_opts = {'tol': 1e-10, 'max_iterations': 100}

        beta_cdf = lambda x: beta_rv.cdf(x, a=alpha_stat, b=beta_stat)
        beta_icdf = lambda x: beta_rv.ppf(x, a=alpha_stat, b=beta_stat)
        x_marginal_cdfs = [beta_cdf] * num_vars
        x_marginal_inv_cdfs = [beta_icdf] * num_vars
        x_marginal_means = np.asarray(
            [beta_rv.mean(a=alpha_stat, b=beta_stat)] * num_vars)
        x_marginal_stdevs = np.asarray(
            [beta_rv.std(a=alpha_stat, b=beta_stat)] * num_vars)
        beta_pdf = lambda x: beta_rv.pdf(x, a=alpha_stat, b=beta_stat)
        x_marginal_pdfs = [beta_pdf] * num_vars

        x_correlation = np.array([[1, 0.7], [0.7, 1]])

        quad_rule = gauss_hermite_pts_wts_1D(11)
        z_correlation = transform_correlations(x_correlation,
                                               x_marginal_inv_cdfs,
                                               x_marginal_means,
                                               x_marginal_stdevs, quad_rule,
                                               bisection_opts)
        assert np.allclose(z_correlation[0, 1], z_correlation[1, 0])

        x_correlation_recovered = \
            gaussian_copula_compute_x_correlation_from_z_correlation(
                x_marginal_inv_cdfs, x_marginal_means, x_marginal_stdevs,
                z_correlation)
        assert np.allclose(x_correlation, x_correlation_recovered)

        z_variable = multivariate_normal(mean=np.zeros((num_vars)),
                                         cov=z_correlation)
        z_joint_density = lambda x: z_variable.pdf(x.T)
        target_density = partial(nataf_joint_density,
                                 x_marginal_cdfs=x_marginal_cdfs,
                                 x_marginal_pdfs=x_marginal_pdfs,
                                 z_joint_density=z_joint_density)

        # all variances are the same, so the true covariance could also be
        # computed as x_correlation.copy()*x_marginal_stdevs[0]**2
        true_x_covariance = correlation_to_covariance(x_correlation,
                                                      x_marginal_stdevs)

        def univariate_quad_rule(n):
            x, w = np.polynomial.legendre.leggauss(n)
            x = (x + 1.) / 2.
            w /= 2.
            return x, w

        x, w = get_tensor_product_quadrature_rule(100, num_vars,
                                                  univariate_quad_rule)
        assert np.allclose(np.dot(target_density(x), w), 1.0)

        # test the covariance computed by applying quadrature to the
        # joint density
        mean = np.dot(x * target_density(x), w)
        x_covariance = np.empty((num_vars, num_vars))
        x_covariance[0, 0] = np.dot(x[0, :]**2 * target_density(x),
                                    w) - mean[0]**2
        x_covariance[1, 1] = np.dot(x[1, :]**2 * target_density(x),
                                    w) - mean[1]**2
        x_covariance[0, 1] = np.dot(x[0, :] * x[1, :] * target_density(x),
                                    w) - mean[0] * mean[1]
        x_covariance[1, 0] = x_covariance[0, 1]
        # error is influenced by bisection_opts['tol']
        assert np.allclose(x_covariance,
                           true_x_covariance,
                           atol=bisection_opts['tol'])

        # test samples generated using Gaussian copula are correct
        num_samples = 10000
        x_samples, true_u_samples = generate_x_samples_using_gaussian_copula(
            num_vars, z_correlation, x_marginal_inv_cdfs, num_samples)

        x_sample_covariance = np.cov(x_samples)
        assert np.allclose(true_x_covariance, x_sample_covariance, atol=1e-2)

        u_samples = nataf_transformation(x_samples, true_x_covariance,
                                         x_marginal_cdfs, x_marginal_inv_cdfs,
                                         x_marginal_means, x_marginal_stdevs,
                                         bisection_opts)

        assert np.allclose(u_samples, true_u_samples)

        trans_samples = inverse_nataf_transformation(
            u_samples, x_covariance, x_marginal_cdfs, x_marginal_inv_cdfs,
            x_marginal_means, x_marginal_stdevs, bisection_opts)

        assert np.allclose(x_samples, trans_samples)
Example #18
    def test_evaluate_multivariate_jacobi_pce(self):
        num_vars = 2
        alpha = 0.
        beta = 0.
        degree = 2
        deriv_order = 1
        probability_measure = True

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1, 2), num_vars)
        poly.configure({'poly_type': 'legendre', 'var_trans': var_trans})

        samples, weights = get_tensor_product_quadrature_rule(
            degree - 1, num_vars, np.polynomial.legendre.leggauss)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        I = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, I]
        # remove the [0,2] index so max_level is not the same for every
        # dimension; also remove [1,0] and [1,1] to make sure we can handle
        # index sets that have missing univariate degrees not at the ends
        J = [1, 5, 4]
        reduced_indices = np.delete(indices, J, axis=1)
        poly.set_indices(reduced_indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(
                np.asarray([1 + 0. * x, x, 0.5 * (3. * x**2 - 1)]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 3. * x]).T)
            exact_basis_vals_1d[-1] /= np.sqrt(1. /
                                               (2 * np.arange(degree + 1) + 1))
            exact_basis_derivs_1d[-1] /= np.sqrt(
                1. / (2 * np.arange(degree + 1) + 1))

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        exact_basis_matrix = np.delete(exact_basis_matrix, J, axis=1)

        assert np.allclose(exact_basis_matrix, basis_matrix)
Example #19
    def test_compute_moment_matrix_combination_sparse_grid(self):
        """
        Test use of density_function in
        compute_grammian_matrix_using_combination_sparse_grid()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        def univariate_pdf(x):
            return stats.beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        true_univariate_quadrature_rule = partial(gauss_jacobi_pts_wts_1D,
                                                  alpha_poly=beta_stat - 1,
                                                  beta_poly=alpha_stat - 1)

        from pyapprox.univariate_quadrature import \
            clenshaw_curtis_in_polynomial_order, clenshaw_curtis_rule_growth
        quad_rule_opts = {
            'quad_rules': clenshaw_curtis_in_polynomial_order,
            'growth_rules': clenshaw_curtis_rule_growth,
            'unique_quadrule_indices': None
        }

        compute_grammian_function = partial(
            compute_grammian_matrix_using_combination_sparse_grid,
            var_trans=pce_var_trans,
            max_num_samples=100,
            density_function=density_function,
            quad_rule_opts=quad_rule_opts)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1,
            num_vars,
            true_univariate_quadrature_rule,
            transform_samples=random_var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)
        basis_matrix = pce.basis_matrix(samples)
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           compute_grammian_function(pce.basis_matrix, None))

        apc = APC(compute_grammian_function=compute_grammian_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)

        apc_basis_matrix = apc.basis_matrix(samples)

        # print(np.dot(apc_basis_matrix.T*weights,apc_basis_matrix))
        assert np.allclose(
            np.dot(apc_basis_matrix.T * weights, apc_basis_matrix),
            np.eye(apc_basis_matrix.shape[1]))
Example #20
    def test_evaluate_multivariate_hermite_pce(self):
        num_vars = 2
        degree = 2
        deriv_order = 1
        probability_measure = True

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            norm(0, 1), num_vars)
        poly.configure({'poly_type': 'hermite', 'var_trans': var_trans})

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, gauss_hermite_pts_wts_1D)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        I = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, I]
        poly.set_indices(indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})

        vals_basis_matrix = basis_matrix[:samples.shape[1], :]
        inner_products = (vals_basis_matrix.T * weights).dot(vals_basis_matrix)
        assert np.allclose(inner_products, np.eye(basis_matrix.shape[1]))

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(np.asarray([1 + 0. * x, x, x**2 - 1]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 2. * x]).T)
            exact_basis_vals_1d[-1] /= np.sqrt(
                sp.factorial(np.arange(degree + 1)))
            exact_basis_derivs_1d[-1] /= np.sqrt(
                sp.factorial(np.arange(degree + 1)))

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        assert np.allclose(exact_basis_matrix, basis_matrix)
Example #21
    def test_evaluate_multivariate_mixed_basis_pce(self):
        degree = 2
        deriv_order = 1
        probability_measure = True

        gauss_mean, gauss_var = -1, 4
        univariate_variables = [
            uniform(-1, 2),
            norm(gauss_mean, np.sqrt(gauss_var)),
            uniform(0, 3)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        univariate_quadrature_rules = [
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0),
            gauss_hermite_pts_wts_1D,
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0)
        ]
        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rules,
            var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        indices = sort_indices_lexiographically(indices)
        poly.set_indices(indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})
        vals_basis_matrix = basis_matrix[:samples.shape[1], :]
        inner_products = (vals_basis_matrix.T * weights).dot(vals_basis_matrix)
        assert np.allclose(inner_products, np.eye(basis_matrix.shape[1]))

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :].copy()
            if dd == 0 or dd == 2:
                if dd == 2:
                    # y = x/3
                    # z = 2*y-1=2*x/3-1=2/3*x-3/2*2/3=2/3*(x-3/2)=(x-3/2)/(3/2)
                    loc, scale = 3 / 2, 3 / 2
                    x = (x - loc) / scale
                exact_basis_vals_1d.append(
                    np.asarray([1 + 0. * x, x, 0.5 * (3. * x**2 - 1)]).T)
                exact_basis_derivs_1d.append(
                    np.asarray([0. * x, 1.0 + 0. * x, 3. * x]).T)
                exact_basis_vals_1d[-1] /= np.sqrt(
                    1. / (2 * np.arange(degree + 1) + 1))
                exact_basis_derivs_1d[-1] /= np.sqrt(
                    1. / (2 * np.arange(degree + 1) + 1))
                # account for affine transformation in derivs
                if dd == 2:
                    exact_basis_derivs_1d[-1] /= scale
            if dd == 1:
                loc, scale = gauss_mean, np.sqrt(gauss_var)
                x = (x - loc) / scale
                exact_basis_vals_1d.append(
                    np.asarray([1 + 0. * x, x, x**2 - 1]).T)
                exact_basis_derivs_1d.append(
                    np.asarray([0. * x, 1.0 + 0. * x, 2. * x]).T)
                exact_basis_vals_1d[-1] /= np.sqrt(
                    sp.factorial(np.arange(degree + 1)))
                exact_basis_derivs_1d[-1] /= np.sqrt(
                    sp.factorial(np.arange(degree + 1)))
                # account for affine transformation in derivs
                exact_basis_derivs_1d[-1] /= scale

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[2][:, 1],
            exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[2][:, 1],
            exact_basis_vals_1d[1][:, 1] * exact_basis_vals_1d[2][:, 1],
            exact_basis_vals_1d[2][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x, 0 * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x,
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[2][:, 1],
                 0. * x, 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x, 0 * x,
                 exact_basis_derivs_1d[1][:, 1] * exact_basis_vals_1d[0][:, 1],
                 exact_basis_derivs_1d[1][:, 2], 0. * x,
                 exact_basis_derivs_1d[1][:, 1] * exact_basis_vals_1d[2][:, 1],
                 0. * x
             ]).T))

        # x3 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, 0. * x, exact_basis_derivs_1d[2][:, 1], 0 * x,
                 0 * x, 0 * x,
                 exact_basis_derivs_1d[2][:, 1] * exact_basis_vals_1d[0][:, 1],
                 exact_basis_derivs_1d[2][:, 1] * exact_basis_vals_1d[1][:, 1],
                 exact_basis_derivs_1d[2][:, 2]
             ]).T))

        func = poly.basis_matrix
        exact_basis_matrix_derivs = exact_basis_matrix[samples.shape[1]:]
        basis_matrix_derivs_fd = np.empty_like(exact_basis_matrix_derivs)
        for ii in range(samples.shape[1]):
            basis_matrix_derivs_fd[ii::samples.shape[1], :] = approx_fprime(
                samples[:, ii:ii + 1], func)

        # print(np.linalg.norm(
        #    exact_basis_matrix_derivs-basis_matrix_derivs_fd,
        #    ord=np.inf))
        assert np.allclose(exact_basis_matrix_derivs,
                           basis_matrix_derivs_fd,
                           atol=1e-7,
                           rtol=1e-7)
        assert np.allclose(exact_basis_matrix, basis_matrix)
Example #22
    def test_evaluate_multivariate_monomial_pce(self):
        num_vars = 2
        degree = 2

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            rv_continuous(name="continuous_monomial")(), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        def univariate_quadrature_rule(nn):
            x, w = gauss_jacobi_pts_wts_1D(nn, 0, 0)
            x = (x + 1) / 2.
            return x, w

        samples, weights = get_tensor_product_quadrature_rule(
            degree, num_vars, univariate_quadrature_rule)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        II = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, II]
        poly.set_indices(indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(np.asarray([1 + 0. * x, x, x**2]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 2. * x]).T)

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        assert np.allclose(exact_basis_matrix, basis_matrix)