Code Example #1
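Verifies the analytic Jacobian of a two-variable, two-output polynomial chaos expansion (a Beta(2, 3) variable on [0, 1] and a normal variable) against a finite-difference approximation at a random sample.
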
    def test_pce_jacobian(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [
            stats.beta(alpha_stat, beta_stat, 0, 1),
            stats.norm(-1, 2)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        sample = generate_independent_random_samples(variable, 1)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)

        jac = poly.jacobian(sample)
        fd_jac = approx_jacobian(lambda x: poly(x[:, np.newaxis])[0, :],
                                 sample[:, 0])
        assert np.allclose(jac, fd_jac)
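
Every example in this section follows the same construction pattern: wrap the univariate variables, build an affine transformation to the canonical domain, configure the expansion from that transformation, then set an index set and coefficients. Below is a minimal standalone sketch of that pattern; the pyapprox module paths are assumptions based on the library version these tests appear to target and may differ in other releases.

import numpy as np
from scipy import stats
# assumed import locations; adjust to the installed pyapprox version
from pyapprox.variables import IndependentMultivariateRandomVariable
from pyapprox.variable_transformations import AffineRandomVariableTransformation
from pyapprox.multivariate_polynomials import (
    PolynomialChaosExpansion, define_poly_options_from_variable_transformation)
from pyapprox.indexing import compute_hyperbolic_indices

# two independent variables: Beta(2, 3) on [0, 1] and a normal variable
# with mean -1 and standard deviation 2
variable = IndependentMultivariateRandomVariable(
    [stats.beta(2, 3, 0, 1), stats.norm(-1, 2)])
var_trans = AffineRandomVariableTransformation(variable)

poly = PolynomialChaosExpansion()
poly.configure(define_poly_options_from_variable_transformation(var_trans))
poly.set_indices(compute_hyperbolic_indices(2, 2, 1.0))  # total degree <= 2
poly.set_coefficients(np.ones((poly.indices.shape[1], 1)))

sample = np.array([[0.5], [-1.0]])  # one sample; variables stacked as rows
print(poly(sample), poly.jacobian(sample))
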
Code Example #2
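Numerically generates an orthonormal basis for a discrete variable with geometrically spaced atoms and equal weights, then checks that the weighted Gram matrix of the basis, sum_k w_k * phi_i(x_k) * phi_j(x_k), is the identity.
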
    def test_float_rv_discrete_chebyshev(self):
        N, degree = 10, 5
        xk, pk = np.geomspace(1.0, 512.0, num=N), np.ones(N) / N
        rv = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))()
        var_trans = AffineRandomVariableTransformation([rv])
        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-9
        poly.configure(poly_opts)
        poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
        p = poly.basis_matrix(xk[np.newaxis, :])
        w = pk
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Code Example #3
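The same orthonormality check for the discrete Chebyshev case: equally weighted atoms at the integers 0, ..., N-1.
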
    def test_discrete_chebyshev(self):
        N, degree = 10, 5
        xk, pk = np.arange(N), np.ones(N) / N
        rv = float_rv_discrete(name='discrete_chebyshev', values=(xk, pk))()
        var_trans = AffineRandomVariableTransformation([rv])
        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
        p = poly.basis_matrix(xk[np.newaxis, :])
        w = pk
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Code Example #4
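Checks that the basis generated for a binomial variable (the Krawtchouk polynomials) is orthonormal with respect to the binomial probability mass function.
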
    def test_krawtchouk_binomial(self):
        degree = 4
        n, p = 10, 0.5
        rv = stats.binom(n, p)
        var_trans = AffineRandomVariableTransformation([rv])
        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
        xk = np.arange(0, n + 1)[np.newaxis, :]
        p = poly.basis_matrix(xk)
        w = rv.pmf(xk[0, :])
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Code Example #5
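Checks that the basis generated for a hypergeometric variable (the Hahn polynomials) is orthonormal with respect to the hypergeometric probability mass function.
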
    def test_hahn_hypergeometric(self):
        degree = 4
        M, n, N = 20, 7, 12
        rv = stats.hypergeom(M, n, N)
        var_trans = AffineRandomVariableTransformation([rv])
        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
        xk = np.arange(0, n + 1)[np.newaxis, :]
        p = poly.basis_matrix(xk)
        w = rv.pmf(xk[0, :])
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
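
Examples #2–#5 all perform the same check: the basis matrix evaluated at the atoms of the discrete measure, weighted by the probability masses, must have an identity Gram matrix, i.e. sum_k w_k * phi_i(x_k) * phi_j(x_k) = delta_ij. A small helper capturing that pattern is sketched below; the helper name is mine, and poly is assumed to be an expansion configured for the discrete variable exactly as in the examples.

import numpy as np

def discrete_basis_is_orthonormal(poly, xk, pk, degree):
    # Gram matrix under the discrete measure sum_k pk[k] * delta(x - xk[k])
    basis_vals = poly.basis_matrix(np.asarray(xk, dtype=float)[np.newaxis, :])
    gram = np.dot(basis_vals.T * np.asarray(pk), basis_vals)
    return np.allclose(gram, np.eye(degree + 1))
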
Code Example #6
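Compares the mean, variance, and covariance of a two-output, mixed Jacobi–Hermite expansion against moments computed with a tensor-product Gauss quadrature rule; the cross-covariance of the two outputs must equal the dot product of their coefficient vectors with the constant term excluded.
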
    def test_evaluate_multivariate_mixed_basis_pce_moments(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [
            stats.beta(alpha_stat, beta_stat, 0, 1),
            stats.norm(-1, 2)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_quadrature_rules = [
            partial(gauss_jacobi_pts_wts_1D,
                    alpha_poly=beta_stat - 1,
                    beta_poly=alpha_stat - 1), gauss_hermite_pts_wts_1D
        ]
        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rules,
            var_trans.map_from_canonical_space)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)
        basis_matrix = poly.basis_matrix(samples)
        values = basis_matrix.dot(coef)
        true_mean = values.T.dot(weights)
        true_variance = (values.T**2).dot(weights) - true_mean**2

        assert np.allclose(poly.mean(), true_mean)
        assert np.allclose(poly.variance(), true_variance)

        assert np.allclose(np.diag(poly.covariance()), poly.variance())
        assert np.allclose(poly.covariance()[0, 1],
                           coef[1:, 0].dot(coef[1:, 1]))
Code Example #7
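Numerically generates orthonormal bases for Gumbel and lognormal variables and verifies orthonormality by integrating all pairwise products of basis functions against the density with Gauss–Legendre quadrature adapted to the unbounded domain.
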
    def test_pce_for_gumbel_variable(self):
        degree = 3
        mean, std = 1e4, 7.5e3
        beta = std * np.sqrt(6) / np.pi
        mu = mean - beta * np.euler_gamma
        rv1 = stats.gumbel_r(loc=mu, scale=beta)
        assert np.allclose(rv1.mean(), mean) and np.allclose(rv1.std(), std)
        rv2 = stats.lognorm(1)
        for rv in [rv2, rv1]:
            var_trans = AffineRandomVariableTransformation([rv])
            poly = PolynomialChaosExpansion()
            poly_opts = define_poly_options_from_variable_transformation(
                var_trans)
            poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-9
            poly.configure(poly_opts)
            poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
            poly.set_coefficients(np.ones((poly.indices.shape[1], 1)))

            def integrand(x):
                p = poly.basis_matrix(x[np.newaxis, :])
                G = np.empty((x.shape[0], p.shape[1]**2))
                kk = 0
                for ii in range(p.shape[1]):
                    for jj in range(p.shape[1]):
                        G[:, kk] = p[:, ii] * p[:, jj]
                        kk += 1
                return G * rv.pdf(x)[:, None]

            lb, ub = rv.interval(1)
            interval_size = rv.interval(0.99)[1] - rv.interval(0.99)[0]
            interval_size *= 10
            res = \
                integrate_using_univariate_gauss_legendre_quadrature_unbounded(
                    integrand, lb, ub, 10, interval_size=interval_size,
                    verbose=0, max_steps=10000)
            res = np.reshape(res,
                             (poly.indices.shape[1], poly.indices.shape[1]),
                             order='C')
            assert np.allclose(res, np.eye(degree + 1), atol=1e-6)
Code Example #8
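Computes the conditional mean and variance of an expansion with the first and third variables fixed, and validates both against a tensor-product quadrature rule over the remaining active variable.
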
    def test_conditional_moments_of_polynomial_chaos_expansion(self):
        num_vars = 3
        degree = 2
        inactive_idx = [0, 2]
        np.random.seed(1)
        # keep variables on canonical domain to make constructing
        # tensor product quadrature rule, used for testing, easier
        var = [stats.uniform(-1, 2), stats.beta(2, 2, -1, 2), stats.norm(0, 1)]
        quad_rules = [
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0),
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=1, beta_poly=1),
            partial(gauss_hermite_pts_wts_1D)
        ]
        var_trans = AffineRandomVariableTransformation(var)
        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))
        poly.set_coefficients(
            np.arange(poly.indices.shape[1], dtype=float)[:, np.newaxis])

        fixed_samples = np.array(
            [[vv.rvs() for vv in np.array(var)[inactive_idx]]]).T
        mean, variance = conditional_moments_of_polynomial_chaos_expansion(
            poly, fixed_samples, inactive_idx, True)

        active_idx = np.setdiff1d(np.arange(num_vars), inactive_idx)
        random_samples, weights = get_tensor_product_quadrature_rule(
            [2 * degree] * len(active_idx), len(active_idx),
            [quad_rules[ii] for ii in range(num_vars) if ii in active_idx])
        samples = get_all_sample_combinations(fixed_samples, random_samples)
        temp = samples[len(inactive_idx):].copy()
        samples[inactive_idx] = samples[:len(inactive_idx)]
        samples[active_idx] = temp

        true_mean = (poly(samples).T.dot(weights).T)
        true_variance = ((poly(samples)**2).T.dot(weights).T) - true_mean**2
        assert np.allclose(true_mean, mean)
        assert np.allclose(true_variance, variance)
Code Example #9
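Approximates the square of a lognormal variable with a Hermite expansion in the underlying Gaussian (log) variable, fitting the coefficients by median (0.5-quantile) regression on log-transformed training samples, and compares the expansion mean with a large Monte Carlo estimate.
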
    def test_hermite_basis_for_lognormal_variables(self):
        def function(x):
            return (x.T)**2

        degree = 2
        # mu_g, sigma_g = 1e1, 0.1
        mu_l, sigma_l = 2.1e11, 2.1e10
        mu_g = np.log(mu_l**2 / np.sqrt(mu_l**2 + sigma_l**2))
        sigma_g = np.sqrt(np.log(1 + sigma_l**2 / mu_l**2))

        lognorm = stats.lognorm(s=sigma_g, scale=np.exp(mu_g))
        # assert np.allclose([lognorm.mean(), lognorm.std()], [mu_l, sigma_l])

        univariate_variables = [stats.norm(mu_g, sigma_g)]
        var_trans = AffineRandomVariableTransformation(univariate_variables)
        pce = PolynomialChaosExpansion()
        pce_opts = define_poly_options_from_variable_transformation(var_trans)
        pce.configure(pce_opts)
        pce.set_indices(
            compute_hyperbolic_indices(var_trans.num_vars(), degree, 1.))

        nsamples = int(1e6)
        samples = lognorm.rvs(nsamples)[None, :]
        values = function(samples)

        ntrain_samples = 20
        train_samples = lognorm.rvs(ntrain_samples)[None, :]
        train_values = function(train_samples)
        from pyapprox.quantile_regression import solve_quantile_regression
        coef = solve_quantile_regression(0.5,
                                         np.log(train_samples),
                                         train_values,
                                         pce.basis_matrix,
                                         normalize_vals=True)
        pce.set_coefficients(coef)
        assert np.allclose(pce.mean(), values.mean(), rtol=1e-3)
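
The commented-out assertion near the top of this example checks the standard moment-matching map between the lognormal moments (mu_l, sigma_l) and the parameters (mu_g, sigma_g) of the underlying Gaussian. A standalone verification of those relations, using only scipy:

import numpy as np
from scipy import stats

mu_l, sigma_l = 2.1e11, 2.1e10
mu_g = np.log(mu_l**2 / np.sqrt(mu_l**2 + sigma_l**2))
sigma_g = np.sqrt(np.log(1 + sigma_l**2 / mu_l**2))
# scipy parameterizes lognorm by shape s=sigma_g and scale=exp(mu_g)
lognorm = stats.lognorm(s=sigma_g, scale=np.exp(mu_g))
assert np.allclose([lognorm.mean(), lognorm.std()], [mu_l, sigma_l])
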
Code Example #10
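Builds expansions for variables defined as a function of independent beta variables (rv_function_indpndt_vars) and as a product of independent variables (rv_product_indpndt_vars); with these parameters sqrt(x1*x2) has the mean and variance of a Beta(2*dist_alpha1, 2*dist_beta1) variable, as the quadrature check at the start confirms, which supplies closed-form reference moments.
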
    def test_pce_product_of_beta_variables(self):
        def fun(x):
            return np.sqrt(x.prod(axis=0))[:, None]

        dist_alpha1, dist_beta1 = 1, 1
        dist_alpha2, dist_beta2 = dist_alpha1 + 0.5, dist_beta1
        nvars = 2

        x_1d, w_1d = [], []
        nquad_samples_1d = 100
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, dist_beta1 - 1,
                                       dist_alpha1 - 1)
        x = (x + 1) / 2
        x_1d.append(x)
        w_1d.append(w)
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, dist_beta2 - 1,
                                       dist_alpha2 - 1)
        x = (x + 1) / 2
        x_1d.append(x)
        w_1d.append(w)

        quad_samples = cartesian_product(x_1d)
        quad_weights = outer_product(w_1d)

        mean = fun(quad_samples)[:, 0].dot(quad_weights)
        variance = (fun(quad_samples)[:, 0]**2).dot(quad_weights) - mean**2
        assert np.allclose(mean,
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(variance,
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).var())

        degree = 10
        poly = PolynomialChaosExpansion()
        # the distributions and ranges of the univariate variables are
        # ignored when var_trans.set_identity_maps([0]) is used
        initial_variables = [stats.uniform(0, 1)]
        # TODO get quad rules from initial variables
        quad_rules = [(x, w) for x, w in zip(x_1d, w_1d)]
        univariate_variables = [
            rv_function_indpndt_vars(fun, initial_variables, quad_rules)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(tensor_product_indices([degree]))

        train_samples = (np.linspace(0, np.pi, 101)[None, :] + 1) / 2
        train_vals = train_samples.T
        coef = np.linalg.lstsq(poly.basis_matrix(train_samples),
                               train_vals,
                               rcond=None)[0]
        poly.set_coefficients(coef)
        assert np.allclose(poly.mean(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(poly.variance(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).var())

        poly = PolynomialChaosExpansion()
        initial_variables = [stats.uniform(0, 1)]
        funs = [lambda x: np.sqrt(x)] * nvars
        quad_rules = [(x, w) for x, w in zip(x_1d, w_1d)]
        # TODO get quad rules from initial variables
        univariate_variables = [
            rv_product_indpndt_vars(funs, initial_variables, quad_rules)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(tensor_product_indices([degree]))

        train_samples = (np.linspace(0, np.pi, 101)[None, :] + 1) / 2
        train_vals = train_samples.T
        coef = np.linalg.lstsq(poly.basis_matrix(train_samples),
                               train_vals,
                               rcond=None)[0]
        poly.set_coefficients(coef)
        assert np.allclose(poly.mean(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(poly.variance(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).var())
Code Example #11
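Expands a composition of polynomials in two uniform variables in an orthonormal Legendre basis via least squares and verifies the closed-form mean (189) and variance; the chain of assertions then rewrites the expansion term by term into scaled orthonormal Legendre basis functions, ending with the variance recovered from the squared coefficients.
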
    def test_composition_of_orthonormal_polynomials(self):
        def fn1(z):
            # return W_1
            return (z[0, :] + 3 * z[0, :]**2)[:, None]

        def fn2(z):
            # return W_2
            return (1 + z[0, :] * z[1, :])[:, None]

        def fn3(z):
            # z is just random variables
            return z[0:1, :].T + 35 * (3 * fn1(z)**2 -
                                       1) + 3 * z[0:1, :].T * fn2(z)

        def fn3_trans(x):
            """
            x is z_1, W_1, W_2
            """
            return (x[0:1, :] + 35 * (3 * x[1:2, :]**2 - 1) +
                    3 * x[0:1, :] * x[2:3, :])

        nvars = 2
        samples = np.random.uniform(-1, 1, (nvars, 100))
        values = fn3(samples)

        indices = compute_hyperbolic_indices(nvars, 4, 1)
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-1, 2), nvars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(indices)
        basis_mat = poly.basis_matrix(samples)
        coef = np.linalg.lstsq(basis_mat, values, rcond=None)[0]
        mean = coef[0]
        variance = np.sum(coef[1:]**2)
        assert np.allclose(mean, 189)
        assert np.allclose(variance, 2595584 / 15 - mean**2)

        samples = np.random.uniform(-1, 1, (nvars, 100000))
        basis_mat = poly.basis_matrix(samples)
        x = samples[0:1, :].T
        y = samples[1:2, :].T

        assert np.allclose(
            basis_mat.dot(coef),
            -35 + 4 * x + 3 * x**2 * y + 105 * x**2 + 630 * x**3 + 945 * x**4)
        assert np.allclose(2 / np.sqrt(5) * basis_mat[:, 3:4], (3 * x**2 - 1))
        assert np.allclose(
            basis_mat.dot(coef), 4 * x + 3 * x**2 * y +
            2 / np.sqrt(5) * 35 * basis_mat[:, 3:4] + 630 * x**3 + 945 * x**4)
        assert np.allclose(
            basis_mat.dot(coef),
            382 * x + 3 * x**2 * y + 2 / np.sqrt(5) * 35 * basis_mat[:, 3:4] +
            2 / np.sqrt(7) * 126 * basis_mat[:, 6:7] + 945 * x**4)
        assert np.allclose(
            basis_mat.dot(coef),
            382 * x + 3 * x**2 * y + 2 / np.sqrt(5) * 35 * basis_mat[:, 3:4] +
            2 / np.sqrt(7) * 126 * basis_mat[:, 6:7] +
            8 / np.sqrt(9) * 27 * basis_mat[:, 10:11] + 810 * x**2 - 81)
        assert np.allclose(
            basis_mat.dot(coef), -81 + 270 + 382 * x + 3 * x**2 * y +
            2 / np.sqrt(5) * 305 * basis_mat[:, 3:4] +
            2 / np.sqrt(7) * 126 * basis_mat[:, 6:7] +
            8 / np.sqrt(9) * 27 * basis_mat[:, 10:11])
        assert np.allclose(
            basis_mat.dot(coef),
            189 + 382 * x + 2 / np.sqrt(5) * 305 * basis_mat[:, 3:4] +
            2 / np.sqrt(7) * 126 * basis_mat[:, 6:7] +
            8 / np.sqrt(9) * 27 * basis_mat[:, 10:11] +
            2 / np.sqrt(15) * basis_mat[:, 8:9] + y)

        assert np.allclose(
            basis_mat.dot(coef),
            189 + 1 / np.sqrt(3) * 382 * basis_mat[:, 1:2] +
            2 / np.sqrt(5) * 305 * basis_mat[:, 3:4] +
            2 / np.sqrt(7) * 126 * basis_mat[:, 6:7] +
            8 / np.sqrt(9) * 27. * basis_mat[:, 10:11] +
            2 / np.sqrt(15) * 1.0 * basis_mat[:, 8:9] +
            1 / np.sqrt(3) * 1.0 * basis_mat[:, 2:3])

        assert np.allclose(
            variance, 382**2 / 3 + (2 * 305)**2 / 5 + (2 * 126)**2 / 7 +
            (8 * 27)**2 / 9 + 4 / 15 + 1 / 3)
Code Example #12
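Evaluates the basis matrix of a monomial expansion with deriv_order=1 and compares the values and first derivatives against the hand-computed monomials 1, x, x**2 and their derivatives, after sorting the indices lexicographically.
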
    def test_evaluate_multivariate_monomial_pce(self):
        num_vars = 2
        degree = 2

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            rv_continuous(name="continuous_monomial")(), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        def univariate_quadrature_rule(nn):
            x, w = gauss_jacobi_pts_wts_1D(nn, 0, 0)
            x = (x + 1) / 2.
            return x, w

        samples, weights = get_tensor_product_quadrature_rule(
            degree, num_vars, univariate_quadrature_rule)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        II = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, II]
        poly.set_indices(indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(np.asarray([1 + 0. * x, x, x**2]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 2. * x]).T)

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        assert np.allclose(exact_basis_matrix, basis_matrix)
Code Example #13
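The same value-and-derivative check for a three-variable mixed Legendre–Hermite basis, including the affine maps from each variable's range to the canonical domain; the gradients are additionally verified against finite differences.
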
    def test_evaluate_multivariate_mixed_basis_pce(self):
        degree = 2

        gauss_mean, gauss_var = -1, 4
        univariate_variables = [
            stats.uniform(-1, 2),
            stats.norm(gauss_mean, np.sqrt(gauss_var)),
            stats.uniform(0, 3)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        univariate_quadrature_rules = [
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0),
            gauss_hermite_pts_wts_1D,
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0)
        ]
        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rules,
            var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        indices = sort_indices_lexiographically(indices)
        poly.set_indices(indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})
        vals_basis_matrix = basis_matrix[:samples.shape[1], :]
        inner_products = (vals_basis_matrix.T * weights).dot(vals_basis_matrix)
        assert np.allclose(inner_products, np.eye(basis_matrix.shape[1]))

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :].copy()
            if dd == 0 or dd == 2:
                if dd == 2:
                    # y = x/3
                    # z = 2*y-1=2*x/3-1=2/3*x-3/2*2/3=2/3*(x-3/2)=(x-3/2)/(3/2)
                    loc, scale = 3 / 2, 3 / 2
                    x = (x - loc) / scale
                exact_basis_vals_1d.append(
                    np.asarray([1 + 0. * x, x, 0.5 * (3. * x**2 - 1)]).T)
                exact_basis_derivs_1d.append(
                    np.asarray([0. * x, 1.0 + 0. * x, 3. * x]).T)
                exact_basis_vals_1d[-1] /= np.sqrt(
                    1. / (2 * np.arange(degree + 1) + 1))
                exact_basis_derivs_1d[-1] /= np.sqrt(
                    1. / (2 * np.arange(degree + 1) + 1))
                # account for affine transformation in derivs
                if dd == 2:
                    exact_basis_derivs_1d[-1] /= scale
            if dd == 1:
                loc, scale = gauss_mean, np.sqrt(gauss_var)
                x = (x - loc) / scale
                exact_basis_vals_1d.append(
                    np.asarray([1 + 0. * x, x, x**2 - 1]).T)
                exact_basis_derivs_1d.append(
                    np.asarray([0. * x, 1.0 + 0. * x, 2. * x]).T)
                exact_basis_vals_1d[-1] /= np.sqrt(
                    sp.factorial(np.arange(degree + 1)))
                exact_basis_derivs_1d[-1] /= np.sqrt(
                    sp.factorial(np.arange(degree + 1)))
                # account for affine transformation in derivs
                exact_basis_derivs_1d[-1] /= scale

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[2][:, 1],
            exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[2][:, 1],
            exact_basis_vals_1d[1][:, 1] * exact_basis_vals_1d[2][:, 1],
            exact_basis_vals_1d[2][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x, 0 * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x,
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[2][:, 1],
                 0. * x, 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x, 0 * x,
                 exact_basis_derivs_1d[1][:, 1] * exact_basis_vals_1d[0][:, 1],
                 exact_basis_derivs_1d[1][:, 2], 0. * x,
                 exact_basis_derivs_1d[1][:, 1] * exact_basis_vals_1d[2][:, 1],
                 0. * x
             ]).T))

        # x3 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, 0. * x, exact_basis_derivs_1d[2][:, 1], 0 * x,
                 0 * x, 0 * x,
                 exact_basis_derivs_1d[2][:, 1] * exact_basis_vals_1d[0][:, 1],
                 exact_basis_derivs_1d[2][:, 1] * exact_basis_vals_1d[1][:, 1],
                 exact_basis_derivs_1d[2][:, 2]
             ]).T))

        func = poly.basis_matrix
        exact_basis_matrix_derivs = exact_basis_matrix[samples.shape[1]:]
        basis_matrix_derivs_fd = np.empty_like(exact_basis_matrix_derivs)
        for ii in range(samples.shape[1]):
            basis_matrix_derivs_fd[ii::samples.shape[1], :] = approx_fprime(
                samples[:, ii:ii + 1], func)

        assert np.allclose(exact_basis_matrix_derivs,
                           basis_matrix_derivs_fd,
                           atol=1e-7,
                           rtol=1e-7)
        assert np.allclose(exact_basis_matrix, basis_matrix)
Code Example #14
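The same value-and-derivative check for a two-variable Hermite basis, together with an orthonormality check under the tensor-product Gauss–Hermite rule.
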
    def test_evaluate_multivariate_hermite_pce(self):
        num_vars = 2
        degree = 2

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.norm(0, 1), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, gauss_hermite_pts_wts_1D)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        II = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, II]
        poly.set_indices(indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})

        vals_basis_matrix = basis_matrix[:samples.shape[1], :]
        inner_products = (vals_basis_matrix.T * weights).dot(vals_basis_matrix)
        assert np.allclose(inner_products, np.eye(basis_matrix.shape[1]))

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(np.asarray([1 + 0. * x, x, x**2 - 1]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 2. * x]).T)
            exact_basis_vals_1d[-1] /= np.sqrt(
                sp.factorial(np.arange(degree + 1)))
            exact_basis_derivs_1d[-1] /= np.sqrt(
                sp.factorial(np.arange(degree + 1)))

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        assert np.allclose(exact_basis_matrix, basis_matrix)
Code Example #15
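The same check for a two-variable Legendre basis after deleting selected columns of the index set, so that the maximum degree differs between dimensions and some univariate degrees are missing away from the ends.
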
    def test_evaluate_multivariate_jacobi_pce(self):
        num_vars = 2
        degree = 2

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-1, 2), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        samples, weights = get_tensor_product_quadrature_rule(
            degree - 1, num_vars, np.polynomial.legendre.leggauss)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        II = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, II]
        # remove the [0,2] index so max_level is not the same for every
        # dimension; also remove [1,0] and [1,1] to make sure we can handle
        # index sets with missing univariate degrees away from the ends
        J = [1, 5, 4]
        reduced_indices = np.delete(indices, J, axis=1)
        poly.set_indices(reduced_indices)

        basis_matrix = poly.basis_matrix(samples, {'deriv_order': 1})

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(
                np.asarray([1 + 0. * x, x, 0.5 * (3. * x**2 - 1)]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 3. * x]).T)
            exact_basis_vals_1d[-1] /= np.sqrt(1. /
                                               (2 * np.arange(degree + 1) + 1))
            exact_basis_derivs_1d[-1] /= np.sqrt(
                1. / (2 * np.arange(degree + 1) + 1))

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        exact_basis_matrix = np.delete(exact_basis_matrix, J, axis=1)

        assert np.allclose(exact_basis_matrix, basis_matrix)