Example #1
    def test_gq_modification_global(self):
        """ gq_modification for a global integral
        Testing of gq_modification on an interval [-1,1] with specified
        integrand.
        """

        alpha = -1. + 6 * np.random.rand()
        beta = -1. + 6 * np.random.rand()
        J = JacobiPolynomials(alpha=alpha, beta=beta)

        delta = 1e-8
        N = 10

        G = np.zeros([N, N])

        for n in range(N):
            for m in range(N):
                # Integrate the entire integrand
                integrand = lambda x: J.eval(x, m).flatten() * J.eval(
                    x, n).flatten() * jacobi_weight_normalized(x, alpha, beta)
                G[n, m] = quad.gq_modification(integrand,
                                               -1,
                                               1,
                                               ceil(1 + (n + m) / 2.),
                                               gamma=[alpha, beta])

        errstr = 'Failed for (N,alpha,beta) = ({0:d}, {1:1.6f}, {2:1.6f})'.format(
            N, alpha, beta)

        self.assertAlmostEqual(np.linalg.norm(G - np.eye(N), ord=np.inf),
                               0,
                               delta=delta,
                               msg=errstr)
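Note: for the Legendre case (alpha = beta = 0), where jacobi_weight_normalized reduces to the constant 1/2, the same orthonormality check can be reproduced with NumPy alone. A minimal sketch (not using the library) that builds the Gram matrix by Gauss-Legendre quadrature:

import numpy as np
from numpy.polynomial import legendre

N = 10
# An N-point Gauss-Legendre rule is exact for degree <= 2N - 1, which covers
# every product p_n * p_m with n, m < N.
x, w = legendre.leggauss(N)
w = w / 2.0                    # normalized Legendre weight is the constant 1/2

# P_0..P_{N-1} at the nodes, rescaled to be orthonormal under dx/2:
# q_n = sqrt(2n + 1) * P_n.
V = legendre.legvander(x, N - 1) * np.sqrt(2 * np.arange(N) + 1)

G = V.T @ np.diag(w) @ V       # Gram matrix, should be the identity
print(np.linalg.norm(G - np.eye(N), ord=np.inf))    # numerically zero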
Example #2
    def test_ratio(self):
        """ Evaluation of orthogonal polynomial ratios.  """

        alpha = -1. + 10*np.random.rand(1)[0]
        beta = -1. + 10*np.random.rand(1)[0]
        J = JacobiPolynomials(alpha=alpha, beta=beta)

        N = int(np.ceil(60*np.random.rand(1)))
        x = (1 + 5*np.random.rand(1)) * (1 + np.random.rand(50))
        y = (1 + 5*np.random.rand(1)) * (-1 - np.random.rand(50))
        x = np.concatenate([x, y])

        P = J.eval(x, range(N+1))
        rdirect = np.zeros([x.size, N+1])
        rdirect[:, 0] = P[:, 0]
        rdirect[:, 1:] = P[:, 1:]/P[:, :-1]

        r = J.r_eval(x, range(N+1))

        delta = 1e-6
        errs = np.abs(r-rdirect)
        i, j = np.where(errs > delta)[:2]
        if i.size > 0:
            errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, n={2:d}, x={3:1.6f}'.format(alpha, beta, j[0], x[i[0]])
        else:
            errstr = ''

        self.assertAlmostEqual(np.linalg.norm(errs, ord=np.inf), 0, delta=delta, msg=errstr)
Example #3
    def __init__(self,
                 alpha=None,
                 beta=None,
                 mean=None,
                 stdev=None,
                 dim=None,
                 domain=None,
                 bounds=None):

        # Convert mean/stdev inputs to alpha/beta
        if mean is not None and stdev is not None:
            if alpha is not None or beta is not None:
                raise ValueError('Cannot simultaneously specify alpha/beta '
                                 'parameters and mean/stdev parameters')

            alpha, beta = self._convert_meanstdev_to_alphabeta(mean, stdev)

        alpha, beta = self._convert_alphabeta_to_iterable(alpha, beta)

        if (domain is not None) and (bounds is not None):
            raise ValueError(
                "Inputs 'domain' and 'bounds' cannot both be given")
        elif bounds is not None:
            domain = bounds
        # Sets self.dim, self.alpha, self.beta, self.domain
        self._detect_dimension(alpha, beta, dim, domain)

        for qd in range(self.dim):
            assert self.alpha[qd] > 0 and self.beta[qd] > 0, \
                "Parameter vectors alpha and beta must have strictly " \
                "positive components"

        assert self.dim > 0, "Dimension must be positive"
        assert self.domain.shape == (2, self.dim)

        # Construct affine map transformations

        # Standard domain is [0,1]^dim
        self.standard_domain = np.ones([2, self.dim])
        self.standard_domain[0, :] = 0.

        # Low-level routines use Jacobi Polynomials, which operate on [-1,1]
        # instead of the standard Beta domain of [0,1]
        self.poly_domain = np.ones([2, self.dim])
        self.poly_domain[0, :] = -1.
        self.transform_standard_dist_to_poly = AffineTransform(
            domain=self.standard_domain, image=self.poly_domain)

        self.transform_to_standard = AffineTransform(
            domain=self.domain, image=self.standard_domain)

        # Construct 1D polynomial families
        Js = []
        for qd in range(self.dim):
            Js.append(
                JacobiPolynomials(alpha=self.beta[qd] - 1.,
                                  beta=self.alpha[qd] - 1.))
        self.polys = TensorialPolynomials(polys1d=Js)

        self.indices = None
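The mean/stdev branch above relies on the standard moment relations of a Beta(alpha, beta) distribution on [0, 1]: mean = alpha/(alpha+beta) and variance = alpha*beta / ((alpha+beta)^2 (alpha+beta+1)). A minimal, illustrative sketch of that conversion (the helper name meanstdev_to_alphabeta below is hypothetical; the actual _convert_meanstdev_to_alphabeta may also handle vector inputs and domain scaling):

import numpy as np

def meanstdev_to_alphabeta(mu, sigma):
    """Moment-match a Beta(alpha, beta) law on [0, 1] to a given mean/stdev."""
    # Requires 0 < mu < 1 and sigma**2 < mu * (1 - mu).
    t = mu * (1.0 - mu) / sigma**2 - 1.0
    if t <= 0:
        raise ValueError('stdev is too large for a Beta law with this mean')
    return mu * t, (1.0 - mu) * t

alpha, beta = meanstdev_to_alphabeta(0.3, 0.1)
# Sanity check: recover the requested moments.
mu = alpha / (alpha + beta)
var = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1))
print(mu, np.sqrt(var))    # 0.3, 0.1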
Example #4
    def test_tensor_gauss_quadrature(self):
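        """Orthonormality of a tensor-product Gauss quadrature rule."""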

        dim = 3
        alpha = 10*np.random.rand(dim)[0]
        beta = 10*np.random.rand(dim)[0]

        N = np.random.randint(5, 10)
        Inds = LpSet(dim=dim, order=N-1, p=np.inf)

        J = [None,]*dim
        for q in range(dim):
            J[q] = JacobiPolynomials(alpha=alpha, beta=beta)

        P = TensorialPolynomials(polys1d=J)

        x, w = P.tensor_gauss_quadrature(N)

        V = P.eval(x, Inds.get_indices())

        G = V.T @ np.diag(w) @ V

        err = np.linalg.norm(G - np.eye(G.shape[0]), ord='fro')/G.shape[0]

        msg = "Failed for (N, alpha, beta)=({0:d}, {1:1.6f}, {2:1.6f})".format(N, alpha, beta)

        delta = 1e-8
        self.assertAlmostEqual(err, 0, delta=delta, msg=msg)
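A tensor-product Gauss rule combines 1D nodes on a grid and multiplies the 1D weights (Example #11 below builds such a grid manually). A minimal NumPy-only sketch of that pattern on [-1, 1]^2, checked against a separable monomial it must integrate exactly:

import numpy as np
from numpy.polynomial import legendre

M1, M2 = 5, 7
x1, w1 = legendre.leggauss(M1)
x2, w2 = legendre.leggauss(M2)

# Tensor-product nodes (shape (M1*M2, 2)) and matching product weights
X1, X2 = np.meshgrid(x1, x2)
nodes = np.vstack([X1.flatten(), X2.flatten()]).T
W1, W2 = np.meshgrid(w1, w2)
weights = (W1 * W2).flatten()

# Exact value: int x^2 dx * int y^4 dy over [-1, 1]^2 = (2/3) * (2/5)
approx = np.dot(weights, nodes[:, 0]**2 * nodes[:, 1]**4)
print(approx, (2 / 3) * (2 / 5))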
Example #5
    def test_gq(self):
        """Gaussian quadrature integration accuracy"""

        alpha = -1. + 10*np.random.rand(1)[0]
        beta = -1. + 10*np.random.rand(1)[0]

        J = JacobiPolynomials(alpha=alpha, beta=beta)
        N = int(np.ceil(60*np.random.rand(1)))

        x, w = J.gauss_quadrature(N)
        w /= w.sum()    # Force probability measure

        V = J.eval(x, range(2*N))

        integrals = np.dot(w, V)
        integrals[0] -= V[0, 0]  # Exact value

        self.assertAlmostEqual(np.linalg.norm(integrals, ord=np.inf), 0.)
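For the Legendre special case this accuracy claim can be spot-checked without the library: after normalizing the weights to a probability measure, an N-point rule reproduces every moment of degree up to 2N - 1. A minimal sketch:

import numpy as np
from numpy.polynomial import legendre

N = 7
x, w = legendre.leggauss(N)
w = w / w.sum()                                  # probability measure on [-1, 1]

for k in range(2 * N):                           # degrees 0 .. 2N - 1
    exact = 0.0 if k % 2 else 1.0 / (k + 1)      # int_{-1}^{1} x^k dx / 2
    assert abs(np.dot(w, x**k) - exact) < 1e-13, k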
Example #6
    def test_idist_legendre(self):
        """Evaluation of Legendre induced distribution function."""

        J = JacobiPolynomials(alpha=0., beta=0.)

        n = int(np.ceil(25*np.random.rand(1))[0])
        M = 25
        x = -1. + 2*np.random.rand(M)

        # JacobiPolynomials method
        F1 = J.idist(x, n)

        y, w = J.gauss_quadrature(n+1)

        # Exact: integrate density
        F2 = np.zeros(F1.shape)
        for xind, xval in enumerate(x):
            yquad = (y+1)/2.*(xval+1) - 1.
            F2[xind] = np.dot(w, J.eval(yquad, n)**2) * (xval+1)/2

        self.assertAlmostEqual(np.linalg.norm(F1-F2, ord=np.inf), 0.)
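The quantity under test is the induced distribution function F_n(x) = int_{-1}^{x} p_n(y)^2 dmu(y), with p_n orthonormal under the uniform measure dmu = dy/2. A self-contained NumPy sketch that evaluates F_n by the same change of variables used above and confirms F_n(1) = 1:

import numpy as np
from numpy.polynomial import legendre

def idist_legendre(x, n):
    """Induced CDF F_n(x) = int_{-1}^{x} (sqrt(2n+1) P_n(y))**2 dy/2."""
    y, w = legendre.leggauss(n + 1)          # exact: integrand has degree 2n
    Pn = legendre.Legendre.basis(n)
    x = np.atleast_1d(np.asarray(x, dtype=float))
    F = np.zeros_like(x)
    for i, xv in enumerate(x):
        yq = (y + 1) / 2 * (xv + 1) - 1      # map [-1, 1] -> [-1, xv]
        F[i] = np.dot(w, (2 * n + 1) * Pn(yq)**2) * (xv + 1) / 4
    return F

print(idist_legendre(1.0, n=4))              # [1.]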
Example #7
    def test_n0(self):
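        """Fast induced distribution inverse against the exact inverse for small n."""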

        alpha = -1. + 10 * np.random.rand(1)[0]
        beta = -1. + 10 * np.random.rand(1)[0]
        J = JacobiPolynomials(alpha=alpha, beta=beta)

        n = np.random.randint(5)
        u = np.random.rand(1)[0]
        correct_x = J.idistinv(u, n)
        x = J.fidistinv(u, n)

        delta = 1e-2
        errs = np.abs(x - correct_x)

        errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, u={2:1.6f}, n = {3:d}'.format(
            alpha, beta, u, n)

        self.assertAlmostEqual(np.linalg.norm(errs, ord=np.inf),
                               0,
                               delta=delta,
                               msg=errstr)
Example #8
    def test_fidist_jacobi(self):
        """Fast induced sampling routine for Jacobi polynomials."""

        alpha = np.random.random()*11 - 1.
        beta = np.random.random()*11 - 1.

        nmax = 4
        M = 10
        u = np.random.random(M)

        J = JacobiPolynomials(alpha=alpha, beta=beta)

        delta = 1e-2
        for n in range(nmax)[::-1]:
            x0 = J.idistinv(u, n)
            x1 = J.fidistinv(u, n)

            ind = np.where(np.abs(x0-x1) > delta)[:2][0]
            if ind.size > 0:
                errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, n={2:d}, u={3:1.6f}'.format(alpha, beta, n, u[ind[0]])
            else:
                errstr = ''

            self.assertAlmostEqual(np.linalg.norm(x0-x1, ord=np.inf), 0., delta=delta, msg=errstr)

        n = np.array(np.round(np.random.random(M)), dtype=int)
        x0 = J.idistinv(u, n)
        x1 = J.fidistinv(u, n)
        ind = np.where(np.abs(x0-x1) > delta)[:2][0]
        if ind.size > 0:
            errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, n={2:d}, u={3:1.6f}'.format(alpha, beta, n[ind[0]], u[ind[0]])
        else:
            errstr = ''

        self.assertAlmostEqual(np.linalg.norm(x0-x1, ord=np.inf), 0., delta=delta, msg=errstr)
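Both idistinv and its fast surrogate fidistinv solve F_n(x) = u for x, i.e. they invert an induced distribution function. The sketch below illustrates only that inverse-transform step, with plain bisection for the Legendre case; it is not the library's algorithm, which uses much faster dedicated approximations. The helper idist_legendre repeats a scalar version of the sketch under Example #6 so the snippet stays self-contained:

import numpy as np
from numpy.polynomial import legendre

def idist_legendre(x, n):
    """Induced CDF F_n(x) for the uniform (Legendre) measure on [-1, 1]."""
    y, w = legendre.leggauss(n + 1)
    Pn = legendre.Legendre.basis(n)
    yq = (y + 1) / 2 * (x + 1) - 1
    return np.dot(w, (2 * n + 1) * Pn(yq)**2) * (x + 1) / 4

def idistinv_bisection(u, n, tol=1e-12):
    """Solve F_n(x) = u by bisection; F_n is continuous and nondecreasing."""
    lo, hi = -1.0, 1.0
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        lo, hi = (mid, hi) if idist_legendre(mid, n) < u else (lo, mid)
    return 0.5 * (lo + hi)

u, n = 0.3, 4
x = idistinv_bisection(u, n)
print(x, idist_legendre(x, n))               # second value should be ~0.3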
Example #9
    def test_derivative_expansion(self):
        """
        Expansion coefficients of derivatives.
        """

        alpha = 10*np.random.rand(1)[0]
        beta = 10*np.random.rand(1)[0]
        J = JacobiPolynomials(alpha=alpha, beta=beta)

        N = 13
        K = 11

        x, w = J.gauss_quadrature(2*N)
        V = J.eval(x, range(K+1))

        for s in range(4):
            C = J.derivative_expansion(s, N, K)

            Vd = J.eval(x, range(N+1), d=s)
            C2 = Vd.T @ np.diag(w) @ V

            reserror = np.linalg.norm(C-C2)
            msg = "Failed for (s, alpha, beta)=({0:d}, {1:1.6f}, {2:1.6f})".format(s, alpha, beta)
            delta = 1e-8
            self.assertAlmostEqual(reserror, 0, delta=delta, msg=msg)
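The matrix C collects connection coefficients: row n expands the s-th derivative of the degree-n orthonormal polynomial in the basis itself, p_n^(s) = sum_k C[n, k] p_k, which is what the quadrature product Vd.T @ np.diag(w) @ V computes. A Legendre-only sketch of the same identity, choosing K >= N so the truncated expansion is exact:

import numpy as np
from numpy.polynomial import legendre

N, K, s = 6, 8, 1                      # expand q_0..q_N derivatives in q_0..q_K
x, w = legendre.leggauss(N + K)        # exact for every integrand degree here
w = w / 2.0                            # probability measure dx/2

# Orthonormal Legendre basis under dx/2: q_k = sqrt(2k + 1) * P_k
V = legendre.legvander(x, K) * np.sqrt(2 * np.arange(K + 1) + 1)

# s-th derivatives of q_0..q_N at the quadrature nodes
Vd = np.zeros((x.size, N + 1))
for n in range(N + 1):
    qn = legendre.Legendre.basis(n) * np.sqrt(2 * n + 1)
    Vd[:, n] = qn.deriv(s)(x)

C = Vd.T @ np.diag(w) @ V              # connection coefficients, (N+1) x (K+1)

# Reconstruction check: C applied to the basis reproduces the derivatives.
print(np.linalg.norm(C @ V.T - Vd.T))  # numerically zero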
Example #10
    def test_idist_laguerre(self):
        """Evaluation of Laguerre induced distribution function."""

        rho = 11 * np.random.random() - 1
        L = LaguerrePolynomials(rho=rho)

        n = int(np.ceil(10 * np.random.rand(1))[0])
        M = 25
        x = 4 * (n + 1) * np.random.rand(M)

        # LaguerrePolynomials method
        F1 = L.idist(x, n)

        J = JacobiPolynomials(alpha=0., beta=rho, probability_measure=False)

        y, w = J.gauss_quadrature(500)

        # Exact: integrate density
        F2 = np.zeros(F1.shape)

        for xind, xval in enumerate(x):
            yquad = (y + 1) / 2. * xval  # Map [-1,1] to [0, xval]
            wquad = w * (xval / 2)**(1 + rho)
            F2[xind] = np.dot(
                wquad,
                np.exp(-yquad) / sp.gamma(1 + rho) *
                L.eval(yquad, n).flatten()**2)

        delta = 1e-3
        ind = np.where(np.abs(F1 - F2) > delta)[:2][0]
        if ind.size > 0:
            errstr = 'Failed for rho={0:1.3f}, n={1:d}'.format(rho, n)
        else:
            errstr = ''

        self.assertAlmostEqual(np.linalg.norm(F1 - F2, ord=np.inf),
                               0.,
                               delta=delta,
                               msg=errstr)
Example #11
    def test_gq(self):
        """
        Tests construction of a bivariate Gauss quadrature rule.
        """

        a1 = 10 * np.random.rand(1)[0]
        b1 = 10 * np.random.rand(1)[0]
        a2 = 10 * np.random.rand(1)[0]
        b2 = 10 * np.random.rand(1)[0]

        M1 = 1 + int(np.ceil(30 * np.random.random()))
        M2 = 1 + int(np.ceil(30 * np.random.random()))

        bounds1 = np.sort(np.random.randn(2))
        bounds2 = np.sort(np.random.randn(2))

        # First "manually" create Gauss quadrature grid
        J1 = JacobiPolynomials(alpha=b1 - 1, beta=a1 - 1)
        J2 = JacobiPolynomials(alpha=b2 - 1, beta=a2 - 1)

        x1, w1 = J1.gauss_quadrature(M1)
        x1 = (x1 + 1) / 2 * (bounds1[1] - bounds1[0]) + bounds1[0]
        x2, w2 = J2.gauss_quadrature(M2)
        x2 = (x2 + 1) / 2 * (bounds2[1] - bounds2[0]) + bounds2[0]

        X = np.meshgrid(x1, x2)
        x = np.vstack([X[0].flatten(), X[1].flatten()]).T
        W = np.meshgrid(w1, w2)
        w = W[0].flatten() * W[1].flatten()

        p1 = BetaDistribution(alpha=a1, beta=b1, bounds=bounds1)
        p2 = BetaDistribution(alpha=a2, beta=b2, bounds=bounds2)

        # order is irrelevant
        pce = PolynomialChaosExpansion(order=3,
                                       distribution=[p1, p2],
                                       sampling='gq',
                                       M=[M1, M2])
        pce.generate_samples()

        xerr = np.linalg.norm(pce.samples - x)
        werr = np.linalg.norm(pce.weights - w)

        msg = 'Failed for (M1, alpha1, beta1)=({0:d}, '\
              '{1:1.6f}, {2:1.6f}), (M2, alpha2, beta2)=({3:d}, '\
              '{4:1.6f}, {5:1.6f})'.format(M1, a1, b1, M2, a2, b2)

        delta = 1e-10
        self.assertAlmostEqual(xerr, 0, delta=delta, msg=msg)
        self.assertAlmostEqual(werr, 0, delta=delta, msg=msg)
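The alpha/beta swap in the manual construction (JacobiPolynomials(alpha=b1 - 1, beta=a1 - 1) for a Beta(a1, b1) distribution) comes from mapping the Jacobi weight (1-t)^(b-1) (1+t)^(a-1) on [-1, 1] onto the Beta density x^(a-1) (1-x)^(b-1) on [0, 1]. A small SciPy-based sanity sketch of that correspondence, checking that the mapped rule reproduces the Beta mean on arbitrary bounds:

import numpy as np
from scipy.special import roots_jacobi

a, b = 2.5, 4.0                  # Beta(a, b) shape parameters
L, U = -0.7, 1.3                 # bounds of the scaled Beta distribution
M = 6

# Gauss-Jacobi rule for the weight (1-t)**(b-1) * (1+t)**(a-1) on [-1, 1]
t, w = roots_jacobi(M, b - 1, a - 1)
w = w / w.sum()                  # normalize to a probability measure
x = (t + 1) / 2 * (U - L) + L    # map [-1, 1] onto [L, U]

# Degree-1 polynomials are integrated exactly, so the rule reproduces the mean
print(np.dot(w, x), L + (U - L) * a / (a + b))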
Example #12
    def test_gq_modification_composite(self):
        """ gq_modification using a composite strategy
        Testing of gq_modification on an interval [-1,1] using a composite
        quadrature rule over a partition of [-1,1].
        """

        alpha = -1. + 6 * np.random.rand()
        beta = -1. + 6 * np.random.rand()
        J = JacobiPolynomials(alpha=alpha, beta=beta)

        delta = 5e-6
        N = 10

        G = np.zeros([N, N])

        # Integrate just the weight function. We'll use modifications for the polynomial part
        integrand = lambda x: jacobi_weight_normalized(x, alpha, beta)

        for n in range(N):
            for m in range(N):

                coeffs = J.leading_coefficient(max(n, m) + 1)
                if n != m:
                    zeros = np.sort(
                        np.hstack((J.gauss_quadrature(n)[0],
                                   J.gauss_quadrature(m)[0])))
                    quadzeros = np.zeros(0)
                    leading_coefficient = coeffs[n] * coeffs[m]
                else:
                    zeros = np.zeros(0)
                    quadzeros = J.gauss_quadrature(n)[0]
                    leading_coefficient = coeffs[n]**2

                demarcations = np.sort(zeros)
                D = demarcations.size

                subintervals = np.zeros([D + 1, 4])
                if D == 0:
                    subintervals[0, :] = [-1, 1, beta, alpha]
                else:
                    subintervals[0, :] = [-1, demarcations[0], beta, 0]
                    for q in range(D - 1):
                        subintervals[q + 1, :] = [
                            demarcations[q], demarcations[q + 1], 0, 0
                        ]

                    subintervals[D, :] = [demarcations[-1], 1, 0, alpha]

                G[n, m] = quad.gq_modification_composite(
                    integrand,
                    -1,
                    1,
                    20,
                    subintervals=subintervals,
                    roots=zeros,
                    quadroots=quadzeros,
                    leading_coefficient=leading_coefficient)

        errstr = 'Failed for (N,alpha,beta) = ({0:d}, {1:1.6f}, {2:1.6f})'.format(
            N, alpha, beta)

        self.assertAlmostEqual(np.linalg.norm(G - np.eye(N), ord=np.inf),
                               0,
                               delta=delta,
                               msg=errstr)