Example #1
def markov_stiltjies_initial_guess(u, n, a, b, supp):
    """
    intervals = markov_stiltjies_initial_guess(u, n, a, b, supp)
    
    Uses the Markov-Stieltjes inequalities to provide a bounding interval
    for x, the solution to

      F_n(x) = u,

    where F_n is the order-n induced distribution function associated to the
    measure with three-term recurrence coefficients a, b, having support on
    the real-line interval defined by the length-2 vector supp.
    
    If u is a length-M vector, the output intervals is an (M x 2) matrix, with
    row m the bounding interval for u = u(m).
    """
    n = int(n)
    assert (a.shape[0] == b.shape[0])
    assert (a.shape[0] > 2 * n)

    # Compute quadratic modifications.
    if n > 0:
        x, w = gauss_quadrature(np.hstack((a, np.sqrt(b))), n)
    b[0] = 1
    for k in range(n):
        a, b = quadratic_modification_C(a, b, x[k])
        b[0] = 1

    ## Markov-Stieltjes inequalities
    # Use all the remaining coefficients for the Markov-Stieltjes inequalities
    N = a.shape[0]
    y, w = gauss_quadrature(np.hstack((a, np.sqrt(b))), N)
    if supp[1] > y[-1]:
        X = np.hstack((supp[0], y, supp[1]))
        W = np.hstack((0, np.cumsum(w)))
    else:
        X = np.hstack((supp[0], y, y[-1]))
        W = np.hstack((0, np.cumsum(w)))

    W = W / W[-1]

    W[W > 1] = 1  # Just in case for machine eps issues
    W[-1] = 1

    # MATLAB equivalent: [~, j] = histc(u, W)
    j = np.digitize(u, W)
    jleft = j
    jright = jleft + 2

    # Fix endpoints
    flags = (jleft == (N + 1))
    jleft[flags] = N + 2
    jright[flags] = N + 2

    intervals = np.hstack(
        (X[jleft - 1][:, np.newaxis], X[jright - 1][:, np.newaxis]))
    return intervals
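
A minimal usage sketch, assuming jacobi_recurrence and this function are
importable from the surrounding module: bracket the solutions of F_2(x) = u
for the Legendre (uniform) measure on [-1, 1].

import numpy as np

n = 2
ab = jacobi_recurrence(4 * n + 2, alpha=0., beta=0., probability=True)
a, b = ab[:, :1], ab[:, 1:]**2  # column vectors; pass b itself, not sqrt(b)
u = np.array([0.25, 0.5, 0.75])
intervals = markov_stiltjies_initial_guess(u, n, a, b, np.array([-1., 1.]))
# row m of intervals brackets the x solving F_n(x) = u[m]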
Example #2
def compute_univariate_orthonormal_basis_products(get_recursion_coefficients,
                                                  max_degree1, max_degree2):
    """
    Compute all the products of univariate orthonormal bases and re-express
    them as expansions in the orthonormal basis.
    """
    assert max_degree1 >= max_degree2
    max_degree = max_degree1 + max_degree2
    num_quad_points = max_degree + 1

    recursion_coefs = get_recursion_coefficients(num_quad_points)
    x_quad, w_quad = gauss_quadrature(recursion_coefs, num_quad_points)
    w_quad = w_quad[:, np.newaxis]

    # evaluate the orthonormal basis at the quadrature points. This can
    # be computed once for all degrees up to the maximum degree
    ortho_basis_matrix = evaluate_orthonormal_polynomial_1d(
        x_quad, max_degree, recursion_coefs)

    # compute coefficients of orthonormal basis using pseudo
    # spectral projection
    product_coefs = []
    for d1 in range(max_degree1 + 1):
        for d2 in range(min(d1 + 1, max_degree2 + 1)):
            product_vals = (ortho_basis_matrix[:, d1] *
                            ortho_basis_matrix[:, d2])
            coefs = w_quad.T.dot(product_vals[:, np.newaxis] *
                                 ortho_basis_matrix[:, :d1 + d2 + 1]).T
            product_coefs.append(coefs)
    return product_coefs
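
A hedged usage sketch, assuming jacobi_recurrence is importable: expand
products of orthonormal Legendre polynomials back into the Legendre basis.

from functools import partial

get_coefs = partial(jacobi_recurrence, alpha=0., beta=0., probability=True)
product_coefs = compute_univariate_orthonormal_basis_products(get_coefs, 2, 2)
# product_coefs[k] holds the expansion coefficients of phi_d1*phi_d2 in the
# orthonormal basis, for pairs (d1, d2) with d2 <= d1 in loop order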
Example #3
def predictor_corrector_product_of_functions_of_independent_variables(
        nterms, univariate_quad_rules, funs):
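    # Compute recursion coefficients for the random variable
    # funs[0](X_1)*funs[1](X_2)*...*funs[nvars-1](X_nvars), folding in one
    # independent variable at a time and re-using the Gauss rule generated
    # by the coefficients accumulated so far.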
    nvars = len(univariate_quad_rules)
    assert len(funs) == nvars
    ab = predictor_corrector_function_of_independent_variables(
        nterms, univariate_quad_rules[:2], lambda x: funs[0](x[0, :]) * funs[1]
        (x[1, :]))
    for ii in range(2, nvars):
        x, w = gauss_quadrature(ab, nterms)
        ab = predictor_corrector_function_of_independent_variables(
            nterms, [(x, w), univariate_quad_rules[ii]],
            lambda x: x[0, :] * funs[ii](x[1, :]))
    return ab
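
A hedged usage sketch, assuming gauss_jacobi_pts_wts_1D (Example #5) and the
helper predictor_corrector_function_of_independent_variables are importable:
recursion coefficients for the product Z = X_1*X_2*X_3 of three independent
U(-1, 1) variables.

quad_rules = [gauss_jacobi_pts_wts_1D(100, 0., 0.) for _ in range(3)]
funs = [lambda x: x for _ in range(3)]
ab = predictor_corrector_product_of_functions_of_independent_variables(
    10, quad_rules, funs)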
Example #4
def convert_univariate_lagrange_basis_to_orthonormal_polynomials(
        samples_1d, get_recursion_coefficients):
    """
    Returns
    -------
    coeffs_1d : list [np.ndarray(num_terms_i, num_terms_i)]
        The coefficients of the orthonormal polynomial representation of
        each Lagrange basis. The columns are the coefficients of each
        Lagrange basis. The rows are the coefficients of the degree-i
        orthonormal basis.
    """
    # Get the maximum number of terms in the orthonormal polynomial that
    # are needed to interpolate all the interpolation nodes in samples_1d
    max_num_terms = samples_1d[-1].shape[0]
    num_quad_points = max_num_terms + 1
    # Get the recursion coefficients of the orthonormal basis
    recursion_coeffs = get_recursion_coefficients(num_quad_points)
    # compute the points and weights of the correct quadrature rule
    x_quad, w_quad = gauss_quadrature(recursion_coeffs, num_quad_points)
    # evaluate the orthonormal basis at the quadrature points. This can
    # be computed once for all degrees up to the maximum degree
    ortho_basis_matrix = evaluate_orthonormal_polynomial_1d(
        x_quad, max_num_terms, recursion_coeffs)

    # compute coefficients of orthonormal basis using pseudo spectral projection
    coeffs_1d = []
    w_quad = w_quad[:, np.newaxis]
    for ll in range(len(samples_1d)):
        num_terms = samples_1d[ll].shape[0]
        # evaluate the lagrange basis at the quadrature points
        barycentric_weights_1d = [
            compute_barycentric_weights_1d(samples_1d[ll])
        ]
        values = np.eye(num_terms, dtype=float)
        # Sometimes the following function will raise the error
        # "interpolation abscissa are not unique". This can occur when
        # x_quad has points far enough outside the range of the abscissa,
        # e.g. when the abscissa are Clenshaw-Curtis points and the x_quad
        # points are Gauss-Hermite quadrature points.
        lagrange_basis_vals = multivariate_barycentric_lagrange_interpolation(
            x_quad[np.newaxis, :], samples_1d[ll][np.newaxis, :],
            barycentric_weights_1d, values, np.zeros(1, dtype=int))
        # compute fourier like coefficients
        basis_coeffs = []
        for ii in range(num_terms):
            basis_coeffs.append(
                np.dot(w_quad.T, lagrange_basis_vals *
                       ortho_basis_matrix[:, ii:ii + 1])[0, :])
        coeffs_1d.append(np.asarray(basis_coeffs))
    return coeffs_1d
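
A hedged usage sketch, assuming jacobi_recurrence is importable: express the
Lagrange basis on the nodes {-1, 0, 1} in the orthonormal Legendre basis.

from functools import partial
import numpy as np

samples_1d = [np.array([-1., 0., 1.])]
get_coefs = partial(jacobi_recurrence, alpha=0., beta=0., probability=True)
coeffs_1d = convert_univariate_lagrange_basis_to_orthonormal_polynomials(
    samples_1d, get_coefs)
# coeffs_1d[0][i, j] is the degree-i Legendre coefficient of the j-th
# Lagrange basis function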
Example #5
def gauss_jacobi_pts_wts_1D(num_samples, alpha_poly, beta_poly):
    """
    Return a Gauss-Jacobi quadrature rule that exactly integrates polynomials
    of degree 2*num_samples-1 with respect to the probability density
    function of Beta random variables on [-1,1]

    C*(1+x)**beta_poly*(1-x)**alpha_poly
    
    where
    
    C = 1/(2**(alpha_poly+beta_poly)*beta_fn(beta_poly+1,alpha_poly+1))

    or equivalently
    
    C*(1+x)**(alpha_stat-1)*(1-x)**(beta_stat-1)

    where 

    C = 1/(2**(alpha_stat+beta_stat-2)*beta_fn(alpha_stat,beta_stat))

    Parameters
    ----------
    num_samples : integer
        The number of samples in the quadrature rule

    alpha_poly : float
        The Jacobi parameter alpha = beta_stat-1

    beta_poly : float
        The Jacobi parameter beta = alpha_stat-1 

    Returns
    -------
    x : np.ndarray(num_samples)
        Quadrature samples

    w : np.ndarray(num_samples)
        Quadrature weights
    """
    ab = jacobi_recurrence(num_samples,
                           alpha=alpha_poly,
                           beta=beta_poly,
                           probability=True)
    return gauss_quadrature(ab, num_samples)
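
A quick sanity check (a sketch): with alpha_poly = beta_poly = 0 the Beta
density reduces to the uniform density on [-1, 1], so the rule should
reproduce uniform moments.

import numpy as np

x, w = gauss_jacobi_pts_wts_1D(4, 0., 0.)
assert np.allclose(w.sum(), 1.)           # weights of a probability measure
assert np.allclose(w.dot(x**2), 1. / 3.)  # E[X**2] for U(-1, 1)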
Example #6
def gauss_hermite_pts_wts_1D(num_samples):
    """
    Return Gauss Hermite quadrature rule that exactly integrates polynomials
    of degree 2*num_samples-1 with respect to the Gaussian probability measure
    1/sqrt(2*pi)*exp(-x**2/2)

    Parameters
    ----------
    num_samples : integer
        The number of samples in the quadrature rule

    Returns
    -------
    x : np.ndarray(num_samples)
        Quadrature samples

    w : np.ndarray(num_samples)
        Quadrature weights
    """
    rho = 0.0
    ab = hermite_recurrence(num_samples, rho, probability=True)
    x, w = gauss_quadrature(ab, num_samples)
    return x, w
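
A quick sanity check (a sketch): the rule should reproduce the first moments
of the standard Gaussian.

import numpy as np

x, w = gauss_hermite_pts_wts_1D(5)
assert np.allclose(w.sum(), 1.)      # weights of a probability measure
assert np.allclose(w.dot(x), 0.)     # E[X] = 0
assert np.allclose(w.dot(x**2), 1.)  # E[X**2] = 1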
Example #7
def idist_jacobi(x, n, alph, bet, M=10):
    r"""
    idist_jacobi -- Evaluation of induced distribution

    F = idist_jacobi(x, n, alph, bet, {M = 10})
    
    Evaluates the integral
    
        F = \int_{-1}^x p_n^2(x) \, d\mu(x),

    where mu is the (alph, bet) Jacobi polynomial measure, scaled to be a
    probability distribution on [-1,1], and p_n is the corresponding
    degree-n orthonormal polynomial.

    This function evaluates F via a transformation, measure modification,
    and Gauss quadrature; the final Gauss quadrature rule has M points.
    """

    assert ((alph > -1) and (bet > -1))
    assert (np.all(np.abs(x) <= 1))
    assert np.all(n >= 0)
    if x.ndim == 2:
        assert x.shape[1] == 1
        x = x[:, 0]

    A = int(np.floor(abs(alph)))  # is an integer
    Aa = alph - A

    F = np.zeros((x.shape[0], 1))

    mrs_centroid = medapprox_jacobi(alph, bet, n)
    xreflect = x > mrs_centroid

    if x[xreflect].shape[0] > 0:
        F[xreflect] = 1 - idist_jacobi(-x[xreflect], n, bet, alph, M)

    recursion_coeffs = jacobi_recurrence(n + 1, alph, bet, True)
    # All downstream functions expect the coefficients b themselves,
    # but recursion_coeffs[:, 1] stores np.sqrt(b)
    a = recursion_coeffs[:, 0]
    b = recursion_coeffs[:, 1]**2
    assert b[0] == 1  # To make it a probability measure

    if n > 0:
        # Zeros of p_n
        xn = gauss_quadrature(recursion_coeffs, n)[0]

        # This is the (inverse) n'th root of the squared leading coefficient
        # of p_n. We'll use it for scaling later.

        kn_factor = np.exp(-1. / n * np.sum(np.log(b)))

    for xq in range(x.shape[0]):

        if x[xq] == -1:
            F[xq] = 0
            continue

        if xreflect[xq]:
            continue

        # Recurrence coefficients for quadrature rule
        recursion_coeffs = jacobi_recurrence(2 * n + A + M + 1, 0, bet, True)
        # All downstream functions expect the coefficients b themselves,
        # but recursion_coeffs[:, 1] stores np.sqrt(b)
        a = recursion_coeffs[:, 0:1]
        b = recursion_coeffs[:, 1:]**2
        assert b[0] == 1  # To make it a probability measure

        if n > 0:
            # Transformed zeros of p_n
            un = (2. / (x[xq] + 1.)) * (xn + 1) - 1

        # Track the scaling so that b[0] always equals what it did before
        logfactor = 0

        # Successive quadratic measure modifications
        for j in range(n):
            a, b = quadratic_modification_C(a, b, un[j])

            logfactor += np.log(b[0] * ((x[xq] + 1) / 2)**2 * kn_factor)
            b[0] = 1

        # Linear modification by factors (2 - 1/2*(u+1)*(x+1)),
        # having root u = (3-x)/(1+x)
        root = (3. - x[xq]) / (1. + x[xq])
        for aq in range(A):
            a, b = linear_modification(a, b, root)

            logfactor += np.log(b[0] * 1 / 2 * (x[xq] + 1))
            b[0] = 1

        # M-point Gauss quadrature for evaluation of the auxiliary integral I
        # gauss_quadrature requires np.sqrt(b)
        u, w = gauss_quadrature(np.hstack((a, np.sqrt(b))), M)
        I = np.dot(w.T, (2. - 1. / 2. * (u + 1.) * (x[xq] + 1))**Aa)
        F[xq] = np.exp(logfactor - alph * np.log(2) -
                       betaln(bet + 1, alph + 1) - np.log(bet + 1) +
                       (bet + 1) * np.log((x[xq] + 1) / 2)) * I
    return F
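
A hedged sanity check (a sketch): for n = 0 and alph = bet = 0 the induced
distribution reduces to the uniform CDF on [-1, 1].

import numpy as np

x = np.linspace(-0.9, 0.9, 5)
F = idist_jacobi(x, 0, 0., 0.)
# F[:, 0] should be close to (x + 1) / 2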
Example #8
def predictor_corrector(nterms,
                        measure,
                        lb,
                        ub,
                        interval_size=1,
                        quad_options={}):
    """
    Use predictor corrector method to compute the recursion coefficients
    of a univariate orthonormal polynomial

    Parameters
    ----------
    nterms : integer
        The number of coefficients requested

    measure : callable or tuple
        The function (measure) used to compute orthogonality.
        If the measure is discrete then measure = tuple(xk, pk), where
        xk are the locations of the probability masses and pk are the weights
        

    lb: float
        The lower bound of the measure (can be -infinity)

    ub: float
        The upper bound of the measure (can be infinity)

    interval_size : float
        The size of the initial interval used for quadrature.
        For bounded variables this should be ub-lb. For semi- or unbounded
        variables, the larger this value the larger nquad_samples should
        be set

    quad_options : dict
        Options to the numerical quadrature function with attributes

    nquad_samples : integer
        The number of samples in the Gauss quadrature rule

    Note the entry ab[-1, :] will likely be wrong when compared to an
    analytical formula if one exists. This does not matter because eval_poly
    does not use this value. If you want the correct value just request
    num_coef+1 coefficients.
    """

    discrete_measure = not callable(measure)
    if discrete_measure is True:
        xk, pk = measure
        assert xk.shape[0] == pk.shape[0]
        assert nterms < xk.shape[0]

        def measure(x):
            return np.ones_like(x)

    ab = np.zeros((nterms, 2))
    nquad_samples = quad_options.get('nquad_samples', 100)
    quad_opts = quad_options.copy()
    if 'nquad_samples' in quad_opts:
        del quad_opts['nquad_samples']

    if np.isfinite(lb) and np.isfinite(ub):
        assert interval_size == ub - lb

    def integrate_continuous(integrand, nquad_samples, interval_size):
        return integrate_using_univariate_gauss_legendre_quadrature_unbounded(
            integrand,
            lb,
            ub,
            nquad_samples,
            **quad_opts,
            interval_size=interval_size)

    def integrate_discrete(integrand, nquad_samples, interval_size):
        return integrand(xk).dot(pk)

    if discrete_measure is True:
        integrate = integrate_discrete
    else:
        integrate = integrate_continuous

    # for probability measures the following will always be one, but
    # this is not true for other measures
    ab[0, 1] = np.sqrt(
        integrate(measure, nquad_samples, interval_size=interval_size))

    for ii in range(1, nterms):
        # predict
        ab[ii, 1] = ab[ii - 1, 1]
        if ii > 1:
            ab[ii - 1, 0] = ab[ii - 2, 0]
        else:
            ab[ii - 1, 0] = 0

        if np.isfinite(lb) and np.isfinite(ub) and ii > 1:
            # use the interval size from the previous degree as an initial
            # guess of the size needed here
            xx, __ = gauss_quadrature(ab, nterms)
            interval_size = xx.max() - xx.min()

        def integrand(measure, x):
            pvals = evaluate_orthonormal_polynomial_1d(x, ii, ab)
            return measure(x) * pvals[:, ii] * pvals[:, ii - 1]

        G_ii_iim1 = integrate(partial(integrand, measure),
                              nquad_samples + ii,
                              interval_size=interval_size)
        ab[ii - 1, 0] += ab[ii - 1, 1] * G_ii_iim1

        def integrand(measure, x):
            # Note eval orthogonal poly uses the new value for ab[ii, 0]
            # This is the desired behavior
            pvals = evaluate_orthonormal_polynomial_1d(x, ii, ab)
            return measure(x) * pvals[:, ii]**2

        G_ii_ii = integrate(partial(integrand, measure),
                            nquad_samples + ii,
                            interval_size=interval_size)
        ab[ii, 1] *= np.sqrt(G_ii_ii)

    return ab
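
A hedged usage sketch: recover the recursion coefficients of the shifted
Legendre polynomials, orthonormal under the uniform density on [0, 1].

import numpy as np

ab = predictor_corrector(5, lambda x: np.ones_like(x), 0., 1.,
                         interval_size=1.)
# every a-coefficient of U(0, 1) equals 1/2; the last row may be inaccurate,
# as the docstring notes
assert np.allclose(ab[:-1, 0], 0.5)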
Example #9
def predictor_corrector(nterms,
                        measure,
                        lb,
                        ub,
                        interval_size=1,
                        quad_options={}):

    ab = np.zeros((nterms, 2))
    # for probability measures the following will always be one, but
    # this is not true for other measures
    nquad_samples = quad_options.get('nquad_samples', 100)
    quad_opts = quad_options.copy()
    if 'nquad_samples' in quad_opts:
        del quad_opts['nquad_samples']
    ab[0, 1] = np.sqrt(
        integrate_using_univariate_gauss_legendre_quadrature_unbounded(
            measure,
            lb,
            ub,
            nquad_samples,
            **quad_opts,
            interval_size=interval_size))

    for ii in range(1, nterms):
        # predict
        ab[ii, 1] = ab[ii - 1, 1]
        if ii > 1:
            ab[ii - 1, 0] = ab[ii - 2, 0]
        else:
            ab[ii - 1, 0] = 0

        def integrand(x):
            pvals = evaluate_orthonormal_polynomial_1d(x, ii, ab)
            return measure(x) * pvals[:, ii] * pvals[:, ii - 1]

        if ii > 1:
            xx, __ = gauss_quadrature(ab, nterms)
            interval_size = xx.max() - xx.min()

        # correct
        G_ii_iim1 = \
            integrate_using_univariate_gauss_legendre_quadrature_unbounded(
                integrand, lb, ub, nquad_samples+ii, **quad_opts,
                interval_size=interval_size)
        ab[ii - 1, 0] += ab[ii - 1, 1] * G_ii_iim1

        def integrand(x):
            # Note eval orthogonal poly uses the new value for ab[ii, 0]
            # This is the desired behavior
            pvals = evaluate_orthonormal_polynomial_1d(x, ii, ab)
            return measure(x) * pvals[:, ii]**2
        G_ii_ii = \
            integrate_using_univariate_gauss_legendre_quadrature_unbounded(
                integrand, lb, ub, nquad_samples+ii,
                interval_size=interval_size, **quad_opts)
        ab[ii, 1] *= np.sqrt(G_ii_ii)

    return ab
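
A hedged usage sketch for this variant: coefficients for the standard
Gaussian density, where interval_size sets the initial quadrature window
over the unbounded domain.

import numpy as np

def gauss_measure(x):
    return np.exp(-x**2 / 2) / np.sqrt(2 * np.pi)

ab = predictor_corrector(5, gauss_measure, -np.inf, np.inf, interval_size=4)
# by symmetry the a-coefficients (probabilists' Hermite) should be near zero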