Code example #1
def idistinv_jacobi(u, n, alph, bet):
    r"""
    [x] = idistinv_jacobi(u, n, alph, bet)

    Computes the inverse of the order-n induced primitive for the Jacobi
    distribution with parameters alph and bet. Uses a bisection method in
    conjunction with forward evaluation given by idist_jacobi.
    """
    if np.isscalar(n):
        n = np.array([n])

    assert((np.all(u >= 0)) and (np.all(u <= 1)))
    assert((alph > -1) and (bet > -1))
    assert(np.all(n >= 0))

    x = np.zeros(u.shape)

    supp = [-1., 1.]

    if n.shape[0] == 1:
        primitive = partial(idist_jacobi, n=n, alph=alph, bet=bet)

        # Need 2*n + K coefficients, where K is the size of the
        # Markov-Stieltjes binning procedure
        recursion_coeffs = jacobi_recurrence(2*n + 400, alph, bet)
        # Downstream routines expect the b coefficients themselves, but
        # recursion_coeffs[:, 1] stores np.sqrt(b), so square that column
        a = recursion_coeffs[:, 0:1]
        b = recursion_coeffs[:, 1:]**2

        x = idist_inverse(u, n, primitive, a, b, supp)

    else:

        nmax = n.max()
        # MATLAB equivalent: [nn, ~, bin] = histcounts(n, -0.5:(nmax+0.5))
        edges = np.arange(-0.5, nmax + 1)  # include the final edge nmax+0.5
        nn, bins = histcounts(n, edges)

        recursion_coeffs = jacobi_recurrence(2*nmax + 400, alph, bet)
        # Downstream routines expect the b coefficients themselves, but
        # recursion_coeffs[:, 1] stores np.sqrt(b), so square that column
        a = recursion_coeffs[:, 0:1]
        b = recursion_coeffs[:, 1:]**2

        # The commented-out u_flat code mirrors Akil's original code inside
        # idist_inverse; if that code is correct the final result is the
        # same either way
        # u_flat = u.flatten(order='F')
        for qq in range(0, nmax+1):
            flags = bins == (qq+1)
            #flat_flags = flags.flatten(order='F')
            primitive = partial(idist_jacobi, n=qq, alph=alph, bet=bet)
            # xtemp = idist_inverse(
            #    u_flat[flat_flags], qq, primitive, a, b, supp)
            xtemp = idist_inverse(
                u[flags], qq, primitive, a, b, supp)
            x[flags] = xtemp
    return x
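A minimal usage sketch (assuming the function above and its pyapprox helpers, such as idist_inverse, are importable): inverse-CDF sampling from the degree-n induced Jacobi distribution.

import numpy as np

# draw uniform variates and push them through the inverse induced CDF;
# the result is a sample from the degree-n induced Jacobi distribution
u = np.random.uniform(0., 1., 50)
samples = idistinv_jacobi(u, n=3, alph=0., bet=0.)
assert samples.shape == u.shape
assert np.all((samples >= -1) & (samples <= 1))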
Code example #2
def demo_idist_jacobi():
    alph = -0.8
    bet = np.sqrt(101)
    #n = 8;    M = 1001
    n = 4
    M = 5

    x = np.cos(np.linspace(0, np.pi, M + 2))[:, np.newaxis]
    x = x[1:-1]

    F = idist_jacobi(x, n, alph, bet)

    recursion_coeffs = jacobi_recurrence(n + 1, alph, bet, True)
    #recursion_coeffs[:,1]=np.sqrt(recursion_coeffs[:,1])
    polys = evaluate_orthonormal_polynomial_1d(x[:, 0], n, recursion_coeffs)
    wt_function = (1 - x)**alph * (1 + x)**bet / (2**(alph + bet + 1) *
                                                  betafn(bet + 1, alph + 1))
    f = polys[:, -1:]**2 * wt_function

    fig, ax = plt.subplots(1, 1)
    plt.plot(x, f)
    ax.set_xlabel('$x$')
    ax.set_ylabel(r'$p_n^2(x) \mathrm{d}\mu(x)$')
    ax.set_xlim(-1, 1)
    ax.set_ylim(0, 4)

    fig, ax = plt.subplots(1, 1)
    plt.plot(x, F)
    ax.set_xlabel('$x$')
    ax.set_ylabel('$F_n(x)$')
    ax.set_xlim(-1, 1)
    plt.show()
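As a sanity check on the normalization of wt_function above, the following sketch (numpy and scipy only; nothing from pyapprox) integrates the density with QUADPACK's algebraic weight, which handles the (1 - x)**alph endpoint singularity, and expects unit mass.

import numpy as np
from scipy import integrate
from scipy.special import beta as betafn

alph, bet = -0.8, np.sqrt(101)
C = 2**(alph + bet + 1) * betafn(bet + 1, alph + 1)
# weight='alg' integrates f(x) * (x + 1)**bet * (1 - x)**alph accurately
total, _ = integrate.quad(lambda x: 1. / C, -1, 1,
                          weight='alg', wvar=(bet, alph))
assert abs(total - 1) < 1e-10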
Code example #3
def gauss_jacobi_pts_wts_1D(num_samples, alpha_poly, beta_poly):
    """
    Return a Gauss-Jacobi quadrature rule that exactly integrates polynomials
    of degree 2*num_samples-1 with respect to the probability density
    function of Beta random variables on [-1, 1]

    C*(1+x)**beta_poly*(1-x)**alpha_poly

    where

    C = 1/(2**(alpha_poly+beta_poly+1)*beta_fn(beta_poly+1, alpha_poly+1))

    or equivalently

    C*(1+x)**(alpha_stat-1)*(1-x)**(beta_stat-1)

    where

    C = 1/(2**(alpha_stat+beta_stat-1)*beta_fn(alpha_stat, beta_stat))

    Parameters
    ----------
    num_samples : integer
        The number of samples in the quadrature rule

    alpha_poly : float
        The Jacobi parameter alpha = beta_stat-1

    beta_poly : float
        The Jacobi parameter beta = alpha_stat-1

    Returns
    -------
    x : np.ndarray(num_samples)
        Quadrature samples

    w : np.ndarray(num_samples)
        Quadrature weights
    """
    ab = jacobi_recurrence(num_samples,
                           alpha=alpha_poly,
                           beta=beta_poly,
                           probability=True)
    return gauss_quadrature(ab, num_samples)
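For reference, the rule can be cross-checked against scipy.special.roots_jacobi, which targets the unnormalized weight (1-x)**alpha * (1+x)**beta; dividing its weights by the constant documented above recovers the probability normalization. A sketch independent of pyapprox:

import numpy as np
from scipy.special import roots_jacobi, beta as betafn

alpha_poly, beta_poly, nsamples = 1.5, 0.5, 10
x, w = roots_jacobi(nsamples, alpha_poly, beta_poly)
w /= 2**(alpha_poly + beta_poly + 1) * betafn(beta_poly + 1, alpha_poly + 1)
assert np.allclose(w.sum(), 1)  # PDF-normalized weights sum to one
# the rule reproduces the known mean of the Beta density on [-1, 1]
assert np.allclose(x.dot(w), (beta_poly - alpha_poly)/(alpha_poly + beta_poly + 2))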
Code example #4
def get_jacobi_recursion_coefficients(poly_type, opts, num_coefs):
    """
    Get the recursion coefficients of a Jacobi polynomial.

    Parameters
    ----------
    poly_type : string or None
        If not None, alpha_poly and beta_poly are read directly from opts;
        otherwise they are derived from opts['shapes']

    num_coefs : integer
        The number of recursion coefficients desired

    opts : dictionary
        Dictionary with the following attributes

    alpha_poly : float
        The alpha parameter of the Jacobi polynomial. Only used and required
        if poly_type is not None

    beta_poly : float
        The beta parameter of the Jacobi polynomial. Only used and required
        if poly_type is not None

    shapes : dictionary
        Shape parameters of the Beta distribution. shapes['a'] is the
        a parameter of the Beta distribution and shapes['b'] is the
        b parameter of the Beta distribution.
        The parameters of the Jacobi polynomial are determined using the
        following relationships: alpha_poly = b-1, beta_poly = a-1.
        This option is not required and is ignored when poly_type is not None

    Returns
    -------
    recursion_coeffs : np.ndarray (num_coefs, 2)
    """
    if poly_type is not None:
        alpha_poly, beta_poly = opts['alpha_poly'], opts['beta_poly']
    else:
        alpha_poly, beta_poly = (
            opts['shapes']['b'] - 1, opts['shapes']['a'] - 1)
    return jacobi_recurrence(
        num_coefs, alpha=alpha_poly, beta=beta_poly, probability=True)
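A usage sketch of the two option paths (hypothetical parameter values; assumes the function above is importable):

# explicit Jacobi parameters, used because poly_type is not None
opts = {'alpha_poly': 1.0, 'beta_poly': 2.0}
ab = get_jacobi_recursion_coefficients('jacobi', opts, num_coefs=5)

# Beta shape parameters instead: alpha_poly = b-1 = 1, beta_poly = a-1 = 2
opts = {'shapes': {'a': 3.0, 'b': 2.0}}
ab = get_jacobi_recursion_coefficients(None, opts, num_coefs=5)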
Code example #5
def idist_jacobi(x, n, alph, bet, M=10):
    r"""
    idist_jacobi -- Evaluation of the induced distribution

    F = idist_jacobi(x, n, alph, bet, {M = 10})

    Evaluates the integral

        F(x) = \int_{-1}^{x} p_n^2(s) \, \mathrm{d}\mu(s),

    where mu is the Jacobi measure with parameters (alph, bet), scaled to be
    a probability distribution on [-1, 1], and p_n is the corresponding
    degree-n orthonormal polynomial.

    The evaluation uses a transformation, measure modification, and Gauss
    quadrature; the final Gauss quadrature rule has M points.
    """

    assert ((alph > -1) and (bet > -1))
    assert (np.all(np.abs(x) <= 1))
    assert np.all(n >= 0)
    if x.ndim == 2:
        assert x.shape[1] == 1
        x = x[:, 0]

    A = int(np.floor(abs(alph)))  # integer part of |alph|
    Aa = alph - A

    F = np.zeros((x.shape[0], 1))

    mrs_centroid = medapprox_jacobi(alph, bet, n)
    xreflect = x > mrs_centroid

    if x[xreflect].shape[0] > 0:
        F[xreflect] = 1 - idist_jacobi(-x[xreflect], n, bet, alph, M)

    recursion_coeffs = jacobi_recurrence(n + 1, alph, bet, True)
    # Downstream routines expect the b coefficients themselves, but
    # recursion_coeffs[:, 1] stores np.sqrt(b), so square that column
    a = recursion_coeffs[:, 0]
    b = recursion_coeffs[:, 1]**2
    assert b[0] == 1  # To make it a probability measure

    if n > 0:
        # Zeros of p_n
        xn = gauss_quadrature(recursion_coeffs, n)[0]

        # This is the inverse of the n-th root of the squared leading
        # coefficient of p_n. We'll use it for scaling later
        kn_factor = np.exp(-1. / n * np.sum(np.log(b)))

    for xq in range(x.shape[0]):

        if x[xq] == -1:
            F[xq] = 0
            continue

        if xreflect[xq]:
            continue

        # Recurrence coefficients for quadrature rule
        recursion_coeffs = jacobi_recurrence(2 * n + A + M + 1, 0, bet, True)
        # Downstream routines expect the b coefficients themselves, but
        # recursion_coeffs[:, 1] stores np.sqrt(b), so square that column
        a = recursion_coeffs[:, 0:1]
        b = recursion_coeffs[:, 1:]**2
        assert b[0] == 1  # To make it a probability measure

        if n > 0:
            # Transformed
            un = (2. / (x[xq] + 1.)) * (xn + 1) - 1

        # Accumulate the scaling in logfactor so that b[0] can be reset to 1
        # after each measure modification
        logfactor = 0

        # Successive quadratic measure modifications
        for j in range(n):
            a, b = quadratic_modification_C(a, b, un[j])

            logfactor += np.log(b[0] * ((x[xq] + 1) / 2)**2 * kn_factor)
            b[0] = 1

        # Linear modification by factors (2 - 1/2*(u+1)*(x+1)),
        # having root u = (3-x)/(1+x)
        root = (3. - x[xq]) / (1. + x[xq])
        for aq in range(A):
            [a, b] = linear_modification(a, b, root)

            logfactor += np.log(b[0] * 1 / 2 * (x[xq] + 1))
            b[0] = 1

        # M-point Gauss quadrature for evaluation of the auxiliary integral I
        # gauss quadrature requires np.sqrt(b)
        u, w = gauss_quadrature(np.hstack((a, np.sqrt(b))), M)
        I = np.dot(w.T, (2. - 1. / 2. * (u + 1.) * (x[xq] + 1))**Aa)
        F[xq] = np.exp(logfactor - alph * np.log(2) -
                       betaln(bet + 1, alph + 1) - np.log(bet + 1) +
                       (bet + 1) * np.log((x[xq] + 1) / 2)) * I
    return F
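A plausibility-check sketch (assuming idist_jacobi above is importable): an induced distribution is a CDF, so it must be nondecreasing and take values in [0, 1].

import numpy as np

x = np.linspace(-0.99, 0.99, 101)[:, np.newaxis]
F = idist_jacobi(x, n=3, alph=0.5, bet=0.5)
assert np.all(np.diff(F[:, 0]) >= -1e-8)     # nondecreasing up to rounding
assert F.min() >= -1e-8 and F.max() <= 1 + 1e-8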
Code example #6
    def test_random_function_train(self):
        np.random.seed(5)
        num_vars = 2
        degree = 5
        rank = 2

        sparsity_ratio = 0.2
        sample_ratio = 0.9

        ranks = rank*np.ones(num_vars+1, dtype=np.uint64)
        ranks[0] = 1
        ranks[-1] = 1

        alpha, beta = 0, 0
        recursion_coeffs = jacobi_recurrence(
            degree+1, alpha=alpha, beta=beta, probability=True)

        ft_data = generate_random_sparse_function_train(
            num_vars,rank,degree+1,sparsity_ratio)
        true_sol = ft_data[1]

        num_ft_params = true_sol.shape[0]
        num_samples = int(sample_ratio*num_ft_params)
        samples = np.random.uniform(-1.,1.,(num_vars,num_samples))

        function = lambda samples: evaluate_function_train(
            samples,ft_data,recursion_coeffs)
        
        values = function(samples)

        assert np.linalg.norm(values)>0, (np.linalg.norm(values))

        num_validation_samples=100
        validation_samples = np.random.uniform(
            -1.,1.,(num_vars,num_validation_samples))
        validation_values = function(validation_samples)

        zero_ft_data = copy.deepcopy(ft_data)
        zero_ft_data[1]=np.zeros_like(zero_ft_data[1])
        # DO NOT use ft_data in the following two functions.
        # These functions only overwrite parameters associated with the
        # active indices; the rest of the parameters are taken from ft_data.
        # If ft_data were used, some of the true data would be kept, giving
        # an unrealistically accurate answer
        approx_eval = partial(modify_and_evaluate_function_train,samples,
                              zero_ft_data,recursion_coeffs,None)

        apply_approx_adjoint_jacobian = partial(
            apply_function_train_adjoint_jacobian,samples,zero_ft_data,
            recursion_coeffs,1e-3)
        
        sparsity = np.where(true_sol!=0)[0].shape[0]
        print(('sparsity',sparsity,'num_samples',num_samples))
        
        # sparse projection
        project = partial(s_sparse_projection, sparsity=sparsity)
        # non-linear least squares alternative
        # project = partial(s_sparse_projection, sparsity=num_ft_params)

        # use an uninformative initial guess
        # initial_guess = np.zeros_like(true_sol)

        # use linear approximation as initial guess
        linear_ft_data = ft_linear_least_squares_regression(
            samples, values, degree, perturb=None)
        initial_guess = linear_ft_data[1]
        
        # use an initial guess that is close to the true solution: the
        # num_samples required to obtain an accurate answer decreases
        # significantly relative to linear or uninformative guesses. As the
        # size of the perturbation from the truth increases, num_samples
        # must also increase
        initial_guess = true_sol.copy()+np.random.normal(0., .1, (num_ft_params))
        
        tol = 5e-3
        max_iter=1000
        result = iterative_hard_thresholding(
            approx_eval, apply_approx_adjoint_jacobian, project,
            values[:,0], initial_guess, tol, max_iter, verbosity=1)
        sol = result[0]
        residnorm = result[1]

        recovered_ft_data = copy.deepcopy(ft_data)
        recovered_ft_data[1] = sol
        ft_validation_values = evaluate_function_train(
            validation_samples,recovered_ft_data,recursion_coeffs)

        validation_error = np.linalg.norm(
            validation_values-ft_validation_values)
        rel_validation_error = validation_error/np.linalg.norm(
            validation_values)
        # compare relative error because the exit condition is based upon
        # the relative residual
        assert rel_validation_error < 10*tol, rel_validation_error
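For orientation, the s-sparse projection that iterative hard thresholding applies after each descent-like step keeps only the s largest-magnitude coefficients. A minimal stand-in (hypothetical; not pyapprox's s_sparse_projection):

import numpy as np

def hard_threshold(coef, sparsity):
    # zero all but the `sparsity` largest-magnitude entries (sparsity >= 1)
    out = np.zeros_like(coef)
    keep = np.argsort(np.abs(coef))[-sparsity:]
    out[keep] = coef[keep]
    return out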
Code example #7
    def test_sparse_function_train(self):
        np.random.seed(5)
        num_vars = 2
        degree = 5
        rank = 2

        tol = 1e-5
        
        sparsity_ratio = 0.2
        sample_ratio = 0.6

        ranks = rank*np.ones(num_vars+1, dtype=np.uint64)
        ranks[0] = 1
        ranks[-1] = 1

        alpha, beta = 0, 0
        recursion_coeffs = jacobi_recurrence(
            degree+1, alpha=alpha, beta=beta, probability=True)

        ft_data = generate_random_sparse_function_train(
            num_vars,rank,degree+1,sparsity_ratio)
        true_sol = ft_data[1]

        num_ft_params = true_sol.shape[0]
        num_samples = int(sample_ratio*num_ft_params)
        samples = np.random.uniform(-1.,1.,(num_vars,num_samples))

        
        function = lambda samples: evaluate_function_train(
            samples,ft_data,recursion_coeffs)
        #function = lambda samples: np.cos(samples.sum(axis=0))[:,np.newaxis]

        values = function(samples)
        print(values.shape)

        assert np.linalg.norm(values)>0, (np.linalg.norm(values))

        num_validation_samples=100
        validation_samples = np.random.uniform(
            -1.,1.,(num_vars,num_validation_samples))
        validation_values = function(validation_samples)

        zero_ft_data = copy.deepcopy(ft_data)
        zero_ft_data[1]=np.zeros_like(zero_ft_data[1])
        # DO NOT use ft_data in the following two functions.
        # These functions only overwrite parameters associated with the
        # active indices; the rest of the parameters are taken from ft_data.
        # If ft_data were used, some of the true data would be kept, giving
        # an unrealistically accurate answer
        approx_eval = partial(modify_and_evaluate_function_train,samples,
                              zero_ft_data,recursion_coeffs,None)

        apply_approx_adjoint_jacobian = partial(
            apply_function_train_adjoint_jacobian,samples,zero_ft_data,
            recursion_coeffs,1e-3)

        def least_squares_regression(indices, initial_guess):
            # the passed-in initial_guess is ignored; a fixed seed is used
            # so the regression is reproducible
            st0 = np.random.get_state()
            np.random.seed(1)
            initial_guess = np.random.normal(0., .01, indices.shape[0])
            np.random.set_state(st0)
            result = ft_non_linear_least_squares_regression(
                samples, values, ft_data, recursion_coeffs, initial_guess,
                indices,{'gtol':tol,'ftol':tol,'xtol':tol,'verbosity':0})
            return result[indices]

        sparsity = np.where(true_sol!=0)[0].shape[0]
        print(('sparsity',sparsity,'num_samples',num_samples,
               'num_ft_params',num_ft_params))

        print(true_sol)
        active_indices = None

        use_omp = True
        #use_omp = False
        if not use_omp:
            sol = least_squares_regression(np.arange(num_ft_params),None)
        else:
            result = orthogonal_matching_pursuit(
                approx_eval, apply_approx_adjoint_jacobian,
                least_squares_regression, values[:,0], active_indices,
                num_ft_params, tol, min(num_samples,num_ft_params), verbosity=1)
            sol = result[0]
            residnorm = result[1]

        
        recovered_ft_data = copy.deepcopy(ft_data)
        recovered_ft_data[1] = sol
        ft_validation_values = evaluate_function_train(
            validation_samples,recovered_ft_data,recursion_coeffs)


        validation_error = np.linalg.norm(
            validation_values-ft_validation_values)
        rel_validation_error = validation_error/np.linalg.norm(
            validation_values)
        # compare relative error because the exit condition is based upon
        # the relative residual
        print(rel_validation_error)
        assert rel_validation_error < 100*tol, rel_validation_error
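For orientation, one greedy step of orthogonal matching pursuit picks the coefficient whose correlation with the current residual is largest, then refits on the enlarged active set. A minimal index-selection stand-in (hypothetical; not pyapprox's orthogonal_matching_pursuit):

import numpy as np

def omp_select_index(residual, apply_adjoint_jacobian, active_indices):
    # correlation of every candidate coefficient with the residual
    correlations = np.abs(apply_adjoint_jacobian(residual))
    correlations[list(active_indices)] = 0.  # skip already-active entries
    return int(np.argmax(correlations))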
Code example #8
    def get_recursion_coefficients(self, opts, num_coefs):
        poly_type = opts.get('poly_type', None)
        var_type = None
        if poly_type is None:
            var_type = opts['rv_type']
        if poly_type == 'legendre' or var_type == 'uniform':
            recursion_coeffs = jacobi_recurrence(num_coefs,
                                                 alpha=0,
                                                 beta=0,
                                                 probability=True)
        elif poly_type == 'jacobi' or var_type == 'beta':
            if poly_type is not None:
                alpha_poly, beta_poly = opts['alpha_poly'], opts['beta_poly']
            else:
                alpha_poly, beta_poly = (
                    opts['shapes']['b'] - 1, opts['shapes']['a'] - 1)
            recursion_coeffs = jacobi_recurrence(num_coefs,
                                                 alpha=alpha_poly,
                                                 beta=beta_poly,
                                                 probability=True)
        elif poly_type == 'hermite' or var_type == 'norm':
            recursion_coeffs = hermite_recurrence(num_coefs,
                                                  rho=0.,
                                                  probability=True)
        elif poly_type == 'krawtchouk' or var_type == 'binom':
            if poly_type is None:
                opts = opts['shapes']
            n, p = opts['n'], opts['p']
            num_coefs = min(num_coefs, n)
            recursion_coeffs = krawtchouk_recurrence(num_coefs, n, p)
        elif poly_type == 'hahn' or var_type == 'hypergeom':
            if poly_type is not None:
                apoly, bpoly = opts['alpha_poly'], opts['beta_poly']
                N = opts['N']
            else:
                M, n, N = [opts['shapes'][key] for key in ['M', 'n', 'N']]
                apoly, bpoly = -(n + 1), -M - 1 + n
            num_coefs = min(num_coefs, N)
            recursion_coeffs = hahn_recurrence(num_coefs, N, apoly, bpoly)
        elif (poly_type == 'discrete_chebyshev' or
              var_type == 'discrete_chebyshev'):
            if poly_type is not None:
                N = opts['N']
            else:
                N = opts['shapes']['xk'].shape[0]
                assert np.allclose(opts['shapes']['xk'], np.arange(N))
                assert np.allclose(opts['shapes']['pk'], np.ones(N) / N)
            num_coefs = min(num_coefs, N)
            recursion_coeffs = discrete_chebyshev_recurrence(num_coefs, N)
        elif poly_type == 'discrete_numeric' or var_type == 'float_rv_discrete':
            if poly_type is None:
                opts = opts['shapes']
            xk, pk = opts['xk'], opts['pk']
            # shapes['xk'] will be in [0, 1] but canonical domain is [-1, 1]
            xk = xk * 2 - 1
            assert xk.min() >= -1 and xk.max() <= 1
            if num_coefs > xk.shape[0]:
                msg = 'Number of coefs requested is larger than number of '
                msg += 'probability masses'
                raise Exception(msg)
            recursion_coeffs = modified_chebyshev_orthonormal(
                num_coefs, [xk, pk])
            p = evaluate_orthonormal_polynomial_1d(np.asarray(xk, dtype=float),
                                                   num_coefs - 1,
                                                   recursion_coeffs)
            error = np.absolute((p.T * pk).dot(p) - np.eye(num_coefs)).max()
            if error > self.numerically_generated_poly_accuracy_tolerance:
                msg = 'basis created is ill conditioned. '
                msg += f'Max error: {error}. Max terms: {xk.shape[0]}, '
                msg += f'Terms requested: {num_coefs}'
                raise Exception(msg)
        elif poly_type == 'monomial':
            recursion_coeffs = None
        else:
            if poly_type is not None:
                raise Exception('poly_type (%s) not supported' % poly_type)
            else:
                raise Exception('var_type (%s) not supported' % var_type)
        return recursion_coeffs
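A sketch of the opts dictionaries this dispatcher expects (hypothetical values, following the branches above):

legendre_opts = {'poly_type': 'legendre'}             # or {'rv_type': 'uniform'}
jacobi_opts = {'poly_type': 'jacobi',
               'alpha_poly': 1.0, 'beta_poly': 2.0}   # or Beta shapes via rv_type
hermite_opts = {'poly_type': 'hermite'}               # or {'rv_type': 'norm'}
binom_opts = {'rv_type': 'binom', 'shapes': {'n': 10, 'p': 0.5}}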
Code example #9
    def test_evaluate_multivariate_orthonormal_polynomial(self):
        num_vars = 2
        alpha = 0.
        beta = 0.
        degree = 2
        deriv_order = 1
        probability_measure = True

        ab = jacobi_recurrence(degree + 1,
                               alpha=alpha,
                               beta=beta,
                               probability=probability_measure)

        x, w = np.polynomial.legendre.leggauss(degree)
        samples = cartesian_product([x] * num_vars, 1)
        weights = outer_product([w] * num_vars)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        # sort lexicographically to make testing easier
        I = np.lexsort((indices[0, :], indices[1, :], indices.sum(axis=0)))
        indices = indices[:, I]

        basis_matrix = evaluate_multivariate_orthonormal_polynomial(
            samples, indices, ab, deriv_order)

        exact_basis_vals_1d = []
        exact_basis_derivs_1d = []
        for dd in range(num_vars):
            x = samples[dd, :]
            exact_basis_vals_1d.append(
                np.asarray([1 + 0. * x, x, 0.5 * (3. * x**2 - 1)]).T)
            exact_basis_derivs_1d.append(
                np.asarray([0. * x, 1.0 + 0. * x, 3. * x]).T)
            exact_basis_vals_1d[-1] /= np.sqrt(1. /
                                               (2 * np.arange(degree + 1) + 1))
            exact_basis_derivs_1d[-1] /= np.sqrt(
                1. / (2 * np.arange(degree + 1) + 1))

        exact_basis_matrix = np.asarray([
            exact_basis_vals_1d[0][:, 0], exact_basis_vals_1d[0][:, 1],
            exact_basis_vals_1d[1][:, 1], exact_basis_vals_1d[0][:, 2],
            exact_basis_vals_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
            exact_basis_vals_1d[1][:, 2]
        ]).T

        # x1 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, exact_basis_derivs_1d[0][:, 1], 0. * x,
                 exact_basis_derivs_1d[0][:, 2],
                 exact_basis_derivs_1d[0][:, 1] * exact_basis_vals_1d[1][:, 1],
                 0. * x
             ]).T))

        # x2 derivative
        exact_basis_matrix = np.vstack(
            (exact_basis_matrix,
             np.asarray([
                 0. * x, 0. * x, exact_basis_derivs_1d[1][:, 1], 0. * x,
                 exact_basis_vals_1d[0][:, 1] * exact_basis_derivs_1d[1][:, 1],
                 exact_basis_derivs_1d[1][:, 2]
             ]).T))

        def func(x):
            return evaluate_multivariate_orthonormal_polynomial(
                x, indices, ab, 0)

        basis_matrix_derivs = basis_matrix[samples.shape[1]:]
        basis_matrix_derivs_fd = np.empty_like(basis_matrix_derivs)
        for ii in range(samples.shape[1]):
            basis_matrix_derivs_fd[ii::samples.shape[1], :] = approx_fprime(
                samples[:, ii:ii + 1], func, 1e-7)
        assert np.allclose(exact_basis_matrix[samples.shape[1]:],
                           basis_matrix_derivs_fd)

        assert np.allclose(exact_basis_matrix, basis_matrix)
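A side check on the normalization used above: dividing Legendre polynomial k by sqrt(1/(2k+1)) makes the basis orthonormal with respect to the uniform probability measure on [-1, 1]. A self-contained sketch using only numpy:

import numpy as np

x, w = np.polynomial.legendre.leggauss(10)
w = w / 2                                    # uniform probability weights
P = np.polynomial.legendre.legvander(x, 2)   # columns P_0, P_1, P_2
P = P / np.sqrt(1. / (2 * np.arange(3) + 1))
assert np.allclose(P.T @ (w[:, None] * P), np.eye(3))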
Code example #10
def get_recursion_coefficients(
        opts,
        num_coefs,
        numerically_generated_poly_accuracy_tolerance=1e-12):
    """
    Parameters
    ----------
    num_coefs : integer
        The number of recursion coefficients desired

    numerically_generated_poly_accuracy_tolerance : float
        Tolerance used to construct any numerically generated polynomial
        basis functions.

    opts : dictionary
        Dictionary with the following attributes

    rv_type : string
        The type of variable associated with the polynomial. If poly_type
        is not provided then the recursion coefficients are selected
        using the Askey scheme, e.g. uniform -> legendre, norm -> hermite.
        rv_type is assumed to be the name of a scipy.stats distribution,
        e.g. for a Gaussian variable rv_type = norm(0, 1).dist.name

    poly_type : string
        The type of polynomial, which overrides rv_type. Supported types are
        ['legendre', 'hermite', 'jacobi', 'krawtchouk', 'hahn',
        'discrete_chebyshev', 'discrete_numeric', 'continuous_numeric',
        'function_indpnt_vars', 'product_indpnt_vars', 'monomial'].
        Note 'monomial' does not produce an orthogonal basis

    The remaining options are specific to rv_type and poly_type. See
     - :func:`pyapprox.univariate_quadrature.get_jacobi_recursion_coefficients`
     - :func:`pyapprox.univariate_quadrature.get_function_independent_vars_recursion_coefficients`
     - :func:`pyapprox.univariate_quadrature.get_product_independent_vars_recursion_coefficients`

    Note Legendre is just a special instance of a Jacobi polynomial with
    alpha_poly, beta_poly = 0, 0 and alpha_stat, beta_stat = 1, 1

    Returns
    -------
    recursion_coeffs : np.ndarray (num_coefs, 2)
    """

    # imports used for variables that require numerically generated
    # polynomials via the predictor-corrector method
    from scipy import stats
    from scipy.stats import _continuous_distns

    poly_type = opts.get('poly_type', None)
    var_type = None
    if poly_type is None:
        var_type = opts['rv_type']
    if poly_type == 'legendre' or var_type == 'uniform':
        recursion_coeffs = jacobi_recurrence(
            num_coefs, alpha=0, beta=0, probability=True)
    elif poly_type == 'jacobi' or var_type == 'beta':
        recursion_coeffs = get_jacobi_recursion_coefficients(
            poly_type, opts, num_coefs)
    elif poly_type == 'hermite' or var_type == 'norm':
        recursion_coeffs = hermite_recurrence(
            num_coefs, rho=0., probability=True)
    elif poly_type == 'krawtchouk' or var_type == 'binom':
        # although bounded, the Krawtchouk polynomials are not defined
        # on the canonical domain [-1, 1]; the user and canonical
        # domains are the same
        if poly_type is None:
            opts = opts['shapes']
        n, p = opts['n'], opts['p']
        num_coefs = min(num_coefs, n)
        recursion_coeffs = krawtchouk_recurrence(
            num_coefs, n, p)
    elif poly_type == 'hahn' or var_type == 'hypergeom':
        # although bounded, the Hahn polynomials are not defined
        # on the canonical domain [-1, 1]; the user and canonical
        # domains are the same
        if poly_type is not None:
            apoly, bpoly = opts['alpha_poly'], opts['beta_poly']
            N = opts['N']
        else:
            M, n, N = [opts['shapes'][key] for key in ['M', 'n', 'N']]
            apoly, bpoly = -(n+1), -M-1+n
        num_coefs = min(num_coefs, N)
        recursion_coeffs = hahn_recurrence(num_coefs, N, apoly, bpoly)
        xk = np.arange(max(0, N-M+n), min(n, N)+1, dtype=float)  # currently unused
    elif poly_type == 'discrete_chebyshev' or var_type == 'discrete_chebyshev':
        # although bounded, the discrete Chebyshev polynomials are not
        # defined on the canonical domain [-1, 1]; the user and canonical
        # domains are the same
        if poly_type is not None:
            N = opts['N']
        else:
            N = opts['shapes']['xk'].shape[0]
            assert np.allclose(opts['shapes']['xk'], np.arange(N))
            assert np.allclose(opts['shapes']['pk'], np.ones(N)/N)
        num_coefs = min(num_coefs, N)
        recursion_coeffs = discrete_chebyshev_recurrence(num_coefs, N)
    elif poly_type == 'discrete_numeric' or var_type == 'float_rv_discrete':
        if poly_type is None:
            opts = opts['shapes']
        xk, pk = opts['xk'], opts['pk']
        # shapes['xk'] will be in [0, 1] but canonical domain is [-1, 1]
        xk = xk*2-1
        assert xk.min() >= -1 and xk.max() <= 1
        if num_coefs > xk.shape[0]:
            msg = 'Number of coefs requested is larger than number of '
            msg += 'probability masses'
            raise Exception(msg)
        #recursion_coeffs = modified_chebyshev_orthonormal(num_coefs, [xk, pk])
        recursion_coeffs = lanczos(xk, pk, num_coefs)
        p = evaluate_orthonormal_polynomial_1d(
            np.asarray(xk, dtype=float), num_coefs-1, recursion_coeffs)
        error = np.absolute((p.T*pk).dot(p)-np.eye(num_coefs)).max()
        if error > numerically_generated_poly_accuracy_tolerance:
            msg = 'basis created is ill conditioned. '
            msg += f'Max error: {error}. Max terms: {xk.shape[0]}, '
            msg += f'Terms requested: {num_coefs}'
            raise Exception(msg)
    elif (poly_type == 'continuous_numeric' or
          var_type == 'continuous_rv_sample'):
        if poly_type is None:
            opts = opts['shapes']
        xk, pk = opts['xk'], opts['pk']
        if num_coefs > xk.shape[0]:
            msg = 'Number of coefs requested is larger than number of '
            msg += 'samples'
            raise Exception(msg)
        #recursion_coeffs = modified_chebyshev_orthonormal(num_coefs, [xk, pk])
        #recursion_coeffs = lanczos(xk, pk, num_coefs)
        recursion_coeffs = predictor_corrector(
            num_coefs, (xk, pk), xk.min(), xk.max(),
            interval_size=xk.max()-xk.min())
        p = evaluate_orthonormal_polynomial_1d(
            np.asarray(xk, dtype=float), num_coefs-1, recursion_coeffs)
        error = np.absolute((p.T*pk).dot(p)-np.eye(num_coefs)).max()
        if error > numerically_generated_poly_accuracy_tolerance:
            msg = 'basis created is ill conditioned. '
            msg += f'Max error: {error}. Max terms: {xk.shape[0]}, '
            msg += f'Terms requested: {num_coefs}'
            raise Exception(msg)
    elif poly_type == 'monomial':
        recursion_coeffs = None
    elif var_type in _continuous_distns._distn_names:
        quad_options = {
            'nquad_samples': 10,
            'atol': numerically_generated_poly_accuracy_tolerance,
            'rtol': numerically_generated_poly_accuracy_tolerance,
            'max_steps': 10000, 'verbose': 0}
        rv = getattr(stats, var_type)(**opts['shapes'])
        recursion_coeffs = predictor_corrector_known_scipy_pdf(
            num_coefs, rv, quad_options)
    elif poly_type == 'function_indpnt_vars':
        recursion_coeffs = get_function_independent_vars_recursion_coefficients(
            opts, num_coefs)
    elif poly_type == 'product_indpnt_vars':
        recursion_coeffs = get_product_independent_vars_recursion_coefficients(
            opts, num_coefs)
    else:
        if poly_type is not None:
            raise Exception('poly_type (%s) not supported' % poly_type)
        else:
            raise Exception('var_type (%s) not supported' % var_type)
    return recursion_coeffs
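A usage sketch (assuming pyapprox's get_recursion_coefficients and the recurrence helpers it dispatches to are importable):

# Askey-scheme dispatch from rv_type alone: uniform -> Legendre
ab = get_recursion_coefficients({'rv_type': 'uniform'}, num_coefs=10)

# explicit poly_type with Jacobi parameters
ab = get_recursion_coefficients(
    {'poly_type': 'jacobi', 'alpha_poly': 1.0, 'beta_poly': 2.0}, num_coefs=10)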