Example #1
def jacobi_e2_1d(order, alpha, beta):
    """
    jacobi_e2_1d.m - Evaluate the inner product of 1d Jacobi-chaos doubles

    Syntax     e = jacobi_e2_1d(order, alpha, beta)

    Input:     order = order of Jacobi-chaos
                alpha, beta = parameters of Jacobi-chaos (alpha, beta>-1)
    Output:    e = (order+1) x 1 column vector containing the results.

    NO WARNING MESSAGE IS GIVEN WHEN PARAMETERS ARE OUT OF RANGE.

    Original Matlab version by Dongbin Xiu   04/13/2003
    """

    from numpy import zeros, ceil
    from scipy.special import jacobi, gamma

    e = zeros((order + 1, 1))
    np = int(ceil((2.0 * order + 1.0) / 2.0))  # number of Gauss-Jacobi quadrature points

    j = jacobi(np, alpha, beta).weights
    z = j[:, 0]
    w = j[:, 1]

    factor = 2 ** (alpha + beta + 1) * gamma(alpha + 1) * gamma(beta + 1) / gamma(alpha + beta + 2)

    for i in range(0, order + 1):
        e[i] = sum(jacobi(i, alpha, beta)(z) ** 2 * w) / factor
    return e
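A minimal usage sketch for the function above (not part of the original example): for alpha = beta = 0 the Jacobi polynomials reduce to Legendre polynomials, whose squared norm over [-1, 1] is 2/(2i+1), so after dividing by factor = 2 each entry should come out close to 1/(2i+1).

e_check = jacobi_e2_1d(4, 0.0, 0.0)
for i, ei in enumerate(e_check.ravel()):
    print(i, float(ei), 1.0 / (2 * i + 1))  # the two values should agree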
Example #2
def test_normsOfPolynomials():
    """
    make sure the products are computed as expected
    """
    mydpp = DPP(20, 2, [[-.5, -.5], [.4, .6]], "test")
    print("hey", spi.quad(lambda x: 1 / np.sqrt(1 - x**2)
                          * sps.jacobi(2, -.5, -.5, monic=1)(x)**2, -1, 1)[0])
    expected = (
        spi.quad(lambda x: 1 / np.sqrt(1 - x**2)
                 * sps.jacobi(2, -.5, -.5, monic=1)(x)**2, -1, 1)[0]
        * spi.quad(lambda x: (1 - x)**.4 * (1 + x)**.6
                   * sps.jacobi(3, .4, .6, monic=1)(x)**2, -1, 1)[0]
    )
    computed = mydpp.squaredNormsOfPolys[(2,3)]
    assert round(computed - expected, 5) == 0
Example #3
def wigner_d(j, mp, m, beta):
    """
    Compute the Wigner d-matrix d_{m', m}^j(\beta) using Jacobi polynomials.
    Taken from wikipedia which claims Wigner, E.P. 1931 as a source. Matches
    the recursion based method in wigner_d_rec.
    """

    import math
    from scipy.special import binom, jacobi

    j = int(j)
    mp = int(mp)
    m = int(m)

    k = min(j + m, j - m, j + mp, j - mp)

    if k == j + m:
        a = mp - m
        l = mp - m
    elif k == j - m:
        a = m - mp
        l = 0
    elif k == j + mp:
        a = m - mp
        l = 0
    elif k == j - mp:
        a = mp - m
        l = mp - m
    else:
        raise RuntimeError("could not compute a,l")

    b = 2 * (j - k) - a

    return ((-1.)**l) * (binom(2*j - k, k + a)**0.5) * \
           (binom(k+b, b)**-0.5) * (math.sin(0.5*beta)**a) * \
           (math.cos(0.5*beta)**b) * jacobi(k,a,b)(math.cos(beta))
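A couple of closed-form checks for the function above (an illustrative sketch, not part of the original snippet): d^1_{0,0}(beta) = cos(beta) and d^1_{1,0}(beta) = -sin(beta)/sqrt(2).

import math
beta = 0.7
print(wigner_d(1, 0, 0, beta), math.cos(beta))                  # d^1_{0,0}
print(wigner_d(1, 1, 0, beta), -math.sin(beta) / math.sqrt(2))  # d^1_{1,0}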
Example #4
def jacobi_d(n, k, a=1, b=1):
    """
    kth derivative of Jacobi polynomial.
    2016-07-02
    """
    from scipy.special import gamma, jacobi
    return lambda x: gamma(a + b + n + 1 + k) / 2**k / gamma(
        a + b + n + 1) * jacobi(n - k, a + k, b + k)(x)
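A quick check of the closed-form derivative above (an illustrative sketch): jacobi objects are numpy poly1d subclasses, so .deriv() differentiates them directly and should match the formula for k = 1.

import numpy as np
from scipy.special import jacobi
x = np.linspace(-1, 1, 5)
print(jacobi_d(4, 1)(x))           # first derivative via the closed form (a = b = 1)
print(jacobi(4, 1, 1).deriv()(x))  # direct differentiation of P_4^(1,1)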
Example #5
def poly_to_jacobi(x):
    """x is a poly1d object"""
    xc = x.coeffs
    N = x.order+1
    matrix = zeros(shape=(N,N), dtype=float)
    for i in range(N):
        matrix[N-i-1:N, i] = jacobi(i,a,b).coeffs
    return solve(matrix, xc)
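The function above expands a poly1d in the Jacobi basis by solving a small linear system against the monomial coefficients. A minimal round-trip sketch (the globals a, b and the imports below are assumptions added for illustration, since the original snippet relies on module-level definitions):

from numpy import zeros, poly1d
from numpy.linalg import solve
from scipy.special import jacobi

a, b = 0.5, 0.5
p = poly1d([1.0, 0.0, 1.0])        # x**2 + 1
c = poly_to_jacobi(p)              # coefficients c[i] of P_i^(a, b)
q = poly1d([0.0])
for i in range(len(c)):
    q = q + jacobi(i, a, b) * c[i]
print(q.coeffs, p.coeffs)          # should agree up to round-off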
Example #7
def get_wignerD_3d(phi_ea, theta_ea, chi_ea, l, m, k):
    # Return array of D matrices with L, M, K as l, [-l, l], m
    # Calculation based off of below derivation
    # https://en.wikipedia.org/wiki/Wigner_D-matrix#Wigner_(small)_d-matrix

    m = np.expand_dims(m, 0)
    phi_ea = np.expand_dims(phi_ea, axis=-1)
    theta_ea = np.expand_dims(theta_ea, axis=-1)
    chi_ea = np.expand_dims(chi_ea, axis=-1)

    k_array = np.array([l + k, l - k, l, l], dtype=int)
    k_matrix = np.tile(np.expand_dims(k_array, -1), m.shape[1])

    k_matrix[2, :] += m[0]
    k_matrix[3, :] -= m[0]

    a_array = np.array([-1 * k, k, k, -1 * k])
    a_matrix = np.tile(np.expand_dims(a_array, -1), m.shape[1])
    a_matrix += np.array([[1, -1, -1, 1]]).transpose() * m

    k_inds = np.argmin(k_matrix, axis=0)
    k_, a = np.zeros_like(m), np.zeros_like(m)
    ii = np.arange(k_inds.shape[0])
    k_[0, :] = k_matrix[k_inds[ii], ii]
    a[0, :] = a_matrix[k_inds[ii], ii]
    b = 2 * l - 2 * k_ - a
    lmbd = (m - k) * np.maximum(0, (1 - (k_inds % 3)))
    #lmbd = np.maximum(0, (m - k))

    # Calculate little d
    d_coeff = (-1)**lmbd*np.sqrt(nCk_np(2*l-k_, k_+a))/np.sqrt(nCk_np(k_+b, b))\
        *np.sin(theta_ea/2)**a*np.cos(theta_ea/2)**b
    d = np.zeros_like(d_coeff)
    cos_theta = np.cos(theta_ea)
    jac = []
    for im in range(m.shape[-1]):
        p = jacobi(k_[0, im], a[0, im], b[0, im])
        jac.append(np.polyval(p, cos_theta)[:, 0])
        d[:, im] = d_coeff[:, im] * np.polyval(p, cos_theta)[:, 0]

    #print("d", d)
    #print("jac", jac)
    #print("l", lmbd)
    #print("mmmmmmmmmmm", m, np.complex(0,-1))
    #print("phi", phi_ea)
    #print("res",np.exp(np.complex(0,-1)*m*phi_ea))
    # Calculate D matrix element
    D_ME = np.exp(-1j * m * phi_ea) * d * np.exp(-1j * k * chi_ea)

    return D_ME.transpose().astype(np.complex64)
Example #8
def cone_jacobi(N, n, t):
    '''
    Returns the orthogonal polynomial in the vertical direction
    for the cone interior. Here t is in [0, 1].
    '''

    jac = ss.jacobi(N - n, 0, 2 + 2 * n)
    res = jac(2 * t - 1) * t**n / np.sqrt(8. / (2 * N + 3.))

    return res
Example #9
def jacobi_lobatto(n, a, b):
    """
    Weights and collocation points for Gauss-Jacobi-Lobatto quadrature, i.e. Gaussian quadrature using Jacobi polynomials with collocation points as the extrema of the polynomials.
    Based on formulas from Huang.
    2016-06-30

    Params:
    -------
    n (int)
        Polynomial degree; the rule has n interior collocation points plus the two endpoints.
    a,b (floats)
        alpha and beta

    Value:
    ------
    coX (ndarray)
        Lobatto collocation points.
    weights (ndarray)
        Weights at collocation points.
    """
    assert type(n) is int
    assert a > -1 and b > -1

    from scipy.special import gamma, gammaln, jacobi, factorial
    pdn = lambda n, a, b, x: (n + a + b + 1) / 2 * jacobi(n - 1, a + 1, b + 1)(
        x)
    log_g_tilde = lambda a, b, n: (np.log(2) * (
        a + b + 1) + gammaln(n + a + 2) + gammaln(n + b + 2) - np.log(
            factorial(n + 1)) - gammaln(n + a + b + 2))
    an = lambda n: -(a**2 - b**2 + 2 * (a - b)) / ((2 * n + a + b) *
                                                   (2 * n + a + b + 2))
    bn = lambda n: 4 * (n - 1) * (n + a) * (n + b) * (n + a + b + 1) / (
        2 * n + a + b)**2 / (2 * n + a + b + 1) / (2 * n + a + b - 1)

    Jn = np.diag([an(i) for i in range(1, n + 1)]) + np.sqrt(
        np.diag([bn(i) for i in range(2, n + 1)], k=1) +
        np.diag([bn(i) for i in range(2, n + 1)], k=-1))
    coX = np.concatenate(([-1], np.sort(np.linalg.eig(Jn)[0]).real, [1]))

    weights = np.zeros_like(coX)
    n += 1  # In Teng, N+1 is the total number of points including endpoints.
    loggtilde = log_g_tilde(a + 1, b + 1, n - 2)
    denom = (1 - coX[1:-1]**2)**2 * (pdn(n - 1, a + 1, b + 1, coX[1:-1]))**2
    weights[1:-1] = np.exp(loggtilde) / denom

    weights[0] = np.exp(
        np.log(2) * (a + b + 1) + np.log(b + 1) + 2 * gammaln(b + 1) +
        gammaln(n) + gammaln(n + a + 1) - gammaln(n + b + 1) -
        gammaln(n + a + b + 2))
    weights[-1] = np.exp(
        np.log(2) * (a + b + 1) + np.log(a + 1) + 2 * gammaln(a + 1) +
        gammaln(n) + gammaln(n + b + 1) - gammaln(n + a + 1) -
        gammaln(n + a + b + 2))
    return coX, weights
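A hypothetical usage sketch for the rule above (alpha and beta chosen arbitrarily): integrate a low-degree polynomial against the Jacobi weight (1-x)^a (1+x)^b and compare with the weighted quadrature sum.

import numpy as np
import scipy.integrate as spi

alpha, beta = 0.0, 0.5
x, w = jacobi_lobatto(8, alpha, beta)
f = lambda t: t**3 - 2.0 * t + 1.0
exact = spi.quad(lambda t: (1.0 - t)**alpha * (1.0 + t)**beta * f(t), -1, 1)[0]
print(np.dot(w, f(x)), exact)  # the two numbers should agree for low degree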
Example #10
def wigner_d(s1, s2, theta, l, l_use_bessel=1.e4):
    """
    Function to compute the wigner-d matrices
    Parameters
    ----------
    s1,s2:
        Spin factors for the wigner-d matrix.
    theta:
        Angular separation for which to compute the wigner-d matrix. The matrix depends on cos(theta).
    l:
        The spherical harmonics mode ell for which to compute the matrix.
    l_use_bessel:
        Due to numerical issues, we need to switch from wigner-d matrix to bessel functions at high ell (see the note below).
        This defines the scale at which the switch happens.
    """
    l0 = np.copy(l)
    if l_use_bessel is not None:
        #FIXME: This is not great. Due to issues with the scipy hypergeometric function,
        #jacobi can output nan for large ell, l>1.e4
        # As a temporary fix, for ell>1.e4, we are replacing the wigner function with the
        # bessel function. Fingers and toes crossed!!!
        # mpmath is slower and also has convergence issues at large ell.
        #https://github.com/scipy/scipy/issues/4446
        l = np.atleast_1d(l)
        x = l < l_use_bessel
        l = np.atleast_1d(l[x])
    k = np.amin([l - s1, l - s2, l + s1, l + s2], axis=0)
    a = np.absolute(s1 - s2)
    lamb = 0  #lambda
    if s2 > s1:
        lamb = s2 - s1
    b = 2 * l - 2 * k - a
    d_mat = (-1)**lamb
    d_mat *= np.sqrt(
        binom(2 * l - k, k + a)
    )  #this gives array of shape l with elements choose(2l[i]-k[i], k[i]+a)
    d_mat /= np.sqrt(binom(k + b, b))
    d_mat = np.atleast_1d(d_mat)
    x = k < 0
    d_mat[x] = 0

    d_mat = d_mat.reshape(1, len(d_mat))
    theta = theta.reshape(len(theta), 1)
    d_mat = d_mat * ((np.sin(theta / 2.0)**a) * (np.cos(theta / 2.0)**b))
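    # NOTE: this 4-argument call matches scipy.special.eval_jacobi(n, alpha, beta, x);
    # scipy.special.jacobi itself takes only (n, alpha, beta[, monic]) and returns a polynomial object.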
    d_mat *= jacobi(l, a, b, np.cos(theta))

    if l_use_bessel is not None:
        l = np.atleast_1d(l0)
        x = l >= l_use_bessel
        l = np.atleast_1d(l[x])
        #         d_mat[:,x]=jn(s1-s2,l[x]*theta)
        d_mat = np.append(d_mat, jn(s1 - s2, l * theta), axis=1)
    return d_mat
Example #11
def poly_to_jacobi(x):
    """x is a poly1d object representing a function *after dividing by the invariant_distribution*.

    This means that:
    jacobi_to_poly(poly_to_jacobi(p)) == invariant_distribution * p

    up to numerical errors.
    """
    xc = x.coeffs
    N = x.order + 1
    matrix = zeros(shape=(N, N), dtype=float)
    for i in range(N):
        matrix[N - i - 1:N, i] = jacobi(i, a, b).coeffs
    return solve(matrix, xc)
Example #13
    def __init__(self, n, m):
        k = n - m

        self.is_zero = (k % 2 == 1)
        n_jacobi = k // 2
        self.sign = (-1)**n_jacobi
        self.m = m
        self.n = n


        # jacobi form
        self.jacobi_pol = jacobi(n=n_jacobi, alpha=m, beta=0)
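For context, an illustrative sketch (not part of the class above): the stored Jacobi polynomial is the usual ingredient of the Zernike radial polynomial, R_n^m(rho) = (-1)^k * rho**m * P_k^(m,0)(1 - 2*rho**2) with k = (n - m)/2, e.g. R_2^0(rho) = 2*rho**2 - 1.

import numpy as np
from scipy.special import jacobi

n, m = 2, 0
k = (n - m) // 2
rho = np.linspace(0.0, 1.0, 5)
print((-1)**k * rho**m * jacobi(k, m, 0)(1.0 - 2.0 * rho**2))
print(2.0 * rho**2 - 1.0)  # closed form of R_2^0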
Example #14
def normalized_jacobi(n,alpha,beta):
    """
    Returns a Jacobi polynomial that is appropriately normalized for
    orthonomality under integration with the correct wieght.

    Polynomials are a class in python, a powerful one.
    
    Note: not normalized for n = 0. This is to take full advantage of
    recurrence relations.
    """
    # Our polynomial
    poly = jacobi(n,alpha,beta)
    # Normalize it
    poly /= poly.normcoef
    if n == 0:
        poly *= np.sqrt(2.0**(-alpha - beta -1)*gamma(alpha+beta+2)/(gamma(alpha+1)*gamma(beta+1)))
    return poly
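A hedged check of the normalization above (assuming poly.normcoef is the square root of the weighted squared norm h_n, as for scipy's orthopoly1d objects): for n >= 1 the weighted integral of the squared polynomial should be close to 1.

import scipy.integrate as spi
from scipy.special import jacobi  # used as a module-level name inside normalized_jacobi

alpha, beta, deg = 0.5, 1.5, 3
q = normalized_jacobi(deg, alpha, beta)
val = spi.quad(lambda x: (1 - x)**alpha * (1 + x)**beta * q(x)**2, -1, 1)[0]
print(val)  # expected to be close to 1 for deg >= 1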
Example #15
def wigner_d(m1, m2, theta, l, l_use_bessel=1.e4):
    """
    Function to compute wigner small-d matrices used in wigner transforms.
    """
    l0 = np.copy(l)
    if l_use_bessel is not None:
        #FIXME: This is not great. Due to issues with the scipy hypergeometric function,
        #jacobi can output nan for large ell, l>1.e4
        # As a temporary fix, for ell>1.e4, we are replacing the wigner function with the
        # bessel function. Fingers and toes crossed!!!
        # mpmath is slower and also has convergence issues at large ell.
        #https://github.com/scipy/scipy/issues/4446

        l = np.atleast_1d(l)
        x = l < l_use_bessel
        l = np.atleast_1d(l[x])
    k = np.amin([l - m1, l - m2, l + m1, l + m2], axis=0)
    a = np.absolute(m1 - m2)
    lamb = 0  #lambda
    if m2 > m1:
        lamb = m2 - m1
    b = 2 * l - 2 * k - a
    d_mat = (-1)**lamb
    d_mat *= np.sqrt(
        binom(2 * l - k, k + a)
    )  #this gives array of shape l with elements choose(2l[i]-k[i], k[i]+a)
    d_mat /= np.sqrt(binom(k + b, b))
    d_mat = np.atleast_1d(d_mat)
    x = k < 0
    d_mat[x] = 0

    d_mat = d_mat.reshape(1, len(d_mat))
    theta = theta.reshape(len(theta), 1)
    d_mat = d_mat * ((np.sin(theta / 2.0)**a) * (np.cos(theta / 2.0)**b))
    x = d_mat == 0
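    # NOTE: this 4-argument call matches scipy.special.eval_jacobi(n, alpha, beta, x);
    # scipy.special.jacobi itself takes only (n, alpha, beta[, monic]) and returns a polynomial object.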
    d_mat *= jacobi(k, a, b, np.cos(theta))  #l
    d_mat[x] = 0

    if l_use_bessel is not None:
        l = np.atleast_1d(l0)
        x = l >= l_use_bessel
        l = np.atleast_1d(l[x])
        #         d_mat[:,x]=jn(m1-m2,l[x]*theta)
        d_mat = np.append(d_mat, jn(m1 - m2, l * theta), axis=1)
    return d_mat
Example #16
def WignerD( the , l , m , n ) :
    """
    Wigner D matrices:  d_{mn}^{l}(the)
    the -- angle in radians
    l -- degree (integer)
    m -- order  (integer)
    n -- order  (integer)
    """
    if m >= n :
        factor = (-1)**(l-m) * np.sqrt( ( scimcm.factorial( l+m ) * scimcm.factorial( l-m ) ) /
                                           ( scimcm.factorial( l+n ) * scimcm.factorial( l-n ) ) )
        a = m + n
        b = m - n
        print(a)
        print(b)
        return factor * np.cos( the/2. )**a * ( -np.sin( the/2. ) )**b * scisp.jacobi( l-m , a , b )( -np.cos( the ) )

    else :
        return (-1)**(m-n) * WignerD( the , l , n , m )
Example #17
def wignerd(j, m, n=0, approx_lim=100):

    '''
        Wigner "small d" matrix. (Euler z-y-z convention)
        example:
            j = 2
            m = 1
            n = 0
            beta = linspace(0,pi,100)
            wd210 = wignerd(j,m,n)(beta)

        some conditions have to be met:
             j >= 0
            -j <= m <= j
            -j <= n <= j

        The approx_lim determines at what point
        bessel functions are used. Default is when:
            j > m+10
              and
            j > n+10

        for integer l and n=0, we can use the spherical harmonics. If in
        addition m=0, we can use the ordinary legendre polynomials.
    '''

    if (j < 0) or (abs(m) > j) or (abs(n) > j):
        raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
            + " Valid range for parameters: j>=0, -j<=m,n<=j.")

    if (j > (m + approx_lim)) and (j > (n + approx_lim)):
        #print 'bessel (approximation)'
        return lambda beta: jv(m-n, j*beta)

    if (floor(j) == j) and (n == 0):
        if m == 0:
            #print 'legendre (exact)'
            return lambda beta: legendre(j)(np.cos(beta))
        elif False:
            #print 'spherical harmonics (exact)'
            a = np.sqrt(4.*pi / (2.*j + 1.))
            return lambda beta: a * np.conjugate(sph_harm(m,j,beta,0.))

    jmn_terms = {
        j+n : (m-n,m-n),
        j-n : (n-m,0.),
        j+m : (n-m,0.),
        j-m : (m-n,m-n),
        }

    k = min(jmn_terms)
    a, lmb = jmn_terms[k]

    b = 2.*j - 2.*k - a

    if (a < 0) or (b < 0):
        raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
            + " Encountered negative values in (a,b) = ({0},{1})".format(a,b))

    coeff = np.power(-1.,lmb) * np.sqrt(comb(2.*j-k,k+a)) * (1./np.sqrt(comb(k+b,b)))
    
    
    #print 'jacobi (exact)'
    return lambda beta: coeff\
        * np.power(np.sin(0.5*beta),a) \
        * np.power(np.cos(0.5*beta),b) \
        * jacobi(k,a,b)(np.cos(beta))
Example #18
def wignerd(j,m,n=0,approx_lim=10):
    '''
        Wigner "small d" matrix. (Euler z-y-z convention)
        example:
            j = 2
            m = 1
            n = 0
            beta = linspace(0,pi,100)
            wd210 = wignerd(j,m,n)(beta)

        some conditions have to be met:
             j >= 0
            -j <= m <= j
            -j <= n <= j

        The approx_lim determines at what point
        bessel functions are used. Default is when:
            j > m+10
              and
            j > n+10

        for integer l and n=0, we can use the spherical harmonics. If in
        addition m=0, we can use the ordinary legendre polynomials.
    '''

    if (j < 0) or (abs(m) > j) or (abs(n) > j):
        raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
            + " Valid range for parameters: j>=0, -j<=m,n<=j.")

    if (j > (m + approx_lim)) and (j > (n + approx_lim)):
        #print('bessel (approximation)')
        return lambda beta: jv(m-n, j*beta)

    if (floor(j) == j) and (n == 0):
        if m == 0:
            #print('legendre (exact)')
            return lambda beta: legendre(j)(cos(beta))
        elif False:
            #print('spherical harmonics (exact)')
            a = sqrt(4.*pi / (2.*j + 1.))
            return lambda beta: a * conjugate(sph_harm(m,j,beta,0.))

    jmn_terms = {
        j+n : (m-n,m-n),
        j-n : (n-m,0.),
        j+m : (n-m,0.),
        j-m : (m-n,m-n),
        }

    k = min(jmn_terms)
    a, lmb = jmn_terms[k]

    b = 2.*j - 2.*k - a

    if (a < 0) or (b < 0):
        raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
            + " Encountered negative values in (a,b) = ({0},{1})".format(a,b))

    coeff = power(-1.,lmb) * sqrt(comb(2.*j-k,k+a)) * (1./sqrt(comb(k+b,b)))

    #print('jacobi (exact)')
    return lambda beta: coeff \
        * power(sin(0.5*beta),a) \
        * power(cos(0.5*beta),b) \
        * jacobi(k,a,b)(cos(beta))
Example #19
def jacobi_to_poly_no_invariant(x):
    result = poly1d([0])
    for i in range(x.shape[0]):
        result = result + jacobi(i,a,b)*x[i]
    return result
Example #20
    I0 = I_star[0:1, :]
    R0 = R_star[0:1, :]
    D0 = D_star[0:1, :]
    S0 = N - I0 - R0 - D0
    U0 = [S0, I0, R0, D0]

    #Residual points
    N_f = 500  #5 * len(t_star)
    t_f = np.linspace(
        lb, ub, num=N_f)  # uniform grid for evaluating the fractional derivative

    poly_order = 10
    t_f_mapped = -1 + 2 / (ub - lb) * (t_f - lb)
    t_star_mapped = -1 + 2 / (ub - lb) * (t_star - lb)
    Jacobi_polys = np.asarray(
        [jacobi(n, 0, 0)(t_f_mapped.flatten()) for n in range(0, 15)])
    Jacobi_polys_plots = np.asarray(
        [jacobi(n, 0, 0)(t_star_mapped.flatten()) for n in range(0, 15)])

    #%%
    ######################################################################
    ######################## Training and Predicting ###############################
    ######################################################################
    # t_train = (t_star-lb)/(ub-lb)
    t_train = t_star
    I_train = I_star
    R_train = R_star
    D_train = D_star

    #%%
    from datetime import datetime
Example #21
    def radialfunction_norm(self, n, m, xp, yp):
        rho = np.sqrt(xp**2 + yp**2)
        omega = abs(m)
        sumlimit = (n - omega) // 2
        return (-1)**sumlimit * rho**omega * jacobi(sumlimit, omega, 0)(
            1. - 2. * rho**2)
Example #22
def jacobi_polynomial(n, a, b, x):
    return jacobi(n, a, b)(x)
Example #23
def jacobi_polynomial_derivative(n, a, b, x, k):
    " return derivative of oder k "
    ctemp = gamma(a + b + n + 1 + k) / (2**k) / gamma(a + b + n + 1)
    return ctemp * jacobi(n - k, a + k, b + k)(x)
Example #24
def jacobi_to_poly(x):
    result = poly1d([0])
    for i in range(x.shape[0]):
        result = result + (jacobi(i,a,b)*invariant_distribution)*x[i]
    return result
Example #25
def JacobiP_scipy(x, alpha, beta, N):
    # Evaluate the Jacobi polynomial P_N^(alpha, beta) at the points x.
    return jacobi(N, alpha, beta, monic=0)(x)
Example #27
def Jacobi(n, a, b, x):
    x = np.array(x)
    return (jacobi(n, a, b)(x))