Example #1
from numpy import ceil, zeros
from scipy.special import gamma, jacobi


def jacobi_e2_1d(order, alpha, beta):
    """
    jacobi_e2_1d.m - Evaluate the inner products (squared norms) of the 1d Jacobi-chaos basis

    Syntax     e = jacobi_e2_1d(order, alpha, beta)

    Input:     order = order of Jacobi-chaos
                alpha, beta = parameters of Jacobi-chaos (alpha, beta>-1)
    Output:    e = (order+1) x 1 vector containing the result.

    NO WARNING MESSAGE IS GIVEN WHEN PARAMETERS ARE OUT OF RANGE.

    Original Matlab version by Dongbin Xiu   04/13/2003
    """

    e = zeros((order + 1, 1))
    # number of Gauss-Jacobi quadrature points needed for exact integration
    n_pts = int(ceil((2.0 * order + 1.0) / 2.0))

    j = jacobi(n_pts, alpha, beta).weights
    z = j[:, 0]
    w = j[:, 1]

    factor = 2 ** (alpha + beta + 1) * gamma(alpha + 1) * gamma(beta + 1) / gamma(alpha + beta + 2)

    for i in range(0, order + 1):
        e[i] = sum(jacobi(i, alpha, beta)(z) ** 2 * w) / factor
    return e
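A quick sanity check for this routine (an added sketch, assuming the scipy.special imports shown above): for alpha = beta = 0 the Jacobi-chaos reduces to Legendre polynomials, whose squared norms over [-1, 1] are 2/(2i+1); the normalizing factor is 2 in that case, so each entry should equal 1/(2i+1).

import numpy as np

e = jacobi_e2_1d(4, 0.0, 0.0)
expected = 1.0 / (2.0 * np.arange(5) + 1.0)
assert np.allclose(e.ravel(), expected)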
Example #2
    def second_order_score(y, mean, scale, shape, skewness):
        """ GAS Skew t Update term potentially using second-order information - native Python function

        Parameters
        ----------
        y : float
            datapoint for the time series

        mean : float
            location parameter for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Adjusted score of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        if (y-mean)>=0:
            return ((shape+1)/shape)*(y-mean)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
        else:
            return ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
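The `m1` expression above recurs in every Skew t example in this collection: it equals the absolute first moment of a standard Student t variate with `shape` degrees of freedom, used to re-centre the skewed distribution. A minimal standalone sketch (added here, using only `numpy` and `scipy.special`) showing that the location shift vanishes when `skewness` is 1, so the symmetric case leaves `mean` untouched:

import numpy as np
import scipy.special as sp

shape, scale, skewness = 5.0, 1.0, 1.0
m1 = (np.sqrt(shape) * sp.gamma((shape - 1.0) / 2.0)) / (
    np.sqrt(np.pi) * sp.gamma(shape / 2.0))
# (skewness - 1/skewness) is zero for skewness == 1, so no shift is applied.
assert (skewness - 1.0 / skewness) * scale * m1 == 0.0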
Example #3
    def neg_loglikelihood(y, mean, scale, shape, skewness):
        """ Negative loglikelihood function

        Parameters
        ----------
        y : np.ndarray
            univariate time series

        mean : np.ndarray
            array of location parameters for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Negative loglikelihood of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        return -np.sum(Skewt.logpdf_internal(x=y, df=shape, loc=mean, gamma=skewness, scale=scale))
Example #4
def getProbaTransformedGamma(x0, paramshere,paramsbackground, posshift):
    """ Given a coupling x, calculate the log-odds ratio (positive / background) """
    alpha_pos,beta_pos,b_pos,alpha_neg,b_neg,w,shift = [paramshere[p] for p in ["alpha_pos","beta_pos","b_pos","alpha_neg","b_neg","w","shift"]]
    alpha_pos_bg,beta_pos_bg,b_pos_bg,alpha_neg_bg,b_neg_bg,w_bg,shift_bg = [paramsbackground[p] for p in ["alpha_pos","beta_pos","b_pos","alpha_neg","b_neg","w","shift"]]

    x = x0-shift
    xbg = x0-shift_bg
    if x<=0:
        res = np.log((1-w) * b_neg**(-1/alpha_neg)/gamma(1/alpha_neg) * np.fabs(alpha_neg) * np.exp(-np.fabs(x)**alpha_neg/b_neg))
    else:
        res = np.log(w * b_pos**(-1/alpha_pos)/gamma(1/alpha_pos) * np.fabs(alpha_pos/beta_pos) * 1/(1+(x/beta_pos)**alpha_pos)**(1/alpha_pos+1) * np.exp(-(x/beta_pos)**alpha_pos /(b_pos * (1+ (x/beta_pos)**alpha_pos))))
    if xbg <= 0:
        res -= np.log((1-w_bg) * b_neg_bg**(-1/alpha_neg_bg)/gamma(1/alpha_neg_bg) * np.fabs(alpha_neg_bg) * np.exp(-np.fabs(xbg)**alpha_neg_bg/b_neg_bg))
    else:
        res -= np.log(w_bg * b_pos_bg**(-1/alpha_pos_bg)/gamma(1/alpha_pos_bg) * np.fabs(alpha_pos_bg/beta_pos_bg) * 1/(1+(xbg/beta_pos_bg)**alpha_pos_bg)**(1/alpha_pos_bg+1) * np.exp(-(xbg/beta_pos_bg)**alpha_pos_bg /(b_pos_bg * (1+ (xbg/beta_pos_bg)**alpha_pos_bg))))

    # This is to ensure that however the fits for the coupling distributions look like,
    # on the negative side we force the log-odds ratio to be maximum 0 (i.e. we force the background to be above the signal)
    # and in the right-side part of the positive side we force the log-odds ratio to be minimum 0 (i.e. we force the signal to be above the background)
    if xbg < 0 and res > 0:
        res = 0
    if x0 > posshift+0.1 and res < 0:
        res = 0

    return res
Example #5
    def reg_score_function(X, y, mean, scale, shape, skewness):
        """ GAS Skew t Regression Update term using gradient only - native Python function

        Parameters
        ----------
        X : float
            datapoint for the right hand side variable
    
        y : float
            datapoint for the time series

        mean : float
            location parameter for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Score of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        if (y-mean)>=0:
            return ((shape+1)/shape)*((y-mean)*X)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
        else:
            return ((shape+1)/shape)*((y-mean)*X)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
Example #6
def sersic(p, zp, x, y=None, w=None, comp=False, show=False):
	"""use mue, Re and n"""
	if not p['deltaRe'].vary:
		mue, Re, n = p['MB'].value, p['ReB'].value, p['nB'].value
		b = get_b_n(n)
		Ie = convert_I(mue, zp)
		SB = Ie * np.exp(-1.* b * ( ((x / Re) ** (1 / n)) - 1 ))
	else:
		meB, ReB, nB, BD_ratio, ReD = p['MB'].value, p['ReB'].value, p['nB'].value, p['BD_ratio'].value, p['ReD'].value
		bB = get_b_n(nB)
		bD = get_b_n(1.)
		IeB = convert_I(meB, zp)
		try:
			front = nB * gamma(2*nB) * np.exp(bB) / (bB ** (2*nB))
		except ValueError:
			front = nB * gamma(2*nB) * np.exp(bB) / (-1.*(abs(bB) ** (2*nB)))
		h = ReD / bD
		IeD = front * ReB * ReB * IeB / BD_ratio / h / h / np.exp(bD)
		B = IeB * np.exp(-1.* bB * ( ((x / ReB) ** (1 / nB)) - 1 ))
		D = IeD * np.exp(-1.* bD * ( (x / ReD) - 1 ))
		SB = B + D
	if y is None:
		if comp == False:
			return SB
		else:
			return B, D
	else:
		if w is None: w = np.ones(y.shape)
		return (y - SB) / w
Example #7
    def markov_blanket(y, mean, scale, shape, skewness):
        """ Markov blanket for each likelihood term

        Parameters
        ----------
        y : np.ndarray
            univariate time series

        mean : np.ndarray
            array of location parameters for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Markov blanket of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        return Skewt.logpdf_internal(x=y, df=shape, loc=mean, gamma=skewness, scale=scale)
Example #8
    def wake(self, z, maxi=25, L=None, bunlen=80e-6, convolved=True):
        gaus_to_si = 120*_c/4  # _Z0 _c / (4 pi)
        L = L or (2 * _np.pi * self.rho)

        # Free Space Term
        inds = z < 0
        W0 = _np.zeros(len(z))
        W0[inds] = (-2/3**(4/3)/self.rho**(2/3) /
                    _np.power((-z[inds]), 4/3)*gaus_to_si*L)

        # Shielding Term
        W1 = _np.zeros(len(z))
        zshield = self.shielding / self.bl * z
        for i in range(1, maxi):
            uai = self._getY(3*zshield/i**(3/2))
            W1 += 8*_np.pi*(-1)**(i+1)/i/i*uai*(3 - uai)/(1 + uai)**3
        W1 *= -1/self.h**2 / (2 * _np.pi) * gaus_to_si * L

        # If want the convolved values
        if convolved:
            # For the free space used analytical formulas of convolution
            bl = bunlen
            C = _Z0*_c/2**(13/6)/_np.pi**(3/2)/(3*self.rho**2*bl**10)**(1/3)*L
            W0 = C*(2**(1/2)*_scyspe.gamma(5/6)*(
                        bl**2*_scyspe.hyp1f1(-1/3, 1/2, -z*z/2/bl**2) -
                        z**2*_scyspe.hyp1f1(2/3, 3/2, -z*z/2/bl**2)) +
                    z*bl*_scyspe.gamma(4/3)*(
                        3*_scyspe.hyp1f1(1/6, 1/2, -z*z/2/bl**2) -
                        2*_scyspe.hyp1f1(1/6, 3/2, -z*z/2/bl**2)))
            # For shielding perform convolution numerically
            bunch = _np.exp(-(z*z/bl**2)/2)/_np.sqrt(2*_np.pi)/bl  # gaussian
            W1 = _scysig.fftconvolve(W1, bunch, mode='same') * (z[1]-z[0])
        return W0, W1
Example #9
    def test_sh_jacobi(self):
        # G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1)
        conv = lambda n,p: gamma(n+1)*gamma(n+p)/gamma(2*n+p)
        psub = np.poly1d([2,-1])
        q = 4 * np.random.random()
        p = q-1 + 2*np.random.random()
        #print "shifted jacobi p,q = ", p, q
        G0 = orth.sh_jacobi(0,p,q)
        G1 = orth.sh_jacobi(1,p,q)
        G2 = orth.sh_jacobi(2,p,q)
        G3 = orth.sh_jacobi(3,p,q)
        G4 = orth.sh_jacobi(4,p,q)
        G5 = orth.sh_jacobi(5,p,q)
        ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p)
        ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p)
        ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p)
        ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p)
        ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p)
        ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p)

        assert_array_almost_equal(G0.c,ge0.c,13)
        assert_array_almost_equal(G1.c,ge1.c,13)
        assert_array_almost_equal(G2.c,ge2.c,13)
        assert_array_almost_equal(G3.c,ge3.c,13)
        assert_array_almost_equal(G4.c,ge4.c,13)
        assert_array_almost_equal(G5.c,ge5.c,13)
Example #10
def beta(a, b, mew):
    e1 = ss.gamma(a + b)
    e2 = ss.gamma(a)
    e3 = ss.gamma(b)
    e4 = mew ** (a - 1)
    e5 = (1 - mew) ** (b - 1)
    return (e1/(e2*e3)) * e4 * e5
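A usage sketch added here (assuming `scipy.special` is imported as `ss`, as the snippet presumes): the Beta(2, 2) density at its mode is gamma(4)/(gamma(2)*gamma(2)) * 0.5 * 0.5 = 1.5, and the result agrees with `scipy.stats.beta.pdf`.

import scipy.special as ss
from scipy.stats import beta as beta_dist

assert abs(beta(2, 2, 0.5) - 1.5) < 1e-12
assert abs(beta(2, 2, 0.5) - beta_dist.pdf(0.5, 2, 2)) < 1e-12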
Example #11
def l ( nu, psi, k, n ):
    """log likelihood for psi

    :Parameters:
        *nu* :
            scalar value nu, for which the likelihood should be evaluated
        *psi* :
            m array that gives the psychometric function evaluated at stimulus
            levels x_i, i=1,...,m
        *k* :
            m array that gives the numbers of correct responses (in Yes/No: Yes responses)
            at stimulus levels x_i, i=1,...,m
        *n* :
            m array that gives the numbers of presented trials at stimulus
            levels x_i, i=1,...,m

    :Example:

    >>> psi = [ 0.52370051,  0.58115041,  0.70565915,  0.83343107,  0.89467234,  0.91364765,  0.91867512]
    >>> k   = [28, 29, 35, 41, 46, 46, 45]
    >>> n   = [50]*7
    >>> l ( 1, psi, k, n )
    13.752858759933943
    """
    psi = N.array(psi,'d')
    k = N.array(k,'d')
    n = N.array(n,'d')
    p = k/n
    return (N.log(gamma(nu*n+2))).sum()-(N.log(gamma(psi*nu*n+1))).sum()-(N.log(gamma((1-psi)*nu*n+1))).sum() \
            + (psi*nu*n*N.log(p)).sum() + ((1-psi)*nu*n*N.log(1-p)).sum()
Example #12
 def _evaluate(self,R,z,phi=0.,t=0.,_forceFloatEval=False):
     """
     NAME:
        _evaluate
     PURPOSE:
        evaluate the potential at R,z
     INPUT:
        R - Galactocentric cylindrical radius
        z - vertical height
        phi - azimuth
        t - time
     OUTPUT:
        Phi(R,z)
     HISTORY:
        2010-07-09 - Started - Bovy (NYU)
     """
     if not _forceFloatEval and not self.integerSelf == None:
         return self.integerSelf._evaluate(R,z,phi=phi,t=t)
     elif self.beta == 3.:
         r= numpy.sqrt(R**2.+z**2.)
         return (1./self.a)\
             *(r-self.a*(r/self.a)**(3.-self.alpha)/(3.-self.alpha)\
                   *special.hyp2f1(3.-self.alpha,
                                   2.-self.alpha,
                                   4.-self.alpha,
                                   -r/self.a))/(self.alpha-2.)/r
     else:
         r= numpy.sqrt(R**2.+z**2.)
         return special.gamma(self.beta-3.)\
             *((r/self.a)**(3.-self.beta)/special.gamma(self.beta-1.)\
                   *special.hyp2f1(self.beta-3.,
                                   self.beta-self.alpha,
                                   self.beta-1.,
                                   -self.a/r)
               -special.gamma(3.-self.alpha)/special.gamma(self.beta-self.alpha))/r
Example #13
def dbeta(x, a, b):
    """Beta derivative.

    >>> round(dbeta(0.5, 2, 2), 10)
    0.0
    >>> round(dbeta(0.6, 2, 2), 10)
    -1.2
    >>> round(dbeta(0.9, 1, 1), 10)
    0.0

    """
    x = np.array(x)
    # http://www.math.uah.edu/stat/special/Beta.html
    # B(a,b)=Gamma(a)*Gamma(b)/Gamma(a+b)
    # derivative of beta distribution
    # f'(x) = (1/B(a,b)) * x^(a-2) * (1-x)^(b-2) * [(a-1)-(a+b-2)*x], 0<x<1
    #   print "dbeta: x=",x
    assert (x > 0).all()
    assert (x < 1).all()

    return (
        gamma(a + b)
        / (gamma(a) * gamma(b))
        * ((a - 1) * x ** (a - 2) * (1 - x) ** (b - 1) - x ** (a - 1) * (b - 1) * (1 - x) ** (b - 2))
    )
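To confirm the closed form, a finite-difference check added here against `scipy.stats.beta` (not part of the original snippet, which assumes `np` and `gamma` are already in scope):

import numpy as np
from scipy.stats import beta as beta_dist

x, a, b, h = 0.6, 2.0, 2.0, 1e-6
numeric = (beta_dist.pdf(x + h, a, b) - beta_dist.pdf(x - h, a, b)) / (2 * h)
assert np.isclose(dbeta(x, a, b), numeric, atol=1e-4)  # both are ~ -1.2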
Example #14
 def canonical(s,a1=6,a2=16,b1=1,b2=1,c=1/6,amplitude=1):
     #Canonical HRF as defined here: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3318970/
     # arguments: s seconds
     gamma1 = (s**(a1-1)*b1**a1*np.exp(-b1*s))/gamma(a1)
     gamma2 = (s**(a2-1)*b2**a2*np.exp(-b2*s))/gamma(a2)
     tsConvoluted = amplitude*(gamma1-c*gamma2)
     return tsConvoluted
Example #15
def sampling_distribution_cv(x, cv, num):
    """
    Sampling distribution of the coefficient of variation, see
    W. A. Hendricks and K. W. Robey. The Sampling Distribution of the
    Coefficient of Variation. Ann. Math. Statist. 7, 129-132 (1936).
    """
    if x < 0:
        return 0
    x2 = x*x

    # calculate the sum
    factorial = misc.factorial
    res = sum(
        factorial(num - 1) * special.gamma(0.5*(num - i)) * num**(0.5*i) / (
            factorial(num - 1 - i) * factorial(i) * 2**(0.5*i) * cv**i
            * (1 + x2)**(0.5*i)
        )
        for i in range(1 - num%2, num, 2)
    )

    # multiply by the other terms
    res *= 2./(np.sqrt(np.pi)*special.gamma(0.5*(num-1)))
    res *= x**(num-2)/((1 + x2)**(0.5*num))
    res *= np.exp(-0.5*num/(cv**2)*x2/(1 + x2))

    return res
Example #16
def beta_prior(delta,a,b):
    beta = 0
    if (delta>1) or (delta<0):
        return beta
    else:
        beta = (gamma(a+b)/(gamma(a)*gamma(b)))*(delta**(a-1))*((1-delta)**(b-1))
        return beta
Example #17
def covariance_wind(f, c0, rho, distance, L, Cv, steps=10, initial=0.001):
    """Covariance. Wind fluctuations only.
    
    :param f: Frequency
    :param c0: Speed of sound
    :param rho: Spatial separation
    :param distance: Distance
    :param L: Correlation length
    :param Cv: Variance of wind speed
    :param initial: Initial value
    
    
    """
    f = np.asarray(f)
    k = 2.*np.pi*f / c0
    K0 = 2.*np.pi / L
    
    A = 5.0/(18.0*np.pi*gamma(1./3.)) # Equation 11, see text below. Approximate result is 0.033
    gamma_v = 3./10.*np.pi**2.*A*k**2.*K0**(-5./3.)*4.*(Cv/c0)**2.  # Equation 28, only wind fluctuations

    krho = k * rho
    
    t = krho[:, None] * np.linspace(0.00000000001, 1., steps) # Fine discretization for integration

    #t[t==0.0] = 1.e-20

    gamma56 = gamma(5./6.)
    bessel56 = besselk(5./6., t)
    bessel16 = besselk(1./6., t)
    
    integration = cumtrapz(ne.evaluate("2.0**(1./6.)*t**(5./6.)/gamma56 * (bessel56 - t/2.0 * bessel16 )"), initial=initial)[:,-1]
    B = ne.evaluate("2.0*gamma_v * distance / krho * integration")
    
    #B = 2.0*gamma_v * distance / krho * cumtrapz((2.0**(1./6.)*t**(5./6.)/gamma(5./6.) * (besselk(5./6., t) - t/2.0*besselk(1./6., t)) ), initial=initial)[:,-1]
    return B
Example #18
def zr_a(mu, N0):
    """Assuming a rainfall parameter relationship of Z=AR^b,
    Compute the A prefactor using gamma distribution.

    Ulbrich and Atlas (JAMC 2007), Eqn T5

    Parameters
    ----------
    mu: float
        Shape parameter of gamma DSD model [unitless]
    N0: float
        Intercept parameter [m^(-1-shape) m^-3]

    Returns
    -------
    A: float
        Z-R prefactor (see description)
    """

    # Mask 0. values, otherwise gamma function returns "inf"
    mucopy = np.ma.masked_equal(mu, 0.)

    # gamma_fix is a patch for versions earlier than NCL v6.2,
    # which cannot handle missing data in the gamma function
    ANumer = 10E6 * scifunct.gamma(7 + mucopy) * N0 ** (-2.33 / (4.67 + mu))
    ADenom = (33.31 * scifunct.gamma(4.67 + mucopy)) ** ((7 + mu) / (4.67 + mu))

    # Mask any zero values from Denom
    ADenom = np.ma.masked_equal(ADenom, 0.)

    A = ANumer / ADenom

    return A
Example #19
    def odf_sh(self):
        r""" Calculates the real analytical ODF in terms of Spherical
        Harmonics.
        """
        # Number of Spherical Harmonics involved in the estimation
        J = (self.radial_order + 1) * (self.radial_order + 2) // 2

        # Compute the Spherical Harmonics Coefficients
        c_sh = np.zeros(J)
        counter = 0

        for l in range(0, self.radial_order + 1, 2):
            for n in range(l, int((self.radial_order + l) / 2) + 1):
                for m in range(-l, l + 1):

                    j = int(l + m + (2 * np.array(range(0, l, 2)) + 1).sum())

                    Cnl = (
                        ((-1) ** (n - l / 2)) /
                        (2.0 * (4.0 * np.pi ** 2 * self.zeta) ** (3.0 / 2.0)) *
                        ((2.0 * (4.0 * np.pi ** 2 * self.zeta) ** (3.0 / 2.0) *
                          factorial(n - l)) /
                         (gamma(n + 3.0 / 2.0))) ** (1.0 / 2.0)
                    )
                    Gnl = (gamma(l / 2 + 3.0 / 2.0) * gamma(3.0 / 2.0 + n)) / \
                        (gamma(l + 3.0 / 2.0) * factorial(n - l)) * \
                        (1.0 / 2.0) ** (-l / 2 - 3.0 / 2.0)
                    Fnl = hyp2f1(-n + l, l / 2 + 3.0 / 2.0, l + 3.0 / 2.0, 2.0)

                    c_sh[j] += self._shore_coef[counter] * Cnl * Gnl * Fnl
                    counter += 1

        return c_sh
Example #20
def factorial2(n,exact=0):
    """n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi)  n odd
           = 2**(n) * n!                                 n even

    If exact==0, then floating point precision is used, otherwise
    exact long integer is computed.

    Notes:
      - Array argument accepted only for exact=0 case.
      - If n<0, the return value is 0.
    """
    if exact:
        if n < -1:
            return 0
        if n <= 0:
            return 1
        val = 1
        for k in range(n, 0, -2):
            val *= k
        return val
    else:
        from scipy import special
        n = asarray(n)
        vals = zeros(n.shape,'d')
        cond1 = (n % 2) & (n >= -1)
        cond2 = (1-(n % 2)) & (n >= -1)
        oddn = extract(cond1,n)
        evenn = extract(cond2,n)
        nd2o = oddn / 2.0
        nd2e = evenn / 2.0
        place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
        place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
        return vals
Example #21
def custom_incomplete_gamma(a, x):
    """ Incomplete gamma function.

    For the case covered by scipy, a > 0, scipy is called. Otherwise the gamma function
    recurrence relations are called, extending the scipy behavior.

    Parameters
    -----------
    a : array_like

    x : array_like

    Returns
    --------
    gamma : array_like

    Examples
    --------
    >>> a, x = 1, np.linspace(1, 10, 100)
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = 0
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = -1
    >>> g = custom_incomplete_gamma(a, x)
    """

    if isinstance(a, np.ndarray):

        if not isinstance(x, np.ndarray):
            x = np.repeat(x, len(a))

        if len(a) != len(x):
            msg = ("The ``a`` and ``x`` arguments of the "
                   "``custom_incomplete_gamma`` function must have the same"
                   "length.\n")
            raise HalotoolsError(msg)

        result = np.zeros(len(a))

        mask = (a < 0)
        if np.any(mask):
            result[mask] = ((custom_incomplete_gamma(a[mask]+1, x[mask]) -
                x[mask]**a[mask] * np.exp(-x[mask])) / a[mask])
        mask = (a == 0)
        if np.any(mask):
            result[mask] = -expi(-x[mask])
        mask = a > 0
        if np.any(mask):
            result[mask] = gammaincc(a[mask], x[mask]) * gamma(a[mask])

        return result

    else:

        if a < 0:
            return (custom_incomplete_gamma(a+1, x) - x**a * np.exp(-x))/a
        elif a == 0:
            return -expi(-x)
        else:
            return gammaincc(a, x) * gamma(a)
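Two notes added as a spot check (assuming the snippet's own imports of `gammaincc`, `gamma`, and `expi` are in scope): for a = 1 the upper incomplete gamma function Gamma(1, x) is exp(-x), and the a < 0 branch is the downward recurrence Gamma(a, x) = (Gamma(a+1, x) - x**a * exp(-x)) / a applied to that.

import numpy as np

x = np.linspace(1, 10, 5)
# a = 1: Gamma(1, x) = exp(-x)
assert np.allclose(custom_incomplete_gamma(1, x), np.exp(-x))
# a = 0 uses -expi(-x), i.e. the exponential integral E1(x)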
Example #22
        def _ll(self, m, p, a, xn, xln, **kwargs):
            """Computation of log likelihood

            Dimensions
            ----------
            m :  n_unique x n_features
            p :  n_unique x n_features x n_features
            a :  n_unique x n_lags (shared_alpha=F)
                 OR     1 x n_lags (shared_alpha=T)
            xn:  N x n_features
            xln: N x n_features x n_lags
            """

            samples = xn.shape[0]
            xn = xn.reshape(samples, 1, self.n_features)
            m = m.reshape(1, self.n_unique, self.n_features)
            det = np.linalg.det(np.linalg.inv(p))
            det = det.reshape(1, self.n_unique)

            lagged = np.dot(xln, a.T)  # NFU
            lagged = np.swapaxes(lagged, 1, 2)  # NUF
            xm = xn-(lagged + m)
            tem = np.einsum('NUF,UFX,NUX->NU', xm, p, xm)

            # TODO division in gamma function
            res = np.log(gamma((self.degree_freedom + self.n_features)/2)) - \
                  np.log(gamma(self.degree_freedom/2)) - (self.n_features/2.0) * \
                  np.log(self.degree_freedom) - \
                  (self.n_features/2.0) * np.log(np.pi) - 0.5 * np.log(det) - \
                  ((self.degree_freedom + self.n_features) / 2.0) * \
                  np.log(1 + (1/self.degree_freedom) * tem)

            return res
Example #23
def pearson7_area(x, amp, cen, wid, expon):
    """scaled pearson peak function """
    xp = 1.0 * expon
    scale = gamma(xp) * sqrt((2**(1/xp) - 1)) / (gamma(xp-0.5))
    return scale * pearson7(x, amp, cen, wid, xp) / (wid*sqrt(pi))
Example #24
def dirichlet(mu, alpha):
    mu = np.array(mu)
    alpha = np.array(alpha)
    product = np.prod(mu ** (alpha - 1))
    normaliser = gamma(alpha.sum())/np.prod(gamma(alpha))
    result = product * normaliser
    return result
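A cross-check added against `scipy.stats.dirichlet` (the snippet itself assumes `numpy` as `np` and `gamma` from `scipy.special`):

import numpy as np
from scipy.stats import dirichlet as scipy_dirichlet

mu = [0.2, 0.3, 0.5]
alpha = [2.0, 3.0, 4.0]
assert np.isclose(dirichlet(mu, alpha), scipy_dirichlet.pdf(mu, alpha))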
Example #25
File: exppow.py Project: HMP1/bumps
def exppow_pars(B):
    """
    Return w(B) and c(B) for the exponential power density::

        p(v|S,B) = w(B)/S exp(-c(B) |v/S|^(2/(1+B)))

    *B* in (-1,1] is a measure of kurtosis::

        B = 1: double exponential
        B = 0: normal
        B -> -1: uniform

    [1] Thiemann, M., M. Trosser, H. Gupta, and S. Sorooshian (2001).
    "Bayesian recursive parameter estimation for hydrologic models",
    Water Resour. Res. 37(10) 2521-2535.
    """

    # First calculate some dummy variables
    A1 = gamma(3*(1+B)/2)
    A2 = gamma((1+B)/2)
    # And use these to derive Cb and Wb
    cB = (A1/A2)**(1/(1+B))
    wB = sqrt(A1)/((1+B)*(A2**(1.5)))

    return cB, wB
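A limiting-case check added here: at B = 0 the exponential power density should reduce to the standard normal, giving c(0) = 1/2 and w(0) = 1/sqrt(2*pi). This assumes the module-level `gamma` and `sqrt` that the snippet relies on are the scipy.special and numpy functions.

import math

cB, wB = exppow_pars(0)
assert abs(cB - 0.5) < 1e-12
assert abs(wB - 1 / math.sqrt(2 * math.pi)) < 1e-12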
Example #26
def jacobi(N,a,b,x,NOPT=1):
    """ Compute the Jacobi polynomials which are 
        orthogonal on [-1,1] with respect to the weight 
        w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them on the 
        given grid up to P_N(x). Setting NOPT=2 returns
        the L2-normalized polynomials """
    
    m = len(x)
    P = np.zeros((m,N+1))

    apb = a+b
    a1 = a-1
    b1 = b-1
    c = apb*(a-b)

    P[:,0] = 1

    if N>0:
        P[:,1] = 0.5*(a-b+(apb+2)*x) 
     
    if N>1:
        for k in range(2,N+1):
            k2 = 2*k
            g = k2+apb
            g1 = g-1
            g2 = g-2
            d =  2.0*(k + a1)*(k + b1)*g
            P[:,k] = (g1*(c + g2*g*x)*P[:,k-1]-d*P[:,k-2])/(k2*(k + apb)*g2)

    if NOPT == 2:
        from scipy.special import gamma
        k = np.arange(N+1)
        pnorm = 2**(apb+1)*gamma(k+a+1)*gamma(k+b+1)/((2*k+a+b+1)*(gamma(k+1)*gamma(k+a+b+1)))
        P *= 1/np.sqrt(pnorm) 
    return P
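Since this re-implements the classical three-term Jacobi recurrence, it can be cross-checked against `scipy.special.jacobi` (a verification sketch added here; the alias avoids shadowing the local `jacobi`):

import numpy as np
from scipy.special import jacobi as scipy_jacobi

x = np.linspace(-1, 1, 7)
P = jacobi(3, 0.5, 0.25, x)
for k in range(4):
    assert np.allclose(P[:, k], scipy_jacobi(k, 0.5, 0.25)(x))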
Example #27
 def __init__(self, beta, mu):
     self.m = (5 * beta[2] - 9) / (2 * (3 - beta[2]))
     self.a = math.sqrt(2 * mu[2] * beta[2] / (3 - beta[2]))
     self.y0 = sp.gamma((2 * self.m + 1) / 2) / (self.a * math.sqrt(math.pi) * sp.gamma(self.m + 1))
     self.l[1] = -self.a
     self.l[2] = self.a
     return
Example #28
def gen_hrf(frame_rate=120, tf=30,
            c=1/6.0, a1=6, a2=16, A=1/0.833657):
    ts = 1/float(frame_rate)
    A = A*ts
    t = np.arange(0,tf,ts)
    h = A*np.exp(-t)*(t**(a1-1)/gamma(a1) - c*t**(a2-1)/gamma(a2))
    return(h[::-1])
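A usage sketch added here (not from the source): the canonical double-gamma HRF should peak roughly 5 s after onset, and the kernel is returned time-reversed, so it is flipped back before locating the peak.

import numpy as np
from scipy.special import gamma

h = gen_hrf(frame_rate=120, tf=30)
peak_t = np.argmax(h[::-1]) / 120.0  # seconds after onset
assert 4.5 < peak_t < 5.5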
Example #29
def phase_covariance(r, r0, L0):
    """
    Calculate the phase covariance between two points separated by `r`,
    in turbulence with a given `r0` and `L0`.
    Uses equation 5 from Assemat and Wilson, 2006.

    Parameters:
        r (float, ndarray): Separation between points in metres (can be ndarray)
        r0 (float): Fried parameter of turbulence in metres
        L0 (float): Outer scale of turbulence in metres
    """
    # Make sure everything is a float to avoid nasty surprises in division!
    r = numpy.float32(r)
    r0 = float(r0)
    L0 = float(L0)

    # Get rid of any zeros
    r += 1e-40

    A = (L0 / r0) ** (5. / 3)

    B1 = (2 ** (-5. / 6)) * gamma(11. / 6) / (numpy.pi ** (8. / 3))
    B2 = ((24. / 5) * gamma(6. / 5)) ** (5. / 6)

    C = (((2 * numpy.pi * r) / L0) ** (5. / 6)) * kv(5. / 6, (2 * numpy.pi * r) / L0)

    cov = A * B1 * B2 * C

    return cov
Example #30
 def _Block(self,x):
     #if self.params['fractal_dim']<0:
     #    self.params['fractal_dim']=-self.params['fractal_dim']
     try:
         if x<0:
             x=-x
         if self.params['radius']<0:
             self.params['radius']=-self.params['radius']
             
         if x==0 or self.params['radius']==0 :
              return 1e+32
         elif self.params['fractal_dim']==0:
             return 1.0 + (math.sin((self.params['fractal_dim']-1.0) * math.atan(x * self.params['corr_length']))\
                           * self.params['fractal_dim'] * gamma(self.params['fractal_dim']-1.0))\
                           *( math.pow( 1.0 + 1.0/((x**2)*(self.params['corr_length']**2)),1/2.0)) 
         elif self.params['corr_length']==0 or self.params['fractal_dim']==1:
             return 1.0 + (math.sin((self.params['fractal_dim']-1.0) * math.atan(x * self.params['corr_length']))\
                           * self.params['fractal_dim'] * gamma(self.params['fractal_dim']-1.0))\
                           /( math.pow( (x*self.params['radius']), self.params['fractal_dim']))   
             
         elif self.params['fractal_dim']<1:
             return 1.0 + (math.sin((self.params['fractal_dim']-1.0) * math.atan(x * self.params['corr_length']))\
                           * self.params['fractal_dim'] * gamma(self.params['fractal_dim']-1.0))\
                           /( math.pow( (x*self.params['radius']), self.params['fractal_dim']))*\
                              math.pow( 1.0 + 1.0/((x**2)*(self.params['corr_length']**2)),(1-self.params['fractal_dim'])/2.0)   
         else:
             return 1.0 + (math.sin((self.params['fractal_dim']-1.0) * math.atan(x * self.params['corr_length']))\
                           * self.params['fractal_dim'] * gamma(self.params['fractal_dim']-1.0))\
                           / math.pow( (x*self.params['radius']), self.params['fractal_dim'])\
                              /math.pow( 1.0 + 1.0/((x**2)*(self.params['corr_length']**2)),(self.params['fractal_dim']-1.0)/2.0)   
     except:
         return 1 # Need a real fix. 
Example #31
def q_Gauss_pdf(x, q, l, mu):
    constant = np.sqrt(np.pi) * gamma(
        (3 - q) / (2 * (q - 1))) / (np.sqrt(q - 1) * gamma(1 / (q - 1)))
    pdf = np.sqrt(l) / constant * (1 + (1 - q) * (-l * (x - mu)**2))**(1 /
                                                                       (1 - q))
    return pdf
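As q approaches 1 the q-Gaussian converges to a normal density with variance 1/(2l); with l = 1/2 that is the standard normal. A limit check added here (q is kept only slightly above 1 so that the gamma-function arguments stay in floating-point range):

import numpy as np
from scipy.special import gamma
from scipy.stats import norm

x = np.linspace(-3, 3, 13)
assert np.allclose(q_Gauss_pdf(x, 1.01, 0.5, 0.0), norm.pdf(x), atol=0.01)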
Example #32
    def fit(self, data):

        Lshore = l_shore(self.radial_order)
        Nshore = n_shore(self.radial_order)
        # Generate the SHORE basis
        M = self.cache_get('shore_matrix', key=self.gtab)
        if M is None:
            M = shore_matrix(self.radial_order, self.zeta, self.gtab, self.tau)
            self.cache_set('shore_matrix', self.gtab, M)

        MpseudoInv = self.cache_get('shore_matrix_reg_pinv', key=self.gtab)
        if MpseudoInv is None:
            MpseudoInv = np.dot(
                np.linalg.inv(
                    np.dot(M.T, M) + self.lambdaN * Nshore +
                    self.lambdaL * Lshore), M.T)
            self.cache_set('shore_matrix_reg_pinv', self.gtab, MpseudoInv)

        # Compute the signal coefficients in SHORE basis
        if not self.constrain_e0:
            coef = np.dot(MpseudoInv, data)

            signal_0 = 0

            for n in range(int(self.radial_order / 2) + 1):
                signal_0 += (coef[n] *
                             (genlaguerre(n, 0.5)(0) *
                              ((factorial(n)) /
                               (2 * np.pi *
                                (self.zeta**1.5) * gamma(n + 1.5)))**0.5))

            coef = coef / signal_0
        else:
            data_norm = data / data[self.gtab.b0s_mask].mean()
            M0 = M[self.gtab.b0s_mask, :]

            c = cvxpy.Variable(M.shape[1])
            design_matrix = cvxpy.Constant(M)
            objective = cvxpy.Minimize(
                cvxpy.sum_squares(design_matrix * c - data_norm) +
                self.lambdaN * cvxpy.quad_form(c, Nshore) +
                self.lambdaL * cvxpy.quad_form(c, Lshore))

            if not self.positive_constraint:
                constraints = [M0[0] * c == 1]
            else:
                lg = int(np.floor(self.pos_grid**3 / 2))
                v, t = create_rspace(self.pos_grid, self.pos_radius)
                psi = self.cache_get('shore_matrix_positive_constraint',
                                     key=(self.pos_grid, self.pos_radius))
                if psi is None:
                    psi = shore_matrix_pdf(self.radial_order, self.zeta,
                                           t[:lg])
                    self.cache_set('shore_matrix_positive_constraint',
                                   (self.pos_grid, self.pos_radius), psi)
                constraints = [M0[0] * c == 1., psi * c > 1e-3]
            prob = cvxpy.Problem(objective, constraints)
            try:
                prob.solve(solver=self.cvxpy_solver)
                coef = np.asarray(c.value).squeeze()
            except Exception:
                warn('Optimization did not find a solution')
                coef = np.zeros(M.shape[1])
        return ShoreFit(self, coef)
Example #33
def vvla(va, x):
    """
    VVLA computes parabolic cylinder function Vv(x) for large arguments.

  Licensing:
    This routine is copyrighted by Shanjie Zhang and Jianming Jin.  However,
    they give permission to incorporate this routine into a user program
    provided that the copyright is acknowledged.

  Modified:
    06 April 2012

  Author:
    Shanjie Zhang, Jianming Jin

  Reference:
    Shanjie Zhang, Jianming Jin,
    Computation of Special Functions,
    Wiley, 1996,
    ISBN: 0-471-11963-6,
    LC: QA351.C45.

  Parameters:

    Input, double precision X, the argument.

    Input, double precision VA, the order nu.

    Output, double precision PV, the value of V(nu,x).
    """
    x1 = np.ndarray(dtype="double")
    pd1 = np.ndarray(dtype="double")
    g1 = np.ndarray(dtype="double")
    pi = 3.141592653589793e0
    eps = 1.0e-12

    I1 = (x >= 0).nonzero()
    I2 = (x < 0).nonzero()
    qe = np.ndarray(dtype="double", size=(I1, I2))
    qe[I1] = np.ones(0, len(I1))
    qe[I2] = np.exp(-.5e0 * x[I2] * x[I2])
    a0 = np.abs(x)**(-va - 1.0e0) * np.sqrt(2.0e0 / pi) * qe
    pv = np.ones(0, len(x))
    r = pv
    I = (x < 0).nonzero()
    Ir = range(0, len(x) + 1)
    for k in range(1, 19):
        r[Ir] = 0.5e0 * r[Ir] * (2.0 * k + va -
                                 1.0) * (2 * k + va) / (k * x[Ir] * x[Ir])
        pv[Ir] = pv[Ir] + r[Ir]
        index = (np.abs(r[Ir]) >= eps * np.abs(pv[Ir]))
        if index.size == 0:
            break
        else:
            Ir = Ir[index]
    pv = a0 * pv
    if I.size != 0:
        x1[I] = -x[I]
        pd1[I] = dvla(va, x1[I])
        g1 = gamma(-va)
        ds1 = np.sin(pi * va) * np.sin(pi * va)
        pv[I] = ds1 * g1 / pi * pd1[I] - np.cos(pi * va) * pv[I]
    return pv
Example #34
def dvsa(va, x):
    """
        for small argument
       Input:   x  --- Argument
                va --- Order
       Output:  PD --- Dv(x)
       Routine called: GAMMA for computing â(x)
    ===================================================
    """
    va0 = []
    ga0 = []
    g1 = []
    vt = []
    g0 = []
    vm = []
    gm = []
    eps = 1.0e-15
    pi = 3.141592653589793
    sq2 = np.sqrt(2.0)

    I1 = (x >= 0).nonzero()
    I2 = (x < 0).nonzero()
    """
    ep = []
    ep[I1] = np.ones((0,len(I1)))
    ep[I2] = np.exp[-.5*x[I2]*x[I2]]
    """
    ep = np.exp(-.5 * x[I2] * x[I2])
    pd = np.zeros((1, len(x)))
    r = np.copy(pd)
    va0 = 0.500 * (1.00 - va)

    I1 = (x == 0).nonzero()[0]
    I2 = (x != 0).nonzero()[0]
    a0 = np.ndarray(shape=(1, 0))
    if va == 0.0:
        pd = ep
    else:
        if I1.size != 0:
            if va0 <= 0.0 and va0 == np.fix(va0):
                pd[I1] = 0.000
            else:
                ga0 = gamma(va0)
                pd[I1] = np.sqrt(pi) / (2.00**(-.500 * va) * ga0)
        if I2.size != 0:
            g1 = gamma(-va)
            a0 = np.append(a0, 2.00**(-0.5 * va - 1.00) * ep / g1)
            vt = -.5 * va
            g0 = gamma(vt)
            pd = np.insert(pd, I2, g0)
            r = np.insert(r, I2, 1.0e0)
            rvlag = 1
            if np.round(va) == va and va > -2:
                rvlag = 0
            if rvlag:
                gamodd = gamma(-0.5 * (1 + va))
                gameven = gamma(-0.5 * va)
            I2r = I2
            r1 = []
            for m in range(1, 251):
                vm = .5 * (m - va)
                if rvlag:
                    if np.floor(m / 2) == m / 2:
                        gameven = gameven * (vm - 1)
                        gm = gameven
                    else:
                        gamodd = gamodd * (vm - 1)
                        gm = gamodd
                else:
                    gm = gamma(vm)
                r[I2r] = -r[I2r] * sq2 * x[I2r] / m
                #r1[I2r]=gm*r[I2r]
                r1 = np.insert(r1, I2r, gm * r[I2r])
                pd[I2r] = pd[I2r] + r1[I2r]
                index = (np.abs(r1[I2r]) >= eps * np.abs(pd[I2r])).nonzero()[0]
                if index.size == 0:
                    break
                else:
                    I2r = I2r[index]
            pd[I2] = a0[I2] * pd[I2][0]

    return pd[:-1]
Example #35
def f(n):  # Using eq. 8 from Graham and Driver (2005)
    fn = np.exp(bn(n)) * n * (bn(n)**(-2 * n)) * gamma(2 * n)
    return fn
Example #36
 def _pdf(self, x, degreesN, meanB):
     "Custom inverse Chi-square distribution"
     pdf = 1 / (gamma(degreesN / 2)) * meanB * (degreesN * meanB / 2)**(
         degreesN / 2) * x**(-degreesN / 2 - 2) * np.exp(
             -(degreesN * meanB) / (2 * x))
     return pdf
Example #37
 def _pdf(self, x, degreesN, meanB):
     "Custom Chi-square distribution"
     pdf = 1 / (gamma(degreesN / 2)) * (degreesN / (2 * meanB))**(
         degreesN / 2) * x**(degreesN / 2 - 1) * np.exp(
             -(degreesN * x) / (2 * meanB))
     return pdf
Example #38
def compute_ellipsoid_volume(radius):
    dim = len(radius)
    num = 2 * (np.pi**(dim / 2)) * np.prod(radius)
    den = dim * gamma(dim / 2)
    return num / den
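The formula is the standard d-dimensional ellipsoid volume pi^(d/2) * prod(r) / Gamma(d/2 + 1), rewritten using Gamma(d/2 + 1) = (d/2) * Gamma(d/2). A sanity check added for the unit 3-ball:

import numpy as np
from scipy.special import gamma

vol = compute_ellipsoid_volume(np.array([1.0, 1.0, 1.0]))
assert np.isclose(vol, 4.0 * np.pi / 3.0)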
Example #39
          ctest[j,k] = np.float64(ctem[0]) * (2 * k + 1)
    print('Particle for the %i-th radius' % j)

measurement = (2*np.pi*rdata)/11.2
w= sca_data/q_data
# particle radius
# rdata = np.arange(0.01,6.01,0.01)
################### compute the gamma size distribution of the particles #########################
af = 1/veff-3
b = (af+3)/reff
# nr = np.ones([len(reff),len(rdata)],dtype=np.float64)
nr = np.zeros([len(reff),len(rdata)],dtype=np.float64)
# for m in range(len(reff)):
#       nr[m,:] = (N*b[m]**(af+1)*(rdata**af)*np.exp(-b[m]*rdata))/gamma(af+1)
for m in range(len(rdata)):
      nr[:,m] = (N*b**(af+1))*(rdata[m]**af)*(np.exp(-b*rdata[m]))/gamma(af+1)

g2 = np.zeros([len(reff)],dtype=np.float64)
for n in range(len(reff)): # loop over the different Reff values
    g2[n]= gg(q_data,w,asf,nr[n,:],rdata)
############ gamma distribution of r #########################################
plt.plot(rdata,nr[10,:],color = 'red',linewidth = 1.0,linestyle = '-')
plt.plot(rdata,nr[688,:],color = 'yellow',linewidth = 2.0,linestyle = '--')
plt.legend(['reff=%f'%reff[10],'reff=%f'%reff[688]])
my_x_ticks = np.arange(0,rdata[-1],10)
plt.xticks(my_x_ticks)
plt.show()
plt.close()
################# compute Reff ###############################################
deff = np.zeros(len(reff),dtype=np.float64)
for n in range(len(reff)):
Example #40
    def function(
        self,
        x_locations,
        y_locations,
        z_locations,
        turbine,
        turbine_coord,
        deflection_field,
        flow_field,
    ):
        """
        Using the Blondel super-Gaussian wake model, this method calculates and
        returns the wake velocity deficits, caused by the specified turbine,
        relative to the freestream velocities at the grid of points
        comprising the wind farm flow field.

        Args:
            x_locations (np.array): An array of floats that contains the
                streamwise direction grid coordinates of the flow field
                domain (m).
            y_locations (np.array): An array of floats that contains the grid
                coordinates of the flow field domain in the direction normal to
                x and parallel to the ground (m).
            z_locations (np.array): An array of floats that contains the grid
                coordinates of the flow field domain in the vertical
                direction (m).
            turbine (:py:obj:`floris.simulation.turbine`): Object that
                represents the turbine creating the wake.
            turbine_coord (:py:obj:`floris.utilities.Vec3`): Object containing
                the coordinate of the turbine creating the wake (m).
            deflection_field (np.array): An array of floats that contains the
                amount of wake deflection in meters in the y direction at each
                grid point of the flow field.
            flow_field (:py:class:`floris.simulation.flow_field`): Object
                containing the flow field information for the wind farm.

        Returns:
            np.array, np.array, np.array:
                Three arrays of floats that contain the wake velocity
                deficit in m/s created by the turbine relative to the freestream
                velocities for the U, V, and W components, aligned with the x, y,
                and z directions, respectively. The three arrays contain the
                velocity deficits at each grid point in the flow field.
        """
        # TODO: implement veer
        # Veer (degrees)
        # veer = flow_field.wind_veer

        # Turbulence intensity for wake width calculation
        TI = turbine.current_turbulence_intensity

        # Turbine parameters
        D = turbine.rotor_diameter
        HH = turbine.hub_height
        yaw = -1 * turbine.yaw_angle  # opposite sign convention in this model
        Ct = turbine.Ct
        U_local = flow_field.u_initial

        # Wake deflection
        delta = deflection_field

        # Calculate mask values to mask upstream wake
        yR = y_locations - turbine_coord.x2
        xR = yR * tand(yaw) + turbine_coord.x1

        # Compute scaled variables (Eq 1, pp 3 of ref. [1] in docstring)
        # Use absolute value to avoid overflows
        x_tilde = np.abs(x_locations - turbine_coord.x1) / D
        r_tilde = (np.sqrt(
            (y_locations - turbine_coord.x2 - delta)**2 +
            (z_locations - HH)**2,
            dtype=np.float128,
        ) / D)

        # Calculate Beta (Eq 10, pp 5 of ref. [1] and table 4 of ref. [2] in docstring)
        beta = 0.5 * (1.0 + np.sqrt(1.0 - Ct)) / np.sqrt(1.0 - Ct)
        k = self.a_s * TI + self.b_s
        eps = (self.c_s1 * Ct + self.c_s2) * np.sqrt(beta)

        # Calculate sigma_tilde (Eq 9, pp 5 of ref. [1] and table 4 of ref. [2] in docstring)
        sigma_tilde = k * x_tilde + eps

        # Calculate super-Gaussian order using iterative method
        root_n = minimize_scalar(
            self.match_AD_theory,
            bounds=(0.0, 10.0),
            method="bounded",
            args=(Ct, eps, yaw, self.c_f),
        )
        a_f = root_n.x
        b_f = self.b_f1 * np.exp(self.b_f2 * TI) + self.b_f3
        n = a_f * np.exp(b_f * x_tilde) + self.c_f

        # Calculate max vel def (Eq 5, pp 4 of ref. [1] in docstring)
        a1 = 2**(2 / n - 1)
        a2 = 2**(4 / n - 2)
        C = a1 - np.sqrt(a2 - ((n * Ct) * cosd(yaw) /
                               (16.0 * gamma(2 / n) * np.sign(sigma_tilde) *
                                (np.abs(sigma_tilde)**(4 / n)))))

        # Compute wake velocity (Eq 1, pp 3 of ref. [1] in docstring)
        velDef1 = U_local * C * np.exp(
            (-1 * r_tilde**n) / (2 * sigma_tilde**2))
        velDef1[x_locations < xR] = 0

        return (
            np.sqrt(velDef1**2),
            np.zeros(np.shape(velDef1)),
            np.zeros(np.shape(velDef1)),
        )
Example #41
File: zip.py Project: lllybi/nomics
def poisson_pmf(x, mu):
    return mu**x / special.gamma(x + 1) * np.exp(-mu)
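Writing x! as gamma(x + 1) keeps the PMF vectorizable over float arrays. A spot check added against `scipy.stats.poisson` (not from the zip.py source):

import numpy as np
from scipy import special
from scipy.stats import poisson

x, mu = np.arange(6), 2.5
assert np.allclose(poisson_pmf(x, mu), poisson.pmf(x, mu))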
Example #42
 def psi_ft(self, f):
     """Fourier transform of the DOG wavelet."""
     return (-1j**self.m / np.sqrt(gamma(self.m + 0.5)) * f**self.m *
             np.exp(-0.5 * f**2))
Example #43
def p_density(values, k):
    volume = np.pi**(k / 2) / sp.gamma(k / 2 + 1)
    density = k / (values * volume)
    return density
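Here `volume` is the unit-ball volume in k dimensions, pi^(k/2)/Gamma(k/2 + 1), as used in k-nearest-neighbour density estimates. For k = 2 it is exactly pi, so the density reduces to 2/(pi * values); a check added here (assuming the snippet's `np`/`sp` imports):

import numpy as np
import scipy.special as sp

values = np.array([0.5, 1.0, 2.0])
assert np.allclose(p_density(values, 2), 2.0 / (np.pi * values))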
Example #44
File: zip.py Project: lllybi/nomics
def trunc_poisson_pmf(x, mu):
    # 10x faster than poisson.pmf(x, mu)/(1 - poisson.cdf(0, mu))
    return mu**x * np.exp(-mu) / (special.gamma(x + 1) * (1 - np.exp(-mu)))
Example #45
#!/usr/bin/python3
import numpy
import scipy
from numpy import array, sin
from scipy import constants, special

# Numpy version and Example
print(numpy.version.version)

a = array([1, 2, 3, 4, 5])
b = array([1, 0, -1, 0, -1])
print(a * 4)
print(4 + a)
print(sin(a))

# SciPy version and Example
print(scipy.version.version)

print(constants.c)
print(special.gamma(1))
print(special.gamma(-10))
Example #46
    def chains2evidence(self,
                        verbose=None,
                        rand=False,
                        profile=False,
                        nproc=-1,
                        unitvar=True,
                        thin=True,
                        nthin=None):
        # MLE=maximum likelihood estimate of evidence:
        #

        if verbose is None:
            verbose = self.verbose

        kmax = self.kmax
        ndim = self.ndim

        MLE = np.zeros((self.nbatch, kmax))

        if profile:
            print('time profiling scikit knn ..')
            profile_data = np.zeros((self.nbatch, 2))

        # Loop over different numbers of MCMC samples (=S):
        itot = 0
        for ipow, nsample in zip(self.idbatch, self.nchain):
            S = int(nsample)
            DkNN = np.zeros((S, kmax + 1))
            indices = np.zeros((S, kmax + 1))
            volume = np.zeros((S, kmax + 1))

            samples_raw, logL, weight = self.get_samples(S,
                                                         istart=itot,
                                                         rand=rand,
                                                         thin=thin,
                                                         nthin=nthin)

            # Renormalise loglikelihood (temporarily) to avoid underflows:
            logLmax = np.amax(logL)
            fs = logL - logLmax

            #print('(mean,min,max) of LogLikelihood: ',fs.mean(),fs.min(),fs.max())

            if not unitvar:
                # Covariance matrix of the samples, and eigenvalues (in w) and eigenvectors (in v):
                ChainCov = np.cov(samples_raw.T)
                w, v = np.linalg.eig(ChainCov)
                Jacobian = math.sqrt(np.linalg.det(ChainCov))

                # Prewhiten:  First diagonalise:
                samples = np.dot(samples_raw, v)

                # And renormalise new parameters to have unit covariance matrix:
                for i in range(ndim):
                    samples[:, i] = samples[:, i] / math.sqrt(w[i])
            else:
                #no diagonalisation
                Jacobian = 1
                samples = samples_raw

            # Use sklearn nearest neighbour routine, which chooses the 'best' algorithm.
            # This is where the hard work is done:
            if profile:
                with Timer() as t:
                    nbrs = NearestNeighbors(n_neighbors=kmax + 1,
                                            algorithm='auto',
                                            n_jobs=nproc).fit(samples)
                    DkNN, indices = nbrs.kneighbors(samples)

                profile_data[ipow, 0] = S
                profile_data[ipow, 1] = t.secs
            else:
                nbrs = NearestNeighbors(n_neighbors=kmax + 1,
                                        algorithm='auto',
                                        n_jobs=nproc).fit(samples)
                DkNN, indices = nbrs.kneighbors(samples)

            # Create the posterior for 'a' from the distances (volumes) to nearest neighbour:
            for k in range(1, self.kmax):
                for j in range(0, S):
                    # Use analytic formula for the volume of ndim-sphere:
                    volume[j, k] = math.pow(math.pi, ndim / 2) * math.pow(
                        DkNN[j, k], ndim) / sp.gamma(1 + ndim / 2)

                #print('volume minmax: ',volume[:,k].min(),volume[:,k].max())
                #print('weight minmax: ',weight.min(),weight.max())

                # dotp is the summation term in the notes:
                dotp = np.dot(volume[:, k] / weight[:], np.exp(fs))

                # The MAP value of 'a' is obtained analytically from the expression for the posterior:
                amax = dotp / (S * k + 1.0)

                # Maximum likelihood estimator for the evidence (this is normalised to the analytic value):
                SumW = np.sum(weight)
                #print('SumW*S*amax*Jacobian',SumW,S,amax,Jacobian)
                MLE[ipow, k] = math.log(SumW * S * amax * Jacobian) + logLmax

                # Output is: for each sample size (S), compute the evidence for kmax-1 different values of k.
                # Final columm gives the evidence in units of the analytic value.
                # The values for different k are clearly not independent. If ndim is large, k=1 does best.
                if self.brange is None:
                    #print('(mean,min,max) of LogLikelihood: ',fs.mean(),fs.min(),fs.max())
                    print(
                        'k={},nsample={}, dotp={}, median_volume={}, a_max={}, MLE={}'
                        .format(k, S, dotp, statistics.median(volume[:, k]),
                                amax, MLE[ipow, k]))

                else:
                    if verbose > 0:
                        if ipow == 0:
                            print('(iter,mean,min,max) of LogLikelihood: ',
                                  ipow, fs.mean(), fs.min(), fs.max())
                            print(
                                '-------------------- useful intermediate parameter values ------- '
                            )
                            print('nsample, dotp, median volume, amax, MLE')
                        print(S, k, dotp, statistics.median(volume[:, k]),
                              amax, MLE[ipow, k])

        if self.brange is None:
            MLE = MLE[0, 1:]

        print()
        print('MLE[k=(1,2,3,4)] = ', MLE)
        print()

        if profile:
            return (MLE, profile_data)
        else:
            return MLE
Example #47
def kappa_velocity_1D(v, T, kappa, particle="e", v_drift=0, vTh=np.nan, units="units"):
    r"""
    Return the probability density at the velocity `v` in m/s
    to find a particle `particle` in a plasma of temperature `T`
    following the Kappa distribution function in 1D. The slope of the
    tail of the Kappa distribution function is set by 'kappa', which
    must be greater than :math:`3/2`.

    Parameters
    ----------
    v: ~astropy.units.Quantity
        The velocity in units convertible to m/s.

    T: ~astropy.units.Quantity
        The temperature in Kelvin.

    kappa: ~astropy.units.Quantity
        The kappa parameter is a dimensionless number which sets the slope
        of the energy spectrum of suprathermal particles forming the tail
        of the Kappa velocity distribution function. Kappa must be greater
        than :math:`3/2`.

    particle: str, optional
        Representation of the particle species (e.g., `'p'` for protons, `'D+'`
        for deuterium, or `'He-4 +1'` for :math:`He_4^{+1}`
        (singly ionized helium-4)), which defaults to electrons.

    v_drift: ~astropy.units.Quantity, optional
        The drift velocity in units convertible to m/s.

    vTh: ~astropy.units.Quantity, optional
        Thermal velocity (most probable) in m/s. This is used for
        optimization purposes to avoid re-calculating `vTh`, for example
        when integrating over velocity-space.

    units: str, optional
        Selects whether to run function with units and unit checks (when
        equal to "units") or to run as unitless (when equal to "unitless").
        The unitless version is substantially faster for intensive
        computations.

    Returns
    -------
    f : ~astropy.units.Quantity
        Probability density in Velocity^-1, normalized so that
        :math:`\int_{-\infty}^{+\infty} f(v) dv = 1`.

    Raises
    ------
    TypeError
        A parameter argument is not a `~astropy.units.Quantity` and
        cannot be converted into a `~astropy.units.Quantity`.

    ~astropy.units.UnitConversionError
        If the parameters is not in appropriate units.

    ValueError
        If the temperature is negative, or the particle mass or charge state
        cannot be found.

    Notes
    -----
    In one dimension, the Kappa velocity distribution function describing
    the distribution of particles with speed :math:`v` in a plasma with
    temperature :math:`T` and suprathermal parameter :math:`\kappa` is
    given by:

    .. math::

       f = A_\kappa \left(1 + \frac{(\vec{v} -
       \vec{V_{drift}})^2}{\kappa v_{Th,\kappa}^2}\right)^{-\kappa}

    where :math:`v_{Th,\kappa}` is the kappa thermal speed
    and :math:`A_\kappa = \frac{1}{\sqrt{\pi} \kappa^{3/2} v_{Th,\kappa}}
    \frac{\Gamma(\kappa + 1)}{\Gamma(\kappa - 1/2)}`
    is the normalization constant.

    As :math:`\kappa` approaches infinity, the kappa distribution function
    converges to the Maxwellian distribution function.

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> kappa_velocity_1D(v=v, T=30000*u.K, kappa=4, particle='e', v_drift=0 * u.m / u.s)
    <Quantity 6.75549...e-07 s / m>

    See Also
    --------
    kappa_velocity_3D
    kappa_thermal_speed
    """
    # must have kappa > 3/2 for distribution function to be valid
    if kappa <= 3 / 2:
        raise ValueError(f"Must have kappa > 3/2, instead of {kappa}.")
    if units == "units":
        # unit checks and conversions
        # checking velocity units
        v = v.to(u.m / u.s)
        # catching case where drift velocities have default values, they
        # need to be assigned units
        if v_drift == 0:
            if not isinstance(v_drift, astropy.units.quantity.Quantity):
                v_drift = v_drift * u.m / u.s
        # checking units of drift velocities
        v_drift = v_drift.to(u.m / u.s)
        # convert temperature to Kelvins
        T = T.to(u.K, equivalencies=u.temperature_energy())
        if np.isnan(vTh):
            # get thermal velocity and thermal velocity squared
            vTh = parameters.kappa_thermal_speed(T, kappa, particle=particle)
        elif not np.isnan(vTh):
            # check units of thermal velocity
            vTh = vTh.to(u.m / u.s)
    elif np.isnan(vTh) and units == "unitless":
        # assuming unitless temperature is in Kelvins
        vTh = (
            parameters.kappa_thermal_speed(T * u.K, kappa, particle=particle)
        ).si.value
    # Get thermal velocity squared and accounting for 1D instead of 3D
    vThSq = vTh ** 2
    # Get square of relative particle velocity
    vSq = (v - v_drift) ** 2
    # calculating distribution function
    expTerm = (1 + vSq / (kappa * vThSq)) ** (-kappa)
    coeff1 = 1 / (np.sqrt(np.pi) * kappa ** (3 / 2) * vTh)
    coeff2 = gamma(kappa + 1) / (gamma(kappa - 1 / 2))
    distFunc = coeff1 * coeff2 * expTerm
    if units == "units":
        return distFunc.to(u.s / u.m)
    elif units == "unitless":
        return distFunc
Example #48
def costs(p):
    return 0.5 * np.log(1.0 / p.k) + np.log(gamma(p.a)) - p.a * np.log(p.b)
Example #49
def PDF_GammaDistribution(N, k, theta):
    x = np.linspace(0, k * theta * 10, N)
    w = 1.0 / (gamma(k) * theta**k) * x**(k - 1) * np.exp(-x / theta)
    return x, w
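The grid spans ten times the mean k*theta, so truncation error is tiny and the sampled density should integrate to about 1. A normalisation check added here:

import numpy as np
from scipy.special import gamma
from scipy.integrate import trapezoid

x, w = PDF_GammaDistribution(10001, 2.0, 1.5)
assert abs(trapezoid(w, x) - 1.0) < 1e-3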
Example #50
def kappa_velocity_3D(
    vx,
    vy,
    vz,
    T,
    kappa,
    particle="e",
    vx_drift=0,
    vy_drift=0,
    vz_drift=0,
    vTh=np.nan,
    units="units",
):
    r"""
    Return the probability density function for finding a particle with
    velocity components `v_x`, `v_y`, and `v_z` in m/s in a suprathermal
    plasma of temperature `T` and parameter 'kappa' which follows the
    3D Kappa distribution function. This function assumes Cartesian
    coordinates.

    Parameters
    ----------
    vx: ~astropy.units.Quantity
        The velocity in x-direction units convertible to m/s.

    vy: ~astropy.units.Quantity
        The velocity in y-direction units convertible to m/s.

    vz: ~astropy.units.Quantity
        The velocity in z-direction units convertible to m/s.

    T: ~astropy.units.Quantity
        The temperature, preferably in Kelvin.

    kappa: ~astropy.units.Quantity
        The kappa parameter is a dimensionless number which sets the slope
        of the energy spectrum of suprathermal particles forming the tail
        of the Kappa velocity distribution function. Kappa must be greater
        than :math:`3/2`.

    particle: str, optional
        Representation of the particle species (e.g., 'p' for protons, 'D+'
        for deuterium, or 'He-4 +1' for :math:`He_4^{+1}` : singly ionized
        helium-4)), which defaults to electrons.

    vx_drift: ~astropy.units.Quantity, optional
        The drift velocity in x-direction units convertible to m/s.

    vy_drift: ~astropy.units.Quantity, optional
        The drift velocity in y-direction units convertible to m/s.

    vz_drift: ~astropy.units.Quantity, optional
        The drift velocity in z-direction units convertible to m/s.

    vTh: ~astropy.units.Quantity, optional
        Thermal velocity (most probable) in m/s. This is used for
        optimization purposes to avoid re-calculating `vTh`, for example
        when integrating over velocity-space.

    units: str, optional
        Selects whether to run function with units and unit checks (when
        equal to "units") or to run as unitless (when equal to "unitless").
        The unitless version is substantially faster for intensive
        computations.

    Returns
    -------
    f : ~astropy.units.Quantity
        Probability density in Velocity^-3, normalized so that:
        :math:`\iiint_{-\infty}^{\infty} f(\vec{v}) d\vec{v} = 1`

    Raises
    ------
    TypeError
        The parameter arguments are not Quantities and
        cannot be converted into Quantities.

    ~astropy.units.UnitConversionError
        If the parameters are not in appropriate units.

    ValueError
        If the temperature is negative, or the particle mass or charge state
        cannot be found.

    Notes
    -----
    In three dimensions, the Kappa velocity distribution function describing
    the distribution of particles with speed :math:`v` in a plasma with
    temperature :math:`T` and suprathermal parameter :math:`\kappa` is given by:

    .. math::

       f = A_\kappa \left(1 + \frac{(\vec{v} -
       \vec{V}_{drift})^2}{\kappa v_{Th,\kappa}^2}\right)^{-(\kappa + 1)}

    where :math:`v_{Th,\kappa}` is the kappa thermal speed
    and :math:`A_\kappa = \frac{1}{2 \pi (\kappa v_{Th,\kappa}^2)^{3/2}}
    \frac{\Gamma(\kappa + 1)}{\Gamma(\kappa - 1/2) \Gamma(3/2)}` is the
    normalization constant.

    As :math:`\kappa` approaches infinity, the kappa distribution function
    converges to the Maxwellian distribution function.

    See also
    --------
    kappa_velocity_1D
    kappa_thermal_speed

    Example
    -------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> kappa_velocity_3D(vx=v,
    ... vy=v,
    ... vz=v,
    ... T=30000 * u.K,
    ... kappa=4,
    ... particle='e',
    ... vx_drift=0 * u.m / u.s,
    ... vy_drift=0 * u.m / u.s,
    ... vz_drift=0 * u.m / u.s)
    <Quantity 3.7833...e-19 s3 / m3>
    """
    # must have kappa > 3/2 for distribution function to be valid
    if kappa <= 3 / 2:
        raise ValueError(f"Must have kappa > 3/2, instead of {kappa}.")
    if units == "units":
        # unit checks and conversions
        # checking velocity units
        vx = vx.to(u.m / u.s)
        vy = vy.to(u.m / u.s)
        vz = vz.to(u.m / u.s)
        # Catching case where drift velocities have default values, they
        # need to be assigned units
        vx_drift = _v_drift_units(vx_drift)
        vy_drift = _v_drift_units(vy_drift)
        vz_drift = _v_drift_units(vz_drift)
        # convert temperature to Kelvins
        T = T.to(u.K, equivalencies=u.temperature_energy())
        if np.isnan(vTh):
            # get the thermal speed from the temperature
            vTh = parameters.kappa_thermal_speed(T, kappa, particle=particle)
        else:
            # check units of the supplied thermal speed
            vTh = vTh.to(u.m / u.s)
    elif np.isnan(vTh) and units == "unitless":
        # assuming unitless temperature is in Kelvins
        vTh = parameters.kappa_thermal_speed(T * u.K, kappa, particle=particle).si.value
    # getting square of thermal velocity
    vThSq = vTh ** 2
    # Get square of relative particle velocity
    vSq = (vx - vx_drift) ** 2 + (vy - vy_drift) ** 2 + (vz - vz_drift) ** 2
    # calculating distribution function
    expTerm = (1 + vSq / (kappa * vThSq)) ** (-(kappa + 1))
    coeff1 = 1 / (2 * np.pi * (kappa * vThSq) ** (3 / 2))
    coeff2 = gamma(kappa + 1) / (gamma(kappa - 1 / 2) * gamma(3 / 2))
    distFunc = coeff1 * coeff2 * expTerm
    if units == "units":
        return distFunc.to((u.s / u.m) ** 3)
    elif units == "unitless":
        return distFunc
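
# Sanity-check sketch (not part of the original source): in "unitless" mode
# with vTh supplied, no plasmapy call is made and T is never used, so the
# normalization can be checked on a plain NumPy grid.
import numpy as np

vTh = 1.0
v = np.linspace(-15 * vTh, 15 * vTh, 121)
vx, vy, vz = np.meshgrid(v, v, v, indexing="ij")
f = kappa_velocity_3D(vx, vy, vz, T=None, kappa=4, vTh=vTh, units="unitless")
print(np.trapz(np.trapz(np.trapz(f, v), v), v))  # ~1.0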
Example #51
0
from scipy.special import gamma, hyp1f1


def psi_l(l, b):
    n = l // 2
    v = (-b)**n
    v *= gamma(n + 1. / 2) / gamma(2 * n + 3. / 2)
    v *= hyp1f1(n + 1. / 2, 2 * n + 3. / 2, -b)
    return v
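
# Sanity-check sketch (not part of the original source): Kummer's integral
# representation 1F1(a; c; z) = gamma(c)/(gamma(a)*gamma(c-a)) *
# integral_0^1 exp(z*t) * t**(a-1) * (1-t)**(c-a-1) dt (valid for c > a > 0)
# independently verifies the hyp1f1 arguments used above (a = n+1/2, c = 2n+3/2).
import numpy as np
from scipy.integrate import quad
from scipy.special import gamma, hyp1f1

n, b = 1, 0.7
a, c = n + 0.5, 2 * n + 1.5
integral, _ = quad(lambda t: np.exp(-b * t) * t**(a - 1) * (1 - t)**(c - a - 1), 0, 1)
assert np.isclose(hyp1f1(a, c, -b), gamma(c) / (gamma(a) * gamma(c - a)) * integral)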
Example #52
0
import numpy as np
from scipy.special import gammainc

def CDF_GammaDistribution(N, k, theta):
    x = np.linspace(0, k * theta * 10, N)
    # scipy's gammainc is already regularized, so no extra 1/gamma(k) factor
    w = gammainc(k, x / theta)
    return x, w
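
# Sanity-check sketch (not part of the original source): cross-check against
# scipy.stats.gamma, which uses the same shape/scale convention.
import numpy as np
from scipy.stats import gamma as gamma_dist

x, w = CDF_GammaDistribution(200, 3.0, 2.0)
assert np.allclose(w, gamma_dist.cdf(x, a=3.0, scale=2.0))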
Example #53
0
# percentage of replacement outliers
epsilon = 0.04

# Number of samples per cluster
N_k = 100

# Select combinations of EM and BIC to be simulated
# 1: Gaussian, 2: t, 3: Huber, 4: Tukey
em_bic = np.array([[1, 1], [2, 2], [2, 4], [3, 3], [3, 4]])

#%% Cluster Enumeration

tic = time.time()
embic_iter = len(em_bic)

# non-regularized upper incomplete gamma: gammaincc is regularized, so
# multiplying by gamma(a) recovers Gamma(a, b)
igamma = lambda a, b: gammaincc(a, b) * gamma(a)

data, labels, r, N, K_true, mu_true, S_true = t6.data_31(N_k, epsilon)
L_max = 2 * K_true  # search range
bic = np.zeros([MC, L_max, 3, embic_iter])
like = np.zeros([MC, L_max, 3, embic_iter])
pen = np.zeros([MC, L_max, 3, embic_iter])

#%%
for iMC in range(MC):
    data, labels, r, N, K_true, mu_true, S_true = t6.data_31(N_k, epsilon)

    # Design parameters
    # t:
    nu = 3
    # Huber:
Example #54
0
import numpy as np
from matplotlib import pyplot as plt
from scipy import integrate as intg
from scipy.special import lambertw as W
from scipy.special import gamma

# Ec is the Coulomb energy
# L is the Lambda
# g is the attraction
# every energy is normalized to E_R = \kappa*p0^2
# every length is normalized to 1/p0
a = 8 / np.exp(2)  # the constant in the Coulomb approximation
A = 4 * gamma(5 / 4)**2 / np.sqrt(np.pi)
A = A.real  # the constant we need for L = 0
'''======================================'''
'''                                      '''
'''        MAKING THE FUNCTIONS          '''
'''                                      '''
'''======================================'''

################################
#   L < 0 (ABOVE BAND CASE)    #
################################
'''DEFINE THE INTEGRAL'''


def f0(a):
    if a > 100:
        I = (2 * np.log(a) + 2.07944) / a
    else:
        integrand = lambda x: 1 / np.sqrt(((x**2 - a**2)**2) + 1)
Example #55
0
            if shape[cluster] > 1:
                t2 = shape[cluster] / 2 * (shape[cluster] -
                                           1) * x[i]**2 * e_mean2

            e_x_mean_lambda_[i, cluster] = abs(
                e_mean_n(sk, mk, shape[cluster], k)[cluster] + t1[cluster] +
                t2[cluster])

            # e_x_mean_lambda_[i, cluster] = abs(e_x_mean_lambda_[i, cluster])
count = 0

while (count < 50):
    for i in range(k):
        p1 = e_ln_pi[i] + (1 / shape[i]) * e_ln_precision_[i] + np.log(
            shape[i]) - np.log(2 * gamma(1 / shape[i]))
        z[:, i] = (p1 - (e_precision_[i] * e_x_mean_lambda_[:, i]).reshape(
            -1, 1)).reshape(-1)

    rnk = np.exp(z) / np.reshape(np.exp(z).sum(axis=1), (-1, 1))
    Nk = rnk.sum(axis=0)
    alphak = Nk / 2 + alpha0 - 1

    betak = beta0 + (rnk * e_x_mean_lambda_).sum(axis=0)

    for cluster in range(k):
        for i in range(len(x)):
            if x[i] > mk[cluster]:
                test_mk_num[i, cluster] = rnk[i][cluster] * e_precision_[
                    cluster] * shape[cluster] * abs(x[i]**
                                                    shape[cluster]) / (x[i])
Example #56
0
def entropy(data=None,
            prob=None,
            method='nearest-neighbors',
            bins=None,
            errorVal=1e-5,
            units='bits'):
    '''
    Given a probability distribution (prob) or an iterable of symbols (data),
    compute and return its continuous entropy.

    inputs:
    ------
        data:       samples by dimensions ndarray

        prob:       iterable with probabilities

        method:     'nearest-neighbors', 'gaussian', or 'bin'

        bins:       either a list of num_bins, or a list of lists containing
                    the bin edges

        errorVal:   if prob is given, 'entropy' checks that the sum is about 1.
                    It raises an error if abs(sum(prob)-1) >= errorVal

        units:      either 'bits' or 'nats'

        Different Methods:

        'nearest-neighbors' computes the binless entropy (bits) of a random vector
        using average nearest neighbors distance (Kozachenko and Leonenko, 1987).
        For a review see Beirlant et al., 2001 or Chandler & Field, 2007.

        'gaussian' computes the binless entropy based on estimating the covariance
        matrix and assuming the data is normally distributed.

        'bin' discretizes the data and computes the discrete entropy.

    '''

    if prob is None and data is None:
        raise ValueError(
            "%s.entropy requires either 'prob' or 'data' to be defined" %
            __name__)

    if prob is not None and data is not None:
        raise ValueError(
            "%s.entropy requires only 'prob' or 'data' to be given but not both"
            % __name__)

    if prob is not None and not isinstance(prob, np.ndarray):
        raise TypeError("'entropy' in '%s' needs 'prob' to be an ndarray" %
                        __name__)

    if prob is not None and abs(prob.sum() - 1) > errorVal:
        raise ValueError("parameter 'prob' in '%s.entropy' should sum to 1" %
                         __name__)

    if data is not None:
        num_samples = data.shape[0]
        if len(data.shape) == 1:
            num_dimensions = 1
        else:
            num_dimensions = data.shape[1]

    if method == 'nearest-neighbors':
        from sklearn.neighbors import NearestNeighbors
        from scipy.special import gamma

        if data is None:
            raise ValueError(
                'Nearest neighbors entropy requires original data')

        if len(data.shape) > 1:
            k = num_dimensions
        else:
            k = 1

        nbrs = NearestNeighbors(n_neighbors=2, algorithm='auto').fit(data)
        distances, indices = nbrs.kneighbors(data)
        rho = distances[:, 1]  # nearest-neighbor distance (first column is always zero)
        Ak = (k *
              np.pi**(float(k) / float(2))) / gamma(float(k) / float(2) + 1)

        if units == 'bits':
            # 0.577215... is the Euler-Mascheroni constant (np.euler_gamma),
            # converted to bits by the log2(e) factor
            return k * np.mean(np.log2(rho)) + np.log2(
                num_samples * Ak / k) + np.log2(np.exp(1)) * np.euler_gamma
        elif units == 'nats':
            return k * np.mean(np.log(rho)) + np.log(
                num_samples * Ak / k) + np.euler_gamma
        else:
            print('Units not recognized: {}'.format(units))

    elif method == 'gaussian':
        from numpy.linalg import det

        if data is None:
            raise ValueError(
                'Gaussian entropy requires original data')
        detCov = det(np.dot(data.transpose(), data) / num_samples)
        normalization = (2 * np.pi * np.exp(1))**num_dimensions

        if detCov == 0:
            return -np.inf
        else:
            if units == 'bits':
                return 0.5 * np.log2(normalization * detCov)
            elif units == 'nats':
                return 0.5 * np.log(normalization * detCov)
            else:
                print('Units not recognized: {}'.format(units))

    elif method == 'bin':
        if prob is None and bins is None:
            raise ValueError('Either prob or bins must be specified.')

        if data is not None:
            prob = symbols_to_prob(data, bins=bins)

        if units == 'bits':
            # compute the log2 of the probability and change any -inf to 0
            logProb = np.log2(prob)
            logProb[logProb == -np.inf] = 0
        elif units == 'nats':
            # compute the log of the probability and change any -inf to 0
            logProb = np.log(prob)
            logProb[logProb == -np.inf] = 0
        else:
            print('Units not recognized: {}'.format(units))

        # return sum of product of logProb and prob
        # (not using np.dot here because prob, logprob are nd arrays)
        return -float(np.sum(prob * logProb))
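
# Sanity-check sketch (not part of the original source): on standard-normal
# samples the nearest-neighbors estimate should approach the analytic
# differential entropy of a 2-D standard normal, 0.5*log((2*pi*e)**2) nats.
import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(size=(5000, 2))
h_est = entropy(data=samples, method='nearest-neighbors', units='nats')
h_true = 0.5 * np.log((2 * np.pi * np.e) ** 2)
print(h_est, h_true)  # should agree to within a few percent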
Example #57
0
 def _pdf(self, x, a):
     # reflected ("double") gamma PDF: |x|**(a-1) * exp(-|x|) / (2 * gamma(a))
     ax = abs(x)
     return 1.0 / (2 * special.gamma(a)) * ax**(a - 1.0) * np.exp(-ax)
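
# Sanity-check sketch (not part of the original source): scipy.stats.dgamma
# implements the same reflected-gamma formula and can serve as a reference.
import numpy as np
from scipy import special
from scipy.stats import dgamma

x, a = -1.2, 2.5
ref = 1.0 / (2 * special.gamma(a)) * abs(x)**(a - 1.0) * np.exp(-abs(x))
assert np.isclose(ref, dgamma.pdf(x, a))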
Example #58
0
def pure_py(xyz, Snlm, Tnlm, nmax, lmax):
    # Pure-Python reference evaluation of a self-consistent-field (SCF)
    # basis-function expansion: returns density, potential, and gradient at
    # Cartesian positions xyz from cosine/sine coefficients Snlm/Tnlm.
    from scipy.special import lpmv, gegenbauer, eval_gegenbauer, gamma
    from math import factorial as f

    Plm = lambda l, m, costh: lpmv(m, l, costh)
    Ylmth = lambda l, m, costh: np.sqrt(
        (2 * l + 1) / (4 * np.pi) * f(l - m) / f(l + m)) * Plm(l, m, costh)

    twopi = 2 * np.pi
    sqrtpi = np.sqrt(np.pi)
    sqrt4pi = np.sqrt(4 * np.pi)

    r = np.sqrt(np.sum(xyz**2, axis=0))
    X = xyz[2] / r  # cos(theta)
    sinth = np.sqrt(1 - X**2)
    phi = np.arctan2(xyz[1], xyz[0])
    xsi = (r - 1) / (r + 1)

    density = 0
    potenti = 0
    gradien = np.zeros_like(xyz)
    sph_gradien = np.zeros_like(xyz)
    for l in range(lmax + 1):
        r_term1 = r**l / (r * (1 + r)**(2 * l + 3))
        r_term2 = r**l / (1 + r)**(2 * l + 1)
        for m in range(l + 1):
            for n in range(nmax + 1):
                Cn = gegenbauer(n, 2 * l + 3 / 2)
                Knl = 0.5 * n * (n + 4 * l + 3) + (l + 1) * (2 * l + 1)
                rho_nl = Knl / twopi * sqrt4pi * r_term1 * Cn(xsi)
                phi_nl = -sqrt4pi * r_term2 * Cn(xsi)

                density += rho_nl * Ylmth(
                    l, m, X) * (Snlm[n, l, m] * np.cos(m * phi) +
                                Tnlm[n, l, m] * np.sin(m * phi))
                potenti += phi_nl * Ylmth(
                    l, m, X) * (Snlm[n, l, m] * np.cos(m * phi) +
                                Tnlm[n, l, m] * np.sin(m * phi))

                # derivatives
                dphinl_dr = (
                    2 * sqrtpi * np.power(r, -1 + l) *
                    np.power(1 + r, -3 - 2 * l) *
                    (-2 *
                     (3 + 4 * l) * r * eval_gegenbauer(-1 + n, 2.5 + 2 * l,
                                                       (-1 + r) /
                                                       (1 + r)) + (1 + r) *
                     (l *
                      (-1 + r) + r) * eval_gegenbauer(n, 1.5 + 2 * l,
                                                      (-1 + r) / (1 + r))))
                sph_gradien[0] += dphinl_dr * Ylmth(
                    l, m, X) * (Snlm[n, l, m] * np.cos(m * phi) +
                                Tnlm[n, l, m] * np.sin(m * phi))

                A = np.sqrt((2 * l + 1) / (4 * np.pi)) * np.sqrt(
                    gamma(l - m + 1) / gamma(l + m + 1))
                dYlm_dth = A / sinth * (l * X * Plm(l, m, X) -
                                        (l + m) * Plm(l - 1, m, X))
                sph_gradien[1] += (1 / r) * dYlm_dth * phi_nl * (
                    Snlm[n, l, m] * np.cos(m * phi) +
                    Tnlm[n, l, m] * np.sin(m * phi))

                sph_gradien[2] += (m / (r * sinth)) * phi_nl * Ylmth(
                    l, m, X) * (-Snlm[n, l, m] * np.sin(m * phi) +
                                Tnlm[n, l, m] * np.cos(m * phi))

    cosphi = np.cos(phi)
    sinphi = np.sin(phi)
    gradien[0] = sinth * cosphi * sph_gradien[0] + X * cosphi * sph_gradien[
        1] - sinphi * sph_gradien[2]
    gradien[1] = sinth * sinphi * sph_gradien[0] + X * sinphi * sph_gradien[
        1] + cosphi * sph_gradien[2]
    gradien[2] = X * sph_gradien[0] - sinth * sph_gradien[1]

    return density, potenti, gradien
Example #59
0
 def _mom(self, k, df):
     # k-th raw moment: 2**(k/2) * gamma((df + k)/2) / gamma(df/2)
     return 2**(.5*k)*special.gamma(.5*(df+k))\
             /special.gamma(.5*df)
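
# Sanity-check sketch (not part of the original source): the expression matches
# the k-th raw moment of a chi distribution with df degrees of freedom,
# verified here against its PDF f(x) = x**(df-1) * exp(-x**2/2) /
# (2**(df/2 - 1) * gamma(df/2)).
import numpy as np
from scipy import special
from scipy.integrate import quad

df, k = 5, 3
analytic = 2**(0.5 * k) * special.gamma(0.5 * (df + k)) / special.gamma(0.5 * df)
pdf = lambda x: x**(df - 1) * np.exp(-x**2 / 2) / (2**(df / 2 - 1) * special.gamma(df / 2))
numeric, _ = quad(lambda x: x**k * pdf(x), 0, np.inf)
assert np.isclose(analytic, numeric)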
Example #60
0
 def _pdf(self, x, a):
     # standard gamma PDF: x**(a - 1) * exp(-x) / gamma(a)
     Px = (x)**(a - 1.0) * np.exp(-x) / special.gamma(a)
     return Px
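
# Sanity-check sketch (not part of the original source): this is the standard
# gamma PDF, x**(a-1) * exp(-x) / gamma(a); scipy.stats.gamma agrees.
import numpy as np
from scipy import special
from scipy.stats import gamma as gamma_dist

x, a = 2.3, 1.7
assert np.isclose(x**(a - 1.0) * np.exp(-x) / special.gamma(a),
                  gamma_dist.pdf(x, a))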