Example #1
def b_mat(ind_mat):
    r""" Calculates the B coefficients from [1]_ Eq. 27.

    Parameters
    ----------
    ind_mat : array, shape (N,3)
        ordering of the basis in x, y, z

    Returns
    -------
    B : array, shape (N,)
        B coefficients for the basis

    References
    ----------
    .. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
    diffusion imaging method for mapping tissue microstructure",
    NeuroImage, 2013.
    """

    B = np.zeros(ind_mat.shape[0])
    for i in range(ind_mat.shape[0]):
        n1, n2, n3 = ind_mat[i]
        K = int(not(n1 % 2) and not(n2 % 2) and not(n3 % 2))
        B[i] = K * np.sqrt(factorial(n1) * factorial(n2) * factorial(n3)
                           ) / (factorial2(n1) * factorial2(n2) * factorial2(n3))

    return B
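As a minimal standalone sketch of Eq. 27 for a single index triple (assuming a current SciPy, where factorial and factorial2 live in scipy.special; scipy.misc.factorial has been removed):

# Standalone check of K * sqrt(n1! n2! n3!) / (n1!! n2!! n3!!) for one triple.
import numpy as np
from scipy.special import factorial, factorial2

def b_single(n1, n2, n3):
    # K is 1 only when all three indices are even, otherwise the term vanishes
    K = int(n1 % 2 == 0 and n2 % 2 == 0 and n3 % 2 == 0)
    return K * np.sqrt(factorial(n1) * factorial(n2) * factorial(n3)) / (
        factorial2(n1) * factorial2(n2) * factorial2(n3))

print(b_single(0, 0, 0))  # 1.0
print(b_single(2, 0, 0))  # sqrt(2)/2 ~ 0.707
print(b_single(1, 0, 0))  # 0.0, odd index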
Example #2
def radon_n_bspline_general( y , theta , m , n ):
    exponent = 2*m - n + 1
    y_plus = positive_power( exponent )
    

    ##  Calculate  m+1 fold derivative (analytically)
    deriv_positive_power = scalar_product( ( misc.factorial( exponent ) / \
                   misc.factorial( exponent - (m +1) ) ) , positive_power( m - n ) )

    if theta == 0.0 or theta == np.pi/2.0 or theta == np.pi:
        y_plus = deriv_positive_power
        
    
    ##  Finite differences in cos(theta); skipped only when theta = pi/2
    if np.abs( theta - np.pi/2.0 ) > eps:
        for i in range( m + 1 ):
            y_plus = finite_difference( y_plus, np.cos(theta) )
    

    ##  Finite differences in sin(theta); skipped when theta = 0 or pi
    if np.abs( theta ) > eps  and np.abs( theta - np.pi ) > eps:
        for i in range( m + 1 ):
            y_plus = finite_difference( y_plus , np.sin(theta) )
    
    return y_plus(y)/float( misc.factorial( exponent ) )  
Example #3
    def normal_reference_constant(self):
        """
        Constant used for the Silverman normal reference asymptotic bandwidth
        calculation.

        C  = 2((pi^(1/2)*(nu!)^3 R(k))/(2nu(2nu)!kap_nu(k)^2))^(1/(2nu+1))
        nu = kernel order
        kap_nu = nu'th moment of kernel
        R = kernel roughness (square of L^2 norm)

        Note: L2Norm property returns square of norm.
        """
        nu = self._order

        if not nu == 2:
            msg = "Only implemented for second order kernels"
            raise NotImplementedError(msg)

        if self._normal_reference_constant is None:
            C = np.pi ** (0.5) * factorial(nu) ** 3 * self.L2Norm
            C /= 2 * nu * factorial(2 * nu) * self.moments(nu) ** 2
            C = 2 * C ** (1.0 / (2 * nu + 1))
            self._normal_reference_constant = C

        return self._normal_reference_constant
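To make the docstring formula concrete, here it is evaluated for a second-order Gaussian kernel, assuming roughness R(k) = 1/(2*sqrt(pi)) and second moment kap_2 = 1 (these kernel values are assumptions for the illustration, not taken from the class):

import numpy as np
from scipy.special import factorial

nu = 2
L2Norm = 1.0 / (2.0 * np.sqrt(np.pi))  # roughness R(k) of the Gaussian kernel (assumed)
kap_nu = 1.0                           # second moment of the Gaussian kernel (assumed)

C = np.pi ** 0.5 * factorial(nu) ** 3 * L2Norm
C /= 2 * nu * factorial(2 * nu) * kap_nu ** 2
C = 2 * C ** (1.0 / (2 * nu + 1))
print(C)  # ~1.059, the familiar Silverman normal-reference constant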
Example #4
    def update_rho(self, rho):
        """
        calculate probability distribution for quadrature measurement
        outcomes given a two-mode density matrix
        """

        X1, X2 = np.meshgrid(self.xvecs[0], self.xvecs[1])

        p = np.zeros((len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex)
        N = rho.dims[0][0]

        M1 = np.zeros((N, N, len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex)
        M2 = np.zeros((N, N, len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex)

        for m in range(N):
            for n in range(N):
                M1[m,n] = exp(-1j * self.theta1 * (m - n)) / \
                    sqrt(pi * 2 ** (m + n) * factorial(n) * factorial(m)) * \
                    exp(-X1 ** 2) * np.polyval(hermite(m), X1) * np.polyval(hermite(n), X1)
                M2[m,n] = exp(-1j * self.theta2 * (m - n)) / \
                    sqrt(pi * 2 ** (m + n) * factorial(n) * factorial(m)) * \
                    exp(-X2 ** 2) * np.polyval(hermite(m), X2) * np.polyval(hermite(n), X2)

        for n1 in range(N):
            for n2 in range(N):
                i = state_number_index([N, N], [n1, n2])
                for p1 in range(N):
                    for p2 in range(N):
                        j = state_number_index([N, N], [p1, p2])
                        p += M1[n1, p1] * M2[n2, p2] * rho.data[i, j]

        self.data = p
Example #5
def K(dim=4, dfn=7, dfd=np.inf):
    r"""
    Determine the polynomial K in:

        Worsley, K.J. (1994). 'Local maxima and the expected Euler
        characteristic of excursion sets of \chi^2, F and t fields.' Advances in
        Applied Probability, 26:13-42.

    If dfd=inf, return the limiting polynomial.
    """
    def lbinom(n, j):
        return gammaln(n+1) - gammaln(j+1) - gammaln(n-j+1)

    m = dfd
    n = dfn
    D = dim
    k = np.arange(D)
    coef = 0
    for j in range(int(np.floor((D-1)/2.)+1)):
        if np.isfinite(m):
            t = (gammaln((m+n-D)/2.+j) - 
                 gammaln(j+1) -
                 gammaln((m+n-D)/2.))
            t += lbinom(m-1, k-j) - k * np.log(m)
        else:
            _t = np.power(2., -j) / (factorial(k-j) * factorial(j))
            t = np.log(_t)
            t[np.isinf(_t)] = -np.inf
        t += lbinom(n-1, D-1-j-k) 
        coef += (-1)**(D-1) * factorial(D-1) * np.exp(t) * np.power(-1.*n, k) 
    return np.poly1d(coef[::-1])
Example #6
def beta(p,a):
    """Returns the pth coefficient beta.

    inputs:
    p -- (int) pth coefficient
    a -- (float) uncorrected fractional distance of MC

    output:

    beta_p -- pth beta coefficient for given vel. or accel.
    """

    beta_p = a ** (p+1) / factorial(p+1)

    if p > 0:

        for q in range(p):

            if a >= 0:

                beta_p -= beta(q,a) / factorial(p + 1 - q)

            else: # a < 0

                beta_p -= (-1) ** (p+q) * beta(q,a) / factorial(p + 1 - q)

    return beta_p
Example #7
def shore_odf_matrix(radial_order, mu, smoment, vertices):
    """
    Eq. 33 (choose ux=uy=uz)
    """

    ind_mat = shore_index_matrix(radial_order)

    n_vert = vertices.shape[0]

    n_elem = ind_mat.shape[0]

    odf_mat = np.zeros((n_vert, n_elem))

    rho = mu

    for i in range(n_vert):

        vx, vy, vz = vertices[i]

        for j in range(n_elem):

            n1, n2, n3 = ind_mat[j]
            f = np.sqrt(factorial(n1) * factorial(n2) * factorial(n3))

            k = mu ** (smoment) / np.sqrt(2 ** (2 - smoment) * np.pi ** 3)

            odf_mat[i, j] = k * f * _odf_cfunc(n1, n2, n3, vx, vy, vz, smoment)

    return odf_mat
Example #8
def arcsin(n,x):
    #construct a backwards order of your n's
    integers = np.arange(n,-1,-1, dtype=np.float64)
    coeff = factorial(integers*2)/((2*integers+1)*(factorial(integers)**2)*(4**integers))
    allcoeff = np.zeros(2*n+2)
    allcoeff[::2]=coeff[:]
    return np.polyval(allcoeff,x)
Example #9
 def eval_expx(lamb,i,j):
     '''get the i,j element of exp(lamb(a+a^dag))'''
     res=0.
     r=min(i,j)+1
     for m in xrange(r):
         res+=lamb**(i+j-2*m)*sqrt(factorial(j)*factorial(i))/factorial(m)/factorial(i-m)/factorial(j-m)
     return exp(lamb**2/2)*res  # it is a pending issue whether a '-' sign makes sense here.
Example #10
def _sch_lpmv(n, x):
    '''
    Outputs array of Schmidt Seminormalized Associated Legendre Functions
    S_{n}^{m} for m<=n.

    Parameters
    ----------
    n : int
        Degree of polynomial.

    x : float
        Point at which to evaluate

    Returns
    -------
    array of values for Legendre functions.

    '''
    from scipy.special import lpmv
    n = int(n)
    sch = np.array([1.0])
    sch2 = np.array([(-1.0) ** m * np.sqrt(
        (2.0 * factorial(n - m)) / factorial(n + m)) for m in range(1, n + 1)])
    sch = np.append(sch, sch2)
    if isinstance(x, float) or len(x) == 1:
        leg = lpmv(np.arange(0, n + 1), n, x)
        return np.array([sch * leg]).T
    else:
        for j in range(0, len(x)):
            leg = lpmv(range(0, n + 1), n, x[j])
            if j == 0:
                out = np.array([sch * leg]).T
            else:
                out = np.append(out, np.array([sch * leg]).T, axis=1)
    return out
Example #11
def cumulant_from_moments(momt, n):
    """Compute n-th cumulant given moments.

    Parameters
    ----------
    momt: array_like
        `momt[j]` contains `(j+1)`-th moment.
        These can be raw moments around zero, or central moments
        (in which case, `momt[0]` == 0).
    n: integer
        which cumulant to calculate (must be >= 1)

    Returns
    -------
    kappa: float
        n-th cumulant.

    """
    if n < 1:
        raise ValueError("Expected a positive integer. Got %s instead." % n)
    if len(momt) < n:
        raise ValueError("%s-th cumulant requires %s moments, "
                         "only got %s." % (n, n, len(momt)))
    kappa = 0.
    for p in _faa_di_bruno_partitions(n):
        r = sum(k for (m, k) in p)
        term = (-1)**(r - 1) * factorial(r - 1)
        for (m, k) in p:
            term *= np.power(momt[m - 1] / factorial(m), k) / factorial(k)
        kappa += term
    kappa *= factorial(n)
    return kappa
Example #12
def get_n_combos(n,k):
    """
    Returns the number of combinations of n choose k.
    Uses Stirling's approximation when numbers are very large.
    """

    result = None
    try:
        fac_n = factorial(n)
        fac_k = factorial(k)
        fac_nk = factorial(n-k)
        summ = fac_n + fac_k + fac_nk
        if summ == np.inf or summ != summ:
            raise ValueError("Values too large. Using Stirling's approximation")
        result = fac_n/fac_k
        result /= fac_nk
    except: # Catch all large number exceptions.  
        pass
    if not result or result == np.inf or result != result: # No result yet.  
        if n == k or k == 0:
            result = 1
        else:
            x = stirling(n) - stirling(k) - stirling(n-k) # Use Stirling's approx.
            result = np.exp(x) 
        
    return result
Example #13
    def wdm_linearity(self, domain):
        # Calculate dispersive terms:
        if self.beta is None:
            self.factor = [0.0, 0.0]
        else:
            if self.centre_omega is None:
                self.Domega = (domain.omega - domain.centre_omega,
                               domain.omega - domain.centre_omega)
            else:
                self.Domega = (domain.omega - self.centre_omega[0],
                               domain.omega - self.centre_omega[1])

            terms = [0.0, 0.0]
            for n, beta in enumerate(self.beta[0]):
                terms[0] += beta * np.power(self.Domega[0], n) / factorial(n)
            for n, beta in enumerate(self.beta[1]):
                terms[1] += beta * np.power(self.Domega[1], n) / factorial(n)
            self.factor = [1j * fftshift(terms[0]), 1j * fftshift(terms[1])]

        # Include attenuation terms if available:
        if self.alpha is None:
            return self.factor
        else:
            self.factor[0] -= 0.5 * self.alpha[0]
            self.factor[1] -= 0.5 * self.alpha[1]
            return self.factor
Example #14
    def update(self, rho):
        """
        Calculate the probability function for the given state of an harmonic
        oscillator (as density matrix)
        """

        if isket(rho):
            rho = ket2dm(rho)

        self.data = np.zeros(len(self.xvecs[0]), dtype=complex)
        M, N = rho.shape

        for m in range(M):
            k_m = pow(self.omega / pi, 0.25) / \
                sqrt(2 ** m * factorial(m)) * \
                exp(-self.xvecs[0] ** 2 / 2.0) * \
                np.polyval(hermite(m), self.xvecs[0])

            for n in range(N):
                k_n = pow(self.omega / pi, 0.25) / \
                    sqrt(2 ** n * factorial(n)) * \
                    exp(-self.xvecs[0] ** 2 / 2.0) * \
                    np.polyval(hermite(n), self.xvecs[0])

                self.data += np.conjugate(k_n) * k_m * rho.data[m, n]
Example #15
def GL_mode(u,p):
	r = len(u[:,0])
	z = len(u[0,:])
	L = zeros((r,z))
	for i in range(p+1):
		L += float(factorial(p))*(-u)**i/( float(factorial(p - i))*(float(factorial(i)))**2. )
	return L
Example #16
    def time(self, t, s=1.0):
        """
        Complex Paul wavelet, centred at zero.

        Parameters
        ----------
        t : float
            Time. If `s` is not specified, i.e. set to 1, this can be
            used as the non-dimensional time t/s.
        s : float
            Scaling factor. Default is 1.

        Returns
        -------
        out : complex
            Value of the Paul wavelet at the given time

        The Paul wavelet is defined (in time) as::

            (2 ** m * i ** m * m!) / (pi * (2 * m)!) ** .5 \
                    * (1 - i * t / s) ** -(m + 1)

        """
        m = self.m
        x = t / s

        const = (2 ** m * 1j ** m * factorial(m)) \
            / (np.pi * factorial(2 * m)) ** .5
        functional_form = (1 - 1j * x) ** -(m + 1)

        output = const * functional_form

        return output
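Since the functional form equals 1 at t = 0, the constant alone gives the wavelet's value there; a small check with an assumed order m = 4:

import numpy as np
from scipy.special import factorial

m = 4  # assumed order for illustration
const = (2 ** m * 1j ** m * factorial(m)) / (np.pi * factorial(2 * m)) ** 0.5
print(const)       # Paul wavelet value at t/s = 0
print(abs(const))  # ~1.079 for m = 4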
Example #17
    def B(l,ll,r,rr,i,l1,l2,Ra,Rb,Rp,g1,l3,l4,Rc,Rd,Rq,g2):
        """
        Expansion coefficient B.

        Source:
            Handbook of Computational Chemistry
            David Cook
            Oxford University Press
            1998
        """

        b = 1
        b *= (-1)**(l) * theta(l,l1,l2,Rp-Ra,Rp-Rb,r,g1)
        b *= theta(ll,l3,l4,Rq-Rc,Rq-Rd,rr,g2)
        b *= (-1)**i * (2*delta)**(2*(r+rr))
        b *= misc.factorial(l + ll - 2*r - 2*rr,exact=True)
        b *= delta**i * (Rp-Rq)**(l+ll-2*(r+rr+i))

        tmp = 1
        tmp *= (4*delta)**(l+ll) * misc.factorial(i,exact=True)
        tmp *= misc.factorial(l+ll-2*(r+rr+i),exact=True)

        b /= tmp

        return b
Example #18
def series_problem_a():
	c = np.arange(70, -1, -1) # original values for n
	c = factorial(2*c) / ((2*c+1) * factorial(c)**2 * 4**c) #series coeff's
	p = np.zeros(2*c.size) # make space for skipped zero-terms
	p[::2] = c # set nonzero polynomial terms to the series coeff's
	P = np.poly1d(p) # make a polynomial out of it
	return 6 * P(.5) #return pi (since pi/6 = arcsin(1/2))
Example #19
def arcsin_approx():
	n = 70
	s = 1. * np.arange(70,-1,-1)
	r = factorial(2*s)/((2*s+1)*(factorial(s)**2)*(4**s)) # computes coefficients
	q = np.zeros(142)
	q[0::2] = r
	P = np.poly1d(q)
	return P(1/math.sqrt(2))*4
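Examples #18 and #19 both evaluate the Maclaurin series arcsin(x) = sum_n (2n)! x**(2n+1) / ((2n+1) (n!)**2 4**n), truncated at n = 70. A standalone restatement (values below assumed for illustration) checks it at two points:

import numpy as np
from scipy.special import factorial

n = np.arange(70, -1, -1, dtype=np.float64)
coeffs = factorial(2 * n) / ((2 * n + 1) * factorial(n) ** 2 * 4 ** n)
poly = np.zeros(2 * n.size)
poly[::2] = coeffs            # odd powers only: x**141, x**139, ..., x
P = np.poly1d(poly)

print(6 * P(0.5))             # ~pi, via arcsin(1/2) = pi/6
print(4 * P(1 / np.sqrt(2)))  # ~pi, via arcsin(1/sqrt(2)) = pi/4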
Example #20
def likelihood(theta, n, x):
    """
    :param theta: probability of infection
    :param n: n testing
    :param x: x positive
    :return likelihood probability
    """
    return (factorial(n) / (factorial(n-x) * factorial(x))) * (1 - theta) ** (n - x) * (theta ** x)
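The expression is simply the binomial PMF, so it can be cross-checked against scipy.stats.binom; the parameters below are assumed for illustration:

from scipy.special import factorial
from scipy.stats import binom

def likelihood(theta, n, x):
    # same formula as above: n-choose-x times (1-theta)^(n-x) theta^x
    return (factorial(n) / (factorial(n - x) * factorial(x))) \
        * (1 - theta) ** (n - x) * theta ** x

print(likelihood(0.3, 10, 4))  # ~0.2001
print(binom.pmf(4, 10, 0.3))   # same value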
Example #21
def beta_m(a, B, N):
    A = np.zeros([N,N])
    alpha = np.zeros([N,1])
    for i in range(N):
        alpha[i,0] = a ** (i+1) / factorial(i+1)
        for j in range(i+1):
            A[i,j] = B[i-j]/factorial(i-j)
    return A.dot(alpha)
Example #22
 def integrand(zbar, T=T, data=data):
     fromb_times = data[1][data[2]=="B"]
     #total = len(fromb_times)
     numbefore = np.sum(fromb_times<=zbar)
     numafter = np.sum(fromb_times>zbar)
     pbefore = zbar**numbefore*np.exp(-zbar)/float(factorial([numbefore])[0])
     pafter = (1.4*(T-zbar))**numafter*np.exp(-1.4*(T-zbar))/float(factorial([numafter])[0])
     return pbefore*pafter*.2*np.exp(-.2*zbar)
Example #23
def basis2d(n0,n1,beta=[1.,1.]):
    """2d dimensionless Cartesian basis function"""
    b=hermite2d(n0,n1)
    b[0]*=((2**n0)*(np.pi**(.5))*factorial(n0))**(-.5)
    exp0=lambda x: beta[0] * b[0](x) * np.exp(-.5*(x**2))
    b[1]*=((2**n1)*(np.pi**(.5))*factorial(n1))**(-.5)
    exp1=lambda x: beta[1] * b[1](x) * np.exp(-.5*(x**2))
    return [exp0,exp1]
Example #24
def constant_potential_twosphere_identical(phi01, phi02, r1, r2, R, kappa, epsilon):
#   From Carnie+Chan 1993

    N = 20 # Number of terms in expansion
    
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184 

    index = arange(N, dtype=float) + 0.5

    k1 = special.kv(index, kappa*r1)*sqrt(pi/(2*kappa*r1))
    k2 = special.kv(index, kappa*r2)*sqrt(pi/(2*kappa*r2))

    i1 = special.iv(index, kappa*r1)*sqrt(pi/(2*kappa*r1))
    i2 = special.iv(index, kappa*r2)*sqrt(pi/(2*kappa*r2))

    B = zeros((N,N), dtype=float)

    for n in range(N):
        for m in range(N):
            for nu in range(N):
                if n>=nu and m>=nu:
                    g1 = gamma(n-nu+0.5)
                    g2 = gamma(m-nu+0.5)
                    g3 = gamma(nu+0.5)
                    g4 = gamma(m+n-nu+1.5)
                    f1 = factorial(n+m-nu)
                    f2 = factorial(n-nu)
                    f3 = factorial(m-nu)
                    f4 = factorial(nu)
                    Anm = g1*g2*g3*f1*(n+m-2*nu+0.5)/(pi*g4*f2*f3*f4)
                    kB = special.kv(n+m-2*nu+0.5,kappa*R)*sqrt(pi/(2*kappa*R))
                    B[n,m] += Anm*kB 

    M = zeros((N,N), float)
    for i in range(N):
        for j in range(N):
            M[i,j] = (2*i+1)*B[i,j]*i1[i]
            if i==j:
                M[i,j] += k1[i]

    RHS = zeros(N)
    RHS[0] = phi01

    a = solve(M,RHS)

    a0 = a[0] 
   
    U = 4*pi * ( -pi/2 * a0/phi01 * 1/sinh(kappa*r1) + kappa*r1 + kappa*r1/tanh(kappa*r1) )

#    print 'E: %f'%U
    C0 = qe**2*Na*1e-3*1e10/(cal2J*E_0)
    C1 = r1*epsilon*phi01*phi01
    E_inter = U*C1*C0
                            
    return E_inter
Example #25
def wigner6j(j1,j2,j3,J1,J2,J3):
    """Return the value of the 6-j symbol for the given values of j and m using the Racah formula.
        / j1 j2 j3 \
        <          >
        \ J1 J2 J3 /
        Based upon Wigner3j.m from David Terr, Raytheon                         
        Reference: http://mathworld.wolfram.com/Wigner6j-Symbol.html            

        Usage
        wigner = wigner6j(j1,j2,j3,J1,J2,J3)
    
    Args:
        j1 (float): j1
        j2 (float): j2
        j3 (float): j3
        J1 (float): J1
        J2 (float): J2
        J3 (float): J3
    
    Returns:
        float: value of the 6-j symbol
    """
    # Check that the js and Js are only integer or half integer
    if ( ( 2*j1 != round(2*j1) ) | ( 2*j2 != round(2*j2) ) | ( 2*j3 != round(2*j3) ) | ( 2*J1 != round(2*J1) ) | ( 2*J2 != round(2*J2) ) | ( 2*J3 != round(2*J3) ) ):
        print('All arguments must be integers or half-integers.')
        return -1
    
# Check if the 4 triads ( (j1 j2 j3), (j1 J2 J3), (J1 j2 J3), (J1 J2 j3) ) satisfy the triangular inequalities
    if ( ( abs(j1-j2) > j3 ) | ( j1+j2 < j3 ) | ( abs(j1-J2) > J3 ) | ( j1+J2 < J3 ) | ( abs(J1-j2) > J3 ) | ( J1+j2 < J3 ) | ( abs(J1-J2) > j3 ) | ( J1+J2 < j3 ) ):
        print('6j-Symbol is not triangular!')
        return 0
    
    # Check if the sum of the elements of each triad is an integer
    if ( ( 2*(j1+j2+j3) != round(2*(j1+j2+j3)) ) | ( 2*(j1+J2+J3) != round(2*(j1+J2+J3)) ) | ( 2*(J1+j2+J3) != round(2*(J1+j2+J3)) ) | ( 2*(J1+J2+j3) != round(2*(J1+J2+j3)) ) ):
        print('6j-Symbol is not triangular!')
        return 0
    
    # Arguments for the factorials
    t1 = j1+j2+j3
    t2 = j1+J2+J3
    t3 = J1+j2+J3
    t4 = J1+J2+j3
    t5 = j1+j2+J1+J2
    t6 = j2+j3+J2+J3
    t7 = j1+j3+J1+J3

    # Finding summation borders
    tmin = max(0, max(t1, max(t2, max(t3,t4))))
    tmax = min(t5, min(t6,t7))
    tvec = arange(tmin,tmax+1,1)
        
    # Calculation the sum part of the 6j-Symbol
    WignerReturn = 0
    for t in tvec:
        WignerReturn += (-1)**t*factorial(t+1)/( factorial(t-t1)*factorial(t-t2)*factorial(t-t3)*factorial(t-t4)*factorial(t5-t)*factorial(t6-t)*factorial(t7-t) )

    # Calculation of the 6j-Symbol
    return WignerReturn*sqrt( TriaCoeff(j1,j2,j3)*TriaCoeff(j1,J2,J3)*TriaCoeff(J1,j2,J3)*TriaCoeff(J1,J2,j3) )
Example #26
def dimBasis2d(n0,n1,beta=[1.,1.],phs=[1.,1.]):
    """2d dimensional Cartesian basis function of characteristic size beta
    phs: additional phase factor, used in the Fourier Transform"""
    b=hermite2d(n0,n1)
    b[0]*=(beta[0]**(-.5))*(((2**n0)*(np.pi**(.5))*factorial(n0))**(-.5))
    exp0=lambda x: b[0](x/beta[0]) * np.exp(-.5*((x/beta[0])**2)) * phs[0]
    b[1]*=(beta[1]**(-.5))*(((2**n1)*(np.pi**(.5))*factorial(n1))**(-.5))
    exp1=lambda x: b[1](x/beta[1]) * np.exp(-.5*((x/beta[1])**2)) * phs[1]
    return [exp0,exp1]
Example #27
def radial_poly(rho, n, m):
    upper = (n - np.abs(m)) / 2
    s = np.arange(upper + 1)
    consts = (-1.0) ** s
    consts *= factorial(n - s)
    consts /= factorial(s)
    consts /= factorial((n + np.abs(m))/2 - s)
    consts /= factorial((n - np.abs(m))/2 - s)
    return (consts * rho[..., np.newaxis]**(n - 2 * s)).sum(axis=-1)
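As a sanity check (assuming the corrected radial_poly above, with numpy and scipy.special.factorial in scope), the n = 2, m = 0 case should reproduce the closed-form Zernike radial polynomial R_2^0(rho) = 2*rho**2 - 1:

import numpy as np

rho = np.linspace(0.0, 1.0, 5)
print(radial_poly(rho, 2, 0))  # matches 2*rho**2 - 1 below
print(2 * rho ** 2 - 1)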
Example #28
 def number_of_permutations(self):
     """
     Returns the number of permutations of this coordination geometry.
     """
     if self.permutations_safe_override:
         return factorial(self.coordination)
     elif self.permutations is None:
         return factorial(self.coordination)
     return len(self.permutations)
Example #29
def get_K(x,n):

    K = 0.
    n_fact = factorial(n)
    n_fact2 = factorial(2*n)
    for s in range(n+1):
        K += 2**s*n_fact*factorial(2*n-s)/(factorial(s)*n_fact2*factorial(n-s)) * x**s

    return K
Example #30
def _maternpolykernel(R, p, l):
	nu = p + 0.5	
	r = np.sqrt(2.*nu)*R/l
	poly = np.zeros_like(r)
	factor = gamma(p+1)/gamma(2*p+1)
	for i in np.arange(p+1):
		poly +=  factorial(p+i)*np.power(2.*r,p-i)/(factorial(i)*factorial(p-i))

	return factor*np.exp(-r)*poly
Example #31
def calculateExponentialSeries(ck, truncationOrder):
    N = truncationOrder // 2
    n = np.linspace(0, 2 * N - 1, 2 * N)
    expSeries = np.power(-ck**2 / 2, n) / factorial(n)
    # print(expSeries)
    return expSeries
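A quick check, with assumed values of ck and truncationOrder, that summing the truncated series recovers exp(-ck**2 / 2):

import numpy as np
from scipy.special import factorial

ck, truncationOrder = 0.8, 20  # assumed values for illustration
N = truncationOrder // 2
n = np.linspace(0, 2 * N - 1, 2 * N)
expSeries = np.power(-ck ** 2 / 2, n) / factorial(n)
print(expSeries.sum())       # ~0.7261
print(np.exp(-ck ** 2 / 2))  # same value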
Example #32
def PiecewiseMollify(theta, c_j, a_n, x):
    """
    Piecewise mollify the spectral reconstruction of a discontinuous function 
    to reduce the effect of Gibbs phenomenon. Perform a real-space convolution 
    of a spectral reconstruction with an adaptive unit-mass mollifier.

    Parameters
    ----------
    theta : float
            free parameter to vary in the range 0 < theta < 1

    c_j : 1D array, 
          Array containing x positions of the discontinuities in the data
 
    a_n : 1D array, shape = (N+1,)
          N - highest order in the Chebyshev expansion
          vector of Chebyshev expansion coefficients

    x : 1D array,
        1D grid of points in real space, where the function is evaluated

    Returns
    ----------
    
    mollified : 1D array, shape = (len(x),)
                real space mollified representation of a discontinuous function

    mollified_err : 1D array, shape = (len(x),)
                    error estimate for each point in the convolution, derived from
                    scipy.integrate.quad

    """
    N = a_n.shape[0] - 1
    sanity_check = np.empty(len(x))
    mollified = np.array([])
    mollified_err = np.array([])

    I_N = lambda y: T.chebval(y, a_n)
    chi_top = lambda y, f: f(y) if -1 <= y <= 1 else 0
    I_N_top = lambda y: chi_top(y, I_N)
    c_jplus = np.append(c_j, 1.0)

    for idx_c, pos in enumerate(c_jplus):

        if idx_c == 0:

            lim_left = -1.0
            lim_right = pos

            offset = np.ma.masked_where(x > lim_right, x)
            offset = np.ma.compressed(offset)

        else:

            lim_left = c_jplus[idx_c - 1]
            lim_right = pos

            offset = np.ma.masked_where(x > lim_right, x)
            offset = np.ma.masked_where(x <= lim_left, offset)
            offset = np.ma.compressed(offset)

        chi_cut = lambda y, f: f(y) if lim_left <= y <= lim_right else 0
        convolution = np.empty(len(offset))
        convolution_err = np.empty(len(offset))

        for idx_o, off_x in enumerate(offset):
            c_jx = c_j - off_x
            dx = lambda y: sqrt(theta * N * min(abs(y - c) for c in c_jx))
            var = lambda y: N / (2 * theta * dx(y))
            p_N = lambda y: (theta**2) * dx(y) * N
            j_max = int(np.amax(np.frompyfunc(p_N, 1, 1)(x)))
            h_j = np.zeros(2 * (j_max + 1))

            for j in range(j_max + 1):
                h_j[2 * j] = ((-1)**j) / ((4**j) * factorial(j))

            hermite = lambda y: H.hermeval(sqrt(var(y)) * y, h_j)
            expon = lambda y: (1. / sqrt(theta * N * dx(y))) * exp(-var(y) *
                                                                   (y**2))
            phi0 = lambda y: hermite(y) * expon(y)
            phi_off = lambda y: phi0(y - off_x)
            phi = lambda y: chi_cut(y, phi_off)
            norm = quad(phi, lim_left, lim_right)[0]

            convfunc = lambda y: (1 / norm) * (phi(y) * I_N_top(y))
            convolution[idx_o], convolution_err[idx_o] = quad(
                convfunc, lim_left, lim_right)

        mollified = np.append(mollified, convolution)
        mollified_err = np.append(mollified_err, convolution_err)

    assert mollified.shape == sanity_check.shape, "Piecewise mollification inconsistent with regular one"
    assert mollified_err.shape == sanity_check.shape, "Piecewise mollification inconsistent with regular one"

    return mollified, mollified_err
Example #33
import scipy.misc as sp
import numpy as np

limit = int(sp.factorial(9))
curious = []
for ii in range(limit):
    s = str(ii)
    temp_sum = 0
    for jj in range(len(s)):
        # print(s[jj])
        temp_sum += sp.factorial(int(s[jj]))
    if temp_sum == ii:
        curious.append(ii)

print(curious)
curious = curious[2:]  # Remove 1 and 2
print(np.sum(curious))
Example #34
def Wigner6j(j1, j2, j3, J1, J2, J3):
    #======================================================================
    # Calculating the Wigner6j-Symbols using the Racah-Formula
    # Author: Ulrich Krohn
    # Date: 13th November 2009
    #
    # Based upon Wigner3j.m from David Terr, Raytheon
    # Reference: http://mathworld.wolfram.com/Wigner6j-Symbol.html
    #
    # Usage:
    # from wigner import Wigner6j
    # WignerReturn = Wigner6j(j1,j2,j3,J1,J2,J3)
    #
    #  / j1 j2 j3 \
    # <            >
    #  \ J1 J2 J3 /
    #
    #======================================================================

    # Check that the js and Js are only integer or half integer
    if ((2 * j1 != round(2 * j1)) | (2 * j2 != round(2 * j2)) |
        (2 * j3 != round(2 * j3)) | (2 * J1 != round(2 * J1)) |
        (2 * J2 != round(2 * J2)) | (2 * J3 != round(2 * J3))):
        raise ValueError('All arguments must be integers or half-integers.')


# Check if the 4 triads ( (j1 j2 j3), (j1 J2 J3), (J1 j2 J3), (J1 J2 j3) ) satisfy the triangular inequalities
    if ((abs(j1 - j2) > j3) | (j1 + j2 < j3) | (abs(j1 - J2) > J3) |
        (j1 + J2 < J3) | (abs(J1 - j2) > J3) | (J1 + j2 < J3) |
        (abs(J1 - J2) > j3) | (J1 + J2 < j3)):
        #print '6j-Symbol is not triangular!'
        return 0

    # Check if the sum of the elements of each triad is an integer
    if ((2 * (j1 + j2 + j3) != round(2 * (j1 + j2 + j3))) |
        (2 * (j1 + J2 + J3) != round(2 * (j1 + J2 + J3))) |
        (2 * (J1 + j2 + J3) != round(2 * (J1 + j2 + J3))) |
        (2 * (J1 + J2 + j3) != round(2 * (J1 + J2 + j3)))):
        #print '6j-Symbol is not triangular!'
        return 0

    # Arguments for the factorials
    t1 = j1 + j2 + j3
    t2 = j1 + J2 + J3
    t3 = J1 + j2 + J3
    t4 = J1 + J2 + j3
    t5 = j1 + j2 + J1 + J2
    t6 = j2 + j3 + J2 + J3
    t7 = j1 + j3 + J1 + J3

    # Finding summation borders
    tmin = max(0, max(t1, max(t2, max(t3, t4))))
    tmax = min(t5, min(t6, t7))
    tvec = arange(tmin, tmax + 1, 1)

    # Calculation the sum part of the 6j-Symbol
    WignerReturn = 0
    for t in tvec:
        WignerReturn += (-1)**t * factorial(t + 1) / (
            factorial(t - t1) * factorial(t - t2) * factorial(t - t3) *
            factorial(t - t4) * factorial(t5 - t) * factorial(t6 - t) *
            factorial(t7 - t))

    # Calculation of the 6j-Symbol
    return WignerReturn * sqrt(
        TriaCoeff(j1, j2, j3) * TriaCoeff(j1, J2, J3) * TriaCoeff(J1, j2, J3) *
        TriaCoeff(J1, J2, j3))
Example #35
import scipy.misc as spm

factorials = {}
for i in range(10):
    factorials[str(i)] = spm.factorial(i)

ans = 0
for i in range(3, 500000):
    stri = str(i)
    num = 0
    for j in stri:
        # if j == '9' or j =='8':
        # break
        num += factorials[j]
    if num == i:
        print i
        ans += i
print ans
Example #36
 def norm(self, n):
     return 1.0 / (np.sqrt(np.sqrt(np.pi) * (2.0**n) * factorial(n)))
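A short numerical check (n = 3 and the grid are assumed for illustration) that this constant normalises the oscillator eigenfunction built from the physicists' Hermite polynomial H_n:

import numpy as np
from scipy.special import factorial
from numpy.polynomial.hermite import hermval

n = 3  # assumed quantum number
norm = 1.0 / np.sqrt(np.sqrt(np.pi) * 2.0 ** n * factorial(n))
x = np.linspace(-10.0, 10.0, 20001)
c = np.zeros(n + 1)
c[n] = 1.0  # coefficient vector selecting H_n
psi = norm * hermval(x, c) * np.exp(-x ** 2 / 2.0)
print((psi ** 2).sum() * (x[1] - x[0]))  # ~1.0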
Example #37
    def _build_levels_c(self, lodft, log_rad, angle, Xrcos, Yrcos, height,
                        img_dims):
        '''
        Modified by Li Jie
        
        Adds multi-scale support, for example scale_factor = 2**(1/2).
        '''
        if height <= 1:
            coeff = [lodft]

        else:

            Xrcos = Xrcos - np.log2(self.scale_factor)

            ####################################################################
            ####################### Orientation bandpass #######################
            ####################################################################
            himask = pointOp(log_rad, Yrcos, Xrcos)

            order = self.nbands - 1
            const = np.power(2, 2 * order) * np.square(
                factorial(order)) / (self.nbands * factorial(2 * order))
            Ycosn = 2 * np.sqrt(const) * np.power(np.cos(
                self.Xcosn), order) * (np.abs(self.alpha) < np.pi / 2)

            # Loop through all orientation bands
            orientations = []
            for b in range(self.nbands):
                anglemask = pointOp(angle, Ycosn,
                                    self.Xcosn + np.pi * b / self.nbands)
                banddft = np.power(np.complex(
                    0, -1), self.nbands - 1) * lodft * anglemask * himask
                orientations.append(banddft)

            ####################################################################
            ######################## Subsample lowpass #########################
            ####################################################################

            dims = np.array(lodft.shape)
            ctr = np.ceil((dims + 0.5) / 2)

            lodims = np.round(img_dims /
                              (self.scale_factor**(self.height - height)))
            loctr = np.ceil((lodims + 0.5) / 2)
            lostart = (ctr - loctr).astype(np.int)
            loend = (lostart + lodims).astype(np.int)

            # Selection
            log_rad = log_rad[lostart[0]:loend[0], lostart[1]:loend[1]]
            angle = angle[lostart[0]:loend[0], lostart[1]:loend[1]]
            lodft = lodft[lostart[0]:loend[0], lostart[1]:loend[1]]

            # Subsampling in frequency domain
            YIrcos = np.abs(np.sqrt(1 - Yrcos**2))
            lomask = pointOp(log_rad, YIrcos, Xrcos)
            lodft = lomask * lodft

            ####################################################################
            ####################### Recursion next level #######################
            ####################################################################

            coeff = self._build_levels_c(lodft, log_rad, angle, Xrcos, Yrcos,
                                         height - 1, img_dims)
            coeff.insert(0, orientations)

        return coeff
Example #38
 def function(x, lamb, offsX, offsY, scaleX, scaleY):
     # poisson function, parameter lamb is the fit parameter
     x = (x - offsX) / scaleX
     y = (lamb**x / factorial(x)) * np.exp(-lamb)
     return scaleY * y + offsY
Example #39
 def double_poisson(self,k, *params):
     (r1,r2,p1,p2)   = params
     poisson_1       = p1 * (r1**k/factorial(k)) * np.exp(-r1)
     poisson_2       = p2 * (r2**k/factorial(k)) * np.exp(-r2)
     return  poisson_1 + poisson_2
Example #40
def factor(n, d):
    return (1./np.sqrt(2**n * factorial(n)))*((omega/np.pi)**0.25)*np.exp(-0.5*omega*(x-d)**2)\
           *eval_hermite(n, np.sqrt(omega)*(x-d))
Example #41
def chi(zeta, n):
    "Returns the normalization constant of the mSPF basis."
    return np.sqrt(2 / zeta**1.5 * factorial(n) / gamma(n + 3.5))
Example #42
def whitney_innerproduct(complex,k):
    """
    For a given SimplicialComplex, compute a matrix representing the 
    innerproduct of Whitney k-forms
    """
    assert(k >= 0 and k <= complex.complex_dimension())    
        
    ## MASS MATRIX COO DATA
    rows,cols = massmatrix_rowcols(complex,k)
    data = empty(rows.shape)


    ## PRECOMPUTATION
    p = complex.complex_dimension()
   
    scale_integration = (factorial(k)**2)/((p + 2)*(p + 1))   
    
    k_forms = [tuple(x) for x in combinations(range(p+1),k)]
    k_faces = [tuple(x) for x in combinations(range(p+1),k+1)]

    num_k_forms = len(k_forms)
    num_k_faces = len(k_faces)

    k_form_pairs = [tuple(x) for x in combinations(k_forms,2)] + [(x,x) for x in k_forms]
    num_k_form_pairs = len(k_form_pairs)
    k_form_pairs_to_index = dict(zip(k_form_pairs,range(num_k_form_pairs)))
    k_form_pairs_to_index.update(zip([x[::-1] for x in k_form_pairs],range(num_k_form_pairs)))
    num_k_face_pairs = num_k_faces**2


    if k > 0:
        k_form_pairs_array = array(k_form_pairs)

    #maps flat vector of determinants to the flattened matrix entries
    dets_to_vals = scipy.sparse.lil_matrix((num_k_face_pairs,num_k_form_pairs))

    k_face_pairs = []
    for face1 in k_faces:
        for face2 in k_faces:
            row_index = len(k_face_pairs)

            k_face_pairs.append((face1,face2))

            for n in range(k+1):
               for m in range(k+1):
                   form1 = face1[:n] + face1[n+1:]
                   form2 = face2[:m] + face2[m+1:]

                   col_index = k_form_pairs_to_index[(form1,form2)]

                   dets_to_vals[row_index,col_index] += (-1)**(n+m)*((face1[n] == face2[m]) + 1)                 

    k_face_pairs_to_index = dict(zip(k_face_pairs,range(num_k_faces**2)))
    dets_to_vals = dets_to_vals.tocsr()
    ## END PRECOMPUTATION


    ## COMPUTATION
    if k == 1:
        Fdet = lambda x : x #det for 1x1 matrices - extend
    else:
        Fdet, = scipy.linalg.flinalg.get_flinalg_funcs(('det',),(complex.vertices,))



    dets = ones(num_k_form_pairs)


    for i,s in enumerate(complex[-1].simplices):

        # for k=1, dets is already correct i.e. dets[:] = 1
        if k > 0:  
            # lambda_i denotes the scalar barycentric basis function of the i-th vertex of this simplex
            # d(lambda_i) is the 1 form (gradient) of the i-th scalar basis function within this simplex
            pts      = complex.vertices[s,:]
            d_lambda = barycentric_gradients(pts)
            
            mtxs = d_lambda[k_form_pairs_array]     # these lines are equivalent to:
            for n,(A,B) in enumerate(mtxs):         #   for n,(form1,form2) in enumerate(k_form_pairs):
                dets[n] = Fdet(inner(A,B))[0]       #       dets[n] = det(dot(d_lambda[form1,:],d_lambda[form2,:].T))

        volume = complex[-1].primal_volume[i]
        vals = dets_to_vals * dets
        vals *= volume * scale_integration  #scale by the volume, barycentric weights, and account for the p! in each whitney form

        #put values into appropriate entries of the COO data array
        data[i*num_k_face_pairs:(i+1)*num_k_face_pairs] = vals


    #now rows,cols,data form a COOrdinate representation for the mass matrix
    shape = (complex[k].num_simplices,complex[k].num_simplices)  
    return coo_matrix((data,(rows,cols)), shape).tocsr()
Example #43
def poisson_prob(n, lam):
    global poissonBackup
    key = n * 10 + lam
    if key not in poissonBackup.keys():
        poissonBackup[key] = np.exp(-lam) * lam ** n / factorial(n)
    return poissonBackup[key]
Example #44
def multinomial(x, n, p):
    x = np.atleast_2d(x)
    return factorial(n) / factorial(x).prod(1) * (p**x).prod(1)
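A small cross-check of the same PMF against scipy.stats.multinomial, with x, n and p chosen only for illustration:

import numpy as np
from scipy.special import factorial
from scipy.stats import multinomial as sp_multinomial

def multinomial_pmf(x, n, p):
    # same row-wise computation as the snippet above
    x = np.atleast_2d(x)
    return factorial(n) / factorial(x).prod(1) * (np.asarray(p) ** x).prod(1)

x, n, p = [2, 3, 5], 10, [0.2, 0.3, 0.5]
print(multinomial_pmf(x, n, p))     # [0.08505]
print(sp_multinomial.pmf(x, n, p))  # 0.08505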
Example #45
 def rc(self, n, m, l):
     return (-1.)**l*factorial(n - l)/(factorial(l)*factorial((n+m)/2.0 - l)*factorial((n-m)/2.0 - l))
Example #46
import lib.AmBePlots as abp
import lib.BeamPlots as bp
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as scp
import numpy as np
import scipy.misc as scm

SIGNAL_DIRS = ["./Data/V3_5PE100ns/BeamData/"]
SIGNAL_LABELS = ['Beam']
BKG_DIR = "./Data/BkgCentralData/"

PEPERMEV = 12.

expoPFlat= lambda x,C1,tau,mu,B: C1*np.exp(-(x-mu)/tau) + B
mypoisson = lambda x,mu: (mu**x)*np.exp(-mu)/scm.factorial(x)

def GetDataFrame(mytreename,mybranches,filelist):
    RProcessor = rp.ROOTProcessor(treename=mytreename)
    for f1 in filelist:
        RProcessor.addROOTFile(f1,branches_to_get=mybranches)
    data = RProcessor.getProcessedData()
    df = pd.DataFrame(data)
    return df

def BeamPlotDemo(PositionDict,MCdf):
    Sdf = PositionDict["Beam"][0]
    Sdf_trig = PositionDict["Beam"][1]
    Sdf_mrd = PositionDict["Beam"][2]

    Sdf = Sdf.loc[Sdf["eventTimeTank"]>-9].reset_index(drop=True)
Example #47
def Wigner3j(j1, j2, j3, m1, m2, m3):
    #======================================================================
    # Wigner3j.m by David Terr, Raytheon, 6-17-04
    #
    # Compute the Wigner 3j symbol using the Racah formula [1].
    #
    # Usage:
    # from wigner import Wigner3j
    # wigner = Wigner3j(j1,j2,j3,m1,m2,m3)
    #
    #  / j1 j2 j3 \
    #  |          |
    #  \ m1 m2 m3 /
    #
    # Reference: Wigner 3j-Symbol entry of Eric Weinstein's Mathworld:
    # http://mathworld.wolfram.com/Wigner3j-Symbol.html
    #======================================================================

    # Error checking
    if ((2 * j1 != floor(2 * j1)) | (2 * j2 != floor(2 * j2)) |
        (2 * j3 != floor(2 * j3)) | (2 * m1 != floor(2 * m1)) |
        (2 * m2 != floor(2 * m2)) | (2 * m3 != floor(2 * m3))):
        raise ValueError('All arguments must be integers or half-integers.')

    # Additional check if the sum of the second row equals zero
    if (m1 + m2 + m3 != 0):
        #print '3j-Symbol unphysical'
        return 0

    if (j1 - m1 != floor(j1 - m1)):
        #print '2*j1 and 2*m1 must have the same parity'
        return 0

    if (j2 - m2 != floor(j2 - m2)):
        #print '2*j2 and 2*m2 must have the same parity'
        return 0

    if (j3 - m3 != floor(j3 - m3)):
        #print '2*j3 and 2*m3 must have the same parity'
        return 0

    if (j3 > j1 + j2) | (j3 < abs(j1 - j2)):
        #print 'j3 is out of bounds.'
        return 0

    if abs(m1) > j1:
        #print 'm1 is out of bounds.'
        return 0

    if abs(m2) > j2:
        #print 'm2 is out of bounds.'
        return 0

    if abs(m3) > j3:
        #print 'm3 is out of bounds.'
        return 0

    t1 = j2 - m1 - j3
    t2 = j1 + m2 - j3
    t3 = j1 + j2 - j3
    t4 = j1 - m1
    t5 = j2 + m2

    tmin = max(0, max(t1, t2))
    tmax = min(t3, min(t4, t5))
    tvec = arange(tmin, tmax + 1, 1)

    wigner = 0

    for t in tvec:
        wigner += (-1)**t / (factorial(t) * factorial(t - t1) *
                             factorial(t - t2) * factorial(t3 - t) *
                             factorial(t4 - t) * factorial(t5 - t))

    return wigner * (-1)**(j1 - j2 - m3) * sqrt(
        factorial(j1 + j2 - j3) * factorial(j1 - j2 + j3) *
        factorial(-j1 + j2 + j3) / factorial(j1 + j2 + j3 + 1) *
        factorial(j1 + m1) * factorial(j1 - m1) * factorial(j2 + m2) *
        factorial(j2 - m2) * factorial(j3 + m3) * factorial(j3 - m3))
Example #48
def _derivative_stencil_coefficients(axis, p=1, q=3):
    """Calculates the coefficients needed for the derivative.

    Parameters
    ----------
    axis : array like
        Axis onto which the derivative will be calculated.
    p : integer, optional
        Order of the derivative to be calculated. Default is to
        calculate the first derivative (p=1). 2*n-p+1 gives the relative
        order of the approximation.
    q : integer, optional
        Length of the stencil used for centered differentials. The
        length has to be odd numbered. Default is q=3.

    Returns
    -------
    Coefficients (a_q) needed for the linear combination of `q` points
    to get the first derivative according to Arbic et al. (2012)
    equations (20) and (22). At the boundaries forward and backward
    differences approximations are calculated.

    References
    ----------
    Cushman-Roisin, B. & Beckers, J.-M. Introduction to geophysical
    fluid dynamics: Physical and numerical aspects Academic Press, 2011,
    101, 828.

    Arbic, Brian B. Scott, R. B.; Chelton, D. B.; Richman, J. G. &
    Shriver, J. F. Effects of stencil width on surface ocean geostrophic
    velocity and vorticity estimation from gridded satellite altimeter
    data. Journal of Geophysical Research, 2012, 117, C03029.

    """
    # Calculate left and right stencils.
    q_left = (q - 1) // 2
    q_right = (q - 1) // 2 + 1
    #
    I = axis.size

    # Constructs matrices according to Cushman-Roisin & Beckers (2011)
    # equations (1.25) and adapted for variable grids as in Arbic et
    # al. (2012), equations (20), (22). The linear system of equations
    # is solved afterwards.
    coeffs = zeros((I, q))
    smart_coeffs = dict()
    for i in range(I):
        A = zeros((q, q))
        #
        if i < q_left:
            start = q_left - i
        else:
            start = 0
        if i > I - q_right:
            stop = i - (I - q_right)
        else:
            stop = 0
        #
        A[0, start:q + stop] = 1
        da = axis[i - q_left + start:i + q_right - stop] - axis[i]
        da_key = str(da)
        #
        if da_key not in smart_coeffs.keys():
            for h in range(1, q):
                A[h, start:q - stop] = da**h
            B = zeros((q, 1))
            # This tells where the p-th derivative is calculated
            B[p] = factorial(p)
            C = linalg.solve(A[:q - start - stop, start:q - stop],
                             B[:q - (start + stop), :])
            #
            smart_coeffs[da_key] = C.flatten()
        #
        coeffs[i, start:q - stop] = smart_coeffs[da_key]
    #
    return coeffs
Example #49
def TriaCoeff(a, b, c):
    # Calculating the triangle coefficient
    return factorial(a + b - c) * factorial(a - b + c) * factorial(
        -a + b + c) / (factorial(a + b + c + 1))
Example #50
def residuez(b, a, tol=1e-3, rtype='avg'):
    """Compute partial-fraction expansion of b(z) / a(z).

    If M = len(b) and N = len(a)

            b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
    H(z) = ------ = ----------------------------------------------
            a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)

                 r[0]                   r[-1]
         = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
           (1-p[0]z**(-1))         (1-p[-1]z**(-1))

    If there are any repeated roots (closer than tol), then the partial
    fraction expansion has terms like

               r[i]              r[i+1]                    r[i+n-1]
          -------------- + ------------------ + ... + ------------------
          (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

    See also
    --------
    invresz, poly, polyval, unique_roots

    """
    b, a = map(asarray, (b, a))
    gain = a[0]
    brev, arev = b[::-1], a[::-1]
    krev, brev = polydiv(brev, arev)
    if krev == []:
        k = []
    else:
        k = krev[::-1]
    b = brev[::-1]
    p = roots(a)
    r = p * 0.0
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]] * mult[n])
    p = asarray(p)
    # Compute the residue from the general formula (for discrete-time)
    #  the polynomial is in z**(-1) and the multiplication is by terms
    #  like this (1-p[i] z**(-1))**mult[i].  After differentiation,
    #  we must divide by (-p[i])**(m-k) as well as (m-k)!
    indx = 0
    for n in range(len(pout)):
        bn = brev.copy()
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]] * mult[l])
        an = atleast_1d(poly(pn))[::-1]
        # bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
        # multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
        sig = mult[n]
        for m in range(sig, 0, -1):
            if sig > m:
                # compute next derivative of bn(s) / an(s)
                term1 = polymul(polyder(bn, 1), an)
                term2 = polymul(bn, polyder(an, 1))
                bn = polysub(term1, term2)
                an = polymul(an, an)
            r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
                               polyval(an, 1.0 / pout[n]) /
                               factorial(sig - m) / (-pout[n]) ** (sig - m))
        indx += sig
    return r / gain, p, k
Example #51
def cm_volume_helper(cm_det_abs_root, n):
    return cm_det_abs_root / ((2.**(n / 2.)) * factorial(n))
Example #52
# PS1EX3
from numpy import sqrt, sin, pi
from scipy.misc import factorial

print(sqrt(17))
print(factorial(18))
print(sin((18 * 2 * pi) / 360))

# PS1EX4
import sys
import numpy as np

G = 9.81


def distance(height, time):
    fallen = 0.5 * G * time**2
    return height - fallen if fallen <= height else 0


if __name__ == '__main__':

    try:
        # also handles non-int input, via ValueError
        given = raw_input("Enter the height and time, space separated: ")
        height, time = [float(n) for n in given.split(' ')]
    except ValueError:
        print('Not a number. Usage: pset1ex4.py HEIGHT TIME')
        sys.exit()

    final_height = distance(height, time)
Example #53
def MollifyQuadBuffer(theta, c_j, a_n, x):
    """
    Mollify the spectral reconstruction of a discontinuous function 
    to reduce the effect of Gibbs phenomenon. Perform a real-space convolution 
    of a spectral reconstruction with an adaptive unit-mass mollifier.

    Parameters
    ----------
    theta : float
            free parameter to vary in the range 0 < theta < 1

    c_j : 1D array, 
          Array containing x positions of the discontinuities in the data
 
    a_n : 1D array, shape = (N+1,)
          N - highest order in the Chebyshev expansion
          vector of Chebyshev expansion coefficients

    x : 1D array,
        1D grid of points in real space, where the function is evaluated

    Returns
    ----------
    
    convolution : 1D array, shape = (len(x),)
                  real space mollified representation of a discontinuous function

    """

    N = a_n.shape[0] - 1
    deltax = 2.0 / (len(x) - 1)

    I_N = lambda y: T.chebval(y, a_n)
    I_Nf = lambda y: I_N(y) if -1 <= y <= 1 else 0
    buff_right = lambda y: I_N(2.0 - y) if 1.0 < y < 1.4 else 0
    buff_left = lambda y: I_N(-(2.0 + y)) if -1.4 < y < -1.0 else 0
    I_Nnew = lambda y: buff_right(y) + buff_left(y) + I_Nf(y)

    add_left = np.arange(-1.4, -1.0, deltax)
    add_right = np.arange(1.0, 1.4, deltax)
    offset = np.hstack((add_left, x, add_right))
    convolution = np.empty(len(offset))

    for idx, off_x in enumerate(offset):
        c_jx = c_j - off_x
        dx = lambda y: sqrt(theta * N * min(abs(y - c) for c in c_jx))
        var = lambda y: N / (2 * theta * dx(y))
        p_N = lambda y: (theta**2) * dx(y) * N
        j_max = int(np.amax(np.frompyfunc(p_N, 1, 1)(x)))
        h_j = np.zeros(2 * (j_max + 1))

        for j in range(j_max + 1):
            h_j[2 * j] = ((-1)**j) / ((4**j) * factorial(j))

        hermite = lambda y: H.hermeval(sqrt(var(y)) * y, h_j)
        expon = lambda y: (1. / sqrt(theta * N * dx(y))) * exp(-var(y) *
                                                               (y**2))
        phi = lambda y: hermite(y) * expon(y)
        phif = lambda y: phi(y - off_x)
        norm = quad(phi, -1.4, 1.4)[0]

        convfunc = lambda y: (phif(y) * I_Nnew(y))
        convolution[idx] = (1 / norm) * quad(convfunc, -1.4, 1.4)[0]

    convolution = convolution[len(add_left):-(len(add_right))]

    return convolution
Example #54
def residue(b, a, tol=1e-3, rtype='avg'):
    """
    Compute partial-fraction expansion of b(s) / a(s).

    If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
    expansion H(s) is defined as::

              b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
      H(s) = ------ = ----------------------------------------------
              a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]

               r[0]       r[1]             r[-1]
           = -------- + -------- + ... + --------- + k(s)
             (s-p[0])   (s-p[1])         (s-p[-1])

    If there are any repeated roots (closer together than `tol`), then H(s)
    has terms like::

            r[i]      r[i+1]              r[i+n-1]
          -------- + ----------- + ... + -----------
          (s-p[i])  (s-p[i])**2          (s-p[i])**n

    Returns
    -------
    r : ndarray
        Residues.
    p : ndarray
        Poles.
    k : ndarray
        Coefficients of the direct polynomial term.

    See Also
    --------
    invres, numpy.poly, unique_roots

    """

    b, a = map(asarray, (b, a))
    rscale = a[0]
    k, b = polydiv(b, a)
    p = roots(a)
    r = p * 0.0
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]] * mult[n])
    p = asarray(p)
    # Compute the residue from the general formula
    indx = 0
    for n in range(len(pout)):
        bn = b.copy()
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]] * mult[l])
        an = atleast_1d(poly(pn))
        # bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
        # multiplicity of pole at po[n]
        sig = mult[n]
        for m in range(sig, 0, -1):
            if sig > m:
                # compute next derivative of bn(s) / an(s)
                term1 = polymul(polyder(bn, 1), an)
                term2 = polymul(bn, polyder(an, 1))
                bn = polysub(term1, term2)
                an = polymul(an, an)
            r[indx + m - 1] = polyval(bn, pout[n]) / polyval(an, pout[n]) \
                          / factorial(sig - m)
        indx += sig
    return r / rscale, p, k
Example #55
    def graficos(self, PA):

        base_path = self.__base_path + '/Graficos/'
        # Optimization plots
        base_dir = '/PSO/'
        Validacao_Diretorio(base_path, base_dir)

        self.Otimizacao.Graficos(base_path + base_dir,
                                 Nome_param=self.parametros.simbolos,
                                 Unid_param=self.parametros.unidades,
                                 FO2a2=True)

        # Estimation plots
        base_dir = '/Estimacao/'
        Validacao_Diretorio(base_path, base_dir)

        # Comparison plot of experimental vs. model-calculated values, without variance
        for i in xrange(self.NY):
            y = self.y.experimental.matriz_estimativa[:, i]
            ym = self.y.modelo.matriz_estimativa[:, i]

            ymin = min(y)
            ymax = max(y)

            diag = linspace(min(y), max(y))
            fig = figure()
            ax = fig.add_subplot(1, 1, 1)
            plot(y, ym, 'bo', linewidth=2.0)
            plot(diag, diag, 'k-', linewidth=2.0)
            ax.yaxis.grid(color='gray', linestyle='dashed')
            ax.xaxis.grid(color='gray', linestyle='dashed')
            xlim((ymin, ymax))
            ylim((ymin, ymax))

            if self.y.simbolos == None and self.y.unidades == None:
                xlabel(r'$y_{%d}$' % (i + 1) + ' experimental')
                ylabel(r'$y_{%d}$' % (i + 1) + ' calculado')
            elif self.y.simbolos != None and self.y.unidades == None:
                xlabel(self.y.simbolos[i] + ' experimental')
                ylabel(self.y.simbolos[i] + ' calculado')
            elif self.y.simbolos != None and self.y.unidades != None:
                xlabel(self.y.simbolos[i] + '/' + self.y.unidades[i] +
                       ' experimental')
                ylabel(self.y.simbolos[i] + '/' + self.y.unidades[i] +
                       ' calculado')
            fig.savefig(base_path + base_dir + 'grafico_' +
                        str(self.y.nomes[i]) + '_ye_ym_sem_var.png')
            close()

        # Coverage region (likelihood)

        Hist_Posicoes, Hist_Fitness = self.regiaoAbrangencia(PA)

        if self.NP == 1:

            aux = []
            for it in xrange(
                    size(self.parametros.regiao_abrangencia) / self.NP):
                aux.append(self.parametros.regiao_abrangencia[it][0])

            X = [Hist_Posicoes[it][0] for it in xrange(len(Hist_Posicoes))]
            Y = Hist_Fitness
            X_sort = sort(X)
            Y_sort = [Y[i] for i in argsort(X)]
            fig = figure()
            ax = fig.add_subplot(1, 1, 1)
            plot(X_sort, Y_sort, 'bo', markersize=4)
            plot(self.parametros.estimativa[0],
                 self.Otimizacao.best_fitness,
                 'ro',
                 markersize=8)
            plot([min(aux), min(aux)], [min(Y_sort), max(Y_sort) / 4], 'r-')
            plot([max(aux), max(aux)], [min(Y_sort), max(Y_sort) / 4], 'r-')
            ax.text(min(aux),
                    max(Y_sort) / 4,
                    u'%.2e' % (min(aux), ),
                    fontsize=8,
                    horizontalalignment='center')
            ax.text(max(aux),
                    max(Y_sort) / 4,
                    u'%.2e' % (max(aux), ),
                    fontsize=8,
                    horizontalalignment='center')
            ax.yaxis.grid(color='gray', linestyle='dashed')
            ax.xaxis.grid(color='gray', linestyle='dashed')
            ylabel(r"$\quad \Phi $", fontsize=20)
            if self.parametros.simbolos == None and self.parametros.unidades == None:
                xlabel(r'$\theta_{%d}$' % (i + 1), fontsize=20)
            elif self.parametros.simbolos != None and self.parametros.unidades == None:
                xlabel(self.parametros.simbolos[0], fontsize=20)
            elif self.parametros.simbolos != None and self.parametros.unidades != None:
                xlabel(self.parametros.simbolos[0] + '/' +
                       self.parametros.unidades[0],
                       fontsize=20)
            fig.savefig(base_path + base_dir + 'Regiao_verossimilhanca_' +
                        str(self.parametros.nomes[0]) + '_' +
                        str(self.parametros.nomes[0]) + '.png')
            close()

        else:

            Combinacoes = int(
                factorial(self.NP) / (factorial(self.NP - 2) * factorial(2)))
            p1 = 0
            p2 = 1
            cont = 0
            passo = 1

            for pos in xrange(Combinacoes):
                if pos == (self.NP - 1) + cont:
                    p1 += 1
                    p2 = p1 + 1
                    passo += 1
                    cont += self.NP - passo

                fig = figure()
                ax = fig.add_subplot(1, 1, 1)

                for it in xrange(
                        size(self.parametros.regiao_abrangencia) / self.NP):
                    PSO, = plot(self.parametros.regiao_abrangencia[it][p1],
                                self.parametros.regiao_abrangencia[it][p2],
                                'bo',
                                linewidth=2.0,
                                zorder=1)

                plot(self.parametros.estimativa[p1],
                     self.parametros.estimativa[p2],
                     'r*',
                     markersize=10.0,
                     zorder=2)
                ax.yaxis.grid(color='gray', linestyle='dashed')
                ax.xaxis.grid(color='gray', linestyle='dashed')

                if self.parametros.simbolos == None and self.parametros.unidades == None:
                    xlabel(r'$\theta_{%d}$' % (p1 + 1), fontsize=20)
                    ylabel(r'$\theta_{%d}$' % (p2 + 1), fontsize=20)
                elif self.parametros.simbolos != None and self.parametros.unidades == None:
                    xlabel(self.parametros.simbolos[p1], fontsize=20)
                    ylabel(self.parametros.simbolos[p2], fontsize=20)
                elif self.parametros.simbolos != None and self.parametros.unidades != None:
                    xlabel(self.parametros.simbolos[p1] + '/' +
                           self.parametros.unidades[p1],
                           fontsize=20)
                    ylabel(self.parametros.simbolos[p2] + '/' +
                           self.parametros.unidades[p2],
                           fontsize=20)
                fig.savefig(base_path + base_dir + 'Regiao_verossimilhanca_' +
                            str(self.parametros.nomes[p1]) + '_' +
                            str(self.parametros.nomes[p2]) + '.png')
                close()
                p2 += 1
Example #56
def fact(x):
    """Inputs x and outputs x!."""
    return misc.factorial(x, exact=True)
Example #57
def clebsch(j1, j2, j3, m1, m2, m3):
    """Calculates the Clebsch-Gordon coefficient
    for coupling (j1,m1) and (j2,m2) to give (j3,m3).

    Parameters
    ----------
    j1 : float
        Total angular momentum 1.

    j2 : float
        Total angular momentum 2.

    j3 : float
        Total angular momentum 3.

    m1 : float
        z-component of angular momentum 1.

    m2 : float
        z-component of angular momentum 2.

    m3 : float
        z-component of angular momentum 3.

    Returns
    -------
    cg_coeff : float
        Requested Clebsch-Gordan coefficient.

    """
    if m3 != m1 + m2:
        return 0
    vmin = int(np.max([-j1 + j2 + m3, -j1 + m1, 0]))
    vmax = int(np.min([j2 + j3 + m1, j3 - j1 + j2, j3 + m3]))

    C = np.sqrt(
        (2.0 * j3 + 1.0) * factorial(j3 + j1 - j2) * factorial(j3 - j1 + j2) *
        factorial(j1 + j2 - j3) * factorial(j3 + m3) * factorial(j3 - m3) /
        (factorial(j1 + j2 + j3 + 1) * factorial(j1 - m1) *
         factorial(j1 + m1) * factorial(j2 - m2) * factorial(j2 + m2)))
    S = 0
    for v in range(vmin, vmax + 1):
        S += (-1.0) ** (v + j2 + m2) / factorial(v) * \
            factorial(j2 + j3 + m1 - v) * factorial(j1 - m1 + v) / \
            factorial(j3 - j1 + j2 - v) / factorial(j3 + m3 - v) / \
            factorial(v + j1 - j2 - m3)
    C = C * S
    return C
Example #58
 def ref_xfact(m):
     return factorial2(2 * m - 1) / np.sqrt(factorial(2 * m))
Example #59
 def single_poisson(self,k, lam):
     poisson         = (lam**k/factorial(k)) * np.exp(-lam)
     return  poisson
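The same PMF can be cross-checked against scipy.stats.poisson; k and lam below are assumed for illustration:

import numpy as np
from scipy.special import factorial
from scipy.stats import poisson

k, lam = 3, 2.5  # assumed values
print((lam ** k / factorial(k)) * np.exp(-lam))  # ~0.2138
print(poisson.pmf(k, lam))                       # same value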
Example #60
def gamma(x):
    if x % 1 == 0:
        return factorial(x - 1)
    if x % 1 == 0.5:
        return np.sqrt(np.pi) * factorial(
            2 * (x - 0.5)) / (4**(x - 0.5) * factorial((x - 0.5)))