Example #1
def multipole_fixed(max_l, points):
    """Calculate all frequency-independent quantities for the multipole
    decomposition.

    These depend only on the coordinates of the points, and need only be
    calculated once for each object and reused at multiple frequencies"""

    num_l = max_l + 1
    l = np.arange(num_l)[:, None]
    m_pos = np.arange(num_l)[None, :]
    m = np.hstack((m_pos, np.arange(-max_l, 0)[None, :]))

    r = np.sqrt(np.sum(points**2, axis=1))
    theta = np.arccos(points[:, 2]/r)
    phi = np.arctan2(points[:, 1], points[:, 0])

    st = np.sin(theta)
    ct = np.cos(theta)
    sp = np.sin(phi)
    cp = np.cos(phi)

    # calculate the unit vectors on the sphere
    r_hat = points/r[:, None]
    theta_hat = np.stack((ct*cp, ct*sp, -st), axis=-1)
    phi_hat = np.stack((-sp, cp, np.zeros_like(sp)), axis=-1)

    exp_imp = np.exp(-1j*m[None, :, :]*phi[:, None, None])

    P_lm = np.zeros((len(ct), len(l), m.shape[1]))
    dP_lm = np.zeros_like(P_lm)

    for point_count, ct_n in enumerate(ct):

        # associated Legendre function and its derivative
        P_lmp, dP_lmp = scipy.special.lpmn(max_l, max_l, ct_n)
        P_lmp = P_lmp.T
        dP_lmp = dP_lmp.T

        # Comes from http://dlmf.nist.gov/14.9, eq 14.9.13
        # Calculate negative values of m from positive
        # Does some relation apply to derivatives??
        P_neg = (-1)**m_pos*factorial(l-m_pos)/factorial(l+m_pos)
        P_lmn = P_neg*P_lmp
        dP_lmn = P_neg*dP_lmp

        # combine positive and negative P_lmn
        P_lm[point_count] = np.hstack((P_lmp,  P_lmn[:, :0:-1]))
        dP_lm[point_count] = np.hstack((dP_lmp, dP_lmn[:, :0:-1]))

    # theta derivative of P_lm(cos\theta)
    tau_lm = -st[:, None, None]*dP_lm
    pi_lm = P_lm*m/st[:, None, None]

    return (r, theta, phi, r_hat, theta_hat, phi_hat, P_lm, exp_imp,
            tau_lm, pi_lm)
Example #2
def log_likelihood(params):
	a = params[0]
	b = params[1]
	likelihood = 0.
	for i in range(len(x)):
		choose = float(factorial(n))/(factorial(x[i])*factorial(n-x[i]))
		print(">>>>>>>>>> choose is:", choose)
		pi = (choose*special.beta(a+x[i], n+b-x[i])/special.beta(a, b))*y[i]/np.sum(y)
		print(">>>>>>>>>> each pi is:", pi)
		likelihood += pi
	# print(likelihood)
	# return -np.sum(np.log(likelihood))
	return -1.*likelihood
Example #3
def overlap(n, m, l):
    n = int(n)
    m = int(m)
    if n >= 0 and m >= 0:
        smaller = min(n, m)

        fc = np.sqrt(scspecial.factorial(n) * scspecial.factorial(m))
        fc *= np.exp(-0.5 * l**2)

        fc *= np.sum([(-l)**(n-k) * l**(m-k) / scspecial.factorial(k)
                      / scspecial.factorial(m-k) / scspecial.factorial(n-k)
                      for k in range(smaller+1)])

        return fc
    return 0.0
Example #4
def _wigner_laguerre(rho, xvec, yvec, g, parallel):
    """
    Using Laguerre polynomials from scipy to evaluate the Wigner function for
    the density matrices :math:`|m><n|`, :math:`W_{mn}`. The total Wigner
    function is calculated as :math:`W = \sum_{mn} \\rho_{mn} W_{mn}`.
    """

    M = np.prod(rho.shape[0])
    X, Y = meshgrid(xvec, yvec)
    A = 0.5 * g * (X + 1.0j * Y)
    W = zeros(np.shape(A))

    # compute wigner functions for density matrices |m><n| and
    # weight by all the elements in the density matrix
    B = 4 * abs(A) ** 2
    if sp.isspmatrix_csr(rho.data):
        # for compress sparse row matrices
        if parallel:
            iterator = (
                (m, rho, A, B) for m in range(len(rho.data.indptr) - 1))
            W1_out = parfor(_par_wig_eval, iterator)
            W += sum(W1_out)
        else:
            for m in range(len(rho.data.indptr) - 1):
                for jj in range(rho.data.indptr[m], rho.data.indptr[m + 1]):
                    n = rho.data.indices[jj]

                    if m == n:
                        W += real(rho[m, m] * (-1) ** m * genlaguerre(m, 0)(B))

                    elif n > m:
                        W += 2.0 * real(rho[m, n] * (-1) ** m *
                                        (2 * A) ** (n - m) *
                                        sqrt(factorial(m) / factorial(n)) *
                                        genlaguerre(m, n - m)(B))
    else:
        # for dense density matrices
        B = 4 * abs(A) ** 2
        for m in range(M):
            if abs(rho[m, m]) > 0.0:
                W += real(rho[m, m] * (-1) ** m * genlaguerre(m, 0)(B))
            for n in range(m + 1, M):
                if abs(rho[m, n]) > 0.0:
                    W += 2.0 * real(rho[m, n] * (-1) ** m *
                                    (2 * A) ** (n - m) *
                                    sqrt(factorial(m) / factorial(n)) *
                                    genlaguerre(m, n - m)(B))

    return 0.5 * W * g ** 2 * np.exp(-B / 2) / pi
Example #5
	def __init__(self, yA, yB):
		yA,yB               = np.asarray(yA, dtype=float), np.asarray(yB, dtype=float)
		self.Y              = self._stack(yA, yB)
		self.JA             = yA.shape[0]
		self.JB             = yB.shape[0]
		self.J              = self.JA + self.JB
		self.labels0        = np.array( [0]*self.JA + [1]*self.JB )  #original labels
		self.labelsZeros    = np.array( [0]*self.J )  #empty labels
		if factorial(self.J)==np.inf:
			self.nPermTotal = -1
		else:
			self.nPermTotal = int(factorial(self.J) / ( factorial(self.JA)*factorial(self.JB) ))
		self.calc           = None
		self.Z              = None
		self._set_stat_calculator()
Example #6
    def __init__(self, xi, yi, axis=0):
        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)

        self.xi = np.asarray(xi)
        self.yi = self._reshape_yi(yi)
        self.n, self.r = self.yi.shape

        c = np.zeros((self.n+1, self.r), dtype=self.dtype)
        c[0] = self.yi[0]
        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
        for k in range(1, self.n):
            s = 0
            while s <= k and xi[k-s] == xi[k]:
                s += 1
            s -= 1
            Vk[0] = self.yi[k]/float(factorial(s))
            for i in range(k-s):
                if xi[i] == xi[k]:
                    raise ValueError("Elements of `xi` can't be equal.")
                if s == 0:
                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
                else:
                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
            c[k] = Vk[k-s]
        self.c = c
Example #7
    def _evaluate_derivatives(self, x, der=None):
        n = self.n
        r = self.r

        if der is None:
            der = self.n
        pi = np.zeros((n, len(x)))
        w = np.zeros((n, len(x)))
        pi[0] = 1
        p = np.zeros((len(x), self.r))
        p += self.c[0,np.newaxis,:]

        for k in range(1, n):
            w[k-1] = x - self.xi[k-1]
            pi[k] = w[k-1]*pi[k-1]
            p += pi[k,:,np.newaxis]*self.c[k]

        cn = np.zeros((max(der,n+1), len(x), r), dtype=self.dtype)
        cn[:n+1,:,:] += self.c[:n+1,np.newaxis,:]
        cn[0] = p
        for k in range(1, n):
            for i in range(1, n-k+1):
                pi[i] = w[k+i-1]*pi[i-1]+pi[i]
                cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
            cn[k] *= factorial(k)

        cn[n,:,:] = 0
        return cn[:der]
Example #8
 def hess(self, params, i, j):
     a = np.asarray(params[0])
     rng = np.arange(self.size)
     vec = ((a[..., None] ** rng) / factorial(rng))
     vec = np.insert(vec[..., :-1], 0, 0, -1)
     vec = np.insert(vec[..., :-1], 0, 0, -1)
     return hess_tensor(vec, params, i, j, True, True, dim=1)
Example #9
 def test_burkardt_13(self):
     # This is Ward's example #4.
     # This is a version of the Forsythe matrix.
     # The eigenvector problem is badly conditioned.
     # Ward's algorithm has difficulty estimating the accuracy
     # of its results for this problem.
     #
     # Check the construction of one instance of this family of matrices.
     A4_actual = _burkardt_13_power(4, 1)
     A4_desired = [[0, 1, 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1],
                   [1e-4, 0, 0, 0]]
     assert_allclose(A4_actual, A4_desired)
     # Check the expm for a few instances.
     for n in (2, 3, 4, 10):
         # Approximate expm using Taylor series.
         # This works well for this matrix family
         # because each matrix in the summation,
         # even before dividing by the factorial,
         # is entrywise positive with max entry 10**(-floor(p/n)*n).
         k = max(1, int(np.ceil(16/n)))
         desired = np.zeros((n, n), dtype=float)
         for p in range(n*k):
             Ap = _burkardt_13_power(n, p)
             assert_equal(np.min(Ap), 0)
             assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
             desired += Ap / factorial(p)
         actual = expm(_burkardt_13_power(n, 1))
         assert_allclose(actual, desired)
Example #10
File: igf.py Project: daimonie/igf
 def calculate_q(self): 
     factorial = lambda xx: scspecial.factorial(xx)
     
     newrange = lambda number: range(number+1) if number > 0 else [0]
     for x in newrange(self.mo):
         for y in newrange(self.mo):
             if y <= x and y%2 == x%2:
                 self.tensor_q[x,y] = (-0.5)**((x-y)/2.0) * factorial(x) * 1.0 /( factorial(y) * factorial( (x-y)/2))
Example #11
def dmatrix(l, beta, reorder_p=False):
    '''Wigner small-d matrix (in z-y-z convention)'''
    c = numpy.cos(beta/2)
    s = numpy.sin(beta/2)
    if l == 0:
        return numpy.eye(1)
    elif l == 1:
        mat = numpy.array(((c**2        , sqrt(2)*c*s , s**2       ), \
                           (-sqrt(2)*c*s, c**2-s**2   , sqrt(2)*c*s), \
                           (s**2        , -sqrt(2)*c*s, c**2       )))
        if reorder_p:
            mat = mat[[2,0,1]][:,[2,0,1]]
        return mat
    elif l == 2:
        c3s = c**3*s
        s3c = s**3*c
        c2s2 = (c*s)**2
        c4 = c**4
        s4 = s**4
        s631 = sqrt(6)*(c3s-s3c)
        s622 = sqrt(6)*c2s2
        c4s2 = c4-3*c2s2
        c2s4 = 3*c2s2-s4
        c4s4 = c4-4*c2s2+s4
        return numpy.array((( c4    , 2*c3s, s622, 2*s3c, s4   ),
                            (-2*c3s , c4s2 , s631, c2s4 , 2*s3c),
                            ( s622  ,-s631 , c4s4, s631 , s622 ),
                            (-2*s3c , c2s4 ,-s631, c4s2 , 2*c3s),
                            ( s4    ,-2*s3c, s622,-2*c3s, c4   )))
    else:
        facs = factorial(numpy.arange(2*l+1))
        cs = c**numpy.arange(2*l+1)
        ss = s**numpy.arange(2*l+1)

        mat = numpy.zeros((2*l+1,2*l+1))
        for i,m1 in enumerate(range(-l, l+1)):
            for j,m2 in enumerate(range(-l, l+1)):
                #:fac = sqrt( factorial(l+m1)*factorial(l-m1) \
                #:           *factorial(l+m2)*factorial(l-m2))
                #:for k in range(max(m2-m1,0), min(l+m2, l-m1)+1):
                #:    mat[i,j] += (-1)**(m1+m2+k) \
                #:            * c**(2*l+m2-m1-2*k) * s**(m1-m2+2*k) \
                #:            / (factorial(l+m2-k) * factorial(k) \
                #:               * factorial(m1-m2+k) * factorial(l-m1-k))
                #:mat[i,j] *= fac
                k = numpy.arange(max(m2-m1,0), min(l+m2, l-m1)+1)
                tmp = (cs[2*l+m2-m1-2*k] * ss[m1-m2+2*k] /
                       (facs[l+m2-k] * facs[k] * facs[m1-m2+k] * facs[l-m1-k]))

                mask = ((m1+m2+k) & 0b1).astype(bool)
                mat[i,j] -= tmp[ mask].sum()
                mat[i,j] += tmp[~mask].sum()

        ms = numpy.arange(-l, l+1)
        msfac = numpy.sqrt(facs[l+ms] * facs[l-ms])
        mat *= numpy.einsum('i,j->ij', msfac, msfac)
    return mat
Example #12
def factorial(N):
    """Compute the factorial of N.
    If N <= 10, use a fast lookup table; otherwise use scipy.special.factorial
    """
    if N < len(FACTORIALS):
        return FACTORIALS[N]
    else:
        from scipy import special
        return int(special.factorial(N))
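The FACTORIALS lookup table is defined elsewhere in that project; a minimal self-contained sketch of the same pattern (the table contents, its length, and the name factorial_lookup are assumptions for illustration):

import math
from scipy import special

# Hypothetical lookup table: exact factorials of 0..10 (length assumed for illustration)
FACTORIALS = [math.factorial(i) for i in range(11)]

def factorial_lookup(N):
    """Return N! from the table when possible, otherwise fall back to scipy."""
    if N < len(FACTORIALS):
        return FACTORIALS[N]
    return int(special.factorial(N))

assert factorial_lookup(5) == 120                  # served from the lookup table
assert factorial_lookup(12) == math.factorial(12)  # falls back to scipy.special.factorial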
Example #13
	def __init__(self, y, *args):
		self.Y          = y                         #original responses
		self.J          = y.shape[0]                #number of responses
		self.Z          = None                      #test statistic distribution
		self.labels0    = np.arange( self.J )       #original labels
		self.nPermTotal = factorial( self.J )
		self.nPermTotal = -1 if self.nPermTotal==np.inf else int(self.nPermTotal)
		self.calc       = None
		self._set_teststat_calculator(*args)
Example #14
	def __init__(self, y, x):
		self.Y             = y
		self.x             = x
		self.J             = x.size
		self.labels0       = np.arange( self.J )  #original labels
		self.nPermTotal    = int( factorial( self.J ) )
		self.calc          = None
		self.Z             = None  #PDF
		self._set_stat_calculator()
Example #15
 def eval(self, params):
     a = np.asarray(params[0])
     rng = np.arange(self.size)
     vec = (a[..., None] ** rng) / factorial(rng)
     val = Tensor(vec, dim=1)
     vec = np.insert(vec[..., :-1], 0, 0, -1)
     grad = grad_tensor(vec, params, 0, True, dim=1)
     vec = np.insert(vec[..., :-1], 0, 0, -1)
     hess = hess_tensor(vec, params, 0, 0, True, True, dim=1)
     return val, [grad], [[hess]]
Example #16
def factorial(N):
    """Compute the factorial of N.
    If N <= 16, use a fast lookup table; otherwise use scipy.special.factorial
    """
    if N < len(FACTORIALS):
        return FACTORIALS[N]
    elif scipy_special is None:
        raise ValueError("need scipy for computing larger factorials")
    else:
        return int(scipy_special.factorial(N))
Example #17
def _par_wig_eval(args):
    """
    Private function for calculating terms of Laguerre Wigner function
    using parfor.
    """
    m, rho, A, B = args
    W1 = zeros(np.shape(A))
    for jj in range(rho.data.indptr[m], rho.data.indptr[m + 1]):
        n = rho.data.indices[jj]

        if m == n:
            W1 += real(rho[m, m] * (-1) ** m * genlaguerre(m, 0)(B))

        elif n > m:
            W1 += 2.0 * real(rho[m, n] * (-1) ** m *
                             (2 * A) ** (n - m) *
                             sqrt(factorial(m) / factorial(n)) *
                             genlaguerre(m, n - m)(B))
    return W1
Example #18
 def get_strain_from_stress(self, stress):
     """
     Gets the strain from a stress state according
     to the compliance expansion corresponding to the
     tensor expansion.
     """
     compl_exp = self.get_compliance_expansion()
     strain = 0
     for n, compl in enumerate(compl_exp):
         strain += compl.einsum_sequence([stress]*(n+1)) / factorial(n+1)
     return strain
Example #19
File: igf.py Project: daimonie/igf
 def overlap(self): 
     factorial = lambda xx: scspecial.factorial(xx)
     
     newrange = lambda number: range(number+1) if number > 0 else [0]
     for n in newrange(self.np):
         for m in newrange(self.np):
             smaller = n
             if m < n:
                 smaller = m
                 
             self.overlap_matrix[n,m] = np.exp(-0.5*self.pec**2) * np.sqrt( factorial(n) * factorial(m))
             self.overlap_matrix[n,m] *= np.sum(np.array([((-self.pec)**(n-k) * (self.pec)**(m-k))/(factorial(k) * factorial(n-k) * factorial(m-k)) for k in newrange(smaller)]))
Example #20
def Wigner_d(j, m1, m2, x):
    # using definition from A.R. Edmonds, eq 4.1.23
    from scipy.special import factorial, eval_jacobi
    from numpy import sqrt
    mp, m = m1, m2
    p = 1
    if mp + m < 0:
        mp, m = -mp, -m
        p *= (-1) ** (mp - m)
    if mp < m:
        mp, m = m, mp
        p *= (-1) ** (mp - m)
    return (p * 
        sqrt(
            factorial(j + mp) * factorial(j - mp) / 
            (factorial(j + m) * factorial(j - m))
            ) * 
        sqrt(.5 + .5 * x) ** (mp + m) * 
        sqrt(.5 - .5 * x) ** (mp - m) * 
        eval_jacobi(j - mp, mp - m, mp + m, x)
    )
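A quick sanity check (an addition, not part of the original snippet): for j = 1 the matrix elements have simple closed forms, e.g. d^1_00(beta) = cos(beta) and d^1_11(beta) = (1 + cos(beta))/2.

import numpy as np

beta = 0.7
x = np.cos(beta)
assert np.isclose(Wigner_d(1, 0, 0, x), np.cos(beta))              # d^1_00 = cos(beta)
assert np.isclose(Wigner_d(1, 1, 1, x), 0.5 * (1 + np.cos(beta)))  # d^1_11 = (1 + cos(beta))/2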
Example #21
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
    """
    Estimate the Taylor polynomial of f at x by polynomial fitting.

    Parameters
    ----------
    f : callable
        The function whose Taylor polynomial is sought. Should accept
        a vector of `x` values.
    x : scalar
        The point at which the polynomial is to be evaluated.
    degree : int
        The degree of the Taylor polynomial
    scale : scalar
        The width of the interval to use to evaluate the Taylor polynomial.
        Function values spread over a range this wide are used to fit the
        polynomial. Must be chosen carefully.
    order : int or None, optional
        The order of the polynomial to be used in the fitting; `f` will be
        evaluated ``order+1`` times. If None, use `degree`.

    Returns
    -------
    p : poly1d instance
        The Taylor polynomial (translated to the origin, so that
        for example p(0)=f(x)).

    Notes
    -----
    The appropriate choice of "scale" is a trade-off; too large and the
    function differs from its Taylor polynomial too much to get a good
    answer, too small and round-off errors overwhelm the higher-order terms.
    The algorithm used becomes numerically unstable around order 30 even
    under ideal circumstances.

    Choosing order somewhat larger than degree may improve the higher-order
    terms.

    """
    if order is None:
        order = degree

    n = order+1
    # Choose n points that cluster near the endpoints of the interval in
    # a way that avoids the Runge phenomenon. Ensure, by including the
    # endpoint or not as appropriate, that one point always falls at x
    # exactly.
    xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x

    P = KroghInterpolator(xs, f(xs))
    d = P.derivatives(x,der=degree+1)

    return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
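A short usage sketch (an addition, assuming numpy, KroghInterpolator and factorial are in scope as in the scipy source above): fitting exp near 0 should recover coefficients close to 1/n!.

import numpy as np

p = approximate_taylor_polynomial(np.exp, 0, degree=3, scale=1.0)
print(p.coeffs[::-1])        # roughly [1, 1, 1/2, 1/6] (constant term first)
print(p(0.1), np.exp(0.1))   # p is shifted to the origin, so p(dx) approximates f(x + dx)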
Example #22
def poisson(m,*args):
    lam = float(args[0])

    # Make sure input is int!    
    if type(m) == float:
        n = int(m)
    elif type(m) == np.ndarray:
        n = m.astype(int)
    else:
        n = m
        
    # Make lambda a float to prevent int overflow!
    return np.power(lam,n)*np.exp(-lam)/factorial(n)
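As a quick check (an addition, assuming factorial is scipy.special.factorial as elsewhere in this listing), the hand-rolled pmf matches scipy.stats:

import numpy as np
from scipy.stats import poisson as poisson_dist

n = np.arange(10)
assert np.allclose(poisson(n, 3.5), poisson_dist.pmf(n, 3.5))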
Example #23
 def irns(delay, gain, niter, duration, samplerate=None, nchannels=1):
     '''
     Returns an IRN_S noise. The iterated ripple noise is obtained through
     a cascade of gain and delay filtering.
     For more details see Yost 1996 or chapter 15 of Hartmann, Signals, Sounds, and Sensation.
     '''
     if nchannels!=1:
         raise ValueError("nchannels!=1 not supported.")
     samplerate = get_samplerate(samplerate)
     noise=Sound.whitenoise(duration)
     splrate=noise.samplerate
     x=np.array(noise.T)[0]
     IRNfft=fft(x)
     Nspl,spl_dur=len(IRNfft),float(1.0/splrate)
     w=2*np.pi*fftfreq(Nspl,spl_dur)
     d=float(delay)
     for k in range(1,niter+1):
         nchoosek=factorial(niter)/(factorial(niter-k)*factorial(k))
         IRNfft+=nchoosek*(gain**k)*IRNfft*np.exp(-1j*w*k*d)
     IRNadd = ifft(IRNfft)
     x=np.real(IRNadd)
     return Sound(x,samplerate)
Example #24
 def grad(self, params, i):
     """
     grad_f = ((x / m) - (x + r) / (r + m)) * f
     """
     m = np.asarray(params[0])
     x = np.arange(self.size)
     vec = gammaln(self.r + x) - gammaln(self.r)
     vec -= np.log(factorial(x))
     vec += x * np.log(m) + self.r * np.log(self.r)
     vec -= (x + self.r) * np.log(self.r + m)
     return grad_tensor(
         np.exp(vec) * ((x / m) - (x + self.r) / (self.r + m)),
         params, i, True, dim=1)
Example #25
def _qfunc_pure(psi, alpha_mat):
    """
    Calculate the Q-function for a pure state.
    """
    n = np.prod(psi.shape)
    if isinstance(psi, Qobj):
        psi = psi.full().flatten()
    else:
        psi = psi.T

    qmat = abs(polyval(fliplr([psi / sqrt(factorial(arange(n)))])[0],
                       conjugate(alpha_mat))) ** 2

    return real(qmat) * exp(-abs(alpha_mat) ** 2) / pi
Example #26
def CumulativePoisson(lam, m):
    
    m = int(np.floor(m)) # only use ints!
    
    if m < 0:
        print("m must be >= 0!")
        return -1
    
    tot = 0.0
    
    for i in range(0,m):
        tot += np.power(lam,i)/factorial(i)
    
    return tot*np.exp(-lam)
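Note that the loop runs over range(0, m), i.e. i = 0 ... m-1, so the result is P(X <= m-1). A quick check against scipy.stats (an addition, assuming factorial is scipy.special.factorial):

import numpy as np
from scipy.stats import poisson as poisson_dist

lam, m = 4.2, 6
# range(0, m) stops at m - 1, so this equals the Poisson CDF evaluated at m - 1
assert np.isclose(CumulativePoisson(lam, m), poisson_dist.cdf(m - 1, lam))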
Example #27
	def __init__(self, y, x, roi=None):
		self.Y             = y
		self.x             = x
		self.J             = x.size
		self.I             = y.shape[2] if self.ismultivariate else 1   #number of vector components
		self.labels0       = np.arange( self.J )  #original labels
		self.nPermTotal    = int( factorial( self.J ) )
		self.calc          = None
		self.ZZ            = None                      #all permuted test statistic fields
		self.Z             = None                      #primary PDF:    test statistic field maxima distribution
		self.Z2            = None                      #secondary PDF:  cluster metric distribution
		self.roi           = roi                       #region(s) of interest
		self._set_stat_calculator()
		self._set_roi(roi)
Example #28
 def __call__(self, params):
     """
     exp(
         gammaln(r + x) - gammaln(r) - ln x! +
         x ln(m) + r ln(r) - (x + r) * ln(r + m))
         )
     """
     m = np.asarray(params[0])
     x = np.arange(self.size)
     vec = gammaln(self.r + x) - gammaln(self.r)
     vec -= np.log(factorial(x))
     vec += x * np.log(m) + self.r * np.log(self.r)
     vec -= (x + self.r) * np.log(self.r + m)
     return Tensor(np.exp(vec), dim=1)
Example #29
    def get_effective_ecs(self, strain, order=2):
        """
        Returns the effective elastic constants
        from the elastic tensor expansion.

        Args:
            strain (Strain or 3x3 array-like): strain condition
                under which to calculate the effective constants
            order (int): order of the ecs to be returned
        """
        ec_sum = 0
        for n, ecs in enumerate(self[order-2:]):
            ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)
        return ec_sum
Example #30
    def calculate_stress(self, strain):
        """
        Calculates a given elastic tensor's contribution to the
        stress using Einstein summation

        Args:
            strain (3x3 array-like): matrix corresponding to strain
        """
        strain = np.array(strain)
        if strain.shape == (6,):
            strain = Strain.from_voigt(strain)
        assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
        stress_matrix = self.einsum_sequence([strain]*(self.order - 1)) \
                / factorial(self.order - 1)
        return Stress(stress_matrix)
Example #31
def ana(t, tau, mu, phi):
    """                                      
    Solution of $y'(t>=0)=\mu y(t-tau) $     
    et $y(t<=0)=h$                           
    $y(t) = \sum_0^{[t/\tau]+1}              
        \frac{(\mu(t-(n-1)\tau))^n}{n!}$     
    t :float                                 
        current time                         
    tau : float                              
        delay                                
    phi : float                              
        past state t<=t0          

    Reference
    --------
    Baker, 1995, Issues in the numerical solution of evolutionary delay 
        differential equations, Advances in Computational Mathematics.
    """
    s = 0.
    for n in range(int(np.floor(t / tau)) + 2):
        s += (mu*(t - (n - 1.) * tau)**n) / \
                factorial(n)
    return s * phi
Example #32
def F_q2d(n, m):
    if n == 0:
        num = m**2 * factorial2(2 * m - 3)
        den = 2**(m + 1) * factorial(m - 1)
        return num / den
    elif n > 0 and m == 1:
        t1num = 4 * (n - 1)**2 * n**2 + 1
        t1den = 8 * (2 * n - 1)**2
        term1 = t1num / t1den
        term2 = 11 / 32 * kronecker(n, 1)
        return term1 + term2
    else:
        Chi = m + n - 2
        nt1 = 2 * n * Chi * (3 - 5 * m + 4 * n * Chi)
        nt2 = m**2 * (3 - m + 4 * n * Chi)
        num = nt1 + nt2

        dt1 = (m + 2 * n - 3) * (m + 2 * n - 2)
        dt2 = (m + 2 * n - 1) * (2 * n - 1)
        den = dt1 * dt2

        term1 = num / den
        return term1 * gamma(n, m)
Example #33
    def weights(dim, degree=3):
        """
        Gauss-Hermite quadrature weights.

        Parameters
        ----------
        dim : int
            Dimension of the input random variable.

        degree : int, optional
            Degree of the integration rule.

        Returns
        -------
        : (num_points, ) ndarray
            GH quadrature weights of given degree.
        """
        # 1D sigma-points (x) and weights (w)
        x, w = hermegauss(degree)
        # hermegauss() provides weights that cause posdef errors
        w = factorial(degree) / (degree**2 * hermeval(x, [0] *
                                                      (degree - 1) + [1])**2)
        return np.prod(cartesian([w] * dim), axis=1)
Example #34
 def test_coherent_state_fock_elements(self):
     r"""Tests if a range of alpha-displaced states have the correct Fock basis elements
    |\alpha> = exp(-0.5 |\alpha|^2) \sum_n \alpha^n / \sqrt{n!} |n>"""
     self.logTestName()
     for mag_alpha in mag_alphas:
         for phase_alpha in phase_alphas:
             alpha = mag_alpha * np.exp(1j * phase_alpha)
             self.circuit.reset(pure=self.kwargs['pure'])
             self.circuit.displacement(alpha, 0)
             state = self.circuit.state()
             if state.is_pure:
                 numer_state = state.ket()
             else:
                 numer_state = state.dm()
             ref_state = np.array([
                 np.exp(-0.5 * np.abs(alpha)**2) * alpha**n /
                 np.sqrt(factorial(n)) for n in range(self.D)
             ])
             if not self.kwargs['pure']:
                 ref_state = np.outer(ref_state, np.conj(ref_state))
             self.assertAllAlmostEqual(numer_state,
                                       ref_state,
                                       delta=self.tol)
Example #35
    def test_prob_fock_state_nongaussian(self):
        """Tests that probabilities of particular Fock states |n> are correct for a nongaussian state."""
        self.logTestName()
        for mag_alpha in mag_alphas:
            for phase_alpha in phase_alphas:
                self.circuit.reset(pure=self.kwargs['pure'])

                alpha = mag_alpha * np.exp(1j * phase_alpha)
                ref_state = np.array([
                    np.exp(-0.5 * np.abs(alpha)**2) * alpha**n /
                    np.sqrt(factorial(n)) for n in range(self.D)
                ])
                ref_probs = np.abs(ref_state)**2

                self.circuit.prepare_coherent_state(alpha, 0)
                self.circuit.prepare_fock_state(self.D // 2, 1)
                state = self.circuit.state()

                for n in range(self.D):
                    prob_n = state.fock_prob([n, self.D // 2])
                    self.assertAllAlmostEqual(prob_n,
                                              ref_probs[n],
                                              delta=self.tol)
Example #36
def histogram(trip_l):
    real_d = [sum(t['Dist']) for t in trip_l]
    perfect_d = vectorized_haversine(
        np.array([[t['From']['Lat'], t['From']['Long']] for t in trip_l]),
        np.array([[t['To']['Lat'], t['To']['Long']] for t in trip_l]))
    efficiency = [
        real / perfect - 1 for real, perfect in zip(real_d, perfect_d)
    ]

    print(*list(zip(real_d, perfect_d, efficiency)), sep='\n')

    mean_eff = np.mean(efficiency)
    var_eff = np.var(efficiency)
    print(mean_eff, var_eff)
    theta = var_eff / mean_eff
    k = mean_eff**2 / var_eff
    x = np.linspace(0.00001, 2, 10000)
    # gamma pdf: x**(k-1) * exp(-x/theta) / (Gamma(k) * theta**k); factorial(k - 1) == Gamma(k)
    gamma = np.power(x, k - 1) * np.exp(
        -x / theta) / (factorial(k - 1) * theta**k)

    plt.hist(efficiency, bins=300, density=True, range=(0, 2))
    plt.plot(x, gamma, 'r--')
    plt.show()
Example #37
    def compensate(self, pos, t, tref, nit=2):
        """Approximately compensate for how much an object currently
		at position pos has moved since the reference time tref, assuming
		it has a similar orbit to us. Compensates for both orbital motion
		and parallax."""
        # First find the sun distance and sun-relative coordinates
        pos_sunrel = pos
        for i in range(nit):
            pos_oo = coordinates.recenter(pos_sunrel, self.pzen)
            x = (pos_oo[0] - self.lon0) % (2 * np.pi)
            sundist = interpolate.splev(x, self.dist_spline)
            pos_sunrel, earthdist = parallax.earth2sun_mixed(pos, sundist, t)
        # Then apply the orbital correction
        pos_oo = coordinates.recenter(pos_sunrel, self.pzen)
        x = (pos_oo[0] - self.lon0) % (2 * np.pi)
        delta_t = t - tref
        old = pos_oo.copy()
        for i, spline in enumerate(self.deriv_splines):
            deriv = interpolate.splev(x, spline)
            pos_oo[0] -= delta_t**(i + 1) / special.factorial(i + 1) * deriv
        # Transform back to celestial coordinates
        opos = coordinates.decenter(pos_oo, self.pzen)
        return opos
Example #38
    def log_likelihood(params, *args):
        r, p = params
        X = args[0]
        N = X.size

        # MLE estimate based on the formula on Wikipedia:
        # http://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
        result = np.sum(gammaln(X + r)) \
            - np.sum(np.log(factorial(X))) \
            - N*(gammaln(r)) \
            + N*r*np.log(p) \
            + np.sum(X*np.log(1-(p if p < 1 else 1-infinitesimal)))

        # TODO: I have put these conditions in here to correct for a double-scalars error.
        if (np.isnan(result)):
            return 0.0
        if (np.isinf(result)):
            if (result < 0):
                return -100000
            else:
                return 100000

        return -result
Example #39
def G_q2d(n, m):
    """G term for 2D-Q polynomials.  oe-20-3-2483 Eq. (A.15).

    Parameters
    ----------
    n : int
        radial order
    m : int
        azimuthal order

    Returns
    -------
    float
        G

    """
    if n == 0:
        num = special.factorial2(2 * m - 1)
        den = 2**(m + 1) * special.factorial(m - 1)
        return num / den
    elif n > 0 and m == 1:
        t1num = (2 * n**2 - 1) * (n**2 - 1)
        t1den = 8 * (4 * n**2 - 1)
        term1 = -t1num / t1den
        term2 = 1 / 24 * kronecker(n, 1)
        return term1 - term2
    else:
        # nt1 = numerator term 1, d = denominator...
        nt1 = 2 * n * (m + n - 1) - m
        nt2 = (n + 1) * (2 * m + 2 * n - 1)
        num = nt1 * nt2
        dt1 = (m + 2 * n - 2) * (m + 2 * n - 1)
        dt2 = (m + 2 * n) * (2 * n + 1)
        den = dt1 * dt2

        term1 = -num / den
        return term1 * gamma(n, m)
Example #40
def DWBA(qx, qz, lamda, n, z, sigma, eta, h, eta_z, d=[0], taylor_n=1):
    k = 2 * pi / lamda
    qx = array(qx, dtype=float64)
    sqn = n**2
    sqn = array(sqn, dtype=complex)
    # Calculating electrical fields
    omega = arctan(qx / qz)  # Not the real omega given by Fewster
    omegap = arcsin(sqrt(qx**2 + qz**2) / 2 / k)
    kxi = k * cos(omega + omegap)
    kxf = k * cos(omegap - omega)
    (T, R, kz) = AmpElfield_q(k, r_[kxi, kxf], lamda, n[::-1], r_[0,
                                                                  d[1:][::-1]])

    lenq = len(qx)
    Ri = R[:, :lenq]
    Rf = R[:, lenq:]
    Ti = T[:, :lenq]
    Tf = T[:, lenq:]
    ki = kz[:, :lenq]
    kf = kz[:, lenq:]
    G = array([Ti * Tf, Ti * Rf, Ri * Tf, Ri * Rf], dtype=complex)
    q = array([ki + kf, ki - kf, -ki + kf, -ki - kf], dtype=complex)

    # Setting up for the Fourier integral as given by Pape et.al.
    maxn = taylor_n
    dqmin = (qx[1] - qx[0]) * eta / (maxn**(1.0 / 2.0 / h))
    if abs(dqmin) < 1e-12:
        dqmin = 1e-3
    q_min = arange(qx[0] * eta / (maxn**(1.0 / 2.0 / h)),
                   qx[-1] * eta + 2 * dqmin,
                   dqmin,
                   dtype=float64)

    table = array(make_F(q_min, h), dtype=complex)
    fn = factorial(arange(1, maxn + 1))
    s = dwba_sum(qx, G, q, eta, h, sigma, sqn, z, table, q_min, fn, eta_z)
    return (s, omega + omegap, omegap - omega)
Example #41
    def test_constructor(self):
        a0, wavelength, p0, omega0, m = 2, 980e-9, 0, 1e-6, 3
        cm = (2/constants.pi)**(1/4) / \
            np.sqrt(omega0*(2**m)*special.factorial(m))
        z0 = constants.pi*omega0**2/wavelength

        hgb1d = HGBeam1D(
            a0=a0, wavelength=wavelength, p0=p0, omega0=omega0, m=m)

        self.assertEqual(hgb1d.a0, a0)
        self.assertEqual(hgb1d.cm, cm)
        self.assertEqual(hgb1d.p0, p0)
        self.assertEqual(hgb1d.omega0, omega0)
        self.assertEqual(hgb1d.omega0m, np.sqrt(2*m+1)*omega0)
        self.assertEqual(hgb1d.m, m)
        self.assertEqual(hgb1d.z0, z0)

        self.assertAlmostEqual(
            hgb1d.a_f(10), a0/(1+(10-p0)**2/z0**2)**(1/4))  # amplitude
        self.assertAlmostEqual(hgb1d.omega_f(
            10), omega0*np.sqrt(1+(10-p0)**2/z0**2))  # mode field radius
        self.assertAlmostEqual(hgb1d.r_f(10), (10-p0) *
                               (1+z0**2/(10-p0)**2))  # wavefront radius of curvature
        self.assertAlmostEqual(hgb1d.phi_f(10), np.arctan((10-p0)/z0))  # phi phase
Example #42
    def correlation(i, j):
        """
        The correlation function defined using the normalization and fm

        Parameters
        ----------
        f, g, c: complex
            Dimensionless quantities formed using the system parameters

        m: int
            The truncation of the index upto which the factor should be calculated

        i, j: int
            The index (i, j) of the matrix to calculate the correlation function

        Returns
        -------
        corr: float
            The value of the correlation.
        """
        f, g, c = self.f, self.g, self.c

        m = self.m
        N = self.N

        corr = 0

        coefficient = lambda x: np.power(2, x) / factorial(x)
        hyp = lambda x: self.fm(x + j) * (self.fm(x + i).conjugate())

        generator = (coefficient(idx) * hyp(idx) for idx in range(m))

        for term in generator:
            corr += term

        return corr / N
Example #43
def displaced_squeezed(alpha, r, phi, D, pure=True, batched=False, eps=1e-12):
    """creates a single mode input displaced squeezed state"""
    alpha = tf.cast(alpha, def_type)
    r = tf.cast(r, def_type) + eps # to prevent nans if r==0, we add an epsilon (default is miniscule)
    phi = tf.cast(phi, def_type)

    phase = tf.exp(1j * phi)
    sinh = tf.sinh(r)
    cosh = tf.cosh(r)
    tanh = tf.tanh(r)

    # create Hermite polynomials
    gamma = alpha * cosh + tf.conj(alpha) * phase * sinh
    hermite_arg = gamma / tf.sqrt(phase * tf.sinh(2 * r))

    prefactor = tf.expand_dims(tf.exp(-0.5 * alpha * tf.conj(alpha) - 0.5 * tf.conj(alpha) ** 2 * phase * tanh), -1)
    coeff = tf.stack([_numer_safe_power(0.5 * phase * tanh, n / 2.) / tf.sqrt(factorial(n) * cosh)
                      for n in range(D)], axis=-1)
    hermite_terms = tf.stack([tf.cast(H(n, hermite_arg), def_type) for n in range(D)], axis=-1)
    squeezed_coh = prefactor * coeff * hermite_terms

    if not pure:
        squeezed_coh = mixed(squeezed_coh, batched)
    return squeezed_coh
Example #44
    def approximate(self, x, a):
        """
        Use this method to approximate the function supplied as the first
        item in the `functions` argument when instantiating the class at the 
        value of `x` (f(x)). Choose `a` such that it can easily be calculated by the 
        the function of interest. 

        The basic form of the equation is as follows:
        f(x) ~ f(a) + f'(a)(x-a)/1! + f"(a)((x-a)^2)/2! + f'''(a)((x-a)^3)/3! ... etc
        
        Arguments:
            x {float} -- desired value for function evaluation
            a {float} -- variable used in the approximation to prevent difficult evaluations of your function
        
        Returns:
            float -- Taylor Series Approximation of f(x)
        """
        fun = self.functions[0]
        initial = fun(a)
        for i, fun in enumerate(self.functions[1:]):
            coef = fun(a) / factorial(i + 1)
            right = (x - a)**(i + 1)
            initial += coef * right
        return initial
Example #45
def trap_rate(H, T, dE):
    # give values to the constants
    h = 4.135667662 * (10**(-15))  # h: Planck constant in eV*s
    niu = 208  # frequency of ground state electron in cm^-1
    kb = 8.6173324 * (10**(-5))  # Boltzmann constant in eV/K
    lambda_out = 0.2412  # The outer shell contribution of the reorganization energy. Unit in eV
    phonon_energy = h * niu * 3 * (10**10
                                   )  # change the niu unit from cm^-1 to Hz
    S = 22  # lambda_in/phonon_energy, unit is cm^-1. lambda_in: inner shell contribution of the reorganization energy
    h_bar = 6.582 * (10**(-16))  # h/2pi

    temperature_change = kb * lambda_out * T
    free_energy_change = 0
    # use a large number (40) in place of infinity; to be discussed in the project
    for i in range(40):
        energy = (S**(i) / factorial(i)) * (np.exp(-(
            (dE - i * phonon_energy - lambda_out)**2) /
                                                   (4 * temperature_change)))
        free_energy_change += energy
    k = (2 * (np.pi) *
         (H**2) / h_bar) * (1 / np.sqrt(2 * (np.pi) * temperature_change)) * (
             np.exp(-S)) * free_energy_change
    return k
Example #46
    def __init__(self, G):
        self.nodes = list(G.nodes)
        self.edges = list(G.edges)

        self.reverse_an_edge_from = copy.deepcopy(self.edges)
        random.shuffle(self.reverse_an_edge_from)
        self.remove_an_edge_from = copy.deepcopy(self.edges)
        random.shuffle(self.remove_an_edge_from)

        self.shuffled_nodes_1 = copy.deepcopy(self.nodes)
        random.shuffle(self.shuffled_nodes_1)
        self.shuffled_nodes_2 = copy.deepcopy(self.nodes)
        random.shuffle(self.shuffled_nodes_2)

        self.reverse_counter = 0
        self.remove_counter = 0
        self.add_counter = [0, 0]  #i,j

        # print(self.add_counter)

        self.possible_operations = {1, 2, 3}
        self.max_add_edge_operations = factorial(len(
            self.nodes), exact=True) - len(self.edges)  #adding an edge
        self.G = G
Example #47
def Born(qx, qz, lamda, n, z, sigma, eta, h, eta_z, d=[0], taylor_n=1):
    k = 2 * pi / lamda
    qx = array(qx, dtype=float64)
    sqn = n**2
    sqn = array(sqn, dtype=complex)
    # print 'Setup Complete'
    # Calculating electrical fields
    omega = arctan(qx / qz)  # Not the real omega given by Fewster
    omegap = arcsin(sqrt(qx**2 + qz**2) / 2 / k)
    # Setting up for the Fourier integral as given by Pape et.al.
    maxn = taylor_n
    dqmin = (qx[1] - qx[0]) * eta / (maxn**(1.0 / 2.0 / h))
    q_min = arange(qx[0] * eta / (maxn**(1.0 / 2.0 / h)),
                   qx[-1] * eta + 2 * dqmin,
                   dqmin,
                   dtype=float64)

    table = make_F(q_min, h)
    table = array(table, dtype=complex128)

    fn = factorial(arange(1, maxn + 1))
    s = born_ext(qx, qz, eta, h, sigma, sqn, z, table, q_min, fn, eta_z)

    return (s, omega + omegap, omegap - omega)
Example #48
def qho_approx(pm, n):
    r"""Calculates the nth eigenstate of the quantum harmonic oscillator, and shifts to ensure it
    is neither an odd nor an even function (necessary for the Gram-Schmidt algorithm). 

    .. math:: 

        \bigg(-\frac{1}{2} \frac{d^{2}}{dx^{2}} + \frac{1}{2} \omega^{2} x^{2} \bigg) \phi_{n}(x) 
        = \varepsilon_{n} \phi_{n}(x)

        \phi_{n}(x) = \frac{1}{\sqrt{2^{n}n!}} \bigg(\frac{\omega}{\pi}\bigg)^{1/4} e^{-\frac{\omega x^{2}}{2}} 
        H_{n}\bigg(\sqrt{\omega}x \bigg)

    parameters
    ----------
    pm : object
        Parameters object
    n : integer
        Principal quantum number

    returns
    -------
    array_like
        1D array of the nth eigenstate, indexed as eigenstate[space_index]
    """
    # Single-electron eigenstate
    eigenstate = np.zeros(pm.space.npt, dtype=float)

    # Constants
    factorial = spspec.factorial(n)
    omega = 30.0/(pm.sys.xmax**2)
    norm = np.sqrt(np.sqrt(omega/np.pi)/((2.0**n)*factorial))

    # Assign elements
    for j in range(pm.space.npt):
        x = -pm.sys.xmax + j*pm.space.delta
        eigenstate[j] = norm*(spspec.hermite(n)(np.sqrt(omega)*(x+1.0)))*np.exp(-0.5*omega*((x+1.0)**2)) 

    return eigenstate
Example #49
def FF_Yang_Dou_residual(vbyu, *args):
    """
    The Yang_Dou residual function; to be used by numerical root finder
    """
    (Re, rough) = args

    Rstar = Re / (2 * vbyu * rough)
    theta = np.pi * np.log(Rstar / 1.25) / np.log(100 / 1.25)
    alpha = (1 - np.cos(theta)) / 2
    beta = 1 - (1 - 0.107) * (alpha + theta / np.pi) / 2
    R = Re / (2 * vbyu)

    rt = 1.
    for i in range(1, 5):
        rt = rt - 1. / np.e * (i / factorial(i) * (67.8 / R)**(2 * i))

    return vbyu - (1 - rt) * R / 4. - rt * (
        2.5 * np.log(R) - 66.69 * R**-0.72 + 1.8 -
        (2.5 * np.log(
            (1 + alpha * Rstar / 5) / (1 + alpha * beta * Rstar / 5)) +
         (5.8 + 1.25) * (alpha * Rstar / (5 + alpha * Rstar))**2 + 2.5 *
         (alpha * Rstar / (5 + alpha * Rstar)) - (5.8 + 1.25) *
         (alpha * beta * Rstar / (5 + alpha * beta * Rstar))**2 - 2.5 *
         (alpha * beta * Rstar / (5 + alpha * beta * Rstar))))
Example #50
def hull_volume(xyz):
    """ Calculate the volume of the convex hull of 3D (X,Y,Z) LMA data.
        xyz is a (N_points, 3) array of point locations in space. """
    assert xyz.shape[1] == 3
        
    tri = Delaunay(xyz[:,0:3])
    vertices = tri.points[tri.simplices]
    
    # This is the volume formula in 
    # https://github.com/scipy/scipy/blob/master/scipy/spatial/tests/test_qhull.py#L106
    # Except the formula needs to be divided by ndim! to get the volume, cf., 
    # http://en.wikipedia.org/wiki/Simplex#Geometric_properties
    # Credit Pauli Virtanen, Oct 14, 2012, scipy-user list
    
    q = vertices[:,:-1,:] - vertices[:,-1,None,:]
    simplex_volumes = (1.0 / factorial(q.shape[-1])) * np.fromiter(
           (np.linalg.det(q[k,:,:]) for k in range(tri.nsimplex)) , dtype=float)
    # print vertices.shape # number of simplices, points per simplex, coords
    # print q.shape
    
    # The simplex volumes have negative values since they are oriented
    # (think surface normal direction for a triangle).
    volume=np.sum(np.abs(simplex_volumes))
    return volume, vertices, simplex_volumes
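A small usage check (an addition, assuming numpy, Delaunay and factorial are imported as the function requires): since the Delaunay simplices tile the convex hull, the summed simplex volumes should agree with scipy's ConvexHull volume.

import numpy as np
from scipy.spatial import ConvexHull

rng = np.random.default_rng(0)
xyz = rng.random((50, 3))
volume, vertices, simplex_volumes = hull_volume(xyz)
assert np.isclose(volume, ConvexHull(xyz).volume)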
Example #51
def genpoisson(mu, lmbda, n):
    """
    Array of Generalized Poisson probabilities for a given mean number per event and per xtalk event.

    Parameters
    ----------
    mu : float scalar
        The mean number per event
    lmbda : float scalar
        The mean number per xtalk event
    n : int scalar
        The array size to return probabilities

    Returns
    -------
    gpdist : ndarray 
        Generalized Poisson distribution of size 'n'.

    """
    k = np.arange(n)
    mu = np.ones_like(k, dtype=float) * mu
    gpdist = mu * np.power(mu + k * lmbda,
                           k - 1) * np.exp(-mu - k * lmbda) / factorial(k)
    return gpdist
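With lmbda = 0 the generalized Poisson reduces to the ordinary Poisson distribution, which gives a convenient check (an addition, assuming factorial is scipy.special.factorial):

import numpy as np
from scipy.stats import poisson as poisson_dist

k = np.arange(20)
assert np.allclose(genpoisson(1.7, 0.0, 20), poisson_dist.pmf(k, 1.7))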
Example #52
def calc_Ihat_tcos(a, b, gamma, zeta0, h1, h2, **kwargs):
    """
    See calc_Ihat_tcone(). They are very similar, but this function is a bit shorter
    """
    nfacs = 7
    p0 = kwargs["p0"]
    G = np.sqrt(h1**2 + h2**2) * (2*np.pi / a)
    
    ##################
    # Pre gamma (calculations independent of gamma, and zeta0!)

    # Static storage / cache
    if 'dims' not in calc_Ihat_tcos.__dict__:
        calc_Ihat_tcos.dims = []
        calc_Ihat_tcos.cache = dict()
    
    dim = len(h1.shape)
    if dim not in calc_Ihat_tcos.dims:
        calc_Ihat_tcos.dims.append(dim)
        
        pregamma = np.zeros((nfacs, *G.shape), dtype=float)
        for i, n in enumerate(np.arange(1, nfacs+1)):
            print("integral power:", n, "/", nfacs)
            quad_array = np.vectorize( lambda Gx: quad(lambda x: x*jv(0, Gx*x)*(zeta0*np.cos(0.5*np.pi*x/p0))**n, 0, p0)[0] ) 
            pregamma[i] = quad_array(G) * 2*np.pi / a**2 / factorial(n)
       
        calc_Ihat_tcos.cache[dim] = pregamma

    else:
        pregamma = calc_Ihat_tcos.cache[dim]

    postgamma = np.zeros_like(pregamma, dtype=complex)
    for i, n in enumerate(np.arange(1, nfacs+1)):
        postgamma[i] = (-1j*gamma)**n

    return np.where(G == 0, 1, 0) + np.sum(pregamma * postgamma, axis=0)
Example #53
 def initialise_d_start_time(self, arg):
     """
     Initialise the derivative of the state with respect to gbar at t = start_time
     """
     if arg == 'dm':
         alpha_terms = []
         beta_terms = []
         for n in range(0, (self.N)):
             for m in range(0, (self.N)):
                 alpha_terms.append(
                     -exp(-absolute(self.alpha)**2) *
                     ((conj(self.alpha)**m * self.alpha**n) /
                      (sqrt(factorial(n) * factorial(m))) * exp(
                          (self.start_time - sin(self.start_time)) * 1j *
                          (self.k**2 *
                           (n**2 - m**2) - 2. * self.k * self.gbar *
                           (n - m))) *
                      (2. * pi * (self.start_time - sin(self.start_time)) *
                       1j * self.k *
                       (n - m)) * fock(self.N, n) * fock(self.N, m).dag()))
                 beta_terms.append(
                     exp(-absolute(self.beta)**2) *
                     (self.beta**n * conj(self.beta)**m /
                      (sqrt(factorial(n) * factorial(m))) *
                      fock(self.N, n) * fock(self.M, m).dag()))
         drho = tensor(sum(alpha_terms), sum(beta_terms))
         return drho.full()
     if arg == 'ket':
         alpha_terms = []
         beta_terms = []
         for n in range(0, (self.N)):
             alpha_terms.append(
                 exp(-absolute(self.alpha)**2 / 2.) * conj(self.alpha)**n /
                 (sqrt(factorial(n))) * exp(
                     (self.start_time - sin(self.start_time)) * 1j *
                     (self.k**2 * n**2 - 2. * self.k * self.gbar * n)) *
                 ((self.start_time - sin(self.start_time)) * 2. * 1j *
                  self.k * n) * fock(self.N, n))
             beta_terms.append(
                 exp(-absolute(self.beta)**2 / 2.) * self.beta**n /
                 (sqrt(factorial(n))) * fock(self.N, n))
         return tensor(sum(alpha_terms), sum(beta_terms)).full()
Example #54
def w2_coeffs(l, p, n, m, q, s):
    a, b, c, d = 0, 0, 0, 0
    fac = 0.0
    w2 = 0.0

    if (l >= abs(m)) and (p >= abs(q)) and (n >= abs(s)):
        #if ( abs(l-p) <= n <= l+p):
        if (l + p + n) % 2 == 0:

            fac = np.sqrt((sp.factorial(l + m) / sp.factorial(l - m)) *
                          (sp.factorial(p + q) / sp.factorial(p - q)) *
                          (sp.factorial(n - s) / sp.factorial(n + s)))

            a = ((-1)**s) * float(
                wigner_3j(l, p, n, 0, 0, 0) *
                wigner_3j(l, p, n, m - 1, q - 1, -s)) * np.sqrt(
                    (l + m) * (l - m + 1) * (p + q) * (p - q + 1))
            #print "Coeff a",a,"\n"
            d = ((-1)**s) * float(
                wigner_3j(l, p, n, 0, 0, 0) *
                wigner_3j(l, p, n, m + 1, q + 1, -s)) * np.sqrt(
                    (l - m) * (l + m + 1) * (p - q) * (p + q + 1))

            if (a != 0) or (d != 0):
                print "non-zero"

            b = ((-1)**s) * float(
                wigner_3j(l, p, n, 0, 0, 0) *
                wigner_3j(l, p, n, m - 1, q + 1, -s)) * np.sqrt(
                    (p - q) * (p + q + 1) * (l + m) * (l - m + 1))
            c = ((-1)**s) * float(
                wigner_3j(l, p, n, 0, 0, 0) *
                wigner_3j(l, p, n, m + 1, q - 1, -s)) * np.sqrt(
                    (p + q) * (p - q + 1) * (l - m) * (l + m + 1))

            w2 = 2.0 * (2 * n + 1) * ((a - b - c + d) / 4.0) * fac

    return w2.real
Example #55
def one_mode_matelem(beta, r, theta, m, n, tol=1.0e-8):
    """ Calculates the function f_{m, n}(r, theta, beta) as defined in the conventions file
    If abs(r)<tol then r is taken to be exactly zero and an optimized routine is used
    """
    # pylint: disable=too-many-arguments
    if np.abs(r) > tol:
        nu = np.exp(-1j * theta) * np.sinh(r)
        mu = np.cosh(r)
        alpha = beta * mu - np.conjugate(beta) * nu
        mini = min(n, m)
        hermiteni = hermite(beta / np.sqrt(2 * mu * nu), n + 1)
        hermitemi = hermite(
            -np.conjugate(alpha) / np.sqrt(-2 * mu * np.conjugate(nu)), m + 1)

        ssum = 0.0 + 0.0 * 1j
        for i in range(mini + 1):
            prod = (binom(m, i) / factorial(n - i)) * (
                (2 / (mu * nu))**(i / 2)) * ((-np.conjugate(nu) / (2 * mu))**(
                    (m - i) / 2)) * hermiteni[n - i] * hermitemi[m - i]
            ssum += prod

        matel = ssum * np.sqrt(
            factorial(n) /
            (factorial(m) * mu)) * (nu / (2 * mu))**(n / 2) * np.exp(
                -0.5 * (np.abs(beta)**2 - np.conjugate(nu) * beta**2 / mu))
        return matel

    alpha = beta
    mini = min(n, m)
    ssum = 0.0 + 0.0 * 1j
    for i in range(mini + 1):
        ssum += ((-1)**(m - i)) * (beta**(n - i)) * (np.conjugate(beta)**(
            m - i)) * binom(m, i) / factorial(n - i)

    matel = ssum * np.sqrt(factorial(n) /
                           (factorial(m))) * np.exp(-0.5 * np.abs(beta)**2)
    return matel
Example #56
    def _get_M(self, delta_t):
        n = len(self.a)
        A = np.zeros(n)
        for i, ai in enumerate(self.a):
            Ae = [
                self.a[i] * (-1)**j * factorial(i) / factorial(j) /
                factorial(i - j) / ((delta_t)**i) for j in range(i + 1)
            ]  # Elementary A to assemblate in A
            for j, aej in enumerate(Ae):
                A[j] += aej

        n = len(self.b)
        B = np.zeros(n)
        for i, ai in enumerate(self.b):
            Be = [
                self.b[i] * (-1)**j * factorial(i) / factorial(j) /
                factorial(i - j) / ((delta_t)**i) for j in range(i + 1)
            ]  # Elementary B to assemblate in B
            for j, bej in enumerate(Be):
                B[j] += bej

        Mo = [-x / B[0] for x in B[1:][::-1]]
        Mi = [x / B[0] for x in A[::-1]]
        return (Mi, Mo)
Example #57
def derivative(fun, z0, n=1, **kwds):
    """
    Calculate n-th derivative of complex analytic function using FFT

    Parameters
    ----------
    fun : callable
        function to differentiate
    z0 : real or complex scalar at which to evaluate the derivatives
    n : scalar integer, default 1
        Number of derivatives to compute where 0 represents the value of the
        function and n represents the nth derivative. Maximum number is 100.

    r : real scalar, default 0.0061
        Initial radius at which to evaluate. For well-behaved functions,
        the computation should be insensitive to the initial radius to within
        about four orders of magnitude.
    max_iter : scalar integer, default 30
        Maximum number of iterations
    min_iter : scalar integer, default max_iter // 2
        Minimum number of iterations before the solution may be deemed
        degenerate.  A larger number allows the algorithm to correct a bad
        initial radius.
    step_ratio : real scalar, default 1.6
        Initial grow/shrinking factor for finding the best radius.
    num_extrap : scalar integer, default 3
        number of extrapolation steps used in the calculation
    full_output : bool, optional
        If `full_output` is False, only the derivative is returned (default).
        If `full_output` is True, then (der, status) is returned `der` is the
        derivative, and `status` is a Results object.

    Returns
    -------
    der : ndarray
       array of derivatives
    status: Optional object into which output information is written. Fields:
        degenerate: True if the algorithm was unable to bound the error
        iterations: Number of iterations executed
        function_count: Number of function calls
        final_radius: Ending radius of the algorithm
        failed: True if the maximum number of iterations was reached
        error_estimate: approximate bounds of the rounding error.

    This module uses the method of Fornberg to compute the derivatives of a
    complex analytic function along with error bounds. The method uses a
    Fast Fourier Transform to invert function evaluations around a circle into
    Taylor series coefficients, uses Richardson Extrapolation to improve
    and bound the estimate, then multiplies by a factorial to compute the
    derivatives. Unlike real-valued finite differences, the method searches for
    a desirable radius and so is reasonably insensitive to the initial
    radius, to within a number of orders of magnitude at least. For most cases,
    the default configuration is likely to succeed.

    Restrictions

    The method uses the coefficients themselves to control the truncation
    error, so the error will not be properly bounded for functions like
    low-order polynomials whose Taylor series coefficients are nearly zero.
    If the error cannot be bounded, degenerate flag will be set to true, and
    an answer will still be computed and returned but should be used with
    caution.

    Examples
    --------

    Compute the first 6 Taylor derivatives of 1 / (1 - z) at z0 = 0:
    >>> import numdifftools.fornberg as ndf
    >>> import numpy as np
    >>> def fun(x):
    ...    return 1./(1-x)
    >>> c, info = ndf.derivative(fun, z0=0, n=6, full_output=True)
    >>> np.allclose(c, [1, 1, 2, 6, 24, 120, 720, 5040])
    True
    >>> np.all(info.error_estimate < 1e-9*c.real)
    True
    >>> (info.function_count, info.iterations, info.failed) == (144, 18, False)
    True


    References
    ----------
    [1] Fornberg, B. (1981).
        Numerical Differentiation of Analytic Functions.
        ACM Transactions on Mathematical Software (TOMS),
        7(4), 512-526. http://doi.org/10.1145/355972.355979
    """
    result = taylor(fun, z0, n=n, **kwds)
    # convert taylor series --> actual derivatives.
    m = _num_taylor_coefficients(n)
    fact = factorial(np.arange(m))
    if kwds.get('full_output'):
        coefs, info_ = result
        info = _INFO(info_.error_estimate * fact, *info_[1:])
        return coefs * fact, info
    return result * fact
Example #58
def coherent_state(alpha, cutoff):
    """Returns the Fock representation of the coherent state |alpha> up to dimension given by cutoff"""
    n = np.arange(cutoff)
    return np.exp(-0.5 * np.abs(alpha)**2) * alpha**n / np.sqrt(factorial(n))
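A quick normalization check (an addition, assuming factorial is scipy.special.factorial): for a cutoff well above |alpha|^2 the truncated coherent state is essentially normalized.

import numpy as np

ket = coherent_state(0.8, cutoff=15)
assert np.isclose(np.sum(np.abs(ket)**2), 1.0)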
Example #59
 def function(xx):
     return xx * (1 + np.sum([(-1)**n * (xx)**(2 * n) / factorial(2 * n)
                              for n in range(1, N + 1)],
                             axis=0))
Example #60
def func(x, a, l):
    # Mixture of a Poisson term (weight z) and a gamma term (weight 1 - z);
    # z and k come from the enclosing module in the original project.
    return (((a**x) * np.exp(-a) / factorial(x)) * z
            + ((x / l)**(k - 1) * np.exp(-x / l) * (1 - z)) / (l * factorial(k - 1)))