Example no. 1
def calc_a_coef(problem, boundary, eval_bc0, M, m1):
    k = problem.k
    R = problem.R
    a = problem.a
    nu = problem.nu

    phi0_data = np.zeros(n_nodes, dtype=complex)
    W = np.zeros((n_nodes, m1), dtype=complex)

    for i in range(n_nodes):
        th = th_data[i]
        r = boundary.eval_r(th)

        phi0_data[i] = eval_bc0(th)
        for m in range(1, m1+1):
            W[i, m-1] = jv(m*nu, k*r) * np.sin(m*nu*(th-a))

    for m in range(M+1, m1+1):
        W[:, m-1] = W[:, m-1] / jv(m*nu, k*R)

    a_coef, residuals, rank, s = np.linalg.lstsq(W, phi0_data)

    if rank != m1:
        warnings.warn('Rank deficient')

    return a_coef[:M], s
def compute_k2_vals(l_max, num_vals):
    """
    Compute the hyper-radial infinite-well K^2 eigenvalues for a well of unit radial width.
    The eigenvalues correspond to a well with parameter L = G + 3*D/2.

    Computes the squared zeros of J_{l+1/2}(x) for l = 0, 1/2, 1, ..., floor(l_max), floor(l_max)+1/2.
    :param l_max:  Maximum l to find zeros of
    :param num_vals:  Total number of zeros to find for each l
    :return K2:  A (2*l_max + 1) by num_vals ndarray containing the computed squared zeros.
        K2[2*L + D - 3] holds the eigenvalues for dimension D and hyper angular momentum L.
    """

    from numpy import arange, pi, zeros, zeros_like
    from scipy.optimize import brentq
    from scipy.special import jv

    zro = zeros((2 * l_max + 1, num_vals), dtype=float)

    z_l_m_1 = pi * arange(1,num_vals + l_max + 1)
    z_l = zeros_like(z_l_m_1)
    zz_l = zeros_like(z_l_m_1)

    zro[0] = z_l_m_1[:num_vals]

    for l in range(1, l_max + 1):
        for i in range(num_vals + l_max - l):
            zz_l[i] = brentq(lambda x: jv(l, x), z_l_m_1[i], z_l_m_1[i + 1])
            z_l[i] = brentq(lambda x: jv(l + .5, x), z_l_m_1[i], z_l_m_1[i + 1])

        z_l_m_1[:] = z_l[:]
        zro[2 * l] = z_l[:num_vals]
        zro[2 * l - 1] = zz_l[:num_vals]
    if num_vals == 1:
        zro = zro[:,0]
    return zro**2
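A minimal cross-check of the routine above (a sketch, assuming compute_k2_vals is available exactly as defined here): rows 2*l - 1 of the result hold the squared zeros of the integer-order J_l (for l >= 1), which scipy.special.jn_zeros returns directly.

import numpy as np
from scipy.special import jn_zeros

k2 = compute_k2_vals(l_max=2, num_vals=4)
print(np.allclose(np.sqrt(k2[1]), jn_zeros(1, 4)))  # zeros of J_1
print(np.allclose(np.sqrt(k2[3]), jn_zeros(2, 4)))  # zeros of J_2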
Example no. 3
def besselmode(m, u, w, x, y, phioff=0):
    """Calculate the field of a bessel mode LP mode.

    Arguments:
        - m azimuthal number of periods (m=0,1,2,3...)
        - u, w  radial phase constant and radial decay constant
        - x, y transverse coordinates
        - phioff: offset angle, allows to rotate the mode in
                  the x-y plane

    Returns:
        - mode: calculated bessel mode

    """
    xx,yy = np.meshgrid(x,y)
    rr = np.reshape( np.sqrt(xx**2 + yy**2), len(x)*len(y))
    phi = np.reshape( np.arctan2(xx,yy), len(x)*len(y))
    fak = jv(m,u)/kn(m, w)
    res = np.zeros(len(rr))
    indx1 = rr<=1
    res[indx1] = jv(m,u*rr[indx1])*np.cos(m*phi[indx1]+phioff)
    indx2 = rr>1
    res[indx2] = fak * kn(m, w * rr[indx2]) * np.cos(m * phi[indx2] + phioff)
    res = res / np.max(np.abs(res))
    return np.reshape(res, [len(y), len(x)])
Example no. 4
File: JPA.py Project: edumur/paramp
    def pumpistor_inductance(self, f=None, z_ext=None):
        """
        Return the pumpistor inductance.
        In the non-degenerate case, a parent class must provide an
        external_impedance method returning the impedance of the electrical
        environment seen by the SQUID.

        Parameters
        ----------
        f : float, np.ndarray, optional
            Signal frequency in hertz.
            Is required in the non-degenerate case but optional for the
            degenerate one.
        z_ext : float, np.ndarray, optional
            External impedance seen from the JPA point of view at the idler
            frequency.
        """

        # If z_ext is None, we return the pumpistor inductance of the
        # degenerate case.
        if z_ext is None:

            return -2.*np.exp(1j*self.delta_theta())/self.delta_f()\
                   *cst.hbar/2./cst.e/self.I_c/abs(np.sin(self.F()))\
                   *self.phi_s/(2.*jv(1., self.phi_s)\
                                - 2.*np.exp(2j*self.delta_theta())*jv(3., self.phi_s))
        else:

            return cst.h/2./cst.e/np.pi/self.I_c/np.sin(self.F())**2./self.delta_f()**2.\
                   *(- 2.*np.cos(self.F())\
                     + 1j*cst.h/2./cst.e/np.pi/self.I_c\
                         *2.*np.pi*(self.f_p - f)\
                         *(1./z_ext).conjugate())
def corner_exact(k,m):
    """
    created Monday 11 July 2016
    Function which vanishes at eigenenergies of circular corner.
    """
    psi=jv(m,r1*k)-jv(m,r2*k)*yn(m,r1*k)/yn(m,r2*k)
    return psi
Example no. 6
def get_left_int_error(n, order):
    a = 2
    b = 30
    intervals = np.linspace(0, 1, n, endpoint=True) ** 2 * (b-a) + a
    discr = CompositeLegendreDiscretization(intervals, order)

    x = discr.nodes
 
    assert abs(discr.integral(1+0*x) - (b-a)) < 1e-13


    alpha = 4
    from scipy.special import jv, jvp
    f = jvp(alpha, x)

    num_int_f = jv(alpha, a) + discr.left_indefinite_integral(f)
    int_f = jv(alpha, x)

    if 0:
        pt.plot(x.ravel(), num_int_f.ravel())
        pt.plot(x.ravel(), int_f.ravel())
        pt.show()

    L2_err = np.sqrt(discr.integral((num_int_f - int_f)**2))
    return 1/n, L2_err
Example no. 7
    def multiscale(self, ms, vm_in):
        """
        Here we take the input as an phasor in the electrical RF domain
        and the output is returned as a phasor in the optical domain
        
        vm_in = [a0, a1, a2 ... an]
        where
        v(t) = sum_{k=0}^n a_k exp(j k w0) + c.c.

        vm_out = [a0, a1, a2 ... an, a-n ... a-1]
        where
        E(t) = sum_{k=-n}^n a_k exp(j k w0)

        Note:
        Currently only the dc and fundamental component of the signal are used.
        All other harmonics are assumed filtered out.
        """
        #Output signal will be complex
        mso = ms.copy()
        mso.real = False
        
        #Assume only fundamental harmonic and DC at input
        a0 = vm_in[0]
        a1,theta1 = np.abs(vm_in[1]), np.angle(vm_in[1])

        #Jacobi-Anger expansion
        k = mso.ms[:,np.newaxis]
        b1 = special.jv(k, self.v1*a1) * np.exp(1j*self.v1*a0 + 1j*k*(theta1+0.5*pi))
        b2 = special.jv(k, self.v2*a1) * np.exp(1j*self.phi + 1j*self.v2*a0 + 1j*k*(theta1+0.5*pi))
        
        vm_out = self.eloss*self.Em*(b1 + self.a*b2)

        return mso, vm_out
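The b1/b2 lines above rely on the Jacobi-Anger expansion exp(1j*z*cos(theta)) = sum_n (1j)**n * J_n(z) * exp(1j*n*theta). A small self-contained check of that identity (arbitrary values, truncated at |n| <= 20):

import numpy as np
from scipy import special

z, theta = 1.7, 0.4
n = np.arange(-20, 21)
series = np.sum(1j**n * special.jv(n, z) * np.exp(1j*n*theta))
print(np.allclose(series, np.exp(1j*z*np.cos(theta))))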
Example no. 8
def bessel_series(a, b, z, maxiters=500, tol=tol):
    """Compute hyp1f1 using a series of Bessel functions; see (3.20) in
    _[pop].

    """
    Do, Dm, Dn = 1, 0, b/2
    r = sqrt(z*(2*b - 4*a))
    w = z**2 / r**(b + 1)
    # Ao is the first coefficient, and An is the second.
    Ao = 0
    # The first summand comes from the zeroth term.
    So = Do*jv(b - 1, r) / r**(b - 1) + Ao
    An = Dn*w*jv(b + 1, r)
    Sn = So + An
    i = 3
    while i <= maxiters:
        if np.abs(Ao/So) < tol and np.abs(An/Sn) < tol:
            break
        tmp = Dn
        Dn = ((i - 2 + b)*Dm + (2*a - b)*Do) / i
        Do = Dm
        Dm = tmp
        w *= z/r
        Ao = An
        An = Dn*w*jv(b - 1 + i, r)
        So = Sn
        Sn += An
        i += 1
    # if i > maxiters:
    #     warnings.warn("Number of evaluations exceeded maxiters on "
    #                   "a = {}, b = {}, z = {}.".format(a, b, z))
    return gamma(b)*np.exp(z/2)*2**(b - 1)*Sn
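A usage sketch for the function above (it depends on the source module's globals gamma, sqrt, jv and a default tol, so it only runs in that context); it just prints the series value next to scipy's reference hyp1f1 for an arbitrary point, as an eyeball comparison:

from scipy.special import hyp1f1

a, b, z = 0.25, 1.5, 3.0
print(bessel_series(a, b, z), hyp1f1(a, b, z))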
Example no. 9
def Rayleigh_1d(lamda=600,r=0.1*1.E-3,d= 5*1.E-3,D = 2):
	
    a= 10 * 1.E-2
    k=(2.*pi)/(lamda*1.E-9)  #wavenumber; lamda is the vacuum wavelength in nm
    
    X_Mmax=a/2. ; X_Mmin = -a/2.; Y_Mmin = X_Mmin; Y_Mmax=X_Mmax
    N =400
    X=linspace(X_Mmin, X_Mmax,N); Y=X # coordinates of screen
    
    rm1=((X-d)**2+Y**2)**0.5
    A1=k*r*rm1/D
    
    I1=(ss.jv(1,A1)/A1)**2
    
    rm2=((X+d)**2+Y**2)**0.5
    A2=k*r*rm2/D
    I2=(ss.jv(1,A2)/A2)**2
    
    I=I1 + I2
    
    fig = plt.figure(figsize=(7,5))
    fig.suptitle('Fraunhofer Diffraction by circular aperture',fontsize=14, fontweight='bold')
    
    ax1 = fig.add_subplot(111)
    ax1.grid(True)
    ax1.plot(X,I1, linewidth=2, alpha=0.8, label="I1")
    ax1.plot(X,I2, linewidth=2, alpha=0.8, label="I2")
    ax1.plot(X,I, linewidth=2, alpha=0.8, label="I = I1 + I2")
    ax1.set_xlim(X_Mmin, X_Mmax)
    ax1.set_xlabel(r'$X \ (m)$',fontsize=14, fontweight='bold')
    ax1.set_ylabel(r'$I(X,Y)/I_0$',fontsize=14, fontweight='bold')
    plt.legend()
    plt.show()
Example no. 10
    def NormPart(self, Xsub):
        """
         The logarithmic normalization (partition) function of the von Mises and
            von Mises-Fisher distributions is calculated in this method. For 2D data the
            function is simplified for faster calculation.

            *** This function has been built after considerable research on the subject. However,
            some things are still not entirely clear, so it remains in a revision state until
            theoretically proven to be correctly used.

            Arguments
            ---------
                Xsub: The subset of the data points included in the von Mises-Fisher
                    distribution.

            Output
            ------
                The logarithmic value of the partition normalization function.

        """

        # Calculating r.
        # r is supposed to be the norm of the data points of the current cluster, not of the...
        # ...whole mixture as it is in the global objective function. ## This might need to be...
        # ...revised.
        r = np.linalg.norm(Xsub)

        # Calculating the von Mises-Fisher concentration k, approximated as in...
        # ...Banerjee et al. 2003.
        dim = Xsub.shape[1]

        # Calculating the partition function depending on the vector dimensions.
        k = (r * dim - np.power(r, 3.0)) / (1 - np.power(r, 2.0))

        # k=0.001 only for the case where r is too small, usually in the first 1-2 iterations...
        # ...of the EM/K-means.
        if k < 0.0:
            k = 0.001

        if dim > 3:

            # This is the proper way of calculating the von Mises-Fisher normalization factor.
            bessel = np.abs(special.jv((dim / 2.0) - 1.0, k))
            # bessel = np.abs(self.Jd((dim/2.0)-1.0, k))
            cdk = np.power(k, (dim / 2.0) - 1) / (np.power(2 * np.pi, dim / 2) * bessel)

        else:

            # This is the proper way for calculating the vMF normalization factor for 2D vectors.
            bessel = np.abs(special.jv(0, k))
            # bessel = np.abs(self.Jd(0, k))
            cdk = 1.0 / (2 * np.pi * bessel)

        # Returning the log of the normalization function plus the log of the k that is used in...
        # ...the von Mises-Fisher PDF, which is separated from the cosine distance because of the log.
        # The normalizers are multiplied by the number of points in the X data subset, because this is...
        # ...the global normalizer after all the summations have been completed.
        # NOTE: This still needs to be revised.
        return np.log(cdk) + np.log(k)  # * Xsub.shape[0]
Example no. 11
def _chassat_integral(x, zeta=None, Lam=None, w=None, K1=None, K2=None, n1=None, n2=None, m1=None, m2=None):
    """Inner integral to compute the correlations (Chassat)"""
    #compute bessel functions
    j1 = jv(n1+1,x)
    j2 = jv(n2+1,w*x)
    j3 = jv(m1+m2,zeta*x)
    j4 = jv(np.abs(m1-m2),zeta*x)
    return x**(-14./3.) * j1 * j2 * (1. + (Lam / x)**2.)**(-11./6.) * (K1/w * j3 + K2/w * j4)
Example no. 12
def bessel():
  for i in range(1,360):
    x = i / 6.28
    y1 = spl.jv(1,x)
    y2 = spl.yv(1,x)
    y3 = spl.jv(3,x)
    
    print("%7.3f, %7.3f, %7.3f, %7.3f" % (x, y1, y2, y3))
Example no. 13
def func_deriv(x):
	if x==0:
		return 0
	num=np.power(x,1+jv(0,x))
	dnum=num*(-1*jv(1,x)*np.log(x)+(1+jv(0,x))/x)
	den=np.sqrt(1-x+100*(x**2)-100*(x**3))
	dden=(-1+200*x-300*(x**2))/(2*den)
	df=(den*dnum-dden*num)/((den**2))
	return df
Example no. 14
def funSundePrimordial(p1, h1, a):
    """Função inicial para a curva teórica usando as curvas de bessel
    Entrada:
    a - espaçamento entre os condutores ou profundidade da camada
    Saída:
    resistividade para a profundidade "a"
    """
    f = lambda m: exp(-2 * m * h1) * (jv(0, m * a) - jv(0, 2 * m * a))
    return 2 * pi * a * quad(f, 0, Inf)[0]
def system(vec,V,Delta):


    ru,iu,rw,iw = vec
    u = ru+1j*iu
    w = rw+1j*iw
    first = u**2+w**2 - V**2
    second = jv(0,u)/u/jv(1,u) - (1-Delta)*kv(0,w)/w/kv(1,w)
    return np.real(first), np.imag(first),np.real(second), np.imag(second)
Example no. 16
    def eval_phi0(self, th):
        a = self.a
        nu = self.nu
        k = self.k

        r = self.boundary.eval_r(th)
        p = lambda th: (th-a) * (th - (2*np.pi-a)) / (a*(2*np.pi - a))

        return jv(3*nu/2, k*r) + (jv(nu/2, k*r) - jv(3*nu/2, k*r)) * p(th)
Example no. 17
def plot4(V=10, nguess=100, wavelength = 10**-6, a = 5*10**-6, l=0, eps=10**-4):
    lguess = [0]*nguess
    for i in range(nguess):
        lguess[i]=i*V*1.1/nguess
    lguess.append(V)
    functy = lambda x: (V**2-x*x)**(1/2)
    functj = lambda x: x*jv(l+1, x)/jv(l, x)
    functk = lambda x: functy(x)*kv(l+1, functy(x))/kv(l, functy(x))

    funct = lambda x: functj(x)-functk(x)
    lroots = [0] * len(lguess)
    for i in range(len(lroots)):
        lroots[i] = fsolve(funct, lguess[i])
    #print lroots
    lx = np.linspace(0,V+2, 10000)
    plt.scatter(lx, functj(lx), s=.5)
    plt.plot(lx, functk(lx))
    #plt.plot(lx, funct(lx))
    plt.ylim([0, max(functk(lx))])
    #sorry this bit isn't particularly clean. Had to change strategies. Lazy. Etc.
    def floatequals(a,b, eps=10**-6):
        if type(b)==float:
            b=[b]
        for number in b:
            if abs(a-number)<eps:
                return True
        return False
    roots=[]
    for i in range(len(lroots)):
        if lroots[i]<0:
            continue
        if lroots[i]==lguess[i]:
           continue
        if floatequals(lroots[i], roots, eps=eps):
            continue
        roots.extend(lroots[i])
    print(roots)
    plt.scatter(roots, functj(roots), c='red')
    #plt.show()
    plt.scatter(roots, list(map(functk, roots)), c='red')
    title='Characteristic equation for a fiber optic, recall that Y^2=V^2-X^2'
    plt.title(title, size='small')
    plt.xlabel('X')
    plt.ylabel('X*J_(l+1)(X)/J_l(X) and Y*K_(l+1)(Y)/K_l(Y)')
    plt.savefig('hw9-4.png')

    print('the roots are ' + str(roots))
    print(min(roots))

    print(functk(0))




    plt.show()
    plt.close()
Example no. 18
def CoreShell_ab(mCore,mShell,xCore,xShell):
#  http://pymiescatt.readthedocs.io/en/latest/forwardCS.html#CoreShell_ab
  m = mShell/mCore
  u = mCore*xCore
  v = mShell*xCore
  w = mShell*xShell

  mx = max(np.abs(mCore*xShell),np.abs(mShell*xShell))
  nmax = np.round(2+xShell+4*(xShell**(1/3)))
  nmx = np.round(max(nmax,mx)+16)
  n = np.arange(1,nmax+1)
  nu = n+0.5

  sv = np.sqrt(0.5*np.pi*v)
  sw = np.sqrt(0.5*np.pi*w)
  sy = np.sqrt(0.5*np.pi*xShell)

  pv = sv*jv(nu,v)
  pw = sw*jv(nu,w)
  py = sy*jv(nu,xShell)

  chv = -sv*yv(nu,v)
  chw = -sw*yv(nu,w)
  chy = -sy*yv(nu,xShell)

  p1y = np.append([np.sin(xShell)], [py[0:int(nmax)-1]])
  ch1y = np.append([np.cos(xShell)], [chy[0:int(nmax)-1]])
  gsy = py-(0+1.0j)*chy
  gs1y = p1y-(0+1.0j)*ch1y

  # B&H Equation 4.89
  Dnu = np.zeros((int(nmx)),dtype=complex)
  Dnv = np.zeros((int(nmx)),dtype=complex)
  Dnw = np.zeros((int(nmx)),dtype=complex)
  for i in range(int(nmx)-1,1,-1):
    Dnu[i-1] = i/u-1/(Dnu[i]+i/u)
    Dnv[i-1] = i/v-1/(Dnv[i]+i/v)
    Dnw[i-1] = i/w-1/(Dnw[i]+i/w)

  Du = Dnu[1:int(nmax)+1]
  Dv = Dnv[1:int(nmax)+1]
  Dw = Dnw[1:int(nmax)+1]

  uu = m*Du-Dv
  vv = Du/m-Dv
  fv = pv/chv

  dns = ((uu*fv/pw)/(uu*(pw-chw*fv)+(pw/pv)/chv))+Dw
  gns = ((vv*fv/pw)/(vv*(pw-chw*fv)+(pw/pv)/chv))+Dw
  a1 = dns/mShell+n/xShell
  b1 = mShell*gns+n/xShell

  an = (py*a1-p1y)/(gsy*a1-gs1y)
  bn = (py*b1-p1y)/(gsy*b1-gs1y)

  return an, bn
Example no. 19
def hello_world():
    # compute maximum
    f = lambda x: -special.jv(3,x)
    sol  = optimize.minimize(f,1.0)

    # plot
    x = numpy.linspace(0,10,5000)
    pyplot.plot(x, special.jv(3,x), '-', sol.x, -sol.fun, 'o')

    # produce output
    pyplot.savefig( PLOT_PATH + '/hello-world.png', dpi=96 )
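A hedged cross-check of the snippet above (not part of the original): the maximum found by optimize.minimize should sit at the first zero of J_3'(x), which scipy.special.jnp_zeros returns directly.

from scipy import special, optimize

sol = optimize.minimize(lambda x: -special.jv(3, x), 1.0)
print(sol.x[0], special.jnp_zeros(3, 1)[0])  # both ~4.2012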
Example no. 20
def exact_solution(A, B, N):
    """Use scipy to construct exact solution to the Airy equation."""
    # Setup Dedalus domain to get same grid
    domain = dedalus_domain(N)
    x = domain.grid(0)
    # Compute Bessel function on grid
    Jv = special.jv(A, x)
    # Solve for coefficient using boundary condition
    c = B / special.jv(A, 30)
    y_exact = c * Jv
    return y_exact
Example no. 21
 def P1(self):
     U = self.U
     W = self.W
     return W**2/U**2 * kv(1, W)**2/jv(1, U)**2 * (
         (1-self.s)**2 * (jv(0, U)**2 + jv(1, U)**2) +
         (1+self.s)**2 * (jv(2, U)**2 - jv(1, U)*jv(3, U)) +
         2*U**2/self.rb**2 * (jv(1, U)**2 - jv(0, U)*jv(2, U))
     )
Example no. 22
    def eval_v(self, r, th):
        """
        Using this function is "cheating".
        """
        a = self.a
        nu = self.nu
        k = self.k

        return (
            jv(nu/2, k*r) * np.sin(nu/2 * (th-a)) +
            jv(3*nu/2, k*r) * np.sin(3*nu/2 * (th-2*np.pi))
        )
Example no. 23
 def eigenvalue_equation(cls, fiber, W):
     U = sqrt(fiber.V**2 - W**2)
     j0 = jv(0, U)
     j1 = jv(1, U)
     k1 = kv(1, W)
     k1p = kvp(1, W, 1)
     return (
         j0/(U*j1) + (fiber.n**2 + fiber.nc**2)/(2*fiber.n**2) * k1p/(W*k1) - 1/U**2 +
         sqrt(
             ((fiber.n**2 - fiber.nc**2)/(2*fiber.n**2) * k1p/(W*k1))**2
             + (cls._rboverrk(fiber, U, fiber.V)**2/fiber.n**2) * (1/W**2 + 1/U**2)**2
         )
     )
Example no. 24
def get_r(M, sma, ecc):
    ### very fast series solution of Kepler's equation to 5th order. OK for low ecc of HST's orbit.

    E = M + ( jv(1, 1.0 * ecc) * sin(1.0 * M) * 2.0 / float(1.0) +
              jv(2, 2.0 * ecc) * sin(2.0 * M) * 2.0 / float(2.0) +
              jv(3, 3.0 * ecc) * sin(3.0 * M) * 2.0 / float(3.0) +
              jv(4, 4.0 * ecc) * sin(4.0 * M) * 2.0 / float(4.0) +
              jv(5, 5.0 * ecc) * sin(5.0 * M) * 2.0 / float(5.0) )

    T = 2.0 * numpy.arctan2(numpy.sqrt(1.0 + ecc) * numpy.sin(0.5 * E), numpy.sqrt(1.0 - ecc) * numpy.cos(0.5 * E))
    R = sma * (1.0 - ecc ** 2) / ( 1.0 + ecc * numpy.cos(T) )

    return R, T
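A self-contained sanity check of the truncated Bessel series for Kepler's equation used in get_r (arbitrary values): the recovered eccentric anomaly E should satisfy E - ecc*sin(E) = M to high accuracy for small eccentricity.

import numpy as np
from scipy.special import jv

M, ecc = 0.8, 0.05
E = M + sum(2.0/n * jv(n, n*ecc) * np.sin(n*M) for n in range(1, 6))
print(abs(E - ecc*np.sin(E) - M))  # ~1e-8 or smaller for this eccentricity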
Example no. 25
	def __init__(self,bc,face,FC):
		#Input parameters
		self.problem=bc.problem
		self.N_cycles=self.problem.N_cycles
		self.N_timesteps=self.problem.N_timesteps
		self.nu=self.problem.nu
		self.R=self.problem.radius[face]
		self.T0=self.problem.T
		self.dt=self.problem.dt
		self.options=self.problem.options
		
		#Load the Fourier Coefficients
		infile_FC=open("./data/"+FC)
		self.an=[];self.bn=[]
		for line in infile_FC:
			self.an.append(float(line.split()[0]))
			self.bn.append(float(line.split()[1]))
			
		self.omega=((2.*DOLFIN_PI)/self.T0)*self.N_cycles #omega
		self.Q_mean=self.problem.bc_in_Qmean[face] #Mean flow rate

		self.womn=self.R*(self.omega/self.nu)**0.5 #womersley number
		if master:
			print "Womersley Number is: :",self.womn

		#Poiseuille term constant
		self.poiseuille_term_constant=((2.*self.an[0]*self.Q_mean)/(numpy.pi*self.R**2.)) 

		#Pulsatile term constant
		self.pulsatile_term_numerator1=[]
		self.pulsatile_term_denominator=[]
		for n in range(1,len(self.an)):
			alpha=(n)**0.5*self.womn
			self.pulsatile_term_numerator1.append( alpha*1j**1.5*jv(0,alpha*1j**1.5) )
			self.pulsatile_term_denominator.append( alpha*1j**1.5*jv(0,alpha*1j**1.5)- 2*jv(1,alpha*1j**1.5) )

		self.r=[] #store r values
		self.pulsatile_for_all_r=[] #store the varying component of r

		#Velocity term constant
		self.velocity_term_constant=1./(numpy.pi*self.R**2.)*(self.Q_mean)
		
		#flow rate term
		self.t=[] #the flow rate series will remain the same for all 'r' values at a timestep
		self.Qn_at_current_ts=0 #store the series for current timestep for multiple r values

		
		#Directory to store the velocity profile
		self.wom_dir="WomProf_"+self.options["case_name"]+"_womn"+str(self.womn)#Qmean"+str(self.an[0]*self.Q_mean)+"_R"+str(self.R)+"_nu"+str(self.nu)+"_ts"+str(self.N_timesteps/self.N_cycles)+"_cycles"+str(self.N_cycles)		
		self.n=0
Example no. 26
def Mie_ab(m,x):
#  http://pymiescatt.readthedocs.io/en/latest/forward.html#Mie_ab
  mx = m*x
  nmax = np.round(2+x+4*(x**(1/3)))
  nmx = np.round(max(nmax,np.abs(mx))+16)
  n = np.arange(1,nmax+1)
  nu = n + 0.5

  sx = np.sqrt(0.5*np.pi*x)
  px = sx*jv(nu,x)

  p1x = np.append(np.sin(x), px[0:int(nmax)-1])
  chx = -sx*yv(nu,x)

  ch1x = np.append(np.cos(x), chx[0:int(nmax)-1])
  gsx = px-(0+1j)*chx
  gs1x = p1x-(0+1j)*ch1x

  # B&H Equation 4.89
  Dn = np.zeros(int(nmx),dtype=complex)
  for i in range(int(nmx)-1,1,-1):
    Dn[i-1] = (i/mx)-(1/(Dn[i]+i/mx))

  D = Dn[1:int(nmax)+1] # Dn(mx), drop terms beyond nMax
  da = D/m+n/x
  db = m*D+n/x

  an = (da*px-p1x)/(da*gsx-gs1x)
  bn = (db*px-p1x)/(db*gsx-gs1x)

  return an, bn
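The sx*jv(nu, x) products in Mie_ab above are the Riccati-Bessel functions psi_n(x) = sqrt(pi*x/2) * J_{n+1/2}(x). A quick self-contained check for n = 1, where psi_1(x) = sin(x)/x - cos(x):

import numpy as np
from scipy.special import jv

x = 2.7
print(np.sqrt(0.5*np.pi*x)*jv(1.5, x), np.sin(x)/x - np.cos(x))  # should agree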
def psf(xarray, yarray, a, R, l, I0):
    # xarray and yarray are arrays
    xv, yv = np.meshgrid(xarray, yarray)
    q_grid = np.sqrt(xv ** 2.0 + yv ** 2.0)
    x_arg_grid = (2.0 * np.pi * a * q_grid) / (l * R)
    image = I0 * ((2.0 * special.jv(1, x_arg_grid)) / x_arg_grid) ** 2.0
    return image
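Worth noting for the Airy-pattern psf above: the jinc-type kernel 2*J1(x)/x tends to 1 as x -> 0, so the on-axis value of image is I0; if the grid contains the exact origin the expression evaluates 0/0 there and needs a small guard. A minimal check of the limit:

import numpy as np
from scipy import special

x = 1e-8
print(2.0*special.jv(1, x)/x)  # ~1.0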
Example no. 28
    def eval_phi0(self, th):
        a = self.a
        nu = self.nu
        k = self.k

        r = self.boundary.eval_r(th)
        return jv((self.K+1/2)*nu, k*r) * (th - a) / (2*np.pi - a)
Example no. 29
def getftzer(Jzer,ngrid=128,Rpix=100):
    '''
    ; Compute the Fourier Transform of Zernike mode

    ; ngrid = 128 ; grid half-size, pixels
    ; Rpix = 100 ; pupil radius in pixels

    :param Jzer:
    :return:
    '''

    x = np.arange(-ngrid,ngrid)
    y = np.arange(-ngrid,ngrid)
    theta = np.arctan2(x,y)

    n,m = zern_num(Jzer)
    f = np.roll(np.roll(dist(2*ngrid),
                        ngrid,
                        axis=0),
                ngrid,
                axis=1)/(2*ngrid)*Rpix
    f[ngrid][ngrid] = 1e-3

    ftmod = np.sqrt(n+1.0)*jv(n+1,2*np.pi*f)/(np.pi*f)

    if m == 0:
        zz = ftmod*1j**(n/2.)
    else:
        if (Jzer%2 == 0):
            fact=np.sqrt(2.)*np.cos(m*theta)
        else:
            fact=np.sqrt(2.)*np.sin(m*theta)
        zz = ftmod*fact*(-1)**((n-m/2.))*1j**m

    return zz
Example no. 30
 def weights(self,nu,zeroArr):
     """
     return the weights for quadrature
     w_{nu k} = Y_nu(pi xi_{nu k}) / J_{nu+1}(pi xi_{nu k})
     """
     piXi = M.pi*zeroArr
     return SS.yv(nu,piXi)/SS.jv(nu+1,piXi)
Example no. 31
 def __call__(self, k):
     return k * self.pk(k) * jv(self.n, self.r*k)
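The __call__ above builds a Hankel-transform integrand, k * P(k) * J_n(k r). As a self-contained sketch of that kind of integral (with a stand-in P(k) = exp(-k**2), not the class's own power spectrum), the n = 0 transform has the closed form 0.5*exp(-r**2/4):

import numpy as np
from scipy.special import jv
from scipy.integrate import quad

r = 1.3
num, _ = quad(lambda k: k*np.exp(-k**2)*jv(0, k*r), 0, np.inf)
print(num, 0.5*np.exp(-r**2/4))  # should agree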
def bem2cyl(wave_cond, discrete_cyl, vpot_cyl, trunc_ord):
    """ Computes cylindrical amplitude coefficients from the velocity
    potential values on a cylinder.

    :param wave_cond: (water_depth, cfreq, wnum)
    :type wave_cond: tuple
    :param water_depth: water depth (m)
    :type water_depth: float
    :param cfreq: cyclic wave frequency (rad/s)
    :type cfreq: float
    :param wnum: wave number (rad/m) associated to that wave frequency and water depth
    :type wnum: float
    :param discrete_cyl: (radius_cyl, azimuth_cyl, axial_cyl)
    :type discrete_cyl: tuple
    :param radius_cyl: radius (m) of the cylinder
    :type radius_cyl: float
    :param azimuth_cyl: azimuthal discretization (rad) of the cylinder. An equispaced
                        discretization is assumed. Each element of the array is
                        an azimuth. Same axial discretization is assumed for each
                        azimuth
    :type azimuth_cyl: 1D numpy array
    :param axial_cyl: axial discretization (m) of the cylinder. Each element of the array is
                      the z-axial coordinate of a discrete point of the cylinder. Same
                      azimuthal discretization is assumed for each z-coordinate
    :type axial_cyl: 1D numpy array
    :param vpot_cyl: complex amplitude of the velocity potential (m**2/s) for each discrete point
                     of the cylinder and for the inputted water depth and wave frequency. shape
                     (extra axis, number of axial discretization, number of azimuthal
                     discretization)
    :type vpot_cyl: 3D numpy array
    :param trunc_ord: truncation order for the number of wave modes included.
                      Total number of wave modes is 2*trunc_ord+1
    :type trunc_ord: int
    """
    (water_depth, cfreq, wnum) = wave_cond
    (radius_cyl, azimuth_cyl, axial_cyl) = discrete_cyl
    dz = axial_cyl[1:] - axial_cyl[:-1]
    dth = azimuth_cyl[1] - azimuth_cyl[0]  # equispaced is assumed
    rightz = all(dz > 0)
    rightth = dth > 0
    rightth2 = azimuth_cyl[-1] == 2. * np.pi + azimuth_cyl[0] or azimuth_cyl[
        -1] == azimuth_cyl[0]
    # Build integration domain
    (z_cyl, th_cyl) = np.meshgrid(axial_cyl, azimuth_cyl, indexing='ij')
    # Initialize
    a_s = np.zeros((vpot_cyl.shape[0], 2 * trunc_ord + 1), dtype=complex)
    for n_mode, mode in enumerate(range(-trunc_ord, trunc_ord + 1)):
        integrand = vpot_cyl * np.cosh(wnum * (z_cyl + water_depth)) * np.exp(
            -1j * mode * th_cyl)
        # Integrate along th
        int_th = (integrand[:, :, 1:] +
                  integrand[:, :, :-1]).sum(axis=2) * .5 * dth
        if not rightth2:  # add last parallelepiped
            int_th += (integrand[:, :, 0] + integrand[:, :, -1]) * .5 * dth
        if not rightth:
            int_th *= -1
        # Integrate I_th along z
        int_th_z = ((int_th[:, 1:] + int_th[:, :-1]) * dz).sum(axis=1) * .5
        if not rightz:
            int_th_z *= -1
        # Cm
        cntm = -1j * cfreq / (2 * np.pi * 9.809)
        cntm *= 2 * np.cosh(wnum * water_depth)
        cntm /= water_depth * (1 + np.sinh(2 * wnum * water_depth) /
                               (2 * wnum * water_depth))
        cntm /= jv(mode, wnum * radius_cyl) - 1j * yv(mode, wnum * radius_cyl)
        # amplitude coefficients
        a_s[:, n_mode] = cntm * int_th_z
    return a_s
Example no. 33
def alpha_function_new(x, n, beta, R):
    output = beta * ss.jv(n, x) + (x / R) * ss.jvp(n, x)
    return output
Example no. 34
def exo8():
    v = np.linspace(0, 12, 12)
    x = np.linspace(0, 20, 100)
    v, x = np.meshgrid(v, x)
    besselj = jv(v, x)
    plotSurf(x, v, besselj, "x", "n", "Jn(x)", "Exo 8")
Example no. 35
    def loglkl(self, cosmo, data):

        # Omega_m contains all species!
        self.Omega_m = cosmo.Omega_m()
        self.small_h = cosmo.h()

        # needed for IA modelling:
        if ('A_IA' in data.mcmc_parameters) and ('exp_IA'
                                                 in data.mcmc_parameters):
            amp_IA = data.mcmc_parameters['A_IA'][
                'current'] * data.mcmc_parameters['A_IA']['scale']
            exp_IA = data.mcmc_parameters['exp_IA'][
                'current'] * data.mcmc_parameters['exp_IA']['scale']
            intrinsic_alignment = True
        elif ('A_IA'
              in data.mcmc_parameters) and ('exp_IA'
                                            not in data.mcmc_parameters):
            amp_IA = data.mcmc_parameters['A_IA'][
                'current'] * data.mcmc_parameters['A_IA']['scale']
            # redshift-scaling is turned off:
            exp_IA = 0.

            intrinsic_alignment = True
        else:
            intrinsic_alignment = False

        # One wants to obtain here the relation between z and r, this is done
        # by asking the cosmological module with the function z_of_r
        self.r, self.dzdr = cosmo.z_of_r(self.z_p)

        # Compute now the selection function p(r) = p(z) dz/dr normalized
        # to one. The np.newaxis helps to broadcast the one-dimensional array
        # dzdr to the proper shape. Note that p_norm is also broadcasted as
        # an array of the same shape as p_z
        if self.bootstrap_photoz_errors:
            # draw a random bootstrap n(z); borders are inclusive!
            random_index_bootstrap = np.random.randint(
                int(self.index_bootstrap_low),
                int(self.index_bootstrap_high) + 1)
            #print('Bootstrap index:', random_index_bootstrap)
            pz = np.zeros((self.nzmax, self.nzbins), 'float64')
            pz_norm = np.zeros(self.nzbins, 'float64')

            for zbin in xrange(self.nzbins):
                #ATTENTION: hard-coded subfolder!
                #index can be recycled since bootstraps for tomographic bins are independent!
                fname = os.path.join(
                    self.data_directory,
                    'Nz_{0:}/Nz_{0:}_Bootstrap/Nz_z{1:}_boot{2:}_{0:}.asc'.
                    format(self.nz_method, self.zbin_labels[zbin],
                           random_index_bootstrap))
                ztemp, hist_pz = np.loadtxt(fname, usecols=(0, 1), unpack=True)
                shift_to_midpoint = np.diff(ztemp)[0] / 2.
                spline_pz = itp.splrep(ztemp + shift_to_midpoint, hist_pz)
                z_mod = self.z_p  #+ self.shift_by_dz[zbin]
                mask_min = z_mod >= ztemp.min()
                mask_max = z_mod <= ztemp.max()
                mask = mask_min & mask_max
                pz[mask, zbin] = itp.splev(z_mod[mask], spline_pz)
                pz_norm[zbin] = np.sum(0.5 * (pz[1:, zbin] + pz[:-1, zbin]) *
                                       (self.z_p[1:] - self.z_p[:-1]))

            self.pr = pz * (self.dzdr[:, np.newaxis] / pz_norm)
        else:
            # use fiducial dn/dz loaded in the __init__:
            self.pr = self.pz * (self.dzdr[:, np.newaxis] / self.pz_norm)

        # get linear growth rate if IA are modelled:
        if intrinsic_alignment:
            self.rho_crit = self.get_critical_density()
            # derive the linear growth factor D(z)
            linear_growth_rate = np.zeros_like(self.z_p)
            #print(self.redshifts)
            for index_z, z in enumerate(self.z_p):
                try:
                    # for CLASS ver >= 2.6:
                    linear_growth_rate[
                        index_z] = cosmo.scale_independent_growth_factor(z)
                except:
                    # my own function from private CLASS modification:
                    linear_growth_rate[index_z] = cosmo.growth_factor_at_z(z)
            # normalize to unity at z=0:
            try:
                # for CLASS ver >= 2.6:
                linear_growth_rate /= cosmo.scale_independent_growth_factor(0.)
            except:
                # my own function from private CLASS modification:
                linear_growth_rate /= cosmo.growth_factor_at_z(0.)

        # Compute function g_i(r), that depends on r and the bin
        # g_i(r) = 2r(1+z(r)) int_r^+\infty drs p_r(rs) (rs-r)/rs
        for Bin in xrange(self.nzbins):
            # shift from first entry only useful if first entry is 0!
            #for nr in xrange(1, self.nzmax-1):
            for nr in xrange(self.nzmax - 1):
                fun = self.pr[nr:,
                              Bin] * (self.r[nr:] - self.r[nr]) / self.r[nr:]
                self.g[nr, Bin] = np.sum(0.5 * (fun[1:] + fun[:-1]) *
                                         (self.r[nr + 1:] - self.r[nr:-1]))
                self.g[nr, Bin] *= 2. * self.r[nr] * (1. + self.z_p[nr])
        #print('g(r) \n', self.g)

        # Get power spectrum P(k=l/r,z(r)) from cosmological module
        #self.pk_dm = np.zeros_like(self.pk)
        kmax_in_inv_Mpc = self.k_max_h_by_Mpc * self.small_h
        for index_l in xrange(self.nlmax):
            for index_z in xrange(self.nzmax):

                k_in_inv_Mpc = (self.l[index_l] + 0.5) / self.r[index_z]
                if (k_in_inv_Mpc > kmax_in_inv_Mpc):
                    pk_dm = 0.
                else:
                    pk_dm = cosmo.pk(k_in_inv_Mpc, self.z_p[index_z])

                if 'A_bary' in data.mcmc_parameters:
                    A_bary = data.mcmc_parameters['A_bary'][
                        'current'] * data.mcmc_parameters['A_bary']['scale']
                    self.pk[index_l,
                            index_z] = pk_dm * self.baryon_feedback_bias_sqr(
                                k_in_inv_Mpc / self.small_h,
                                self.z_p[index_z],
                                A_bary=A_bary)
                else:
                    self.pk[index_l, index_z] = pk_dm
        '''
        # Recover the non_linear scale computed by halofit. If no scale was
        # affected, set the scale to one, and make sure that the nuisance
        # parameter epsilon is set to zero
        if cosmo.nonlinear_method == 0:
            self.k_sigma[:] = 1.e6
        else:
            self.k_sigma = cosmo.nonlinear_scale(self.z_p, self.nzmax)

        # Define the alpha function, that will characterize the theoretical
        # uncertainty. Chosen to be 0.001 at low k, raise between 0.1 and 0.2
        # to self.theoretical_error
        if self.theoretical_error != 0:
            for index_l in xrange(self.nlmax):
                k = (self.l[index_l] + 0.5) / self.r
                self.alpha[index_l, :] = np.log(1. + k[:] / self.k_sigma[:]) / (1. + np.log(1. + k[:] / self.k_sigma[:])) * self.theoretical_error

        # recover the e_th_nu part of the error function
        e_th_nu = self.coefficient_f_nu * cosmo.Omega_nu / cosmo.Omega_m()

        # Compute the Error E_th_nu function
        if 'epsilon' in self.use_nuisance:
            for index_l in xrange(self.nlmax):
                self.E_th_nu[index_l, :] = np.log(1. + self.l[index_l] / self.k_sigma[:] * self.r[:]) / (1. + np.log(1. + self.l[index_l] / self.k_sigma[:] * self.r[:])) * e_th_nu

        # Add the error function, with the nuisance parameter, to P_nl_th, if
        # the nuisance parameter exists
        for index_l in xrange(self.nlmax):
            epsilon = data.mcmc_parameters['epsilon']['current'] * (data.mcmc_parameters['epsilon']['scale'])
            self.pk[index_l, :] *= (1. + epsilon * self.E_th_nu[index_l, :])
        '''

        Cl_GG_integrand = np.zeros_like(self.Cl_integrand)
        Cl_GG = np.zeros_like(self.Cl)

        if intrinsic_alignment:
            Cl_II_integrand = np.zeros_like(self.Cl_integrand)
            Cl_II = np.zeros_like(self.Cl)

            Cl_GI_integrand = np.zeros_like(self.Cl_integrand)
            Cl_GI = np.zeros_like(self.Cl)

        dr = self.r[1:] - self.r[:-1]
        # Start loop over l for computation of C_l^shear
        # Start loop over l for computation of E_l
        for il in xrange(self.nlmax):
            # find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r))
            for Bin1 in xrange(self.nzbins):
                for Bin2 in xrange(Bin1, self.nzbins):
                    Cl_GG_integrand[:, self.one_dim_index(
                        Bin1, Bin2
                    )] = self.g[:, Bin1] * self.g[:,
                                                  Bin2] / self.r**2 * self.pk[
                                                      il, :]
                    #print(self.Cl_integrand)
                    if intrinsic_alignment:
                        factor_IA = self.get_IA_factor(self.z_p,
                                                       linear_growth_rate,
                                                       amp_IA, exp_IA)
                        #print(self.eta_r[1:, zbin1].shape)
                        Cl_II_integrand[:, self.one_dim_index(
                            Bin1, Bin2
                        )] = self.pr[:,
                                     Bin1] * self.pr[:,
                                                     Bin2] * factor_IA**2 / self.r**2 * self.pk[
                                                         il, :]
                        Cl_GI_integrand[:, self.one_dim_index(Bin1, Bin2)] = (
                            self.g[:, Bin1] * self.pr[:, Bin2] +
                            self.g[:, Bin2] * self.pr[:, Bin1]
                        ) * factor_IA / self.r**2 * self.pk[il, :]
                    '''
                    if self.theoretical_error != 0:
                        self.El_integrand[1:, self.one_dim_index(Bin1, Bin2)] = self.g[:, Bin1] * self.g[:, Bin2] / self.r**2 * self.pk[il, :] * self.alpha[il, :]
                    '''

            # Integrate over r to get C_l^shear_ij = P_ij(l)
            # C_l^shear_ij = 9/16 Omega0_m^2 H_0^4 \sum_0^rmax dr (g_i(r)
            # g_j(r) /r**2) P(k=l/r,z(r)) dr
            # It is then multiplied by 9/16*Omega_m**2
            # and then by (h/2997.9)**4 to be dimensionless
            # (since P(k)*dr is in units of Mpc**4)
            for Bin in xrange(self.nzcorrs):
                Cl_GG[il, Bin] = np.sum(
                    0.5 *
                    (Cl_GG_integrand[1:, Bin] + Cl_GG_integrand[:-1, Bin]) *
                    dr)
                Cl_GG[il, Bin] *= 9. / 16. * self.Omega_m**2
                Cl_GG[il, Bin] *= (self.small_h / 2997.9)**4

                if intrinsic_alignment:
                    Cl_II[il,
                          Bin] = np.sum(0.5 * (Cl_II_integrand[1:, Bin] +
                                               Cl_II_integrand[:-1, Bin]) * dr)

                    Cl_GI[il,
                          Bin] = np.sum(0.5 * (Cl_GI_integrand[1:, Bin] +
                                               Cl_GI_integrand[:-1, Bin]) * dr)
                    # here we divide by 4, because we get a 2 from g(r)!
                    Cl_GI[il, Bin] *= 3. / 4. * self.Omega_m
                    Cl_GI[il, Bin] *= (self.small_h / 2997.9)**2
                '''
                if self.theoretical_error != 0:
                    self.El[il, Bin] = np.sum(0.5 * (self.El_integrand[1:, Bin] + self.El_integrand[:-1, Bin]) * dr)
                    self.El[il, Bin] *= 9. / 16. * self.Omega_m**2
                    self.El[il, Bin] *= (self.small_h / 2997.9)**4
                '''
            '''
            for Bin1 in xrange(self.nzbins):
                Cl_GG[il, self.one_dim_index(Bin1, Bin1)] += self.noise
            '''
        if intrinsic_alignment:
            self.Cl = Cl_GG + Cl_GI + Cl_II
        else:
            self.Cl = Cl_GG
            #print(Cl_GG)
            #print(self.Cl)

        # Spline Cl[il,Bin1,Bin2] along l
        for Bin in xrange(self.nzcorrs):
            self.spline_Cl[Bin] = list(itp.splrep(self.l, self.Cl[:, Bin]))

        # Interpolate Cl at values lll and store results in Cll
        for Bin in xrange(self.nzcorrs):
            self.Cll[Bin, :] = itp.splev(self.lll[:], self.spline_Cl[Bin])

        # Start loop over theta values
        for it in xrange(self.nthetatot):
            ilmax = self.il_max[it]

            self.BBessel0[:ilmax] = special.j0(self.lll[:ilmax] *
                                               self.theta[it] * self.a2r)
            self.BBessel4[:ilmax] = special.jv(
                4, self.lll[:ilmax] * self.theta[it] * self.a2r)

            # Here is the actual trapezoidal integral giving the xi's:
            # - in more explicit style:
            # for Bin in xrange(self.nzcorrs):
            #     for il in xrange(ilmax):
            #         self.xi1[it, Bin] = np.sum(self.ldl[il]*self.Cll[Bin,il]*self.BBessel0[il])
            #         self.xi2[it, Bin] = np.sum(self.ldl[il]*self.Cll[Bin,il]*self.BBessel4[il])
            # - in more compact and vectorizable style:
            self.xi1[it, :] = np.sum(self.ldl[:ilmax] * self.Cll[:, :ilmax] *
                                     self.BBessel0[:ilmax],
                                     axis=1)
            self.xi2[it, :] = np.sum(self.ldl[:ilmax] * self.Cll[:, :ilmax] *
                                     self.BBessel4[:ilmax],
                                     axis=1)

        # normalisation of xi's
        self.xi1 = self.xi1 / (2. * math.pi)
        self.xi2 = self.xi2 / (2. * math.pi)

        # Spline the xi's
        for Bin in xrange(self.nzcorrs):
            self.xi1_theta[Bin] = list(itp.splrep(self.theta, self.xi1[:,
                                                                       Bin]))
            self.xi2_theta[Bin] = list(itp.splrep(self.theta, self.xi2[:,
                                                                       Bin]))

        # Get xi's in same column vector format as the data
        #iz = 0
        #for Bin in xrange(self.nzcorrs):
        #    iz = iz + 1  # this counts the bin combinations
        #    for i in xrange(self.ntheta):
        #        j = (iz-1)*2*self.ntheta
        #        self.xi[j+i] = itp.splev(
        #            self.theta_bins[i], self.xi1_theta[Bin])
        #        self.xi[self.ntheta + j+i] = itp.splev(
        #            self.theta_bins[i], self.xi2_theta[Bin])
        # or in more compact/vectorizable form:
        iz = 0
        for Bin in xrange(self.nzcorrs):
            iz = iz + 1  # this counts the bin combinations
            j = (iz - 1) * 2 * self.ntheta
            self.xi[j:j + self.ntheta] = itp.splev(
                self.theta_bins[:self.ntheta], self.xi1_theta[Bin])
            self.xi[j + self.ntheta:j + 2 * self.ntheta] = itp.splev(
                self.theta_bins[:self.ntheta], self.xi2_theta[Bin])

        # final chi2
        vec = self.xi[self.mask_indices] - self.xi_obs[self.mask_indices]
        #print(self.xi_obs[self.mask_indices], len(self.xi_obs[self.mask_indices]))
        #print(self.xi[self.mask_indices], len(self.xi[self.mask_indices]))

        # this is for running smoothly with MultiNest
        # (in initial checking of prior space, there might occur weird solutions)
        if np.isinf(vec).any() or np.isnan(vec).any():
            chi2 = 2e12
        else:
            # don't invert that matrix...
            # use the Cholesky decomposition instead:
            yt = solve_triangular(self.cholesky_transform, vec, lower=True)
            chi2 = yt.dot(yt)

        return -chi2 / 2.
Example no. 36
 def model(self, x, p):
     '''
     Base Bessel function modulated spectrum model for micromotion.
     Using definition from Pruttivarasin thesis. 
     p = [center, scale, gamma, offset, modulation depth, sideband splitting]
     '''
     p[2] = abs(p[2]) # fwhm is positive
     return p[3] +  p[1]*p[2]*0.5*((sp.jv(-6,p[4])**2/((x - p[0] - 6*p[5])**2 + (0.5*p[2])**2)) + 
            (sp.jv(-5,p[4])**2/((x - p[0] - 5*p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(-4,p[4])**2/((x - p[0] - 4*p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(-3,p[4])**2/((x - p[0] -3*p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(-2,p[4])**2/((x - p[0] -2*p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(-1,p[4])**2/((x - p[0] - p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(0,p[4])**2/((x - p[0])**2 + (0.5*p[2])**2)) +
            (sp.jv(1,p[4])**2/((x - p[0] + p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(2,p[4])**2/((x - p[0] + 2*p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(3,p[4])**2/((x - p[0] + 3*p[5])**2 + (0.5*p[2])**2)) +
            (sp.jv(4,p[4])**2/((x - p[0] + 4*p[5])**2 + (0.5*p[2])**2)) + 
            (sp.jv(5,p[4])**2/((x - p[0] + 5*p[5])**2 + (0.5*p[2])**2)) + 
            (sp.jv(6,p[4])**2/((x - p[0] + 6*p[5])**2 + (0.5*p[2])**2)))
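The sideband weights in the model above are jv(n, p[4])**2 for n = -6..6; they are (nearly) normalized thanks to the identity sum over all integer n of J_n(x)**2 = 1. A quick self-contained check:

import numpy as np
from scipy import special as sp

beta = 1.3
n = np.arange(-50, 51)
print(np.sum(sp.jv(n, beta)**2))  # ~1.0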
Example no. 37
    def Floquet_edge2(self, kx, N):
        """
        This function defines the Floquet edge states for case II
        
        Parameters
        ----------
        N = dimension of matrix
        
        Returns
        -------
        Tridiagonal matrix
        """

        # Parameter
        vJ = self.J * self.S
        vJc = self.Jc * self.S
        v0 = 3 * vJ + vJc

        Ep = 0.5 * sqrt(3 * self.Ey**2 + self.Ex**2 +
                        2 * sqrt(3) * self.Ey * self.Ex * cos(self.phi))
        Em = 0.5 * sqrt(3 * self.Ey**2 + self.Ex**2 -
                        2 * sqrt(3) * self.Ey * self.Ex * cos(self.phi))
        P1 = np.arctan(self.Ex * sin(self.phi) /
                       (self.Ey * sqrt(3) + self.Ex * cos(self.phi)))
        P2 = np.arctan(self.Ex * sin(self.phi) /
                       (self.Ey * sqrt(3) - self.Ex * cos(self.phi)))

        # Define H0
        r0 = vJ * (jv(0, self.Ex) + jv(0, Em) * exp(1j * kx))

        r0s = r0.conj()

        H0 = np.array([[v0, -r0s, vJc, 0], [-r0, v0, 0, vJc],
                       [-vJc, 0, -v0, r0], [0, -vJc, r0s, -v0]])

        # Define H1 and H-1
        r1 = vJ * (jv(1, self.Ex) * exp(1j * self.phi) -
                   jv(1, Em) * exp(1j * kx) * exp(-1j * P2))

        r1s = vJ * (-jv(1, self.Ex) * exp(1j * self.phi) +
                    jv(1, Em) * exp(-1j * kx) * exp(-1j * P2))

        Hp = np.array([[0, -r1s, 0, 0], [-r1, 0, 0, 0], [0, 0, 0, r1],
                       [0, 0, r1s, 0]])

        Hm = Hp.conj().T

        # Floquet Hamiltonian for the diagonal matrix
        com1 = dot(H0, Hm) - dot(Hm, H0)
        com2 = dot(H0, Hp) - dot(Hp, H0)
        com3 = dot(Hm, Hp) - dot(Hp, Hm)

        Floquet_H0 = H0 - (1 / self.omega) * (com1 - com2 + com3)

        r10 = vJ * jv(0, Ep)
        H10 = np.array([[0, -r10, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
                        [0, 0, r10, 0]])
        H20 = np.array([[0, 0, 0, 0], [-r10, 0, 0, 0], [0, 0, 0, r10],
                        [0, 0, 0, 0]])

        r1p = vJ * jv(1, Ep) * exp(1j * P1)
        r1m = -vJ * jv(1, Ep) * exp(1j * P1)

        H1p = np.array([[0, 0, 0, 0], [-r1p, 0, 0, 0], [0, 0, r1p, 0],
                        [0, 0, 0, 0]])
        H1m = H1p.conj().T

        H2p = np.array([[0, 0, 0, 0], [-r1m, 0, 0, 0], [0, 0, 0, r1m],
                        [0, 0, 0, 0]])
        H2m = H2p.conj().T

        # Floquet Hamiltonian for the off-diagonal matrix
        com11 = dot(H10, H1m) - dot(H1m, H10)
        com21 = dot(H10, H1p) - dot(H1p, H10)
        com31 = dot(H1m, H1p) - dot(H1p, H1m)

        Floquet_H10 = H10 - (1 / self.omega) * (com11 - com21 + com31)

        # Floquet Hamiltonian for the off-diagonal matrix
        com12 = dot(H20, H2m) - dot(H2m, H20)
        com22 = dot(H20, H2p) - dot(H2p, H20)
        com32 = dot(H2m, H2p) - dot(H2p, H2m)

        Floquet_H20 = H20 - (1 / self.omega) * (com12 - com22 + com32)

        # Componenets of the tridiagonal matrices
        c0 = diags(np.ones(N), 0).toarray()
        cp = diags(np.ones(N - 1), 1).toarray()
        cm = diags(np.ones(N - 1), -1).toarray()

        # Tridiagonal Hamiltonian
        H_tot = np.kron(c0, Floquet_H0) + np.kron(cp, Floquet_H10) + np.kron(
            cm, Floquet_H20)
        return H_tot
def _wignerd(j, m, n=0, approx_lim=10):
    '''
        Wigner "small d" matrix. (Euler z-y-z convention)
        example::
            j = 2
            m = 1
            n = 0
            beta = linspace(0,pi,100)
            wd210 = _wignerd(j,m,n)(beta)

        some conditions have to be met::
             j >= 0
            -j <= m <= j
            -j <= n <= j

        The approx_lim determines at what point
        bessel functions are used. Default is when::
            j > m+10
            #  and
            j > n+10

        for integer l and n=0, we can use the spherical harmonics. If in
        addition m=0, we can use the ordinary legendre polynomials.
    '''

    if (j < 0) or (abs(m) > j) or (abs(n) > j):
        raise ValueError("_wignerd(j = {0}, m = {1}, n = {2}) value error.".
                         format(j, m, n) +
                         " Valid range for parameters: j>=0, -j<=m,n<=j.")

    if (j > (m + approx_lim)) and (j > (n + approx_lim)):
        #print('bessel (approximation)')
        return lambda beta: jv(m - n, j * beta)

    if (floor(j) == j) and (n == 0):
        if m == 0:
            #print('legendre (exact)')
            return lambda beta: legendre(j)(cos(beta))
        elif False:
            #print('spherical harmonics (exact)')
            a = sqrt(4. * pi / (2. * j + 1.))
            return lambda beta: a * conjugate(sph_harm(m, j, beta, 0.))

    jmn_terms = {
        j + n: (m - n, m - n),
        j - n: (n - m, 0.),
        j + m: (n - m, 0.),
        j - m: (m - n, m - n),
    }

    k = min(jmn_terms)
    a, lmb = jmn_terms[k]

    b = 2. * j - 2. * k - a

    if (a < 0) or (b < 0):
        raise ValueError(
            "_wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j, m, n)
            + " Encountered negative values in (a,b) = ({0},{1})".format(a, b))

    coeff = power(-1., lmb) * sqrt(comb(2. * j - k, k + a)) * \
        (1. / sqrt(comb(k + b, b)))

    #print('jacobi (exact)')
    return lambda beta: coeff \
        * power(sin(0.5 * beta), a) \
        * power(cos(0.5 * beta), b) \
        * jacobi(k, a, b)(cos(beta))
Example no. 39
           jac=lambda x: np.array((-2 * .5 * (1 - x[0]) - 4 * x[0] *
                                   (x[1] - x[0]**2), 2 * (x[1] - x[0]**2))))
"""prefer BFGS or L-BFGS"""
o.minimize(lambda x: .5 * (1 - x[0])**2 + (x[1] - x[0]**2)**2, [2, -1],
           method='BFGS')
"""least squares - predict coefficients"""
x = np.array(range(10))
y = np.power(x, 2) - 2 * x + 1
A = np.c_[np.power(x, 2), x, np.ones(len(x))]
np.linalg.lstsq(A, y)  #Gives 1, -2, 1 as first arg
"""f root"""
o.fsolve(lambda x: x**2 - 4, 5)
o.fsolve(lambda x: (x - 2)**2 - 4, 5)
"""-----------------integrate----------------------------------"""
"""Integration. Returns integral, error"""
f = lambda x: special.jv(2.5, x)
integrate.quad(f, 0, 10)
"""Integration by samples"""
x = np.linspace(0, 10, 10)
y = [f(xi) for xi in x]
integrate.simps(y, x)
"""-------------------interpolate--------------------------------"""
"""same can be done with splines"""
"""interpolation. Function by dots. Cubic best"""
y = [xi**2 + 2 * xi + 15 for xi in x]
x2 = np.linspace(0, 10, 100)
f = interp1d(x, y)
f2 = interp1d(x, y, kind='cubic')
plt.plot(x, y, 'o', x2, f(x2), '-', x2, f2(x2), '--')
plt.legend(['data', 'linear', 'cubic'], loc='best')
plt.show()
    def tau2D_cylinder(self,
                       energyRange,
                       nk,
                       Uo,
                       m,
                       vfrac,
                       valley,
                       dk_len,
                       ro,
                       n=2000):
        """
        This is a fast algorithm that uses Fermi’s golden rule to compute the energy-dependent
        electron scattering rate due to cylindrical nanoparticles or pores extended perpendicular
        to the electrical current.
        """

        meff = np.array(m) * thermoelectricProperties.me
        ko = 2 * np.pi / self.latticeParameter * np.array(valley)
        del_k = 2 * np.pi / self.latticeParameter * dk_len * np.array(
            [1, 1, 1])
        N = vfrac / np.pi / ro**2

        kx = np.linspace(ko[0], ko[0] + del_k[0], nk[0],
                         endpoint=True)  # kpoints mesh
        ky = np.linspace(ko[1], ko[1] + del_k[1], nk[1],
                         endpoint=True)  # kpoints mesh
        kz = np.linspace(ko[2], ko[2] + del_k[2], nk[2],
                         endpoint=True)  # kpoints mesh
        [xk, yk, zk] = np.meshgrid(kx, ky, kz)
        xk_ = np.reshape(xk, -1)
        yk_ = np.reshape(yk, -1)
        zk_ = np.reshape(zk, -1)
        kpoint = np.array([xk_, yk_, zk_])
        mag_kpoint = norm(kpoint, axis=0)

        E = thermoelectricProperties.hBar**2 / 2 * \
        ((kpoint[0, :] - ko[0])**2 / meff[0] + (kpoint[1, :] - ko[1])**2 / meff[1] + \
         (kpoint[2, :] - ko[2]) ** 2 / meff[2]) * thermoelectricProperties.e2C

        t = np.linspace(0, 2 * np.pi, n)
        a = np.expand_dims(np.sqrt(2 * meff[1] / thermoelectricProperties.hBar**2 * \
                                   E / thermoelectricProperties.e2C), axis=0)
        b = np.expand_dims(np.sqrt(2 * meff[2] / thermoelectricProperties.hBar**2 * \
                                   E / thermoelectricProperties.e2C), axis=0)

        ds = np.sqrt((a.T * np.sin(t))**2 + (b.T * np.cos(t))**2)

        cos_theta = ((a * kpoint[0]).T * np.cos(t) + (b * kpoint[1]).T * np.sin(t) + \
                     np.expand_dims(kpoint[2]**2, axis=1)) / \
        np.sqrt(a.T**2 * np.cos(t)**2 + b.T**2 * np.sin(t)**2 + \
                np.expand_dims(kpoint[2]**2, axis=1)) / np.expand_dims(mag_kpoint, axis=1)

        delE = thermoelectricProperties.hBar**2 * \
        np.abs((a.T * np.cos(t) - ko[0]) / meff[0] + \
               (b.T * np.sin(t) - ko[1]) / meff[1] + (np.expand_dims(kpoint[2]**2, axis=1) - ko[2] / meff[2]))

        qx = np.expand_dims(kpoint[0], axis=1) - a.T * np.cos(t)
        qy = np.expand_dims(kpoint[1], axis=1) - b.T * np.sin(t)
        qr = np.sqrt(qx**2 + qy**2)

        tau = np.empty((len(ro), len(E)))

        for r_idx in np.arange(len(ro)):
            J = jv(1, ro[r_idx] * qr)
            SR = 2 * np.pi / thermoelectricProperties.hBar * Uo**2 * (
                2 * np.pi)**3 * (ro[r_idx] * J / qr)**2
            f = SR * (1 - cos_theta) / delE * ds
            int_ = np.trapz(f, t, axis=1)
            tau[r_idx] = 1 / (N[r_idx] / (2 * np.pi)**3 *
                              int_) * thermoelectricProperties.e2C

        Ec, indices, return_indices = np.unique(E,
                                                return_index=True,
                                                return_inverse=True)

        tau_c = np.empty((len(ro), len(indices)))

        tauFunctionEnergy = np.empty((len(ro), len(energyRange[0])))

        for r_idx in np.arange(len(ro)):
            tau_c[r_idx] = accum(return_indices,
                                 tau[r_idx],
                                 func=np.mean,
                                 dtype=float)

        for tau_idx in np.arange(len(tau_c)):
            ESpline = PchipInterpolator(Ec[30:], tau_c[tau_idx, 30:])
            tauFunctionEnergy[tau_idx] = ESpline(energyRange)

        return tauFunctionEnergy
Example no. 41
def analyticalTFpupil(x):
    return pupilRadius**2 * scispe.jv(1, x) / x * 2 * np.pi
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import special
from scipy import integrate

result = integrate.quad(lambda x: special.jv(4,x), 0, 20)
print(result )
#the following line is calculating Gaussian Integral using quad function
print("Gaussian integral", np.sqrt(np.pi), integrate.quad(lambda x: np.exp(-x**2),-np.inf, np.inf))
                phi = 0.0 * pi + np.arctan(y / x)
            if ((x < 0.0) & (y > 0.0)):
                phi = 1.0 * pi + np.arctan(y / x)
            if ((x < 0.0) & (y < 0.0)):
                phi = 1.0 * pi + np.arctan(y / x)
            if ((x > 0.0) & (y < 0.0)):
                phi = 2.0 * pi + np.arctan(y / x)

            j_x = q * -1.0 * r_cy * omega * np.sin(omega * t * Dt + theta_0)
            j_y = q * 1.0 * r_cy * omega * np.cos(omega * t * Dt + theta_0)

            j_rho = np.cos(phi) * j_x + np.sin(phi) * j_y
            j_phi = np.cos(phi) * j_y - np.sin(phi) * j_x

            E_rho_A = omega * mu_0 / (k_cut**2 * rho) * 1.0 * np.cos(
                phi) * sp.jv(1, k_cut * rho) * np.sin(omega * t * Dt)
            E_rho_B = omega * mu_0 / (k_cut**2 * rho) * -1.0 * np.sin(
                phi) * sp.jv(1, k_cut * rho) * np.sin(omega * t * Dt)

            E_phi_A = -1.0 * omega * mu_0 / (2 * k_cut) * np.sin(phi) * (
                sp.jv(0, k_cut * rho) - sp.jv(2, k_cut * rho)) * np.sin(
                    omega * t * Dt)
            E_phi_B = -1.0 * omega * mu_0 / (2 * k_cut) * np.cos(phi) * (
                sp.jv(0, k_cut * rho) - sp.jv(2, k_cut * rho)) * np.sin(
                    omega * t * Dt)

            A = A + Dt * (E_rho_A * j_rho + E_phi_A * j_phi)
            B = B + Dt * (E_rho_B * j_rho + E_phi_B * j_phi)

        P_Nmw = (pi * omega * mu_0 * zeta /
                 (2 * k_cut**4)) * (1.841**2 - 1.0) * sp.jv(
Example no. 44
#A = np.array([[D[0], -D[0], 0],
#             [D[0], -D[0]-D[1], D[1]],
#             [0, D[2], -D[2]]])
#
#def dX_dt(sm, t=0):
#    return np.dot(A,sm)
#
#t = np.linspace(0, 10, 100)
#X0 = np.array([10, 5, 20])
#X, infodict = integrate.odeint(dX_dt, X0, t, full_output=True)
#
#plt.plot(t,X)
#plt.xlabel('Time')
#plt.ylabel('X')
#plt.legend(['X1','X2','X3'])
#plt.savefig('/home/tomer/my_books/python_in_hydrology/images/ode_system.png')
#

from scipy import optimize, special
x = np.arange(0, 10, 0.01)
for k in np.arange(0.5, 5.5):
    y = special.jv(k, x)
    f = lambda x: -special.jv(k, x)
    x_max = optimize.fminbound(f, 0, 6)
    plt.plot(x, y, lw=3)
    plt.plot([x_max], [special.jv(k, x_max)], 'rs', ms=12)
plt.title('Different Bessel functions and their local maxima')
plt.savefig('/home/tomer/my_books/python_in_hydrology/images/inverse.png')
plt.close()
Example no. 45
zetaH = zetaH[0, :]
zetaV = zetaV[0, :]
rtol = htarg['rtol']
atol = htarg['atol']
nquad = htarg['nquad']
maxint = htarg['maxint']
pts_per_dec = htarg['pts_per_dec']
diff_quad = htarg['diff_quad']
a = htarg['a']
b = htarg['b']
limit = htarg['limit']
g_x, g_w = special.roots_legendre(nquad)
b_zero = np.pi * np.arange(1.25, maxint + 1)
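# Refine the initial guesses pi*(n + 1/4) for the zeros of J1 by Newton's method, using J1'(x) = J1(x)/x - J2(x)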
for i in range(10):
    b_x0 = special.j1(b_zero)
    b_x1 = special.jv(2, b_zero)
    b_h = -b_x0 / (b_x0 / b_zero - b_x1)
    b_zero += b_h
    if all(np.abs(b_h) < 8 * np.finfo(float).eps * b_zero):
        break
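# Map the Gauss-Legendre nodes onto the intervals between consecutive J1 zeros;
# BJ0 and BJ1 are J0 and J1 sampled at those nodes, pre-multiplied by the quadrature weights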
xint = np.concatenate((np.array([1e-20]), b_zero))
dx = np.repeat(np.diff(xint) / 2, nquad)
Bx = dx * (np.tile(g_x, maxint) + 1) + np.repeat(xint[:-1], nquad)
BJ0 = special.j0(Bx) * np.tile(g_w, maxint)
BJ1 = special.j1(Bx) * np.tile(g_w, maxint)
intervals = xint / off[:, None]
lambd = Bx / off[:, None]
ang_fact = kernel.angle_factor(angle, ab, msrc, mrec)
# 1 Spline version
start = np.log(lambd.min())
stop = np.log(lambd.max())
Esempio n. 46
0
def handleTE(st,
             modes=[0, 0],
             type_of_waveguide="Rectangular",
             A=10,
             B=5,
             R=5):
    if type_of_waveguide == "Rectangular":

        x = np.linspace(0, A, 101)
        y = np.linspace(0, B, 101)
        X, Y = np.meshgrid(x, y)
        M = int(modes[0])
        N = int(modes[1])
        par = TE_TM_Functions(M, N, A, B)
        if M == 0 and N == 0:
            st.error("m and n cannot be 0 at the same time")
            return
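        # E-field streamlines of the TE_mn mode: u ~ cos(m*pi*x/A)*sin(n*pi*y/B),
        # v ~ -sin(m*pi*x/A)*cos(n*pi*y/B) (amplitude factors omitted)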
        u = np.cos(M * PI / A * X) * np.sin(N * PI / B * Y)
        v = -1 * np.sin(M * PI / A * X) * np.cos(N * PI / B * Y)
        fig, ax = plt.subplots()
        plt.streamplot(X, Y, u, v, color="xkcd:azure")
        plt.axis("scaled")
        st.subheader("E field")
        plt.xlim(0, A)
        plt.ylim(0, B)

        st.pyplot(fig)
        u = np.sin(M * PI / A * X) * np.cos(N * PI / B * Y)
        v = np.cos(M * PI / A * X) * np.sin(N * PI / B * Y)
        fig, ax = plt.subplots()
        plt.streamplot(x, y, u, v, color="red")
        plt.axis("scaled")
        st.subheader("H field")
        plt.xlim(0, A)
        plt.ylim(0, B)

        st.pyplot(fig)
        st.subheader("Values")
        st.write(
            pd.DataFrame({
                "Parameter":
                ["Kc", "Fc", "Beta-g", "Vg", "Zin", "Zg", "lambda-g"],
                "Value": [
                    par.Kc(),
                    par.Fc(),
                    par.beta_g(),
                    par.v_G(),
                    par.Z_in(),
                    par.Z_G_TE(),
                    par.lambda_G(),
                ],
                "Unit": ["1/m", "Hz", "1/m", "m/s", "Ohm", "Ohm", "m"],
            }))

    else:
        r = np.linspace(0, R, 101)
        t = np.linspace(0, 2 * PI, 101)
        T, RAD = np.meshgrid(t, r)
        N = int(modes[0])
        P = int(modes[1])
        if P == 0:
            st.error("p cannot be 0!")
            return

        X = special.jnp_zeros(N, P)
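        # X holds the first P zeros of Jn'(x); X[-1] is the p-th zero, which sets the
        # TE_np cutoff wavenumber kc = x'_np / R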
        par = Circular_TE_TM_Functions(N, P, 2.3e-2)
        U = special.jv(N, X[-1].round(3) / R * RAD) * np.sin(N * T)
        V = special.jvp(N, X[-1].round(3) / R * RAD) * np.cos(N * T)
        plt.axis("scaled")
        fig, ax = plt.subplots()
        plt.polar(2 * PI, R)
        plt.streamplot(T, RAD, V, U, color="xkcd:azure")
        plt.axis("scaled")
        st.subheader("E field")

        st.pyplot(fig)
        st.markdown("**Scale: 5units = 2.3 cm**")
        U = -1 * special.jv(N, X[-1].round(3) / R * RAD) * np.cos(N * T)
        V = special.jv(N, X[-1].round(3) / R * RAD) * np.sin(N * T)
        fig, ax = plt.subplots()
        plt.polar(2 * PI, R)
        plt.streamplot(T, RAD, V, U, color="red")
        plt.axis("scaled")
        st.subheader("H field")

        st.pyplot(fig)
        st.markdown("**Scale: 5units = 2.3 cm**")
        st.subheader("Values")
        st.write(
            pd.DataFrame({
                "Parameter":
                ["Kc", "Fc", "Beta-g", "Vg", "Zin", "Zg", "lambda-g"],
                "Value": [
                    par.Kc_TE(),
                    par.Fc_TE(),
                    par.beta_g_TE(),
                    par.v_G_TE(),
                    par.Z_in(),
                    par.Z_G_TE(),
                    par.lambda_G_TE(),
                ],
                "Unit": ["1/m", "Hz", "1/m", "m/s", "Ohm", "Ohm", "m"],
            }))
Esempio n. 47
0
def bessel_func(k):
    return ((4 * spin - 2 *
             (k - 1)) * ss.jv(0.25,
                              k * np.sqrt(2) * dip_const * xaxis) *
            ss.jv(-0.25,
                  k * np.sqrt(2) * dip_const * xaxis))
def cohModelFunc(user_interface, context, queue, W_main, N, parameters,
                 parallel, debug):

    user_interface.update_outputText("Starting Besinc model function...")

    alpha = float(parameters[0])

    try:

        # parameters
        M = N / 2

        CL_pcohGS = None

        if parallel:
            #*******************************************************************
            # PyOpenCL kernel function
            #*******************************************************************

            # KERNEL: CODE EXECUTED ON THE GPU
            CL_pcohGS = Program(
                context, """
                __kernel void increase(__global float *res,
                                       __global float *data,
                                       const unsigned int N,
                                       const unsigned int M,
                                       const unsigned int i1,
                                       const unsigned int j1,
                                       const double x1,
                                       const double y1,
                                       const double a)
                {
                    int row =  get_global_id(0);
                    int col = get_global_id(1);

                    int y2 = -(row-M);
                    int x2 = (col-M);


                    double y22 = (double) y2;
                    double x22 = (double) x2;

                    double b1 = (double) x1-1*x2;
                    double b2 = (double) y1-1*y2;
                    double b  = (double) sqrt(b1*b1+b2*b2);
                    //double J1 = (double) 1 - pown(a*b,2)/8.0 + pown(a*b,4)/192.0 - pown(a*b,6)/9216.0 + pown(a*b,8)/737280.0 - pown(a*b,10)/88473600.0 + pown(a*b,12)/14863564800.0 ;

                    double c = (double) a*b;
                    double zero = (double) 0.0;
                    double sum = 0;




                    int i;
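                    // Maclaurin series for J1(a*b): sum over i of (-1)^i * (a*b/2)^(2*i+1) / (i! * (i+1)!),
                    // truncated after 80 terms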
                    for (i = 0; i < 80; i++)
                    {

                        double ii = (double) i;
                        double j1 = (double) 1+2*i;
                        double j2 = (double) i+1;
                        double j3 = (double) 2+i;
                        double arg = (double) a*b/2;
                        double actual = pow(-1,ii)*pow(arg,j1)/(tgamma(j2)*tgamma(j3));

                        sum = sum +  actual;

                    }

                    if (c>zero)
                    {
                    sum = sum/c ;
                    }
                    else
                    {
                    sum = 1;
                    }




                    double data_const = (double) data[col + N*row ];
                    double final      = (double) data_const*sum;

                    res[col + N*row ]= (float) final;

                }
            """).build()
            #___________________________________________________________________

            user_interface.update_outputText(
                "PyOpenCl will be used. Starting Cycle...")
            user_interface.update_outputText("__")

            for i1 in range(0, N):

                user_interface.update_outputTextSameLine(
                    str(round(i1 * 100. / N, 1)) + "% concluded (" + str(i1) +
                    "/" + str(N - 1) + ").")

                for j1 in range(0, N):

                    if not count_nonzero(W_main[i1, j1]) == 0:

                        # Defining
                        result = zeros((N, N)).astype(float32)
                        data = copy.copy(W_main[i1, j1].real)

                        # Radius of point P1
                        x1 = j1 - M
                        y1 = M - i1

                        # creating memory on gpu
                        mf = mem_flags

                        # Result memory
                        result_gpu_memory = Buffer(context,
                                                   mf.READ_WRITE
                                                   | mf.COPY_HOST_PTR,
                                                   hostbuf=result)

                        # Data Memory
                        data_gpu_memory = Buffer(context,
                                                 mf.READ_ONLY
                                                 | mf.COPY_HOST_PTR,
                                                 hostbuf=data)

                        # Running the program (kernel)
                        CL_pcohGS.increase(queue, result.shape, None,
                                           result_gpu_memory, data_gpu_memory,
                                           int32(N), int32(M), int32(i1),
                                           int32(j1), double(x1), double(y1),
                                           double(alpha))

                        #  Copying results to PCmemory
                        enqueue_copy(queue, result, result_gpu_memory)

                        # Copying results to matrices
                        W_main.real[i1][j1] = result

            user_interface.update_outputTextSameLine("\r" +
                                                     str(round(100.0, 1)) +
                                                     "% concluded")

        # Without PyOpenCL
        else:
            user_interface.update_outputText(
                "PyOpenCl will NOT be used. Starting Cycle...")
            for i1 in range(0, N):

                user_interface.update_outputTextSameLine(
                    str(round(i1 * 100. / N, 1)) + "% concluded (" + str(i1) +
                    "/" + str(N - 1) + ").")

                for j1 in range(0, N):

                    x1 = (j1 - M)
                    y1 = M - i1

                    for i2 in range(0, N):
                        for j2 in range(0, N):

                            x2 = j2 - M
                            y2 = M - i2

                            b = sqrt((x1 - x2)**2 + (y1 - y2)**2)

                            arg1 = alpha * b

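                            # Besinc (jinc) factor 2*J1(alpha*b)/(alpha*b), with limiting value 1 as the argument tends to 0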
                            if arg1 == 0:
                                y = 1
                            else:
                                y = (2 * jv(1, arg1) / arg1)

                            W_main.real[i1, j1, i2,
                                        j2] = W_main.real[i1, j1, i2, j2] * y

            user_interface.update_outputTextSameLine("\r" +
                                                     str(round(100.0, 1)) +
                                                     "% concluded")

    except Exception as error:
        user_interface.update_outputTextSameLine(str(error))

    return W_main
Esempio n. 49
0
File: psf.py Progetto: mvtea/sgl
def mag(
    lam, z, p
):  # Magnification as a function of wavelength, lens distance and optical offset
    a = (((2.0 * math.pi) / (lam)) *
         (math.sqrt(2 * rg / z))) * p  # Dimensionless Bessel variable
    return (4.0 * math.pi**2 * (rg / lam) * sci.jv(0, a)**2)
Esempio n. 50
0
def a(n, x, mA, mB):  # scattering coefficient a_n
    return (mB * jv(n, mB * x) * jvp(n, mA * x) - mA * jv(n, mA * x) * jvp(n, mB * x)) / \
        (mB * jv(n, mB * x) * h1vp(n, mA * x) -
         mA * h1v(n, mA * x) * jvp(n, mB * x))
Esempio n. 51
0
def calculate_FB_bases(L1):
    maxK = (2 * L1 + 1)**2 - 1

    L = L1 + 1
    R = L1 + 0.5

    truncate_freq_factor = 1.5

    if L1 < 2:
        truncate_freq_factor = 2

    xx, yy = np.meshgrid(range(-L, L + 1), range(-L, L + 1))

    xx = xx / R
    yy = yy / R

    ugrid = np.concatenate([yy.reshape(-1, 1), xx.reshape(-1, 1)], 1)
    tgrid, rgrid = cart2pol(ugrid[:, 0], ugrid[:, 1])

    num_grid_points = ugrid.shape[0]

    kmax = 15

    bessel = np.load(path_to_bessel)

    B = bessel[(bessel[:, 0] <= kmax)
               & (bessel[:, 3] <= np.pi * R * truncate_freq_factor)]

    idxB = np.argsort(B[:, 2])

    mu_ns = B[idxB, 2]**2

    ang_freqs = B[idxB, 0]
    rad_freqs = B[idxB, 1]
    R_ns = B[idxB, 2]

    num_kq_all = len(ang_freqs)
    max_ang_freqs = max(ang_freqs)

    Phi_ns = np.zeros((num_grid_points, num_kq_all), np.float32)

    Psi = []
    kq_Psi = []
    num_bases = 0
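    # Build the Fourier-Bessel basis: the radial part is J_k(R_kq * r) normalized by
    # |J_{k+1}(R_kq)| and set to zero outside the unit disk; k = 0 contributes one basis
    # function, k > 0 contributes a cos(k*theta) / sin(k*theta) pair (each scaled by sqrt(2))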

    for i in range(B.shape[0]):
        ki = ang_freqs[i]
        qi = rad_freqs[i]
        rkqi = R_ns[i]

        r0grid = rgrid * R_ns[i]

        F = special.jv(ki, r0grid)

        Phi = 1. / np.abs(special.jv(ki + 1, R_ns[i])) * F

        Phi[rgrid >= 1] = 0

        Phi_ns[:, i] = Phi

        if ki == 0:
            Psi.append(Phi)
            kq_Psi.append([ki, qi, rkqi])
            num_bases = num_bases + 1

        else:
            Psi.append(Phi * np.cos(ki * tgrid) * np.sqrt(2))
            Psi.append(Phi * np.sin(ki * tgrid) * np.sqrt(2))
            kq_Psi.append([ki, qi, rkqi])
            kq_Psi.append([ki, qi, rkqi])
            num_bases = num_bases + 2

    Psi = np.array(Psi)
    kq_Psi = np.array(kq_Psi)

    num_bases = Psi.shape[0]

    if num_bases > maxK:
        Psi = Psi[:maxK]
        kq_Psi = kq_Psi[:maxK]
    num_bases = Psi.shape[0]
    p = Psi.reshape(num_bases, 2 * L + 1, 2 * L + 1).transpose(1, 2, 0)
    psi = p[1:-1, 1:-1, :]
    # print(psi.shape)
    psi = psi.reshape((2 * L1 + 1)**2, num_bases)

    c = np.sqrt(np.sum(psi**2, 0).mean())

    psi = psi / c

    return psi, c, kq_Psi
Esempio n. 52
0
def alpha_function(x, n, beta):
    output = beta * ss.jv(n, x) + ss.jvp(n, x)
    return output
Esempio n. 53
0
# Bill Karr's Code for Problem 3 in Problem set 1

from __future__ import division

from scipy.special import jv
import numpy as np

# <codecell>

# Problem 3, Part A

bessel = np.array([jv(i, 20) for i in range(0, 51)])
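# Check the three-term recurrence J_n(20) = (2*(n-1)/20) * J_{n-1}(20) - J_{n-2}(20) and its relative error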
diff = np.array([
    (bessel[i] - (2 * (i - 1) * bessel[i - 1] / 20 - bessel[i - 2]))
    for i in range(2, 51)
])
relError = np.array([
    (bessel[i] - (2 *
                  (i - 1) * bessel[i - 1] / 20 - bessel[i - 2])) / bessel[i]
    for i in range(2, 51)
])

print "PART A"
print "n & scipy approx & LHS - RHS & % error"
for i in range(0, 49):
    print i + 2, "&", bessel[i], "&", diff[i], "&", 100 * abs(relError[i])

# <codecell>

# Problem 3, Part B
Esempio n. 54
0
def J1(x):
    return special.jv(1, x)
Esempio n. 55
0
def b(n, x, mA, mB):  # scattering coefficient b_n
    return (mA * jv(n, mB * x) * jvp(n, mA * x) - mB * jv(n, mA * x) * jvp(n, mB * x)) / \
        (mA * jv(n, mB * x) * h1vp(n, mA * x) -
         mB * h1v(n, mA * x) * jvp(n, mB * x))
Esempio n. 56
0
# define domain
# z = np.arange(-L, L, dz) # 2N + 1 domain points
z = np.linspace(-L, L,
                N + 1)  # N + 1 points (used for now because I'm not zero padding)
print(len(z))

# define magnetic constants
B = 1.5
b = 0.1

epsilon = 1 - B / 2

## define integral components (eq. 2.18)
# Bessel functions of the first (J1) and second (Y1) kind
bess_first = sp.jv(1, z)
bess_sec = sp.yn(1, z)


def mainIntegrand(S, c, z, N, L, b, B, epsilon):

    # define S derivatives (spectral)
    S_z = funcs.fftDeriv(S, z, order=1)
    S_zz = funcs.fftDeriv(S, z, order=2)

    # bessel functions
    def I(domain, order=1):
        # modified bessel of first kind
        return sp.iv(order, domain)

    def K(domain, order=1):
Esempio n. 57
0
def makePSF(
    wavelength=0.525,
    NA=1.4,
    nx=257,
    nz=257,
    dx=0.02,
    dz=0.02,
    RI=1.33,
    immRI=1.5,
    csRI=1.515,
    csthick=170,
    workingdistance=150,
    particledistance=0,
    num_basis=200,
    num_samples=1000,
    oversampling=1,
):
    # dx/dz are output pixel sizes in microns
    # RI is refractive index of sample
    # workingdistance in microns, working distance (immersion medium thickness) design value
    # particledistance in microns, particle distance from coverslip
    # Size of the PSF array, pixels

    ny = nx  # square ... not really necessary until this becomes 3D again

    ni, ni0 = tuplecheck(
        immRI)  # immersion medium RI experimental value, design value
    ng, ng0 = tuplecheck(csRI)  # coverslip RI experimental value, design value
    tg, tg0 = tuplecheck(
        csthick)  # coverslip thickness experimental value, design value

    # Precision control
    # num_basis    = 100      # Number of rescaled Bessels that approximate the phase function
    # num_samples  = 1000     # Number of pupil samples along radial direction
    # oversampling = 2        # Defines the upsampling ratio on the image space grid for computations

    # Scaling factors for the Fourier-Bessel series expansion
    min_wavelength = 0.436  # microns
    scaling_factor = (NA * (3 * np.arange(1, num_basis + 1) - 2) *
                      min_wavelength / wavelength)

    # Place the origin at the center of the final PSF array
    x0 = (nx - 1) / 2
    y0 = (ny - 1) / 2

    # Find the maximum possible radius coordinate of the PSF array by finding the distance
    # from the center of the array to a corner
    max_radius = round(sqrt((nx - x0) * (nx - x0) + (ny - y0) * (ny - y0))) + 1

    # Radial coordinates, image space
    r = dx * np.arange(0, oversampling * max_radius) / oversampling

    # Radial coordinates, pupil space
    a = min([NA, RI, ni, ni0, ng, ng0]) / NA
    rho = np.linspace(0, a, num_samples)

    # Stage displacements away from best focus
    z = dz * np.arange(-nz / 2, nz / 2) + dz / 2

    # Define the wavefront aberration
    NArho2 = NA**2 * rho**2
    OPDs = particledistance * np.sqrt(RI**2 - NArho2)  # OPD in the sample
    OPDi = (z.reshape(-1, 1) + workingdistance
            ) * np.sqrt(ni**2 - NArho2) - workingdistance * np.sqrt(
                ni0**2 - NArho2)  # OPD in the immersion medium
    OPDg = tg * np.sqrt(ng**2 - NArho2) - tg0 * np.sqrt(
        ng0**2 - NArho2)  # OPD in the coverslip
    W = 2 * np.pi / wavelength * (OPDs + OPDi + OPDg)

    # Sample the phase
    # Shape is (number of z samples by number of rho samples)
    phase = np.cos(W) + 1j * np.sin(W)

    # Define the basis of Bessel functions
    # Shape is (number of basis functions by number of rho samples)
    J = special.jv(0, scaling_factor.reshape(-1, 1) * rho)

    # Compute the approximation to the sampled pupil phase by finding the least squares
    # solution to the complex coefficients of the Fourier-Bessel expansion.
    # Shape of C is (number of basis functions by number of z samples).
    # Note the matrix transposes to get the dimensions correct.
    C, residuals, _, _ = np.linalg.lstsq(J.T, phase.T, rcond=None)

    # Which z-plane to compute
    # z0 = 24

    # The Fourier-Bessel approximation
    # est = J.T.dot(C[:, z0])

    b = 2 * np.pi * r.reshape(-1, 1) * NA / wavelength

    def J0(x):
        return special.jv(0, x)

    def J1(x):
        return special.jv(1, x)

    # See equation 5 in Li, Xue, and Blu
    denom = scaling_factor * scaling_factor - b * b
    R = (scaling_factor * J1(scaling_factor * a) * J0(b * a) * a -
         b * J0(scaling_factor * a) * J1(b * a) * a)
    R /= denom

    # The transpose places the axial direction along the first dimension of the array, i.e. rows
    # This is only for convenience.
    PSF_rz = (np.abs(R.dot(C))**2).T

    # Normalize to the maximum value
    PSF_rz /= np.max(PSF_rz)

    return PSF_rz
Esempio n. 58
0
def test_sp_special(v: int, x: float):
    return jv(v, x)
Esempio n. 59
0
def I(r):
    if (r == 0):
        return 1 / 2  # Using the limit found in the textbook.
    else:
        return (sp.jv(0, k * r) / (k * r))**2
Esempio n. 60
0
    out_file = "fresnel_fourier_1D.spec"
    f = open(out_file, 'w')
    header="#F %s \n\n#S  1 fresnel diffraction \n#N 3 \n#L X[m]  intensity  phase\n"%out_file
    f.write(header)
    for i in range(len(position_x)):
        out = numpy.array((position_x[i],  fieldIntensity[i],fieldPhase[i]))
        f.write(("%20.11e "*out.size+"\n") % tuple( out.tolist()))
    f.close()
    print ("File written to disk: %s"%out_file)

    #plots


    from matplotlib import pylab as plt
    from scipy.special import jv
    sin_theta = position_x / distance
    x = (2*numpy.pi/wavelength) * (aperture_diameter/2) * sin_theta
    U_vs_theta = 2*jv(1,x)/x
    I_vs_theta = U_vs_theta**2 * fieldIntensity.max()


    plt.figure(1)

    plt.plot(position_x*1e6, fieldIntensity, '-', position_x*1e6, I_vs_theta)
    plt.title("Fresnel Diffraction")
    plt.xlabel("X [um]")
    plt.ylabel("Intensity [a.u.]")
    plt.show()