def get_consts(orbit, Phi, delta=None):
    """Given an orbit and a potential (and a focal distance), derive the
    exact and approximate integrals of motion"""
    R, z, vR, vz, vphi = orbit
    gotdelta = False
    if delta is None:
        delta = find_delta(R, z, Phi)  # get a decent value for delta
        gotdelta = True
    uorb, vorb = Rz2uv(R, z, delta)  # get the current (u,v) coords of this orbit
    u0 = uorb  # for single orbits it doesn't matter what this is
    E = 0.5 * (vR ** 2.0 + vz ** 2.0 + vphi ** 2.0) + Phi(R, z)  # energy of this orbit
    Jphi = R * vphi  # angular momentum about the z-axis is an action
    # calculate the canonical momenta at the phase space point we possess
    pu0 = delta * (vR * np.cosh(uorb) * np.sin(vorb) + vz * np.sinh(uorb) * np.cos(vorb))
    pv0 = delta * (vR * np.sinh(uorb) * np.cos(vorb) - vz * np.cosh(uorb) * np.sin(vorb))
    # now the approx integrals of motion
    I3U = (
        E * np.sinh(uorb) ** 2.0
        - pu0 ** 2.0 / (2.0 * delta ** 2.0)
        - Jphi ** 2.0 / (2.0 * delta ** 2.0 * np.sinh(uorb) ** 2.0)
        - dU(uorb, u0, vorb, delta, Phi)
    )
    I3V = (
        -E * np.sin(vorb) ** 2.0
        + pv0 ** 2.0 / (2.0 * delta ** 2.0)
        + Jphi ** 2.0 / (2.0 * delta ** 2.0 * np.sin(vorb) ** 2.0)
        - dV(uorb, vorb, delta, Phi)
    )
    if gotdelta:
        return uorb, vorb, u0, E, Jphi, I3U, I3V, delta
    return uorb, vorb, u0, E, Jphi, I3U, I3V
def deriv_dU(u, v, delta, Phi):
    """The derivative of dU w.r.t. u, so that we can minimize this function
    w.r.t. u and choose an appropriate u0"""
    R, z = uv2Rz(u, v, delta)
    dRdu = delta * np.cosh(u) * np.sin(v)
    dzdu = delta * np.sinh(u) * np.cos(v)
    dPhidu = Phi.dR(R, z) * dRdu + Phi.dz(R, z) * dzdu
    return (np.sinh(u) ** 2.0 + np.sin(v) ** 2.0) * dPhidu + 2.0 * np.sinh(u) * np.cosh(u) * Phi(R, z)
def pu_squared(u, u0, vorb, E, Jphi, I3U, Phi, delta):
    """Equation for pu^2 / (2*delta^2)"""
    return (
        E * np.sinh(u) ** 2.0
        - (I3U + dU(u, u0, vorb, delta, Phi))
        - Jphi ** 2.0 / (2.0 * delta ** 2.0 * np.sinh(u) ** 2.0)
    )
def get_u_a(X_mtx):
    x = X_mtx[:, 0]
    u_a_x = 1. / omega * sinh(omega * 1.55) / cosh(omega * x)
    # piecewise response built around the midpoint x = 1.55 (domain length 3.1)
    resp = zeros_like(x)
    resp[x < 1.55] = 1. / omega * sinh(omega * x[x < 1.55]) / cosh(omega * 1.55)
    peak = 1. / omega * sinh(omega * 1.55) / cosh(omega * 1.55)
    resp[x >= 1.55] = 2. * peak - 1. / omega * sinh(-omega * (x[x >= 1.55] - 3.1)) / cosh(omega * 1.55)
    # NOTE: `resp` is computed but never used; the function returns `u_a_x`,
    # which may be a leftover from an earlier version.
    return u_a_x.flatten()
def tension_spline_matrix(Xk, X, sig):
    '''Given the knots Xk, the evaluation points X and the tension parameter sig,
    returns the matrix S such that S.dot(x) is the spline value at each point in X,
    with x the spline parameters'''
    nknots = len(Xk)
    INTERP = []
    for x in X:
        row = nknots * 2 * [0]
        try:
            n = np.nonzero(x > Xk)[0][-1]
        except IndexError:
            # x lies at or before the first knot
            row[0] = 1
            INTERP.append(row)
            continue
        if n == nknots - 1:
            row[-1] = 1
        else:
            dx = Xk[n + 1] - Xk[n]
            s = np.sinh(sig * dx)
            dxu = Xk[n + 1] - x
            dxl = x - Xk[n]
            row[n + 1] = dxl / dx
            row[n] = dxu / dx
            row[n + 1 + nknots] = (np.sinh(sig * dxl) / s - dxl / dx) / sig ** 2
            row[n + nknots] = (np.sinh(sig * dxu) / s - dxu / dx) / sig ** 2
        INTERP.append(row)
    return np.array(INTERP)
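# A minimal usage sketch for tension_spline_matrix, with illustrative knots and
# tension value. The layout of the parameter vector (knot values followed by
# curvature-like terms) is an assumption inferred from how the rows are filled above.
import numpy as np

Xk = np.linspace(0.0, 1.0, 5)            # knots
X = np.linspace(0.0, 1.0, 51)            # evaluation points
sig = 2.0                                # tension parameter
S = tension_spline_matrix(Xk, X, sig)    # shape (51, 10)

params = np.concatenate([np.sin(2 * np.pi * Xk),   # values at the knots
                         np.zeros(len(Xk))])       # curvature-like terms set to zero
spline_values = S.dot(params)            # spline evaluated at X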
def function(x, y):
    # Fourier series solution, summed over the odd modes m = 2n + 1
    phi = 0.0
    for n in range(100):
        m = 2 * n + 1
        phi += 4.0 / (m * np.pi) / np.sinh(m * np.pi) * np.sinh(m * np.pi * y) * np.sin(m * np.pi * x)
    return phi
def _poisson_solver(self):
    """
    from http://www.inf.ethz.ch/personal/tulink/FEM14/Ch1_ElmanSyvesterWathen_Ox05.pdf
    a solver for the square domain, constant source function f(x) ≡ 1,
    zero boundary condition.
    """
    row, col = np.meshgrid(range(self.shape[1]), range(self.shape[0]))
    row = np.asarray(row, dtype=float)
    col = np.asarray(col, dtype=float)
    row = 2 * row / self.shape[1] - 1
    col = 2 * col / self.shape[0] - 1
    t_1 = 1 - row**2 * 0.5
    t_2 = 16 / np.pi**3
    t_3 = 0
    for idx in np.arange(1, 12, 2):
        t_3_1 = idx * np.pi * (1 + row) * 0.5
        t_3_2 = idx * np.pi * (1 + col) * 0.5
        t_3_3 = idx * np.pi * (1 - col) * 0.5
        t_3_0 = (np.sin(t_3_1) / (idx**3 * np.sinh(idx * np.pi))
                 * (np.sinh(t_3_2) + np.sinh(t_3_3)))
        t_3 += t_3_0
    poisson_solution = t_1 - t_2 * t_3
    return poisson_solution
def computeNextCom(self, p0, x0=[[0, 0], [0, 0]], t=0.05):
    '''Compute the COM at time t (with t < durrationOfStep*(1-alpha)).
    This function is useful for MPC implementation.'''
    # TODO check t < durrationOfStep*(1-alpha)
    px = p0[0]
    py = p0[1]
    w2 = self.g / self.h
    w = np.sqrt(w2)
    durrationOfStep = self.durrationOfStep
    x0_x = np.matrix([[x0[0][0]], [x0[0][1]]])
    x0_y = np.matrix([[x0[1][0]], [x0[1][1]]])
    c0_x = x0_x[0, 0]
    c0_y = x0_y[0, 0]
    d_c0_x = x0_x[1, 0]
    d_c0_y = x0_y[1, 0]
    c_x = (c0_x - px) * np.cosh(w * t) + (d_c0_x / w) * np.sinh(w * t) + px
    d_c_x = w * (c0_x - px) * np.sinh(w * t) + d_c0_x * np.cosh(w * t)
    c_y = (c0_y - py) * np.cosh(w * t) + (d_c0_y / w) * np.sinh(w * t) + py
    d_c_y = w * (c0_y - py) * np.sinh(w * t) + d_c0_y * np.cosh(w * t)
    return [c_x, c_y, d_c_x, d_c_y]
def Er(self, R, z, vR, vz, E, Lz, sinh2u0, u0):
    """
    NAME:
       Er
    PURPOSE:
       calculate the 'radial energy'
    INPUT:
       R, z, vR, vz - coordinates
       E - energy
       Lz - angular momentum
       sinh2u0, u0 - sinh^2(u0) and u0
    OUTPUT:
       Er
    HISTORY:
       2012-11-29 - Written - Bovy (IAS)
    """
    u, v = bovy_coords.Rz_to_uv(R, z, self._delta)
    pu = (vR * numpy.cosh(u) * numpy.sin(v)
          + vz * numpy.sinh(u) * numpy.cos(v))  # no delta, bc we will divide it out
    out = (pu**2. / 2.
           + Lz**2. / 2. / self._delta**2. * (1. / numpy.sinh(u)**2. - 1. / sinh2u0)
           - E * (numpy.sinh(u)**2. - sinh2u0)
           + (numpy.sinh(u)**2. + 1.) * actionAngleStaeckel.potentialStaeckel(u, numpy.pi / 2., self._pot, self._delta)
           - (sinh2u0 + 1.) * actionAngleStaeckel.potentialStaeckel(u0, numpy.pi / 2., self._pot, self._delta))
    # +(numpy.sinh(u)**2.+numpy.sin(v)**2.)*actionAngleStaeckel.potentialStaeckel(u,v,self._pot,self._delta)
    # -(sinh2u0+numpy.sin(v)**2.)*actionAngleStaeckel.potentialStaeckel(u0,v,self._pot,self._delta))
    return out
def p_analytical(x, y):
    X, Y = np.meshgrid(x, y)
    p_an = (np.sinh(1.5 * np.pi * Y / x[-1]) /
            np.sinh(1.5 * np.pi * y[-1] / x[-1]) *
            np.sin(1.5 * np.pi * X / x[-1]))
    return p_an
def Kw_gelenk_kik(E, A, I, l, u, w):
    """Dynamic stiffness, hinge at node k

    :E: @todo
    :A: @todo
    :I: @todo
    :l: @todo
    :u: @todo
    :w: @todo
    :returns: @todo
    """
    lam = l * (u * w**2 / E / I) ** (1 / 4)
    eps = l * np.sqrt(u * w**2 / E / A)
    o1 = (np.cosh(lam) + np.cos(lam)) / 2
    o2 = (np.sinh(lam) + np.sin(lam)) / 2
    o3 = (np.cosh(lam) - np.cos(lam)) / 2
    o4 = (np.sinh(lam) - np.sin(lam)) / 2
    kik = np.matrix([[E * A / l * eps / np.tan(eps), 0, 0],
                     [0, E * I * lam**3 / l**3 * (o1**2 - o2 * o4) / (o2 * o3 - o1 * o4), 0],
                     [0, 0, 0]])
    return kik
def M(H, B):
    # 1D Ising magnetization per spin: K = beta*J, h = beta*H, with B the inverse temperature
    J = 1
    K = J * B
    h = H * B
    m = (np.exp(K) * np.sinh(h)) / \
        ((np.exp(2 * K) * np.sinh(h) ** 2 + np.exp(-2 * K)) ** 0.5)
    return m
def grad(data, QofI='U_avg'):
    M = data.shape[0]
    mu = data[:, 0].reshape((M, 1))
    rho = data[:, 1].reshape((M, 1))
    dpdx = data[:, 2].reshape((M, 1))
    eta = data[:, 3].reshape((M, 1))
    B0 = data[:, 4].reshape((M, 1))
    Ha = B0 / np.sqrt(eta * mu)
    mu0 = 1.0
    if QofI == 'U_avg':
        df_dmu = -dpdx * (np.sqrt(eta * mu) / np.tanh(Ha) - B0 / np.sinh(Ha)**2) / (2 * B0 * mu**2)
        df_drho = np.random.uniform(1.0e-8, 1.0e-10, (M, 1))
        df_ddpdx = -(eta * mu - Ha * eta * mu / np.tanh(Ha)) / (mu * B0**2)
        df_deta = -dpdx * (2 * eta * mu - Ha * eta * mu / np.tanh(Ha)
                           - (B0 / np.sinh(Ha))**2) / (2 * eta * mu * B0**2)
        df_dB0 = -dpdx * (-2 * eta * mu + Ha * eta * mu / np.tanh(Ha)
                          + (B0 / np.sinh(Ha))**2) / (mu * B0**3)
    elif QofI == 'B_ind':
        df_dmu = -dpdx * mu0 * (np.sqrt(eta * mu) * np.sinh(Ha) - B0) / (
            4 * mu * (B0 * np.cosh(Ha / 2))**2)
        df_drho = np.random.uniform(1.0e-8, 1.0e-10, (M, 1))
        df_ddpdx = mu0 * (B0 - 2 * np.sqrt(eta * mu) * np.tanh(Ha / 2)) / (
            2 * B0**2)
        df_deta = -dpdx * mu0 * (np.sqrt(eta * mu) * np.sinh(Ha) - B0) / (
            4 * eta * (B0 * np.cosh(Ha / 2))**2)
        df_dB0 = -dpdx * mu0 * (B0 + B0 / np.cosh(Ha / 2)**2 - 4 * np.sqrt(
            eta * mu) * np.tanh(Ha / 2)) / (2 * B0**3)
    return np.concatenate((df_dmu, df_drho, df_ddpdx, df_deta, df_dB0), axis=1)
def cosh_d(v1, v2):
    r1, phi1 = list(v1)[:2]
    r2, phi2 = list(v2)[:2]
    return max(
        1.0,
        np.cosh(r1) * np.cosh(r2) - np.sinh(r1) * np.sinh(r2) * np.cos(phi1 - phi2)
    )  # Python precision issues
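# Hedged usage sketch: cosh_d returns the cosh of the hyperbolic distance between
# two points given in polar (r, phi) coordinates, so the distance itself is its
# arccosh; the clamp to 1.0 above keeps arccosh defined despite round-off.
import numpy as np

p1 = (1.0, 0.0)          # (r, phi)
p2 = (2.0, np.pi / 3)
dist = np.arccosh(cosh_d(p1, p2))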
def _run(self, xylist, t):
    x = xylist[0]
    y = xylist[1]
    # specific nonhomogeneous contribution \bar T(x,y)
    tempnonhom = 0
    temperature = 0
    for n in range(1, self.Nsum):
        kn = n * np.pi / self.a
        Ttopn = 2 * self.Ttop * (1 - (-1)**n) / (n * np.pi)
        tmp = Ttopn * np.sin(kn * x) * np.sinh(kn * y) / np.sinh(kn * self.b)
        tempnonhom += tmp
    # general homogeneous contribution \tilde T(x, y, t)
    if self.NonHomogeneousOnly == False:
        for n in range(0, self.Nsum):
            kn = (2 * n + 1) * np.pi / self.a
            for m in range(1, self.Nsum):
                km = m * np.pi / self.b
                alpha2 = kn**2 + km**2
                Anm = 4 * self.Ttop * 2 * (-1)**m * (m / (2 * n + 1)) / alpha2 / self.b**2
                tmp = Anm * np.sin(kn * x) * np.sin(km * y) * np.exp(-self.kappa * alpha2 * t)
                temperature += tmp
    # add homogeneous and nonhomogeneous
    temperature = temperature + tempnonhom
    return ExactSolution([x, y, temperature],
                         names=['position_x', 'position_y', 'temperature'],
                         jumps=[])
def dwdx(kx):
    # returns d[w/unorm]/dx
    ky = wavesurf(kx)
    return (a1 * k * np.sinh(ky) * np.cos(kx)
            + 4 * a2 * k * np.sinh(2 * ky) * np.cos(2 * kx)
            + 9 * a3 * k * np.sinh(3 * ky) * np.cos(3 * kx)
            + 16 * a4 * k * np.sinh(4 * ky) * np.cos(4 * kx)
            + 25 * a5 * k * np.sinh(5 * ky) * np.cos(5 * kx))
def stretching(sc, Vstretching, theta_s, theta_b):
    """
    Computes S-coordinates
    INPUT:
    sc          : normalized levels          [ndarray]
    Vstretching : ROMS stretching algorithm  [int]
    theta_s     : surface stretching parameter [float]
    theta_b     : bottom stretching parameter  [float]
    """
    if Vstretching == 1:
        # Song and Haidvogel, 1994
        cff1 = 1. / np.sinh(theta_s)
        cff2 = 0.5 / np.tanh(0.5 * theta_s)
        C = (1. - theta_b) * cff1 * np.sinh(theta_s * sc) + \
            theta_b * (cff2 * np.tanh(theta_s * (sc + 0.5)) - 0.5)
        return C
    if Vstretching == 4:
        # A. Shchepetkin (UCLA-ROMS, 2010) double vertical stretching function
        if theta_s > 0:
            Csur = (1.0 - np.cosh(theta_s * sc)) / (np.cosh(theta_s) - 1.0)
        else:
            Csur = -sc**2
        if theta_b > 0:
            Cbot = (np.exp(theta_b * Csur) - 1.0) / (1.0 - np.exp(-theta_b))
            return Cbot
        else:
            return Csur
def calc_Fwater_layeredNR(hkl, sig, sig_bar, d, zwater, g_inv, database, cell):
    f_par = database["o2-."]
    q = hkl[2] * g_inv[2][2] ** 0.5
    Auc = cell[0] * Num.sin(Num.radians(cell[5])) * cell[1]
    f = (
        Auc * d * 0.033456
        * (
            f_par[0] * Num.exp(-(q / 4 / Num.pi) ** 2 * f_par[1])
            + f_par[2] * Num.exp(-(q / 4 / Num.pi) ** 2 * f_par[3])
            + f_par[4] * Num.exp(-(q / 4 / Num.pi) ** 2 * f_par[5])
            + f_par[6] * Num.exp(-(q / 4 / Num.pi) ** 2 * f_par[7])
            + f_par[8]
        )
        * Num.exp(-2 * Num.pi ** 2 * q ** 2 * sig)
    )
    x = Num.pi * q * d
    al = 2 * Num.pi ** 2 * q ** 2 * sig_bar
    a = Num.exp(al) * Num.cos(2 * x) - 1
    b = Num.exp(al) * Num.sin(-2 * x)
    c = 4 * Num.cos(x) ** 2 * Num.sinh(al / 2) ** 2 - 4 * Num.sin(x) ** 2 * Num.cosh(al / 2) ** 2
    d = -2 * Num.sin(2 * x) * Num.sinh(al)  # NOTE: rebinds the argument d
    rez = Num.cos(2 * Num.pi * hkl[2] * zwater)
    imz = Num.sin(2 * Num.pi * hkl[2] * zwater)
    relayer = (a * c + b * d) / (c ** 2 + d ** 2)
    imlayer = (b * c - a * d) / (c ** 2 + d ** 2)
    re = f * (relayer * rez - imlayer * imz)
    im = f * (relayer * imz + imlayer * rez)
    return re, im
def laplace_solution(x, y, Lx, Ly):
    """
    Computes and returns the analytical solution of the Laplace equation
    on a given two-dimensional Cartesian grid.

    Parameters
    ----------
    x : numpy.ndarray
        The gridline locations in the x direction as a 1D array of floats.
    y : numpy.ndarray
        The gridline locations in the y direction as a 1D array of floats.
    Lx : float
        Length of the domain in the x direction.
    Ly : float
        Length of the domain in the y direction.

    Returns
    -------
    p : numpy.ndarray
        The analytical solution as a 2D array of floats.
    """
    X, Y = numpy.meshgrid(x, y)
    p = (numpy.sinh(1.5 * numpy.pi * Y / Ly) /
         numpy.sinh(1.5 * numpy.pi * Ly / Lx) *
         numpy.sin(1.5 * numpy.pi * X / Lx))
    return p
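# Minimal usage sketch for laplace_solution; the unit-square domain and the grid
# resolution are illustrative assumptions.
import numpy

Lx, Ly = 1.0, 1.0
x = numpy.linspace(0.0, Lx, 41)
y = numpy.linspace(0.0, Ly, 41)
p_exact = laplace_solution(x, y, Lx, Ly)   # shape (41, 41)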
def mu(self, ph, om, th, matrix=True):
    r"""
    Generate M_U matrix

    Parameters
    ----------
    ph : float, array_like
        angle, phi
    om : float, array_like
        angle, omega
    th : float, array_like
        angle, theta
    matrix : bool, optional
        method will return a matrix if this is True

    Returns
    -------
    mu : array_like, matrix
        M_U = G**(-1)*V_U*G**(-\dagger)
    """
    a = np.cos(ph - om) * np.sinh(2 * th)
    b = np.exp(2j * ph) * np.cosh(th)**2 + np.exp(2j * om) * np.sinh(th)**2
    if matrix is True:
        return np.matrix([[a, b], [np.conj(b), a]])
    elif matrix is False:
        return np.array([[a, b], [np.conj(b), a]])
    else:
        raise Exception("What kind of array do you want returned?")
def calc_F_layered_el_NR(hkl, occ, K, sig, sig_bar, d, d0, g_inv, database, el):
    f_par = database[el]
    q = hkl[2] * g_inv[2][2] ** 0.5
    qd4pi = q / 4 / Num.pi
    f = (
        (
            f_par[0] * Num.exp(-(qd4pi) ** 2 * f_par[1])
            + f_par[2] * Num.exp(-(qd4pi) ** 2 * f_par[3])
            + f_par[4] * Num.exp(-(qd4pi) ** 2 * f_par[5])
            + f_par[6] * Num.exp(-(qd4pi) ** 2 * f_par[7])
            + f_par[8]
        )
        * Num.exp(-2 * Num.pi ** 2 * q ** 2 * sig)
        * occ
    )
    x = Num.pi * q * d
    al = 2 * Num.pi ** 2 * q ** 2 * sig_bar + K * d
    a = Num.exp(al) * Num.cos(2 * x) - 1
    b = Num.exp(al) * Num.sin(-2 * x)
    c = 4 * Num.cos(x) ** 2 * Num.sinh(al / 2) ** 2 - 4 * Num.sin(x) ** 2 * Num.cosh(al / 2) ** 2
    d = -2 * Num.sin(2 * x) * Num.sinh(al)  # NOTE: rebinds the argument d
    wert = 2 * Num.pi * hkl[2] * d0
    rez = Num.cos(wert)
    imz = Num.sin(wert)
    wert = c ** 2 + d ** 2
    relayer = (a * c + b * d) / wert
    imlayer = (b * c - a * d) / wert
    re = f * (relayer * rez - imlayer * imz)
    im = f * (relayer * imz + imlayer * rez)
    return re, im
def f(k, sigma, h, wh, u):
    # residual of a finite-depth wave dispersion relation (with amplitude and
    # current corrections); zero when k is consistent with the angular frequency sigma
    eps = k * wh / 2
    f1 = np.tanh(k * h) ** 5
    f2 = (k * h / np.sinh(k * h)) ** 4
    D = (8.0 + np.cosh(4.0 * k * h) - 2.0 * (np.tanh(k * h) ** 2)) / (8.0 * (np.sinh(k * h) ** 4))
    y = (9.81 * k * np.tanh(k * h + f2 * eps) * (1.0 + f1 * (eps ** 2) * D)) ** 0.5 + u * k - sigma
    return y
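# Hedged example: solve the dispersion residual above for the wavenumber k with a
# bracketing root finder. The depth, wave height, current and frequency values,
# and the bracket, are illustrative assumptions.
import numpy as np
from scipy.optimize import brentq

sigma = 2 * np.pi / 8.0   # angular frequency for an 8 s wave
h = 10.0                  # water depth [m]
wh = 1.0                  # wave height [m]
u = 0.0                   # ambient current [m/s]

k_root = brentq(f, 1e-4, 10.0, args=(sigma, h, wh, u))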
def Tc(zl, zs, par):
    # par = 5x1 array of cosmological parameters (h, OL, Ok, w0, wa)
    h, OL, Ok, w0, wa = par[0], par[1], par[2], par[3], par[4]
    Om = 1 - Ok - OL
    # inverse E(z)
    rEz = lambda x: (Om*(1.+x)**3 + Ok*(1+x)**2
                     + OL*(1+x)**(3*(1+w0+wa))*np.exp(-3.0*wa*x/(1+x)))**(-0.5)
    # dimensionless ratio E_ratio
    El, er = scipy.integrate.quad(rEz, 0, zl)
    Es, er = scipy.integrate.quad(rEz, 0, zs)
    Els, er = scipy.integrate.quad(rEz, zl, zs)
    # curvature
    if Ok < 0.0:
        cur = (np.absolute(Ok))**0.5
        El = np.sin(cur*El)/cur
        Es = np.sin(cur*Es)/cur
        Els = np.sin(cur*Els)/cur
    elif Ok > 0.0:
        cur = (np.absolute(Ok))**0.5
        El = np.sinh(cur*El)/cur
        Es = np.sinh(cur*Es)/cur
        Els = np.sinh(cur*Els)/cur
    E_ratio = El*Es/Els
    # Tc
    Tc = E_ratio/(100*h)
    return Tc
def TacC2_calc(k=None,p=None,fDc=None,fMag=None,a=None,*args,**kwargs): C=fDc M=fMag s21=np.sinh(k * (1 - a)) s20=np.cosh(k * (1 - a)) s19=4 * M * k * p ** 2 * s21 s18=4 * M * k ** 2 * p ** 2 * s20 s17=4 * C * k ** 3 s16=2 * k ** 4 * p s15=k - 2 * a * k s14=8 * C * a * p ** 3 s13=2 * C * k * p ** 3 s12=4 * C * k * p ** 2 s11=4 * C * k ** 2 * p s10=8 * C * a * k ** 2 * p s9=4 * M * a * k * p ** 3 s8=2 * k ** 2 * p ** 3 s7=4 * C * p ** 3 s6=4 * M * k ** 2 * p s5=8 * C * k ** 2 s4=8 * C * p ** 2 s3=4 * M * k * p ** 2 s2=2 * C * k ** 2 * p ** 2 s1=4 * M * a * k ** 2 * p ** 2 line1=(s1 + s2) * np.cos(a * p) * np.cosh(a * k) + (s3 - s12 - s17) * np.cos(a * p) * np.sinh(k) + (- s12 - s3) * np.cos(a * p) * np.sinh(a * k) + (- 2 * M * k ** 2 * p ** 2 + s5 + s4) * np.cos(a * p) * np.cosh(k) line2=(2 * C * k ** 2 * p ** 2 * s20 - s4 - 4 * C * k * p ** 2 * s21 - s19 - s5 + s18 - 2 * M * k ** 2 * p ** 2 * np.cosh(s15) - 4 * M * a * k ** 2 * p ** 2 * s20) * np.cos(a * p) + (s6 - s7) * np.sin(a * p) * np.cosh(a * k) line3=(s13 + 2 * C * k ** 3 * p + 2 * M * k * p ** 3 + 4 * M * k ** 3 * p - 4 * C * a * k * p ** 3 - 4 * C * a * k ** 3 * p - s9 - 4 * M * a * k ** 3 * p) * np.sin(a * p) * np.sinh(k) + (s13 + s9) * np.sin(a * p) * np.sinh(a * k) line4=(s14 - s7 - s11 - s6 + s10) * np.sin(a * p) * np.cosh(k) + (s7 + 4 * C * p ** 3 * s20 - s14 + s11 + s6 - s10 - 4 * M * k ** 2 * p * s20 - 2 * C * k * p ** 3 * s21 - 4 * M * k * p ** 3 * s21 + 2 * M * k * p ** 3 * np.sinh(s15) + 4 * M * a * k * p ** 3 * s21) * np.sin(a * p) line5=(s17 + 8 * C * k * p ** 2 + s3) * np.sinh(k) - s3 * np.sinh(a * k) + (s1 - s4 - s2 - 4 * M * k ** 2 * p ** 2 - s5) * np.cosh(k) + s5 + s4 - s2 - s19 + s18 - s1 denom=(k ** 5 * p + k ** 3 * p ** 3) * np.sinh(k) + (- s16 - s8) * np.cosh(k) + s16 + s8 TacC2=(line1 + line2 + line3 + line4 + line5) / denom return TacC2
def test_abcd_lossy_line(self):
    '''
    Lossy transmission line of characteristic impedance Z0, length l
    and propagation constant gamma = alpha + j beta

    ○---------○

    ○---------○

    has ABCD matrix of the form:

    [ cosh(gamma l)       Z0 sinh(gamma l) ]
    [ 1/Z0 sinh(gamma l)  cosh(gamma l)    ]
    '''
    l = 5.0
    z0 = 30.0
    alpha = 0.5
    beta = 2.0
    lossy_media = DefinedGammaZ0(
        frequency=Frequency(1, 100, 21, 'GHz'),
        gamma=alpha + 1j*beta,
        z0=z0
    )
    ntw = lossy_media.line(d=l, unit='m', z0=z0)
    gamma = lossy_media.gamma
    npy.testing.assert_array_almost_equal(ntw.a[:, 0, 0], npy.cosh(gamma*l))
    npy.testing.assert_array_almost_equal(ntw.a[:, 0, 1], z0*npy.sinh(gamma*l))
    npy.testing.assert_array_almost_equal(ntw.a[:, 1, 0], 1.0/z0*npy.sinh(gamma*l))
    npy.testing.assert_array_almost_equal(ntw.a[:, 1, 1], npy.cosh(gamma*l))
def init():
    global ct
    if ct == 0:
        print("init() dx = " + str(dx) + " dy = " + str(dy))
        ct = 0
    pmin = 1000000000.0
    pmax = -1000000000.0
    for yi in range(ny):
        for xi in range(nx):
            x = xi * dx
            y = yi * dy
            # initialize arrays
            p = 0.0
            # boundary condition p = y @ x = 2
            if x == 2.0:
                p = y
            # analytic solution
            sum = 0.0
            for n in range(10):
                nn = 2.0 * n + 1.0
                sum += (np.sinh(nn * np.pi * x) * np.cos(nn * np.pi * y)) / \
                       ((nn * np.pi) * (nn * np.pi) * np.sinh(2 * np.pi * nn))
            pa = x / 4.0 - 4.0 * sum
            P1[yi][xi] = p
            P2[yi][xi] = p
            PA[yi][xi] = pa
            if pa < pmin:
                pmin = pa
            if pa > pmax:
                pmax = pa
            # if yi == 10: U1[yi][xi] = x
    # visualize color range
    im2.set_array(PA)
    print("pmin = " + str(pmin) + " pmax = " + str(pmax))
def TacB1_calc(k=None,p=None,fDc=None,fMag=None,a=None,*args,**kwargs): C=fDc M=fMag s20=np.sin(p * (1 - a)) s19=np.cos(p * (1 - a)) s18=8 * C * k ** 2 * s20 s17=8 * C * p ** 2 * s20 s16=8 * C * a * p ** 3 * s19 s15=2 * C * k * p ** 3 * s19 s14=4 * C * k ** 2 * p * s19 s13=4 * C * k * p ** 2 * s20 s12=4 * M * k * p ** 2 * s20 s11=8 * C * a * k ** 2 * p * s19 s10=4 * M * a * k * p ** 3 * s19 s9=4 * C * p ** 3 * s19 s8=4 * M * k ** 2 * p * s19 s7=2 * k ** 4 * p s6=k - 2 * a * k s5=4 * C * k ** 2 * p s4=2 * k ** 2 * p ** 3 s3=4 * M * k ** 2 * p s2=np.sinh(k * (1 - a)) s1=np.cosh(k * (1 - a)) denom=(- s7 - s4) * np.cosh(k) + (k ** 5 * p + k ** 3 * p ** 3) * np.sinh(k) + s7 + s4 Ccoshk=s9 + s5 - s18 - s17 + s3 - s16 + s14 - s8 + 2 * M * k ** 2 * p ** 2 * s20 - s11 Csinhk=4 * C * k ** 3 * s20 - 2 * C * k ** 3 * p - s15 - 2 * C * k ** 3 * p * s19 - 4 * M * a * k ** 3 * p - 2 * M * k * p ** 3 * s19 + s13 - s12 + 4 * C * a * k * p ** 3 * s19 + 4 * C * a * k ** 3 * p * s19 + s10 + 4 * M * a * k ** 3 * p * s19 Ccoshak=s9 + s3 - s8 - 2 * C * k ** 2 * p ** 2 * s20 - 4 * M * a * k ** 2 * p ** 2 * s20 Csinhak=s13 - s15 + s12 - s10 line0=s18 - s5 - s9 + s17 - s3 line1=- 4 * C * p ** 3 * s1 * s19 + s16 - s14 - 4 * M * k ** 2 * p * s1 + s8 - 2 * C * k ** 2 * p ** 2 * s1 * s20 - 4 * M * k ** 2 * p ** 2 * s1 * s20 + 2 * M * k ** 2 * p ** 2 * np.cosh(s6) * s20 + s11 + 4 * M * k ** 2 * p * s1 * s19 + 2 * C * k * p ** 3 * s19 * s2 + 4 * M * k * p ** 3 * s19 * s2 line2=- 2 * M * k * p ** 3 * s19 * np.sinh(s6) + 4 * C * k * p ** 2 * s2 * s20 + 4 * M * k * p ** 2 * s2 * s20 - 4 * M * a * k * p ** 3 * s19 * s2 + 4 * M * a * k ** 2 * p ** 2 * s1 * s20 TacB1=(Ccoshk * np.cosh(k) + Csinhk * np.sinh(k) + Ccoshak * np.cosh(a * k) + Csinhak * np.sinh(a * k) + line0 + line1 + line2) / denom return TacB1
def ellipsoidal_to_cart(self, lat, lon, height):
    # assert height > 0 and lat >= 0 and lat <= np.pi and lon >= 0 and lon < 2 * np.pi
    x = self.a * np.sinh(height) * np.sin(lat) * np.cos(lon)
    y = self.a * np.sinh(height) * np.sin(lat) * np.sin(lon)
    z = self.a * np.cosh(height) * np.cos(lat)
    pos_local = np.mat([x, y, z]).T
    return self.center + self.rot * pos_local
def abh(self, bet1, alf1, KL, K):
    """bet1 and alf1 at the end of the element"""
    gamma1 = (1. + alf1 ** 2) / bet1
    KL2 = 2. * KL
    sinhc = numpy.sinh(KL2) / KL2
    res = (0.5 * bet1 * (1. + sinhc)
           + alf1 * numpy.sinh(KL) ** 2. / KL / K
           + (sinhc - 1.) / (2. * K ** 2.) * gamma1)
    return res
def fcn2min(params, DR, L, t, err):
    """model NLIV"""
    Lambda_N = params['Lambda_N'].value
    P = params['Alpha'].value
    Wpy = 169e-9
    Wcu = 168e-9
    Tcu = 130e-9
    Lambda_F = 5e-9 * (PyR(10) / PyR(t))
    PyRes = PyR(t)
    CuRes = CuR(t)
    Rf = (PyRes * Lambda_F) / (Wpy * Wcu)
    Rn = (CuRes * Lambda_N) / (Wcu * Tcu)
    model_tak = (2 * P * P * Rf * Rf) / ((1 - (P * P)) * (1 - (P * P)) * Rn * numpy.sinh(L / Lambda_N))
    Rf1 = (2 * PyRes * Lambda_F) / ((1 - (P * P)) * Wpy * Wcu)
    Rn1 = (2 * CuRes * Lambda_N) / (Wcu * Tcu)
    model_Otani = ((P * P) * (Rf1 * Rf1)) / ((2 * Rf1 * numpy.exp(L / Lambda_N)) + (Rn1 * numpy.sinh(L / Lambda_N)))
    model_Cas = (2 * (P * P) * Rn1) / (((2 + (Rn1 / Rf1))**2 * numpy.exp(L / Lambda_N)) - ((Rn1 / Rf1)**2 * numpy.exp(-L / Lambda_N)))
    return (model_tak - DR) / err
def sinh(x: Number = 0.0) -> Number: return np.sinh(x)
# Match the coefficients of the free wave x < 0
A = (fi[n - b] * cm.exp(1j * q * x[n - b])
     - fi[n - int(1.01 * b)] * cm.exp(1j * q * x[n - int(1.01 * b)])) / (
         cm.exp(2j * q * x[n - b]) - cm.exp(2j * q * x[n - int(1.01 * b)]))
B = (fi[n - b] * cm.exp(-1j * q * x[n - b])
     - fi[n - int(1.01 * b)] * cm.exp(-1j * q * x[n - int(1.01 * b)])) / (
         cm.exp(-2j * q * x[n - b]) - cm.exp(-2j * q * x[n - int(1.01 * b)]))
# Calculate the transmission probability
T = 1 / (abs(A))**2
# print T
# Compare T with the exact solution. NOTE: the textbook square-barrier result uses
# sinh**2(kappa*a) with kappa = sqrt(2*m*(V0 - E))/hbar; the expression below has no
# square on sinh and keeps `a` inside the square root, which may be a bug.
T_exact = 1 / (1 + ((V_x[n + int(b / 2)])**2
                    * np.sinh(np.sqrt(2 * m * (V_x[n + int(b / 2)] - E) / h * a)))
               / (4 * E * (V_x[n + int(b / 2)] - E)))
print(A, B, T, T_exact)
# Visualize the wave and potential
fi = np.array(fi)
figure(figsize=(8, 6), dpi=80)
# real part of wave
subplot(2, 1, 1)
plot(x, fi.real, color="blue", linewidth=1.0, linestyle="-")
plot(x, V_x, color="green", linewidth=1.0, linestyle="-")
xlim(-50, 50)
# ylim(-2,2)
def error(x): return np.sinh(x)
def elements(x, y, z, vx, vy, vz, mu): R2 = x**2 + y**2 + z**2 R = sqrt(R2) V2 = vx**2 + vy**2 + vz**2 RtimesRdot = x * vx + y * vy + z * vz hx = y * vz - z * vy hy = z * vx - x * vz hz = x * vy - y * vx h2 = hx**2 + hy**2 + hz**2 if (RtimesRdot > 0): Rdot = sqrt(V2 - h2 / R2) else: Rdot = -sqrt(V2 - h2 / R2) #eccentricity and pericenter distance mu_1 = 1.0 / mu temp = 1.0 + h2 * mu_1 * (V2 * mu_1 - 2.0 / R) if (temp <= 0): ecc = 0.0 else: ecc = sqrt(temp) if (ecc < 1e-8): ecc = 1.e-8 peridist = h2 * mu_1 / (1.0 + ecc) semimaj = 1.0 / (2.0 / R - V2 * mu_1) #inclination incl = arccos(hz / sqrt(h2)) if (incl != 0.0): if (hz > 0): node = arctan2(hx / sqrt(h2) / sin(incl), -hy / sqrt(h2) / sin(incl)) else: node = arctan2(-hx / sqrt(h2) / sin(incl), hy / sqrt(h2) / sin(incl)) else: node = 0.0 #true longitude (argument of pericenter plus true anomaly) if ((incl > 1.e-3) & (incl < pi - 1.0e-3)): sinomegaplusf = z / R / sin(incl) cosomegaplusf = 1.0 / cos(node) * ( x / R + sin(node) * sinomegaplusf * cos(incl)) periargplustrue_anom = arctan2(sinomegaplusf, cosomegaplusf) else: periargplustrue_anom = arctan2(y, x) * cos(incl) #true anomaly and argument of pericenter true_anom = arctan2(semimaj * (1.0 - ecc**2) / sqrt(h2) / ecc * Rdot, 1.0 / ecc * (semimaj / R * (1.0 - ecc**2) - 1.0)) periarg = periargplustrue_anom - true_anom periarg = periarg % (2 * pi) true_anom = true_anom % (2 * pi) if (true_anom < 0): true_anom = 2.0 * pi + true_anom if (periarg < 0): periarg = 2.0 * pi + periarg if (ecc < 1.0): tanecc_anom_2 = tan(0.5 * true_anom) * sqrt((1.0 - ecc) / (1.0 + ecc)) tanecc_anom = 2.0 * tanecc_anom_2 / (1.0 - tanecc_anom_2**2) cosecc_anom = (1.0 - R / semimaj) / ecc ecc_anom = arctan2(tanecc_anom * cosecc_anom, cosecc_anom) if (ecc_anom < 0): ecc_anom = 2.0 * pi + ecc_anom mean_anom = ecc_anom - ecc * sin(ecc_anom) return peridist, ecc, incl, periarg, node, true_anom, mean_anom, ecc_anom else: tanhhyp_anom_2 = tan(0.5 * true_anom) * sqrt((ecc - 1.0) / (ecc + 1.0)) tanhhyp_anom = 2.0 * tanhhyp_anom_2 / (tanhhyp_anom_2**2 + 1.0) hyp_anom = arctanh(tanhhyp_anom) mean_anom = ecc * sinh(hyp_anom) - hyp_anom return peridist, ecc, incl, periarg, node, true_anom, mean_anom, hyp_anom
def mode_S(temp, freqs):
    val = freqs / (2 * Kb * temp)
    return (1 / (2 * temp) * freqs * np.cosh(val) / np.sinh(val)
            - Kb * np.log(2 * np.sinh(val)))
def Z_QHO(beta):
    """Usage: returns the value of the partition function for the one-dimensional QHO"""
    return 0.5 / np.sinh(beta / 2)
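# Hedged numerical check (natural units hbar = omega = k_B = 1, as the formula above
# implies): the mean energy -d ln Z / d beta should match 0.5 / tanh(beta / 2).
import numpy as np

beta = 1.3
dbeta = 1e-6
E_numeric = -(np.log(Z_QHO(beta + dbeta)) - np.log(Z_QHO(beta - dbeta))) / (2 * dbeta)
E_analytic = 0.5 / np.tanh(beta / 2)
assert abs(E_numeric - E_analytic) < 1e-6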
def main(): ''' Simulation of a Faraday cage. The problem will be a solution to Laplace's Equation given potentials at the walls, and the cage in the middle where the potential will be zero. The SOR method will be used. ''' import numpy as np import matplotlib.pyplot as plt from matplotlib import cm from mpl_toolkits.mplot3d import Axes3D # Initialize parameters N = 60 # Number of grid points on each side L = 1. # Length of system h = L / (N - 1) # Grid spacing x = np.arange(N) * h y = np.arange(N) * h # Set omega for the SOR method omegaOpt = 2. / (1 + np.sin(np.pi / N)) # Theoretical optimum print(f'Theoretical optimum omega: {omegaOpt}') omega = float(input('Enter desired omega: ')) # Initialize the flux matrix and set the boundary and Faraday Cage conditions phi = np.zeros((N, N)) # Set initial guess for flux for i in range(1, N): for j in range(1, N): phi[i, j] = 4. / (np.pi * np.sinh(np.pi)) * \ np.sin(np.pi * x[i] / L) * np.sinh(np.pi * y[j] / L) phi[:, 0] = 0. # Left wall flux phi[:, -1] = 100. # Right wall flux for i, y_ in enumerate(y): # Top/bottom wall flux, varying linearly from left to right phi[0, i] = 100. * y_ phi[-1, i] = 100. * y_ # Loop until desired fractional change per iteration is achieved newphi = np.copy(phi) # Copy of the solution (used by Jacobi method) iterMax = N ** 2 # Set as max to avoid excessively lon runs change = np.empty(iterMax) changeDesired = 1e-4 # Stop when the change is less than desired print(f'Desired fractional change: {changeDesired}') for iter in range(iterMax): ## MAIN LOOP ## changeSum = 0. for i in range(1, N - 1): # Loop over interior nodes only for j in range(1, N - 1): phi[19, 19] = 0. phi[29, 19] = 0. phi[39, 19] = 0. phi[19, 29] = 0. phi[19, 39] = 0. phi[29, 39] = 0. phi[39, 29] = 0. phi[39, 39] = 0. temp = 0.25 * omega * (phi[i + 1, j] + phi[i - 1, j] + phi[i, j + 1] + phi[i, j - 1]) + (1 - omega) * \ phi[i, j] changeSum += (1 - phi[i, j] / temp) phi[i, j] = temp # Check if iteration change is small enough to halt the loop change[iter] = changeSum / (N - 2) ** 2 # Averaging the change sum if (iter + 1) % 10 < 1: print(f'After {iter + 1} iterations, fractional change = {change[iter]}') if change[iter] < changeDesired: print(f'Desired accuracy achieved after {iter + 1} iterations') print(f'Breaking out of main loop') # print(newphi == phi) break if np.abs(change[iter] - change[iter-1]) < changeDesired*.1: print(f'No change in results. Breaking after {iter} iterations.') break # Plot final estimate of potential as a contour plot levels = np.linspace(0, 100, 11) ct = plt.contour(x, y, np.flipud(np.rot90(phi)), levels) plt.clabel(ct, fmt='%1.2f') plt.xlabel('x') plt.ylabel('y') plt.title(f'Potential after {iter} iterations') fig = plt.figure() ax = fig.gca(projection='3d') Xp, Yp = np.meshgrid(x, y) ax.plot_surface(Yp, Xp, phi, rstride=1, cstride=1, cmap=cm.hot) ax.view_init(elev=30, azim=210) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel(r'$\Phi(x, y)$') plt.show()
import numpy as np

x = 1.0
y = 2.0

# Trigonometric
print(np.sin(x))
print(np.cos(x))
print(np.tan(x))
print(np.arcsin(x))
print(np.arccos(x))
print(np.arctan(x))
print(np.arctan2(x, y))
print(np.rad2deg(x))

# Hyperbolic
print(np.sinh(x))
print(np.cosh(x))
print(np.tanh(x))
def Pt(U0, E, L, betac, gamma_sqc):
    """return tunneling probability for square barrier"""
    return 1 / (np.cosh(betac * L)**2 + gamma_sqc * np.sinh(betac * L)**2)
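# Hedged usage sketch for Pt. The expressions for betac and gamma_sqc below are an
# assumption based on the standard square-barrier result
# T = [cosh^2(beta L) + ((k^2 - beta^2)/(2 k beta))^2 sinh^2(beta L)]^-1,
# written in units with hbar = m = 1; the definitions used elsewhere in the original
# code may differ.
import numpy as np

U0, E, L = 2.0, 0.8, 1.5                 # barrier height, particle energy, width
k = np.sqrt(2 * E)                       # wavenumber outside the barrier
beta = np.sqrt(2 * (U0 - E))             # decay constant inside the barrier
gamma_sq = ((k**2 - beta**2) / (2 * k * beta))**2

print(Pt(U0, E, L, beta, gamma_sq))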
def coth(x): return np.cosh(x) / np.sinh(x)
def CC70Solver(dt, Tmax, u) : t = 0. while (t < Tmax) : print ("t = ",t/kyr,"/",Tmax/kyr) # TT = np.zeros((M+1, M+1)) R = np.zeros((1, M+1)) a = np.zeros(M+1) b = np.zeros(M+1) c = np.zeros(M+1) for xm in range(1, M) : dx_m = (X[xm+1] - X[xm-1])/2. X_m = 0.5*(X[xm+1] + X[xm-1]) A_m = A(X_m) B_m = B(X_m) C_m = C(X_m) Q_m = Q(X_m) T_m = T(X_m) w_m = B_m/C_m*dx_m d_m = 1./w_m - 1/(np.exp(w_m) - 1) W_m = w_m/2.*1./np.sinh(w_m/2.) Wp_m = W_m*np.exp(w_m/2.) Wm_m = W_m*np.exp(-w_m/2.) dx_m_mh = X[xm] - X[xm-1] X_m_mh = 0.5*(X[xm] + X[xm-1]) A_m_mh = A(X_m_mh) B_m_mh = B(X_m_mh) C_m_mh = C(X_m_mh) Q_m_mh = Q(X_m_mh) w_m_mh = B_m_mh/C_m_mh*dx_m_mh d_m_mh = 1./w_m_mh - 1/(np.exp(w_m_mh) - 1) W_m_mh = w_m_mh/2.*1./np.sinh(w_m_mh/2.) Wp_m_mh = W_m_mh*np.exp(w_m_mh/2.) Wm_m_mh = W_m_mh*np.exp(-w_m_mh/2.) dx_m_ph = X[xm+1] - X[xm] X_m_ph = 0.5*(X[xm+1] + X[xm]) A_m_ph = A(X_m_ph) B_m_ph = B(X_m_ph) C_m_ph = C(X_m_ph) Q_m_ph = Q(X_m_ph) w_m_ph = B_m_ph/C_m_ph*dx_m_ph d_m_ph = 1./w_m_ph - 1/(np.exp(w_m_ph) - 1) W_m_ph = w_m_ph/2.*1./np.sinh(w_m_ph/2.) Wp_m_ph = W_m_ph*np.exp(w_m_ph/2.) Wm_m_ph = W_m_ph*np.exp(-w_m_ph/2.) a_m = dt/(A_m*dx_m)*C_m_mh/dx_m_mh*Wm_m_mh c_m = dt/(A_m*dx_m)*C_m_ph/dx_m_ph*Wp_m_ph b_m = 1 + dt/(A_m*dx_m)*(C_m_mh/dx_m_mh*Wp_m_mh + C_m_ph/dx_m_ph*Wm_m_ph) + dt/T_m a[xm] = -a_m b[xm] = b_m c[xm] = -c_m # TT[xm][xm] = b_m # TT[xm+1][xm] = -a_m # TT[xm][xm+1] = -c_m r_m = dt*Q_m + u[xm] R[0][xm] = r_m # print (R) # Cas xm = 0 dx_m = X[1] - X[0] X_m = X[0] A_m = A(X_m) B_m = B(X_m) C_m = C(X_m) Q_m = Q(X_m) T_m = T(X_m) w_m = B_m/C_m*dx_m d_m = 1./w_m - 1/(np.exp(w_m) - 1) W_m = w_m/2.*1./np.sinh(w_m/2.) Wp_m = W_m*np.exp(w_m/2.) Wm_m = W_m*np.exp(-w_m/2.) dx_m_ph = X[1] - X[0] X_m_ph = 0.5*(X[1] + X[0]) A_m_ph = A(X_m_ph) B_m_ph = B(X_m_ph) C_m_ph = C(X_m_ph) Q_m_ph = Q(X_m_ph) w_m_ph = B_m_ph/C_m_ph*dx_m_ph d_m_ph = 1./w_m_ph - 1/(np.exp(w_m_ph) - 1) W_m_ph = w_m_ph/2.*1./np.sinh(w_m_ph/2.) Wp_m_ph = W_m_ph*np.exp(w_m_ph/2.) Wm_m_ph = W_m_ph*np.exp(-w_m_ph/2.) c_m = dt/(A_m*dx_m)*C_m_ph/dx_m_ph*Wp_m_ph b_m = 1 + dt/(A_m*dx_m)*(C_m_mh/dx_m_mh*Wp_m_mh + C_m_ph/dx_m_ph*Wm_m_ph) + dt/T_m a[0] = 0. b[0] = b_m c[0] = -c_m r_m = dt*Q_m + u[0] R[0][0] = r_m # Cas xm = M dx_m = X[M] - X[M-1] X_m = X[M] A_m = A(X_m) B_m = B(X_m) C_m = C(X_m) Q_m = Q(X_m) T_m = T(X_m) w_m = B_m/C_m*dx_m d_m = 1./w_m - 1/(np.exp(w_m) - 1) W_m = w_m/2.*1./np.sinh(w_m/2.) Wp_m = W_m*np.exp(w_m/2.) Wm_m = W_m*np.exp(-w_m/2.) dx_m_mh = X[M] - X[M-1] X_m_mh = 0.5*(X[M] + X[M-1]) A_m_mh = A(X_m_mh) B_m_mh = B(X_m_mh) C_m_mh = C(X_m_mh) Q_m_mh = Q(X_m_mh) w_m_mh = B_m_mh/C_m_mh*dx_m_mh d_m_mh = 1./w_m_mh - 1/(np.exp(w_m_mh) - 1) W_m_mh = w_m_mh/2.*1./np.sinh(w_m_mh/2.) Wp_m_mh = W_m_mh*np.exp(w_m_mh/2.) Wm_m_mh = W_m_mh*np.exp(-w_m_mh/2.) a_m = dt/(A_m*dx_m)*C_m_mh/dx_m_mh*Wm_m_mh b_m = 1 + dt/(A_m*dx_m)*(C_m_mh/dx_m_mh*Wp_m_mh + C_m_ph/dx_m_ph*Wm_m_ph) + dt/T_m a[M] = -a_m b[M] = b_m c[M] = 0. r_m = dt*Q_m + u[M] R[0][M] = r_m # for ii in range(0, M+1) : # TT[ii][ii] = b[ii] # if (ii < M) : # TT[ii+1][ii] = a[ii] # TT[ii][ii+1] = c[ii] # Tinv = InverseTrigonalMatrix(TT) # U = ProductMatrix(Tinv, R.T) # U = U.T[0] # print (a, b, c) a = np.delete(a, 0) c = np.delete(c, -1) # del c[-1] U = TDMA(a,b,c,R[0]) # plt.figure() ## plt.semilogy(X/pc, u, c="blue") # plt.plot(X/pc, u/U, c="red") # plt.axhline(1.) # print ("ratio = ",sum(u)/sum(U)) u = U u[0] = 0. u[M] = u[M-1] # u = U t += dt u_end = u return u_end
T_list = [deltT * i for i in range(1, step + 1)]
FE_D = dict()
ti = 0
FE_list = np.zeros(step)
for T in T_list:
    beta = 1. / T  # -0.25*np.log(za)
    XX.append(T)
    kk = 2 * np.sinh(2 * beta) / (np.cosh(2 * beta)**2)
    dth = 0.001
    thelist = np.arange(0, pi, dth)
    val = 0.0
    for ii in thelist:
        val = val + np.log(0.5 * (1. + np.sqrt(1 - (kk**2) * (np.sin(ii))**2))) * dth
    val = val / 2 / pi
    # Onsager free energy per spin of the 2D Ising model (J = 1)
    FF = np.log(2 * np.cosh(2 * beta) * np.exp(val))
    FF = -FF * T
    YY.append(FF)
def exp_map(v, theta, eps=1e-6):
    # v: tangent vector in minkowski space
    # theta: parameter vector in hyperboloid with centroid coordinates
    # project vector v from tangent minkowski space -> hyperboloid
    nv = norm(v)
    return np.cosh(nv) * theta + np.sinh(nv) * v / (nv + eps)
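# Hedged sanity check for exp_map, assuming `norm` is the Euclidean norm and the
# hyperboloid model x0^2 - x1^2 - x2^2 = 1: mapping a purely spatial tangent vector
# at the "origin" theta = (1, 0, 0) should land (approximately) back on the hyperboloid.
import numpy as np

norm = np.linalg.norm                      # assumption about the helper used above
theta0 = np.array([1.0, 0.0, 0.0])         # hyperboloid origin
v = np.array([0.0, 0.3, 0.4])              # tangent vector at theta0 (time component 0)
p = exp_map(v, theta0)
minkowski = p[0]**2 - p[1]**2 - p[2]**2    # should be close to 1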
def g(x):
    """function whose integral is to be estimated"""
    y = 1 / (1 + (np.sinh(2 * x)) * (np.log(x)))
    return y
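# Hedged example: a crude Monte Carlo estimate of the integral of g over (0, 1);
# the interval and sample size are assumptions for illustration (the integrand
# stays finite and positive there).
import numpy as np

rng = np.random.default_rng(0)
samples = rng.uniform(1e-12, 1.0, size=100_000)   # avoid log(0) at the left endpoint
estimate = np.mean(g(samples))                    # interval length is 1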
segment_mean = utils.copy_docstring('tf.math.segment_mean', _segment_mean)

segment_min = utils.copy_docstring('tf.math.segment_min', _segment_min)

segment_prod = utils.copy_docstring('tf.math.segment_prod', _segment_prod)

segment_sum = utils.copy_docstring('tf.math.segment_sum', _segment_sum)

sigmoid = utils.copy_docstring('tf.math.sigmoid',
                               lambda x, name=None: scipy_special.expit(x))

sign = utils.copy_docstring('tf.math.sign', lambda x, name=None: np.sign(x))

sin = utils.copy_docstring('tf.math.sin', lambda x, name=None: np.sin(x))

sinh = utils.copy_docstring('tf.math.sinh', lambda x, name=None: np.sinh(x))

softmax = utils.copy_docstring('tf.math.softmax', _softmax)


def _softplus(x, name=None):  # pylint: disable=unused-argument
    if not JAX_MODE:
        # This is effectively inlining jax.nn.softplus, which is (currently)
        # defined as np.logaddexp(x, 0.).
        # Both are numerically fine (see discussion in b/146563881).
        return np.log1p(np.exp(-np.abs(x))) + np.maximum(x, 0.)
    return jax.nn.softplus(x)


softplus = utils.copy_docstring('tf.math.softplus', _softplus)
def inverse(x):
    x = np.asarray(x)
    return 2 * sigma * np.sinh(x * np.log(base))
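# Hedged round-trip check: `inverse` above undoes a forward arcsinh-log transform of
# the form arcsinh(y / (2 * sigma)) / log(base). The `forward` helper and the values
# of `sigma` and `base` are assumptions for illustration.
import numpy as np

sigma, base = 1.0, 10.0

def forward(y):
    return np.arcsinh(np.asarray(y) / (2 * sigma)) / np.log(base)

y = np.array([-100.0, -1.0, 0.0, 1.0, 100.0])
np.testing.assert_allclose(inverse(forward(y)), y, rtol=1e-12)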
"rad2deg": pandas_udf(lambda s: np.rad2deg(s), DoubleType(), PandasUDFType.SCALAR), "radians": F.radians, "reciprocal": pandas_udf(lambda s: np.reciprocal(s), DoubleType(), PandasUDFType.SCALAR), "rint": pandas_udf(lambda s: np.rint(s), DoubleType(), PandasUDFType.SCALAR), "sign": lambda c: F.when(c == 0, 0).when(c < 0, -1).otherwise(1), "signbit": lambda c: F.when(c < 0, True).otherwise(False), "sin": F.sin, "sinh": pandas_udf(lambda s: np.sinh(s), DoubleType(), PandasUDFType.SCALAR), "spacing": pandas_udf(lambda s: np.spacing(s), DoubleType(), PandasUDFType.SCALAR), "sqrt": F.sqrt, "square": pandas_udf(lambda s: np.square(s), DoubleType(), PandasUDFType.SCALAR), "tan": F.tan, "tanh": pandas_udf(lambda s: np.tanh(s), DoubleType(), PandasUDFType.SCALAR), "trunc": pandas_udf(lambda s: np.trunc(s), DoubleType(), PandasUDFType.SCALAR), }) binary_np_spark_mappings = OrderedDict({
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 10, 50)

plt.subplot(231)
plt.plot(x, np.sin(x))
plt.subplot(232)
plt.plot(x, np.cos(x))
plt.subplot(233)
plt.plot(x, np.tan(x))
plt.subplot(234)
plt.plot(x, np.sinh(x))
plt.subplot(235)
plt.plot(x, np.cosh(x))
plt.subplot(236)
plt.plot(x, np.tanh(x))

plt.show()
def _kepler_equation_hyper(F, M, ecc): return -F + ecc * np.sinh(F) - M
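# Hedged usage sketch: solve the hyperbolic Kepler equation M = ecc*sinh(F) - F for F
# with a Newton/secant iteration, using the residual defined above. The starting guess
# and the sample values of M and ecc are illustrative assumptions.
import numpy as np
from scipy.optimize import newton

M, ecc = 2.5, 1.3
F = newton(_kepler_equation_hyper, np.arcsinh(M / ecc), args=(M, ecc))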
def transform_non_affine(self, a): return np.arctan(np.sinh(a))
def testComplexOps(self): for dtype in self.complex_types: self._assertOpOutputMatchesExpected( math_ops.acosh, np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype), expected=np.arccosh( np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.asinh, np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype), expected=np.arcsinh( np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.atanh, np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype), expected=np.arctanh( np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.cosh, np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype), expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.sinh, np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype), expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.exp, np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype), expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.expm1, np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype), expected=np.expm1( np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.reciprocal, np.array([[1, 2j, 2 + 3j]], dtype=dtype), expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype)) self._assertOpOutputMatchesExpected(math_ops.log, np.array([[5j, 3 - 2j]], dtype=dtype), expected=np.log( np.array([[5j, 3 - 2j]], dtype=dtype))) self._assertOpOutputMatchesExpected(math_ops.sin, np.array([[5j, 3 - 2j]], dtype=dtype), expected=np.sin( np.array([[5j, 3 - 2j]], dtype=dtype))) self._assertOpOutputMatchesExpected(math_ops.cos, np.array([[5j, 3 - 2j]], dtype=dtype), expected=np.cos( np.array([[5j, 3 - 2j]], dtype=dtype))) # TODO(b/34703906): improve log1p implementation and make tolerance # tighter. 
self._assertOpOutputMatchesExpected( math_ops.log1p, np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype), expected=np.log1p( np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype))) val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype) self._assertOpOutputMatchesExpected(math_ops.rsqrt, val, expected=1 / np.sqrt(val)) self._assertOpOutputMatchesExpected(math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val))) self._assertOpOutputMatchesExpected(math_ops.sqrt, val, expected=np.sqrt(val)) self._assertOpOutputMatchesExpected( math_ops.tanh, np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype), expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.tan, np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype), expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype))) ctypes = {np.complex64: np.float32} self._assertOpOutputMatchesExpected( math_ops.abs, np.array([[3 - 4j, -1j, np.inf]], dtype=dtype), expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype])) self._assertOpOutputMatchesExpected(math_ops.negative, np.array([[-1 + 2j, -3j]], dtype=dtype), expected=np.array( [[1 - 2j, 3j]], dtype=dtype)) self._assertOpOutputMatchesExpected( math_ops.square, np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype), expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2) self._assertOpOutputMatchesExpected( array_ops.zeros_like, np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype), expected=np.array([[0, 0], [0, 0]], dtype=dtype)) self._assertOpOutputMatchesExpected( array_ops.ones_like, np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype), expected=np.array([[1, 1], [1, 1]], dtype=dtype)) self._assertOpOutputMatchesExpected( math_ops.angle, np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype), expected=np.angle( np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype))) self._assertOpOutputMatchesExpected( math_ops.conj, np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype), expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype)) self._assertOpOutputMatchesExpected( math_ops.imag, np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype), expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype])) self._assertOpOutputMatchesExpected( math_ops.real, np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype), expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
for mm in Fluorophores:
    if mm.find('SC') > -1:
        markers.append(mm)
    else:
        identifier = '$P' + str(channels.index(mm) + 1) + 'S'
        markers.append(meta[identifier] + ' (' + mm[mm.find('-') + 1:-2] + ')')
AllData[len(AllData)] = df[Fluorophores]
AllData[len(AllData) - 1].columns = markers

pSTAT1 = pd.DataFrame(np.zeros((96, 5)),
                      columns=['pSTAT1 in B cells', 'pSTAT1 in CD8+ T cells', 'pSTAT1 in CD4+ T cells',
                               'pSTAT1 in Small B cells', 'pSTAT1 in Large B cells'],
                      index=FileNames)
for ff in FileNames:
    index = (AllData[FileNames.index(ff)]['CD19 (PE ( 561 ))'] > 600) & \
            (AllData[FileNames.index(ff)]['IA_IE (BV510)'] > 2000)
    # use .loc[row, column] so the assignment is not a chained-indexing no-op
    pSTAT1.loc[ff, 'pSTAT1 in B cells'] = np.sinh(np.mean(np.arcsinh(
        AllData[FileNames.index(ff)][index]['pSTAT1 (FITC)'])))
    index = (AllData[FileNames.index(ff)]['CD3 (PE-Cy5)'] > 200) & \
            (AllData[FileNames.index(ff)]['CD8 (APC-Cy7)'] > 3000)
    pSTAT1.loc[ff, 'pSTAT1 in CD8+ T cells'] = np.sinh(np.mean(np.arcsinh(
        AllData[FileNames.index(ff)][index]['pSTAT1 (FITC)'])))
    index = (AllData[FileNames.index(ff)]['CD3 (PE-Cy5)'] > 10) & \
            (AllData[FileNames.index(ff)]['CD4 (APC)'] > 1000)
    pSTAT1.loc[ff, 'pSTAT1 in CD4+ T cells'] = np.sinh(np.mean(np.arcsinh(
        AllData[FileNames.index(ff)][index]['pSTAT1 (FITC)'])))
    # Small and large B Cells
    # Get average cell size (average FSC) to use as a threshold for small and large cells
    index = (AllData[FileNames.index(ff)]['CD19 (PE ( 561 ))'] > 600) & \
            (AllData[FileNames.index(ff)]['IA_IE (BV510)'] > 2000)
    well_threshold_size = AllData[FileNames.index(ff)][index]['FSC-A'].quantile(q=0.8)
def htrig_fun(Z, **params): return Z * np.sinh(Z) - np.cosh(Z) + 1
def d_htrig_fun(Z, **params): return np.sinh(Z)
def pzPtEta(_pt, _eta):
    pz = _pt * np.sinh(_eta)
    return pz
def my_func(x):
    # x will be a numpy array with the contents of the placeholder below
    return np.sinh(x)
def T_ana(lambda_, a=1):
    k = np.sqrt(lambda_)
    eta = np.sqrt(1 - lambda_)
    return (1 + ((k**2 + eta**2) / (2 * k * eta))**2 * np.sinh(eta * a)**2)**-1
import numpy as np

"""
Analytical calculations for the 2x2 Ising model with temperature T = 1
"""

# System specifics
N_spin = 4  # Number of spins
T = 1       # Temperature
Z = 4 * np.cosh(8 / T) + 12  # Partition function

# Expectation values
E = (-(8 * np.sinh(8 / T)) / (np.cosh(8 / T) + 3)) / N_spin  # Energy per spin
M = ((1 / Z) * (-4 * np.exp(8 / T) - 8 + 8 + 4 * np.exp(8 / T))) / N_spin  # Magnetization per spin
absM = ((2 * np.exp(8 / T) + 4) / (np.cosh(8 / T) + 3)) / N_spin  # Absolute magnetization per spin
# Heat capacity per spin*boltzmann constant; note the prefactor is 64/(N_spin*T**2),
# not (64/N_spin)*T**2 (the two coincide at T = 1)
C_V = (64 / (N_spin * T**2)) * ((1 + 3 * np.cosh(8 / T)) / ((np.cosh(8 / T) + 3)**2))
chi = ((8 / T) * ((np.exp(8 / T) + 1) / (np.cosh(8 / T) + 3))) / N_spin  # Susceptibility per spin*J

# Printing the results
print("=" * 20)
print("Expectation Values Analytic")
print("=" * 20)
print("E:          M:          C_V:        chi:        absM:")
print("%.7f" % E, "%.7f" % M, "%.7f" % C_V, "%.7f" % chi, "%.7f" % absM)
'rad2deg': F.pandas_udf(lambda s: np.rad2deg(s), DoubleType()),
'radians': F.radians,
'reciprocal': F.pandas_udf(lambda s: np.reciprocal(s), DoubleType()),
'rint': F.pandas_udf(lambda s: np.rint(s), DoubleType()),
'sign': lambda c: F.when(c == 0, 0).when(c < 0, -1).otherwise(1),
'signbit': lambda c: F.when(c < 0, True).otherwise(False),
'sin': F.sin,
'sinh': F.pandas_udf(lambda s: np.sinh(s), DoubleType()),
'spacing': F.pandas_udf(lambda s: np.spacing(s), DoubleType()),
'sqrt': F.sqrt,
'square': F.pandas_udf(lambda s: np.square(s), DoubleType()),
'tan': F.tan,
'tanh': F.pandas_udf(lambda s: np.tanh(s), DoubleType()),
'trunc': F.pandas_udf(lambda s: np.trunc(s), DoubleType()),
})

binary_np_spark_mappings = OrderedDict({
def fock_tensor( S, alpha, cutoff, choi_r=np.arcsinh(1.0), check_symplectic=True, sf_order=False, rtol=1e-05, atol=1e-08, ): r""" Calculates the Fock representation of a Gaussian unitary parametrized by the symplectic matrix S and the displacements alpha up to cutoff in Fock space. Args: S (array): symplectic matrix alpha (array): complex vector of displacements cutoff (int): cutoff in Fock space choi_r (float): squeezing parameter used for the Choi expansion check_symplectic (boolean): checks whether the input matrix is symplectic sf_order (boolean): reshapes the tensor so that it follows the sf ordering of indices rtol (float): the relative tolerance parameter used in `np.allclose` atol (float): the absolute tolerance parameter used in `np.allclose` Return: (array): Tensor containing the Fock representation of the Gaussian unitary """ # Check the matrix is symplectic if check_symplectic: if not is_symplectic(S, rtol=rtol, atol=atol): raise ValueError("The matrix S is not symplectic") # And that S and alpha have compatible dimensions m, _ = S.shape l = m // 2 if l != len(alpha): raise ValueError( "The matrix S and the vector alpha do not have compatible dimensions" ) # Check if S corresponds to an interferometer, if so use optimized routines if np.allclose(S @ S.T, np.identity(m), rtol=rtol, atol=atol) and np.allclose(alpha, 0, rtol=rtol, atol=atol): reU = S[:l, :l] imU = S[:l, l:] if np.allclose(imU, 0, rtol=rtol, atol=atol): Ub = np.block([[0 * reU, -reU], [-reU.T, 0 * reU]]) tensor = interferometer_real(Ub, cutoff) else: U = reU - 1j * imU Ub = np.block([[0 * U, -U], [-U.T, 0 * U]]) tensor = interferometer(Ub, cutoff) else: # Construct the covariance matrix of l two-mode squeezed vacua pairing modes i and i+l ch = np.cosh(choi_r) * np.identity(l) sh = np.sinh(choi_r) * np.identity(l) zh = np.zeros([l, l]) Schoi = np.block([[ch, sh, zh, zh], [sh, ch, zh, zh], [zh, zh, ch, -sh], [zh, zh, -sh, ch]]) # And then its Choi expanded symplectic S_exp = expand(S, list(range(l)), 2 * l) @ Schoi # And this is the corresponding covariance matrix cov = S_exp @ S_exp.T alphat = np.array(list(alpha) + ([0] * l)) x = 2 * alphat.real p = 2 * alphat.imag mu = np.concatenate([x, p]) tensor = state_vector( mu, cov, normalize=False, cutoff=cutoff, hbar=2, check_purity=False, choi_r=choi_r, ) if sf_order: sf_indexing = tuple(chain.from_iterable([[i, i + l] for i in range(l)])) return tensor.transpose(sf_indexing) return tensor