def phase(filtered, carf, sampf, tarr, bitclock, baudrate):
    uphasearr = []  # Arrays to hold the entire unfiltered phase
    lphasearr = []  # for both the upper and lower sideband frequencies
    deltaf = 50     # Determined from the baud rate and modulation scheme (MSK)
    window = 125    # Window the phase is calculated and averaged over for a single bit (1/6 of a full bit);
                    # this bit phase is in turn averaged over the whole second later on
    phasebitsize = len(filtered[0]) // window // baudrate  # data points per bit in the phase time series (6)
    rawbitsize = len(filtered[0]) // baudrate               # data points per bit in the raw signal time series (750)
    bins = len(filtered[0]) // window - phasebitsize        # drop a full bit's worth of points (6) to stay in sync with bitclock
    time = np.array(tarr)  # work on a copy so the original 'tarr' array is left untouched
    for k in range(0, len(filtered)):
        modu = carf[k] + deltaf  # the sideband frequencies used in the
        modl = carf[k] - deltaf  # MSK modulation scheme
        startbin = (np.abs(time - bitclock[k])).argmin()  # start measuring the phase at the measured bitclock
        endbin = startbin - rawbitsize  # negative index so the time series splits evenly into chunks 1/6 of a bit long
        uy = filtered[k] * sin(2.0 * pi * modu * time)  # compute the phase in segments
        ux = filtered[k] * cos(2.0 * pi * modu * time)  # 1/6 of a bit in length
        uysum = np.split(uy[startbin:endbin], bins)     # summed over each segment
        uxsum = np.split(ux[startbin:endbin], bins)     # for the phase measurement
        uphase = -arctan(np.sum(uysum, axis=1) / np.sum(uxsum, axis=1))  # upper sideband phase
        ly = filtered[k] * sin(2.0 * pi * modl * time)
        lx = filtered[k] * cos(2.0 * pi * modl * time)
        lysum = np.split(ly[startbin:endbin], bins)
        lxsum = np.split(lx[startbin:endbin], bins)
        lphase = -arctan(np.sum(lysum, axis=1) / np.sum(lxsum, axis=1))  # lower sideband phase
        lphasearr.extend([lphase])  # append the arrays of upper
        uphasearr.extend([uphase])  # and lower phase for each frequency
    return uphasearr, lphasearr  # each element in the array has 1194 data points
def binary_ephem(P, T, e, a, i, O_node, o_peri, t):
    # Degrees to radians
    d2rad = pi/180.
    rad2d = 180./pi
    i = i*d2rad
    O_node = (O_node*d2rad) % (2*pi)
    o_peri = (o_peri*d2rad) % (2*pi)
    # Mean anomaly
    M = ((2.0*pi)/P)*(t - T)   # radians
    if M > 2*pi:
        M = M - 2*pi
    M = M % (2*pi)
    # Eccentric anomaly (first approximation); the second-order term is (e**2/2)*sin(2M)
    E0 = M + e*sin(M) + (e**2/2.) * sin(2.0*M)
    for itera in range(15):
        M0 = E0 - e*sin(E0)
        E0 = E0 + (M - M0)/(1 - e*cos(E0))
    true_anom = 2.0*arctan(sqrt((1+e)/(1-e))*tan(E0/2.0))
    # radius = (a*(1-e**2))/(1+e*cos(true_anom))
    radius = a*(1 - e*cos(E0))
    theta = arctan(tan(true_anom + o_peri)*cos(i)) + O_node
    rho = radius * (cos(true_anom + o_peri)/cos(theta - O_node))
    # returns rho ("), theta (deg), eccentric anomaly (deg), true anomaly (deg)
    return rho, (theta*rad2d) % 360.  # , E0*rad2d, M*rad2d, true_anom*rad2d
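# A minimal usage sketch for binary_ephem under the conventions the code suggests:
# P (period) and T (epoch of periastron) in the same time unit as t, e the eccentricity,
# a the semi-major axis in arcsec, and i, O_node, o_peri in degrees. The element values
# below are invented purely for illustration.
rho_ex, theta_ex = binary_ephem(P=79.9, T=1955.0, e=0.52, a=7.5,
                                i=141.0, O_node=204.0, o_peri=231.0, t=2000.0)
print(rho_ex, theta_ex)   # projected separation (arcsec) and position angle (deg)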
def log_reconstruction_parameters(self):
    """
    h - object size
    z - sample-to-detector distance
    pix - number of pixels
    del_x_d - pixel size
    """
    dx_d = CXP.experiment.dx_d
    x = (CXP.p/2.)*dx_d
    l = energy_to_wavelength(CXP.experiment.energy)
    h = min(CXP.experiment.beam_size)
    pix = CXP.p
    z = CXP.experiment.z
    NF = lambda nh, nl, nz: nh**2./(nl*nz)
    del_x_s = lambda l, z, x: (l*z)/(2.*x)
    nNF = NF(h, l, z)
    OS = lambda l, z, x, h, pix: ((pix*del_x_s(l, z, x))**2.)/(h**2.)
    nOS = OS(l, z, x, h, pix)
    NA = sp.sin(sp.arctan(x/z))
    axial_res = 2*l/NA**2.
    lateral_res = l/(2.*NA)
    CXP.log.info('Fresnel number: {:2.2e}'.format(nNF))
    CXP.log.info('Oversampling: {:3.2f}'.format(nOS))
    CXP.log.info('Detector pixel size: {:3.2f} [micron]'.format(1e6*dx_d))
    CXP.log.info('Detector width: {:3.2f} [mm]'.format(1e3*pix*dx_d))
    CXP.log.info('Sample pixel size: {:3.2f} [nm]'.format(1e9*del_x_s(l, z, x)))
    CXP.log.info('Sample FOV: {:3.2f} [micron]'.format(1e6*del_x_s(l, z, x)*pix))
    CXP.log.info('Numerical aperture: {:3.2f}'.format(NA))
    CXP.log.info('Axial resolution: {:3.2f} [micron]'.format(1e6*axial_res))
    CXP.log.info('Lateral resolution: {:3.2f} [nm]'.format(1e9*lateral_res))
    self.slow_db_queue['fresnel_number'] = (nNF,)
    self.slow_db_queue['oversampling'] = (nOS,)
    self.slow_db_queue['dx_s'] = (del_x_s(l, z, x),)
    self.slow_db_queue['sample_fov'] = (del_x_s(l, z, x)*pix,)
    self.slow_db_queue['numerical_aperture'] = (NA,)
    self.slow_db_queue['axial_resolution'] = (axial_res,)
def __init__(self, yaml):
    self._tf_listener = tf.TransformListener()
    self._grid = SearchGrid(10, 10, 2.0, 2.0)
    camera = yaml.sensors[0].camera
    self._fov_h = camera.horizontal_fov
    self._fov_v = 2.0 * scipy.arctan(scipy.tan(self._fov_h / 2.0) *
                                     (camera.image_height / camera.image_width))
    self._fov_vectors = fov_vectors(self._fov_h, self._fov_v)
def distance_fn(p1, l1, p2, l2, units='m'):
    """ Simplified Vincenty formula. Returns the great-circle distance between two
    coordinates given as (latitude, longitude) in radians. """
    assert (units in ['km', 'm', 'nm']), 'Units must be km, m (miles), or nm (nautical miles)'
    if units == 'km':
        r = 6372.7974775959065
    elif units == 'm':
        r = 6372.7974775959065 * 0.621371   # mean Earth radius in statute miles
    elif units == 'nm':
        r = 6372.7974775959065 * 0.539957   # mean Earth radius in nautical miles
    # compute Vincenty formula
    l = abs(l1 - l2)
    num = scipy.sqrt(((scipy.cos(p2) * scipy.sin(l)) ** 2) +
                     (((scipy.cos(p1) * scipy.sin(p2)) -
                       (scipy.sin(p1) * scipy.cos(p2) * scipy.cos(l))) ** 2))
    den = scipy.sin(p1) * scipy.sin(p2) + scipy.cos(p1) * scipy.cos(p2) * scipy.cos(l)
    theta = scipy.arctan(num / den)
    distance = abs(int(round(r * theta)))
    return distance
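# Usage sketch for distance_fn. The formula applies sin/cos directly to the inputs,
# so latitudes (p1, p2) and longitudes (l1, l2) are assumed to be in radians; the
# coordinates below are arbitrary illustration values.
import math
lat1, lon1 = math.radians(40.7), math.radians(-74.0)
lat2, lon2 = math.radians(51.5), math.radians(-0.1)
print(distance_fn(lat1, lon1, lat2, lon2, units='km'))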
def velocity(self, mass, time=0., anomaly_offset=1e-3): """Returns the radial velocities and proper motions in km/s. Returns an (N, 2) array with the radial velocities and the proper motions due to the binary orbital motions of the N binaries. Arguments: - `mass`: primary mass of the star in solar masses. - `time`: """ nbinaries = self.size mean_anomaly = (self.phase + time / self.period) * 2. * sp.pi ecc_anomaly = mean_anomaly old = sp.zeros(nbinaries) - 1. count_iterations = 0 while (abs(ecc_anomaly - old) > anomaly_offset).any() and count_iterations < 20: old = ecc_anomaly ecc_anomaly = ecc_anomaly - (ecc_anomaly - self.eccentricity * sp.sin(ecc_anomaly) - mean_anomaly) / (1. - self.eccentricity * sp.cos(ecc_anomaly)) count_iterations += 1 theta_orb = 2. * sp.arctan(sp.sqrt((1. + self.eccentricity) / (1. - self.eccentricity)) * sp.tan(ecc_anomaly / 2.)) seperation = (1 - self.eccentricity ** 2) / (1 + self.eccentricity * sp.cos(theta_orb)) thdot = 2 * sp.pi * sp.sqrt(1 - self.eccentricity ** 2) / seperation ** 2 rdot = seperation * self.eccentricity * thdot * sp.sin(theta_orb) / (1 + self.eccentricity * sp.cos(theta_orb)) vtotsq = (thdot * seperation) ** 2 + rdot ** 2 vlos = (thdot * seperation * sp.sin(self.theta - theta_orb) + rdot * sp.cos(self.theta - theta_orb)) * sp.sin(self.inclination) vperp = sp.sqrt(vtotsq - vlos ** 2) velocity = sp.array([vlos, vperp]) * self.semi_major(mass) / (self.period * (1 + 1 / self.mass_ratio)) * 4.74057581 return velocity
def TB_U_exceso(self, T, P): """Método de cálculo de la energía interna de exceso mediante la ecuación de estado de Trebble-Bishnoi""" a, b, c, d, q1, q2 = self.TB_lib(T, P) v = self.TB_V(T, P) z = P * v / R_atml / T A = a * P / R_atml ** 2 / T ** 2 B = b * P / R_atml / T u = 1 + c / b t = 1 + 6 * c / b + c ** 2 / b ** 2 + 4 * d ** 2 / b ** 2 tita = abs(t) ** 0.5 if t >= 0: lamda = log((2 * z + B * (u - tita)) / (2 * z + B * (u + tita))) else: lamda = 2 * arctan((2 * z + u * B) / B / tita) - pi delta = v ** 2 + (b + c) * v - b * c - d ** 2 beta = 1.0 + q2 * (1 - self.tr(T) + log(self.tr(T))) da = -q1 * a / self.Tc if self.tr(T) <= 1.0: db = b / beta * (1 / T - 1 / self.Tc) else: db = 0 U = lamda / b / tita * (a - da * T) + db * T * ( -R_atml * T / (v - b) + a / b ** 2 / t * ((v * (3 * c + b) - b * c + c ** 2 - 2 * d ** 2) / delta + (3 * c + b) * lamda / b / tita) ) # atm*l/mol return unidades.Enthalpy(U * 101325 / 1000 / self.peso_molecular, "Jkg")
def polarZ(z):
    """Return the polar form (modulus, angle) of a complex number."""
    if z == 0:
        return (0, 0)
    a = z.real
    b = z.imag
    # arctan2 keeps the correct quadrant and avoids a divide-by-zero when a == 0
    # (the original arctan(b/a) only covered the right half-plane)
    return (sp.hypot(a, b), sp.arctan2(b, a))
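# Quick spot checks of polarZ (purely illustrative):
print(polarZ(1 + 1j))    # ~(1.414, 0.785): modulus sqrt(2), angle pi/4
print(polarZ(-1 + 0j))   # angle pi for a point on the negative real axis
print(polarZ(0))         # (0, 0) by convention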
def pix2sky(header, x, y):
    hdr_info = parse_header(header)
    x0 = x - hdr_info[1][0] + 1.   # Plus 1: python -> image convention
    y0 = y - hdr_info[1][1] + 1.
    x0 = x0.astype(scipy.float64)
    y0 = y0.astype(scipy.float64)
    x = hdr_info[2][0, 0]*x0 + hdr_info[2][0, 1]*y0
    y = hdr_info[2][1, 0]*x0 + hdr_info[2][1, 1]*y0
    if hdr_info[3] == "DEC":
        a = x.copy()
        x = y.copy()
        y = a.copy()
        ra0 = hdr_info[0][1]
        dec0 = hdr_info[0][0]/raddeg
    else:
        ra0 = hdr_info[0][0]
        dec0 = hdr_info[0][1]/raddeg
    if hdr_info[5] == "TAN":
        r_theta = scipy.sqrt(x*x + y*y)/raddeg
        theta = arctan(1./r_theta)
        phi = arctan2(x, -1.*y)
    elif hdr_info[5] == "SIN":
        r_theta = scipy.sqrt(x*x + y*y)/raddeg
        theta = arccos(r_theta)
        phi = arctan2(x, -1.*y)   # fixed typo: was 'artan2'
    ra = ra0 + raddeg*arctan2(-1.*cos(theta)*sin(phi - pi),
                              sin(theta)*cos(dec0) - cos(theta)*sin(dec0)*cos(phi - pi))
    dec = raddeg*arcsin(sin(theta)*sin(dec0) + cos(theta)*cos(dec0)*cos(phi - pi))
    return ra, dec
def ccd_stats(energy, npix, pix_size, z_sam_det):
    NA = sp.sin(sp.arctan(0.5*npix*pix_size/z_sam_det))
    l = energy_to_wavelength(energy)
    axial_res = 2*l/NA**2.
    lateral_res = l/(2.*NA)
    print 'NA: %1.2e\nAxial resolution: %1.2e\nLateral resolution: %1.2e' % (NA, axial_res, lateral_res)
def ecef2geodetic(x, y, z):
    """Convert ECEF coordinates to geodetic.
    J. Zhu, "Conversion of Earth-centered Earth-fixed coordinates
    to geodetic coordinates," IEEE Transactions on Aerospace and
    Electronic Systems, vol. 30, pp. 957-961, 1994."""
    a = 6378.137
    b = 6356.7523142
    esq = 6.69437999014 * 0.001
    e1sq = 6.73949674228 * 0.001
    # returns h in kilometres
    r = sqrt(x * x + y * y)
    Esq = a * a - b * b
    F = 54 * b * b * z * z
    G = r * r + (1 - esq) * z * z - esq * Esq
    C = (esq * esq * F * r * r) / (pow(G, 3))
    S = (1 + C + sqrt(C * C + 2 * C)) ** (1. / 3.)   # cube root, as in Zhu (1994); the original used sqrt here
    P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)
    Q = sqrt(1 + 2 * esq * esq * P)
    r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a * (1 + 1.0 / Q) -
                                          P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)
    U = sqrt(pow((r - esq * r_0), 2) + z * z)
    V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)
    Z_0 = b * b * z / (a * V)
    h = U * (1 - b * b / (a * V))
    lat = arctan((z + e1sq * Z_0) / r)
    lon = arctan2(y, x)
    return degrees(lat), degrees(lon), h
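# Illustrative call, assuming the math helpers (sqrt, arctan, arctan2, degrees) are
# imported in this module as the function body implies. Units follow the constants
# above: ECEF input in kilometres, output latitude/longitude in degrees and height
# in kilometres. The point below is arbitrary.
lat_ex, lon_ex, h_ex = ecef2geodetic(4000.0, 3000.0, 3500.0)
print(lat_ex, lon_ex, h_ex)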
def TB_Cv_exceso(self, T, P): """Método de cálculo de la capacidad calorífica a volumen constante de exceso mediante la ecuación de estado de Trebble-Bishnoi""" a, b, c, d, q1, q2=self.TB_lib(T, P) v=self.TB_V(T, P) z=P*v/R_atml/T t=1+6*c/b+c**2/b**2+4*d**2/b**2 tita=abs(t)**0.5 A=a*P/R_atml**2/T**2 B=b*P/R_atml/T u=1+c/b delta=v**2+(b+c)*v-b*c-d**2 beta=1.+q2*(1-self.tr(T)+log(self.tr(T))) da=-q1*a/self.Tc dda=q1**2*a/self.Tc**2 if self.tr(T)<=1.0: db=b/beta*(1/T-1/self.Tc) ddb=-q2*b/beta/T**2 else: db=0 ddb=0 dt=-db/b**2*(6*c+2*c**2/b+8*d**2/b) dtita=abs(dt)/20 if t>=0: lamda=log((2*z+B*(u-tita))/(2*z+B*(u+tita))) dlamda=(db-db*tita-b*dtita)/(2*v+b+c-b*tita)-(db+db*tita+b*dtita)/((2*v+b+c+b*tita)) else: lamda=2*arctan((2*z+u*B)/B/tita)-pi dlamda=2/(1+((2*v+b+c)/b/tita)**2)*(db/b/tita-(2*v+b+c)*(db/b**2/tita+dtita/b/tita**2)) Cv=1/b/tita*(dlamda*(a-da*T)-lamda*dda*T-lamda*(a-da*T)*(db/b+dtita/tita))+(ddb*T+db)*(-R_atml*T/(v-b)+a/b**2/t*((v*(3*c+b)-b*c+c**2-2*d**2)/delta+(3*c+b)*lamda/b/tita))+db*T*(-R_atml/(v-b)-R_atml*T*db/(v-b)**2+1/b**2/t*(da-2*a*db/b-a*dt/t)*((v*(3*c+b)-b*c+c**2-2*d**2)/delta+(3*c+b)*lamda/b/tita)+a/b**2/t*(db*(v-c)*(v**2-2*c*v-c**2+d**2)/delta**2+db*lamda/b/tita+(3*c+b)/b/tita*(dlamda-lamda*(db/b+dtita/tita)))) return unidades.SpecificHeat(Cv*101325/1000/self.peso_molecular, "JkgK")
def joinT(yb, ya, xb, xa):
    dya = yb - ya + 0.
    dxa = xb - xa + 0.
    if dxa == 0 and dya > 0:
        tAn = math.pi/2.   # straight up (the original returned pi/4 here)
        return tAn
    elif dxa == 0 and dya <= 0:
        tAn = (3./2.)*math.pi
        return tAn
    elif dya == 0 and dxa >= 0:
        tAn = 0.
        return tAn
    elif dya == 0 and dxa < 0:
        tAn = math.pi
        return tAn
    else:
        tAn = arctan(dya/dxa)
    # get correct quadrant
    if dya < 0 and dxa > 0:
        tAn = tAn + 2*math.pi
    elif dya < 0 and dxa < 0:
        tAn = tAn + math.pi
    elif dya > 0 and dxa < 0:
        tAn = tAn + math.pi
    return tAn
    # TBN
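# For reference, numpy's arctan2 does the same quadrant bookkeeping in one call, so
# joinT(yb, ya, xb, xa) should agree with the sketch below except in the degenerate
# dxa == dya == 0 case. joinT_ref is a hypothetical helper added here for comparison only.
import math
import numpy as np

def joinT_ref(yb, ya, xb, xa):
    return np.arctan2(yb - ya, xb - xa) % (2 * math.pi)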
def drawlabel(self, name, Preferences, t, W, label, unit): """ Draw annotation for isolines name: name of isoline Preferences: Configparse instance of pychemqt preferences t: x array of line W: y array of line label: text value to draw unit: text units to draw """ if Preferences.getboolean("Psychr", name+"label"): tmin = unidades.Temperature(Preferences.getfloat("Psychr", "isotdbStart")).config() tmax = unidades.Temperature(Preferences.getfloat("Psychr", "isotdbEnd")).config() x = tmax-tmin wmin = Preferences.getfloat("Psychr", "isowStart") wmax = Preferences.getfloat("Psychr", "isowEnd") y = wmax-wmin i = 0 for ti, wi in zip(t, W): if tmin <= ti <= tmax and wmin <= wi <= wmax: i += 1 label = str(label) if Preferences.getboolean("Psychr", name+"units"): label += unit pos = Preferences.getfloat("Psychr", name+"position") p = int(i*pos/100-1) rot = arctan((W[p]-W[p-1])/y/(t[p]-t[p-1])*x)*360/2/pi self.diagrama2D.axes2D.annotate(label, (t[p], W[p]), rotation=rot, size="small", ha="center", va="center")
def myArctan(x, y):
    alpha = sp.arctan(y/x)
    if x < 0:
        alpha += sp.pi
    elif y < 0:
        alpha += 2*sp.pi
    # print 'myArctan: ', x, y, alpha
    return alpha
def grassmann_logmap(A,p, tol=1e-13, skip_orthog_check=False): ''' Computes the manifold log-map of (nxk) orthogonal matrix A, centered at the point p (i.e. the "pole"), which is also an (nxk) orthogonal matrix. The log-map takes a point on the manifold and maps it to the tangent space which is centered at a given pole. The dimension of the tangent space is k(n-k), and points A,p are on Gr(n,k). @param A: The orthogonal matrix A, representing a point on the grassmann manifold. @param p: An orthogonal matrix p, representing a point on the grassmann manifold where the tangent space will be formed. Also called the "pole". @param tol: Numerical tolerance used to set singular values to exactly zero when within this tolerance of zero. @param skip_orthog_check: Set to True if you can guarantee that the inputs are already orthogonal matrices. Otherwise, this function will check, and if A and/or p are not orthogonal, the closest orthogonal matrix to A (or p) will be used. @return: A tuple (log_p(A), ||log_p(A)|| ), representing the tangent-space mapping of A, and the distance from the mapping of A to the pole in the tangent space. ''' #check that A and p are orthogonal, if # not, then compute orthogonal representations and # send back a warning message. if not skip_orthog_check: if not isOrthogonal(A): print "WARNING: You are calling grassmann_logmap function on non-orthogonal input matrix A" print "(This function will compute an orthogonal representation for A using an SVD.)" A = closestOrthogonal(A) if not isOrthogonal(p): print "WARNING: You are calling grassmann_logmap function on non-orthogonal pole p." print "(This function will compute an orthogonal representation for p using an SVD.)" p = closestOrthogonal(p) #p_perp is the orthogonal complement to p, = null(p.T) p_perp = nullspace(p.T) #compute p_perp * p_perp.T * A * inv(p.T * A) T = sp.dot(p.T,A) try: Tinv = LA.inv(T) except(LA.LinAlgError): Tinv = LA.pinv(T) X = sp.dot( sp.dot( sp.dot(p_perp,p_perp.T), A), Tinv ) u, s, vh = LA.svd(X, full_matrices=False) s[ s < tol ]= 0 #set extremely small values to zero theta = sp.diag( sp.arctan(s) ) logA = sp.dot(u, sp.dot( theta,vh)) normA = sp.trace( sp.dot(logA.T, logA) ) return logA, normA
def g(self, x):
    if x[1] == 0.0:
        A = pi/2.0
    else:
        A = arctan(x[0]/x[1])
    g1 = x[1]**2 + x[0]**2 - 1.0 - 0.1*cos(16.0*A)
    g2 = 0.5 - (x[0]-0.5)**2 - (x[1]-0.5)**2
    if g1 >= 0 and g2 >= 0:
        return True, array([0., 0.])
    return False, array([g1, g2])
def find_tang(self, pt):
    x = pt[0]
    y = pt[1]
    d = self.param
    k = sp.sqrt((x+d)**2 + y**2 - (1+d)**2)
    theta = sp.arctan(k/(1+d))
    # translate by d, rotate by 2*theta, then translate back by d
    x += d
    x, y = rotation((x, y), 2*theta)
    x -= d
    return sp.array((x, y))
def tosph(self):
    x, y, z = self.coord
    rho = self.norm()
    if sp.absolute(x) < 1e-8:
        if y >= 0:
            theta = sp.pi/2
        else:
            theta = 3*sp.pi/2
    else:
        theta = sp.arctan(y/x)
    phi = sp.arccos(z/rho)
    return rho, theta, phi
def RiemannSurface4(): """Riemann surface for real part of arctan(z)""" fig = plt.figure() ax = Axes3D(fig) Xres, Yres = .01, .2 ax.view_init(elev=11., azim=-56) X = sp.arange(-4, -.0001, Xres) Y = sp.arange(-4, 4, Yres) X, Y = sp.meshgrid(X, Y) Z = sp.real(sp.arctan(X+1j*Y)) ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap) ax.plot_surface(X, Y, Z+sp.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap) ax.plot_surface(X, Y, Z-sp.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap) X = sp.arange(.0001, 4, Xres) Y = sp.arange(-4,4, Yres) X, Y = sp.meshgrid(X, Y) Z = sp.real(sp.arctan(X+1j*Y)) ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap) ax.plot_surface(X, Y, Z+sp.pi, rstride=1, cstride=1, linewidth=0,cmap=cmap) ax.plot_surface(X, Y, Z-sp.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap) plt.savefig('RiemannSurface4.pdf', bbox_inches='tight', pad_inches=0)
def __call__(self, pts): print '*** geo_barrel_shell called ***' x_, y_, z_ = pts.T L = self.length_quarter b = self.width_quarter f = self.arc_height t = self.thickness #------------------------------------------- # transformation for 'cylinder coordinates' #------------------------------------------- # calculate the arc radius: # R = f / 2. + b ** 2 / (2.*f) # calculate the arc angle [rad] beta = sp.arctan(b / (R - f)) # cylinder coordinates of the barrel shell # y = y_ * L x = (R - z_ * t) * np.sin(x_ * beta) z = (R - z_ * t) * np.cos(x_ * beta) - R + f #------------------------------------------- # cut of free edge by 45 deg #------------------------------------------- # rounded length Lr = self.Lr # length to be substracted in y-direction (linear relation with respect to the z-axis) # delta_yr = (1. - z / f) * Lr # used regular discretization up to y = L1 L1 = self.L1 # substract 'yr' for y_ = 1.0 (edge) and substract 0. for y_ = L1/L # and interpolate linearly within 'L' and 'L1' # idx_r = np.where(y_ > L1 / L)[0] y[ idx_r ] -= ((y_[ idx_r ] - L1 / L) / (1.0 - L1 / L) * delta_yr[ idx_r ]) pts = np.c_[x, y, z] return pts
def alignFlyImage(self, fly_image, slope):
    # paste into a triple-size image to avoid losing corners in the rotation
    deg = scipy.arctan(slope)*180./scipy.pi + 90
    x, y = fly_image.size
    large = Image.new("L", (3*x, 3*y), 255)
    large.paste(fly_image, (x, y))
    # convert the slope to an angle and rotate to vertical
    aligned = large.rotate(deg)
    cropped = aligned.crop((int(1.2*x), int(.5*y), int(1.8*x), int(2.5*y)))
    bounded = cropped.crop(self.getbbox(cropped))
    self.pic_id += 1
    # bounded.save(r'c:\\Documents and Settings\\Jake F\\My Documents\\frames\\aligned\\'+str(self.pic_id)+r'.bmp')
    self.window.displayEngine2(bounded.resize((80, 120)))
    return bounded
def ellipse2bbox(a, b, angle, cx, cy):
    a, b = max(a, b), min(a, b)
    ca = sp.cos(angle)
    sa = sp.sin(angle)
    if sa == 0.0:
        cta = 2.0 / sp.pi
    else:
        cta = ca / sa   # cot(angle)
    if ca == 0.0:
        ta = sp.pi / 2.0
    else:
        ta = sa / ca    # tan(angle)
    x = lambda t: cx + a * sp.cos(t) * ca - b * sp.sin(t) * sa
    y = lambda t: cy + b * sp.sin(t) * ca + a * sp.cos(t) * sa
    # x = cx + a * cos(t) * cos(angle) - b * sin(t) * sin(angle)
    # tan(t) = -b * tan(angle) / a
    tx1 = sp.arctan(-b * ta / a)
    tx2 = tx1 - sp.pi
    x1, y1 = x(tx1), y(tx1)
    x2, y2 = x(tx2), y(tx2)
    # y = cy + b * sin(t) * cos(angle) + a * cos(t) * sin(angle)
    # tan(t) = b * cot(angle) / a
    ty1 = sp.arctan(b * cta / a)
    ty2 = ty1 - sp.pi
    x3, y3 = x(ty1), y(ty1)
    x4, y4 = x(ty2), y(ty2)
    minx, maxx = Util.minmax([x1, x2, x3, x4])
    miny, maxy = Util.minmax([y1, y2, y3, y4])
    return sp.floor(minx), sp.floor(miny), sp.ceil(maxx), sp.ceil(maxy)
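# Hypothetical example: the axis-aligned bounding box of an ellipse with semi-axes 3 and 1,
# rotated by 30 degrees and centred at (10, 5). Util.minmax is assumed to return the
# (min, max) of its argument, as the function above implies.
bbox = ellipse2bbox(a=3.0, b=1.0, angle=sp.pi / 6.0, cx=10.0, cy=5.0)
print(bbox)   # (floor(minx), floor(miny), ceil(maxx), ceil(maxy))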
def TB_Fugacidad(self, T, P):
    """Fugacity calculation using the Trebble-Bishnoi equation of state"""
    a, b, c, d, q1, q2 = self.TB_lib(T, P)
    z = self.TB_Z(T, P)
    A = a*P/R_atml**2/T**2
    B = b*P/R_atml/T
    u = 1 + c/b
    t = 1 + 6*c/b + c**2/b**2 + 4*d**2/b**2
    tita = abs(t)**0.5
    if t >= 0:
        lamda = log((2*z + B*(u - tita))/(2*z + B*(u + tita)))
    else:
        lamda = 2*arctan((2*z + u*B)/B/tita) - pi
    fi = z - 1 - log(z - B) + A/B/tita*lamda
    return unidades.Pressure(P*exp(fi), "atm")
def create_straight(self): last_track = self[-1] #the following math is based on Mauro's matlab program if len(self) > 1: last_pos = self[-1].position before_last_pos = self[-2].position orient = sp.arctan((last_pos.Y - before_last_pos.Y) / (last_pos.X - before_last_pos.X)) else: orient = last_track.orient x0 = last_track.position.X y0 = last_track.position.Y dl = constants['length'] / constants['diff_index'] for i in range(1, constants['diff_index']+1): X = x0 + dl * i * sp.cos(orient) Y = y0 + dl * i * sp.sin(orient) position = Position(X,Y) self.append(_Straight_Track(orient, position)) return TrackInfo(orient, position)
def plot(self, indice): self.diagrama.ax.clear() self.diagrama.ax.set_xlim(0, 6) self.diagrama.ax.set_ylim(0, 1) title = QtWidgets.QApplication.translate( "pychemqt", "Heat Transfer effectiveness") self.diagrama.ax.set_title(title, size='12') self.diagrama.ax.set_xlabel("NTU", size='12') self.diagrama.ax.set_ylabel("ε", size='14') flujo = self.flujo[indice][1] self.mixed.setVisible(flujo == "CrFSMix") kw = {} if flujo == "CrFSMix": kw["mixed"] = str(self.mixed.currentText()) C = [0, 0.2, 0.4, 0.6, 0.8, 1.] NTU = arange(0, 6.1, 0.1) for ci in C: e = [0] for N in NTU[1:]: e.append(efectividad(N, ci, flujo, **kw)) self.diagrama.plot(NTU, e, "k") fraccionx = (NTU[40]-NTU[30])/6 fracciony = (e[40]-e[30]) try: angle = arctan(fracciony/fraccionx)*360/2/pi except ZeroDivisionError: angle = 90 self.diagrama.ax.annotate( "C*=%0.1f" % ci, (NTU[29], e[30]), rotation=angle, size="medium", ha="left", va="bottom") self.diagrama.draw() img = image.imread('images/equation/%s.png' % flujo) self.image.set_data(img) self.refixImage()
def magncollacf(tau, K, C, alpha, Om, nu):
    """ magncollacf(tau, K, C, alpha, Om)
    by John Swoboda
    Creates a single-particle ACF for a particle species with a magnetic field
    and collisions.
    Inputs
        tau: The time-lag vector for the ACF.
        K: Bragg scattering vector magnitude.
        C: Thermal speed of the species.
        alpha: Magnetic aspect angle in radians.
        Om: The gyrofrequency of the particle.
        nu: The collision frequency in collisions/sec.
    Output
        acf - The single-particle ACF. """
    Kpar = sp.sin(alpha)*K
    Kperp = sp.cos(alpha)*K
    gam = sp.arctan(nu/Om)
    deltl = sp.exp(-sp.power(Kpar*C/nu, 2.0)*(nu*tau - 1 + sp.exp(-nu*tau)))
    deltp = sp.exp(-sp.power(C*Kperp, 2.0)/(Om*Om + nu*nu) *
                   (sp.cos(2*gam) + nu*tau - sp.exp(-nu*tau)*sp.cos(Om*tau - 2.0*gam)))
    return deltl*deltp
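# Evaluating the single-particle ACF on a lag grid; the plasma parameters below are
# placeholders chosen only to exercise the function, not physically meaningful values.
tau_ex = sp.linspace(0.0, 1e-3, 200)                       # lags [s]
acf_ex = magncollacf(tau_ex, K=2*sp.pi/0.3, C=1e3,
                     alpha=60.0*sp.pi/180.0, Om=1e5, nu=1e3)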
def drawlabel(self, name, t, W, label, unit): """ Draw annotation for isolines name: name of isoline t: x array of line W: y array of line label: text value to draw unit: text units to draw """ if self.Preferences.getboolean("Psychr", name+"label"): TMIN = self.Preferences.getfloat("Psychr", "isotdbStart") TMAX = self.Preferences.getfloat("Psychr", "isotdbEnd") tmin = Temperature(TMIN).config() tmax = Temperature(TMAX).config() wmin = self.Preferences.getfloat("Psychr", "isowStart") wmax = self.Preferences.getfloat("Psychr", "isowEnd") if self.Preferences.getboolean("Psychr", "chart"): x = tmax-tmin y = wmax-wmin i = 0 for ti, wi in zip(t, W): if tmin <= ti <= tmax and wmin <= wi <= wmax: i += 1 else: x = wmax-wmin y = tmax-tmin i = 0 for ti, wi in zip(t, W): if tmin <= wi <= tmax and wmin <= ti <= wmax: i += 1 label = str(label) if self.Preferences.getboolean("Psychr", name+"units"): label += unit pos = self.Preferences.getfloat("Psychr", name+"position") p = int(i*pos/100-1) rot = arctan((W[p]-W[p-1])/y/(t[p]-t[p-1])*x)*360/2/pi self.plt.ax.annotate(label, (t[p], W[p]), rotation=rot, size="small", ha="center", va="center")
def ecef2geodetic(x, y, z, degrees=True):
    """ecef2geodetic(x, y, z) [m][m][m]
    Convert ECEF coordinates to geodetic.
    J. Zhu, "Conversion of Earth-centered Earth-fixed coordinates
    to geodetic coordinates," IEEE Transactions on Aerospace and
    Electronic Systems, vol. 30, pp. 957-961, 1994."""
    r = sqrt(x * x + y * y)
    Esq = a * a - b * b
    F = 54 * b * b * z * z
    G = r * r + (1 - esq) * z * z - esq * Esq
    C = (esq * esq * F * r * r) / (pow(G, 3))
    S = cbrt(1 + C + sqrt(C * C + 2 * C))
    P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)
    Q = sqrt(1 + 2 * esq * esq * P)
    r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a * (1 + 1.0 / Q) -
                                          P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)
    U = sqrt(pow((r - esq * r_0), 2) + z * z)
    V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)
    Z_0 = b * b * z / (a * V)
    h = U * (1 - b * b / (a * V))
    lat = arctan((z + e1sq * Z_0) / r)
    lon = arctan2(y, x)
    return rad2deg(lat), rad2deg(lon), h   # return the computed altitude h, not the ECEF input z
def _1_R_Polygon(self, polysurf, r): # 参见经典文献中的公式 try: norm_ = np.cross(polysurf[:, 1, :] - polysurf[:, 0, :], polysurf[:, 2, :] - polysurf[:, 0, :]) # ne*3 norm_ = norm_ / np.sqrt(np.sum(norm_**2, axis=-1).reshape([-1, 1])) l_ = np.zeros_like(polysurf) # ne*3*3 u_ = np.zeros_like(polysurf) # ne*3*3 r_ = np.zeros([ r.shape[0], polysurf.shape[0], polysurf.shape[1], polysurf.shape[2] ]) # nr*ne*3*3 R = np.zeros([r.shape[0], polysurf.shape[0], polysurf.shape[1]]) # nr*ne*3 for ii in xrange(3): temp = polysurf[:, (ii + 1) % 3, :] - polysurf[:, ii, :] l_[:, ii, :] = temp / np.sqrt( np.sum(temp**2, axis=-1).reshape([-1, 1])) u_[:, ii, :] = np.cross(l_[:, ii, :], norm_) r_[:, :, ii, :] = polysurf[:, ii, :] - r.reshape([-1, 1, 3]) R[:, :, ii] = np.sqrt( np.sum(r_[:, :, ii, :] * r_[:, :, ii, :], axis=-1)) d = np.sum(r_[:, :, 0, :] * norm_, axis=-1) # nr*ne P_ = np.zeros_like(r_) # nr*ne*3*3 temp = d.reshape([d.shape[0], d.shape[1], 1]) * norm_.reshape( [1, norm_.shape[0], norm_.shape[1]]) # nr*ne*3 for ii in xrange(3): P_[:, :, ii, :] = r_[:, :, ii, :] - temp P0 = np.zeros_like(R) # nr*ne*3 lpos = np.zeros_like(R) lneg = np.zeros_like(R) R0 = np.zeros_like(R) for ii in xrange(3): P0[:, :, ii] = np.abs(np.sum(P_[:, :, ii, :] * u_[:, ii, :], axis=-1)) lpos[:, :, ii] = np.sum(P_[:, :, (ii + 1) % 3, :] * l_[:, ii, :], axis=-1) lneg[:, :, ii] = np.sum(P_[:, :, ii, :] * l_[:, ii, :], axis=-1) R0[:, :, ii] = np.sqrt(P0[:, :, ii] * P0[:, :, ii] + d * d) result = np.zeros([r.shape[0], polysurf.shape[0]]) R0__2 = R0**2 absd = np.abs(d) for ii in xrange(3): noise = 1.e-10 * np.sqrt(np.sum(l_[:, ii, :]**2, axis=-1)) check2 = (R[:, :, ii] + lneg[:, :, ii]) > noise lg = np.where(check2,\ scipy.log(R[:,:,(ii+1)%3]+lpos[:,:,ii]) \ - scipy.log(R[:,:,ii]+lneg[:,:,ii]),\ np.zeros([r.shape[0],polysurf.shape[0]])\ ) check3 = absd > noise result_branch2 = np.where(check3,\ P0[:,:,ii]*lg - absd*( \ scipy.arctan(P0[:,:,ii]*lpos[:,:,ii]/(R0__2[:,:,ii]+absd*R[:,:,(ii+1)%3]))\ -scipy.arctan(P0[:,:,ii]*lneg[:,:,ii]/(R0__2[:,:,ii]+absd*R[:,:,ii]))), \ P0[:,:,ii]*lg) check1 = (R0[:, :, ii] < noise) temp_result_add = np.where(check1.reshape([r.shape[0],polysurf.shape[0]]), \ np.zeros_like(result), \ result_branch2) sing = np.sum(P_[:, :, ii, :] * u_[:, ii, :], axis=-1) > 0 result = np.where( sing,\ result + temp_result_add,\ result - temp_result_add) return result except Exception as e: print e raise
def DensityLorentz(x, Delta):
    ''' particle density of a Lorentzian band for T=0 '''
    return 0.5 - sp.arctan(x/Delta)/sp.pi
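# Spot checks of the Lorentzian band density: it equals 1/2 at the band centre and
# tends to 1 (0) far below (above) it.
print(DensityLorentz(0.0, 1.0))     # 0.5
print(DensityLorentz(-50.0, 1.0))   # ~1.0
print(DensityLorentz(50.0, 1.0))    # ~0.0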
def Brewster(self):
    return sp.arctan(self.n2 / self.n1 * sp.sqrt(
        (self.n1**2 - (self.mu1 / self.mu2)**2 * self.n2**2) /
        (self.n1**2 - self.n2**2)))
def u2polar(vec):
    ratio = vec[1] / vec[0]
    theta = np.arctan(abs(ratio)) * 2
    phi = np.angle(ratio)
    return theta, phi
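# Example: the two angles follow the usual Bloch-sphere convention
# theta = 2*arctan(|c1/c0|), phi = arg(c1/c0), so for the equal superposition
# (1/sqrt(2), 1/sqrt(2)) the result is (pi/2, 0).
import numpy as np
state = np.array([1.0, 1.0]) / np.sqrt(2.0)
print(u2polar(state))   # (~1.5708, 0.0)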
def butterfly(physics, phase, network, surface_tension='pore.surface_tension', contact_angle='pore.contact_angle', throat_diameter='throat.diameter', **kwargs): r""" Computes the capillary entry pressure assuming the throat in a hourglass tube. Parameters ---------- network : OpenPNM Network Object The Network object is phase : OpenPNM Phase Object Phase object for the invading phases containing the surface tension and contact angle values. sigma : dict key (string) The dictionary key containing the surface tension values to be used. If a pore property is given, it is interpolated to a throat list. theta : dict key (string) The dictionary key containing the contact angle values to be used. If a pore property is given, it is interpolated to a throat list. throat_diameter : dict key (string) The dictionary key containing the throat diameter values to be used. Notes ----- The Butterfly equation is: .. math:: P_c = -\frac{2\sigma(cos(arctan(max(dr/dx)) + \theta))}{r} """ fibreRadius = 4.5e-6 constLength = 1.5e-5 print("Calculating Capillary Pressures...") if surface_tension.split('.')[0] == 'pore': sigma = phase[surface_tension] sigma = phase.interpolate_data(data=sigma) else: sigma = phase[surface_tension] if contact_angle.split('.')[0] == 'pore': theta = phase[contact_angle] theta = phase.interpolate_data(data=theta) else: theta = phase[contact_angle] # Base radius (not including fibre) r = network[throat_diameter] / 2 r = r[:, _sp.newaxis] x = _sp.linspace(0, constLength, 100) f = lambda y: fibreRadius * _sp.sin(10 * y / (constLength * _sp.pi)) df = lambda y: (10 * fibreRadius / (constLength * _sp.pi)) * _sp.cos(10 * y / (constLength * _sp.pi)) r = r - f(x) # -2*sigma*cos(theta)/radius drdx = _sp.absolute(df(x)) value = [] for i in range(len(r)): if i % 1000 == 0: print(i) radii = r[i] caps = [] deg = theta[i] sig = sigma[i] caps = [] caps = [ -2 * sig * _sp.cos(_sp.arctan(drdx[j]) + _sp.radians(deg)) / radii[j] for j in range(len(x)) ] maxcap = min(caps) value.append(maxcap) ''' value = -2*sigma*_sp.cos(_sp.radians(theta))/r if throat_diameter.split('.')[0] == 'throat': value = value[phase.throats(physics.name)] else: value = value[phase.pores(physics.name)] value[_sp.absolute(value) == _sp.inf] = 0 ''' return value
def ecef2lla(xyz): # TODO # [ ] make it vectorizable ? """ Function: ecef2lla(xyz) --------------------- Converts ECEF X, Y, Z coordinates to WGS-84 latitude, longitude, altitude Inputs: ------- xyz : 1x3 vector containing [X, Y, Z] coordinate Outputs: -------- lla : 1x3 vector containing the converted [lat, lon, alt] (alt is in [m]) Notes: ------ Based from Jonathan Makela's GPS_WGS84.m script History: -------- 7/21/12 Created, Timothy Duly ([email protected]) """ x = xyz[0][0] y = xyz[0][1] z = xyz[0][2] run = 1 lla = np.array(np.zeros(xyz.size)) # Compute longitude: lla[1] = arctan2(y, x) * (180. / pi) # guess iniital latitude (assume you're on surface, h=0) p = sqrt(x**2 + y**2) lat0 = arctan(z / p * (1 - E**2)**-1) while (run == 1): # Use initial latitude to estimate N: N = A**2 / sqrt(A**2 * (cos(lat0))**2 + B**2 * (sin(lat0))**2) # Estimate altitude h = p / cos(lat0) - N # Estimate new latitude using new height: lat1 = arctan(z / p * (1 - ((E**2 * N) / (N + h)))**-1) if abs(lat1 - lat0) < LAT_ACCURACY_THRESH: run = 0 # Replace our guess latitude with most recent estimate: lat0 = lat1 # load output array with best approximation of latitude (in degrees) # and altiude (in meters) lla[0] = lat1 * (180. / pi) lla[2] = h return lla
def integrand(r, R, sig):
    gauss = np.exp(-r**2 / (2 * sig**2))
    x1 = scipy.arctan(np.sqrt((2 * R - r) / (2 * R + r)))
    x2 = scipy.sin(4 * scipy.arctan(np.sqrt((2 * R - r) / (2 * R + r))))
    factor = 4 * x1 - x2
    return r * gauss * factor
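# The radial profile above is presumably meant to be integrated over r; a minimal
# sketch with scipy.integrate.quad and placeholder values for R and sig (the kernel
# is real only for r <= 2*R, hence the upper limit):
from scipy import integrate
R_ex, sig_ex = 10.0, 2.0
total, err = integrate.quad(integrand, 0.0, 2.0*R_ex, args=(R_ex, sig_ex))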
def __call__(self, *args, **kwargs):
    if self.isdist:
        return scipy.arctan(self.dist(*args, **kwargs))
    else:
        return scipy.arctan(self.dist)
def step_func(x, coeffs):
    H, L, P = coeffs[:3]
    d = coeffs[3]
    y = 0.5 * H * (0.5 + (1.0 / numpy.pi) * scipy.arctan((x - P) / (0.5 * L))) + d
    return y
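# A hedged fitting sketch for step_func: generate a noisy arctangent step and recover
# the coefficients [H, L, P, d] with scipy.optimize.curve_fit. curve_fit expects
# f(x, *params), so _step_wrapper (a helper added here, not part of the original code)
# repacks the parameters into the coeffs list.
import numpy
from scipy.optimize import curve_fit

def _step_wrapper(x, H, L, P, d):
    return step_func(x, [H, L, P, d])

x_ex = numpy.linspace(-5.0, 5.0, 200)
y_ex = _step_wrapper(x_ex, 2.0, 0.5, 1.0, 0.1) + 0.02*numpy.random.randn(x_ex.size)
popt, pcov = curve_fit(_step_wrapper, x_ex, y_ex, p0=[1.0, 1.0, 0.0, 0.0])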
def __init__(self, fc, c_vel, alp_g, mu_los, mu_nlos, a, b, noise_var, hUAV, xUAV, yUAV, xUE, yUE): dist = sp.sqrt( sp.add(sp.square(sp.subtract(yUAV, yUE)), sp.square(sp.subtract(xUAV, xUE))) ) R_dist = sp.sqrt( sp.add(sp.square(dist), sp.square(hUAV)) ) temp1 = sp.multiply(10, sp.log10(sp.power(fc*4*sp.pi*R_dist/c_vel, alp_g))) temp2 = sp.multiply(sp.subtract(mu_los, mu_nlos), sp.divide(1, (1+a*sp.exp(-b*sp.arctan(hUAV/dist)-a)))) temp3 = sp.add(sp.add(temp1, temp2), mu_nlos) self.pathloss = sp.divide(sp.real(sp.power(10, -sp.divide(temp3, 10))), noise_var)
def drawhelix(base, helixobj, nbase, xc1, nc1, thc1, dzc1, shades, render, acap, bcap, adye, bdye, ax3d, gg, helpers=True): # nbase # number of bases in helix # xc1(3) # start helix axis position # nc1(3) # start helix axis vector # thc1 # start helix rotation around axis # dzc1 # translation along helix axis from start of helix axis # shades(3) # colormap indices for chain a, chain b, base pairs # render(3) # render chain a, chain b, base pair struts # acap(2) # cap 5', 3' end of a chain # bcap(2) # cap 5', 3' end of b chain # adye(2) # dye 5', 3' end of a chain # bdye(2) # dye 5', 3' end of b chain #~ print "base = ",base #~ print "helix = ",helixobj #~ print "nbase = ",nbase #~ print "xc1 = ",xc1 #~ print "nc1 = ",nc1 #~ print "thc1 = ",thc1 #~ print "dzc1 = ",dzc1 #~ print "shades = ",shades #~ print "render = ",render #~ print "acap = ",acap #~ print "bcap = ",bcap #~ print "adye = ",adye #~ print "bdye = ",bdye #~ print "ax3d = ",ax3d #~ print helixobj.bases nhtot = [0, 0] nhtot[0] = (gg.nh[0] - 1) * (nbase - 1) + 1 # total points along chain if nhtot[0] < 2: nhtot[0] = 2 nhtot[1] = gg.nh[1] # total points around chain #~print "drawhelix "+"-"*50 #pdb.set_trace() # 0) numpy.linalg.normalize target helix axis vector n = numpy.linalg.norm(nc1) if n > 0: nc1 = nc1 / n dz = gg.dzb / (gg.nh[0] - 1) dth = 2 * scipy.pi / (gg.nh[1] - 1) x_a = numpy.zeros(nhtot) y_a = numpy.zeros(nhtot) z_a = numpy.zeros(nhtot) x_b = numpy.zeros(nhtot) y_b = numpy.zeros(nhtot) z_b = numpy.zeros(nhtot) th = numpy.arange(0, 2 * scipy.pi + dth / 2, dth) step = dz * gg.dthb / gg.dzb if nbase == 1: thc_a = numpy.arange(0, 1.1 * step, step) xc_a = numpy.zeros([3, thc_a.size]) xc_b = numpy.zeros([3, thc_a.size]) xc_a[0, :] = gg.rdh * scipy.cos(thc_a) xc_a[1, :] = gg.rdh * scipy.sin(thc_a) # move start of a chain so rise due to inclination of # base pair is centered on helix origin xc_a[2, :] = numpy.arange(0, 1.1 * dz, dz) - .5 * gg.strutrise thc_b = gg.dthgroove + numpy.arange(0, 1.1 * step, step) xc_b[0, :] = gg.rdh * scipy.cos(thc_b) xc_b[1, :] = gg.rdh * scipy.sin(thc_b) # move start of chain b so rise due to inclination of # base pair is centered on the origin in the z direction xc_b[2, :] = numpy.arange(0, 1.1 * dz, dz) + .5 * gg.strutrise else: thc_a = numpy.arange(0, gg.dthb * (nbase - 1) + step / 2, step) xc_a = numpy.zeros([3, thc_a.size]) xc_b = numpy.zeros([3, thc_a.size]) xc_a[0, :] = gg.rdh * scipy.cos(thc_a) xc_a[1, :] = gg.rdh * scipy.sin(thc_a) # move start of a chain so rise due to inclination of # base pair is centered on helix origin xc_a[2, :] = numpy.arange(0, gg.dzb * (nbase - 1) + dz / 2, dz) - .5 * gg.strutrise thc_b = gg.dthgroove + numpy.arange(0, gg.dthb * (nbase - 1) + step / 2, step) xc_b[0, :] = gg.rdh * scipy.cos(thc_b) xc_b[1, :] = gg.rdh * scipy.sin(thc_b) # move start of chain b so rise due to inclination of # base pair is centered on the origin in the z direction xc_b[2, :] = numpy.arange(0, gg.dzb * (nbase - 1) + dz / 2, dz) + .5 * gg.strutrise if helpers and ax3d: ax3d.addPolyCylinder(numpy.array([xc_a[0], xc_a[1], xc_a[2]]).T, colors=Export.colors["shady_blue"], radius=gg.rhc) ax3d.addPolyCylinder(numpy.array([xc_b[0], xc_b[1], xc_b[2]]).T, colors=Export.colors["shady_green"], radius=gg.rhc) minp = -20. 
maxp = 20 n = 11 d = maxp - minp step = d / (n - 1) points3 = numpy.zeros([n, 3]) points3[:, 2] = points3[:, 1] = numpy.zeros(n) z = numpy.arange(minp, maxp + step / 2, step) #~ print z points3[:, 0] = z #~ print points3 ax3d.addPolyCylinder(points3, radius=1, colors=Export.colors["white"]) # # define backbone # phi = scipy.pi / 2 - scipy.arctan(gg.dzb / (gg.rdh * gg.dthb)) x3_a = y3_a = z3_a = x5_b = y5_b = z5_b = None # convenient to rotate surface using matlab function rotate # however, still need to keep track of end positions and vectors using # rotation matrices, hence, might be more consistent just to explicitly # compute rotation matrix and do everything manually # # actually, would be useful reference check to keep moving surfaces using # "rotate" and move end info manually, unfortunately, since there is no # "translate" equivalent for translation, have to translate manually before # "rotate" and this makes it messy to rotate the end points since have # to change origin of rotation for them # # decided just to do everything manually in the end # # 1) first rotate around z axis amount thc1 # rotate(h,[0 0 1],thc1,[0 0 0]); # "rotate" won't work properly if xc1 \neq (since "rotate" won't do translation) # (sign of sin terms seems reversed to me...????) rmat1 = numpy.matrix([[scipy.cos(thc1*scipy.pi/180), \ -scipy.sin(thc1*scipy.pi/180), 0], \ [scipy.sin(thc1*scipy.pi/180), \ scipy.cos(thc1*scipy.pi/180), 0], \ [0, 0, 1]]) # rotate around z axis # 2) rotate helix axis to vector u_n \equiv nc1(3) # axis starts out as u_z # rotation is around vector u_rot = u_z x u_n sinth_rot = numpy.sqrt(nc1[0]**2 + nc1[1]**2) costh_rot = nc1[2] if sinth_rot > 0: u_rot = numpy.matrix([-nc1[1], nc1[0], 0]).T u_rot = u_rot / numpy.linalg.norm(u_rot) # make unit vectors th_rot = 180. / scipy.pi * scipy.arctan2(sinth_rot, costh_rot) # rotate(h,u_rot,th_rot,[0 0 0]); # "rotate" won't work properly if xc1 \neq 0 # (since intrinsic function won't do translation) # th_rot needs to be reversed compared to value for using # matlab intrinsic function "rotate" rmat2 = scipy.cos(-th_rot*scipy.pi/180)*I3 \ + (1-scipy.cos(-th_rot*scipy.pi/180))*u_rot*u_rot.T \ + scipy.sin(-th_rot*scipy.pi/180) * \ numpy.matrix([[0 , u_rot[2], -u_rot[1]], \ [-u_rot[2], 0 , u_rot[0]], \ [u_rot[1] , -u_rot[0], 0]]) elif costh_rot == -1: # need special case for u_n = [0; 0; -1] u_rot = numpy.matrix([0, 1, 0]).T th_rot = 180. 
rmat2 = scipy.cos(-th_rot*scipy.pi/180)*I3 \ + (1-scipy.cos(-th_rot*scipy.pi/180))*u_rot*u_rot.T \ + scipy.sin(-th_rot*scipy.pi/180) * \ numpy.matrix([[0 , u_rot[2], -u_rot[1]], \ [-u_rot[2], 0 , u_rot[0]], \ [ u_rot[1], -u_rot[0], 0 ]]) else: # special case for u_n = [0; 0; 1] rmat2 = numpy.matrix(I3) # 3) then translate the helix # chains for j in range(xc_a.shape[1]): xtmp = numpy.array(rmat2*rmat1*numpy.matrix([xc_a[0,j], xc_a[1,j], \ xc_a[2,j]]).T).flatten() + xc1 + nc1*dzc1 xc_a[0, j] = xtmp[0] xc_a[1, j] = xtmp[1] xc_a[2, j] = xtmp[2] xtmp = numpy.array(rmat2*rmat1*numpy.matrix([xc_b[0,j], xc_b[1,j], \ xc_b[2,j]]).T).flatten() + xc1 + nc1*dzc1 xc_b[0, j] = xtmp[0] xc_b[1, j] = xtmp[1] xc_b[2, j] = xtmp[2] # base pair struts #~ print "Calculating base positions" for j in range(1, nbase + 1): i = (j - 1) * (gg.nh[0] - 1) bar = numpy.array([[xc_a[0, i], xc_a[1, i], xc_a[2, i]], [xc_b[0, i], xc_b[1, i], xc_b[2, i]]]) base[helixobj.bases[j - 1][0]].x3 = xc_a[:, i] base[helixobj.bases[j - 1][1]].x3 = xc_b[:, i] ax3d.addPolyCylinder(bar, radius=gg.rbc[0], colors=Export.colors["light_gray"]) ### Draw cylinders if False and helpers and ax3d: ax3d.addPolyCylinder(numpy.array([xc_a[0], xc_a[1], xc_a[2]]).T, colors=Export.colors["yellow"], radius=gg.rhc) ax3d.addPolyCylinder(numpy.array([xc_b[0], xc_b[1], xc_b[2]]).T, colors=Export.colors["yellow"], radius=gg.rhc) offset = gg.nh[0] x1a = numpy.matrix(xc_a[:, 0]).T n1a = numpy.matrix([0, -gg.rdh*gg.dthb/numpy.sqrt(gg.dzb**2 + (gg.rdh*gg.dthb)**2), \ -gg.dzb/numpy.sqrt(gg.dzb**2 + (gg.rdh*gg.dthb)**2)]) x1b = numpy.matrix(xc_b[:, 0]).T ca = scipy.cos( 2 * scipy.pi - gg.dthgroove) # rotation matrix is cw but groove angle is ccw sa = scipy.sin(2 * scipy.pi - gg.dthgroove) # rotate around z axis rmat = ca*I3 + (1-ca)*numpy.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 1]]) \ + sa*numpy.matrix([[0, 1, 0], [-1, 0, 0], [0, 0, 0]]) n1b = rmat * n1a.T # end chain information xc2 = numpy.matrix([0, 0, gg.dzb * (nbase - 1)]) # end helix axis nc2 = nc1 # end helix axis vector thc2 = gg.dthb * (nbase - 1) * 180 / scipy.pi # end helix rotation around axis rmat = numpy.matrix([[scipy.cos(thc2*scipy.pi/180), \ -scipy.sin(thc2*scipy.pi/180), 0], \ [scipy.sin(thc2*scipy.pi/180), \ scipy.cos(thc2*scipy.pi/180), 0], \ [0, 0, 1]]) # rotate around z axis x2a = numpy.matrix(xc_a[:, -1]).T n2a = -rmat * n1a.T # end helix chain vector 5'->3' chain x2b = numpy.matrix(xc_b[:, -1]).T n2b = -rmat * n1b # end helix chain vector 3'->5' chain n1a = numpy.array( n1a / numpy.linalg.norm(n1a)).flatten() # normalize normal vectors n1b = numpy.array(n1b / numpy.linalg.norm(n1b)).flatten() n2a = numpy.array(n2a / numpy.linalg.norm(n2a)).flatten() n2b = numpy.array(n2b / numpy.linalg.norm(n2b)).flatten() thc2 = thc2 + thc1 return xc2, nc2, thc2, x1a, n1a, x2a, n2a, x1b, n1b, x2b, n2b, nhtot, numpy.array( [xc_a[0], xc_a[1], xc_a[2]]).T, numpy.array([xc_b[0], xc_b[1], xc_b[2]]).T
def get_fields(w, dims, nr, sym, N, harm, pol, res=200, ax=None): ''' Returns the complex field values at specified points in the 2D cross section of a rectangular resonator INPUTS w - complex freq. using exp[-iwt] convention (wr - 1j*wi) dims - dimensions of resonator normalized to wavelength nr - refractive index of resonator sym - symmetry of dominant field (z component) w.r.t. x axis N - Number of cylindrical harmonics considered in series harm - (0,1) mode generated by even or odd harmonics (determines y-symmetry) pol - ('TE','TM') polarization; TE implies Hz res - resolution of field data ax - axes on which to plot fields; if None, generates new figure OUTPUTS Fz, Fx, Fy - complex field data for each component. If Fz is Hz, then Fx,y will be Ex,y and vice-versa. ''' a, b = dims # grid dimensions in normalized length units xmax = a + 1 ymax = b + 1 xs = np.linspace(-xmax, xmax, res) ys = np.linspace(-ymax, ymax, res) X, Y = np.meshgrid(xs, ys) R = sqrt(X**2 + Y**2) TH = sp.arctan(Y / X) ko = w / c k1 = ko * nr ns = getns(N, sym, pol, harm) phi = getphi(sym) mask = (abs(X) <= a / 2.) * (abs(Y) <= b / 2.) C1, C2 = get_coefs(w, dims, nr, sym, N, harm, pol) if pol == 'TE': #Fz = Hz Fz_int = np.sum([ C1[ni] * jn(n, k1 * R) * cos(n * TH + phi) for ni, n in enumerate(ns) ], axis=0) Fz_ext = np.sum([ C2[ni] * h1(n, ko * R) * cos(n * TH + phi) for ni, n in enumerate(ns) ], axis=0) if pol == 'TM': #Fz = Ez Fz_int = np.sum([ C1[ni] * jn(n, k1 * R) * sin(n * TH + phi) for ni, n in enumerate(ns) ], axis=0) Fz_ext = np.sum([ C2[ni] * h1(n, ko * R) * sin(n * TH + phi) for ni, n in enumerate(ns) ], axis=0) Fz_tot = Fz_int * mask + Fz_ext * (~mask) ext = [-xmax, xmax, -ymax, ymax] # create shaded box to display resonator rect = plt.Rectangle((-a / 2., -b / 2.), a, b, facecolor='k', alpha=0.3) if ax == None: fig, ax = plt.subplots(1, 1, figsize=(10, 10)) ax.imshow(abs(Fz_tot), interpolation='nearest', extent=ext, vmax=np.amax(abs(Fz_int * mask)))
plt.plot(x_points, circle_positive) plt.plot(x_points, circle_negative) line_x = [0] line_y = [-1] m = 0.3 n = 0 while (sp.sqrt(line_x[n]**2 + line_y[n]**2) <= 1): line_x.append(line_x[n] + 0.0001) line_y.append(line_x[n + 1] * m - 1) n = n + 1 plt.plot(line_x, line_y) theta = sp.pi - sp.arctan(m) + 2 * sp.arctan2(line_y[n - 1], line_x[n - 1]) grad = sp.tan(theta) inte = line_y[n - 1] - grad * line_x[n - 1] line_x1 = [line_x[n - 1]] line_y1 = [line_y[n - 1]] x_p = sp.linspace(0.2, 0.8, 100) y_p = x_p * line_y[n] / line_x[n] n = 0 while (sp.sqrt(line_x1[n]**2 + line_y1[n]**2) <= 1): line_x1.append(line_x1[n] + 0.0001) line_y1.append(line_x1[n + 1] * grad + inte) n = n + 1 plt.plot(line_x1, line_y1) plt.plot(x_p, y_p)
def desplaz(self): # notacion de Chinnery:f(e,eta)||= f(x,p)-f(x,p-W)-f(x-L,p)+f(x-L,W-p) p = self.y * cos(self.dip) + self.D * sin(self.dip) q = self.y * sin(self.dip) - self.D * cos(self.dip) e = array([self.x, self.x, self.x - self.largo, self.x - self.largo]).T eta = array([p, p - self.W, p, p - self.W]).T qq = array([q, q, q, q]).T # b = 4 ytg = eta * cos(self.dip) + qq * sin(self.dip) dtg = eta * sin(self.dip) - qq * cos(self.dip) R = power(e**2 + eta**2 + qq**2, 0.5) X = power(e**2 + qq**2, 0.5) if degrees(self.dip) != 90: I5 = (1 / cos(self.dip)) * scp.arctan( (eta * (X + qq * cos(self.dip)) + X * (R + X) * sin(self.dip)) / (e * (R + X) * cos(self.dip))) I4 = .5 / cos(self.dip) * (scp.log(R + dtg) - sin(self.dip) * scp.log(R + eta)) I1 = (.5 * ((-1. / cos(self.dip)) * (e / (R + dtg))) - (sin(self.dip) * I5 / cos(self.dip))) I3 = (.5 * (1 / cos(self.dip) * (ytg / (R + (dtg))) - scp.log(R + eta)) + (sin(self.dip) * I4 / cos(self.dip))) if degrees(self.dip) == 90: I5 = -.5 * e * sin(self.dip) / (R + dtg) I4 = -.5 * qq / (R + dtg) I3 = .25 * (eta / (R + dtg) + ytg / (R + dtg)**2 - scp.log(R + eta)) I1 = -.25 * e * qq / (R + dtg)**2 I2 = 0.5 * (-scp.log(R + eta)) - I3 # self.dip-slip ux_ds = -sin(self.rake) / (2 * pi) * ( qq / R - I3 * sin(self.dip) * cos(self.dip)) uy_ds = -sin(self.rake) / (2 * pi) * ( (ytg * qq / R / (R + e)) + (cos(self.dip) * scp.arctan(e * eta / qq / R)) - (I1 * sin(self.dip) * cos(self.dip))) uz_ds = -sin(self.rake) / (2 * pi) * ( (dtg * qq / R / (R + e)) + (sin(self.dip) * scp.arctan(e * eta / qq / R)) - (I5 * sin(self.dip) * cos(self.dip))) # strike-slipe ux_ss = -cos(self.rake) / (2 * pi) * ( (e * qq / R / (R + eta)) + (scp.arctan(e * eta / (qq * R))) + I1 * sin(self.dip)) uy_ss = -cos(self.rake) / (2 * pi) * ( (ytg * qq / R / (R + eta)) + qq * cos(self.dip) / (R + eta) + I2 * sin(self.dip)) uz_ss = -cos(self.rake) / (2 * pi) * ( (dtg * qq / R / (R + eta)) + qq * sin(self.dip) / (R + eta) + I4 * sin(self.dip)) # representacion chinnery self.dip-slip uxd = ux_ds.T[0] - ux_ds.T[1] - ux_ds.T[2] + ux_ds.T[3] uyd = uy_ds.T[0] - uy_ds.T[1] - uy_ds.T[2] + uy_ds.T[3] uzd = uz_ds.T[0] - uz_ds.T[1] - uz_ds.T[2] + uz_ds.T[3] # representacion chinnery strike-slip uxs = ux_ss.T[0] - ux_ss.T[1] - ux_ss.T[2] + ux_ss.T[3] uys = uy_ss.T[0] - uy_ss.T[1] - uy_ss.T[2] + uy_ss.T[3] uzs = uz_ss.T[0] - uz_ss.T[1] - uz_ss.T[2] + uz_ss.T[3] # cantidad de desplazamiento uxs = uxs uys = uys uzs = uzs uxd = uxd uyd = uyd uzd = uzd # suma componentes strike y dip slip. ux = uxd + uxs uy = uyd + uys uz = uzd + uzs # proyeccion a las componentes geograficas Ue = ux * sin(self.strike) - uy * cos(self.strike) Un = ux * cos(self.strike) + uy * sin(self.strike) # para revisar valores if False: print(ux, uy, uz) return Ue, Un, uz
def aperture_stats(energy, z, x):
    l = energy_to_wavelength(energy)
    NA = sp.sin(sp.arctan(x/z))
    axial_res = 2*l/NA**2.
    lateral_res = l/(2.*NA)
    print 'NA: %1.2e\nAxial resolution: %1.2e\nLateral resolution: %1.2e' % (NA, axial_res, lateral_res)
def GetWarping(self):
    return (2 / self.__T) * arctan(2 * pi * self.GetFrequency() * self.__T / 2) / (2 * pi)
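# The expression above reduces to f_warped = arctan(pi*f*T)/(pi*T), which reads like the
# digital-frequency image of an analog frequency f under the bilinear transform; for
# f*T << 1 it returns approximately f. A stand-alone numeric check with arbitrary values:
from scipy import arctan, pi
T_ex, f_ex = 1.0/48000.0, 1000.0
print(arctan(pi*f_ex*T_ex)/(pi*T_ex))   # ~998.6, close to f_ex since f_ex*T_ex is small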
def generate_base_points(num_points, domain_size, prob=None): r""" Generates a set of base points for passing into the DelaunayVoronoiDual class. The points can be distributed in spherical, cylindrical, or rectilinear patterns. Parameters ---------- num_points : scalar The number of base points that lie within the domain. Note that the actual number of points returned will be larger, with the extra points lying outside the domain. domain_size : list or array Controls the size and shape of the domain, as follows: **sphere** : If a single value is received, its treated as the radius [r] of a sphere centered on [0, 0, 0]. **cylinder** : If a two-element list is received it's treated as the radius and height of a cylinder [r, z] positioned at [0, 0, 0] and extending in the positive z-direction. **rectangle** : If a three element list is received, it's treated as the outer corner of rectangle [x, y, z] whose opposite corner lies at [0, 0, 0]. prob : 3D array, optional A 3D array that contains fractional (0-1) values indicating the liklihood that a point in that region should be kept. If not specified an array containing 1's in the shape of a sphere, cylinder, or cube is generated, depnending on the give ``domain_size`` with zeros outside. When specifying a custom probabiliy map is it recommended to also set values outside the given domain to zero. If not, then the correct shape will still be returned, but with too few points in it. Notes ----- This method places the given number of points within the specified domain, then reflects these points across each domain boundary. This results in smooth flat faces at the boundaries once these excess pores are trimmed. The reflection approach tends to create larger pores near the surfaces, so it might be necessary to use the ``prob`` argument to specify a slightly higher density of points near the surfaces. For rough faces, it is necessary to define a larger than desired domain then trim to the desired size. This will discard the reflected points plus some of the original points. Examples -------- The following generates a spherical array with higher values near the core. It uses a distance transform to create a sphere of radius 10, then a second distance transform to create larger values in the center away from the sphere surface. These distance values could be further skewed by applying a power, with values higher than 1 resulting in higher values in the core, and fractional values smoothinging them out a bit. >>> import OpenPNM as op >>> import scipy as sp >>> import scipy.ndimage as spim >>> im = sp.ones([21, 21, 21], dtype=int) >>> im[10, 10, 10] = 0 >>> im = spim.distance_transform_edt(im) <= 20 # Create sphere of 1's >>> prob = spim.distance_transform_edt(im) >>> prob = prob / sp.amax(prob) # Normalize between 0 and 1 >>> pts = op.Network.tools.generate_base_points(num_points=50, ... domain_size=[2], ... 
prob=prob) >>> net = op.Network.DelaunayVoronoiDual(points=pts, domain_size=[2]) """ def _try_points(num_points, prob): prob = _sp.array(prob)/_sp.amax(prob) # Ensure prob is normalized base_pts = [] N = 0 while N < num_points: pt = _sp.random.rand(3) # Generate a point # Test whether to keep it or not [indx, indy, indz] = _sp.floor(pt*_sp.shape(prob)).astype(int) if _sp.random.rand(1) <= prob[indx][indy][indz]: base_pts.append(pt) N += 1 base_pts = _sp.array(base_pts) return base_pts if len(domain_size) == 1: # Spherical domain_size = _sp.array(domain_size) if prob is None: prob = _sp.ones([41, 41, 41]) prob[20, 20, 20] = 0 prob = _spim.distance_transform_bf(prob) <= 20 base_pts = _try_points(num_points, prob) # Convert to spherical coordinates [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0.5]).T # Center at origin r = 2*_sp.sqrt(X**2 + Y**2 + Z**2)*domain_size[0] theta = 2*_sp.arctan(Y/X) phi = 2*_sp.arctan(_sp.sqrt(X**2 + Y**2)/Z) # Trim points outside the domain (from improper prob images) inds = r <= domain_size[0] [r, theta, phi] = [r[inds], theta[inds], phi[inds]] # Reflect base points across perimeter new_r = 2*domain_size - r r = _sp.hstack([r, new_r]) theta = _sp.hstack([theta, theta]) phi = _sp.hstack([phi, phi]) # Convert to Cartesean coordinates X = r*_sp.cos(theta)*_sp.sin(phi) Y = r*_sp.sin(theta)*_sp.sin(phi) Z = r*_sp.cos(phi) base_pts = _sp.vstack([X, Y, Z]).T elif len(domain_size) == 2: # Cylindrical domain_size = _sp.array(domain_size) if prob is None: prob = _sp.ones([41, 41, 41]) prob[20, 20, :] = 0 prob = _spim.distance_transform_bf(prob) <= 20 base_pts = _try_points(num_points, prob) # Convert to cylindrical coordinates [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0]).T # Center on z-axis r = 2*_sp.sqrt(X**2 + Y**2)*domain_size[0] theta = 2*_sp.arctan(Y/X) z = Z*domain_size[1] # Trim points outside the domain (from improper prob images) inds = r <= domain_size[0] [r, theta, z] = [r[inds], theta[inds], z[inds]] inds = ~((z > domain_size[1]) + (z < 0)) [r, theta, z] = [r[inds], theta[inds], z[inds]] # Reflect base points about faces and perimeter new_r = 2*domain_size[0] - r r = _sp.hstack([r, new_r]) theta = _sp.hstack([theta, theta]) z = _sp.hstack([z, z]) r = _sp.hstack([r, r, r]) theta = _sp.hstack([theta, theta, theta]) z = _sp.hstack([z, -z, 2-z]) # Convert to Cartesean coordinates X = r*_sp.cos(theta) Y = r*_sp.sin(theta) Z = z base_pts = _sp.vstack([X, Y, Z]).T elif len(domain_size) == 3: # Rectilinear domain_size = _sp.array(domain_size) Nx, Ny, Nz = domain_size if prob is None: prob = _sp.ones([10, 10, 10], dtype=float) base_pts = _try_points(num_points, prob) base_pts = base_pts*domain_size # Reflect base points about all 6 faces orig_pts = base_pts base_pts = _sp.vstack((base_pts, [-1, 1, 1]*orig_pts + [2.0*Nx, 0, 0])) base_pts = _sp.vstack((base_pts, [1, -1, 1]*orig_pts + [0, 2.0*Ny, 0])) base_pts = _sp.vstack((base_pts, [1, 1, -1]*orig_pts + [0, 0, 2.0*Nz])) base_pts = _sp.vstack((base_pts, [-1, 1, 1]*orig_pts)) base_pts = _sp.vstack((base_pts, [1, -1, 1]*orig_pts)) base_pts = _sp.vstack((base_pts, [1, 1, -1]*orig_pts)) return base_pts
def __plot(self, metodo=0, eD=[]): """Plot the Moody chart using the indicate method método de cálculo: 0 - Colebrook 1 - Chen (1979) 2 - Romeo (2002) 3 - Goudar-Sonnad 4 - Manadilli (1997) 5 - Serghides 6 - Churchill (1977) 7 - Zigrang-Sylvester (1982) 8 - Swamee-Jain (1976)") eD: lista con las líneas de rugosidades relativas a dibujar Prmin: escala del eje x, minimo valor de Pr a representar Prmax: escala del eje y, maximo valor de Pr a representar """ if not eD: eD=[0, 1e-6, 5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 4e-4, 6e-4, 8e-4, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.006, 0.008, 0.01, 0.0125, 0.015, 0.0175, 0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05, 0.06, 0.07] F=f_list[metodo] #laminar Re=[600, 2400] f=[64./R for R in Re] self.diagrama.axes2D.plot(Re, f, "k") #turbulento Re=logspace(log10(2400), 8, 50) for e in eD: self.diagrama.axes2D.plot(Re, [F(Rei, e) for Rei in Re], "k") self.diagrama.axes2D.annotate(representacion(e, tol=4.5), (Re[45], F(Re[45], e)), size="small", horizontalalignment="center", verticalalignment="bottom", rotation=arctan((log10(F(Re[47], e))-log10(F(Re[35], e)))/(log10(Re[47])-log10(Re[35])))*360/2/pi) #Transición f=[(1/(1.14-2*log10(3500/R)))**2 for R in Re] self.diagrama.axes2D.plot(Re, f, "k", lw=0.5, linestyle=":") self.diagrama.axes2D.add_artist(ConnectionPatch((600, 0.009), (2400, 0.009), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w")) self.diagrama.axes2D.add_artist(ConnectionPatch((2400, 0.009), (6000, 0.009), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w")) self.diagrama.axes2D.add_artist(ConnectionPatch((6000, 0.095), (40000, 0.095), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w")) self.diagrama.axes2D.add_artist(ConnectionPatch((40000, 0.095), (9.9e7, 0.095), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w")) self.diagrama.axes2D.text(15000, 0.094, QtGui.QApplication.translate("pychemqt", "Transition Zone"), size="small", verticalalignment="top", horizontalalignment="center") self.diagrama.axes2D.text(2e6, 0.094, QtGui.QApplication.translate("pychemqt", "Turbulent flux fully desarrolled"), size="small", verticalalignment="top", horizontalalignment="center") self.diagrama.axes2D.text(4000, 0.0091, QtGui.QApplication.translate("pychemqt", "Critic\nzone"), size="small", verticalalignment="bottom", horizontalalignment="center") self.diagrama.axes2D.text(1200, 0.0091, QtGui.QApplication.translate("pychemqt", "Laminar flux"), size="small", verticalalignment="bottom", horizontalalignment="center")
def kink(x, t, v, x0, epsilon=1):
    # epsilon = +/- 1
    g = gamma(v)
    u = 4 * arctan(exp(epsilon * g * (x - x0 - v * t)))
    ut = -2 * epsilon * g * v / cosh(epsilon * g * (x - x0 - v * t))
    return {'u': u, 'ut': ut}
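# Evaluating the sine-Gordon kink at t = 0 on a spatial grid. gamma(v) is assumed to be
# the Lorentz factor 1/sqrt(1 - v**2) in natural units, and arctan/exp/cosh are assumed
# to come from numpy/scipy imports elsewhere in this module.
import numpy as np
x_grid = np.linspace(-10.0, 10.0, 400)
fields = kink(x_grid, t=0.0, v=0.5, x0=0.0)
u0, ut0 = fields['u'], fields['ut']   # u rises from ~0 to ~2*pi across the kink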
def teta(V):
    sigma = V_0 / V
    return teta_0 * sigma**(2 / 3) * exp(
        (gamma_0 - 2 / 3) * (B**2 + D**2) / B *
        arctan(B * log(sigma) / (B**2 + D * (log(sigma) + D))))
if count > 0: release_times[counter:counter + count] = release_time counter += count release_times = utility.draw_from_inputted_distribution( release_times, 2, swarm_size) heading_data = { 'angles': (scipy.pi / 180) * scipy.array([0., 90., 180., 270.]), 'counts': scipy.array([[1724, 514, 1905, 4666], [55, 72, 194, 192]]) } else: #Grab wind info to determine heading mean wind_x, wind_y = importedWind.quiver_at_time(0) heading_mean = scipy.arctan(wind_y[0, 0] / wind_x[0, 0]) beta = 10. release_times = scipy.random.exponential(beta, (swarm_size, )) kappa = 2. heading_data = None swarm_param = { 'swarm_size': swarm_size, 'heading_data': heading_data, 'initial_heading': scipy.random.vonmises(heading_mean, kappa, (swarm_size, )), 'x_start_position': scipy.zeros(swarm_size), 'y_start_position': scipy.zeros(swarm_size), 'flight_speed': scipy.full((swarm_size, ), 0.5),
I1 = I1 / 1000000000
I2 = I2 / 1000000000
I3 = I3 / 1000000000
var1, f_var1 = opt.curve_fit(I_phi1, x1, I1, [1, 160e-6, 25e-3], maxfev=10000)
var2, f_var2 = opt.curve_fit(I_phi1, x2, I2, [1, 160e-6, 25e-3], maxfev=10000)
var3, f_var3 = opt.curve_fit(I_phi2, x3, I3, [1, 40e-6, 25e-3, 0.25e-3], maxfev=10000)
x_werte = np.linspace(-0.04, 0.04, 10000)  # linspace(a, b, N) creates an array of N values from a to b
phi1 = sp.arctan((x1 - var1[2]) / L)
phi2 = sp.arctan((x2 - var2[2]) / L)
phi3 = sp.arctan((x3 - var3[2]) / L)
plt.plot(phi1, I1 / (650e-9), "b.", label="Measured values")
plt.plot(1.3 * x_werte,
         I_phi1((x_werte + var1[2]), var1[0], var1[1], var1[2]) / (650e-9),
         'r-', label=r"$\mathrm{Fit}$")
plt.xlabel("Angle in rad")
plt.ylabel("Normalized intensity")
plt.legend()
plt.grid()
plt.show()
# plt.plot(phi2, I2/(480e-9), "b.", label="Measured values")
def integrand_delay(r, d0, v, sigma, R): atan = 4. * scipy.arctan(np.sqrt((2. * R - r) / (2. * R + r))) return (d0 + r/v) * \ np.exp(-r**2/(2.*sigma**2)) * \ r * (atan - np.sin(atan))
def getXiCross(self,rp,rt,z,pk_lin,pars): k = self.k if not self.fit_aiso: ap=pars["ap"] at=pars["at"] else: ap=pars["aiso"]*pars["1+epsilon"]*pars["1+epsilon"] at=pars["aiso"]/pars["1+epsilon"] drp=pars["drp"] Lpar=pars["Lpar_cross"] Lper=pars["Lper_cross"] qso_evol = [pars['qso_evol_0'],pars['qso_evol_1']] rp_shift=rp+drp ar=np.sqrt(rt**2*at**2+rp_shift**2*ap**2) mur=rp_shift*ap/ar muk = model.muk kp = k * muk kt = k * np.sqrt(1-muk**2) bias_lya = pars["bias_lya*(1+beta_lya)"]/(1.+pars["beta_lya"]) beta_lya = pars["beta_lya"] ### UV fluctuation if self.uv_fluct: bias_gamma = pars["bias_gamma"] bias_prim = pars["bias_prim"] lambda_uv = pars["lambda_uv"] W = sp.arctan(k*lambda_uv)/(k*lambda_uv) bias_lya_prim = bias_lya + bias_gamma*W/(1+bias_prim*W) beta_lya = bias_lya*beta_lya/bias_lya_prim bias_lya = bias_lya_prim ### LYA-QSO cross correlation bias_qso = pars["bias_qso"] beta_qso = pars["growth_rate"]/bias_qso pk_full = bias_lya*bias_qso*(1+beta_lya*muk**2)*(1+beta_qso*muk**2)*pk_lin ### HCDS-QSO cross correlation if self.lls: bias_lls = pars["bias_lls"] beta_lls = pars["beta_lls"] L0_lls = pars["L0_lls"] F_lls = sp.sinc(kp*L0_lls/sp.pi) pk_full+=bias_lls*F_lls*bias_qso*(1+beta_lls*muk**2)*(1+beta_qso*muk**2)*pk_lin ### Velocity dispersion if (self.velo_gauss): pk_full *= sp.exp( -0.25*(kp*pars['sigma_velo_gauss'])**2 ) if (self.velo_lorentz): pk_full /= np.sqrt(1.+(kp*pars['sigma_velo_lorentz'])**2) ### Peak broadening sigmaNLper = pars["SigmaNL_perp"] sigmaNLpar = sigmaNLper*pars["1+f"] pk_full *= sp.exp( -0.5*( (sigmaNLper*kt)**2 + (sigmaNLpar*kp)**2 ) ) ### Pixel size pk_full *= sp.sinc(kp*Lpar/2./sp.pi)**2 pk_full *= sp.sinc(kt*Lper/2./sp.pi)**2 ### Non-linear correction pk_full *= np.sqrt(self.DNL(self.k,self.muk,self.pk,self.q1_dnl,self.kv_dnl,self.av_dnl,self.bv_dnl,self.kp_dnl,self.dnl_model)) ### Redshift evolution evol = np.power( self.evolution_growth_factor(z)/self.evolution_growth_factor(self.zref),2. ) evol *= self.evolution_Lya_bias(z,[pars["alpha_lya"]])/self.evolution_Lya_bias(self.zref,[pars["alpha_lya"]]) evol *= self.evolution_QSO_bias(z,qso_evol)/self.evolution_QSO_bias(self.zref,qso_evol) return self.Pk2Xi(ar,mur,k,pk_full,ell_max=self.ell_max)*evol
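# Standalone sketch of the UV-fluctuation step in getXiCross above: the window
# W(k) = arctan(k*lambda_uv)/(k*lambda_uv) tends to 1 at large scales (k -> 0) and
# to 0 at small scales, so bias_lya is shifted by the bias_gamma term on large
# scales only, and beta_lya is rescaled to conserve bias*beta. Parameter values
# below are illustrative.
import numpy as np

def uv_corrected_bias_sketch(k, bias_lya, beta_lya, bias_gamma, bias_prim, lambda_uv):
    W = np.arctan(k * lambda_uv) / (k * lambda_uv)
    bias_lya_prim = bias_lya + bias_gamma * W / (1 + bias_prim * W)
    beta_lya_prim = bias_lya * beta_lya / bias_lya_prim
    return bias_lya_prim, beta_lya_prim

k = np.logspace(-3, 1, 5)   # illustrative wavenumbers
print(uv_corrected_bias_sketch(k, -0.2, 1.5, 0.1, -0.6, 300.0))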
def integrand_Cnorm(r, sigma, R): atan = 4. * scipy.arctan(np.sqrt((2. * R - r) / (2. * R + r))) return np.exp(-r**2/(2.*sigma**2)) * \ r * (atan - np.sin(atan))
#%% # imports from IPython.display import Image import scipy as sp import numpy as np import matplotlib.pyplot as plt from sympy import symbols, limit, atan #%% [markdown] # ## This is markdown # Image(filename="src/EE112/HW1/Problem1.png") #%% [markdown] # ### Problem 1 # $z = 0 + j2$ #%% z = complex(0, 2) plt.plot([0, z.real], [0, z.imag]) plt.show() #%% [markdown] # $r = \sqrt{0^2 + 2^2}$ r = sp.sqrt(0**2 + 2**2) print("r = %1d" % r) #%% [markdown] # $\theta = \tan^{-1}{\frac{2}{0}}$ x = symbols('x') theta = limit(atan(2 / x), x, 0) # use sympy's atan here: scipy's arctan cannot evaluate a symbolic limit #%%
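#%%
# Alternative sketch for the same polar conversion: cmath.phase / abs avoid the
# 2/0 division entirely and give r = 2, theta = pi/2 for z = 2j directly.
import cmath

z_demo = complex(0, 2)
r_demo = abs(z_demo)              # 2.0
theta_demo = cmath.phase(z_demo)  # pi/2, same value as the sympy limit above
print(r_demo, theta_demo)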
def integrand(r, R, sigma): return r * np.exp(-r**2/(2*sigma**2)) * \ ( 4*scipy.arctan(np.sqrt( (2*R-r)/(2*R+r) )) - \ scipy.sin(4*scipy.arctan(np.sqrt( (2*R-r)/(2*R+r) ))) )
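# Sketch of how the delay integrands above would typically be used: the mean delay
# follows from normalizing the delay-weighted integral (integrand_delay) by the
# normalization integral (integrand_Cnorm) over r in [0, 2R]. The functions are
# re-declared locally so the sketch runs on its own; the parameter values
# (d0, v, sigma, R) are illustrative.
import numpy as np
from scipy import integrate

def _atan_term(r, R):
    a = 4.0 * np.arctan(np.sqrt((2.0 * R - r) / (2.0 * R + r)))
    return a - np.sin(a)

def _integrand_delay(r, d0, v, sigma, R):
    return (d0 + r / v) * np.exp(-r**2 / (2.0 * sigma**2)) * r * _atan_term(r, R)

def _integrand_Cnorm(r, sigma, R):
    return np.exp(-r**2 / (2.0 * sigma**2)) * r * _atan_term(r, R)

d0, v, sigma, R = 0.1, 2.0, 0.5, 1.0   # illustrative units
num, _ = integrate.quad(_integrand_delay, 0.0, 2.0 * R, args=(d0, v, sigma, R))
den, _ = integrate.quad(_integrand_Cnorm, 0.0, 2.0 * R, args=(sigma, R))
print("mean delay ~", num / den)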
def sample(ignition, connection, local_features_path=None): """ Pulls in a dataframe of relevant observations and columns from PSQL. Parameters ========== ignition : yaml with all information necessary connection : SQLConn connection class local_features_path : str Path to locally stored features file. If provided, works with features from locally stored file. If not provided, works with features stored in PSQL. Returns ======= X_train, X_test, y_train, y_test : pd.DataFrames X_train, X_test : shape = (# of observations, # of features) y_train, y_test : shape = (# of observations, # of classes) """ # pull in all variables of interest from ignition # some are no longer used -- may drop some e_feature_cols = ignition['existing_features'] target_col = ignition['target'] labels_table = ignition['labels_table'] features_table = ignition['features_table'] unique_id = ignition['unique_id'] query = ignition['query'] data_type = ignition['data_type'] classes = ignition['classes'] condition = ignition['condition'] test_perc = ignition['test_perc'] seed = ignition['seed'] sql_seed = (2 / pi) * arctan(seed) if not unique_id: print( "You must have a unique id listed to be able to generate test data." ) return if not data_type == "flat": print("Data type not supported.") return None # save required features as string ref_features = [] for e_feature_col in e_feature_cols: ref_features.append('semantic.' + features_table + '.' + e_feature_col) ref_features = ', '.join(ref_features) # condition, typically used to limit the size of the sample used if condition: cond = condition else: cond = ' ' if local_features_path: # get features stored on disk and join to labels from PSQL labels_query = f"select setseed({sql_seed}); select * from semantic.{labels_table} {cond};" labels_df = connection.query(labels_query) labels_df[unique_id] = labels_df[unique_id].astype('int64') features_df = pd.read_pickle(local_features_path) features_df[unique_id] = features_df[unique_id].astype('int64') all_data = labels_df.join(features_df.set_index(unique_id), on=unique_id, how='inner') else: # get data from SQL database query = f""" select setseed({sql_seed}); select {ref_features}, semantic.{labels_table}.* \ from semantic.{features_table} \ inner join semantic.{labels_table} \ on semantic.{features_table}.{unique_id}=semantic.{labels_table}.{unique_id} {cond};""" all_data = connection.query(query) # split out features (X) and labels (y) X = all_data[e_feature_cols] labels = [i.lower() for i in classes] y = all_data[labels] # split data into train and test x_train, x_test, y_train, y_test = create_train_test_split( X, y, test_size=test_perc, random_seed=seed) return x_train, x_test, y_train, y_test
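# Sketch of the seed mapping used above: PostgreSQL's setseed() only accepts
# values in [-1, 1], so (2/pi)*arctan(seed) squashes an arbitrary integer seed
# into that range while staying deterministic per seed.
from numpy import arctan, pi

def sql_seed_sketch(seed):
    return (2 / pi) * arctan(seed)

for s in (0, 1, 42, -10**6):
    val = sql_seed_sketch(s)
    assert -1.0 <= val <= 1.0
    print(s, "->", round(float(val), 6))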
def getQ_complex(w, dims, nr, sym=1, N=5, harm=1, pol='TE', scaling=True, units='norm'): ''' Builds Q-matrix representing system of linear equations for matching fields along the boundary of a rectangular resonator. Based on Goell 1969. Inputs: w - complex frequency (exp[-i*(wr - i*wi)*t]) dims - dimensions of resonator (width,height) normalized by units of high-index wavelength nr - refractive index of resonator sym - Consider even (0) or odd (1) symmetry across x-axis N - number of harmonics to consider harm - {0, 1, 'both'} determines if even (0), odd (1) or both harmonics are considered in the cylindrical harmonic expansion pol - ('TE', 'TM', or None) transverse wrt long axis of wire (same as FDFD) ''' if units == 'norm': e0 = 1. mu = 1. Zo = 1. wl = 1. c0 = 1. else: e0 = 8.85e-12 mu = 4 * pi * 1e-7 Zo = csqrt(mu / e0) wl = 1e-6 c0 = 3e8 er = nr**2 kz = 0 h = k1 = w / c0 * nr p = ko = w / c0 a, b = np.array( dims) * wl / nr # wavevectors and dimensions are now in absolute units if a == b: a *= 1.01 # avoid errors associated with selecting the corner point as a matching point phi = getphi(sym) # Initialize matrices eLA = np.zeros((N, N), dtype=complex) # N: number of harmonics we are using eLC = np.zeros((N, N), dtype=complex) hLB = np.zeros((N, N), dtype=complex) hLD = np.zeros((N, N), dtype=complex) eTA = np.zeros((N, N), dtype=complex) eTB = np.zeros((N, N), dtype=complex) eTC = np.zeros((N, N), dtype=complex) eTD = np.zeros((N, N), dtype=complex) hTA = np.zeros((N, N), dtype=complex) hTB = np.zeros((N, N), dtype=complex) hTC = np.zeros((N, N), dtype=complex) hTD = np.zeros((N, N), dtype=complex) # Choose angles for boundary matching conditions m = np.arange(N) + 1 # m is 1 to N theta = (m - 0.5) * pi / (2 * N) # theta_m # Formulate Matrix Elements tc = sp.arctan(b / a) R = sin(theta) * (theta < tc) + cos(theta + pi / 4.) * ( theta == tc) + -1 * cos(theta) * (theta > tc) T = cos(theta) * (theta < tc) + cos(theta - pi / 4.) * ( theta == tc) + sin(theta) * (theta > tc) rm = a / (2. * cos(theta)) * (theta < tc) + (a**2 + b**2)**0.5 / 2. * ( theta == tc) + b / (2. * sin(theta)) * (theta > tc) for ni in range(N): # array (0 to N-1) # angles used for matching fields at the boundary depend on whether the current harmonic is odd/even # use exclusively even or odd harmonics if harm == 1: n = 2 * ni + 1 elif harm == 0: n = 2 * ni else: n = ni S = sin(n * theta + phi) C = cos(n * theta + phi) J = jn(n, h * rm) Jp = jvp(n, h * rm) JJ = n * J / (h**2 * rm) JJp = Jp / (h) H = h1(n, p * rm) Hp = h1vp(n, p * rm) HH = n * H / (p**2 * rm) HHp = Hp / (p) # scaling to prevent overflow/underflow if scaling: d = (a + b) / 2. Jmult = h**2 * d / abs(jn(n, h * min(a, b) / 2.)) # min(a, b): smaller transverse dimension Hmult = p**2 * d / abs(h1(n, p * min(a, b) / 2.)) else: Jmult = Hmult = 1. 
eLA[:, ni] = J * S * Jmult eLC[:, ni] = H * S * Hmult hLB[:, ni] = J * C * Jmult hLD[:, ni] = H * C * Hmult eTA[:, ni] = 0 #-1*kz*(JJp*S*R + JJ*C*T) * Jmult eTB[:, ni] = ko * Zo * (JJ * S * R + JJp * C * T) * Jmult eTC[:, ni] = 0 #kz*(HHp*S*R + HH*C*T) * Hmult eTD[:, ni] = -1 * ko * Zo * (HH * S * R + HHp * C * T) * Hmult hTA[:, ni] = er * ko * ( JJ * C * R - JJp * S * T) / Zo * Jmult # typo in paper - entered as JJp rather than Jp hTB[:, ni] = 0 #-kz*(JJp*C*R - JJ*S*T) * Jmult hTC[:, ni] = -ko * (HH * C * R - HHp * S * T) / Zo * Hmult hTD[:, ni] = 0 #kz*(HHp*C*R - HH*S*T) * Hmult if scaling: eLA[:, ni] /= np.amax(abs(eLA[:, ni])) eLC[:, ni] /= np.amax(abs(eLC[:, ni])) hLB[:, ni] /= np.amax(abs(hLB[:, ni])) hLD[:, ni] /= np.amax(abs(hLD[:, ni])) eTA[:, ni] /= np.amax(abs(eTA[:, ni])) eTB[:, ni] /= np.amax(abs(eTB[:, ni])) eTC[:, ni] /= np.amax(abs(eTC[:, ni])) eTD[:, ni] /= np.amax(abs(eTD[:, ni])) hTA[:, ni] /= np.amax(abs(hTA[:, ni])) hTB[:, ni] /= np.amax(abs(hTB[:, ni])) hTC[:, ni] /= np.amax(abs(hTC[:, ni])) hTD[:, ni] /= np.amax(abs(hTD[:, ni])) ''' print 'n:',n print 'Jmult:',Jmult print 'Hmult:',Hmult print 'abs(h1):',abs(h1(n,p*rm)) print 'abs(h1vp):',abs(h1vp(n,p*rm)) print 'eLA:',eLA[:,ni] print 'eLC:',eLC[:,ni] print ''' O = np.zeros(np.shape(eLA)) if pol == 'TM': Q1 = np.hstack((eLA, -1 * eLC)) Q2 = np.hstack((hTA, -1 * hTC)) elif pol == 'TE': Q1 = np.hstack((hLB, -1 * hLD)) Q2 = np.hstack((eTB, -1 * eTD)) if pol != None: Q = np.vstack((Q1, Q2)) else: Q1 = np.hstack((eLA, O, -1 * eLC, O)) Q2 = np.hstack((O, hLB, O, -1 * hLD)) Q3 = np.hstack((eTA, eTB, -1 * eTC, -1 * eTD)) Q4 = np.hstack((hTA, hTB, -1 * hTC, -1 * hTD)) Q = np.vstack((Q1, Q2, Q3, Q4)) # for even harmonics, eliminate n=0 terms for E or H, depending on symmetry. Inclusion of these terms results in zero columns and thus a zero determinant. # Since we are eliminating columns from our matrix, we must eliminate rows as well to maintain square dimensions (4N-2). Goell's # convention is to discard the first and last rows for whichever longitudinal component has odd symmetry (eg. hLB/D if sym=0) if pol == None: if harm == 0: if sym == 0: #eliminate b0,d0 terms Q = np.delete( Q, [N, 3 * N], 1) #delete syntax: (array,index,axis (0 = row, 1 = column) Q = np.delete(Q, [N, 2 * N - 1], 0) elif sym == 1: #eliminate a0,c0 terms Q = np.delete(Q, [0, 2 * N], 1) Q = np.delete(Q, [0, N - 1], 0) else: # TE or TM if harm == 0: if sym == 0 and pol == 'TE': #eliminate b0,d0 terms Q = np.delete(Q, [0, N], 1) Q = np.delete(Q, [0, N - 1], 0) elif sym == 1 and pol == 'TM': #eliminate a0,c0 terms Q = np.delete(Q, [0, N], 1) Q = np.delete(Q, [0, N - 1], 0) return Q
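# The Q matrix above is singular at a resonance, so a common outer loop scans the
# frequency and looks for minima of |det Q(w)|. A self-contained sketch of that
# search; `build_Q` stands in for getQ_complex(w, dims, nr, ...), and the toy
# matrix below (with a known determinant zero at w = 2) is only there so the
# sketch runs on its own.
import numpy as np

def find_det_minimum(build_Q, w_grid):
    """Return the grid frequency where |det Q(w)| is smallest, plus the scan."""
    dets = np.array([abs(np.linalg.det(build_Q(w))) for w in w_grid])
    return w_grid[np.argmin(dets)], dets

build_Q = lambda w: np.array([[w - 2.0, 0.3], [0.0, 1.0]], dtype=complex)
w_grid = np.linspace(1.0, 3.0, 201)
w_res, dets = find_det_minimum(build_Q, w_grid)
print("approximate resonance:", w_res)   # ~2.0
# With the real matrix one would pass, e.g.,
# lambda w: getQ_complex(w, dims=(1.0, 0.5), nr=3.5, pol='TE') instead.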
Gamma1 = 0.5 Gamma2 = 0.5 Gamma = Gamma1 + Gamma2 SigmaR = -1j*Gamma/2.0 SigmaA = +1j*Gamma/2.0 Omega = 1.0 # response frequency #kT = 1.0 # 25.6 # room temperature E0 = 0.0 # single energy level of the QD NE = 2000 # number of energy points Ueq = 0.0 # equilibrium potential # Energy grid FermiEnergy = sp.linspace(-10,10,200) Gh = [] for Ef in FermiEnergy: realGh1 = Gamma1*Gamma2/(8*sp.pi*Gamma*Omega)*(-4*Omega*sp.arctan(2*(E0-Ef)/Gamma) \ +(4*(Ef-E0)+2*Omega)*sp.arctan(2*(E0-Ef-Omega)/Gamma) \ +(4*(E0-Ef)+2*Omega)*sp.arctan(2*(E0-Ef+Omega)/Gamma) \ -Gamma*sp.log(4*(E0-Ef)**2 + Gamma**2 + 8*(E0-Ef)*Omega + 4*Omega**2) \ +Gamma*sp.log(4*(E0-Ef)**2 + Gamma**2 - 8*(E0-Ef)*Omega + 4*Omega**2) ) imagGh1 = Gamma1*Gamma2/(8*sp.pi*Gamma*Omega)*(-4*Gamma*sp.arctan(2*(E0-Ef)/Gamma) \ +2*Gamma*sp.arctan(2*(E0-Ef-Omega)/Gamma) \ +2*Gamma*sp.arctan(2*(E0-Ef+Omega)/Gamma) \ +4*(Ef-E0)*sp.log(4*(E0-Ef)**2 + Gamma**2) \ +(2*(E0-Ef)+Omega)*sp.log(4*(E0-Ef)**2 + Gamma**2 + 8*(E0-Ef)*Omega + 4*Omega**2) \ +(2*(E0-Ef)-Omega)*sp.log(4*(E0-Ef)**2 + Gamma**2 - 8*(E0-Ef)*Omega + 4*Omega**2) ) realGh2 = -Gamma1*Gamma2/(4*Gamma*sp.pi)*(2*sp.arctan(2*(E0-Ef)/Gamma) \ -sp.arctan(2*(E0-Ef-Omega)/Gamma) \ -sp.arctan(2*(E0-Ef+Omega)/Gamma)) imagGh2 = Gamma1*Gamma2/(8*Gamma*sp.pi)*(sp.log(4*(E0-Ef)**2 + Gamma**2 + 8*(E0-Ef)*Omega + 4*Omega**2) \ -sp.log(4*(E0-Ef)**2 + Gamma**2 - 8*(E0-Ef)*Omega + 4*Omega**2))
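# The arctan/log combinations above are typical of energy integrals over
# Lorentzians of width Gamma. As a sanity check, this sketch verifies numerically
# (with illustrative values Gamma_s, E0_s, Ef_s) that
#   integral_{-inf}^{Ef} (Gamma/2) / ((E - E0)**2 + (Gamma/2)**2) dE
#     = arctan(2*(Ef - E0)/Gamma) + pi/2
import numpy as np
from scipy import integrate

Gamma_s, E0_s, Ef_s = 1.0, 0.0, 0.7

num, _ = integrate.quad(
    lambda E: (Gamma_s / 2.0) / ((E - E0_s)**2 + (Gamma_s / 2.0)**2),
    -np.inf, Ef_s)
closed = np.arctan(2.0 * (Ef_s - E0_s) / Gamma_s) + np.pi / 2.0
print(num, closed)   # should agree to quad's tolerance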
def ecef2lla(ecef: Sequence[float], cst: ConstantsFile) -> Tuple[float, float, float]: """ Converts a cartesian (x, y, z) earth-centred earth-fixed coordinate to a radial (lat, lon, alt) coordinate. """ x, y, z = ecef lat = 0. lon = 0. alt = 0. # if x and y are both zero, calculate # the geodetic vector directly if (x == 0.) and (y == 0.): # set longitude lon = 0. # set altitude - deduct radius of earth # from the z-coordinate alt = abs(z) - cst.semi_major_axis * (1. - cst.flat_coeff) # set the latitude if (z > 0.): lat = cst.pi / 2. elif (z < 0.): lat = -cst.pi / 2. else: # if everything is 0, coordinate is the centre of the earth raise GeolocationError("invalid ECEF coordinates: {}".format(ecef)) return lat, lon, alt # otherwise, convert through iteration # compute eccentricity squared (e^2) ecc_sqr = cst.flat_coeff * (2. - cst.flat_coeff) # first iteration - E-W curvature equals semi-major axis x0 = cst.semi_major_axis rad_xy = norm([x, y]) alt_est = norm(ecef) - cst.semi_major_axis * sqrt(1. - cst.flat_coeff) tmp = 1. - ecc_sqr * x0 / (x0 + alt_est) lat_est = arctan(z / (rad_xy * tmp)) # now iterate until geodetic coordinates are within GEODETIC_ERR # (or for COORD_ITERS number of iterations) max_iters = True for iter_count in range(COORD_ITERS): sin_sqr_lat = sin(lat_est) * sin(lat_est) xn = cst.semi_major_axis / sqrt(1. - ecc_sqr * sin_sqr_lat) alt = rad_xy / cos(lat_est) - x0 tmp = 1. - ecc_sqr * xn / (xn + alt) lat = arctan(z / (rad_xy * tmp)) # compute latitude error lat_err = abs(lat - lat_est) # and altitude error alt_err = abs(alt - alt_est) / cst.semi_major_axis # update estimations x0 = xn lat_est = lat alt_est = alt if (lat_err < GEODETIC_ERR) and (alt_err < GEODETIC_ERR): max_iters = False break if max_iters: raise RuntimeWarning("MAX_ITERS reached in ecef2lla") if x == 0.: if y == 0.: lon = 0. elif y > 0.: lon = cst.pi / 2. elif y < 0.: lon = -cst.pi / 2. else: lon = arctan2(y, x) return lat, lon, alt
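# The forward transform (geodetic -> ECEF) is the natural round-trip check for
# ecef2lla above. A self-contained sketch using WGS-84 values; the constant names
# below are illustrative and not tied to the ConstantsFile class.
from math import sin, cos, sqrt

WGS84_A = 6378137.0            # semi-major axis [m]
WGS84_F = 1.0 / 298.257223563  # flattening

def lla2ecef_sketch(lat, lon, alt):
    """lat, lon in radians, alt in metres -> (x, y, z) in metres."""
    e2 = WGS84_F * (2.0 - WGS84_F)                 # eccentricity squared
    n = WGS84_A / sqrt(1.0 - e2 * sin(lat)**2)     # prime-vertical radius of curvature
    x = (n + alt) * cos(lat) * cos(lon)
    y = (n + alt) * cos(lat) * sin(lon)
    z = (n * (1.0 - e2) + alt) * sin(lat)
    return x, y, z

# On the equator at zero longitude and altitude this reduces to (a, 0, 0):
print(lla2ecef_sketch(0.0, 0.0, 0.0))   # (6378137.0, 0.0, 0.0)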