def __init__(self, x_hat, ref, radius, vec=None, angle=None, flag=None):
    """ """
    super(Surf, self).__init__(x_hat, ref, vec=vec, angle=angle, flag=flag)
    self.sagi.s = scipy.atleast_1d(radius)
    self.meri.s = scipy.atleast_1d(radius)
def addGIWdata(conn, shot, name='c_W', name2='c_W_l', table='shots2015'):
    """Pulls all time points associated with shot and places them in the database."""
    c = conn.cursor()
    points = scipy.atleast_1d(
        scipy.squeeze(
            (c.execute('SELECT id from ' + table + ' where shot=' + str(shot))).fetchall()))
    time = scipy.atleast_1d(
        scipy.squeeze(
            (c.execute('SELECT time from ' + table + ' where shot=' + str(shot))).fetchall()))
    # If the shotfile doesn't exist, or if good data at specific time points
    # don't exist, set the values to 0.
    try:
        output = goodGIW(time, shot, name)
        output2 = goodGIW(time, shot, name2)
    except dd.PyddError:
        print(shot)
        output = scipy.zeros(time.shape)
        output2 = scipy.zeros(time.shape)
    for i in range(len(points)):
        c.execute('UPDATE ' + table + ' SET "c_W"=' + str(output[i])
                  + ' WHERE id=' + str(points[i]))
        c.execute('UPDATE ' + table + ' SET "c_W_l"=' + str(output2[i])
                  + ' WHERE id=' + str(points[i]))
    conn.commit()
def ev(self, xi, yi, zi):
    """Evaluates the tricubic spline at point (xi, yi, zi), which is f[xi, yi, zi].

    Args:
        xi (scalar float or 1-dimensional float): Position in x dimension.
            This is the first dimension of the 3d-valued grid.
        yi (scalar float or 1-dimensional float): Position in y dimension.
            This is the second dimension of the 3d-valued grid.
        zi (scalar float or 1-dimensional float): Position in z dimension.
            This is the third dimension of the 3d-valued grid.

    Returns:
        val (array or scalar float): The interpolated value at (xi, yi, zi).

    Raises:
        ValueError: If any of the dimensions exceed the evaluation boundary
            of the grid.
    """
    x = scipy.atleast_1d(xi)
    y = scipy.atleast_1d(yi)
    z = scipy.atleast_1d(zi)  # This will not modify xi, yi, zi.
    val = self.fill_value * scipy.ones(x.shape)
    idx = self._check_bounds(x, y, z)
    if z[idx].size != 0:
        if self._regular:
            val[idx] = _tricub.reg_ev(z[idx], y[idx], x[idx],
                                      self._f, self._z, self._y, self._x)
        else:
            val[idx] = _tricub.nonreg_ev(z[idx], y[idx], x[idx],
                                         self._f, self._z, self._y, self._x)
    return val
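# --- Usage sketch (illustrative, not from the original source): evaluating the
# tricubic `ev` method above. `Spline` and its constructor signature are
# assumptions standing in for whatever class hosts `ev`.
import numpy as np

x = np.linspace(0., 1., 11)
y = np.linspace(0., 1., 12)
z = np.linspace(0., 1., 13)
f = np.random.rand(13, 12, 11)   # values on the (z, y, x) grid, per the docstring
spline = Spline(x, y, z, f)      # hypothetical constructor
print(spline.ev(0.5, 0.5, 0.5))  # scalar evaluation
print(spline.ev([0.1, 0.2], [0.3, 0.4], [0.5, 0.6]))  # vectorized evaluation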
def __init__(self, params, maxsize=12, label=''):
    params['k'] = atleast_1d(params['k'])
    if 'c' not in params:
        params['c'] = zeros(shape(params['k']))
    else:
        params['c'] = atleast_1d(params['c'])
    TMMElementIHT.__init__(self, 'spring with actuator', params,
                           maxsize=maxsize, label=label)
def rasterCoords(self, s, t):
    '''Return the raster coordinates for this mipmap from the texture
    coordinates (s, t).'''
    i = scipy.atleast_1d(self.mult[0] * s + self.offset[0] + 1)
    j = scipy.atleast_1d(self.mult[1] * t + self.offset[1] + 1)
    return (i, j)
def __init__(self, mu, sigma):
    sigma = scipy.atleast_1d(scipy.asarray(sigma, dtype=float))
    mu = scipy.atleast_1d(scipy.asarray(mu, dtype=float))
    if sigma.shape != mu.shape:
        raise ValueError("sigma and mu must have the same shape!")
    if sigma.ndim != 1:
        raise ValueError("sigma and mu must both be one dimensional!")
    self.sigma = sigma
    self.emu = scipy.exp(mu)
def __init__(self, x_hat, ref, area, vec=None, angle=None, flag=None):
    """ """
    if flag is None:
        flag = ref.flag
    super(Surf, self).__init__(x_hat, ref, vec=vec, angle=angle, flag=flag)
    self.sagi.s = scipy.atleast_1d(area[0]) / 2
    self.meri.s = scipy.atleast_1d(area[1]) / 2
def __init__(self, x_hat, ref, area, radius, vec=None, angle=None, flag=None):
    """ """
    if flag is None:
        flag = ref.flag
    super(Surf, self).__init__(x_hat, ref, vec=vec, angle=angle, flag=flag)
    self.norm.s = scipy.atleast_1d(area[0]) / 2
    self.meri.s = scipy.atleast_1d(area[1]) / 2
    if self.meri.s > scipy.pi:
        raise ValueError('angle of cylinder can only be < 2*pi')
    self.sagi.s = abs(scipy.array(radius))
def __init__(self, params, symlabel='sd', symname='Usd', **kwargs):
    """Create a TorsionalSpringDamper instance.

    params is a dictionary with keys 'k' and 'c'. 'k' and 'c' are lists or
    arrays if maxsize > 4. If maxsize == 8, k = [k0, k1]. If maxsize == 12,
    k = [k0, k1, k2]. 'c' is optional and will be set to zeros(shape(k)) if
    it is omitted. Otherwise c = c0, [c0], [c0, c1], or [c0, c1, c2]."""
    if 'k' not in params:
        params['k'] = None
    params['k'] = atleast_1d(params['k'])
    if 'c' not in params:
        params['c'] = zeros(shape(params['k']))
    else:
        params['c'] = atleast_1d(params['c'])
    TMMElementIHT.__init__(self, 'spring', params, symlabel=symlabel,
                           symname=symname, **kwargs)
def compute_ellipse_params(Sigma, ci=0.95):
    """Compute the parameters of the confidence ellipse for the bivariate
    normal distribution with the given covariance matrix.

    Parameters
    ----------
    Sigma : 2d array, (2, 2)
        Covariance matrix of the bivariate normal.
    ci : float or 1d array, optional
        Confidence interval(s) to compute. Default is 0.95.

    Returns
    -------
    a : float or 1d array
        Major axes for each element in `ci`.
    b : float or 1d array
        Minor axes for each element in `ci`.
    ang : float
        Angle of ellipse, in radians.
    """
    ci = scipy.atleast_1d(ci)
    lam, v = scipy.linalg.eigh(Sigma)
    chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci]
    a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2]
    b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2]
    ang = scipy.arctan2(v[1, -1], v[0, -1])
    return a, b, ang
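# --- Usage sketch: confidence-ellipse parameters for a sample covariance.
# The covariance matrix here is illustrative only.
import numpy as np

Sigma = np.array([[2.0, 0.5],
                  [0.5, 1.0]])
a, b, ang = compute_ellipse_params(Sigma, ci=[0.68, 0.95])
# a[i] and b[i] are the full major/minor axes for each requested interval;
# ang is the orientation of the major axis, in radians.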
def __call__(self, x_new):
    """Find linearly interpolated y_new = <name>(x_new).

    Inputs:
        x_new -- New independent variables.

    Outputs:
        y_new -- Linearly interpolated values corresponding to x_new.
    """
    # 1. Handle values in x_new that are outside of x. Throw an error,
    #    or return a mask array indicating the out-of-bounds values.
    #    The behavior is set by the bounds_error variable.
    ## RHC -- was x_new = atleast_1d(x_new)
    x_new_1d = atleast_1d(x_new)
    out_of_bounds = self._check_bounds(x_new_1d)
    # 2. Find where in the original data the values to interpolate
    #    would be inserted.
    #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
    x_new_indices = searchsorted(self.x, x_new_1d)
    # 3. Clip x_new_indices so that they are within the range of
    #    self.x indices and at least 1. Removes mis-interpolation
    #    of x_new[n] = x[0].
    # RHC -- changed Int to Numeric_Int to avoid name clash with numarray
    x_new_indices = clip(x_new_indices, 1, len(self.x) - 1).astype(Numeric_Int)
    # 4. Calculate the slope of regions that each x_new value falls in.
    lo = x_new_indices - 1
    hi = x_new_indices
    # !! take() should default to the last axis (IMHO) and remove
    # !! the extra argument.
    x_lo = take(self.x, lo, axis=self.interp_axis)
    x_hi = take(self.x, hi, axis=self.interp_axis)
    y_lo = take(self.y, lo, axis=self.interp_axis)
    y_hi = take(self.y, hi, axis=self.interp_axis)
    slope = (y_hi - y_lo) / (x_hi - x_lo)
    # 5. Calculate the actual value for each entry in x_new.
    y_new = slope * (x_new_1d - x_lo) + y_lo
    # 6. Fill any values that were out of bounds with the fill value.
    # !! Need to think about how to do this efficiently for
    # !! multi-dimensional cases.
    yshape = y_new.shape
    y_new = y_new.flat
    new_shape = list(yshape)
    new_shape[self.interp_axis] = 1
    sec_shape = [1] * len(new_shape)
    sec_shape[self.interp_axis] = len(out_of_bounds)
    out_of_bounds.shape = sec_shape
    new_out = ones(new_shape) * out_of_bounds
    putmask(y_new, new_out.flat, self.fill_value)
    y_new.shape = yshape
    # Rotate the values of y_new back so that they correspond to the
    # correct x_new values.
    result = swapaxes(y_new, self.interp_axis, self.axis)
    try:
        len(x_new)
        return result
    except TypeError:
        return result[0]
def fake_dataset(self, nvel, vdisp, fbin, sigvel, mass=1., dates=(0., ), vmean=0.):
    """Creates fake single-epoch radial velocity data for Monte Carlo simulations.

    Note that multiple calls will use the same binary properties. Redraw the
    binary properties to get a new set.

    Arguments:
    - `nvel`: number of velocities to draw.
    - `vdisp`: velocity dispersion of the cluster in km/s.
    - `sigvel`: array-like of size `nvel` or single number; measurement
      uncertainty in km/s (indistinguishable from velocity dispersion for the
      single-epoch case).
    - `fbin`: binary fraction among observed stars.
    - `mass`: array-like of size `nvel` or single number; mass of observed
      stars in solar masses.
    - `dates`: iterable with relative dates of observations in years. Creates
      a one-dimensional single-epoch dataset if the iterable has a length of
      one.
    - `vmean`: mean velocity in km/s.
    """
    v_systematic = sp.randn(nvel) * vdisp
    v_bin_offset = sp.array(
        [self[:nvel].velocity(mass, time)[0, :] for time in dates])
    v_bin_offset[:, sp.rand(nvel) > fbin] = 0.
    v_meas_offset = sp.randn(v_bin_offset.size).reshape(
        v_bin_offset.shape) * sp.atleast_1d(sigvel)[:, sp.newaxis]
    return sp.squeeze(v_systematic[sp.newaxis, :] + v_bin_offset + v_meas_offset)
def __init__(self, surf1, surf2):
    """ """
    normal = geometry.pts2Vec(surf1, surf2)
    # orthogonal coordinates based off of the connecting normal
    snew = surf1.sagi - normal * ((surf1.sagi * normal) * (surf1.sagi.s / normal.s))
    mnew = surf1.meri - normal * ((surf1.meri * normal) * (surf1.meri.s / normal.s))
    super(Beam, self).__init__(surf1, surf1._origin, vec=[mnew, normal])
    # calculate area at diode
    self.sagi.s = snew.s
    a1 = surf1.area(snew.s, mnew.s)
    # calculate area at aperture
    a2 = surf2.area(
        (((self.sagi * surf2.sagi) / self.sagi.s)**2 +
         ((self.meri * surf2.sagi) / self.meri.s)**2)**.5,
        (((self.sagi * surf2.meri) / self.sagi.s)**2 +
         ((self.meri * surf2.meri) / self.meri.s)**2)**.5)
    # generate etendue
    self.etendue = a1 * a2 / (normal.s**2)
    # give initial beam, which is two points
    self.norm.s = scipy.atleast_1d(self.norm.s)
    self.norm.s = scipy.insert(self.norm.s, 0, 0.)
def autocorrelation_1d(x):
    """Estimate the normalized autocorrelation function of a 1-D series.

    Args:
        x: The series as a 1-D numpy array.

    Returns:
        array: The autocorrelation function of the time series.

    Taken from: https://github.com/dfm/emcee/blob/master/emcee/autocorr.py
    """
    x = scipy.atleast_1d(x)
    if len(x.shape) != 1:
        raise ValueError("invalid dimensions for 1D autocorrelation function")

    def next_pow_two(n):
        """Returns the next power of two greater than or equal to `n`."""
        i = 1
        while i < n:
            i = i << 1
        return i

    n = next_pow_two(len(x))
    # Compute the FFT and then (from that) the autocorrelation function.
    f = fft.fft(x - scipy.mean(x), n=2 * n)
    acf = fft.ifft(f * scipy.conjugate(f))[:len(x)].real
    acf /= acf[0]
    return acf
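# --- Usage sketch: autocorrelation of a synthetic AR(1) series. Assumes `fft`
# here is scipy's FFT module (e.g. `from scipy import fft`), matching the
# function above; the data generation is illustrative only.
import numpy as np

rho, npts = 0.9, 5000
x = np.zeros(npts)
eps = np.random.randn(npts)
for t in range(1, npts):
    x[t] = rho * x[t - 1] + eps[t]
acf = autocorrelation_1d(x)
# acf[0] == 1 by construction; acf[k] should decay roughly like rho**k.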
def compute_pk(self, k, z=None, cb=False, nonlinear=''):
    """k in h/Mpc, P(k) in (Mpc/h)^3."""
    k = scipy.atleast_1d(k)
    if cb and self.params.get('N_ncdm', 0) == 0:
        self.logger.warning('No neutrinos, switching to compute_pk_lin...')
        cb = False
    if z is not None:
        self.set_params(z_pk=z)
    self.set_params(**{'output': 'tCl,pCl,lCl,mPk',
                       'lensing': 'yes',
                       'P_k_max_h/Mpc': k.max(),
                       'non linear': nonlinear})
    self.compute()
    h = self.h()
    if cb:
        self.logger.info('Computing power spectrum of cold dark matter and baryons.')
        fun = self.pk_cb if nonlinear else self.pk_cb_lin
    else:
        self.logger.info('Computing matter power spectrum.')
        fun = self.pk if nonlinear else self.pk_lin
    return h**3 * scipy.asarray([fun(k_ * h, self.params['z_pk']) for k_ in k])
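# --- Usage sketch (hypothetical): this method appears to live on a wrapper
# class around classy; the class name `Cosmology` and its constructor are
# assumptions, not part of the original source.
import numpy as np

cosmo = Cosmology()                       # hypothetical classy wrapper
k = np.logspace(-3, 0, 100)               # wavenumbers in h/Mpc
pk_lin = cosmo.compute_pk(k, z=0.0)       # linear P(k) in (Mpc/h)^3
pk_nl = cosmo.compute_pk(k, z=0.0, nonlinear='halofit')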
def gaussians(x, x0, A, sig):
    #if sc.amax(abs(sc.imag(A))) / sc.amax(abs(sc.real(A))) > 0.01:
    #    warnings.warn(
    #        'Gaussian amplitude has a sizable imaginary part '
    #        '(max(|Im|)/max(|Re|)={0}, mean(abs(A))={1}).'.format(
    #            sc.amax(abs(sc.imag(A))) / sc.amax(abs(sc.real(A))),
    #            sc.mean(abs(A))))
    x0 = sc.atleast_1d(x0)
    A = sc.atleast_1d(A)
    sig = sc.atleast_1d(sig)
    amp = A * sc.sqrt(1 / 2.0 / sc.pi) / sig
    [X, X0] = sc.meshgrid(x, x0)
    #if len(sc.shape(amp)) == 1:
    #    gg = sc.einsum('i,ij', amp,
    #                   sc.exp(-0.5*(X-X0)**2/sc.tile(sig**2, (sc.shape(x)[0], 1)).T))
    #elif len(sc.shape(amp)) == 2:
    #    gg = sc.einsum('ij,jk', amp,
    #                   sc.exp(-0.5*(X-X0)**2/sc.tile(sig**2, (sc.shape(x)[0], 1)).T))
    gg = sc.einsum(
        '...i,...ij->...j', amp,
        sc.exp(-0.5 * (X - X0)**2 / sc.tile(sig**2, (sc.shape(x)[0], 1)).T))
    return gg
def ev(self, xi, yi):
    """Evaluate the RectBivariateSpline at (xi, yi).

    (x, y) values are checked for being within the bounds of the interpolated
    data.

    Args:
        xi (float array): input x dimensional values
        yi (float array): input y dimensional values

    Returns:
        val (float array): evaluated spline at points (x[i], y[i]),
            i=0,...,len(x)-1
    """
    idx = self._check_bounds(xi, yi)
    zi = self.fill_value * scipy.ones(xi.shape)
    zi[idx] = super(RectBivariateSpline, self).ev(
        scipy.atleast_1d(xi)[idx], scipy.atleast_1d(yi)[idx])
    return zi
def tmin(self, r, z, trace=False):
    """Calculates and returns the s value along the norm vector which
    minimizes the distance from the ray to a circle defined by input (r, z).

    Args:
        r: value, iterable or scipy.array, radius in meters
        z: value, iterable or scipy.array, z value in meters

    Kwargs:
        trace: bool if set true, the ray is assumed to be traced within a
            tokamak. A further evaluation reduces the value to one within
            the bounds of the vacuum vessel/limiter.

    Returns:
        numpy array of s values in meters
    """
    r = scipy.atleast_1d(r)
    z = scipy.atleast_1d(z)
    params = _beam.lineCirc(self(0).x(), self.norm.unit, r, z)
    sout = scipy.zeros(r.shape)
    for i in range(len(params)):
        temp = scipy.roots(params[i])
        # only positive real solutions are taken
        temp = temp[scipy.imag(temp) == 0]
        temp = scipy.real(temp[temp > 0])
        test = self(temp).r()
        # must decide between local and global minima
        sout[i] = temp[((test[0] - r[i])**2 + (test[2] - z[i])**2).argmin()]
        if trace and ((sout[i] > self.norm.s[-1]) or (sout[i] < self.norm.s[-2])):
            # need to implement this such that it searches only in the
            # area of interest
            sout[i] = None
    return sout
def te2rho(Te, tcore, tped):
    # solve for the rhos of given Te values
    tcore = scipy.atleast_1d(tcore)
    tped = scipy.atleast_1d(tped)
    Te = scipy.atleast_2d(Te)
    idx = len(tcore)
    output = scipy.zeros((idx, len(Te)))
    rho = scipy.linspace(0, 1, 101)
    for i in range(idx):
        tetemp = te(rho, tcore[i], tped[i])
        temp = scipy.interpolate.interp1d(tetemp,
                                          rho,
                                          kind='cubic',
                                          bounds_error=False)
        output[i] = temp(Te[i])  # find positions of each temperature
    return output
def interp(X, Y, Z):
    X = scipy.atleast_1d(X)
    Y = scipy.atleast_1d(Y)
    Z = scipy.atleast_1d(Z)

    ox = X[0]
    oy = Y[0]
    oz = Z[0]

    # fractional offsets within the grid cell
    rx = (ox % dx) / dx
    ry = (oy % dy) / dy
    rz = (oz % dz) / dz

    # integer cell indices, measured from the grid center
    fx = int(len(IntXVals) / 2) + int(ox / dx)
    fy = int(len(IntYVals) / 2) + int(oy / dy)
    fz = int(len(IntZVals) / 2) + int(oz / dz)

    xl = len(X)
    yl = len(Y)
    zl = len(Z)

    # the eight corner sub-blocks of the model surrounding the point
    m000 = interpModel[fx:(fx + xl), fy:(fy + yl), fz:(fz + zl)]
    m100 = interpModel[(fx + 1):(fx + xl + 1), fy:(fy + yl), fz:(fz + zl)]
    m010 = interpModel[fx:(fx + xl), (fy + 1):(fy + yl + 1), fz:(fz + zl)]
    m110 = interpModel[(fx + 1):(fx + xl + 1), (fy + 1):(fy + yl + 1), fz:(fz + zl)]
    m001 = interpModel[fx:(fx + xl), fy:(fy + yl), (fz + 1):(fz + zl + 1)]
    m101 = interpModel[(fx + 1):(fx + xl + 1), fy:(fy + yl), (fz + 1):(fz + zl + 1)]
    m011 = interpModel[fx:(fx + xl), (fy + 1):(fy + yl + 1), (fz + 1):(fz + zl + 1)]
    m111 = interpModel[(fx + 1):(fx + xl + 1), (fy + 1):(fy + yl + 1), (fz + 1):(fz + zl + 1)]

    # trilinear interpolation: weight each corner by its fractional volume
    m = ((1 - rx) * (1 - ry) * (1 - rz) * m000 + rx * (1 - ry) * (1 - rz) * m100 +
         (1 - rx) * ry * (1 - rz) * m010 + rx * ry * (1 - rz) * m110 +
         (1 - rx) * (1 - ry) * rz * m001 + rx * (1 - ry) * rz * m101 +
         (1 - rx) * ry * rz * m011 + rx * ry * rz * m111)
    return m
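# --- Minimal self-contained sketch of the same trilinear weighting, with the
# module-level globals used above (dx, interpModel, ...) replaced by local
# stand-ins so it runs on its own. Illustrative only.
import numpy as np

grid = np.arange(27.).reshape(3, 3, 3)  # stand-in for interpModel
rx = ry = rz = 0.5                      # fractional offsets within one cell
m = ((1 - rx) * (1 - ry) * (1 - rz) * grid[0, 0, 0] + rx * (1 - ry) * (1 - rz) * grid[1, 0, 0] +
     (1 - rx) * ry * (1 - rz) * grid[0, 1, 0] + rx * ry * (1 - rz) * grid[1, 1, 0] +
     (1 - rx) * (1 - ry) * rz * grid[0, 0, 1] + rx * (1 - ry) * rz * grid[1, 0, 1] +
     (1 - rx) * ry * rz * grid[0, 1, 1] + rx * ry * rz * grid[1, 1, 1])
# at the cell center this reduces to the mean of the 8 corner values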
def __init__(self, pt1, inp2):
    """ """
    try:
        self.norm = geometry.pts2Vec(pt1, inp2)
    except AttributeError:
        self.norm = inp2.copy()
    super(Ray, self).__init__(pt1)
    self.norm.s = scipy.atleast_1d(self.norm.s)
    self.norm.s = scipy.insert(self.norm.s, 0, 0.)
def __init__(self, k, a, b):
    a = scipy.atleast_1d(scipy.asarray(a, dtype=float))
    b = scipy.atleast_1d(scipy.asarray(b, dtype=float))
    if len(a) != k.num_dim:
        raise ValueError("a must have length equal to k.num_dim!")
    if len(b) != k.num_dim:
        raise ValueError("b must have length equal to k.num_dim!")
    param_names = []
    initial_params = []
    param_bounds = []
    # Set the bounds to be narrow so the LL doesn't overflow.
    for d in range(0, k.num_dim):
        param_names += ['a_{:d}'.format(d), 'b_{:d}'.format(d)]
        initial_params += [a[d], b[d]]
        param_bounds += [(a[d] - 1e-3, a[d] + 1e-3),
                         (b[d] - 1e-3, b[d] + 1e-3)]
    w = WarpingFunction(linear_warp,
                        num_dim=k.num_dim,
                        initial_params=initial_params,
                        param_bounds=param_bounds,
                        fixed_params=scipy.ones_like(initial_params, dtype=bool),
                        param_names=param_names)
    super(LinearWarpedKernel, self).__init__(k, w)
def update_cost(self, X, y):
    """For an ignorance function 1/N, where N is the number of times the
    system passed on a point X, update ignorance and wall. +1 the n_grid."""
    Xs = sp.atleast_2d(X)
    ys = sp.atleast_1d(y)
    for X, y in zip(Xs, ys):
        X = X.flatten()
        index = where_a_in_b(X, self.X_grid).item()
        self.y_grid[index] = y
        self.wall[index] = (y > self.y_threshold)
        self.n_grid[index] = 1. / (self.n_grid[index] + 1)
def compute_matrix_cond(sv):
    """Yield the matrix condition number w.r.t. the l2-norm, given the
    singular values.

    :type sv: ndarray
    :param sv: vector of singular values sorted s.t. :sv[i]: >= :sv[i+1]:
    :returns: float - condition number of :mat: or :inf: on error
    """
    sv = sp.atleast_1d(sv)
    if sv.size == 0:
        raise ValueError('undefined for empty list')
    try:
        return sp.absolute(sv[0] / sv[-1])
    except Exception:
        return sp.inf
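# --- Usage sketch: condition number from the singular values of a matrix.
# The matrix here is illustrative only.
import numpy as np

A = np.array([[1.0, 2.0],
              [3.0, 4.0]])
sv = np.linalg.svd(A, compute_uv=False)  # returned sorted, largest first
cond = compute_matrix_cond(sv)
# should agree with np.linalg.cond(A, 2)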
def __init__(self, track_type, data):
    self.type = track_type
    if self.type not in DataTrack.TYPES:
        sys.stderr.write('ERROR: Track type %s was given. Allowed track types are: %s\n'
                         % (self.type, ','.join(DataTrack.TYPES)))
        sys.exit(1)
    self.strains = []
    self.bam_fnames = []
    self.group_labels = []
    self.event_info = []
    ### handle events
    if track_type == 'event':
        self.event_info.extend(data)
    else:
        ### handle any other type of data
        for data_items in data:
            label = ''
            if ':' in data_items:
                assert len(data_items.split(':')) < 3, \
                    'ERROR: At most one label can be given per data group!\n'
                label, data_items = data_items.split(':')
            if data_items.endswith('.txt'):
                self.bam_fnames.extend([
                    str(x) for x in sp.atleast_1d(sp.loadtxt(data_items, dtype='str'))
                ])
            else:
                self.bam_fnames.append(sp.array(data_items.split(',')))
            if track_type == 'coverage':
                for group in self.bam_fnames:
                    for fname in group:
                        if not os.path.isfile(fname):
                            print('ERROR: Input file %s can not be found\n\n' % fname,
                                  file=sys.stderr)
                            sys.exit(2)
            self.strains.append(
                sp.array([re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)$', '',
                                 os.path.basename(_)) for _ in self.bam_fnames[-1]]))
            self.group_labels.append(label)
def generate_noise(shape: List[int], porosity=None, octaves: int = 3,
                   frequency: int = 32, mode: str = 'simplex'):
    r"""Generate a field of spatially correlated random noise using the Perlin
    noise algorithm, or the updated Simplex noise algorithm.

    Parameters
    ----------
    shape : array_like
        The size of the image to generate in [Nx, Ny, Nz] where N is the
        number of voxels.
    porosity : float
        If specified, this will threshold the image to the specified value
        prior to returning. If no value is given (the default), then the
        scalar noise field is returned.
    octaves : int
        Controls the *texture* of the noise, with higher octaves giving more
        complex features over larger length scales.
    frequency : array_like
        Controls the relative sizes of the features, with higher frequencies
        giving larger features. A scalar value will apply the same frequency
        in all directions, giving an isotropic field; a vector value will
        apply the specified values along each axis to create anisotropy.
    mode : string
        Which noise algorithm to use, either ``'simplex'`` (default) or
        ``'perlin'``.

    Returns
    -------
    image : ND-array
        If porosity is given, then a boolean array with ``True`` values
        denoting the pore space is returned. If not, then normally
        distributed and spatially correlated random noise is returned.

    Notes
    -----
    This method depends on a package called 'noise' which must be compiled.
    It is included in the Anaconda distribution, or a platform-specific
    binary can be downloaded.

    See Also
    --------
    porespy.tools.norm_to_uniform
    """
    try:
        import noise
    except ModuleNotFoundError:
        raise Exception("The noise package must be installed")
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        Lx, Ly, Lz = sp.full((3, ), int(shape))
    elif len(shape) == 2:
        Lx, Ly = shape
        Lz = 1
    elif len(shape) == 3:
        Lx, Ly, Lz = shape
    if mode == 'simplex':
        f = noise.snoise3
    else:
        f = noise.pnoise3
    frequency = sp.atleast_1d(frequency)
    if frequency.size == 1:
        freq = sp.full(shape=[3, ], fill_value=frequency[0])
    elif frequency.size == 2:
        freq = sp.concatenate((frequency, [1]))
    else:
        freq = sp.array(frequency)
    im = sp.zeros(shape=[Lx, Ly, Lz], dtype=float)
    for x in range(Lx):
        for y in range(Ly):
            for z in range(Lz):
                im[x, y, z] = f(x=x / freq[0], y=y / freq[1], z=z / freq[2],
                                octaves=octaves)
    im = im.squeeze()
    if porosity:
        im = norm_to_uniform(im, scale=[0, 1])
        im = im < porosity
    return im
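# --- Usage sketch: generating a correlated noise field and thresholding it to
# a target porosity. Requires the 'noise' package, per the docstring above.
field = generate_noise([64, 64], octaves=3, frequency=16)  # scalar noise field
binary = generate_noise([64, 64, 64], porosity=0.5)        # boolean pore image
# binary.mean() should be close to 0.5, since the field is mapped to a
# uniform distribution before thresholding.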
def _findLCFS(rgrid, zgrid, psiRZ, rcent, zcent, psiLCFS, nbbbs=100):
    """internal function for finding the last closed flux surface based off
    of an Equilibrium instance"""
    ang = scipy.linspace(-scipy.pi, scipy.pi, nbbbs)
    plt.ioff()
    fig = plt.figure()
    cs = plt.contour(rgrid, zgrid, scipy.squeeze(psiRZ),
                     scipy.atleast_1d(psiLCFS))
    splines = []
    for i in cs.collections[0].get_paths():
        temp = i.vertices
        # turn points into polar coordinates about the plasma center
        rvals = scipy.sqrt((temp[:, 0] - rcent)**2 + (temp[:, 1] - zcent)**2)
        thetvals = scipy.arctan2(temp[:, 1] - zcent, temp[:, 0] - rcent)
        # find all monotonic sections of the contour line in r, theta space
        temp = scipy.diff(thetvals)
        idx = 0
        sign = scipy.sign(temp[0])
        for j in range(len(temp) - 1):
            if scipy.sign(temp[j + 1]) != sign:
                sign = scipy.sign(temp[j + 1])
                # only write data if the jump at the last point is well resolved
                if j + 2 - idx > 2:
                    plt.plot(thetvals[idx:j + 2], rvals[idx:j + 2], 'o')
                    sortang = scipy.argsort(thetvals[idx:j + 2])
                    splines += [scipy.interpolate.interp1d(thetvals[sortang + idx],
                                                           rvals[sortang + idx],
                                                           kind='linear',
                                                           bounds_error=False,
                                                           fill_value=scipy.inf)]
                idx = j + 1
        if len(thetvals) - idx > 2:
            plt.plot(thetvals[idx:], rvals[idx:], 'o')
            sortang = scipy.argsort(thetvals[idx:])
            splines += [scipy.interpolate.interp1d(thetvals[sortang + idx],
                                                   rvals[sortang + idx],
                                                   kind='linear',
                                                   bounds_error=False,
                                                   fill_value=scipy.inf)]
    # construct a set of angles about the center, and use the splines to find
    # the closest part of the contour to the center at each angle; this is
    # the LCFS, store the value. If no value is found, store an infinite
    # value, which is then tossed out.
    outr = scipy.empty((nbbbs,))
    for i in range(nbbbs):
        temp = scipy.inf
        for j in splines:
            pos = j(ang[i])
            if pos < temp:
                temp = pos
        outr[i] = temp
    # remove infinites
    ang = ang[scipy.isfinite(outr)]
    outr = outr[scipy.isfinite(outr)]
    # move back to r, z space
    output = scipy.empty((2, len(ang) + 1))
    output[0, :-1] = outr * scipy.cos(ang) + rcent
    output[1, :-1] = outr * scipy.sin(ang) + zcent
    output[0, -1] = output[0, 0]
    output[1, -1] = output[1, 0]
    # turn off plotting stuff
    plt.ion()
    plt.clf()
    plt.close(fig)
    plt.ioff()
    return output.T
def grouped_plot_matrix(X, Y=None, w=None, feature_labels=None,
                        class_labels=None, show_legend=True, colors=None,
                        fixed_height=None, fixed_width=None, l=0.1, r=0.9,
                        t=0.9, b=0.1, ax_space=0.1, rotate_last_hist=None,
                        hist1d_kwargs={}, cmap=None, legend_kwargs={},
                        autocolor=True, use_kde=False, kde_bws='scott',
                        cutoff_w=None, kde1d_kwargs={}, **kwargs):
    """Plot the results of MCMC sampler (posterior and chains).

    Loosely based on triangle.py. Provides extensive options to format the
    plot.

    Parameters
    ----------
    X : 2d array, (num_samp, num_dim)
        Features.
    Y : 1d array, (num_samp,), optional
        Labels. Default is to treat all data as being from the same class.
    w : 1d array, (num_samp,), optional
        Weights of the samples. Default is to treat all samples as
        equally-weighted.
    feature_labels : list of str, optional
        The labels to use for each of the features.
    class_labels : list of str, optional
        The labels to use for each class.
    show_legend : bool, optional
        If True, draw a legend. A legend is never drawn for single-class
        data. Default is True.
    colors : list of color strings, optional
        The colors to use for each class. Using strings (either name or hex)
        lets the function automatically pick the corresponding colormaps.
    fixed_height : float, optional
        The desired figure height (in inches). Default is to automatically
        adjust based on `fixed_width` to make the subplots square.
    fixed_width : float, optional
        The desired figure width (in inches). Default is `figure.figsize[0]`.
    l : float, optional
        The location (in relative figure units) of the left margin. Default
        is 0.1.
    r : float, optional
        The location (in relative figure units) of the right margin. Default
        is 0.9.
    t : float, optional
        The location (in relative figure units) of the top of the grid of
        histograms. Default is 0.9.
    b : float, optional
        The location (in relative figure units) of the bottom of the grid of
        histograms. Default is 0.1.
    ax_space : float, optional
        The `w_space` and `h_space` to use (in relative figure units).
        Default is 0.1.
    rotate_last_hist : bool, optional
        If True, rotate the bottom right histogram. Default is to only do
        this for bivariate data.
    hist1d_kwargs : dict, optional
        Extra keyword arguments for the 1d histograms.
    cmap : str, optional
        Colormap to use, overriding the default color cycle. Probably only
        useful for one-class data.
    autocolor : bool, optional
        If True, automatically assign colors and colormaps. Otherwise, just
        use what is in the optional keywords. Default is True.
    use_kde : bool, optional
        If True, use a kernel density estimator (KDE) for the univariate and
        bivariate (when `plot_heatmap=True`) histograms. Default is False.
    kde_bws : {'scott', 'silverman', array of float (num_dim,) or (2, num_dim)}
        Which rule to use to estimate KDE bandwidths, a bandwidth to use for
        all dimensions, or a list of bandwidths for each dimension. If an
        array of bandwidths is provided, it can either be 1d (same bandwidth
        for 1d and 2d KDEs), or 2d (row 0 is the 1d bandwidth, row 1 is the
        2d bandwidth). Default is 'scott' (use Scott's rule of thumb). Note
        that the labels `Y` are ignored when computing the bandwidths, and
        the same bandwidth is used for each label. Therefore, you should
        compute the proper bandwidth externally when plotting KDEs for each
        label.
    cutoff_w : float, optional
        If `w` and `cutoff_w` are present, points with
        `w < cutoff_w * w.max()` will be excluded. Default is to plot all
        points.
    kde1d_kwargs : dict, optional
        Extra keyword arguments for the `plot` command when plotting the 1d
        KDE.
    kwargs : optional keywords
        All extra keyword arguments are passed to `hist2d_contour`.
    """
    # TODO: kwarg handling when using KDE is too convoluted!
    # TODO: add proper handling of KDE bandwidth for multiclass data!
    # Mask out points with low weights:
    if w is not None and cutoff_w is not None:
        mask = w >= (cutoff_w * w.max())
        X = X[mask, :]
        if Y is not None:
            Y = Y[mask]
        w = w[mask]
    # Compute kernel bandwidths:
    if use_kde:
        if isinstance(kde_bws, str):
            kde_bw_method = kde_bws
            kde_bws = scipy.zeros((2, X.shape[1]))
            # Handle both the weighted and unweighted cases:
            wtmp = scipy.ones(X.shape[0]) if w is None else w
            if kde_bw_method == 'scott':
                kde_bws[0, :] = wkde.scott_bw(X, wtmp, 1)
                kde_bws[1, :] = wkde.scott_bw(X, wtmp, 2)
            elif kde_bw_method == 'silverman':
                kde_bws[0, :] = wkde.silverman_bw(X, wtmp, 1)
                kde_bws[1, :] = wkde.silverman_bw(X, wtmp, 2)
            else:
                raise ValueError(
                    'Invalid KDE bandwidth estimation method "{:s}"!'.format(
                        kde_bw_method))
        else:
            kde_bws = scipy.asarray(kde_bws, dtype=float)
            if kde_bws.ndim == 0:
                kde_bws = kde_bws * scipy.ones((2, X.shape[1]))
            elif kde_bws.ndim == 1:
                kde_bws = scipy.tile(kde_bws, (2, 1))
            if kde_bws.shape != (2, X.shape[1]):
                raise ValueError('Invalid number of KDE bandwidths!')
    # Number of features:
    k = X.shape[1]
    # Unique class labels:
    if Y is None:
        Y = scipy.ones(X.shape[0])
        uY = [1.0]
    else:
        uY = scipy.unique(Y)
    # Default to plotting a heatmap for one-class data, contours for
    # multi-class data:
    if len(uY) == 1:
        kwargs.setdefault('plot_heatmap', True)
        kwargs.setdefault('plot_levels_filled', False)
        kwargs.setdefault('plot_levels', False)
        kwargs.setdefault('plot_points', False)
        if cmap is None:
            cmap = mpl.rcParams['image.cmap']
    else:
        kwargs.setdefault('plot_heatmap', False)
        kwargs.setdefault('plot_levels_filled', False)
        kwargs.setdefault('plot_levels', True)
        kwargs.setdefault('plot_points', False)
        kwargs.setdefault('plot_ci', 0.95)
    # Defaults for 1d histograms:
    hist1d_kwargs.setdefault('density', True)
    hist1d_kwargs.setdefault('histtype', 'stepfilled')
    if 'bins' not in hist1d_kwargs:
        if w is None and not use_kde:
            hist1d_kwargs['bins'] = 'auto'
        else:
            # TODO: Come up with a better default!
            hist1d_kwargs['bins'] = 50
    hist1d_kwargs.setdefault('alpha', 0.75 if len(uY) > 1 else 1)
    # Set color order:
    if colors is None:
        cc = get_color_10_cycle()
        colors = [next(cc) for i in range(0, len(uY))]
    # Handle rotation of the bottom right histogram:
    if rotate_last_hist is None:
        rotate_last_hist = k == 2
    # Default labels for features and classes:
    if feature_labels is None:
        feature_labels = ['{:d}'.format(kv) for kv in range(0, k)]
    if class_labels is None:
        class_labels = ['{:d}'.format(int(yv)) for yv in uY]
    # Set up geometry:
    if fixed_height is None:
        if fixed_width is None:
            # Default: use matplotlib's default width, handle remaining
            # parameters with the fixed width case below:
            fixed_width = mpl.rcParams['figure.figsize'][0]
        fixed_height = fixed_width * (r - l) / (t - b)
    elif fixed_width is None:
        # Only height specified, compute width to yield square histograms:
        fixed_width = fixed_height * (t - b) / (r - l)
    # Otherwise width and height are fixed, and we may not have square
    # histograms, at the user's discretion.
    wspace = ax_space
    hspace = ax_space
    f = plt.figure(figsize=(fixed_width, fixed_height))
    gs = GridSpec(k, k)
    gs.update(bottom=b, top=t, left=l, right=r, wspace=wspace, hspace=hspace)
    axes = []
    # j is the row, i is the column.
    for j in range(k):
        row = []
        for i in range(k):
            if i > j:
                row.append(None)
            else:
                if rotate_last_hist and i == j and i == k - 1:
                    sharey = row[-1]
                else:
                    sharey = row[-1] if i > 0 and i < j and j < k else None
                sharex = axes[-1][i] if j > i and j < k else \
                    (row[-1] if i > 0 and j == k else None)
                row.append(f.add_subplot(gs[j, i], sharey=sharey, sharex=sharex))
        axes.append(row)
    axes = scipy.asarray(axes)
    # Update axes with the data:
    # j is the row, i is the column.
    for i in range(0, k):
        if rotate_last_hist and i == k - 1:
            orientation = 'horizontal'
        else:
            orientation = 'vertical'
        for ic, yv in enumerate(uY):
            mask = Y == yv
            if autocolor:
                hist1d_kwargs['color'] = colors[ic]
            if use_kde:
                kde_grid = scipy.linspace(X[mask, i].min(), X[mask, i].max(),
                                          hist1d_kwargs['bins'])
                kde = wkde.univariate_weighted_kde(
                    X[mask, i],
                    w[mask] if w is not None else scipy.ones(mask.sum()),
                    kde_grid, kde_bws[0, i])
                if orientation == 'vertical':
                    axes[i, i].plot(kde_grid, kde,
                                    color=hist1d_kwargs['color'],
                                    **kde1d_kwargs)
                    if hist1d_kwargs['alpha'] > 0:
                        axes[i, i].fill(kde_grid, kde,
                                        color=hist1d_kwargs['color'],
                                        alpha=hist1d_kwargs['alpha'])
                    axes[i, i].set_ylim(bottom=0.0)
                else:
                    axes[i, i].plot(kde, kde_grid,
                                    color=hist1d_kwargs['color'],
                                    **kde1d_kwargs)
                    if hist1d_kwargs['alpha'] > 0:
                        axes[i, i].fill(kde, kde_grid,
                                        color=hist1d_kwargs['color'],
                                        alpha=hist1d_kwargs['alpha'])
                    axes[i, i].set_xlim(left=0.0)
            else:
                axes[i, i].hist(X[mask, i],
                                weights=w[mask] if w is not None else None,
                                orientation=orientation, **hist1d_kwargs)
        if i < k - 1 or (rotate_last_hist and i == k - 1):
            plt.setp(axes[i, i].get_xticklabels(), visible=False)
        else:
            axes[i, i].set_xlabel(feature_labels[i])
        plt.setp(axes[i, i].get_yticklabels(), visible=False)
        for j in range(i + 1, k):
            for ic, yv in enumerate(uY):
                # TODO: more control over coloring!
                mask = Y == yv
                c = colors[ic]
                cm = 'w_' + c + '_a' if cmap is None else cmap
                if autocolor:
                    pcolor_kwargs = kwargs.get('pcolor_kwargs', {})
                    pcolor_kwargs['cmap'] = cm
                    kwargs['pcolor_kwargs'] = pcolor_kwargs
                    contour_kwargs = kwargs.get('contour_kwargs', {})
                    if 'colors' in contour_kwargs:
                        contour_kwargs['colors'] = c
                    else:
                        contour_kwargs['cmap'] = cm
                    kwargs['contour_kwargs'] = contour_kwargs
                    scatter_kwargs = kwargs.get('scatter_kwargs', {})
                    scatter_kwargs['color'] = c
                    kwargs['scatter_kwargs'] = scatter_kwargs
                plot_ci = kwargs.get('plot_ci', None)
                if plot_ci is not None and autocolor:
                    ci_kwargs = kwargs.get('ci_kwargs', {})
                    plot_ci = scipy.atleast_1d(plot_ci)
                    if 'colors' in ci_kwargs or len(plot_ci) == 1:
                        ci_kwargs['colors'] = c
                    else:
                        ci_kwargs['cmap'] = cm
                    kwargs['ci_kwargs'] = ci_kwargs
                hist2d_contour(axes[j, i], X[mask, i], X[mask, j], w=w,
                               use_kde=use_kde,
                               kde_bw0=kde_bws[1, i] if use_kde else None,
                               kde_bw1=kde_bws[1, j] if use_kde else None,
                               **kwargs)
            if j < k - 1:
                plt.setp(axes[j, i].get_xticklabels(), visible=False)
            else:
                axes[j, i].set_xlabel(feature_labels[i])
            if i != 0:
                plt.setp(axes[j, i].get_yticklabels(), visible=False)
            else:
                axes[j, i].set_ylabel(feature_labels[j])
    # Draw legend:
    if show_legend and len(uY) > 1:
        handles = [Patch(color=clr, label=lbl, alpha=hist1d_kwargs['alpha'])
                   for clr, lbl in zip(colors, class_labels)]
        legend_kwargs.setdefault('loc', 'upper right')
        legend_kwargs.setdefault('bbox_to_anchor', (r, t))
        legend_kwargs.setdefault('bbox_transform', f.transFigure)
        f.legend(handles, [h.get_label() for h in handles], **legend_kwargs)
    return f, axes
def hist2d_contour(a, x, y, w=None, plot_heatmap=False,
                   plot_levels_filled=False, plot_levels=True,
                   plot_points=False, filter_contour=True,
                   filter_heatmap=False, hist_kwargs={}, pcolor_kwargs={},
                   contour_kwargs={}, scatter_kwargs={}, filter_kwargs={},
                   filter_sigma=1.0, plot_ci=None, ci_kwargs={},
                   scatter_fraction=1.0, use_kde=False, kde_bw0=None,
                   kde_bw1=None):
    """Make a combined 2d histogram, contour and/or scatter plot.

    Parameters
    ----------
    a : axes
        The axes to plot on.
    x : 1d array
        The x data.
    y : 1d array
        The y data.
    w : 1d array, optional
        The weights.
    plot_heatmap : bool, optional
        If True, plot the heatmap of the histogram. Default is False.
    plot_levels_filled : bool, optional
        If True, plot the filled contours of the histogram. Default is False.
    plot_levels : bool, optional
        If True, plot the contours of the histogram. Default is True.
    plot_points : bool, optional
        If True, make a scatterplot of the points. Default is False.
    filter_contour : bool, optional
        If True, filter the histogram before plotting contours. Default is
        True.
    filter_heatmap : bool, optional
        If True, filter the histogram before plotting the heatmap. Default is
        False.
    hist_kwargs : dict, optional
        Keyword arguments for scipy.histogram2d.
    pcolor_kwargs : dict, optional
        Keyword arguments for pcolormesh when plotting the heatmap.
    contour_kwargs : dict, optional
        Keyword arguments for contour and contourf when plotting contours.
        To specify the number of contours, use the key 'N'. To use specific
        contour levels, use the key 'V'.
    scatter_kwargs : dict, optional
        Keyword arguments for the scatterplot when plotting points.
    filter_kwargs : dict, optional
        Keyword arguments for filtering of the histogram.
    filter_sigma : float, optional
        The standard deviation for the Gaussian filter used to smooth the
        histogram. Default is 2 bins.
    plot_ci : float or 1d array, optional
        If this is a float, the contour containing this much probability mass
        is drawn. Default is None (don't draw contour).
    ci_kwargs : dict, optional
        Keyword arguments for drawing the confidence interval.
    scatter_fraction : float, optional
        Fraction of points to include in the scatterplot. Default is 1.0
        (use all points).
    use_kde : bool, optional
        If True, use a kernel density estimator (KDE) in place of the
        bivariate histogram when `plot_heatmap=True`. Default is False.
    kde_bw0 : float, optional
        KDE bandwidth for the zeroth dimension.
    kde_bw1 : float, optional
        KDE bandwidth for the first dimension.
    """
    hist_kwargs.setdefault('bins', (100, 101))
    hist_kwargs.setdefault('normed', True)
    # Only compute the histogram if needed:
    if plot_heatmap or plot_levels or plot_levels_filled or (plot_ci is not None):
        if use_kde:
            xgrid = scipy.linspace(x.min(), x.max(), hist_kwargs['bins'][0])
            ygrid = scipy.linspace(y.min(), y.max(), hist_kwargs['bins'][1])
            H = wkde.bivariate_weighted_kde(
                x, y, w if w is not None else scipy.ones(len(x)),
                xgrid, ygrid, kde_bw0, kde_bw1)
            # Match pcolormesh's edge-based format:
            dx = xgrid[1] - xgrid[0]
            xedges = (xgrid[1:] + xgrid[:-1]) / 2.0
            xedges = scipy.concatenate(([xedges[0] - dx], xedges, [xedges[-1] + dx]))
            dy = ygrid[1] - ygrid[0]
            yedges = (ygrid[1:] + ygrid[:-1]) / 2.0
            yedges = scipy.concatenate(([yedges[0] - dy], yedges, [yedges[-1] + dy]))
            # Doesn't make sense to filter a KDE...
            Hf = H
        else:
            H, xedges, yedges = scipy.histogram2d(x, y, weights=w, **hist_kwargs)
            if filter_contour or filter_heatmap:
                Hf = gaussian_filter(H, filter_sigma, **filter_kwargs)
        if plot_heatmap:
            XX, YY = scipy.meshgrid(xedges, yedges)
            a.pcolormesh(XX, YY, Hf.T if filter_heatmap else H.T, **pcolor_kwargs)
        if plot_levels or plot_levels_filled or (plot_ci is not None):
            # Convert to bin centers:
            xcenters = 0.5 * (xedges[:-1] + xedges[1:])
            ycenters = 0.5 * (yedges[:-1] + yedges[1:])
            XX, YY = scipy.meshgrid(xcenters, ycenters)
            args = []
            if 'V' in contour_kwargs:
                args += [scipy.atleast_1d(contour_kwargs.pop('V'))]
            elif 'N' in contour_kwargs:
                args += [contour_kwargs.pop('N')]
            if plot_levels_filled:
                a.contourf(XX, YY, Hf.T if filter_contour else H.T,
                           *args, **contour_kwargs)
            if plot_levels:
                a.contour(XX, YY, Hf.T if filter_contour else H.T,
                          *args, **contour_kwargs)
            if plot_ci is not None:
                V = prob_contour(H, xedges, yedges, p=plot_ci)
                ci_kwargs.setdefault('vmin', 0.0)
                ci_kwargs.setdefault('vmax', H.max())
                a.contour(XX, YY, Hf.T if filter_contour else H.T,
                          scipy.unique(scipy.atleast_1d(V)), **ci_kwargs)
    if plot_points:
        # Make the markersize not ridiculous by default:
        if 'ms' not in scatter_kwargs and 'markersize' not in scatter_kwargs:
            scatter_kwargs['ms'] = 0.1
        # Make points transparent by default (approximates a heatmap...):
        scatter_kwargs.setdefault('alpha', 0.5)
        # Do not connect with lines by default:
        if 'ls' not in scatter_kwargs and 'linestyle' not in scatter_kwargs:
            scatter_kwargs['ls'] = ''
        # Plot with circles by default:
        scatter_kwargs.setdefault('marker', 'o')
        if scatter_fraction != 1.0:
            N = int(round(scatter_fraction * len(x)))
            indices = scipy.random.choice(range(len(x)), size=N, replace=False)
        else:
            indices = range(len(x))
        a.plot(x[indices], y[indices], **scatter_kwargs)
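# --- Usage sketch: correlated Gaussian samples drawn for illustration only.
# Assumes the module-level helpers referenced above (prob_contour,
# gaussian_filter, wkde) are importable alongside hist2d_contour.
import numpy as np
import matplotlib.pyplot as plt

x = np.random.randn(10000)
y = 0.5 * x + np.random.randn(10000)
fig, ax = plt.subplots()
hist2d_contour(ax, x, y, plot_heatmap=True, plot_levels=False, plot_ci=0.95)
plt.show()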
def fluxFourierSensRho(beams, plasma, time, points, mcos=[0], msin=[],
                       ds=1e-3, meth='psinorm'):
    """Calculates the distance weight matrix for specified Fourier components.

    Similar to fluxFourierSens, but it instead derives weightings from the
    plasma equilibrium, assuming that the plasma object contains a method
    .rz2rho. It should return a value of normalized radius to some basis
    function related to the plasma equilibrium.

    Args:
        beams: geometry Object with reference origin
        plasma: geometry Object with reference origin
        time: equilibrium time for inversion
        points: points in basis function space in which to map to.

    Kwargs:
        mcos: number of cosine Fourier components to generate
        msin: number of sine Fourier components to generate
        ds: step size along beam/ray in which to evaluate
        meth: normalization method (psinorm, phinorm, volnorm)

    Returns:
        output: A 3-dimensional numpy array of weights in meters which
            follows [time, beam, radial x Fourier components]. The order of
            the last dimension is grouped by Fourier component, cosine
            radial terms then sine radial terms.
    """
    time = scipy.atleast_1d(time)
    interp = scipy.interpolate.interp1d(points,
                                        scipy.arange(len(points)),
                                        kind='cubic')
    # initialize the output array of sensitivities
    output = scipy.zeros((len(time), len(beams), len(points)))
    for i in range(len(beams)):
        # the original used an undefined name `step` here; the `ds` kwarg is
        # the step size it refers to
        temp = beams[i](scipy.mgrid[beams[i].norm.s[-2]:beams[i].norm.s[-1]:ds])
        mapped = plasma.eq.rz2rho(meth, temp.r0(), temp.x2(), scipy.array(time))
        # Knowing that the last point (points[-1]) is assumed to be a ZERO
        # emissivity point, an additional brightness is added which only sees
        # the last emissivity, to yield the zero.
        scipy.place(mapped, mapped > points[-1], points[-1])
        # For a given point along a chord, use a spline to solve which
        # reconstruction points it is closest to. Then, in the weighting
        # matrix (which is (brightness, points) in shape), add the various
        # fractional weightings.
        out = interp(mapped)
        # find the lower point using a floor-like command (returns ints)
        idx = out.astype(int)
        # reduce out to the fraction
        out = (out % 1.) * ds
        for j in range(len(idx[0])):
            output[:, i, idx[:, j]] += out[:, j]
            scipy.place(idx[:, j], idx[:, j] < len(points) - 1, idx[:, j] + 1)
            output[:, i, idx[:, j]] += ds - out[:, j]
    return output
def plotLine(vector, val=1.0, close=False, tube_radius=None, index=None, **kwargs):
    """PlotLine creates a single plot object from a singular vector or from
    an n-dimensional tuple or list."""
    plot = False
    try:
        x = vector.x()
        temp0 = x[0]
        temp1 = x[1]
        temp2 = x[2]
        s = val * scipy.ones(temp0.shape)
        # For surface objects, this keyword allows for the last corner to
        # connect with the first.
        if close:
            temp0 = scipy.concatenate((temp0, scipy.atleast_1d(temp0[0])))
            temp1 = scipy.concatenate((temp1, scipy.atleast_1d(temp1[0])))
            temp2 = scipy.concatenate((temp2, scipy.atleast_1d(temp2[0])))
            s = scipy.concatenate((s, scipy.atleast_1d(s[0])))
        if index is not None:
            N = len(temp0)
            connect = scipy.vstack([scipy.arange(index, index + N - 1.5),
                                    scipy.arange(index + 1, index + N - .5)]).T
            # I want to rewrite this...
            index += N
    except AttributeError:
        temp0 = []
        temp1 = []
        temp2 = []
        s = []
        connect = []
        # if it is not some sort of vector or vector-derived class, iterate
        # through and make a surface object
        if index is None:
            index = 0
            plot = True
        for i in vector:
            output = plotLine(i, close=close, index=index, **kwargs)
            temp0 += [output[0]]
            temp1 += [output[1]]
            temp2 += [output[2]]
            s += [output[3]]
            connect += [output[4]]
            index = output[5]
        # turn to arrays here so I don't accidentally nest lists or tuples
        temp0 = scipy.hstack(temp0)
        temp1 = scipy.hstack(temp1)
        temp2 = scipy.hstack(temp2)
        s = scipy.hstack(s)
        connect = scipy.vstack(connect)
    if index is None:
        try:
            mlab.plot3d(temp0, temp1, temp2, s,
                        vmin=0., vmax=1., tube_radius=tube_radius, **kwargs)
        except ValueError:
            mlab.plot3d(temp0.flatten(), temp1.flatten(), temp2.flatten(),
                        s.flatten(),
                        vmin=0., vmax=1., tube_radius=tube_radius, **kwargs)
    else:
        if plot:
            # follows http://docs.enthought.com/mayavi/mayavi/auto/example_plotting_many_lines.html#example-plotting-many-lines
            src = mlab.pipeline.scalar_scatter(temp0, temp1, temp2, s)
            src.mlab_source.dataset.lines = connect
            lines = mlab.pipeline.stripper(src)
            mlab.pipeline.surface(lines, **kwargs)
        else:
            return (temp0, temp1, temp2, s, connect, index)
def solve(self, wls): """Anisotropic solver. INPUT wls = wavelengths to scan (any asarray-able object). OUTPUT self.DEO1, self.DEE1, self.DEO3, self.DEE3 = power reflected and transmitted. """ self.wls = S.atleast_1d(wls) LAMBDA = self.LAMBDA n = self.n multilayer = self.multilayer alpha = self.alpha delta = self.delta psi = self.psi phi = self.phi nlayers = len(multilayer) i = S.arange(-n, n + 1) nood = 2 * n + 1 hmax = nood - 1 DEO1 = S.zeros((nood, self.wls.size)) DEO3 = S.zeros_like(DEO1) DEE1 = S.zeros_like(DEO1) DEE3 = S.zeros_like(DEO1) c1 = S.array([1., 0., 0.]) c3 = S.array([1., 0., 0.]) # grating on the xy plane K = 2 * pi / LAMBDA * \ S.array([S.sin(phi), 0., S.cos(phi)], dtype=complex) dirk1 = S.array([S.sin(alpha) * S.cos(delta), S.sin(alpha) * S.sin(delta), S.cos(alpha)]) # D polarization vector u = S.array([S.cos(psi) * S.cos(alpha) * S.cos(delta) - S.sin(psi) * S.sin(delta), S.cos(psi) * S.cos(alpha) * S.sin(delta) + S.sin(psi) * S.cos(delta), -S.cos(psi) * S.sin(alpha)]) kO1i = S.zeros((3, i.size), dtype=complex) kE1i = S.zeros_like(kO1i) kO3i = S.zeros_like(kO1i) kE3i = S.zeros_like(kO1i) Mp = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex) M = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex) dlt = (i == 0).astype(int) for iwl, wl in enumerate(self.wls): nO1 = nE1 = multilayer[0].mat.n(wl).item() nO3 = nE3 = multilayer[-1].mat.n(wl).item() # wavevectors k = 2 * pi / wl eps1 = S.diag(S.asarray([nE1, nO1, nO1]) ** 2) eps3 = S.diag(S.asarray([nE3, nO3, nO3]) ** 2) # ordinary wave abskO1 = k * nO1 # abskO3 = k * nO3 # extraordinary wave # abskE1 = k * nO1 *nE1 / S.sqrt(nO1**2 + (nE1**2 - nO1**2) * S.dot(-c1, dirk1)**2) # abskE3 = k * nO3 *nE3 / S.sqrt(nO3**2 + (nE3**2 - nO3**2) * S.dot(-c3, dirk1)**2) k1 = abskO1 * dirk1 kO1i[0, :] = k1[0] - i * K[0] kO1i[1, :] = k1[1] * S.ones_like(i) kO1i[2, :] = - \ dispersion_relation_ordinary(kO1i[0, :], kO1i[1, :], k, nO1) kE1i[0, :] = kO1i[0, :] kE1i[1, :] = kO1i[1, :] kE1i[2, :] = -dispersion_relation_extraordinary(kE1i[0, :], kE1i[1, :], k, nO1, nE1, c1) kO3i[0, :] = kO1i[0, :] kO3i[1, :] = kO1i[1, :] kO3i[ 2, :] = dispersion_relation_ordinary( kO3i[ 0, :], kO3i[ 1, :], k, nO3) kE3i[0, :] = kO1i[0, :] kE3i[1, :] = kO1i[1, :] kE3i[ 2, :] = dispersion_relation_extraordinary( kE3i[ 0, :], kE3i[ 1, :], k, nO3, nE3, c3) # k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [k1[2] - i * K[2]]] k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [- i * K[2]]] # aliases for constant wavevectors kx = kO1i[0, :] # o kE1i(1,;), tanto e' lo stesso ky = k1[1] # matrices I = S.eye(nood, dtype=complex) ZERO = S.zeros((nood, nood), dtype=complex) Kx = S.diag(kx / k) Ky = ky / k * I Kz = S.diag(k2i[2, :] / k) KO1z = S.diag(kO1i[2, :] / k) KE1z = S.diag(kE1i[2, :] / k) KO3z = S.diag(kO3i[2, :] / k) KE3z = S.diag(kE3i[2, :] / k) ARO = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KO1z * eps1[2, 0] BRO = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KO1z * eps1[2, 1] CRO_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KO1z * eps1[2, 2]) ARE = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KE1z * eps1[2, 0] BRE = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KE1z * eps1[2, 1] CRE_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KE1z * eps1[2, 2]) ATO = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KO3z * eps3[2, 0] BTO = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KO3z * eps3[2, 1] CTO_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KO3z * eps3[2, 2]) ATE = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KE3z * eps3[2, 0] BTE = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KE3z * eps3[2, 1] CTE_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + 
KE3z * eps3[2, 2]) DRE = c1[1] * KE1z - c1[2] * Ky ERE = c1[2] * Kx - c1[0] * KE1z FRE = c1[0] * Ky - c1[1] * Kx DTE = c3[1] * KE3z - c3[2] * Ky ETE = c3[2] * Kx - c3[0] * KE3z FTE = c3[0] * Ky - c3[1] * Kx b = S.r_[u[0] * dlt, u[1] * dlt, (k1[1] / k * u[2] - k1[2] / k * u[1]) * dlt, ( k1[2] / k * u[0] - k1[0] / k * u[2]) * dlt] Ky_CRO_1 = ky / k * CRO_1 Ky_CRE_1 = ky / k * CRE_1 Kx_CRO_1 = kx[:, S.newaxis] / k * CRO_1 Kx_CRE_1 = kx[:, S.newaxis] / k * CRE_1 MR31 = -S.dot(Ky_CRO_1, ARO) MR32 = -S.dot(Ky_CRO_1, BRO) - KO1z MR33 = -S.dot(Ky_CRE_1, ARE) MR34 = -S.dot(Ky_CRE_1, BRE) - KE1z MR41 = S.dot(Kx_CRO_1, ARO) + KO1z MR42 = S.dot(Kx_CRO_1, BRO) MR43 = S.dot(Kx_CRE_1, ARE) + KE1z MR44 = S.dot(Kx_CRE_1, BRE) MR = S.asarray(S.bmat([[I, ZERO, I, ZERO], [ZERO, I, ZERO, I], [MR31, MR32, MR33, MR34], [MR41, MR42, MR43, MR44]])) Ky_CTO_1 = ky / k * CTO_1 Ky_CTE_1 = ky / k * CTE_1 Kx_CTO_1 = kx[:, S.newaxis] / k * CTO_1 Kx_CTE_1 = kx[:, S.newaxis] / k * CTE_1 MT31 = -S.dot(Ky_CTO_1, ATO) MT32 = -S.dot(Ky_CTO_1, BTO) - KO3z MT33 = -S.dot(Ky_CTE_1, ATE) MT34 = -S.dot(Ky_CTE_1, BTE) - KE3z MT41 = S.dot(Kx_CTO_1, ATO) + KO3z MT42 = S.dot(Kx_CTO_1, BTO) MT43 = S.dot(Kx_CTE_1, ATE) + KE3z MT44 = S.dot(Kx_CTE_1, BTE) MT = S.asarray(S.bmat([[I, ZERO, I, ZERO], [ZERO, I, ZERO, I], [MT31, MT32, MT33, MT34], [MT41, MT42, MT43, MT44]])) Mp.fill(0.0) M.fill(0.0) for nlayer in range(nlayers - 2, 0, -1): # internal layers layer = multilayer[nlayer] thickness = layer.thickness EPS2, EPS21 = layer.getEPSFourierCoeffs( wl, n, anisotropic=True) # Exx = S.squeeze(EPS2[0, 0, :]) # Exx = toeplitz(S.flipud(Exx[0:hmax + 1]), Exx[hmax:]) Exy = S.squeeze(EPS2[0, 1, :]) Exy = toeplitz(S.flipud(Exy[0:hmax + 1]), Exy[hmax:]) Exz = S.squeeze(EPS2[0, 2, :]) Exz = toeplitz(S.flipud(Exz[0:hmax + 1]), Exz[hmax:]) Eyx = S.squeeze(EPS2[1, 0, :]) Eyx = toeplitz(S.flipud(Eyx[0:hmax + 1]), Eyx[hmax:]) Eyy = S.squeeze(EPS2[1, 1, :]) Eyy = toeplitz(S.flipud(Eyy[0:hmax + 1]), Eyy[hmax:]) Eyz = S.squeeze(EPS2[1, 2, :]) Eyz = toeplitz(S.flipud(Eyz[0:hmax + 1]), Eyz[hmax:]) Ezx = S.squeeze(EPS2[2, 0, :]) Ezx = toeplitz(S.flipud(Ezx[0:hmax + 1]), Ezx[hmax:]) Ezy = S.squeeze(EPS2[2, 1, :]) Ezy = toeplitz(S.flipud(Ezy[0:hmax + 1]), Ezy[hmax:]) Ezz = S.squeeze(EPS2[2, 2, :]) Ezz = toeplitz(S.flipud(Ezz[0:hmax + 1]), Ezz[hmax:]) Exx_1 = S.squeeze(EPS21[0, 0, :]) Exx_1 = toeplitz(S.flipud(Exx_1[0:hmax + 1]), Exx_1[hmax:]) Exx_1_1 = inv(Exx_1) # lalanne Ezz_1 = inv(Ezz) Ky_Ezz_1 = ky / k * Ezz_1 Kx_Ezz_1 = kx[:, S.newaxis] / k * Ezz_1 Exz_Ezz_1 = S.dot(Exz, Ezz_1) Eyz_Ezz_1 = S.dot(Eyz, Ezz_1) H11 = 1j * S.dot(Ky_Ezz_1, Ezy) H12 = 1j * S.dot(Ky_Ezz_1, Ezx) H13 = S.dot(Ky_Ezz_1, Kx) H14 = I - S.dot(Ky_Ezz_1, Ky) H21 = 1j * S.dot(Kx_Ezz_1, Ezy) H22 = 1j * S.dot(Kx_Ezz_1, Ezx) H23 = S.dot(Kx_Ezz_1, Kx) - I H24 = -S.dot(Kx_Ezz_1, Ky) H31 = S.dot(Kx, Ky) + Exy - S.dot(Exz_Ezz_1, Ezy) H32 = Exx_1_1 - S.dot(Ky, Ky) - S.dot(Exz_Ezz_1, Ezx) H33 = 1j * S.dot(Exz_Ezz_1, Kx) H34 = -1j * S.dot(Exz_Ezz_1, Ky) H41 = S.dot(Kx, Kx) - Eyy + S.dot(Eyz_Ezz_1, Ezy) H42 = -S.dot(Kx, Ky) - Eyx + S.dot(Eyz_Ezz_1, Ezx) H43 = -1j * S.dot(Eyz_Ezz_1, Kx) H44 = 1j * S.dot(Eyz_Ezz_1, Ky) H = 1j * S.diag(S.repeat(S.diag(Kz), 4)) + \ S.asarray(S.bmat([[H11, H12, H13, H14], [H21, H22, H23, H24], [H31, H32, H33, H34], [H41, H42, H43, H44]])) q, W = eig(H) W1, W2, W3, W4 = S.split(W, 4) # # boundary conditions # # x = [R T] # R = [ROx ROy REx REy] # T = [TOx TOy TEx TEy] # b + MR.R = M1p.c # M1.c = M2p.c # ... # ML.c = MT.T # therefore: b + MR.R = (M1p.M1^-1.M2p.M2^-1. 
...).MT.T # missing equations from (46)..(49) in glytsis_rigorous # [b] = [-MR Mtot.MT] [R] # [0] [...........] [T] z = S.zeros_like(q) z[S.where(q.real > 0)] = -thickness D = S.exp(k * q * z) Sy0 = W1 * D[S.newaxis, :] Sx0 = W2 * D[S.newaxis, :] Uy0 = W3 * D[S.newaxis, :] Ux0 = W4 * D[S.newaxis, :] z = thickness * S.ones_like(q) z[S.where(q.real > 0)] = 0 D = S.exp(k * q * z) D1 = S.exp(-1j * k2i[2, :] * thickness) Syd = D1[:, S.newaxis] * W1 * D[S.newaxis, :] Sxd = D1[:, S.newaxis] * W2 * D[S.newaxis, :] Uyd = D1[:, S.newaxis] * W3 * D[S.newaxis, :] Uxd = D1[:, S.newaxis] * W4 * D[S.newaxis, :] Mp[:, :, nlayer] = S.r_[Sx0, Sy0, -1j * Ux0, -1j * Uy0] M[:, :, nlayer] = S.r_[Sxd, Syd, -1j * Uxd, -1j * Uyd] Mtot = S.eye(4 * nood, dtype=complex) for nlayer in range(1, nlayers - 1): Mtot = S.dot( S.dot(Mtot, Mp[:, :, nlayer]), inv(M[:, :, nlayer])) BC_b = S.r_[b, S.zeros_like(b)] BC_A1 = S.c_[-MR, S.dot(Mtot, MT)] BC_A2 = S.asarray(S.bmat( [[(c1[0] * I - c1[2] * S.dot(CRO_1, ARO)), (c1[1] * I - c1[2] * S.dot(CRO_1, BRO)), ZERO, ZERO, ZERO, ZERO, ZERO, ZERO], [ZERO, ZERO, (DRE - S.dot(S.dot(FRE, CRE_1), ARE)), (ERE - S.dot(S.dot(FRE, CRE_1), BRE)), ZERO, ZERO, ZERO, ZERO], [ZERO, ZERO, ZERO, ZERO, (c3[0] * I - c3[2] * S.dot(CTO_1, ATO)), (c3[1] * I - c3[2] * S.dot(CTO_1, BTO)), ZERO, ZERO], [ZERO, ZERO, ZERO, ZERO, ZERO, ZERO, (DTE - S.dot(S.dot(FTE, CTE_1), ATE)), (ETE - S.dot(S.dot(FTE, CTE_1), BTE))]])) BC_A = S.r_[BC_A1, BC_A2] x = linsolve(BC_A, BC_b) ROx, ROy, REx, REy, TOx, TOy, TEx, TEy = S.split(x, 8) ROz = -S.dot(CRO_1, (S.dot(ARO, ROx) + S.dot(BRO, ROy))) REz = -S.dot(CRE_1, (S.dot(ARE, REx) + S.dot(BRE, REy))) TOz = -S.dot(CTO_1, (S.dot(ATO, TOx) + S.dot(BTO, TOy))) TEz = -S.dot(CTE_1, (S.dot(ATE, TEx) + S.dot(BTE, TEy))) denom = (k1[2] - S.dot(u, k1) * u[2]).real DEO1[:, iwl] = -((S.absolute(ROx) ** 2 + S.absolute(ROy) ** 2 + S.absolute(ROz) ** 2) * S.conj(kO1i[2, :]) - (ROx * kO1i[0, :] + ROy * kO1i[1, :] + ROz * kO1i[2, :]) * S.conj(ROz)).real / denom DEE1[:, iwl] = -((S.absolute(REx) ** 2 + S.absolute(REy) ** 2 + S.absolute(REz) ** 2) * S.conj(kE1i[2, :]) - (REx * kE1i[0, :] + REy * kE1i[1, :] + REz * kE1i[2, :]) * S.conj(REz)).real / denom DEO3[:, iwl] = ((S.absolute(TOx) ** 2 + S.absolute(TOy) ** 2 + S.absolute(TOz) ** 2) * S.conj(kO3i[2, :]) - (TOx * kO3i[0, :] + TOy * kO3i[1, :] + TOz * kO3i[2, :]) * S.conj(TOz)).real / denom DEE3[:, iwl] = ((S.absolute(TEx) ** 2 + S.absolute(TEy) ** 2 + S.absolute(TEz) ** 2) * S.conj(kE3i[2, :]) - (TEx * kE3i[0, :] + TEy * kE3i[1, :] + TEz * kE3i[2, :]) * S.conj(TEz)).real / denom # save the results self.DEO1 = DEO1 self.DEE1 = DEE1 self.DEO3 = DEO3 self.DEE3 = DEE3 return self
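# Usage sketch for the anisotropic solver above. Everything here is
# illustrative: it assumes `solver` is an already-configured instance of
# this class (multilayer stack, incidence angles alpha/delta/psi/phi and
# truncation order n set elsewhere); none of these names are defined in
# this module.
def _check_anisotropic_energy(solver, wls, atol=1e-3):
    """Run the solver and check that, for a lossless stack, the diffracted
    powers DEO1 + DEE1 + DEO3 + DEE3 sum to ~1 at every wavelength."""
    import numpy
    solver.solve(numpy.atleast_1d(wls))
    total = (solver.DEO1 + solver.DEE1 + solver.DEO3 + solver.DEE3).sum(axis=0)
    return numpy.allclose(total, 1.0, atol=atol)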
def solve(self, wls):
    """Isotropic solver.

    INPUT
    wls = wavelengths to scan (any asarray-able object).

    OUTPUT
    self.DE1, self.DE3 = power reflected and transmitted.

    NOTE
    see:
    Moharam, "Formulation for stable and efficient implementation of the
    rigorous coupled-wave analysis of binary gratings", JOSA A, 12(5), 1995
    Lalanne, "Highly improved convergence of the coupled-wave method for TM
    polarization", JOSA A, 13(4), 1996
    Moharam, "Stable implementation of the rigorous coupled-wave analysis
    for surface-relief gratings: enhanced transmittance matrix approach",
    JOSA A, 12(5), 1995
    """
    self.wls = S.atleast_1d(wls)
    LAMBDA = self.LAMBDA
    n = self.n
    multilayer = self.multilayer
    alpha = self.alpha
    delta = self.delta
    psi = self.psi
    phi = self.phi

    nlayers = len(multilayer)
    i = S.arange(-n, n + 1)
    nood = 2 * n + 1
    hmax = nood - 1

    # grating vector (on the xz plane)
    # grating on the xy plane
    K = 2 * pi / LAMBDA * S.array([S.sin(phi), 0., S.cos(phi)], dtype=complex)

    DE1 = S.zeros((nood, self.wls.size))
    DE3 = S.zeros_like(DE1)

    dirk1 = S.array([S.sin(alpha) * S.cos(delta),
                     S.sin(alpha) * S.sin(delta),
                     S.cos(alpha)])

    # useful matrices
    I = S.eye(i.size)
    I2 = S.eye(i.size * 2)
    ZERO = S.zeros_like(I)

    X = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
    MTp1 = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
    MTp2 = S.zeros_like(MTp1)

    EPS2 = S.zeros(2 * hmax + 1, dtype=complex)
    EPS21 = S.zeros_like(EPS2)

    dlt = (i == 0).astype(int)

    for iwl, wl in enumerate(self.wls):
        # free space wavevector
        k = 2 * pi / wl

        n1 = multilayer[0].mat.n(wl).item()
        n3 = multilayer[-1].mat.n(wl).item()

        # incident plane wave wavevector
        k1 = k * n1 * dirk1

        # all the other wavevectors
        tmp_x = k1[0] - i * K[0]
        tmp_y = k1[1] * S.ones_like(i)
        tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n1)
        k1i = S.r_[[tmp_x], [tmp_y], [tmp_z]]

        # k2i = S.r_[[k1[0] - i*K[0]], [k1[1] - i * K[1]], [-i * K[2]]]

        tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n3)
        k3i = S.r_[[k1i[0, :]], [k1i[1, :]], [tmp_z]]

        # aliases for constant wavevectors
        kx = k1i[0, :]
        ky = k1[1]

        # angles of reflection
        # phi_i = S.arctan2(ky, kx)
        phi_i = S.arctan2(ky, kx.real)  # careful: use the real part of kx

        Kx = S.diag(kx / k)
        Ky = ky / k * I
        Z1 = S.diag(k1i[2, :] / (k * n1 ** 2))
        Y1 = S.diag(k1i[2, :] / k)
        Z3 = S.diag(k3i[2, :] / (k * n3 ** 2))
        Y3 = S.diag(k3i[2, :] / k)
        # Fc = S.diag(S.cos(phi_i))
        fc = S.cos(phi_i)
        # Fs = S.diag(S.sin(phi_i))
        fs = S.sin(phi_i)

        MR = S.asarray(S.bmat([[I, ZERO],
                               [-1j * Y1, ZERO],
                               [ZERO, I],
                               [ZERO, -1j * Z1]]))

        MT = S.asarray(S.bmat([[I, ZERO],
                               [1j * Y3, ZERO],
                               [ZERO, I],
                               [ZERO, 1j * Z3]]))

        # internal layers (grating or layer)
        X.fill(0.0)
        MTp1.fill(0.0)
        MTp2.fill(0.0)
        for nlayer in range(nlayers - 2, 0, -1):  # internal layers
            layer = multilayer[nlayer]
            d = layer.thickness

            EPS2, EPS21 = layer.getEPSFourierCoeffs(wl, n, anisotropic=False)

            E = toeplitz(EPS2[hmax::-1], EPS2[hmax:])
            E1 = toeplitz(EPS21[hmax::-1], EPS21[hmax:])
            E11 = inv(E1)
            # B = S.dot(Kx, linsolve(E, Kx)) - I
            B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
            # A = S.dot(Kx, Kx) - E
            A = S.diag((kx / k) ** 2) - E

            # Note: workaround for a conditioning bug (alfredo):
            # randomize Kx slightly until cond(A) is small (< 1e10).
            # Dirty fix: for certain kx the Helmholtz operator has two null
            # eigenvalues and A, B are not invertible --> perturb the kx
            # slightly, although these cases should really be treated
            # separately (analytically).
            if cond(A) > 1e10:
                warning('BAD CONDITIONING: randomization of kx')
                while cond(A) > 1e10:
                    Kx = Kx * (1 + 1e-9 * S.rand())
                    B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                    A = S.diag((kx / k) ** 2) - E

            if S.absolute(K[2] / k) > 1e-10:
                raise ValueError(
                    'First Order Helmholtz Operator not implemented, yet!')
            elif ky == 0 or S.allclose(S.diag(Ky / ky * k), 1):
                # lalanne
                # H_U_reduced = S.dot(Ky, Ky) + A
                H_U_reduced = (ky / k) ** 2 * I + A
                # H_S_reduced = S.dot(Ky, Ky) + S.dot(Kx, linsolve(E, S.dot(Kx, E11))) - E11
                H_S_reduced = (ky / k) ** 2 * I + kx[:, S.newaxis] / k * \
                    linsolve(E, kx[:, S.newaxis] / k * E11) - E11

                q1, W1 = eig(H_U_reduced)
                q1 = S.sqrt(q1)
                q2, W2 = eig(H_S_reduced)
                q2 = S.sqrt(q2)

                # boundary conditions
                # V11 = S.dot(linsolve(A, W1), S.diag(q1))
                V11 = linsolve(A, W1) * q1[S.newaxis, :]
                V12 = (ky / k) * S.dot(linsolve(A, Kx), W2)
                V21 = (ky / k) * S.dot(linsolve(B, Kx), linsolve(E, W1))
                # V22 = S.dot(linsolve(B, W2), S.diag(q2))
                V22 = linsolve(B, W2) * q2[S.newaxis, :]

                # Vss = S.dot(Fc, V11)
                Vss = fc[:, S.newaxis] * V11
                # Wss = S.dot(Fc, W1) + S.dot(Fs, V21)
                Wss = fc[:, S.newaxis] * W1 + fs[:, S.newaxis] * V21
                # Vsp = S.dot(Fc, V12) - S.dot(Fs, W2)
                Vsp = fc[:, S.newaxis] * V12 - fs[:, S.newaxis] * W2
                # Wsp = S.dot(Fs, V22)
                Wsp = fs[:, S.newaxis] * V22
                # Wpp = S.dot(Fc, V22)
                Wpp = fc[:, S.newaxis] * V22
                # Vpp = S.dot(Fc, W2) + S.dot(Fs, V12)
                Vpp = fc[:, S.newaxis] * W2 + fs[:, S.newaxis] * V12
                # Wps = S.dot(Fc, V21) - S.dot(Fs, W1)
                Wps = fc[:, S.newaxis] * V21 - fs[:, S.newaxis] * W1
                # Vps = S.dot(Fs, V11)
                Vps = fs[:, S.newaxis] * V11

                Mc2bar = S.asarray(S.bmat([[Vss, Vsp, Vss, Vsp],
                                           [Wss, Wsp, -Wss, -Wsp],
                                           [Wps, Wpp, -Wps, -Wpp],
                                           [Vps, Vpp, Vps, Vpp]]))

                x = S.r_[S.exp(-k * q1 * d), S.exp(-k * q2 * d)]

                # Mc1 = S.dot(Mc2bar, S.diag(S.r_[S.ones_like(x), x]))
                xx = S.r_[S.ones_like(x), x]
                Mc1 = Mc2bar * xx[S.newaxis, :]

                X[:, :, nlayer] = S.diag(x)

                MTp = linsolve(Mc2bar, MT)
                MTp1[:, :, nlayer] = MTp[0:2 * nood, :]
                MTp2 = MTp[2 * nood:, :]

                MT = S.dot(Mc1, S.r_[I2, S.dot(MTp2,
                                               linsolve(MTp1[:, :, nlayer],
                                                        X[:, :, nlayer]))])
            else:
                raise ValueError(
                    'Second Order Helmholtz Operator not implemented, yet!')

        # M = S.asarray(S.bmat([-MR, MT]))
        M = S.c_[-MR, MT]
        b = S.r_[S.sin(psi) * dlt,
                 1j * S.sin(psi) * n1 * S.cos(alpha) * dlt,
                 -1j * S.cos(psi) * n1 * dlt,
                 S.cos(psi) * S.cos(alpha) * dlt]

        x = linsolve(M, b)
        R, T = S.split(x, 2)
        Rs, Rp = S.split(R, 2)
        for ii in range(1, nlayers - 1):
            T = S.dot(linsolve(MTp1[:, :, ii], X[:, :, ii]), T)
        Ts, Tp = S.split(T, 2)

        DE1[:, iwl] = (k1i[2, :] / (k1[2])).real * S.absolute(Rs) ** 2 + \
            (k1i[2, :] / (k1[2] * n1 ** 2)).real * S.absolute(Rp) ** 2
        DE3[:, iwl] = (k3i[2, :] / (k1[2])).real * S.absolute(Ts) ** 2 + \
            (k3i[2, :] / (k1[2] * n3 ** 2)).real * S.absolute(Tp) ** 2

    # save the results
    self.DE1 = DE1
    self.DE3 = DE3

    return self
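# Standalone illustration of the Toeplitz step used by the solvers above:
# the Fourier coefficients eps_h of the periodic permittivity (order h
# stored at index hmax + h) are arranged into the convolution matrix E with
# E[i, j] = eps_{j-i}, so that multiplying a field's Fourier vector by E
# realizes the pointwise product eps(x) * field(x). Toy coefficients only.
import numpy
from scipy.linalg import toeplitz

nood = 5                                         # number of retained orders
hmax = nood - 1
h = numpy.arange(-hmax, hmax + 1)                # order 0 sits at index hmax
eps_h = numpy.exp(-abs(h)) * (1.0 + 0.1 * h)     # toy, asymmetric spectrum
E = toeplitz(eps_h[hmax::-1], eps_h[hmax:])
assert numpy.isclose(E[0, 1], eps_h[hmax + 1])   # E[i, j] == eps_{j-i}
assert numpy.isclose(E[1, 0], eps_h[hmax - 1])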
def parse_args(options, identity='main'): ### load all default settings CFG = default_settings() ### general options if options.verbose in ['n', 'y']: CFG['verbose'] = (options.verbose == 'y') else: print >> sys.stderr, 'ERROR: option verbose should have value y or n, but has %s' % options.verbose sys.exit(1) if options.debug in ['n', 'y']: CFG['debug'] = (options.debug == 'y') else: print >> sys.stderr, 'ERROR: option debug should have value y or n, but has %s' % options.debug sys.exit(1) CFG['event_types'] = options.event_types.strip(',').split(',') if options.outdir == '-': print >> sys.stderr, 'ERROR: please provide the mandatory parameter: out directory\n\n' options.parser.print_help() sys.exit(2) else: if not os.path.exists(options.outdir): print >> sys.stderr, 'WARNING: Output directory %s does not exist - will be created\n\n' % options.outdir try: os.makedirs(options.outdir) except OSError: print >> sys.stderr, 'ERROR: Output directory %s can not be created.\n\n' % options.outdir sys.exit(2) CFG['out_dirname'] = options.outdir ### options specific for main program if identity == 'main': if options.insert_ir in ['n', 'y']: CFG['do_insert_intron_retentions'] = (options.insert_ir == 'y') else: print >> sys.stderr, 'ERROR: option insert_ir should have value y or n, but has %s' % options.insert_ir sys.exit(1) if options.insert_es in ['n', 'y']: CFG['do_insert_cassette_exons'] = (options.insert_es == 'y') else: print >> sys.stderr, 'ERROR: option insert_es should have value y or n, but has %s' % options.insert_es sys.exit(1) if options.insert_ni in ['n', 'y']: CFG['do_insert_intron_edges'] = (options.insert_ni == 'y') else: print >> sys.stderr, 'ERROR: option insert_ni should have value y or n, but has %s' % options.insert_ni sys.exit(1) if options.remove_se in ['n', 'y']: CFG['do_remove_short_exons'] = (options.remove_se == 'y') else: print >> sys.stderr, 'ERROR: option remove_se should have value y or n, but has %s' % options.remove_se sys.exit(1) if options.infer_sg in ['n', 'y']: CFG['do_infer_splice_graph'] = (options.infer_sg == 'y') else: print >> sys.stderr, 'ERROR: option infer_sg should have value y or n, but has %s' % options.infer_sg sys.exit(1) if options.var_aware in ['n', 'y']: CFG['var_aware'] = (options.var_aware == 'y') else: print >> sys.stderr, 'ERROR: option var_aware should have value y or n, but has %s' % options.var_aware sys.exit(1) if options.primary_only in ['n', 'y']: CFG['primary_only'] = (options.primary_only == 'y') else: print >> sys.stderr, 'ERROR: option primary_only should have value y or n, but has %s' % options.primary_only sys.exit(1) if options.intron_cov in ['n', 'y']: CFG['count_intron_cov'] = (options.intron_cov == 'y') else: print >> sys.stderr, 'ERROR: option intron_cov should have value y or n, but has %s' % options.intron_cov if options.quantify_graph in ['n', 'y']: CFG['count_segment_graph'] = (options.quantify_graph == 'y') else: print >> sys.stderr, 'ERROR: option quantify_graph should have value y or n, but has %s' % options.quantify_graph if options.ignore_mismatches in ['n', 'y']: CFG['ignore_mismatch_tag'] = (options.ignore_mismatches == 'y') else: print >> sys.stderr, 'ERROR: option ignore mismatches bam should have value y or n, but has %s' % options.ignore_mismatches if options.output_struc in ['n', 'y']: CFG['output_struc'] = (options.output_struc == 'y') CFG['output_confirmed_struc'] = (options.output_struc == 'y') else: print >> sys.stderr, 'ERROR: option output struc value y or n, but has %s' % options.output_struc ### option to 
store sparse BAM representation if options.sparse_bam in ['n', 'y']: CFG['bam_to_sparse'] = (options.sparse_bam == 'y') else: print >> sys.stderr, 'ERROR: option sparse_bam should have value y or n, but has %s' % options.sparse_bam CFG['insert_intron_iterations'] = options.iterations if options.spladderfile != '-': CFG['spladder_infile'] = options.spladderfile ### settings for the alt splice part CFG['same_genestruct_for_all_samples'] = (options.same_genome == 'y') if options.replicates != '-': CFG['replicate_idxs'] = [int(x) for x in options.replicates.split(',')] CFG['curate_alt_prime_events'] = (options.curate_alt_prime == 'y') ### open log file, if specified if options.logfile != '-': CFG['log_fname'] = options.logfile CFG['fd_log'] = open(options.logfile, 'w') else: CFG['log_fname'] = 'stdout' CFG['fd_log'] = sys.stdout #if options.user != '-': # CFG['user_settings'] = options.user ### alt splice analysis CFG['run_as_analysis'] = (options.extract_as == 'y') ### mandatory parameters for main spladder if options.bams == '-': print >> sys.stderr, 'ERROR: please provide the mandatory parameter: bam files\n\n' options.parser.print_help() sys.exit(2) else: CFG['bam_fnames'] = options.bams.strip(',').split(',') ### check existence of files for fname in CFG['bam_fnames']: if not os.path.isfile(fname): print >> sys.stderr, 'ERROR: Input file %s can not be found\n\n' % fname sys.exit(2) if options.annotation == '-': print >> sys.stderr, 'ERROR: please provide the mandatory parameter: annotation\n\n' options.parser.print_help() sys.exit(2) elif not os.path.isfile(options.annotation): print >> sys.stderr, 'ERROR: Annotation file %s can not be found\n\n' % options.annotation sys.exit(2) else: CFG['anno_fname'] = options.annotation if options.refstrain != '-': CFG['reference_strain'] = options.refstrain ref_tag = '%s:' % options.refstrain else: ref_tag = '' ### rproc options if options.pyproc == 'y': CFG['rproc'] = (options.pyproc == 'y') CFG['options_rproc'] = dict() CFG['options_rproc']['mem_req_resubmit'] = [30000, 60000, 80000] CFG['options_rproc']['time_req_resubmit'] = [60*60, 80*60, 90*60] CFG['options_rproc']['resubmit'] = 3 CFG['options_rproc']['priority'] = 100 CFG['options_rproc']['addpaths'] = CFG['paths'] if identity in ['main', 'test']: ### parallel processing CFG['parallel'] = options.parallel CFG['merge_strategy'] = options.merge CFG['read_length'] = options.readlen CFG['confidence_level'] = options.confidence if options.validate_sg in ['n', 'y']: CFG['validate_splicegraphs'] = (options.validate_sg == 'y') else: print >> sys.stderr, 'ERROR: validate_sg matlab should have value y or n, but has %s' % options.validate_sg sys.exit(1) if identity == 'test': CFG['multiTest'] = options.correction CFG['max_0_frac'] = options.max_0_frac CFG['min_count'] = options.min_count if options.non_alt_norm in ['n', 'y']: CFG['non_alt_norm'] = (options.non_alt_norm == 'y') else: print >> sys.stderr, 'ERROR: option non_alt_norm should have value y or n, but has %s' % options.non_alt_norm sys.exit(1) if options.matlab in ['n', 'y']: CFG['is_matlab'] = (options.matlab == 'y') else: print >> sys.stderr, 'ERROR: option matlab should have value y or n, but has %s' % options.matlab sys.exit(1) if options.conditionA == '-': print >> sys.stderr, 'ERROR: At least one sample for condition A required' sys.exit(1) if options.conditionB == '-': print >> sys.stderr, 'ERROR: At least one sample for condition B required' sys.exit(1) if options.diagnose_plots in ['n', 'y']: CFG['diagnose_plots'] = 
(options.diagnose_plots == 'y') else: print >> sys.stderr, 'ERROR: option diagnose_plots should have value y or n, but has %s' % options.diagnose_plots sys.exit(1) CFG['conditionA'] = [os.path.basename(x).replace('.bam', '') for x in options.conditionA.strip(',').split(',')] CFG['conditionB'] = [os.path.basename(x).replace('.bam', '') for x in options.conditionB.strip(',').split(',')] if len(CFG['conditionA']) > 0 and CFG['conditionA'][0].lower().endswith('txt'): CFG['conditionA'] = [str(x) for x in sp.loadtxt(CFG['conditionA'][0], dtype='str')] if len(CFG['conditionB']) > 0 and CFG['conditionB'][0].lower().endswith('txt'): CFG['conditionB'] = [str(x) for x in sp.loadtxt(CFG['conditionB'][0], dtype='str')] ### check if we got a list of bam files in a text file instead of a comma separated list if len(CFG['bam_fnames']) > 0 and CFG['bam_fnames'][0].split('.')[-1] == 'txt': CFG['bam_fnames'] = [str(x) for x in sp.atleast_1d(sp.loadtxt(CFG['bam_fnames'][0], dtype='str'))] ### assemble strain list CFG['samples'] = [] CFG['strains'] = [] for i in range(len(CFG['bam_fnames'])): if options.label != '-': CFG['samples'].append('%s_%s' % (options.label, re.sub(r'(.bam|.hdf5)$', '', CFG['bam_fnames'][i].split('/')[-1]))) else: CFG['samples'].append(re.sub(r'(.bam|.hdf5)$', '', CFG['bam_fnames'][i].split('/')[-1])) CFG['strains'].append('%s%s' % (ref_tag, CFG['samples'][-1])) CFG['strains'] = sp.array(CFG['strains']) ### adapt graph validation requirement to max number of samples CFG['sg_min_edge_count'] = min(CFG['sg_min_edge_count'], len(CFG['samples'])) return CFG
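# The y/n validation in parse_args above repeats one pattern per option; a
# hypothetical helper (not part of the original module) that captures it:
def _parse_yn(value, name):
    """Map a 'y'/'n' command line value to bool, exiting on anything else."""
    import sys
    if value not in ('y', 'n'):
        sys.stderr.write('ERROR: option %s should have value y or n, '
                         'but has %s\n' % (name, value))
        sys.exit(1)
    return value == 'y'

# e.g. CFG['verbose'] = _parse_yn(options.verbose, 'verbose')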
def parse_args(options, identity='main'): ### load all default settings options = default_settings(options) ref_tag = '' options.event_types = options.event_types.strip(',').split(',') if not os.path.exists(options.outdir): print('WARNING: Output directory %s does not exist - will be created\n\n' % options.outdir, file=sys.stderr) try: os.makedirs(options.outdir) except OSError: print('ERROR: Output directory %s can not be created.\n\n' % options.outdir, file=sys.stderr) sys.exit(2) ### options specific for main program if identity == 'main': ### open log file, if specified if options.logfile != '-': options.log_fname = options.logfile else: options.log_fname = 'stdout' ### set tmp directory default if options.tmpdir == '': options.tmpdir = os.path.join(options.outdir, 'tmp') options.bam_fnames = options.bams.strip(',').split(',') if not os.path.isfile(options.annotation): print('ERROR: Annotation file %s can not be found\n\n' % options.annotation, file=sys.stderr) sys.exit(2) #if options.refstrain != '-': # CFG['reference_strain'] = options.refstrain # ref_tag = '%s:' % options.refstrain ### pyproc options if options.pyproc: options.options_rproc = dict() options.options_rproc['mem_req_resubmit'] = [30000, 60000, 80000] options.options_rproc['time_req_resubmit'] = [60*60, 80*60, 90*60] options.options_rproc['resubmit'] = 3 options.options_rproc['priority'] = 100 options.options_rproc['addpaths'] = options.paths if options.environment: options.options_rproc['environment'] = options.environment if identity == 'viz': if options.bams != '-': options.bam_fnames = options.bams.strip(':').split(':') for g, group in enumerate(options.bam_fnames): options.bam_fnames[g] = group.strip(',').split(',') ### check existence of files for fname in options.bam_fnames[g]: if not os.path.isfile(fname): print('ERROR: Input file %s can not be found\n\n' % fname, file=sys.stderr) sys.exit(2) if identity == 'test': if options.conditionA == '-': print('ERROR: At least one sample for condition A required', file=sys.stderr) sys.exit(1) if options.conditionB == '-': print('ERROR: At least one sample for condition B required', file=sys.stderr) sys.exit(1) options.conditionA = options.conditionA.strip(',').split(',') options.conditionB = options.conditionB.strip(',').split(',') if len(options.conditionA) > 0 and options.conditionA[0].lower().endswith('txt'): options.conditionA = [str(x) for x in sp.loadtxt(options.conditionA[0], dtype='str')] if len(options.conditionB) > 0 and options.conditionB[0].lower().endswith('txt'): options.conditionB = [str(x) for x in sp.loadtxt(options.conditionB[0], dtype='str')] options.conditionA = [re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)|.[nN][pP][zZ]$', '', x) for x in options.conditionA] options.conditionB = [re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)|.[nN][pP][zZ]$', '', x) for x in options.conditionB] options.conditionA = [os.path.basename(x) for x in options.conditionA] options.conditionB = [os.path.basename(x) for x in options.conditionB] ### check if we got a list of bam files in a text file instead of a comma separated list if len(options.bam_fnames) > 0: if identity == 'main' and options.bam_fnames[0].split('.')[-1] == 'txt': options.bam_fnames = [str(x) for x in sp.atleast_1d(sp.loadtxt(options.bam_fnames[0], dtype='str'))] elif identity == 'viz': for g, group in enumerate(options.bam_fnames): if group[0].split('.')[-1] == 'txt': options.bam_fnames[g] = [str(x) for x in sp.atleast_1d(sp.loadtxt(group[0], dtype='str'))] ### check existence of alignment files for fname in 
flatten(options.bam_fnames): if not os.path.isfile(fname): print('ERROR: Input file %s can not be found\n\n' % fname, file=sys.stderr) sys.exit(2) if not fname.endswith('.hdf5') and not os.path.isfile(fname + '.bai'): print('ERROR: Input file %s is not indexed. %s.bai can not be found\n\n' % (fname, fname), file=sys.stderr) sys.exit(2) ### assemble strain list options.samples = [] options.strains = [] if identity in ['viz']: for g, group in enumerate(options.bam_fnames): options.strains.append([]) options.samples.append([]) for i in range(len(group)): options.samples[-1].append(re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)$', '', group[i].split('/')[-1])) options.strains[-1].append('%s%s' % (ref_tag, options.samples[-1][-1])) options.strains[-1] = sp.array(options.strains[-1]) if options.labels != '': options.labels = options.labels.split(',') assert (len(options.labels) == len(options.bam_fnames)), "Number of labels (%i given) and file names (%i given) needs to match!" % (len(options.labels), len(options.bam_fnames)) else: for i in range(len(options.bam_fnames)): options.samples.append(re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)$', '', options.bam_fnames[i].split('/')[-1])) options.strains.append('%s%s' % (ref_tag, options.samples[-1])) options.strains = sp.array(options.strains) ### adapt graph validation requirement to max number of samples options.sg_min_edge_count = min(options.sg_min_edge_count, len(options.samples)) return options
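# Sketch of the "list of alignments in a text file" convention handled
# above, factored into a helper; the name is illustrative and the sp
# (scipy) aliasing simply mirrors the code above:
def _expand_txt_list(fnames):
    """If the first entry is a .txt file, read one file name per line;
    sp.atleast_1d keeps a single-line file behaving like a list."""
    import scipy as sp
    if len(fnames) > 0 and fnames[0].split('.')[-1] == 'txt':
        return [str(x)
                for x in sp.atleast_1d(sp.loadtxt(fnames[0], dtype='str'))]
    return fnames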
def find_ics(y, yp, time, var_types, rtol, atol, constants, net,
             redirect_msgs=False):
    # We use this to find consistent sets of initial conditions for our
    # integrations. (We don't let ddaskr do it, because it doesn't calculate
    # values for d(alg_var)/dt, and we need them for sensitivity integration.)

    # On some systems, the f2py'd functions don't like len(constants)=0.
    if len(constants) == 0:
        constants = [0]
    var_types = scipy.asarray(var_types)
    atol = scipy.asarray(atol)
    rtol = scipy.asarray(rtol)
    # Note that we're copying y and yprime
    y = scipy.array(y, scipy.float_)
    yp = scipy.array(yp, scipy.float_)

    N_alg = scipy.sum(var_types == -1)

    dv_typ_vals = scipy.asarray([net.get_var_typical_val(id)
                                 for id in net.dynamicVars.keys()])

    if N_alg:
        # First we calculate a consistent set of algebraic variable values
        alg_vars_guess = y[var_types == -1]
        alg_typ_vals = dv_typ_vals[var_types == -1]
        possible_guesses = [alg_vars_guess, alg_typ_vals,
                            scipy.ones(N_alg, scipy.float_)]
        redir = Utility.Redirector()
        if redirect_msgs:
            redir.start()
        try:
            for guess in possible_guesses:
                sln, infodict, ier, mesg = \
                    scipy.optimize.fsolve(net.alg_res_func, x0=guess,
                                          xtol=min(rtol),
                                          args=(y, time, constants),
                                          full_output=True)
                sln = scipy.atleast_1d(sln)
                final_residuals = net.alg_res_func(sln, y, time, constants)
                if not scipy.any(abs(final_residuals)
                                 > abs(atol[var_types == -1])):
                    # This is success.
                    break
            else:
                message = ('Failed to calculate consistent algebraic values '
                           'in network %s.' % net.get_id())
                raise Utility.SloppyCellException(message)
        finally:
            messages = redir.stop()

        # Now plug those values into the current y
        y[var_types == -1] = sln

    # The non-algebraic variable yprimes come straight from the residuals
    yp_non_alg = net.res_function(time, y, y * 0, constants)[var_types == 1]
    yp[var_types == 1] = yp_non_alg

    if not N_alg:
        return y, yp

    # Now we need to figure out yprime for the algebraic vars
    curr_alg_yp = yp[var_types == -1]
    ones_arr = scipy.ones(N_alg, scipy.float_)
    # We try a range of possible guesses. Note that this is really just
    # a linear system, so if we continue to have difficulties with this part
    # of the calculation, or if it becomes a slow-down, we should consider
    # doing it by a linear solve, rather than using fsolve.
    possible_guesses = [curr_alg_yp, alg_typ_vals, ones_arr,
                        scipy.mean(abs(yp)) * ones_arr,
                        -scipy.mean(abs(yp)) * ones_arr,
                        max(abs(yp)) * ones_arr,
                        -max(abs(yp)) * ones_arr]
    if redirect_msgs:
        redir.start()
    try:
        for guess in possible_guesses:
            sln, infodict, ier, mesg = \
                scipy.optimize.fsolve(net.alg_deriv_func, x0=guess,
                                      xtol=min(rtol),
                                      args=(y, yp, time, constants),
                                      full_output=True)
            sln = scipy.atleast_1d(sln)
            final_residuals = net.alg_deriv_func(sln, y, yp, time, constants)
            if not scipy.any(abs(final_residuals)
                             > abs(atol[var_types == -1])):
                break
        else:
            raise Utility.SloppyCellException('Failed to calculate alg var '
                                              'derivatives in network %s.'
                                              % net.get_id())
    finally:
        messages = redir.stop()

    sln = scipy.atleast_1d(sln)
    yp[var_types == -1] = sln

    return y, yp
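# Toy version of the two fsolve stages in find_ics, on the scalar DAE
#   y0' = -y0,  0 = y1 - y0**2   (y1 is the algebraic variable):
# stage 1 solves the algebraic residual for y1; stage 2 solves the
# time-differentiated constraint for y1'. Everything here is local to the
# example and not part of the original module.
import scipy.optimize

def _alg_res(y1, y0):
    return y1 - y0 ** 2                       # algebraic residual

def _alg_deriv(y1p, y0, y0p):
    return y1p - 2.0 * y0 * y0p               # d/dt of the constraint

y0 = 3.0
y1 = scipy.optimize.fsolve(_alg_res, x0=1.0, args=(y0,))[0]          # 9.0
y0p = -y0                                     # from the differential part
y1p = scipy.optimize.fsolve(_alg_deriv, x0=0.0, args=(y0, y0p))[0]   # -18.0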
def solve(self, wls):
    """Isotropic solver.

    INPUT
    wls = wavelengths to scan (any asarray-able object).

    OUTPUT
    self.DE1, self.DE3 = power reflected and transmitted.

    NOTE
    see:
    Moharam, "Formulation for stable and efficient implementation of the
    rigorous coupled-wave analysis of binary gratings", JOSA A, 12(5), 1995
    Lalanne, "Highly improved convergence of the coupled-wave method for TM
    polarization", JOSA A, 13(4), 1996
    Moharam, "Stable implementation of the rigorous coupled-wave analysis
    for surface-relief gratings: enhanced transmittance matrix approach",
    JOSA A, 12(5), 1995
    """
    self.wls = S.atleast_1d(wls)
    LAMBDA = self.LAMBDA
    n = self.n
    multilayer = self.multilayer
    alpha = self.alpha
    delta = self.delta
    psi = self.psi
    phi = self.phi

    nlayers = len(multilayer)
    i = S.arange(-n, n + 1)
    nood = 2 * n + 1
    hmax = nood - 1

    # grating vector (on the xz plane)
    # grating on the xy plane
    K = 2 * pi / LAMBDA * np.array([S.sin(phi), 0., S.cos(phi)],
                                   dtype=complex)

    DE1 = S.zeros((nood, self.wls.size))
    DE3 = S.zeros_like(DE1)

    dirk1 = np.array([S.sin(alpha) * S.cos(delta),
                      S.sin(alpha) * S.sin(delta),
                      S.cos(alpha)])

    # useful matrices
    I = S.eye(i.size)
    I2 = S.eye(i.size * 2)
    ZERO = S.zeros_like(I)

    X = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
    MTp1 = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
    MTp2 = S.zeros_like(MTp1)

    EPS2 = S.zeros(2 * hmax + 1, dtype=complex)
    EPS21 = S.zeros_like(EPS2)

    dlt = (i == 0).astype(int)

    for iwl, wl in enumerate(self.wls):
        # free space wavevector
        k = 2 * pi / wl

        n1 = multilayer[0].mat.n(wl).item()
        n3 = multilayer[-1].mat.n(wl).item()

        # incident plane wave wavevector
        k1 = k * n1 * dirk1

        # all the other wavevectors
        tmp_x = k1[0] - i * K[0]
        tmp_y = k1[1] * S.ones_like(i)
        tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n1)
        k1i = S.r_[[tmp_x], [tmp_y], [tmp_z]]

        # k2i = S.r_[[k1[0] - i*K[0]], [k1[1] - i * K[1]], [-i * K[2]]]

        tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n3)
        k3i = S.r_[[k1i[0, :]], [k1i[1, :]], [tmp_z]]

        # aliases for constant wavevectors
        kx = k1i[0, :]
        ky = k1[1]

        # angles of reflection
        # phi_i = S.arctan2(ky, kx)
        phi_i = S.arctan2(ky, kx.real)  # careful: use the real part of kx

        Kx = S.diag(kx / k)
        Ky = ky / k * I
        Z1 = S.diag(k1i[2, :] / (k * n1**2))
        Y1 = S.diag(k1i[2, :] / k)
        Z3 = S.diag(k3i[2, :] / (k * n3**2))
        Y3 = S.diag(k3i[2, :] / k)
        # Fc = S.diag(S.cos(phi_i))
        fc = S.cos(phi_i)
        # Fs = S.diag(S.sin(phi_i))
        fs = S.sin(phi_i)

        MR = S.asarray(S.bmat([[I, ZERO],
                               [-1j * Y1, ZERO],
                               [ZERO, I],
                               [ZERO, -1j * Z1]]))

        MT = S.asarray(S.bmat([[I, ZERO],
                               [1j * Y3, ZERO],
                               [ZERO, I],
                               [ZERO, 1j * Z3]]))

        # internal layers (grating or layer)
        X.fill(0.0)
        MTp1.fill(0.0)
        MTp2.fill(0.0)
        for nlayer in range(nlayers - 2, 0, -1):  # internal layers
            layer = multilayer[nlayer]
            d = layer.thickness

            EPS2, EPS21 = layer.getEPSFourierCoeffs(wl, n, anisotropic=False)

            E = toeplitz(EPS2[hmax::-1], EPS2[hmax:])
            E1 = toeplitz(EPS21[hmax::-1], EPS21[hmax:])
            E11 = inv(E1)
            # B = S.dot(Kx, linsolve(E, Kx)) - I
            B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
            # A = S.dot(Kx, Kx) - E
            A = S.diag((kx / k)**2) - E

            # Note: workaround for a conditioning bug (alfredo):
            # randomize Kx slightly until cond(A) is small (< 1e10).
            # Dirty fix: for certain kx the Helmholtz operator has two null
            # eigenvalues and A, B are not invertible --> perturb the kx
            # slightly, although these cases should really be treated
            # separately (analytically).
            if cond(A) > 1e10:
                warning("BAD CONDITIONING: randomization of kx")
                while cond(A) > 1e10:
                    Kx = Kx * (1 + 1e-9 * S.rand())
                    B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                    A = S.diag((kx / k)**2) - E

            if S.absolute(K[2] / k) > 1e-10:
                raise ValueError(
                    "First Order Helmholtz Operator not implemented, yet!")
            elif ky == 0 or S.allclose(S.diag(Ky / ky * k), 1):
                # lalanne
                # H_U_reduced = S.dot(Ky, Ky) + A
                H_U_reduced = (ky / k)**2 * I + A
                # H_S_reduced = S.dot(Ky, Ky) + S.dot(Kx, linsolve(E, S.dot(Kx, E11))) - E11
                H_S_reduced = ((ky / k)**2 * I + kx[:, S.newaxis] / k *
                               linsolve(E, kx[:, S.newaxis] / k * E11) - E11)

                q1, W1 = eig(H_U_reduced)
                q1 = S.sqrt(q1)
                q2, W2 = eig(H_S_reduced)
                q2 = S.sqrt(q2)

                # boundary conditions
                # V11 = S.dot(linsolve(A, W1), S.diag(q1))
                V11 = linsolve(A, W1) * q1[S.newaxis, :]
                V12 = (ky / k) * S.dot(linsolve(A, Kx), W2)
                V21 = (ky / k) * S.dot(linsolve(B, Kx), linsolve(E, W1))
                # V22 = S.dot(linsolve(B, W2), S.diag(q2))
                V22 = linsolve(B, W2) * q2[S.newaxis, :]

                # Vss = S.dot(Fc, V11)
                Vss = fc[:, S.newaxis] * V11
                # Wss = S.dot(Fc, W1) + S.dot(Fs, V21)
                Wss = fc[:, S.newaxis] * W1 + fs[:, S.newaxis] * V21
                # Vsp = S.dot(Fc, V12) - S.dot(Fs, W2)
                Vsp = fc[:, S.newaxis] * V12 - fs[:, S.newaxis] * W2
                # Wsp = S.dot(Fs, V22)
                Wsp = fs[:, S.newaxis] * V22
                # Wpp = S.dot(Fc, V22)
                Wpp = fc[:, S.newaxis] * V22
                # Vpp = S.dot(Fc, W2) + S.dot(Fs, V12)
                Vpp = fc[:, S.newaxis] * W2 + fs[:, S.newaxis] * V12
                # Wps = S.dot(Fc, V21) - S.dot(Fs, W1)
                Wps = fc[:, S.newaxis] * V21 - fs[:, S.newaxis] * W1
                # Vps = S.dot(Fs, V11)
                Vps = fs[:, S.newaxis] * V11

                Mc2bar = S.asarray(S.bmat([[Vss, Vsp, Vss, Vsp],
                                           [Wss, Wsp, -Wss, -Wsp],
                                           [Wps, Wpp, -Wps, -Wpp],
                                           [Vps, Vpp, Vps, Vpp]]))

                x = S.r_[S.exp(-k * q1 * d), S.exp(-k * q2 * d)]

                # Mc1 = S.dot(Mc2bar, S.diag(S.r_[S.ones_like(x), x]))
                xx = S.r_[S.ones_like(x), x]
                Mc1 = Mc2bar * xx[S.newaxis, :]

                X[:, :, nlayer] = S.diag(x)

                MTp = linsolve(Mc2bar, MT)
                MTp1[:, :, nlayer] = MTp[0:2 * nood, :]
                MTp2 = MTp[2 * nood:, :]

                MT = S.dot(Mc1, S.r_[I2, S.dot(MTp2,
                                               linsolve(MTp1[:, :, nlayer],
                                                        X[:, :, nlayer]))])
            else:
                raise ValueError(
                    "Second Order Helmholtz Operator not implemented, yet!")

        # M = S.asarray(S.bmat([-MR, MT]))
        M = S.c_[-MR, MT]
        b = S.r_[S.sin(psi) * dlt,
                 1j * S.sin(psi) * n1 * S.cos(alpha) * dlt,
                 -1j * S.cos(psi) * n1 * dlt,
                 S.cos(psi) * S.cos(alpha) * dlt]

        x = linsolve(M, b)
        R, T = S.split(x, 2)
        Rs, Rp = S.split(R, 2)
        for ii in range(1, nlayers - 1):
            T = S.dot(linsolve(MTp1[:, :, ii], X[:, :, ii]), T)
        Ts, Tp = S.split(T, 2)

        DE1[:, iwl] = (k1i[2, :] / (k1[2])).real * S.absolute(Rs)**2 + (
            k1i[2, :] / (k1[2] * n1**2)).real * S.absolute(Rp)**2
        DE3[:, iwl] = (k3i[2, :] / (k1[2])).real * S.absolute(Ts)**2 + (
            k3i[2, :] / (k1[2] * n3**2)).real * S.absolute(Tp)**2

    # save the results
    self.DE1 = DE1
    self.DE3 = DE3

    return self
def smooth_data(y, X, smoothing, smoothing_dimensions=None):
    """Make a smoother based on the scheme:
    none      - return eye(N): no mixing between Xs
    all       - return 1_N/N: complete mixing between Xs
    local     - return exp(-cdist(Z, X)**2): partial local mixing between Xs
    subset    - average over random partitions of m Xs
    dirichlet - rows drawn from a sparse Dirichlet distribution
    white     - whiten with pinv(X)
    random    - return rand(m, N): partial random mixing between Xs
    """
    N, d = X.shape
    if smoothing_dimensions is None:
        smoothing_dimensions = N

    if smoothing == "none":
        return y, X
    elif smoothing == "all":
        Q = ones(N) / N
        y, X = Q.dot(y), Q.dot(X)
        return sc.atleast_1d(y), sc.atleast_2d(X)
    elif smoothing == "local":
        # Choose smoothing_dimensions number of random Xs
        Zi = permutation(N)[:smoothing_dimensions]
        Z = X[Zi]
        Q = exp(-cdist(Z, X)**2)
        # Normalise to be stochastic
        Q = (Q.T / Q.sum(1)).T
        y, X = Q.dot(y), Q.dot(X)
        return y, X
    elif smoothing == "subset":
        # Average over random subsets of the Xs
        Q = zeros((smoothing_dimensions, N))
        for i in range(smoothing_dimensions):
            Zi = permutation(N)[:smoothing_dimensions]
            Q[i, Zi] = 1.0 / len(Zi)
        y, X = Q.dot(y), Q.dot(X)
        return y, X
    elif smoothing == "dirichlet":
        # Rows drawn from a sparse Dirichlet distribution
        alpha = 0.1
        Q = dirichlet(alpha * ones(N) / N, smoothing_dimensions)
        y, X = Q.dot(y), Q.dot(X)
        return y, X
    elif smoothing == "white":
        # Whiten using the pseudo-inverse of X
        Q = pinv(X)
        y, X = Q.dot(y), Q.dot(X)
        return y, X
    elif smoothing == "random":
        # Random row-stochastic mixing matrix
        Q = rand(smoothing_dimensions, N)
        # Normalise to be stochastic
        Q = (Q.T / Q.sum(1)).T
        y, X = Q.dot(y), Q.dot(X)
        return y, X
    else:
        raise NotImplementedError()
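# Small demo of the "local" scheme above: a row-stochastic kernel built from
# squared distances between a random subset Z and all points X (toy data;
# all names are local to this example).
import numpy as np
from numpy.random import permutation
from scipy.spatial.distance import cdist

N, d, m = 20, 2, 5
X = np.random.rand(N, d)
y = np.random.rand(N)
Z = X[permutation(N)[:m]]
Q = np.exp(-cdist(Z, X) ** 2)
Q = (Q.T / Q.sum(1)).T            # normalise rows to sum to 1 (stochastic)
y_s, X_s = Q.dot(y), Q.dot(X)     # m smoothed observations
assert np.allclose(Q.sum(1), 1.0)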
def besselFourierSens(beam, rcent, zcent, rmax, l=range(15), mcos=[0],
                      msin=[], rcond=2e-2):
    """Calculates the distance weight matrix for specified fourier components

    This function is used directly for poloidal tomography, extensibly for
    many tokamaks. It assumes that the sinogram space can be parameterized
    with a radial variable and an angular variable dependent on the plasma
    center (which typically use the tokamak.center method).

    Args:
        beam: geometry Object with reference origin (either beam OR ray)
        rcent: radial coordinate(s) of the plasma center
        zcent: vertical coordinate(s) of the plasma center
        rmax: radius at which the emissivity is assumed to go to zero

    Kwargs:
        l: radial mode numbers (indices into the Bessel function zeros)
        mcos: cosine angular mode numbers
        msin: sine angular mode numbers
        rcond: cutoff parameter (not used in this routine)

    Returns:
        output: array of weights with shape (chord, radial x fourier
        components), or (chord, beam, radial x fourier components) when a
        sequence of beams is given.
    """
    # find and store bessel zeros
    m = scipy.unique(mcos + msin)
    length = len(l)
    zeros = scipy.zeros((len(m), length))
    for i in range(len(m)):
        zeros[i] = scipy.special.jn_zeros(m[i], zeros.shape[1])

    kernel = scipy.zeros((len(m), length))  # need to modify zeros

    try:
        try:
            output = scipy.zeros((len(rcent),
                                  length * (len(mcos) + len(msin))))
        except TypeError:
            output = scipy.zeros((1, length * (len(mcos) + len(msin))))

        rmax = scipy.atleast_1d(rmax)

        for i in range(len(rcent)):
            idx = 0
            # returns closest approach vector to plasma center mod for NSTX-U
            # temp = j(j.tmin(rcent, zcent)).t(rcent, zcent)
            cent = geometry.Point(geometry.Vecr([rcent[i], 0, zcent[i]]),
                                  beam._origin)
            temp2 = beam(beam.smin(cent)) - cent
            temp = [temp2.s, 0, scipy.arctan2(temp2.x2(), temp2.x0())]
            # temp = beam(beam.tmin(rcent[i], zcent[i])).t(rcent[i], zcent[i])

            if temp[0] > rmax[i]:
                rho = 1.0
                warnings.warn('chord outside of specified designated edge '
                              'zero emissivity', RuntimeWarning)
            else:
                rho = temp[0] / rmax[i]

            for j in range(len(m)):
                for k in range(length):
                    kernel[j, k] = rmax[i] * besselFourierKernel(
                        m[j], zeros[j, k], rho)

            # fill sens matrix
            for j in range(len(mcos)):
                output[i, idx * length:(idx + 1) * length] = \
                    scipy.cos(mcos[j] * temp[2]) * kernel[scipy.where(m == mcos[j])]
                idx += 1

            for j in range(len(msin)):
                output[i, idx * length:(idx + 1) * length] = \
                    scipy.sin(msin[j] * temp[2]) * kernel[scipy.where(m == msin[j])]
                idx += 1

        return output

    except AttributeError:
        # implement parallelization here
        output = scipy.zeros((len(rcent), len(beam),
                              length * len(mcos + msin)))
        for i in range(len(beam)):
            output[:, i, :] += besselFourierSens(beam[i], rcent, zcent, rmax,
                                                 l=l, mcos=mcos, msin=msin)
        return output
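# The Bessel-zero table above in isolation: row j holds the first `length`
# positive zeros of J_{m[j]}, which scale the radial Bessel-Fourier basis
# (toy mode numbers; numpy is used here in place of the old scipy aliases).
import numpy
from scipy.special import jn_zeros

m = numpy.unique([0, 1, 2])
length = 4
zeros = numpy.zeros((len(m), length))
for i in range(len(m)):
    zeros[i] = jn_zeros(int(m[i]), length)
# zeros[0] ~ [2.405, 5.520, 8.654, 11.792], the familiar zeros of J_0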
def parse_args(options, identity='main'): ### load all default settings options = default_settings(options) ref_tag = '' if hasattr(options, 'event_types'): options.event_types = options.event_types.strip(',').split(',') if not os.path.exists(options.outdir): print( 'WARNING: Output directory %s does not exist - will be created\n\n' % options.outdir, file=sys.stderr) try: os.makedirs(options.outdir) except OSError: print('ERROR: Output directory %s can not be created.\n\n' % options.outdir, file=sys.stderr) sys.exit(2) ### options specific for main program if identity == 'main': ### open log file, if specified if options.logfile != '-': options.log_fname = options.logfile else: options.log_fname = 'stdout' ### set tmp directory default if options.tmpdir == '': options.tmpdir = os.path.join(options.outdir, 'tmp') options.bam_fnames = options.bams.strip(',').split(',') if not os.path.isfile(options.annotation): print('ERROR: Annotation file %s can not be found\n\n' % options.annotation, file=sys.stderr) sys.exit(2) ### pyproc options if options.pyproc: options.options_rproc = dict() options.options_rproc['mem_req_resubmit'] = [30000, 60000, 80000] options.options_rproc['time_req_resubmit'] = [ 60 * 60, 80 * 60, 90 * 60 ] options.options_rproc['resubmit'] = 3 options.options_rproc['priority'] = 100 options.options_rproc['addpaths'] = options.paths if options.environment: options.options_rproc['environment'] = options.environment if identity == 'test': if options.conditionA == '-': print('ERROR: At least one sample for condition A required', file=sys.stderr) sys.exit(1) if options.conditionB == '-': print('ERROR: At least one sample for condition B required', file=sys.stderr) sys.exit(1) options.conditionA = options.conditionA.strip(',').split(',') options.conditionB = options.conditionB.strip(',').split(',') if len(options.conditionA) > 0 and options.conditionA[0].lower( ).endswith('txt'): options.conditionA = [ str(x) for x in sp.loadtxt(options.conditionA[0], dtype='str') ] if len(options.conditionB) > 0 and options.conditionB[0].lower( ).endswith('txt'): options.conditionB = [ str(x) for x in sp.loadtxt(options.conditionB[0], dtype='str') ] options.conditionA = [ re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)|.[nN][pP][zZ]$', '', x) for x in options.conditionA ] options.conditionB = [ re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)|.[nN][pP][zZ]$', '', x) for x in options.conditionB ] options.conditionA = [os.path.basename(x) for x in options.conditionA] options.conditionB = [os.path.basename(x) for x in options.conditionB] ### check if we got a list of bam files in a text file instead of a comma separated list if len(options.bam_fnames) > 0: if identity == 'main' and options.bam_fnames[0].split( '.')[-1] == 'txt': options.bam_fnames = [ str(x) for x in sp.atleast_1d( sp.loadtxt(options.bam_fnames[0], dtype='str')) ] ### check existence of alignment files for fname in flatten(options.bam_fnames): if not os.path.isfile(fname): print('ERROR: Input file %s can not be found\n\n' % fname, file=sys.stderr) sys.exit(2) if not fname.endswith('.hdf5') and not os.path.isfile(fname + '.bai'): print( 'ERROR: Input file %s is not indexed. 
%s.bai can not be found\n\n' % (fname, fname), file=sys.stderr) sys.exit(2) ### assemble strain list options.samples = [] options.strains = [] if identity != 'viz': for i in range(len(options.bam_fnames)): options.samples.append( re.sub(r'(.[bB][aA][mM]|.[hH][dD][fF]5)$', '', options.bam_fnames[i].split('/')[-1])) options.strains.append('%s%s' % (ref_tag, options.samples[-1])) options.strains = sp.array(options.strains) ### adapt graph validation requirement to max number of samples options.sg_min_edge_count = min(options.sg_min_edge_count, len(options.samples)) return options
def fluxFourierSens(beam, plasmameth, centermeth, time, points, mcos=[0],
                    msin=[], ds=1e-3):
    """Calculates the distance weight matrix for specified fourier components

    This function is used directly for poloidal tomography, extensibly for
    many tokamaks. It assumes that the sinogram space can be parameterized
    with a flux-based radial variable (which is defined using the eqtools
    methods such as rz2psinorm, rz2volnorm, etc.) and an angular variable
    dependent on the plasma center (which typically use the tokamak.center
    method).

    It returns a matrix which is [time, beam, radial x fourier components]
    in size, which is necessary for inverting the measured brightnesses.
    Each value in the array is a length, which is the effective weight of a
    radial surface with specific fourier dependence. Each weight along a
    chord can be summed to represent the beam line-integral through the
    vessel. It is assumed that the toroidal mode number is small such that
    there is no cross coupling of the modes in the line integrals of the
    chords. This is the cylindrical mode limit, where the ratio of inverse
    aspect ratio to toroidal mode number is negligible.

    Args:
        beam: geometry Object with reference origin (either beam OR ray)
        plasmameth: flux-based radial method (from plasma object)
        centermeth: plasma center method (from plasma object)
        time: equilibrium time
        points: points in radial sinogram in which to map to.

    Kwargs:
        mcos: number of cosine fourier components to generate
        msin: number of sine fourier components to generate
        ds: step size along beam/ray in which to evaluate in meters

    Returns:
        output: A 3-dimensional array of weights in meters which is
        [time, beam, radial x fourier components]. The order of the last
        dimension is grouped by fourier component, cosine radial terms
        then sine radial terms.
    """
    time = scipy.atleast_1d(time)
    interp = scipy.interpolate.interp1d(points,
                                        scipy.arange(len(points)),
                                        kind='cubic')
    length = len(points)

    try:
        output = scipy.zeros((len(time), length * len(mcos + msin)))
        temp = beam(scipy.mgrid[beam.norm.s[-2]:beam.norm.s[-1]:ds])
        mapped = scipy.atleast_2d(plasmameth(temp.r0(), temp.x2(), time))

        # recover angles of each position in the temp vector utilizing the
        # t2 method of geometry.Vec; improper vectorization strategy in t2
        # forces the use of a for loop
        angle = scipy.zeros(mapped.shape)
        for i in range(len(time)):
            pt0 = centermeth(time[i])
            angle[i] = temp.t2(pt0[0], pt0[1])

        # knowing that the last point (point[-1]) is assumed to be a ZERO
        # emissivity point, an additional brightness is added which only
        # sees the last emissivity to yield the zero
        scipy.place(mapped, mapped > points[-1], points[-1])

        if mapped.min() < 0:
            warnings.warn('chord measures at a parameter below point grid',
                          RuntimeWarning)

        # for a given point along a chord, use a spline to solve what
        # reconstruction points it is most close to. Then in the weighting
        # matrix (which is (brightness, points) in shape) add the various
        # fractional weighting.
        out = interp(mapped)

        # find out point using a floor-like command (returns ints)
        idx1 = out.astype(int)
        scipy.clip(idx1, 0, length - 1, out=idx1)
        idx2 = idx1 + 1
        scipy.clip(idx2, 0, length - 1, out=idx2)

        # reduce out to the fraction in nearby bins
        out = (out % 1.) * ds
        lim = 0
        for i in mcos:
            angin = scipy.cos(i * angle)
            # _beam.idx_add2(output[:, lim:lim + length], idx1, idx2, out, angin, ds)
            _beam.idx_add(output, idx1, idx2, out, angin, ds, lim)
            lim += length

        for i in msin:
            angin = scipy.sin(i * angle)
            # _beam.idx_add2(output[:, lim:lim + length], idx1, idx2, out, angin, ds)
            _beam.idx_add(output, idx1, idx2, out, angin, ds, lim)
            lim += length

    except AttributeError:
        output = scipy.zeros((len(time), len(beam),
                              length * len(mcos + msin)))
        # this for loop should be parallelized (well compartmentalized)
        for i in range(len(beam)):
            output[:, i, :] = fluxFourierSens(beam[i], plasmameth,
                                              centermeth, time, points,
                                              mcos=mcos, msin=msin, ds=ds)

    return output
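# The index-interpolation trick above in isolation: map continuous sample
# positions onto a point grid and split each sample between its two
# neighbouring grid bins with linear fractional weights (a toy, pure-numpy
# version of what the compiled _beam.idx_add accumulates).
import numpy
import scipy.interpolate

points = numpy.linspace(0.0, 1.0, 6)                # reconstruction grid
interp = scipy.interpolate.interp1d(points, numpy.arange(len(points)),
                                    kind='cubic')
mapped = numpy.array([0.13, 0.5, 0.97])             # positions along a chord
out = interp(mapped)                                # fractional grid indices
idx1 = numpy.clip(out.astype(int), 0, len(points) - 1)
idx2 = numpy.clip(idx1 + 1, 0, len(points) - 1)
frac = out % 1.0                                    # weight toward idx2
weights = numpy.zeros(len(points))
numpy.add.at(weights, idx1, 1.0 - frac)
numpy.add.at(weights, idx2, frac)
assert numpy.isclose(weights.sum(), len(mapped))    # each sample fully placed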
def solve(self, wls): """Anisotropic solver. INPUT wls = wavelengths to scan (any asarray-able object). OUTPUT self.DEO1, self.DEE1, self.DEO3, self.DEE3 = power reflected and transmitted. """ self.wls = S.atleast_1d(wls) LAMBDA = self.LAMBDA n = self.n multilayer = self.multilayer alpha = self.alpha delta = self.delta psi = self.psi phi = self.phi nlayers = len(multilayer) i = S.arange(-n, n + 1) nood = 2 * n + 1 hmax = nood - 1 DEO1 = S.zeros((nood, self.wls.size)) DEO3 = S.zeros_like(DEO1) DEE1 = S.zeros_like(DEO1) DEE3 = S.zeros_like(DEO1) c1 = np.array([1., 0., 0.]) c3 = np.array([1., 0., 0.]) # grating on the xy plane K = 2 * pi / LAMBDA * np.array( [S.sin(phi), 0., S.cos(phi)], dtype=complex) dirk1 = np.array([ S.sin(alpha) * S.cos(delta), S.sin(alpha) * S.sin(delta), S.cos(alpha) ]) # D polarization vector u = np.array([ S.cos(psi) * S.cos(alpha) * S.cos(delta) - S.sin(psi) * S.sin(delta), S.cos(psi) * S.cos(alpha) * S.sin(delta) + S.sin(psi) * S.cos(delta), -S.cos(psi) * S.sin(alpha), ]) kO1i = S.zeros((3, i.size), dtype=complex) kE1i = S.zeros_like(kO1i) kO3i = S.zeros_like(kO1i) kE3i = S.zeros_like(kO1i) Mp = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex) M = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex) dlt = (i == 0).astype(int) for iwl, wl in enumerate(self.wls): nO1 = nE1 = multilayer[0].mat.n(wl).item() nO3 = nE3 = multilayer[-1].mat.n(wl).item() # wavevectors k = 2 * pi / wl eps1 = S.diag(S.asarray([nE1, nO1, nO1])**2) eps3 = S.diag(S.asarray([nE3, nO3, nO3])**2) # ordinary wave abskO1 = k * nO1 # abskO3 = k * nO3 # extraordinary wave # abskE1 = k * nO1 *nE1 / S.sqrt(nO1**2 + (nE1**2 - nO1**2) * S.dot(-c1, dirk1)**2) # abskE3 = k * nO3 *nE3 / S.sqrt(nO3**2 + (nE3**2 - nO3**2) * S.dot(-c3, dirk1)**2) k1 = abskO1 * dirk1 kO1i[0, :] = k1[0] - i * K[0] kO1i[1, :] = k1[1] * S.ones_like(i) kO1i[2, :] = -dispersion_relation_ordinary(kO1i[0, :], kO1i[1, :], k, nO1) kE1i[0, :] = kO1i[0, :] kE1i[1, :] = kO1i[1, :] kE1i[2, :] = -dispersion_relation_extraordinary( kE1i[0, :], kE1i[1, :], k, nO1, nE1, c1) kO3i[0, :] = kO1i[0, :] kO3i[1, :] = kO1i[1, :] kO3i[2, :] = dispersion_relation_ordinary(kO3i[0, :], kO3i[1, :], k, nO3) kE3i[0, :] = kO1i[0, :] kE3i[1, :] = kO1i[1, :] kE3i[2, :] = dispersion_relation_extraordinary( kE3i[0, :], kE3i[1, :], k, nO3, nE3, c3) # k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [k1[2] - i * K[2]]] k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [-i * K[2]]] # aliases for constant wavevectors kx = kO1i[0, :] # o kE1i(1,;), tanto e' lo stesso ky = k1[1] # matrices I = S.eye(nood, dtype=complex) ZERO = S.zeros((nood, nood), dtype=complex) Kx = S.diag(kx / k) Ky = ky / k * I Kz = S.diag(k2i[2, :] / k) KO1z = S.diag(kO1i[2, :] / k) KE1z = S.diag(kE1i[2, :] / k) KO3z = S.diag(kO3i[2, :] / k) KE3z = S.diag(kE3i[2, :] / k) ARO = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KO1z * eps1[2, 0] BRO = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KO1z * eps1[2, 1] CRO_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KO1z * eps1[2, 2]) ARE = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KE1z * eps1[2, 0] BRE = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KE1z * eps1[2, 1] CRE_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KE1z * eps1[2, 2]) ATO = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KO3z * eps3[2, 0] BTO = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KO3z * eps3[2, 1] CTO_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KO3z * eps3[2, 2]) ATE = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KE3z * eps3[2, 0] BTE = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KE3z * eps3[2, 1] CTE_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KE3z 
* eps3[2, 2]) DRE = c1[1] * KE1z - c1[2] * Ky ERE = c1[2] * Kx - c1[0] * KE1z FRE = c1[0] * Ky - c1[1] * Kx DTE = c3[1] * KE3z - c3[2] * Ky ETE = c3[2] * Kx - c3[0] * KE3z FTE = c3[0] * Ky - c3[1] * Kx b = S.r_[u[0] * dlt, u[1] * dlt, (k1[1] / k * u[2] - k1[2] / k * u[1]) * dlt, (k1[2] / k * u[0] - k1[0] / k * u[2]) * dlt, ] Ky_CRO_1 = ky / k * CRO_1 Ky_CRE_1 = ky / k * CRE_1 Kx_CRO_1 = kx[:, S.newaxis] / k * CRO_1 Kx_CRE_1 = kx[:, S.newaxis] / k * CRE_1 MR31 = -S.dot(Ky_CRO_1, ARO) MR32 = -S.dot(Ky_CRO_1, BRO) - KO1z MR33 = -S.dot(Ky_CRE_1, ARE) MR34 = -S.dot(Ky_CRE_1, BRE) - KE1z MR41 = S.dot(Kx_CRO_1, ARO) + KO1z MR42 = S.dot(Kx_CRO_1, BRO) MR43 = S.dot(Kx_CRE_1, ARE) + KE1z MR44 = S.dot(Kx_CRE_1, BRE) MR = S.asarray( S.bmat([ [I, ZERO, I, ZERO], [ZERO, I, ZERO, I], [MR31, MR32, MR33, MR34], [MR41, MR42, MR43, MR44], ])) Ky_CTO_1 = ky / k * CTO_1 Ky_CTE_1 = ky / k * CTE_1 Kx_CTO_1 = kx[:, S.newaxis] / k * CTO_1 Kx_CTE_1 = kx[:, S.newaxis] / k * CTE_1 MT31 = -S.dot(Ky_CTO_1, ATO) MT32 = -S.dot(Ky_CTO_1, BTO) - KO3z MT33 = -S.dot(Ky_CTE_1, ATE) MT34 = -S.dot(Ky_CTE_1, BTE) - KE3z MT41 = S.dot(Kx_CTO_1, ATO) + KO3z MT42 = S.dot(Kx_CTO_1, BTO) MT43 = S.dot(Kx_CTE_1, ATE) + KE3z MT44 = S.dot(Kx_CTE_1, BTE) MT = S.asarray( S.bmat([ [I, ZERO, I, ZERO], [ZERO, I, ZERO, I], [MT31, MT32, MT33, MT34], [MT41, MT42, MT43, MT44], ])) Mp.fill(0.0) M.fill(0.0) for nlayer in range(nlayers - 2, 0, -1): # internal layers layer = multilayer[nlayer] thickness = layer.thickness EPS2, EPS21 = layer.getEPSFourierCoeffs(wl, n, anisotropic=True) # Exx = S.squeeze(EPS2[0, 0, :]) # Exx = toeplitz(S.flipud(Exx[0:hmax + 1]), Exx[hmax:]) Exy = S.squeeze(EPS2[0, 1, :]) Exy = toeplitz(S.flipud(Exy[0:hmax + 1]), Exy[hmax:]) Exz = S.squeeze(EPS2[0, 2, :]) Exz = toeplitz(S.flipud(Exz[0:hmax + 1]), Exz[hmax:]) Eyx = S.squeeze(EPS2[1, 0, :]) Eyx = toeplitz(S.flipud(Eyx[0:hmax + 1]), Eyx[hmax:]) Eyy = S.squeeze(EPS2[1, 1, :]) Eyy = toeplitz(S.flipud(Eyy[0:hmax + 1]), Eyy[hmax:]) Eyz = S.squeeze(EPS2[1, 2, :]) Eyz = toeplitz(S.flipud(Eyz[0:hmax + 1]), Eyz[hmax:]) Ezx = S.squeeze(EPS2[2, 0, :]) Ezx = toeplitz(S.flipud(Ezx[0:hmax + 1]), Ezx[hmax:]) Ezy = S.squeeze(EPS2[2, 1, :]) Ezy = toeplitz(S.flipud(Ezy[0:hmax + 1]), Ezy[hmax:]) Ezz = S.squeeze(EPS2[2, 2, :]) Ezz = toeplitz(S.flipud(Ezz[0:hmax + 1]), Ezz[hmax:]) Exx_1 = S.squeeze(EPS21[0, 0, :]) Exx_1 = toeplitz(S.flipud(Exx_1[0:hmax + 1]), Exx_1[hmax:]) Exx_1_1 = inv(Exx_1) # lalanne Ezz_1 = inv(Ezz) Ky_Ezz_1 = ky / k * Ezz_1 Kx_Ezz_1 = kx[:, S.newaxis] / k * Ezz_1 Exz_Ezz_1 = S.dot(Exz, Ezz_1) Eyz_Ezz_1 = S.dot(Eyz, Ezz_1) H11 = 1j * S.dot(Ky_Ezz_1, Ezy) H12 = 1j * S.dot(Ky_Ezz_1, Ezx) H13 = S.dot(Ky_Ezz_1, Kx) H14 = I - S.dot(Ky_Ezz_1, Ky) H21 = 1j * S.dot(Kx_Ezz_1, Ezy) H22 = 1j * S.dot(Kx_Ezz_1, Ezx) H23 = S.dot(Kx_Ezz_1, Kx) - I H24 = -S.dot(Kx_Ezz_1, Ky) H31 = S.dot(Kx, Ky) + Exy - S.dot(Exz_Ezz_1, Ezy) H32 = Exx_1_1 - S.dot(Ky, Ky) - S.dot(Exz_Ezz_1, Ezx) H33 = 1j * S.dot(Exz_Ezz_1, Kx) H34 = -1j * S.dot(Exz_Ezz_1, Ky) H41 = S.dot(Kx, Kx) - Eyy + S.dot(Eyz_Ezz_1, Ezy) H42 = -S.dot(Kx, Ky) - Eyx + S.dot(Eyz_Ezz_1, Ezx) H43 = -1j * S.dot(Eyz_Ezz_1, Kx) H44 = 1j * S.dot(Eyz_Ezz_1, Ky) H = 1j * S.diag(S.repeat(S.diag(Kz), 4)) + S.asarray( S.bmat([ [H11, H12, H13, H14], [H21, H22, H23, H24], [H31, H32, H33, H34], [H41, H42, H43, H44], ])) q, W = eig(H) W1, W2, W3, W4 = S.split(W, 4) # # boundary conditions # # x = [R T] # R = [ROx ROy REx REy] # T = [TOx TOy TEx TEy] # b + MR.R = M1p.c # M1.c = M2p.c # ... # ML.c = MT.T # therefore: b + MR.R = (M1p.M1^-1.M2p.M2^-1. 
...).MT.T # missing equations from (46)..(49) in glytsis_rigorous # [b] = [-MR Mtot.MT] [R] # [0] [...........] [T] z = S.zeros_like(q) z[S.where(q.real > 0)] = -thickness D = S.exp(k * q * z) Sy0 = W1 * D[S.newaxis, :] Sx0 = W2 * D[S.newaxis, :] Uy0 = W3 * D[S.newaxis, :] Ux0 = W4 * D[S.newaxis, :] z = thickness * S.ones_like(q) z[S.where(q.real > 0)] = 0 D = S.exp(k * q * z) D1 = S.exp(-1j * k2i[2, :] * thickness) Syd = D1[:, S.newaxis] * W1 * D[S.newaxis, :] Sxd = D1[:, S.newaxis] * W2 * D[S.newaxis, :] Uyd = D1[:, S.newaxis] * W3 * D[S.newaxis, :] Uxd = D1[:, S.newaxis] * W4 * D[S.newaxis, :] Mp[:, :, nlayer] = S.r_[Sx0, Sy0, -1j * Ux0, -1j * Uy0] M[:, :, nlayer] = S.r_[Sxd, Syd, -1j * Uxd, -1j * Uyd] Mtot = S.eye(4 * nood, dtype=complex) for nlayer in range(1, nlayers - 1): Mtot = S.dot(S.dot(Mtot, Mp[:, :, nlayer]), inv(M[:, :, nlayer])) BC_b = S.r_[b, S.zeros_like(b)] BC_A1 = S.c_[-MR, S.dot(Mtot, MT)] BC_A2 = S.asarray( S.bmat([ [ (c1[0] * I - c1[2] * S.dot(CRO_1, ARO)), (c1[1] * I - c1[2] * S.dot(CRO_1, BRO)), ZERO, ZERO, ZERO, ZERO, ZERO, ZERO, ], [ ZERO, ZERO, (DRE - S.dot(S.dot(FRE, CRE_1), ARE)), (ERE - S.dot(S.dot(FRE, CRE_1), BRE)), ZERO, ZERO, ZERO, ZERO, ], [ ZERO, ZERO, ZERO, ZERO, (c3[0] * I - c3[2] * S.dot(CTO_1, ATO)), (c3[1] * I - c3[2] * S.dot(CTO_1, BTO)), ZERO, ZERO, ], [ ZERO, ZERO, ZERO, ZERO, ZERO, ZERO, (DTE - S.dot(S.dot(FTE, CTE_1), ATE)), (ETE - S.dot(S.dot(FTE, CTE_1), BTE)), ], ])) BC_A = S.r_[BC_A1, BC_A2] x = linsolve(BC_A, BC_b) ROx, ROy, REx, REy, TOx, TOy, TEx, TEy = S.split(x, 8) ROz = -S.dot(CRO_1, (S.dot(ARO, ROx) + S.dot(BRO, ROy))) REz = -S.dot(CRE_1, (S.dot(ARE, REx) + S.dot(BRE, REy))) TOz = -S.dot(CTO_1, (S.dot(ATO, TOx) + S.dot(BTO, TOy))) TEz = -S.dot(CTE_1, (S.dot(ATE, TEx) + S.dot(BTE, TEy))) denom = (k1[2] - S.dot(u, k1) * u[2]).real DEO1[:, iwl] = (-( (S.absolute(ROx)**2 + S.absolute(ROy)**2 + S.absolute(ROz)**2) * S.conj(kO1i[2, :]) - (ROx * kO1i[0, :] + ROy * kO1i[1, :] + ROz * kO1i[2, :]) * S.conj(ROz)).real / denom) DEE1[:, iwl] = (-( (S.absolute(REx)**2 + S.absolute(REy)**2 + S.absolute(REz)**2) * S.conj(kE1i[2, :]) - (REx * kE1i[0, :] + REy * kE1i[1, :] + REz * kE1i[2, :]) * S.conj(REz)).real / denom) DEO3[:, iwl] = ( (S.absolute(TOx)**2 + S.absolute(TOy)**2 + S.absolute(TOz)**2) * S.conj(kO3i[2, :]) - (TOx * kO3i[0, :] + TOy * kO3i[1, :] + TOz * kO3i[2, :]) * S.conj(TOz)).real / denom DEE3[:, iwl] = ( (S.absolute(TEx)**2 + S.absolute(TEy)**2 + S.absolute(TEz)**2) * S.conj(kE3i[2, :]) - (TEx * kE3i[0, :] + TEy * kE3i[1, :] + TEz * kE3i[2, :]) * S.conj(TEz)).real / denom # save the results self.DEO1 = DEO1 self.DEE1 = DEE1 self.DEO3 = DEO3 self.DEE3 = DEE3 return self
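# The overflow guard above in isolation: eigenmodes with Re(q) > 0 are
# referenced to the far interface (z = -thickness) so that exp(k*q*z) always
# carries a non-positive real exponent and never overflows (toy numbers,
# local to this example).
import numpy

k, thickness = 2 * numpy.pi / 1.55e-6, 0.5e-6
q = numpy.array([-2.0, 1.5, 0.3j, -0.3j])      # sample eigenvalues
z = numpy.zeros_like(q)
z[q.real > 0] = -thickness
D = numpy.exp(k * q * z)
assert numpy.all(numpy.abs(D) <= 1.0 + 1e-12)  # growing modes stay bounded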
def _findLCFS(rgrid, zgrid, psiRZ, rcent, zcent, psiLCFS, nbbbs=100):
    """Internal function for finding the last closed flux surface based on
    an Equilibrium instance."""
    ang = scipy.linspace(-scipy.pi, scipy.pi, nbbbs)
    plt.ioff()
    fig = plt.figure()
    cs = plt.contour(rgrid, zgrid, scipy.squeeze(psiRZ),
                     scipy.atleast_1d(psiLCFS))
    splines = []
    for i in cs.collections[0].get_paths():
        temp = i.vertices
        # turn points into polar coordinates about the plasma center
        rvals = scipy.sqrt((temp[:, 0] - rcent)**2 + (temp[:, 1] - zcent)**2)
        thetvals = scipy.arctan2(temp[:, 1] - zcent, temp[:, 0] - rcent)

        # find all monotonic sections of contour line in r,theta space
        temp = scipy.diff(thetvals)
        idx = 0
        sign = scipy.sign(temp[0])
        for j in range(len(temp) - 1):
            if (scipy.sign(temp[j + 1]) != sign):
                sign = scipy.sign(temp[j + 1])
                # only write data if the jump at the last point is well resolved
                if (j + 2 - idx > 2):  # abs(thetvals[idx]-thetvals[j+1]) < 7*scipy.pi/4) and
                    plt.plot(thetvals[idx:j + 2], rvals[idx:j + 2], 'o')
                    sortang = scipy.argsort(thetvals[idx:j + 2])
                    splines += [
                        scipy.interpolate.interp1d(thetvals[sortang + idx],
                                                   rvals[sortang + idx],
                                                   kind='linear',
                                                   bounds_error=False,
                                                   fill_value=scipy.inf)
                    ]
                idx = j + 1

        if (len(thetvals) - idx > 2):
            plt.plot(thetvals[idx:], rvals[idx:], 'o')
            sortang = scipy.argsort(thetvals[idx:])
            splines += [
                scipy.interpolate.interp1d(thetvals[sortang + idx],
                                           rvals[sortang + idx],
                                           kind='linear',
                                           bounds_error=False,
                                           fill_value=scipy.inf)
            ]

    # construct a set of angles about the center, and use the splines
    # to find the closest part of the contour to the center at that
    # angle; this is the LCFS. If no value is found, store an infinite
    # value, which is then tossed out.
    outr = scipy.empty((nbbbs,))
    for i in range(nbbbs):
        temp = scipy.inf
        for j in splines:
            pos = j(ang[i])
            if pos < temp:
                temp = pos
        outr[i] = temp

    # remove infinites
    ang = ang[scipy.isfinite(outr)]
    outr = outr[scipy.isfinite(outr)]

    # move back to r,z space
    output = scipy.empty((2, len(ang) + 1))
    output[0, :-1] = outr * scipy.cos(ang) + rcent
    output[1, :-1] = outr * scipy.sin(ang) + zcent
    output[0, -1] = output[0, 0]
    output[1, -1] = output[1, 0]

    # turn off plotting stuff
    plt.ion()
    plt.clf()
    plt.close(fig)
    plt.ioff()

    return output.T
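# Standalone illustration of the contour-extraction step _findLCFS builds
# on: ask matplotlib for a single level line of a toy "flux" field and read
# back its vertices. The function above uses cs.collections[0].get_paths();
# cs.allsegs is an equivalent, version-stable way to get the same vertices.
import numpy
import matplotlib
matplotlib.use('Agg')                        # headless backend, no window
import matplotlib.pyplot as plt

r = numpy.linspace(-1.0, 1.0, 50)
z = numpy.linspace(-1.0, 1.0, 50)
R, Z = numpy.meshgrid(r, z)
psi = R ** 2 + Z ** 2                        # circular "flux surfaces"
cs = plt.contour(R, Z, psi, numpy.atleast_1d(0.25))
ring = cs.allsegs[0][0]                      # (N, 2) vertices of the level line
radii = numpy.hypot(ring[:, 0], ring[:, 1])
assert numpy.allclose(radii, 0.5, atol=0.02)  # radius-0.5 circle recovered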