def __enter__(self):
    """ Set up the `with` block compatibility. """
    from scipy.interpolate import RectBivariateSpline as RBS
    # yy, xx = [sorted(zz) for zz in N.indices(self.lats.shape)]
    yidx, xidx = N.indices(self.lats.shape)
    xx = xidx[0, :]
    yy = yidx[:, 0]
    self.lat_RBS = RBS(x=xx, y=yy, z=self.lats)
    self.lon_RBS = RBS(x=xx, y=yy, z=self.lons)
    print("Ready to receive x/y points for interpolation")
    return self
def semb(workspace, **kwargs):
    print('')

    def onclick(e):
        print("(%.2f, %.2f) ," % (e.ydata, e.xdata), end=' ')

    vels = kwargs['velocities']
    nvels = vels.size
    ns = kwargs['ns']
    result = np.zeros((nvels, ns), 'f')
    loc = np.mean(workspace['cdp'])
    for v in range(nvels):
        panel = workspace.copy()
        kwargs['vels'] = np.ones(kwargs['ns'], 'f') * vels[v]
        nmo(panel, None, **kwargs)
        result[v, :] += np.abs(_stack_gather(panel)['trace'])
    result = result[:, ::kwargs['smoother']]
    window = 5
    kernel = np.ones((window, window), 'f') / (1.0 * window**2)  # boxcar smoother
    result = convolve2d(result, kernel, boundary='symm', mode='same')
    x = vels
    y = np.arange(ns)[::kwargs['smoother']]
    f = RBS(x, y, result, s=0)
    result = f(x, np.arange(ns))
    pylab.imshow(result.T, aspect='auto',
                 extent=(min(vels), max(vels), kwargs['ns'] * kwargs['dt'], 0.),
                 cmap='jet')
    pylab.xlabel('velocity')
    pylab.ylabel('time')
    pylab.title("cdp = %d" % np.unique(loc))
    pylab.colorbar()
    fig = pylab.gcf()
    fig.canvas.mpl_connect('button_press_event', onclick)
    print("vels[%d] = " % np.unique(loc), end=' ')
    pylab.show()
def get_subpixel(res):
    # sub-pixel mesh
    mgx, mgy = np.meshgrid(np.arange(-1, 1.01, 0.1),
                           np.arange(-1, 1.01, 0.1), indexing='xy')
    minval, _, minloc, _ = cv2.minMaxLoc(res)

    rbs_halfsize = 3  # size of peak area used for spline for subpixel peak loc
    rbs_order = 4     # polynomial order for subpixel rbs interpolation of peak location

    if ((np.array([n - rbs_halfsize for n in minloc]) >= np.array([0, 0])).all()
            & (np.array([(n + rbs_halfsize) for n in minloc]) < np.array(list(res.shape))).all()):
        rbs_p = RBS(range(-rbs_halfsize, rbs_halfsize + 1),
                    range(-rbs_halfsize, rbs_halfsize + 1),
                    res[(minloc[1] - rbs_halfsize):(minloc[1] + rbs_halfsize + 1),
                        (minloc[0] - rbs_halfsize):(minloc[0] + rbs_halfsize + 1)],
                    kx=rbs_order, ky=rbs_order)
        b = rbs_p.ev(mgx.flatten(), mgy.flatten())
        mml = cv2.minMaxLoc(b.reshape(21, 21))
        # mgx,mgy: meshgrid x,y of common area
        # sp_delx,sp_dely: subpixel delx,dely
        sp_delx = mgx[mml[3][0], mml[3][1]]
        sp_dely = mgy[mml[3][0], mml[3][1]]
    else:
        sp_delx = 0.0
        sp_dely = 0.0
    return sp_delx, sp_dely
def __init__(self, data, lrho, le, ratios=None, indep=None, dens_pow=-1,
             fn=None, add_var=None):
    super(EOS, self).__init__()
    self.fn = fn
    if ratios is None:
        ratios = np.ones(data.shape[0])
    lr = np.log(ratios)
    self._lr = lr
    if indep is None:
        indep = 'ei'
    self.indep = indep
    self.data = data
    self.lrho = lrho
    self.le = le
    self.dens_pow = dens_pow
    var = ['p', 'e', 'asq_p']
    if add_var is not None:
        var.extend(add_var)
    d = {
        var[i]: RBS(lrho, le + lr[i], np.log10(data[i].T), kx=1, ky=1).ev
        for i in range(len(var))
    }
    self._interp_dict = d
def __getitem__(self, idx):
    grid = self.data()[idx].values
    # Need to make lat increasing for interpolation
    ip = RBS(self.inlat[::-1], self.inlon, grid[::-1])
    # print("interpolating")
    igrid = ip(self.outlat[::-1], self.outlon)[::-1]
    return igrid
def getSky(fFile):
    from scipy.interpolate import RectBivariateSpline as RBS
    data = fFile[2].data
    sky = data['allsky'][0]
    xi = np.arange(sky.shape[0])
    yi = np.arange(sky.shape[1])
    interp = RBS(xi, yi, sky)
    return interp(data['yinterp'], data['xinterp'])
def RectBivariateSpline(DEM, **kwargs):
    if type(DEM) is not GeoImg:
        raise TypeError('DEM must be a GeoImg')
    X, Y = DEM.xy()
    f = RBS(Y[0:, 0], X[0, 0:], DEM.img)
    out_dem = f.ev(Y, X)  # might have to flip upside-down?
    return DEM.copy(new_raster=out_dem)
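# The snippets above mix the two evaluation modes of RectBivariateSpline, so a
# minimal standalone sketch of the convention may help: the first constructor
# argument indexes the first axis (rows) of z, the second indexes columns, and
# calling the spline with two 1-D arrays returns a full grid while .ev() (or
# grid=False) evaluates point by point. This is illustrative only and not part
# of any of the surrounding projects.
import numpy as np
from scipy.interpolate import RectBivariateSpline as RBS

rows = np.arange(5.0)            # coordinate along axis 0 of z
cols = np.arange(7.0)            # coordinate along axis 1 of z
z = rows[:, None] + 0.1 * cols   # shape (5, 7); coordinates must be strictly increasing

spl = RBS(rows, cols, z)

grid_vals = spl(rows[:3], cols[:4])                          # (3, 4) grid of values
point_vals = spl.ev([1.5, 2.5], [0.25, 6.0])                 # two scattered points
same_point_vals = spl([1.5, 2.5], [0.25, 6.0], grid=False)   # equivalent to .ev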
def interp_latlon(self, xcol, yrow):
    # points = (N.indices(self.xx.shape)[0].flat, N.indices(self.yy.shape)[0].flat)
    # clat = scipy.interpolate.griddata(points=(self.xx.flat, self.yy.flat),
    # clat = scipy.interpolate.griddata(points=points,
    #                                   values=self.lats.flat, xi=(xcol, yrow), method='linear')
    # clon = scipy.interpolate.griddata(points=(self.xx.flat, self.yy.flat),
    # clon = scipy.interpolate.griddata(points=points,
    #                                   values=self.lons.flat, xi=(xcol, yrow), method='linear')
    # pdb.set_trace()
    if self.rbs is None:
        self.rbs = dict()
        x = N.arange(self.raw_data.shape[0])
        y = N.arange(self.raw_data.shape[1])
        self.rbs['lats'] = RBS(x=x, y=y, z=self.lats)
        self.rbs['lons'] = RBS(x=x, y=y, z=self.lons)
    clat = self.rbs['lats'](yrow, xcol)
    clon = self.rbs['lons'](yrow, xcol)
    return clat, clon
def resample(UV_map, vts):
    h, w, c = UV_map.shape
    vts *= np.array([[h - 1, w - 1]])
    vt_3d = np.zeros((vts.shape[0], 3), dtype=vts.dtype)
    for i in range(c):
        spline_function = RBS(x=np.arange(h), y=np.arange(w), z=UV_map[:, :, i])
        # pointwise evaluation; a grid evaluation would not fit the 1-D slot
        vt_3d[:, i] = spline_function(vts[:, 0], vts[:, 1], grid=False)
    return vt_3d
def scale_field(self, f):
    """
    Scale the field onto the calculated meshgrid. The interpolation uses
    rectangular bivariate splines, as implemented by the scipy class
    `RectBivariateSpline
    <https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.html>`_.

    :param f: field to be interpolated
    :returns: field after interpolation
    """
    rbs = RBS(self._tx, self._ty, f)
    return rbs.ev(self._out_x, self._out_y)
def __init__(self, photons, assume_symmetry=[], binning='auto'):
    self.ndim = 1 + photons.ndim - len(assume_symmetry)
    if binning == 'auto':
        binning = np.linspace(
            0.0, 1.0,
            int((len(photons) * photons.n_steps)**(1.0 / (self.ndim + 1.0))) + 1)
    if type(binning) == int:
        binning = np.linspace(0.0, 1.0, binning)
    t = photons.t.flatten()
    x = photons.x.reshape(-1, photons.ndim)
    r = np.linalg.norm(x, axis=-1)
    vals = [t, r]
    self.bins = [binning * np.max(t), binning * np.max(r)]
    self.signature = 'f(t,r'
    if photons.ndim == 2:
        if 'phi' not in assume_symmetry:
            phi = np.arccos(x[:, 0] / r)
            vals.append(phi)
            self.bins.append(binning * 2.0 * np.pi)
            self.signature += ',phi'
    elif photons.ndim == 3:
        if 'theta' not in assume_symmetry:
            theta = np.arccos(x[:, 2] / r)
            vals.append(theta)
            self.bins.append(binning * np.pi)
            self.signature += ',theta'
        if 'phi' not in assume_symmetry:
            # two-argument arctan2 for the azimuthal angle
            phi = np.arctan2(x[:, 1], x[:, 0])
            vals.append(phi)
            self.bins.append(2.0 * np.pi * binning - np.pi)
            self.signature += ',phi'
    self.signature += ')'
    self.H, _ = np.histogramdd(vals, self.bins)
    self.t = [(b[1:] + b[:-1]) * 0.5 for b in self.bins]
    if self.ndim == 2:
        self.interpolator = RBS(*self.t, np.log(self.H + 1e-8),
                                bbox=[0.0, np.max(t), 0.0, np.max(r)],
                                kx=1, ky=1)
    else:
        self.interpolator_ = RGI(self.t, self.H,
                                 bounds_error=False, fill_value=0.0)
        self.interpolator = lambda *x: self.interpolator_(x)
def interpolate_to_wrfgrid(arr, new_shp):
    from scipy.interpolate import RectBivariateSpline as RBS
    old_shp = arr.shape
    xx = N.arange(old_shp[1])
    yy = N.arange(old_shp[0])
    # z must have shape (len(first arg), len(second arg)), so rows come first
    rbs = RBS(yy, xx, arr)
    xx_new = N.arange(new_shp[1])
    yy_new = N.arange(new_shp[0])
    # evaluate on the new grid
    newarr = rbs(yy_new, xx_new)
    # pdb.set_trace()
    return newarr
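# A generic sketch of resampling a 2-D array to a new shape with
# RectBivariateSpline, in the same spirit as interpolate_to_wrfgrid above.
# Mapping the new indices onto the old index range with linspace is an
# assumption made here for illustration; the real WRF regridding would use the
# actual grid coordinates rather than bare array indices.
import numpy as N
from scipy.interpolate import RectBivariateSpline as RBS

def resample_to_shape(arr, new_shp):
    ny, nx = arr.shape
    rbs = RBS(N.arange(ny), N.arange(nx), arr)
    yy_new = N.linspace(0, ny - 1, new_shp[0])
    xx_new = N.linspace(0, nx - 1, new_shp[1])
    return rbs(yy_new, xx_new)

demo = N.random.rand(20, 30)
print(resample_to_shape(demo, (40, 60)).shape)   # (40, 60)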
def load_Gd(self, **kwargs):
    '''
    Load all the material data needed to run a Gd material. The material data
    is based on MFT simulations. You can shift the properties by setting the
    shift keyword, or set Tcurie to set the Curie temperature.
    '''
    if 'shift' in kwargs:
        shift = kwargs['shift']
    elif 'Tcurie' in kwargs:
        shift = kwargs['Tcurie'] - 273
    else:
        shift = 0

    self.name = 'Gd - mft'
    self.density = 7900        # kg/m^3
    self.conductivity = 10.9   # W/mK

    # Specific heat
    cp_data = np.loadtxt('./materials/Gd_mft/gdcp_py.txt')
    # Magnetization
    mag_data = np.loadtxt('./materials/Gd_mft/gdmag_py.txt')
    # Entropy
    stot_data = np.loadtxt('./materials/Gd_mft/gdstot_py.txt')

    # Specific heat
    HintCp = cp_data[0, 1:]           # internal field values
    TempCp = cp_data[1:, 0] - shift   # temperature values
    # Build interpolation function
    self.mCp = RBS(TempCp, HintCp, cp_data[1:, 1:], ky=1, kx=1)

    # Magnetization
    HintMag = mag_data[0, 1:]           # internal field values
    TempMag = mag_data[1:, 0] - shift   # temperature values
    # Build interpolation function
    self.mMag = RBS(TempMag, HintMag, mag_data[1:, 1:], ky=1, kx=1)

    # Entropy
    HintSTot = stot_data[0, 1:]           # internal field values
    TempSTot = stot_data[1:, 0] - shift   # temperature values
    # Build interpolation function
    self.mStot = RBS(TempSTot, HintSTot, stot_data[1:, 1:], ky=1, kx=1)
def get_radar_verif(self, utc, datapath):
    RADAR = Radar(utc, datapath)
    Nlim, Elim, Slim, Wlim = self.M['WRFOut'].get_limits()
    wlats = self.M['WRFOut'].lats1D
    wlons = self.M['WRFOut'].lons1D
    data, lats, lons = RADAR.get_subdomain(Nlim, Elim, Slim, Wlim)
    dBZ = RADAR.get_dBZ(data)
    dBZ_flip = N.flipud(dBZ)
    from scipy.interpolate import RectBivariateSpline as RBS
    rbs = RBS(lats[::-1], lons, dBZ_flip)
    dBZ_interp = rbs(wlats, wlons,)  # grid=True)
    # import pdb; pdb.set_trace()
    # fig, ax = plt.subplots(1)
    # ax.imshow(dBZ_interp)
    # ax.invert_yaxis()
    # fig.tight_layout()
    # fig.savefig('/home/jrlawson/public_html/bowecho/hires/SAL/dBZ_output.png')
    return dBZ_interp
def get_subpixel(res, how='min'):
    assert how in ['min', 'max'], "have to choose min or max"

    # sub-pixel mesh
    mgx, mgy = np.meshgrid(np.arange(-1, 1.01, 0.1),
                           np.arange(-1, 1.01, 0.1), indexing='xy')

    if how == 'min':
        peakval, _, peakloc, _ = cv2.minMaxLoc(res)
        mml_ind = 2
    else:
        _, peakval, _, peakloc = cv2.minMaxLoc(res)
        mml_ind = 3

    rbs_halfsize = 3  # size of peak area used for spline for subpixel peak loc
    rbs_order = 4     # polynomial order for subpixel rbs interpolation of peak location

    if ((np.array([n - rbs_halfsize for n in peakloc]) >= np.array([0, 0])).all()
            & (np.array([(n + rbs_halfsize) for n in peakloc]) < np.array(list(res.shape))).all()):
        rbs_p = RBS(range(-rbs_halfsize, rbs_halfsize + 1),
                    range(-rbs_halfsize, rbs_halfsize + 1),
                    res[(peakloc[1] - rbs_halfsize):(peakloc[1] + rbs_halfsize + 1),
                        (peakloc[0] - rbs_halfsize):(peakloc[0] + rbs_halfsize + 1)],
                    kx=rbs_order, ky=rbs_order)
        b = rbs_p.ev(mgx.flatten(), mgy.flatten())
        mml = cv2.minMaxLoc(b.reshape(21, 21))
        # mgx,mgy: meshgrid x,y of common area
        # sp_delx,sp_dely: subpixel delx,dely
        sp_delx = mgx[mml[mml_ind][0], mml[mml_ind][1]]
        sp_dely = mgy[mml[mml_ind][0], mml[mml_ind][1]]
    else:
        sp_delx = 0.0
        sp_dely = 0.0
    return sp_delx, sp_dely
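# A small usage sketch for get_subpixel above, assuming numpy, cv2 and
# RectBivariateSpline (imported as RBS) are available as in the surrounding
# code. It builds a smooth surface whose maximum sits between integer pixels
# and asks for the sub-pixel offset relative to the integer peak location.
yy, xx = np.mgrid[0:41, 0:41].astype(float)
surface = np.exp(-((xx - 20.3)**2 + (yy - 19.6)**2) / 25.0)  # peak near (20.3, 19.6)

dx, dy = get_subpixel(surface, how='max')
print(dx, dy)   # sub-pixel offsets, each within (-1, 1)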
with open('%s/data/kap_data/lowT_fa05_gs98_%s.data'
          % (os.environ['MESA_DIR'], key), 'r') as f:
    lines = f.readlines()
opac2 = np.loadtxt(lines[7:])[:, 1:].T
logT2 = np.loadtxt(lines[7:])[:, 0]
logR2 = np.loadtxt(lines[5:6])

Rmin = min(np.min(logR1), np.min(logR2))
Rmax = max(np.max(logR1), np.max(logR2))
Tmin = min(np.min(logT1), np.min(logT2))
Tmax = max(np.max(logT1), np.max(logT2))

i1 = np.where(logT1 >= 4.)[0][0]
i2 = np.where(logT2 < 4.)[0][-1]

interpolator1 = RBS(logR1, logT1, opac1)
interpolator2 = RBS(logR2, logT2, opac2)

for rho in np.arange(-10., 0.1, 2.):
    T = np.linspace(Tmin, Tmax, 200)
    R = rho - 3. * T + 18.
    T = T[(R > Rmin) & (R < Rmax)]
    R = R[(R > Rmin) & (R < Rmax)]
    # c = np.hstack((interpolator2(R[:i2], T[:i2]), interpolator1(R[i1:], T[i1:])))
    c = ([interpolator2(Ri, Ti) for (Ri, Ti) in zip(R, T) if Ti < 4.]
         + [interpolator1(Ri, Ti) for (Ri, Ti) in zip(R, T) if Ti >= 4.])
    c = np.squeeze(c)
    pl.plot(T, c, 'k-')

pl.xlabel(r'$\log_{10}(T/\mathrm{K})$')
def interpolate_slice(x, y, Z, NP):
    mod_interp = RBS(x, y, Z)
    interp_vals = mod_interp(np.linspace(x[0], x[-1], NP),
                             np.linspace(y[0], y[-1], NP))
    return interp_vals
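# Usage sketch for interpolate_slice above, assuming numpy and
# RectBivariateSpline (as RBS) are imported as in the surrounding code:
# a coarse slice sampled on (x, y) is refined onto an NP x NP grid.
x = np.linspace(0.0, 1.0, 11)
y = np.linspace(0.0, 2.0, 21)
Z = np.outer(np.sin(2 * np.pi * x), np.cos(np.pi * y))   # shape (11, 21)

fine = interpolate_slice(x, y, Z, NP=101)
print(fine.shape)   # (101, 101)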
y0 = np.linspace(-H/2, H/2, n_y)
X, Y = np.meshgrid(x0, y0)

X0v = [-a for j in np.linspace(-a, a, 10)]
Y0v = [j for j in np.linspace(-a, a, 10)]
p0x = [1 for j in X0v]
p0y = [0 for j in Y0v]
U0v = [[X0v[j], p0x[j], Y0v[j], p0y[j]] for j in np.arange(len(X0v))]

FS = 20
for i in range(0, len(t)):
    output = solve_cf(m0, m1, ci).reshape(n_x, n_y)
    ci = solve_cf(m0, m1, ci)
    bvspline = RBS(x0, y0, output)
    plt.clf()
    for k in U0v:
        xoutput, youtput = Up(k, 500, 0.005)
        plt.plot(youtput, -xoutput, 'r-', lw=2)
    plt.rcParams.update({'font.size': FS})
    plt.xlabel("x", fontsize=FS)
    plt.ylabel("y", fontsize=FS)
    plt.imshow(output, cmap='Blues', vmin=0, vmax=1,
               extent=[-H/2, H/2, -W/2, W/2])
    plt.colorbar()
    # plt.xlim(0, W)
def __init__(self, x, y, z, **kwargs):
    self.__rbs = RBS(x, y, z, **kwargs)
    return
    return (pb.sigma_j(RL, j, 0., **cosmo)[0])**2

dsig1m = n.load('sig1m.npz')
sig1mRl, sig1marr = dsig1m['arr_0'], dsig1m['arr_1']
fs1m = interp1d(sig1mRl, sig1marr, kind='cubic')  # interpolated from 0.04 to 100000

def sig1m(RL):
    return fs1m(RL)

dSX = n.load('logSX.npz')
lSXRl, lSXR0, arrSX = dSX['arr_0'], dSX['arr_1'], dSX['arr_2']
fSX = RBS(lSXRl, lSXR0, arrSX)

def SX(RL, R0):
    res = fSX(n.log(RL), n.log(R0))
    if res.size > 1:
        print('Warning: SX called with array instead of single number')
    return res[0][0]

ds1mX = n.load('logsig1mX.npz')
ls1mXRl, ls1mXR0, arrs1mX = ds1mX['arr_0'], ds1mX['arr_1'], ds1mX['arr_2']
fs1mX = RBS(ls1mXRl, ls1mXR0, arrs1mX)

def sig1mX(RL, R0):
def __init__( self, acc_params, lag_params, acc_model_number, lag_model_number, errorbar=1.0, ): """Constructor for the trough object. Args: acc_params (array like): model parameters for accumulation acc_model_number (int): index of the accumulation model lag_params (array like): model parameters for lag(t) lag_model_number (int): index of the lag(t) model errorbar (float): errorbar of the datapoints in pixels; default=1 """ # Load in all data with pkg_resources.path(__package__, "Insolation.txt") as path: insolation, ins_times = np.loadtxt(path, skiprows=1).T with pkg_resources.path(__package__, "R_lookuptable.txt") as path: retreats = np.loadtxt(path).T with pkg_resources.path(__package__, "TMP_xz.txt") as path: xdata, ydata = np.loadtxt(path, unpack=True) # TODO: remember what this means... lol # I'm pretty sure one file has temp data and the other # has real data. # xdata, ydata = np.loadtxt(here+"/RealXandZ.txt") # Trough angle self.angle_degrees = 2.9 # degrees self.sin_angle = np.sin(self.angle_degrees * np.pi / 180.0) self.cos_angle = np.cos(self.angle_degrees * np.pi / 180.0) self.csc_angle = 1.0 / self.sin_angle self.sec_angle = 1.0 / self.cos_angle self.tan_angle = self.sin_angle / self.cos_angle self.cot_angle = 1.0 / self.tan_angle # Set up the trough model self.acc_params = np.array(acc_params) self.lag_params = np.array(lag_params) self.acc_model_number = acc_model_number self.lag_model_number = lag_model_number self.errorbar = errorbar self.meters_per_pixel = np.array([500.0, 20.0]) # meters per pixel # Positive times are now in the past ins_times = -ins_times # Attach data to this object self.insolation = insolation self.ins_times = ins_times self.retreats = retreats self.xdata = xdata * 1000 # meters self.ydata = ydata # meters self.Ndata = len(self.xdata) # number of data points # Create splines self.lags = np.arange(16) + 1 self.lags[0] -= 1 self.lags[-1] = 20 self.ins_spline = IUS(ins_times, insolation) self.iins_spline = self.ins_spline.antiderivative() self.ins2_spline = IUS(ins_times, insolation**2) self.iins2_spline = self.ins2_spline.antiderivative() self.ret_spline = RBS(self.lags, self.ins_times, self.retreats) self.re2_spline = RBS(self.lags, self.ins_times, self.retreats**2) # Pre-calculate the lags at all times self.lags_t = self.get_lag_at_t(self.ins_times) self.compute_splines()
                lminInd = list(arglmin)
                lminInd.remove(minInd)   # list.remove() works in place and returns None
                lmin = rhsArray[lminInd]
            elif len(arglmin) == 0:  # all values are the same
                lmin = np.nan
                minInd = 0
            else:
                print(error)
            extremaDict['min'][(i, j, k)] = np.min(rhsArray[k, maxInd])
            extremaDict['lmin'][(i, j, k)] = lmin
            extremaDict['lmax'][(i, j, k)] = lmax
            extremaDict['max'][(i, j, k)] = np.max(rhsArray[k, minInd])
            extremaDict['nu_min'][(i, j, k)] = v[minInd]
            extremaDict['nu_lmin'][(i, j, k)] = v[lminInd]
            extremaDict['nu_lmax'][(i, j, k)] = v[lmaxInd]
            extremaDict['nu_max'][(i, j, k)] = v[maxInd]
        rbsDict[(i, j)] = RBS(e, v, rhsArray)  # finds min, max, lmin, lmax of RBS vs nu at specific e

finalMemory = dict(psutil.virtual_memory()._asdict())['used'] / (1024.0**3.)
t1 = time.time()
usedMemory = finalMemory - initMemory
usedTime = t1 - t0
print('For RBS')
print('Memory Used (GB): ' + str(usedMemory) + ' for '
      + str(len(w) * len(inc) * len(e)) + ' points')
print('Memory Used Per vbs equivalent (MB/cbs): '
      + str(usedMemory / (len(w) * len(inc) * len(e) * 10.**3.)))
print('Time Used (s): ' + str(usedTime))
print('Time Used Per cbs equivalent (): ' + str(usedTime /
def __init__( self, acc_model: Union[str, Model], lag_model: Union[str, Model], acc_params: Optional[List[float]] = None, lag_params: Optional[List[float]] = None, tmp: Optional[int] = None, errorbar: float = 1.0, angle: float = 2.9, ): """Constructor for the trough object. Args: acc_params (array like): model parameters for accumulation acc_model_name (str): name of the accumulation model (linear, quadratic, etc) lag_params (array like): model parameters for lag(t) lag_model_name (str): name of the lag(t) model (constant, linear, etc) errorbar (float, optional): errorbar of the datapoints in pixels; default=1 angle (float, optional): south-facing slope angle in degrees. Default is 2.9. """ # Load all data retreat_times, retreats, lags = load_retreat_data() retreat_times = -retreat_times self.angle = angle self.errorbar = errorbar self.meters_per_pixel = np.array([500.0, 20.0]) # meters per pixel # Create submodels if isinstance(acc_model, str): # name of existing model is given if "obliquity" in acc_model: #load obliquity data and times obliquity, obl_times = load_obliquity_data() obl_times = -obl_times.astype(float) #remove zeros from time array condZero = obl_times == 0 indx = np.array(range(len(condZero))) indxZero = indx[condZero] obl_times[indxZero] = 1e-10 acc_time, acc_y = obl_times, obliquity else: insolation, ins_times = load_insolation_data(tmp) ins_times = -ins_times.astype(float) #remove zeros from time array condZero = ins_times == 0 indx = np.array(range(len(condZero))) indxZero = indx[condZero] ins_times[indxZero] = 1e-10 acc_time, acc_y = ins_times, insolation self.accuModel = ACCUMULATION_MODEL_MAP[acc_model](acc_time, acc_y, *acc_params) else: # custom model is given self.accuModel = acc_model # Lag submodel assert isinstance(lag_model, (str, Model)), "lag_model must be a string or Model" if isinstance(lag_model, str): # name of existing model is given self.lagModel = LAG_MODEL_MAP[lag_model](*lag_params) else: # custom model was given self.lagModel = lag_model # Call super() with the acc and lag models. This # way their parameters are visible here. super().__init__(sub_models=[self.accuModel, self.lagModel]) # Create data splines of retreat of ice (no dependency # on model parameters) self.ret_data_spline = RBS(lags, retreat_times, retreats) self.re2_data_spline = RBS(lags, retreat_times, retreats**2) # Calculate the model of retreat of ice per time self.lag_at_t = self.lagModel.get_lag_at_t(self.accuModel._times) self.retreat_model_t = self.ret_data_spline.ev(self.lag_at_t, self.accuModel._times) # Compute the Retreat(time) spline self.retreat_model_t_spline = IUS(self.accuModel._times, self.retreat_model_t)
def apply_s(self, arr_in):
    # Scaling in the vertical
    yy_out = self.yy_in * self.r
    rbs = RBS(yy_out, self.xx_in, arr_in)
    nn = rbs(self.yy_in, self.xx_in)
    return nn.round().astype(int)
def apply_r(self, arr_in):
    # Scaling in the horizontal
    xx_out = self.xx_in * self.r
    rbs = RBS(self.yy_in, xx_out, arr_in)
    nn = rbs(self.yy_in, self.xx_in)
    return nn.round().astype(int)
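# A standalone sketch of the scaling trick used by apply_s / apply_r above:
# attach the data to coordinates that have been multiplied by the factor r,
# then evaluate back on the original coordinates, which stretches (r > 1) or
# shrinks (r < 1) the field about the origin along that axis. Plain numpy and
# scipy only; the holder class and its attributes are not reproduced here.
import numpy as np
from scipy.interpolate import RectBivariateSpline as RBS

yy_in = np.arange(50, dtype=float)
xx_in = np.arange(80, dtype=float)
arr_in = np.random.randint(0, 5, size=(50, 80)).astype(float)
r = 1.5

rbs = RBS(yy_in, xx_in * r, arr_in)   # horizontal stretch, as in apply_r
stretched = rbs(yy_in, xx_in)         # same shape as arr_in
print(stretched.shape)                # (50, 80)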
def interp_wrap(xc, yc, t_0, X_, u):
    orde = 3
    interp_ = RBS(yc.values, xc.values, u.values[t_0, :, :], kx=orde, ky=orde)
    return interp_.ev(X_[1, :], X_[0, :])
def pppcDM_EWinterp(fname, channel):
    # Load data from the PPPC4 tables. The file is in the same directory as
    # this script. The function creates a grid with values of dN/dE (not
    # dN/dLog10x), and the interpolation function is built from:
    #   - the DM candidate mass (in GeV)
    #   - the ratio of gamma-ray energy and mass
    # The tables used in this script include electroweak corrections.
    data = np.genfromtxt(fname, names=True, dtype=None)

    # Tuple with column names from the PPPC4 tables; this also includes the channels
    dchannels = data.dtype.names

    # Check if channel is in dchannels; if not, then exit
    if channel not in dchannels:
        print('Channel not available in PPPC4-DM tables.\n'
              'The available channels are:')
        print(dchannels)
        sys.exit('Unknown channel')

    # Get values of DM mass and x_steps to compute the number of gamma-rays
    masses = np.unique(data['mDM'])
    xvalues = np.unique(data['Log10x'])
    xvalues = np.power(10, xvalues)

    # This is needed to create the interpolating function
    dmgrid = np.zeros((masses.size, xvalues.size))

    # Filling the grid
    for mindex, mass in enumerate(masses):
        # Get the indices where data['mDM'] is equal to mass
        indices = np.where(data['mDM'] == mass)
        phi = data[channel][indices]
        # Loop to extract dm_flux
        for counter, omega in enumerate(phi):
            xval = xvalues[counter]
            # Getting the flux and filling the grid
            dmgrid[mindex][counter] = omega / mass / np.log(10) / xval

    interpolator = RBS(masses, xvalues, dmgrid, kx=1, ky=1)
    return interpolator
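# A usage sketch for pppcDM_EWinterp above. The file name and channel below
# are placeholders for whatever PPPC4-with-EW-corrections table the script
# actually ships with; evaluating the returned spline at (mass, E/mass) gives
# dN/dE for that channel.
spec = pppcDM_EWinterp('AtProduction_gammas.dat', 'Tau')   # hypothetical file/channel
mass = 1000.0                 # GeV
x = np.logspace(-5, 0, 50)    # E / mass
dNdE = spec(mass, x)          # shape (1, 50): dN/dE on the (mass, x) grid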
def load_Bmns(folder='./', phasing=0., slice=0, cur_up=1., cur_low=1., cur_mid=None, probeg_up=False, probeg_low=False, probeg_mid=False, machine=None, iplasma=None, code='m3dc1', ntor=None, phase=False, Jmn=False): if machine is 'diiid': conv = 1.0 elif machine is 'aug': conv = -1.0 elif machine is 'kstar': conv = 1.0 if phase is not True: phase = False phasing = np.asarray(phasing) if code is 'all': Bc1 = load_Bmns(folder=folder, phasing=phasing, slice=slice, cur_up=cur_up, cur_low=cur_low, machine=machine, iplasma=iplasma, code='m3dc1', ntor=ntor) Bmars = load_Bmns(folder=folder, phasing=phasing, slice=slice, cur_up=cur_up, cur_low=cur_low, machine=machine, iplasma=iplasma, code='mars', ntor=ntor) Bipec = load_Bmns(folder=folder, phasing=phasing, slice=slice, cur_up=cur_up, cur_low=cur_low, machine=machine, iplasma=iplasma, code='ipec', ntor=ntor) return (Bc1, Bmars, Bipec) elif code is 'm3dc1': if Jmn: file_up = folder + '/jmn3_upper-' + str(slice) + '.cdf' file_low = folder + '/jmn3_lower-' + str(slice) + '.cdf' else: file_up = folder + '/bmn_upper-' + str(slice) + '.cdf' file_low = folder + '/bmn_lower-' + str(slice) + '.cdf' ds_up = xr.open_dataset(file_up) ds_low = xr.open_dataset(file_low) ntor = ds_up.attrs['ntor'] if probeg_up: fac = 2. else: fac = geofac(machine=machine, ntor=ntor) cur_up = fac * cur_up if probeg_low: fac = 2. else: fac = geofac(machine=machine, ntor=ntor) cur_low = fac * cur_low m = ds_up.m try: print("Using Bmn netCDF version " + str(ds_up.version)) Psi = ds_up.psi_norm except AttributeError: print("Using Bmn netCDF version 0") Psi = ds_up.psi Bup = ds_up.bmn_real.data + 1j * ds_up.bmn_imag.data Blow = ds_low.bmn_real.data + 1j * ds_low.bmn_imag.data if cur_mid is not None: if Jmn: file_mid = folder + '/jmn_middle-' + str(slice) + '.cdf' else: file_mid = folder + '/bmn_middle-' + str(slice) + '.cdf' ds_mid = xr.open_dataset(file_mid) if probeg_mid: fac = 2. 
else: fac = geofac(machine=machine, ntor=ntor) cur_mid = fac * cur_mid Bmid = ds_mid.bmn_real.data + 1j * ds_mid.bmn_imag.data if (iplasma is not None) and (slice > 0): if Jmn: file_up = folder + '/jmn3_upper-0.cdf' file_low = folder + '/jmn3_lower-0.cdf' else: file_up = folder + '/bmn_upper-0.cdf' file_low = folder + '/bmn_lower-0.cdf' ds_vu = xr.open_dataset(file_up) ds_vl = xr.open_dataset(file_low) Bup -= ds_vu.bmn_real.data Bup -= 1j * ds_vu.bmn_imag.data Blow -= ds_vl.bmn_real.data Blow -= 1j * ds_vl.bmn_imag.data if cur_mid is not None: if Jmn: file_mid = folder + '/jmn3_middle-0.cdf' else: file_mid = folder + '/bmn_middle-0.cdf' ds_vm = xr.open_dataset(file_mid) Bmid -= ds_vm.bmn_real.data Bmid -= 1j * ds_vm.bmn_imag.data q = ds_up.q.data elif code is 'mars': file = folder + '/mars_bmn.nc' dset = xr.open_dataset(file) m = dset.m Psi = dset.s**2.0 if slice is 0: Bup = dset.vacuum_upper_real.data + 1j * dset.vacuum_upper_imag.data Blow = dset.vacuum_lower_real.data + 1j * dset.vacuum_lower_imag.data elif slice is 1: Bup = dset.total_upper_real.data + 1j * dset.total_upper_imag.data Blow = dset.total_lower_real.data + 1j * dset.total_lower_imag.data if (iplasma is not None): Bup -= dset.vacuum_upper_real.data Bup -= 1j * dset.vacuum_upper_imag.data Blow -= dset.vacuum_lower_real.data Blow -= 1j * dset.vacuum_lower_imag.data else: return NotImplemented Psi1 = dset.q_s**2.0 q1 = dset.q f = interp1d(Psi1, q1, bounds_error=False) q = f(Psi) # flip signs because theta is flipped conv = -conv m = -m q = -q elif code is 'ipec': if (slice == 0) or (iplasma is not None): # get the vacuum field base = folder + 'ipec_vbnormal' sr = 7 # skip this many rows cr = 3 # column of real Bmn (zero-based) ci = 4 # column of imaginary Bmn (zero-based) (Pv, mv, Bvu, Bvl, qv) = B_ipec(base, sr, cr, ci) if slice == 1: # get the total field base = folder + 'ipec_xbnormal' sr = 8 # skip this many rows cr = 7 # column of real Bmn ci = 8 # column of imaginary Bmn (Pt, mt, Btu, Btl, qt) = B_ipec(base, sr, cr, ci) if slice == 0: # just the vacuum field (Psi, m, Bup, Blow, q) = (Pv, mv, Bvu, Bvl, qv) elif (slice == 1) and (iplasma is None): # just the total field (Psi, m, Bup, Blow, q) = (Pt, mt, Btu, Btl, qt) else: # we need to subtract the vacuum from the total field Rv = RBS(Pv, mv, Bvu.real) Iv = RBS(Pv, mv, Bvu.imag) Bvu = Rv(Pt, mt) + 1j * Iv(Pt, mt) Rv = RBS(Pv, mv, Bvl.real) Iv = RBS(Pv, mv, Bvl.imag) Bvl = Rv(Pt, mt) + 1j * Iv(Pt, mt) (Psi, m, Bup, Blow, q) = (Pt, mt, Btu - Bvu, Btl - Bvl, qt) conv = -conv m = -m q = -q else: return NotImplemented # Quantities used for all code results cur = xr.DataArray(np.cos(pi * phasing / 180.) + conv * 1j * np.sin(pi * phasing / 180.), coords=[('phasing', phasing)]) Bmn_up = cur_up * xr.DataArray(Bup, coords=[('Psi', Psi), ('m', m)]) Bmn_low = cur_low * xr.DataArray(Blow, coords=[('Psi', Psi), ('m', m)]) Bmn = cur * Bmn_up + Bmn_low if cur_mid is not None: cur2 = xr.DataArray(np.cos(pi * phasing / 180.) + conv * 1j * np.sin(pi * phasing / 180.), coords=[('phasing2', phasing)]) Bmn_mid = cur_mid * xr.DataArray(Bmid, coords=[('Psi', Psi), ('m', m)]) Bmn = Bmn + cur2 * Bmn_mid if phase: Bmn = xru.angle(Bmn, deg=True) % 360. else: Bmn = np.abs(Bmn) q = xr.DataArray(q, coords=[('Psi', Psi)]) if Jmn: Bmns = xr.Dataset({'Jmn': Bmn, 'q': q}) else: Bmns = xr.Dataset({'Bmn': Bmn, 'q': q}) Bmns.attrs['ntor'] = ntor Bmns.attrs['phase'] = phase return Bmns
def _setup_interpolants(self):
    """Constructs interpolants of the appropriate fields"""
    nhH, nhW = 1.0 / self._a - 1.0, self._ar / self._a - 1.0
    bbox = [-nhW, nhW, -nhH, nhH]
    self._Cr_RBS = RBS(self._rs, self._zs, self._aR_data[2, :, :], bbox=bbox)
    self._Cz_RBS = RBS(self._rs, self._zs, self._aR_data[3, :, :], bbox=bbox)
    self._Lr_RBS = RBS(self._rs, self._zs, self._aR_data[4, :, :], bbox=bbox)
    self._Lz_RBS = RBS(self._rs, self._zs, self._aR_data[5, :, :], bbox=bbox)
    self._Sr_RBS = RBS(self._rs, self._zs, self._aR_data[6, :, :], bbox=bbox)
    self._Sz_RBS = RBS(self._rs, self._zs, self._aR_data[7, :, :], bbox=bbox)
    self._Up_RBS = RBS(self._rs, self._zs, self._aR_data[8, :, :], bbox=bbox)
    self._Wr_RBS = RBS(self._rs, self._zs, self._aR_data[9, :, :], bbox=bbox)
    self._Wz_RBS = RBS(self._rs, self._zs, self._aR_data[10, :, :], bbox=bbox)
    self._kappa = 4.0 / (self._R * self._a**3)
    self._Fr_RBS = RBS(self._rs, self._zs,
                       self._aR_data[4, :, :] + self._kappa * self._aR_data[6, :, :],
                       bbox=bbox)
    self._Fz_RBS = RBS(self._rs, self._zs,
                       self._aR_data[5, :, :] + self._kappa * self._aR_data[7, :, :],
                       bbox=bbox)
    return
def _process_gfile(self): # read geqdsk file with open(self.gfile, 'r') as gfile: eqdsk = gfile.readlines() # parse line 0 self.nw = int(eqdsk[0].split()[-2]) self.nh = int(eqdsk[0].split()[-1]) # parse line 1 try: entrylength = 16 rdim, zdim, _, rmin, zmid = \ [float(eqdsk[1][j * entrylength:(j + 1) * entrylength]) for j in range(len(eqdsk[1]) // entrylength)] except: try: entrylength = 15 rdim, zdim, _, rmin, zmid = \ [float(eqdsk[1][j * entrylength:(j + 1) * entrylength]) for j in range(len(eqdsk[1]) // entrylength)] except: raise IOError('Failed to read G-EQDSK line') # parse line 2 self.Rmaxis, self.Zmaxis, self.psiax, self.psisep, _ = \ [float(eqdsk[2][j * entrylength:(j + 1) * entrylength]) for j in range(len(eqdsk[2]) // entrylength)] # parse line 3 _, psiax2, _, rmag2, _ = \ [float(eqdsk[3][j * entrylength:(j + 1) * entrylength]) for j in range(len(eqdsk[3]) // entrylength)] # parse line 4 zmag2, _, psisep2, _, _ = \ [float(eqdsk[4][j * entrylength:(j + 1) * entrylength]) for j in range(len(eqdsk[4]) // entrylength)] if self.Rmaxis != rmag2: raise ValueError('Inconsistent self.Rmaxis: %7.4g, %7.4g' % (self.Rmaxis, rmag2)) if self.psiax != psiax2: raise ValueError('Inconsistent psiax: %7.4g, %7.4g' % (self.psiax, psiax2)) if self.Zmaxis != zmag2: raise ValueError('Inconsistent self.Zmaxis: %7.4g, %7.4g' % (self.Zmaxis, zmag2)) if self.psisep != psisep2: raise ValueError('Inconsistent psisep: %7.4g, %7.4g' % (self.psisep, psisep2)) # read flux profiles and 2D flux grid # pol current (F=RBt) [T-m] on uniform flux grid self.F_fs = np.empty(self.nw) start_line = 5 lines = np.arange(self.nw // 5) if self.nw % 5 != 0: lines = np.arange(self.nw // 5 + 1) for i in lines: n_entries = len(eqdsk[i + start_line]) // entrylength self.F_fs[i * 5:i * 5 + n_entries] = \ [float(eqdsk[i + start_line][j * entrylength:(j + 1) * entrylength]) for j in range(n_entries)] start_line = i + start_line + 1 # pressure [Pa] on uniform flux grid self.p_fs = np.empty(self.nw) for i in lines: n_entries = len(eqdsk[i + start_line]) // entrylength self.p_fs[i * 5:i * 5 + n_entries] = \ [float(eqdsk[i + start_line][j * entrylength:(j + 1) * entrylength]) for j in range(n_entries)] start_line = i + start_line + 1 # FF'=FdF/dpsi on uniform flux grid self.ffprime_fs = np.empty(self.nw) for i in lines: n_entries = len(eqdsk[i + start_line]) // entrylength self.ffprime_fs[i * 5:i * 5 + n_entries] = \ [float(eqdsk[i + start_line][j * entrylength:(j + 1) * entrylength]) for j in range(n_entries)] start_line = i + start_line + 1 # dp/dpsi [Pa/(Wb/rad)] on uniform flux grid self.pprime_fs = np.empty(self.nw) for i in lines: n_entries = len(eqdsk[i + start_line]) // entrylength self.pprime_fs[i * 5:i * 5 + n_entries] = \ [float(eqdsk[i + start_line][j * entrylength:(j + 1) * entrylength]) for j in range(n_entries)] start_line = i + start_line + 1 # pol. 
flux [Wb/rad] on rectangular grid psirz_1d = np.empty(self.nw * self.nh) lines_twod = np.arange(self.nw * self.nh // 5) if self.nw * self.nh % 5 != 0: lines_twod = np.arange(self.nw * self.nh // 5 + 1) for i in lines_twod: n_entries = len(eqdsk[i + start_line]) // entrylength psirz_1d[i * 5:i * 5 + n_entries] = \ [float(eqdsk[i + start_line][j * entrylength:(j + 1) * entrylength]) for j in range(n_entries)] start_line = i + start_line + 1 self.psirz = psirz_1d.reshape(self.nh, self.nw) # q safety factor on uniform flux grid self.qpsi_fs = np.empty(self.nw) for i in lines: n_entries = len(eqdsk[i + start_line]) // entrylength self.qpsi_fs[i * 5:i * 5 + n_entries] = \ [float(eqdsk[i + start_line][j * entrylength:(j + 1) * entrylength]) for j in range(n_entries)] start_line = i + start_line + 1 # flip signs if psi-axis > psi-separatrix if self.psiax > self.psisep: self.psirz = -self.psirz self.ffprime_fs = -self.ffprime_fs self.pprime_fs = -self.pprime_fs self.psiax *= -1 self.psisep *= -1 # R,Z grids dw = rdim / (self.nw - 1) dh = zdim / (self.nh - 1) self.rgrid = np.array([rmin + i * dw for i in range(self.nw)]) self.zgrid = np.array([zmid - zdim / 2 + i * dh \ for i in range(self.nh)]) # theta grid self.theta_grid = np.linspace(-np.pi, np.pi, self.ntheta) # flux grids self.psinorm_grid = np.linspace(0, 1, self.nw) self.psi_grid = np.linspace(self.psiax, self.psisep, self.nw) # flux surface R/Z coords. on flux and theta grids self.R_ftgrid = np.empty((self.nw, self.ntheta)) self.Z_ftgrid = np.empty((self.nw, self.ntheta)) t1 = np.arctan2(zmid - zdim / 2 - self.Zmaxis, rmin - self.Rmaxis) # angle(mag axis to bot. left) t2 = np.arctan2(zmid - zdim / 2 - self.Zmaxis, rmin + rdim - self.Rmaxis) # angle (mag ax to bot. rt) t3 = np.arctan2(zmid + zdim / 2 - self.Zmaxis, rmin + rdim - self.Rmaxis) # angle (mag ax to top rt) t4 = np.arctan2(zmid + zdim / 2 - self.Zmaxis, rmin - self.Rmaxis) # angle (mag ax to top left) # spline object for psi on RZ grid self.psi_spl = RBS(self.zgrid, self.rgrid, self.psirz, kx=self.io, ky=self.io) psilimit = self.psisep + (self.psisep - self.psiax) * 0.05 for j, theta in enumerate(self.theta_grid): if theta < t1 or theta >= t4: rad = (rmin - self.Rmaxis) / np.cos(theta) elif theta < t2 and theta >= t1: rad = -(self.Zmaxis - zmid + zdim / 2) / np.sin(theta) elif theta < t3 and theta >= t2: rad = (rmin + rdim - self.Rmaxis) / np.cos(theta) elif theta < t4 and theta >= t3: rad = (zmid + zdim / 2 - self.Zmaxis) / np.sin(theta) else: raise ValueError('Error with theta angle') dr = rad / (self.nw - 1) * np.cos(theta) dz = rad / (self.nw - 1) * np.sin(theta) # RZ coordinates from axis at fixed poloidal angle r_pol = np.array([self.Rmaxis + i * dr for i in range(self.nw)]) z_pol = np.array([self.Zmaxis + i * dz for i in range(self.nw)]) psi_rad = self.psi_spl.ev(z_pol, r_pol) psi_rad[0] = self.psiax # must restrict interpolation range because of non-monotonic psi around coils end_ind = 0 for i in range(self.nw - 1): if psi_rad[i] > psilimit: break if psi_rad[i + 1] <= psi_rad[i] and i < self.nw - 2: psi_rad[i + 1] = 0.5 * (psi_rad[i] + psi_rad[i + 2]) end_ind += 1 # interp objects for indices psi_int = interp1d(psi_rad[:end_ind + 1], np.arange(end_ind + 1), kind=self.io) # near psi-grid index for separatrix indsep = int(psi_int(self.psisep)) + 3 # RZ interp. 
objects along poloidal line from axis R_int = interp1d(psi_rad[:indsep], r_pol[:indsep], kind=self.io) Z_int = interp1d(psi_rad[:indsep], z_pol[:indsep], kind=self.io) # RZ coords of FS grid at fixed theta self.R_ftgrid[:, j] = R_int(self.psi_grid) self.Z_ftgrid[:, j] = Z_int(self.psi_grid) # find average elevation for all flux surfaces self.Z_avg_fs = np.empty(self.nw) for i in range(self.nw): ds = np.empty(self.ntheta) ds[1:self.ntheta - 1] = 0.5 * np.sqrt((self.R_ftgrid[i, 2:self.ntheta] \ - self.R_ftgrid[i, 0:self.ntheta - 2])**2 \ + (self.Z_ftgrid[i, 2:self.ntheta] \ - self.Z_ftgrid[i, 0:self.ntheta - 2])**2) ds[0] = 0.5 * np.sqrt((self.R_ftgrid[i, 1] - self.R_ftgrid[i, -1]) ** 2 \ + (self.Z_ftgrid[i, 1] - self.Z_ftgrid[i, -1])**2) ds[-1] = 0.5 * np.sqrt((self.R_ftgrid[i, 0] - self.R_ftgrid[i, -2]) ** 2 \ + (self.Z_ftgrid[i, 0] - self.Z_ftgrid[i, -2])**2) self.Z_avg_fs[i] = np.average(self.Z_ftgrid[i, :], weights=ds) # R_major and r_minor for all flux surfaces self.R_major_fs = np.empty(self.nw) self.R_major_fs[0] = self.Rmaxis self.r_minor_fs = np.empty(self.nw) self.r_minor_fs[0] = 0. itheta = self.ntheta // 4 # loop over flux grid for i in range(1, self.nw): # low field side R_array = self.R_ftgrid[i, itheta:3 * itheta] Z_array = self.Z_ftgrid[i, itheta:3 * itheta] Z_int = interp1d(Z_array, R_array, kind=self.io) R_out = Z_int(self.Z_avg_fs[i]) # high field side R_array = np.roll(self.R_ftgrid[i, :-1], self.ntheta // 2)[itheta:3 * itheta] Z_array = np.roll(self.Z_ftgrid[i, :-1], self.ntheta // 2)[itheta:3 * itheta] # have to use negative Z_array here to have increasing order Z_int = interp1d(-Z_array, R_array, kind=self.io) R_in = Z_int(-self.Z_avg_fs[i]) self.R_major_fs[i] = 0.5 * (R_out + R_in) # R_maj at self.Z_avg_fs self.r_minor_fs[i] = 0.5 * (R_out - R_in) # r_min at self.Z_avg_fs self.Rmaxis_lcfs = self.R_major_fs[-1] self.a_lcfs = self.r_minor_fs[-1] self.eps_lcfs = self.a_lcfs / self.Rmaxis_lcfs if not self.quiet: print('Header: %s' % eqdsk[0]) print('Resolution: %d x %d' % (self.nw, self.nh)) print('\n*** Magnetic axis and LCFS ***') print('R mag. axis = {:.3g} m'.format(self.Rmaxis)) print('Z mag. axis = {:.3g} m'.format(self.Zmaxis)) print('psi-axis = {:.3e} Wb/rad'.format(self.psiax)) print('psi-sep = {:.3e} Wb/rad'.format(self.psisep)) print('R0_lcfs = {:.3g} m'.format(self.Rmaxis_lcfs)) print('a_lcfs = {:.3g} m'.format(self.a_lcfs)) print('eps_lcfs = {:.3g}'.format(self.eps_lcfs))