def twonorm_accuracy(simulations, field='ux', strip=0, var_file='ogvar.dat',
                     direction='x', noerr=True, quiet=True):
    """
    Assess the accuracy of a set of simulations: compute the two-norm error
    for each available simulation, using the simulation with the largest
    number of grid points as the reference solution.

    E.g., to assess the accuracy of the x-component of the velocity along
    the y-direction for runs with nxgrid = n, 2n, 4n and 8n grid points,
    compute

        || u_n - u_0 || = sqrt( dy * sum_j (u_n(x_j) - u_0(x_j))^2 )

    for all runs (except the 8n case, which is used as the reference).

    Requires that the runs have matching grids, that is, grids refined by a
    factor of 2m, and grids adjusted so that the grid points overlap (an
    offset is needed if periodic BCs are used).

    call signature:
        twonorm_accuracy(simulations)

    Keyword arguments:

    *simulations*
      array of simulation names to be included in the computations

    *field*
      variable used in accuracy assessment

    *strip*:
      index of the strip along the chosen coordinate

    *var_file*:
      name of varfile to read from each sim

    *direction*:
      compute the two-norm along the 'x' or 'y' direction

    *noerr*:
      set to False if you want to return an array of the maximum error
      along the strip, in addition to the two-norm

    Returns an array of two-norms, where the largest array is used as the base.
    """

    import numpy as np
    import os as os
    from pencil import read
    from pencil import sim

    # Find directories to include in accuracy assessment.
    sims = []
    for simulation in simulations:
        sims.append(sim.get(simulation, quiet=True))

    # Sort runs by size of grid spacing.
    if (direction == 'x' or direction == 'r'):
        sims = sim.sort(sims, 'nx', reverse=False)
    elif (direction == 'y' or direction == 'th'):
        sims = sim.sort(sims, 'ny', reverse=False)

    # From this point we only need the varfile to compute accuracy.
    # Need to update dim to ogdim for reading of ogvar to be done
    # correctly from the simulation object.
    for i, thissim in enumerate(sims):
        sims[i].dim = read.ogdim(datadir=thissim.datadir)
        sims[i] = read.ogvar(sim=thissim, trimall=True, var_file=var_file)

    # Check that the increase in size is correct for use in the two-norm
    # calculation.
    nsims = len(sims)
    nx_min = sims[0].r.size
    ny_min = sims[0].th.size
    for thissim in sims:
        if ((thissim.r.size - 1) % (nx_min - 1) != 0):
            print('ERROR: Incorrect size in r-dir')
            print('sims.r', thissim.r.size)
            print('nx_min', nx_min)
            return False
        if (thissim.th.size % ny_min != 0):
            print('ERROR: Incorrect size in th-dir')
            return False

    # Check that all var-files are for the same time.
    t = sims[0].t
    for thissim in sims:
        if thissim.t != t:
            print('WARNING: Incorrect time for one or more simulations')

    # Now we are sure that the first coordinates of r and th are the same for
    # all runs, so we can compute the two-norms for increasing sizes.
    # Use the largest size as the normalization of the error.
    twonorms = np.zeros(nsims - 1)
    maxerrs = np.zeros(nsims - 1)
    if (direction == 'x' or direction == 'r'):
        dh = sims[0].dx
        n2_factor = int(dh / sims[-1].dx)
    elif (direction == 'y' or direction == 'th'):
        dh = sims[0].dy
        n2_factor = int(dh / sims[-1].dy)

    attribute = getattr(sims[-1], field)
    if (field == 'ux' or field == 'uy'):
        u2 = attribute[0::n2_factor, 0::n2_factor]
    else:
        u2 = attribute[0, 0::n2_factor, 0::n2_factor]

    strip = int(strip)
    if (direction == 'x' or direction == 'r'):
        dh = sims[-1].dx
        dx_max = sims[0].dx
        n2_factor = int(thissim.dx / dh)
        # for i, thissim in enumerate(sims[:-1]):
        #     strips[i] = int(thissim.dx/dx_max*strip)
        # n1_factor = int(sims[0].dx/sims[-1].dx)
    elif (direction == 'y' or direction == 'th'):
        dh = sims[-1].dy
        dx_max = sims[0].dy
        # for i, thissim in enumerate(sims[:-1]):
        #     strips[i] = int(thissim.dy/dx_max*strip)
        # n1_factor = int(sims[0].dx/sims[-1].dy)

    attribute = getattr(sims[-1], field)
    # if (field == 'ux' or field == 'uy'):
    #     u2 = attribute[0::n2_factor, 0::n2_factor]
    # else:
    #     u2 = attribute[0, 0::n2_factor, 0::n2_factor]
    j = 1
    for i, thissim in enumerate(sims[:-1]):
        n1_factor = 1
        if (direction == 'x' or direction == 'r'):
            n2_factor = int(thissim.dx / dh)
        elif (direction == 'y' or direction == 'th'):
            n2_factor = int(thissim.dy / dh)
        u1 = getattr(thissim, field)
        if (field == 'ux' or field == 'uy'):
            u2 = attribute[0::n2_factor, 0::n2_factor]
            # u1 = u1[0::n1_factor]
        else:
            u2 = attribute[0, 0::n2_factor, 0::n2_factor]
            u1 = u1[0, :, :]  # 0::n1_factor, :]
            # u1 = u1[0, 0::n1_factor, :]
        radius_l = sims[-1].r[0::n2_factor]
        if (direction == 'x' or direction == 'r'):
            twonorms[i] = (twonorm(u1[:, strip * j], u2[:, strip * j],
                                   thissim.dy * sims[0].r[strip]))
            maxerrs[i] = (maxerror(u1[:, strip * j], u2[:, strip * j]))
        elif (direction == 'y' or direction == 'th'):
            twonorms[i] = (twonorm(u1[strip * j, :], u2[strip * j, :],
                                   thissim.dx))
            maxerrs[i] = (maxerror(u1[strip * j, :], u2[strip * j, :]))
        j = j * 2

    # Superseded variant kept for reference:
    #     n1_factor = 1
    #     u1 = getattr(thissim, field)
    #     if(not(field=='ux' or field=='uy')):
    #         u1 = u1[0,:,:]
    #
    #     if(direction=='x' or direction=='r'):
    #         n1_factor = int(dx_max/thissim.dx)
    #         n2_factor = int(thissim.dx/dh)
    #         u1 = u1[:,0::n1_factor]
    #         u2 = attribute[0,0::n2_factor,0::n2_factor]
    #         u2 = u2[:,0::n1_factor]
    #     elif(direction=='y' or direction=='th'):
    #         n1_factor = int(dx_max/thissim.dy)
    #         n2_factor = int(thissim.dy/dh)
    #         u1 = u1[0::n1_factor,:]
    #         u2 = attribute[0,0::n2_factor,0::n2_factor]
    #         u2 = u2[0::n1_factor,:]
    #
    #     if(direction=='x' or direction=='r'):
    #         twonorms[i] = (twonorm(u1[:,strip],u2[:,strip],thissim.dy*sims[0].r[strip]))
    #         maxerrs[i] = (maxerror(u1[:,strip],u2[:,strip]))
    #     elif(direction=='y' or direction=='th'):
    #         twonorms[i] = (twonorm(u1[strip,:],u2[strip,:],thissim.dx))
    #         maxerrs[i] = (maxerror(u1[strip,:],u2[strip,:]))

    if (not quiet):
        print('Two-norm computed for field:', field, ', along strip:', strip)
        if (direction == 'x'):
            print('Along x-direction')
        elif (direction == 'r'):
            print('Along r-direction')
        elif (direction == 'y'):
            print('Along y-direction')
        elif (direction == 'th'):
            print('Along th-direction')

    if not noerr:
        return twonorms, maxerrs
    else:
        return twonorms

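
# A minimal usage sketch (not part of the module's API): how twonorm_accuracy
# might be called for a series of convergence runs. The run names below are
# hypothetical placeholders; the call just illustrates passing runs of
# increasing resolution and getting back one two-norm per run, with the
# finest run used as the reference.
def _example_twonorm_accuracy():
    """Illustrative sketch only; assumes the listed run directories exist."""
    runs = ['cyl_n64', 'cyl_n128', 'cyl_n256', 'cyl_n512']  # hypothetical names
    # Two-norm of 'ux' along one strip in the y/th direction.
    twonorms = twonorm_accuracy(runs, field='ux', strip=10,
                                var_file='ogvar.dat', direction='y')
    print(twonorms)
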
def twonorm_accuracy1D(simulations, field='ur', strip=1, direction='r',
                       varfile='ogvar.dat', noerr=True, quiet=True):
    """
    Assess the accuracy of a set of simulations: compute the two-norm error
    for each available simulation, using the simulation with the largest
    number of grid points as the reference solution.

    E.g., to assess the accuracy of the x-component of the velocity along
    the y-direction for runs with nxgrid = n, 2n, 4n and 8n grid points,
    compute

        || u_n - u_0 || = sqrt( dy * sum_j (u_n(x_j) - u_0(x_j))^2 )

    for all runs (except the 8n case, which is used as the reference).

    Requires that the runs have matching grids, that is, grids refined by a
    factor of 2m, and grids adjusted so that the grid points overlap (an
    offset is needed if periodic BCs are used).

    call signature:
        twonorm_accuracy1D(simulations)

    Keyword arguments:

    *simulations*
      array of simulation names to be included in the computations

    *field*
      variable used in accuracy assessment

    *strip*:
      index of the strip along the chosen coordinate

    *direction*:
      compute the two-norm along the 'r' or 'th' direction

    *varfile*:
      name of varfile to read from each sim

    *noerr*:
      set to False if you want to return an array of the maximum error
      along the strip, in addition to the two-norm

    Returns an array of two-norms, where the largest array is used as the base.
    """

    import numpy as np
    import os as os
    from pencil import read
    from pencil import sim

    # Find directories to include in accuracy assessment.
    sims = []
    for simulation in simulations:
        sims.append(sim.get(simulation, quiet=True))

    # Sort runs by size of grid spacing.
    sims = sim.sort(sims, 'dx', reverse=True)

    # From this point we only need the varfile to compute accuracy.
    # Need to update dim to ogdim for reading of ogvar to be done
    # correctly from the simulation object.
    for i, thissim in enumerate(sims):
        sims[i].dim = read.ogdim(datadir=thissim.datadir)
        sims[i] = read.ogvar(sim=thissim, trimall=True, varfile=varfile)

    # Check that the increase in size is correct for use in the two-norm
    # calculation.
    nsims = len(sims)
    nx_min = sims[0].r.size
    ny_min = sims[0].th.size
    for thissim in sims:
        if ((thissim.r.size - 1) % (nx_min - 1) != 0):
            print('ERROR: Incorrect size in r-dir')
            print('sims.r', thissim.r.size)
            print('nx_min', nx_min)
            return False
        if (thissim.th.size % ny_min != 0):
            print('ERROR: Incorrect size in th-dir')
            return False

    # Check that all var-files are for the same time.
    t = sims[0].t
    for thissim in sims:
        if thissim.t != t:
            print('WARNING: Incorrect time for one or more simulations')

    # Now we are sure that the first coordinates of r and th are the same for
    # all runs, so we can compute the two-norms for increasing sizes.
    # Use the largest size as the normalization of the error.
    twonorms = np.zeros(nsims - 1)
    maxerrs = np.zeros(nsims - 1)

    # Compute the array of factors used to jump indices when comparing two
    # arrays of size N and 2N, etc. Usually n2_fac = [8, 4, 2] or similar.
    n2_fac = np.empty(nsims - 1)
    for i, thissim in enumerate(sims[:-1]):
        n2_fac[i] = thissim.dx / sims[-1].dx

    u2 = getattr(sims[-1], field)
    if not (field == 'ux' or field == 'uy'):
        u2 = u2[0, :, :]

    for i, thissim in enumerate(sims[:-1]):
        u1 = getattr(thissim, field)
        if not (field == 'ux' or field == 'uy'):
            u1 = u1[0, :, :]
        # Local grid spacing in r (extend the last interval to keep dr the
        # same length as r).
        dr = np.empty(thissim.r.size)
        dr[0:-1] = thissim.r[1:] - thissim.r[0:-1]
        dr[-1] = dr[-2]
        dth = thissim.dy
        twonorms[i] = 0.
        n2 = n2_fac[i]
        if (direction == 'r'):
            r = sims[0].r[strip]
            j = int(strip * sims[0].dx / thissim.dx)
            for k in range(thissim.th.size):
                twonorms[i] = twonorms[i] + dth * r * (
                    u1[k, j] - u2[int(k * n2), int(j * n2)])**2
        elif (direction == 'th'):
            k = int(strip * sims[0].dx / thissim.dx)
            for j in range(thissim.r.size):
                twonorms[i] = twonorms[i] + dr[j] * (
                    u1[k, j] - u2[int(k * n2), int(j * n2)])**2
        else:
            print('ERROR: Invalid direction chosen')
            return False
        twonorms[i] = np.sqrt(twonorms[i])

    if not noerr:
        return twonorms, maxerrs
    else:
        return twonorms

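
# A self-contained sketch (synthetic data, numpy only) of the discrete error
# norm accumulated in the loops above: a coarse solution u1 on N points is
# compared with a reference u2 on a grid refined by a factor n2 by sampling u2
# at every n2-th index and summing dx-weighted squared differences before
# taking the square root.
def _example_discrete_twonorm():
    import numpy as np

    x1 = np.linspace(0.0, 1.0, 17)       # coarse grid, N = 17
    x2 = np.linspace(0.0, 1.0, 65)       # reference grid, 4x refinement
    u1 = np.sin(2 * np.pi * x1)          # coarse "solution" (synthetic)
    u2 = np.sin(2 * np.pi * x2)          # reference "solution" (synthetic)
    n2 = (x2.size - 1) // (x1.size - 1)  # index jump factor, here 4
    dx = x1[1] - x1[0]
    # || u1 - u2 || = sqrt( dx * sum_j (u1_j - u2_{j*n2})^2 )
    return np.sqrt(np.sum(dx * (u1 - u2[::n2]) ** 2))
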
def read(
    self,
    var_file="",
    datadir="data",
    proc=-1,
    ivar=-1,
    quiet=True,
    trimall=False,
    magic=None,
    sim=None,
    precision="d",
    lpersist=False,
    dtype=np.float64,
):
    """
    read(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
         trimall=False, magic=None, sim=None, precision='d',
         lpersist=False, dtype=np.float64)

    Read VAR files from Pencil Code. If proc < 0, then load all data
    and assemble, otherwise load the VAR file from the specified processor.

    The file format written by output() (and used, e.g. in var.dat)
    consists of the following Fortran records:
    1. data(mx, my, mz, nvar)
    2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
    Here nvar denotes the number of slots, i.e. 1 for one scalar field, 3
    for one vector field, 8 for var.dat in the case of MHD with entropy.
    Note that deltay(1) is only present if lshear is on, so the run
    parameters need to be known.

    Parameters
    ----------
    var_file : string
        Name of the VAR file.
        If not specified, use var.dat (which is the latest snapshot of the fields).

    datadir : string
        Directory where the data is stored.

    proc : int
        Processor to be read. If -1 read all and assemble to one array.

    ivar : int
        Index of the VAR file, if var_file is not specified.

    quiet : bool
        Flag for switching off output.

    trimall : bool
        Trim the data cube to exclude ghost zones.

    magic : list of string
        Values to be computed from the data, e.g. B = curl(A).

    sim : pencil code simulation object
        Contains information about the local simulation.

    precision : string
        Float 'f', double 'd' or half 'half'.

    lpersist : bool
        Read the persistent variables if they exist.

    dtype : type
        Precision of the f array, default np.float64.

    Returns
    -------
    DataCube
        Instance of the pencil.read.var.DataCube class.
        All of the computed fields are imported as class members.

    Examples
    --------
    Read the latest var.dat file and print the shape of the uu array:
    >>> var = pc.read.var()
    >>> print(var.uu.shape)

    Read the VAR2 file, compute the magnetic field B = curl(A),
    the vorticity omega = curl(u) and remove the ghost zones:
    >>> var = pc.read.var(var_file='VAR2', magic=['bb', 'vort'], trimall=True)
    >>> print(var.bb.shape)
    """

    import os
    from scipy.io import FortranFile
    from pencil.math.derivatives import curl, curl2
    from pencil import read
    from pencil.sim import __Simulation__

    def persist(self, infile=None, precision="d", quiet=quiet):
        """
        Read persistent variables, which may be appended to the f array and
        grid data, from an open Fortran file (first proc data only).
        Record types provide the labels and id record for the persistent
        variables in the deprecated Fortran binary format.
        """

        record_types = {}
        for key in read.record_types.keys():
            if read.record_types[key][1] == "d":
                record_types[key] = (read.record_types[key][0], precision)
            else:
                record_types[key] = read.record_types[key]

        try:
            tmp_id = infile.read_record("h")
        except Exception:
            return -1
        block_id = 0
        for i in range(2000):
            i += 1
            tmp_id = infile.read_record("h")
            block_id = tmp_id[0]
            if block_id == 2000:
                break
            for key in record_types.keys():
                if record_types[key][0] == block_id:
                    tmp_val = infile.read_record(record_types[key][1])
                    self.__setattr__(key, tmp_val[0])
                    if not quiet:
                        print(key, record_types[key][0], record_types[key][1], tmp_val)
        return self

    dim = None
    param = None
    index = None

    if isinstance(sim, __Simulation__):
        datadir = os.path.expanduser(sim.datadir)
        dim = sim.dim
        param = read.param(datadir=sim.datadir, quiet=True, conflicts_quiet=True)
        index = read.index(datadir=sim.datadir)
    else:
        datadir = os.path.expanduser(datadir)

    if dim is None:
        if var_file[0:2].lower() == "og":
            dim = read.ogdim(datadir, proc)
        else:
            if var_file[0:4] == "VARd":
                dim = read.dim(datadir, proc, down=True)
            else:
                dim = read.dim(datadir, proc)
    if param is None:
        param = read.param(datadir=datadir, quiet=quiet, conflicts_quiet=True)
    if index is None:
        index = read.index(datadir=datadir)

    if param.lwrite_aux:
        total_vars = dim.mvar + dim.maux
    else:
        total_vars = dim.mvar

    if os.path.exists(os.path.join(datadir, "grid.h5")):
        #
        # Read HDF5 files.
        #
        import h5py

        run2D = param.lwrite_2d

        # Set up the global array.
        if not run2D:
            self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx), dtype=dtype)
        else:
            if dim.ny == 1:
                self.f = np.zeros((total_vars, dim.mz, dim.mx), dtype=dtype)
            else:
                self.f = np.zeros((total_vars, dim.my, dim.mx), dtype=dtype)

        if not var_file:
            if ivar < 0:
                var_file = "var.h5"
            else:
                var_file = "VAR" + str(ivar) + ".h5"

        file_name = os.path.join(datadir, "allprocs", var_file)
        with h5py.File(file_name, "r") as tmp:
            for key in tmp["data"].keys():
                self.f[index.__getattribute__(key) - 1, :] = dtype(
                    tmp["data/" + key][:]
                )
            t = (tmp["time"][()]).astype(precision)
            x = (tmp["grid/x"][()]).astype(precision)
            y = (tmp["grid/y"][()]).astype(precision)
            z = (tmp["grid/z"][()]).astype(precision)
            dx = (tmp["grid/dx"][()]).astype(precision)
            dy = (tmp["grid/dy"][()]).astype(precision)
            dz = (tmp["grid/dz"][()]).astype(precision)
            if param.lshear:
                deltay = (tmp["persist/shear_delta_y"][0]).astype(precision)
            if lpersist:
                for key in tmp["persist"].keys():
                    self.__setattr__(
                        key, (tmp["persist"][key][0]).astype(precision)
                    )
    else:
        #
        # Read scattered Fortran binary files.
        #
        run2D = param.lwrite_2d

        if dim.precision == "D":
            read_precision = "d"
        else:
            read_precision = "f"

        if not var_file:
            if ivar < 0:
                var_file = "var.dat"
            else:
                var_file = "VAR" + str(ivar)

        if proc < 0:
            proc_dirs = self.__natural_sort(
                filter(lambda s: s.startswith("proc"), os.listdir(datadir))
            )
            if proc_dirs.count("proc_bounds.dat") > 0:
                proc_dirs.remove("proc_bounds.dat")
            if param.lcollective_io:
                # A collective IO strategy is being used.
                proc_dirs = ["allprocs"]
            # else:
            #     proc_dirs = proc_dirs[::dim.nprocx*dim.nprocy]
        else:
            proc_dirs = ["proc" + str(proc)]

        # Set up the global array.
        if not run2D:
            self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx), dtype=dtype)
        else:
            if dim.ny == 1:
                self.f = np.zeros((total_vars, dim.mz, dim.mx), dtype=dtype)
            else:
                self.f = np.zeros((total_vars, dim.my, dim.mx), dtype=dtype)

        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            if not param.lcollective_io:
                proc = int(directory[4:])
                if var_file[0:2].lower() == "og":
                    procdim = read.ogdim(datadir, proc)
                else:
                    if var_file[0:4] == "VARd":
                        procdim = read.dim(datadir, proc, down=True)
                    else:
                        procdim = read.dim(datadir, proc)
                if not quiet:
                    print(
                        "Reading data from processor"
                        + " {0} of {1} ...".format(proc, len(proc_dirs))
                    )
            else:
                # A collective IO strategy is being used.
                procdim = dim
            # else:
            #     procdim.mx = dim.mx
            #     procdim.my = dim.my
            #     procdim.nx = dim.nx
            #     procdim.ny = dim.ny
            #     procdim.ipx = dim.ipx
            #     procdim.ipy = dim.ipy

            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the data.
            file_name = os.path.join(datadir, directory, var_file)
            infile = FortranFile(file_name)
            if not run2D:
                f_loc = dtype(infile.read_record(dtype=read_precision))
                f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
            else:
                if dim.ny == 1:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, mzloc, mxloc))
                else:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, myloc, mxloc))
            # The trailing record holds t, x, y, z, dx, dy, dz
            # (and deltay if lshear is on).
            raw_etc = infile.read_record(dtype=read_precision)
            if lpersist:
                persist(self, infile=infile, precision=read_precision, quiet=quiet)
            infile.close()

            t = raw_etc[0]
            x_loc = raw_etc[1:mxloc + 1]
            y_loc = raw_etc[mxloc + 1:mxloc + myloc + 1]
            z_loc = raw_etc[mxloc + myloc + 1:mxloc + myloc + mzloc + 1]
            if param.lshear:
                shear_offset = 1
                deltay = raw_etc[-1]
            else:
                shear_offset = 0
            dx = raw_etc[-3 - shear_offset]
            dy = raw_etc[-2 - shear_offset]
            dz = raw_etc[-1 - shear_offset]

            if len(proc_dirs) > 1:
                # Calculate where the local processor will go in
                # the global array.
                #
                # Don't overwrite ghost zones of the processor to the
                # left (and accordingly in y and z direction -- makes
                # a difference on the diagonals).
                #
                # Recall that in NumPy, slicing is NON-INCLUSIVE on
                # the right end, i.e. x[0:4] will slice all of a
                # 4-element array, not produce an error like in IDL.
                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0xloc = 0
                    i1xloc = procdim.mx
                else:
                    i0x = procdim.ipx * procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0xloc = procdim.nghostx
                    i1xloc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0yloc = 0
                    i1yloc = procdim.my
                else:
                    i0y = procdim.ipy * procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0yloc = procdim.nghosty
                    i1yloc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z + procdim.mz
                    i0zloc = 0
                    i1zloc = procdim.mz
                else:
                    i0z = procdim.ipz * procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0zloc = procdim.nghostz
                    i1zloc = procdim.mz

                x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                if not run2D:
                    self.f[:, i0z:i1z, i0y:i1y, i0x:i1x] = f_loc[
                        :, i0zloc:i1zloc, i0yloc:i1yloc, i0xloc:i1xloc
                    ]
                else:
                    if dim.ny == 1:
                        self.f[:, i0z:i1z, i0x:i1x] = f_loc[
                            :, i0zloc:i1zloc, i0xloc:i1xloc
                        ]
                    else:
                        self.f[i0z:i1z, i0y:i1y, i0x:i1x] = f_loc[
                            i0zloc:i1zloc, i0yloc:i1yloc, i0xloc:i1xloc
                        ]
            else:
                self.f = f_loc
                x = x_loc
                y = y_loc
                z = z_loc

    if magic is not None:
        if not np.all(param.lequidist):
            raise NotImplementedError(
                "Magic functions are only implemented for equidistant grids."
            )
        if "bb" in magic:
            # Compute the magnetic field before doing trimall.
            aa = self.f[index.ax - 1:index.az, ...]
            self.bb = dtype(
                curl(
                    aa,
                    dx,
                    dy,
                    dz,
                    x=x,
                    y=y,
                    run2D=run2D,
                    coordinate_system=param.coord_system,
                )
            )
            if trimall:
                self.bb = self.bb[
                    :, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1
                ]
        if "jj" in magic:
            # Compute the electric current field before doing trimall.
            aa = self.f[index.ax - 1:index.az, ...]
            self.jj = dtype(
                curl2(aa, dx, dy, dz, x=x, y=y, coordinate_system=param.coord_system)
            )
            if trimall:
                self.jj = self.jj[
                    :, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1
                ]
        if "vort" in magic:
            # Compute the vorticity field before doing trimall.
            uu = self.f[index.ux - 1:index.uz, ...]
            self.vort = dtype(
                curl(
                    uu,
                    dx,
                    dy,
                    dz,
                    x=x,
                    y=y,
                    run2D=run2D,
                    coordinate_system=param.coord_system,
                )
            )
            if trimall:
                if run2D:
                    if dim.nz == 1:
                        self.vort = self.vort[
                            :, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1
                        ]
                    else:
                        self.vort = self.vort[
                            :, dim.n1:dim.n2 + 1, dim.l1:dim.l2 + 1
                        ]
                else:
                    self.vort = self.vort[
                        :,
                        dim.n1:dim.n2 + 1,
                        dim.m1:dim.m2 + 1,
                        dim.l1:dim.l2 + 1,
                    ]

    # Trim the ghost zones of the global f-array if asked.
    if trimall:
        self.x = x[dim.l1:dim.l2 + 1]
        self.y = y[dim.m1:dim.m2 + 1]
        self.z = z[dim.n1:dim.n2 + 1]
        if not run2D:
            self.f = self.f[
                :, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1
            ]
        else:
            if dim.ny == 1:
                self.f = self.f[:, dim.n1:dim.n2 + 1, dim.l1:dim.l2 + 1]
            else:
                self.f = self.f[:, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1]
    else:
        self.x = x
        self.y = y
        self.z = z
        self.l1 = dim.l1
        self.l2 = dim.l2 + 1
        self.m1 = dim.m1
        self.m2 = dim.m2 + 1
        self.n1 = dim.n1
        self.n2 = dim.n2 + 1

    # Assign an attribute to self for each variable defined in
    # 'data/index.pro' so that e.g. self.ux is the x-velocity.
    aatest = []
    uutest = []
    for key in index.__dict__.keys():
        if "aatest" in key:
            aatest.append(key)
        if "uutest" in key:
            uutest.append(key)
        if (
            key != "global_gg"
            and key != "keys"
            and "aatest" not in key
            and "uutest" not in key
        ):
            value = index.__dict__[key]
            setattr(self, key, self.f[value - 1, ...])
    # Special treatment for vector quantities.
    if hasattr(index, "uu"):
        self.uu = self.f[index.ux - 1:index.uz, ...]
    if hasattr(index, "aa"):
        self.aa = self.f[index.ax - 1:index.az, ...]
    if hasattr(index, "uu_sph"):
        self.uu_sph = self.f[index.uu_sphx - 1:index.uu_sphz, ...]
    if hasattr(index, "bb_sph"):
        self.bb_sph = self.f[index.bb_sphx - 1:index.bb_sphz, ...]
    # Special treatment for test method vector quantities.
    # Note index 1,2,3,...,0: the last vector may be the zero field/flow.
    if hasattr(index, "aatest1"):
        naatest = int(len(aatest) / 3)
        for j in range(0, naatest):
            key = "aatest" + str(np.mod(j + 1, naatest))
            value = index.__dict__["aatest1"] + 3 * j
            setattr(self, key, self.f[value - 1:value + 2, ...])
    if hasattr(index, "uutest1"):
        nuutest = int(len(uutest) / 3)
        for j in range(0, nuutest):
            key = "uutest" + str(np.mod(j + 1, nuutest))
            value = index.__dict__["uutest1"] + 3 * j
            setattr(self, key, self.f[value - 1:value + 2, ...])

    self.t = t
    self.dx = dx
    self.dy = dy
    self.dz = dz
    if param.lshear:
        self.deltay = deltay

    # Do the rest of magic after the trimall (i.e. no additional curl).
    self.magic = magic
    if self.magic is not None:
        self.magic_attributes(param, dtype=dtype)

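
# A standalone sketch (synthetic sizes, numpy only) of the ghost-zone-aware
# assembly performed in the processor loop of read() above, reduced to a 1-D
# decomposition along x: every processor except the first skips its left
# ghost zone when copying into the global array, so ghost cells already
# filled by the neighbour to the left are not overwritten. All names and
# sizes here are hypothetical illustrations, not module API.
def _example_assemble_x():
    import numpy as np

    nx, nghost, nprocx = 8, 3, 2         # hypothetical local size, ghosts, procs
    mx_loc = nx + 2 * nghost             # local array including ghost zones
    mx_glob = nx * nprocx + 2 * nghost   # global array including ghost zones
    glob = np.zeros(mx_glob)
    for ipx in range(nprocx):
        loc = np.full(mx_loc, float(ipx + 1))  # stand-in for x_loc or f_loc
        if ipx == 0:
            i0, i1 = 0, mx_loc
            i0loc, i1loc = 0, mx_loc
        else:
            i0 = ipx * nx + nghost
            i1 = i0 + mx_loc - nghost
            i0loc, i1loc = nghost, mx_loc
        glob[i0:i1] = loc[i0loc:i1loc]
    return glob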