def animate_slices(
    field="uu1",
    datadir="data/",
    proc=-1,
    extension="xz",
    format="native",
    tmin=0.0,
    tmax=1.0e38,
    wait=0.0,
    amin=0.0,
    amax=1.0,
    transform="",
    oldfile=False,
):
    """
    Read 2D slice files and assemble an animation.

    Options:

     field      --- which variable to slice
     datadir    --- path to data directory
     proc       --- an integer giving the processor to read a slice from
     extension  --- which plane of xy,xz,yz,Xz. for 2D this should be
                    overwritten.
     format     --- endian. one of little, big, or native (default)
     tmin       --- start time
     tmax       --- end time
     amin       --- minimum value for image scaling
     amax       --- maximum value for image scaling
     transform  --- insert arbitrary numerical code to modify the slice
     wait       --- pause in seconds between animation slices
    """
    datadir = os.path.expanduser(datadir)
    if proc < 0:
        filename = os.path.join(datadir, "slice_" + field + "." + extension)
    else:
        # BUGFIX: the original passed "/procN" and "/slice_..." with leading
        # slashes, which makes os.path.join discard datadir and yields an
        # absolute path like "/procN/slice_...". Join plain components.
        filename = os.path.join(
            datadir, "proc" + str(proc), "slice_" + field + "." + extension
        )

    # param kept for parity with the original code; not used below.
    param = read.param(datadir)
    dim = read.dim(datadir, proc)
    precision = "d" if dim.precision == "D" else "f"

    # Set up slice plane dimensions for the requested cut.
    if extension == "xy" or extension == "Xy":
        hsize = dim.nx
        vsize = dim.ny
    elif extension == "xz" or extension == "Xz":
        # "Xz" added: advertised in the docstring but previously unhandled.
        hsize = dim.nx
        vsize = dim.nz
    elif extension == "yz":
        hsize = dim.ny
        vsize = dim.nz
    else:
        # Previously an unknown extension surfaced later as a NameError on
        # hsize/vsize; fail early with a clear message instead.
        raise ValueError("Unknown extension: {}".format(extension))
    plane = np.zeros((vsize, hsize), dtype=precision)

    infile = FortranFile(filename)

    ax = plt.axes()
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    # NOTE: the original had a bare "ax.set_ylim" without parentheses — a
    # no-op attribute access — which has been removed.
    image = plt.imshow(plane, vmin=amin, vmax=amax)

    # For real-time image display.
    manager = plt.get_current_fig_manager()
    manager.show()

    ifirst = True
    islice = 0
    while True:
        # Stop at end of file: FortranFile raises on a truncated/absent record.
        try:
            raw_data = infile.read_record(dtype=precision)
        except (ValueError, TypeError):
            break

        if oldfile:
            # Old format: [plane..., t]
            t = raw_data[-1]
            plane = raw_data[:-1].reshape(vsize, hsize)
        else:
            # New format: [plane..., t, slice_z2pos]
            slice_z2pos = raw_data[-1]
            t = raw_data[-2]
            plane = raw_data[:-2].reshape(vsize, hsize)

        if transform:
            # BUGFIX: exec() cannot rebind a local variable in Python 3, so
            # the original exec("plane = plane" + transform) silently did
            # nothing.  Evaluate the expression and rebind explicitly.
            # SECURITY: transform is executed as code — pass trusted input only.
            plane = eval("plane" + transform)

        if t > tmin and t < tmax:
            title = "t = %11.3e" % t
            ax.set_title(title)
            image.set_data(plane)
            manager.canvas.draw()

            if ifirst:
                # Print the stats header once, then one line per slice.
                print("----islice----------t---------min-------max-------delta")
            print(
                "%10i %10.3e %10.3e %10.3e %10.3e"
                % (islice, t, plane.min(), plane.max(),
                   plane.max() - plane.min())
            )
            ifirst = False
            islice += 1

            plt.pause(wait)

        if t > tmax:
            break

    infile.close()
def write_h5_snapshot(
    snapshot,
    file_name="VAR0",
    datadir="data/allprocs",
    precision="d",
    nghost=3,
    persist=None,
    settings=None,
    param=None,
    grid=None,
    lghosts=False,
    indx=None,
    proc=None,
    ipx=None,
    ipy=None,
    ipz=None,
    procdim=None,
    unit=None,
    t=None,
    x=None,
    y=None,
    z=None,
    state="a",
    quiet=True,
    lshear=False,
    driver=None,
    comm=None,
    overwrite=False,
    rank=0,
    size=1,
):
    """
    Write a snapshot given as numpy array.

    We assume by default that a run simulation directory has already been
    constructed and start completed successfully in h5 format so that
    files dim, grid and param files are already present.
    If not the contents of these will need to be supplied as dictionaries
    along with persist if included.

    call signature:

    write_h5_snapshot(snapshot, file_name='VAR0', datadir='data/allprocs',
                      precision='d', nghost=3, persist=None, settings=None,
                      param=None, grid=None, lghosts=False, indx=None,
                      unit=None, t=None, x=None, y=None, z=None,
                      procdim=None, quiet=True, lshear=False,
                      driver=None, comm=None)

    Keyword arguments:

    *snapshot*:
      Numpy array containing the snapshot.
      Must be of shape [nvar, nz, ny, nx] without boundaries or.
      Must be of shape [nvar, mz, my, mx] with boundaries for lghosts=True.

    *file_name*:
      Name of the snapshot file to be written, e.g. VAR0 or var.

    *datadir*:
      Directory where the data is stored.

    *precision*:
      Single 'f' or double 'd' precision.

    *persist*:
      optional dictionary of persistent variable.

    *settings*:
      optional dictionary of simulation settings (dim-like values).

    *param*:
      optional Param object.

    *grid*:
      optional Pencil Grid object of grid parameters.

    *nghost*:
      Number of ghost zones.

    *lghosts*:
      If True the snapshot contains the ghost zones.

    *indx*
      Index object of index for each variable in f-array

    *unit*:
      Optional dictionary of simulation units.

    *quiet*:
      Option to print output.

    *t*:
      Time of the snapshot.

    *x*, *y*, *z*:
      xyz arrays of the domain with ghost zones.
      This will normally be obtained from Grid object, but facility to
      redefine an alternative grid value.

    *lshear*:
      Flag for the shear.

    *driver*
      File driver for hdf5 io for use in serial or MPI parallel.

    *comm*
      MPI objects supplied if driver is 'mpio'.

    *overwrite*
      flag to replace existing h5 snapshot file.

    *rank*
      rank of process with root=0.

    Returns -1 on argument errors (bad precision, incompatible layout or
    axis arrays); otherwise returns None after writing the file.
    """
    import numpy as np
    from os.path import join
    from pencil import read
    from pencil.io import open_h5, group_h5, dataset_h5
    from pencil import is_sim_dir

    # test if simulation directory
    if not is_sim_dir():
        print("ERROR: Directory needs to be a simulation")
        sys.stdout.flush()
    if indx == None:
        indx = read.index()
    #
    # Build the settings dictionary from data/dim.dat if not supplied.
    if settings == None:
        settings = {}
        skeys = [
            "l1",
            "l2",
            "m1",
            "m2",
            "n1",
            "n2",
            "nx",
            "ny",
            "nz",
            "mx",
            "my",
            "mz",
            "nprocx",
            "nprocy",
            "nprocz",
            "maux",
            "mglobal",
            "mvar",
            "precision",
        ]
        dim = read.dim()
        for key in skeys:
            settings[key] = dim.__getattribute__(key)
        settings["precision"] = precision.encode()
        settings["nghost"] = nghost
        settings["version"] = np.int32(0)
    # NOTE(review): if the caller supplies settings, `dim` is never assigned
    # here, yet it is referenced below (ghost-zone creation) when lghosts is
    # False — potential NameError; confirm against callers.
    nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    if grid == None:
        grid = read.grid(quiet=True)
    else:
        # Caller-supplied grid: verify it carries every required key.
        gd_err = False
        for key in gkeys:
            if not key in grid.__dict__.keys():
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
                gd_err = True
        if gd_err:
            print("ERROR: grid incomplete")
            sys.stdout.flush()
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    if param == None:
        param = read.param(quiet=True)
        # Derive the remaining physical units from the base units in param.
        param.__setattr__("unit_mass", param.unit_density * param.unit_length**3)
        param.__setattr__("unit_energy", param.unit_mass * param.unit_velocity**2)
        param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
        param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
        param.unit_system = param.unit_system.encode()

    # check whether the snapshot matches the simulation shape
    # NOTE(review): these bare `==` comparisons discard their results, and a
    # plain shape mismatch would not raise ValueError — as written this guard
    # only catches an index error on snapshot.shape; confirm intent.
    if lghosts:
        try:
            snapshot.shape[0] == settings["mvar"]
            snapshot.shape[1] == settings["mx"]
            snapshot.shape[2] == settings["my"]
            snapshot.shape[3] == settings["mz"]
        except ValueError:
            print("ERROR: snapshot shape {} ".format(snapshot.shape) +
                  "does not match simulation dimensions with ghosts.")
            sys.stdout.flush()
    else:
        try:
            snapshot.shape[0] == settings["mvar"]
            snapshot.shape[1] == settings["nx"]
            snapshot.shape[2] == settings["ny"]
            snapshot.shape[3] == settings["nz"]
        except ValueError:
            print("ERROR: snapshot shape {} ".format(snapshot.shape) +
                  "does not match simulation dimensions without ghosts.")
            sys.stdout.flush()

    # Determine the precision used and ensure snapshot has correct data_type.
    if precision == "f":
        data_type = np.float32
        snapshot = np.float32(snapshot)
    elif precision == "d":
        data_type = np.float64
        snapshot = np.float64(snapshot)
    else:
        print("ERROR: Precision {0} not understood.".format(precision) +
              " Must be either 'f' or 'd'")
        sys.stdout.flush()
        return -1

    # Check that the shape does not conflict with the proc numbers.
    if ((settings["nx"] % settings["nprocx"] > 0) or
            (settings["ny"] % settings["nprocy"] > 0) or
            (settings["nz"] % settings["nprocz"] > 0)):
        print("ERROR: Shape of the input array is not compatible with the " +
              "cpu layout. Make sure that nproci devides ni.")
        sys.stdout.flush()
        return -1

    # Check the shape of the xyz arrays if specified and overwrite grid values.
    # NOTE(review): `x != None` is elementwise (hence ambiguous in a bool
    # context) if x is a numpy array — presumably callers pass None or a
    # plain sequence; confirm.
    if x != None:
        if len(x) != settings["mx"]:
            print("ERROR: x array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.x = data_type(x)
    if y != None:
        if len(y) != settings["my"]:
            print("ERROR: y array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.y = data_type(y)
    if z != None:
        if len(z) != settings["mz"]:
            print("ERROR: z array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.z = data_type(z)

    # Define a time.
    if t is None:
        t = data_type(0.0)

    # making use of pc_hdf5 functionality:
    # append when writing a single proc's slab, else create the file anew.
    if not proc == None:
        state = "a"
    else:
        state = "w"
    filename = join(datadir, file_name)
    print("write_h5_snapshot: filename =", filename)
    with open_h5(
        filename,
        state,
        driver=driver,
        comm=comm,
        overwrite=overwrite,
        rank=rank,
        size=size,
    ) as ds:
        data_grp = group_h5(
            ds,
            "data",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        if not procdim:
            # Whole-domain write: store each f-array variable as one dataset.
            # Composite vector keys are skipped; their components are written
            # individually.
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                #create ghost zones if required
                if not lghosts:
                    tmp_arr = np.zeros([
                        snapshot.shape[1] + 2 * nghost,
                        snapshot.shape[2] + 2 * nghost,
                        snapshot.shape[3] + 2 * nghost,
                    ])
                    # Place the interior data; the ghost cells stay zero.
                    tmp_arr[dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                            dim.l1:dim.l2 + 1] = np.array(
                                snapshot[indx.__getattribute__(key) - 1])
                    dataset_h5(
                        data_grp,
                        key,
                        status=state,
                        data=tmp_arr,
                        dtype=data_type,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
                else:
                    dataset_h5(
                        data_grp,
                        key,
                        status=state,
                        data=np.array(snapshot[indx.__getattribute__(key) - 1]),
                        dtype=data_type,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
        else:
            # Per-processor write: first create empty full-domain datasets,
            # then write this proc's slab into its global position.
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                dataset_h5(
                    data_grp,
                    key,
                    status=state,
                    shape=(settings["mz"], settings["my"], settings["mx"]),
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    size=size,
                )
            # adjust indices to include ghost zones at boundaries
            l1, m1, n1 = procdim.l1, procdim.m1, procdim.n1
            if procdim.ipx == 0:
                l1 = 0
            if procdim.ipy == 0:
                m1 = 0
            if procdim.ipz == 0:
                n1 = 0
            l2, m2, n2 = procdim.l2, procdim.m2, procdim.n2
            if procdim.ipx == settings["nprocx"] - 1:
                l2 = procdim.l2 + settings["nghost"]
            if procdim.ipy == settings["nprocy"] - 1:
                m2 = procdim.m2 + settings["nghost"]
            if procdim.ipz == settings["nprocz"] - 1:
                n2 = procdim.n2 + settings["nghost"]
            nx, ny, nz = procdim.nx, procdim.ny, procdim.nz
            ipx, ipy, ipz = procdim.ipx, procdim.ipy, procdim.ipz
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                tmp_arr = np.array(snapshot[indx.__getattribute__(key) - 1])
                # Offset the local indices by this proc's position in the
                # processor grid to land in the global dataset.
                data_grp[key][n1 + ipz * nz:n2 + ipz * nz + 1,
                              m1 + ipy * ny:m2 + ipy * ny + 1,
                              l1 + ipx * nx:l2 + ipx * nx + 1,
                              ] = tmp_arr[n1:n2 + 1, m1:m2 + 1, l1:l2 + 1]
        dataset_h5(
            ds,
            "time",
            status=state,
            data=np.array(t),
            size=size,
            dtype=data_type,
            rank=rank,
            comm=comm,
            overwrite=overwrite,
        )
        # add settings
        sets_grp = group_h5(
            ds,
            "settings",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in settings.keys():
            if "precision" in key:
                # precision is stored as bytes; let h5py infer the dtype.
                dataset_h5(
                    sets_grp,
                    key,
                    status=state,
                    data=(settings[key], ),
                    dtype=None,
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
            else:
                dataset_h5(
                    sets_grp,
                    key,
                    status=state,
                    data=(settings[key], ),
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
        # add grid
        grid_grp = group_h5(
            ds,
            "grid",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in gkeys:
            dataset_h5(
                grid_grp,
                key,
                status=state,
                data=(grid.__getattribute__(key)),
                dtype=data_type,
                rank=rank,
                comm=comm,
                size=size,
                overwrite=overwrite,
            )
        # Domain origin (xyz0) stored componentwise as Ox/Oy/Oz.
        dataset_h5(
            grid_grp,
            "Ox",
            status=state,
            data=(param.__getattribute__("xyz0")[0], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        dataset_h5(
            grid_grp,
            "Oy",
            status=state,
            data=(param.__getattribute__("xyz0")[1], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        dataset_h5(
            grid_grp,
            "Oz",
            status=state,
            data=(param.__getattribute__("xyz0")[2], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        # add physical units
        unit_grp = group_h5(
            ds,
            "unit",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in ukeys:
            if "system" in key:
                # unit_system is a bytes label, wrapped in a tuple.
                dataset_h5(
                    unit_grp,
                    key,
                    status=state,
                    data=(param.__getattribute__("unit_" + key), ),
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
            else:
                dataset_h5(
                    unit_grp,
                    key,
                    status=state,
                    data=param.__getattribute__("unit_" + key),
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
        # add optional persistent data
        if persist != None:
            pers_grp = group_h5(
                ds,
                "persist",
                status=state,
                size=size,
                delete=False,
                overwrite=overwrite,
                rank=rank,
            )
            for key in persist.keys():
                if not quiet:
                    print(key, type(persist[key][()]))
                    sys.stdout.flush()
                # Replicate the scalar persistent value once per processor.
                arr = np.empty(nprocs, dtype=type(persist[key][()]))
                arr[:] = persist[key][()]
                dataset_h5(
                    pers_grp,
                    key,
                    status=state,
                    data=(arr),
                    size=size,
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    overwrite=overwrite,
                )
def derive_data(sim_path, src, dst, magic=['pp', 'tt'], par=[], comm=None,
                gd=[], overwrite=False, rank=0, size=1, nghost=3,
                status='a', chunksize=1000.0, dtype=np.float64, quiet=True,
                nmin=32):
    """
    Compute the derived quantities listed in *magic* from the h5 source
    *src* and write them into the 'data' group of the h5 destination *dst*,
    processing the domain in memory-bounded chunks (optionally split across
    MPI processes).

    Keyword arguments:

    *sim_path*: path of the simulation directory (used to read param/grid
                when *par*/*gd* are not supplied).
    *src*, *dst*: open h5 objects for input and output.
    *magic*: derived variable key or list of keys to compute.
    *par*, *gd*: Param and Grid objects; the empty-list defaults act as
                 sentinels meaning "read them from sim_path".
    *comm*, *rank*, *size*: MPI communicator and process layout.
    *nghost*: number of ghost zones on each boundary.
    *chunksize*: target maximum chunk size in MB.
    *dtype*: output float type.
    *nmin*: minimum chunk extent passed to cpu_optimal.
    """
    if comm:
        # In MPI mode datasets are created collectively; do not overwrite.
        overwrite = False
    if isinstance(par, list):
        os.chdir(sim_path)
        par = read.param(quiet=True, conflicts_quiet=True)
    if isinstance(gd, list):
        os.chdir(sim_path)
        gd = read.grid(quiet=True)
    #get data dimensions
    nx, ny, nz = src['settings']['nx'][0],\
                 src['settings']['ny'][0],\
                 src['settings']['nz'][0]
    mx, my, mz = src['settings']['mx'][0],\
                 src['settings']['my'][0],\
                 src['settings']['mz'][0]
    #split data into manageable memory chunks
    # NOTE(review): "8*n/1024*1024" evaluates as (8*n/1024)*1024, i.e. bytes,
    # not MB as the comparison with chunksize suggests — confirm intent.
    dstchunksize = 8 * nx * ny * nz / 1024 * 1024
    if dstchunksize > chunksize:
        nchunks = cpu_optimal(nx, ny, nz, quiet=quiet,
                              mvar=src['settings/mvar'][0],
                              maux=src['settings/maux'][0],
                              MBmin=chunksize, nmin=nmin, size=size)[1]
    else:
        nchunks = [1, 1, 1]
    print('nchunks {}'.format(nchunks))
    # for mpi split chunks across processes
    if size > 1:
        # Each rank handles exactly one (x, y, z) chunk triple.
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = [
            locindx[np.mod(
                rank + int(rank / nchunks[2]) + int(rank / nchunks[1]),
                nchunks[0])]
        ]
        indy = [locindy[np.mod(rank + int(rank / nchunks[2]), nchunks[1])]]
        indz = [locindz[np.mod(rank, nchunks[2])]]
        allchunks = 1
    else:
        # Serial: iterate over every chunk combination.
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        indy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        indz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        allchunks = nchunks[0] * nchunks[1] * nchunks[2]
    # save time
    dataset_h5(dst, 'time', status=status, data=src['time'][()], comm=comm,
               size=size, rank=rank, overwrite=overwrite, dtype=dtype)
    # ensure derived variables are in a list
    if isinstance(magic, list):
        magic = magic
    else:
        magic = [magic]
    # initialise group
    group = group_h5(dst, 'data', status='a', overwrite=overwrite, comm=comm,
                     rank=rank, size=size)
    for key in magic:
        # Pre-create the full-size output dataset for this variable.
        if is_vector(key):
            dataset_h5(group, key, status=status, shape=[3, mz, my, mx],
                       comm=comm, size=size, rank=rank, overwrite=overwrite,
                       dtype=dtype)
            print('writing ' + key + ' shape {}'.format([3, mz, my, mx]))
        else:
            dataset_h5(group, key, status=status, shape=[mz, my, mx],
                       comm=comm, size=size, rank=rank, overwrite=overwrite,
                       dtype=dtype)
            print('writing ' + key + ' shape {}'.format([mz, my, mx]))
        for ichunk in range(allchunks):
            # Each chunk is extended by nghost cells on both sides so the
            # derivatives are valid; the ghost margins are stripped again on
            # write-out unless the chunk touches a domain boundary.
            for iz in [indz[np.mod(ichunk, nchunks[2])]]:
                n1, n2 = iz[0] - nghost,\
                         iz[-1] + nghost + 1
                n1out = n1 + nghost
                n2out = n2 - nghost
                varn1 = nghost
                varn2 = -nghost
                if iz[0] == locindz[0][0]:
                    n1out = 0
                    varn1 = 0
                if iz[-1] == locindz[-1][-1]:
                    n2out = n2
                    varn2 = n2
                for iy in [
                        indy[np.mod(ichunk + int(ichunk / nchunks[2]),
                                    nchunks[1])]
                ]:
                    m1, m2 = iy[0] - nghost,\
                             iy[-1] + nghost + 1
                    m1out = m1 + nghost
                    m2out = m2 - nghost
                    varm1 = nghost
                    varm2 = -nghost
                    if iy[0] == locindy[0][0]:
                        m1out = 0
                        varm1 = 0
                    if iy[-1] == locindy[-1][-1]:
                        m2out = m2
                        varm2 = m2
                    for ix in [
                            indx[np.mod(
                                ichunk + int(ichunk / nchunks[2]) +
                                int(ichunk / nchunks[1]), nchunks[0])]
                    ]:
                        l1, l2 = ix[0] - nghost,\
                                 ix[-1] + nghost + 1
                        l1out = l1 + nghost
                        l2out = l2 - nghost
                        varl1 = nghost
                        varl2 = -nghost
                        if ix[0] == locindx[0][0]:
                            l1out = 0
                            varl1 = 0
                        if ix[-1] == locindx[-1][-1]:
                            l2out = l2
                            varl2 = l2
                        if not quiet:
                            print('remeshing ' + key +
                                  ' chunk {}'.format([iz, iy, ix]))
                        var = calc_derived_data(src['data'], dst['data'],
                                                key, par, gd, l1, l2,
                                                m1, m2, n1, n2,
                                                nghost=nghost)
                        #print('var shape {}'.format(var.shape))
                        #if not quiet:
                        #    print('writing '+key+
                        #          ' shape {} chunk {}'.format(
                        #              var.shape, [iz,iy,ix]))
                        if is_vector(key):
                            dst['data'][key][:, n1out:n2out,
                                             m1out:m2out,
                                             l1out:l2out] = dtype(
                                                 var[:, varn1:varn2,
                                                     varm1:varm2,
                                                     varl1:varl2])
                        else:
                            dst['data'][key][n1out:n2out,
                                             m1out:m2out,
                                             l1out:l2out] = dtype(
                                                 var[varn1:varn2,
                                                     varm1:varm2,
                                                     varl1:varl2])
def read(self, var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
         trimall=False, magic=None, sim=None, precision='d',
         lpersist=False, dtype=np.float64):
    """
    Read VAR files from Pencil Code. If proc < 0, then load all data
    and assemble, otherwise load VAR file from specified processor.

    The file format written by output() (and used, e.g. in var.dat)
    consists of the followinig Fortran records:
    1. data(mx, my, mz, nvar)
    2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
    Here nvar denotes the number of slots, i.e. 1 for one scalar field, 3
    for one vector field, 8 for var.dat in the case of MHD with entropy.
    but, deltay(1) is only there if lshear is on! need to know parameters.

    call signature:

    var(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
        trimall=False, magic=None, sim=None, precision='d')

    Keyword arguments:

    var_file:   Name of the VAR file.
    datadir:    Directory where the data is stored.
    proc:       Processor to be read. If -1 read all and assemble
                to one array.
    ivar:       Index of the VAR file, if var_file is not specified.
    quiet:      Flag for switching off output.
    trimall:    Trim the data cube to exclude ghost zones.
    magic:      Values to be computed from the data, e.g. B = curl(A).
    sim:        Simulation sim object.
    precision:  Float (f), double (d) or half (half).
    dtype:      precision for var.obj, default double
    """
    import os
    from scipy.io import FortranFile
    from pencil.math.derivatives import curl, curl2
    from pencil import read
    from pencil.sim import __Simulation__

    def persist(self, infile=None, precision='d', quiet=quiet):
        """An open Fortran file potentially containing persistent variables
        appended to the f array and grid data are read from the first
        proc data.

        Record types provide the labels and id record for the peristent
        variables in the depricated fortran binary format.
        """
        record_types = {}
        for key in read.record_types.keys():
            # Double-precision records follow the file's read precision.
            if read.record_types[key][1] == 'd':
                record_types[key]=(read.record_types[key][0],
                                   precision)
            else:
                record_types[key] = read.record_types[key]

        # NOTE(review): bare except — any failure to read the first id
        # record is treated as "no persistent data present".
        try:
            tmp_id = infile.read_record('h')
        except:
            return -1
        block_id = 0
        # Scan id/value record pairs until the end-of-block marker (2000).
        for i in range(2000):
            i += 1
            tmp_id = infile.read_record('h')
            block_id = tmp_id[0]
            if block_id == 2000:
                break
            for key in record_types.keys():
                if record_types[key][0] == block_id:
                    tmp_val = infile.read_record(record_types[key][1])
                    self.__setattr__(key, tmp_val[0])
                    if not quiet:
                        print(key, record_types[key][0],
                              record_types[key][1],tmp_val)
        return self

    dim = None
    param = None
    index = None

    # Prefer metadata from a supplied Simulation object, else read it.
    if isinstance(sim, __Simulation__):
        datadir = os.path.expanduser(sim.datadir)
        dim = sim.dim
        param = read.param(datadir=sim.datadir, quiet=True,
                           conflicts_quiet=True)
        index = read.index(datadir=sim.datadir)
    else:
        datadir = os.path.expanduser(datadir)
        if dim is None:
            # 'og*' files use the overset ogrid dimensions; 'VARd*' files
            # are downsampled snapshots.
            if var_file[0:2].lower() == 'og':
                dim = read.ogdim(datadir, proc)
            else:
                if var_file[0:4] == 'VARd':
                    dim = read.dim(datadir, proc, down=True)
                else:
                    dim = read.dim(datadir, proc)
        if param is None:
            param = read.param(datadir=datadir, quiet=quiet,
                               conflicts_quiet=True)
        if index is None:
            index = read.index(datadir=datadir)

    # Total number of f-array slots to read.
    if param.lwrite_aux:
        total_vars = dim.mvar + dim.maux
    else:
        total_vars = dim.mvar

    if os.path.exists(os.path.join(datadir, 'grid.h5')):
        #
        #  Read HDF5 files.
        #
        import h5py
        run2D = param.lwrite_2d

        # Set up the global array.
        if not run2D:
            self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                              dtype=dtype)
        else:
            # 2D runs drop the degenerate axis from the f array.
            if dim.ny == 1:
                self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                  dtype=dtype)
            else:
                self.f = np.zeros((total_vars, dim.my, dim.mx),
                                  dtype=dtype)

        if not var_file:
            if ivar < 0:
                var_file = 'var.h5'
            else:
                var_file = 'VAR' + str(ivar) + '.h5'

        file_name = os.path.join(datadir, 'allprocs', var_file)
        with h5py.File(file_name, 'r') as tmp:
            for key in tmp['data'].keys():
                # index entries are 1-based Fortran slots.
                self.f[index.__getattribute__(key)-1, :] = dtype(
                    tmp['data/'+key][:])
            t = (tmp['time'][()]).astype(precision)
            x = (tmp['grid/x'][()]).astype(precision)
            y = (tmp['grid/y'][()]).astype(precision)
            z = (tmp['grid/z'][()]).astype(precision)
            dx = (tmp['grid/dx'][()]).astype(precision)
            dy = (tmp['grid/dy'][()]).astype(precision)
            dz = (tmp['grid/dz'][()]).astype(precision)
            if param.lshear:
                deltay = (tmp['persist/shear_delta_y'][(0)]).astype(precision)
            if lpersist:
                for key in tmp['persist'].keys():
                    self.__setattr__(
                        key, (tmp['persist'][key][0]).astype(precision))
    else:
        #
        #  Read scattered Fortran binary files.
        #
        run2D = param.lwrite_2d

        if dim.precision == 'D':
            read_precision = 'd'
        else:
            read_precision = 'f'

        if not var_file:
            if ivar < 0:
                var_file = 'var.dat'
            else:
                var_file = 'VAR' + str(ivar)

        if proc < 0:
            # Read every procN directory and assemble the global cube.
            proc_dirs = self.__natural_sort(
                filter(lambda s: s.startswith('proc'),
                       os.listdir(datadir)))
            if (proc_dirs.count("proc_bounds.dat") > 0):
                proc_dirs.remove("proc_bounds.dat")
            if param.lcollective_io:
                # A collective IO strategy is being used
                proc_dirs = ['allprocs']
#                else:
#                    proc_dirs = proc_dirs[::dim.nprocx*dim.nprocy]
        else:
            proc_dirs = ['proc' + str(proc)]

        # Set up the global array.
        if not run2D:
            self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                              dtype=dtype)
        else:
            if dim.ny == 1:
                self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                  dtype=dtype)
            else:
                self.f = np.zeros((total_vars, dim.my, dim.mx),
                                  dtype=dtype)

        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            if not param.lcollective_io:
                # Per-processor files: extract proc id from 'procN'.
                proc = int(directory[4:])
                if var_file[0:2].lower() == 'og':
                    procdim = read.ogdim(datadir, proc)
                else:
                    if var_file[0:4] == 'VARd':
                        procdim = read.dim(datadir, proc, down=True)
                    else:
                        procdim = read.dim(datadir, proc)
                if not quiet:
                    print("Reading data from processor"+
                          " {0} of {1} ...".format(proc, len(proc_dirs)))
            else:
                # A collective IO strategy is being used
                procdim = dim
#                else:
#                    procdim.mx = dim.mx
#                    procdim.my = dim.my
#                    procdim.nx = dim.nx
#                    procdim.ny = dim.ny
#                    procdim.ipx = dim.ipx
#                    procdim.ipy = dim.ipy

            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the data.
            file_name = os.path.join(datadir, directory, var_file)
            infile = FortranFile(file_name)
            if not run2D:
                f_loc = dtype(infile.read_record(dtype=read_precision))
                f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
            else:
                if dim.ny == 1:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, mzloc, mxloc))
                else:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, myloc, mxloc))
            # Second record: t, x, y, z, dx, dy, dz[, deltay].
            raw_etc = infile.read_record(dtype=read_precision)
            if lpersist:
                persist(self, infile=infile,
                        precision=read_precision, quiet=quiet)
            infile.close()

            t = raw_etc[0]
            x_loc = raw_etc[1:mxloc+1]
            y_loc = raw_etc[mxloc+1:mxloc+myloc+1]
            z_loc = raw_etc[mxloc+myloc+1:mxloc+myloc+mzloc+1]
            if param.lshear:
                shear_offset = 1
                deltay = raw_etc[-1]
            else:
                shear_offset = 0
            dx = raw_etc[-3-shear_offset]
            dy = raw_etc[-2-shear_offset]
            dz = raw_etc[-1-shear_offset]

            if len(proc_dirs) > 1:
                # Calculate where the local processor will go in
                # the global array.
                #
                # Don't overwrite ghost zones of processor to the
                # left (and accordingly in y and z direction -- makes
                # a difference on the diagonals)
                #
                # Recall that in NumPy, slicing is NON-INCLUSIVE on
                # the right end, ie, x[0:4] will slice all of a
                # 4-digit array, not produce an error like in idl.
                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0xloc = 0
                    i1xloc = procdim.mx
                else:
                    i0x = procdim.ipx*procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0xloc = procdim.nghostx
                    i1xloc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0yloc = 0
                    i1yloc = procdim.my
                else:
                    i0y = procdim.ipy*procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0yloc = procdim.nghosty
                    i1yloc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z+procdim.mz
                    i0zloc = 0
                    i1zloc = procdim.mz
                else:
                    i0z = procdim.ipz*procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0zloc = procdim.nghostz
                    i1zloc = procdim.mz

                x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                if not run2D:
                    self.f[:, i0z:i1z, i0y:i1y,
                           i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                            i0yloc:i1yloc, i0xloc:i1xloc]
                else:
                    if dim.ny == 1:
                        self.f[:, i0z:i1z,
                               i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                i0xloc:i1xloc]
                    else:
                        # NOTE(review): no leading ':' on the variable axis
                        # here, unlike the other branches — looks
                        # inconsistent with self.f of shape
                        # (total_vars, my, mx); confirm intent.
                        self.f[i0z:i1z, i0y:i1y,
                               i0x:i1x] = f_loc[i0zloc:i1zloc,
                                                i0yloc:i1yloc,
                                                i0xloc:i1xloc]
            else:
                # Single directory: take the local arrays as global.
                self.f = f_loc
                x = x_loc
                y = y_loc
                z = z_loc

    if magic is not None:
        if 'bb' in magic:
            # Compute the magnetic field before doing trimall.
            aa = self.f[index.ax-1:index.az, ...]
            self.bb = dtype(curl(aa, dx, dy, dz, x=x, y=y, run2D=run2D,
                                 coordinate_system=param.coord_system))
            if trimall:
                self.bb = self.bb[:, dim.n1:dim.n2+1,
                                  dim.m1:dim.m2+1, dim.l1:dim.l2+1]
        if 'jj' in magic:
            # Compute the electric current field before doing trimall.
            aa = self.f[index.ax-1:index.az, ...]
            self.jj = dtype(curl2(aa, dx, dy, dz, x=x, y=y,
                                  coordinate_system=param.coord_system))
            if trimall:
                self.jj = self.jj[:, dim.n1:dim.n2+1,
                                  dim.m1:dim.m2+1, dim.l1:dim.l2+1]
        if 'vort' in magic:
            # Compute the vorticity field before doing trimall.
            uu = self.f[index.ux-1:index.uz, ...]
            self.vort = dtype(curl(uu, dx, dy, dz, x=x, y=y, run2D=run2D,
                                   coordinate_system=param.coord_system))
            if trimall:
                if run2D:
                    if dim.nz == 1:
                        self.vort = self.vort[:, dim.m1:dim.m2+1,
                                              dim.l1:dim.l2+1]
                    else:
                        self.vort = self.vort[:, dim.n1:dim.n2+1,
                                              dim.l1:dim.l2+1]
                else:
                    self.vort = self.vort[:, dim.n1:dim.n2+1,
                                          dim.m1:dim.m2+1,
                                          dim.l1:dim.l2+1]

    # Trim the ghost zones of the global f-array if asked.
    if trimall:
        self.x = x[dim.l1:dim.l2+1]
        self.y = y[dim.m1:dim.m2+1]
        self.z = z[dim.n1:dim.n2+1]
        if not run2D:
            self.f = self.f[:, dim.n1:dim.n2+1, dim.m1:dim.m2+1,
                            dim.l1:dim.l2+1]
        else:
            if dim.ny == 1:
                self.f = self.f[:, dim.n1:dim.n2+1, dim.l1:dim.l2+1]
            else:
                self.f = self.f[:, dim.m1:dim.m2+1, dim.l1:dim.l2+1]
    else:
        self.x = x
        self.y = y
        self.z = z
        # Store inclusive-exclusive interior bounds for later slicing.
        self.l1 = dim.l1
        self.l2 = dim.l2 + 1
        self.m1 = dim.m1
        self.m2 = dim.m2 + 1
        self.n1 = dim.n1
        self.n2 = dim.n2 + 1

    # Assign an attribute to self for each variable defined in
    # 'data/index.pro' so that e.g. self.ux is the x-velocity
    aatest = []
    uutest = []
    for key in index.__dict__.keys():
        if 'aatest' in key:
            aatest.append(key)
        if 'uutest' in key:
            uutest.append(key)
        if key != 'global_gg' and key != 'keys' and 'aatest' not in key\
            and 'uutest' not in key:
            value = index.__dict__[key]
            setattr(self, key, self.f[value-1, ...])
    # Special treatment for vector quantities.
    if hasattr(index, 'uu'):
        self.uu = self.f[index.ux-1:index.uz, ...]
    if hasattr(index, 'aa'):
        self.aa = self.f[index.ax-1:index.az, ...]
    if hasattr(index, 'uu_sph'):
        self.uu_sph = self.f[index.uu_sphx-1:index.uu_sphz, ...]
    if hasattr(index, 'bb_sph'):
        self.bb_sph = self.f[index.bb_sphx-1:index.bb_sphz, ...]
    # Special treatment for test method vector quantities.
    #Note index 1,2,3,...,0 last vector may be the zero field/flow
    if hasattr(index, 'aatest1'):
        naatest = int(len(aatest)/3)
        for j in range(0,naatest):
            key = 'aatest'+str(np.mod(j+1,naatest))
            value = index.__dict__['aatest1'] + 3*j
            setattr(self, key, self.f[value-1:value+2, ...])
    if hasattr(index, 'uutest1'):
        nuutest = int(len(uutest)/3)
        for j in range(0,nuutest):
            key = 'uutest'+str(np.mod(j+1,nuutest))
            # NOTE(review): uses index key 'uutest' here while the aatest
            # branch above uses 'aatest1' — confirm the asymmetry is
            # intentional.
            value = index.__dict__['uutest'] + 3*j
            setattr(self, key, self.f[value-1:value+2, ...])

    self.t = t
    self.dx = dx
    self.dy = dy
    self.dz = dz
    if param.lshear:
        self.deltay = deltay

    # Do the rest of magic after the trimall (i.e. no additional curl.)
    self.magic = magic
    if self.magic is not None:
        self.magic_attributes(param, dtype=dtype)
def rhs_data(sim_path, src, dst, magic=["uxb","etadel2a"], par=[], comm=None,
             gd=[], grp_overwrite=False, overwrite=False,
             rank=0, size=1, nghost=3,status="a",
             chunksize = 1000.0, dtype=np.float64, quiet=True, nmin=32,
             Reynolds_shock=False, lmix=False
             ):
    """
    Compute the right-hand-side quantities listed in *magic* from the h5
    source *src* and write them into the 'calc' group of the h5 destination
    *dst*, processing the domain in memory-bounded chunks (optionally split
    across MPI processes).  Mirrors derive_data, but delegates to
    calc_rhs_data and writes to dst['calc'] instead of dst['data'].

    Keyword arguments:

    *sim_path*: path of the simulation directory (used to read param/grid
                when *par*/*gd* are not supplied).
    *src*, *dst*: open h5 objects for input and output.
    *magic*: rhs variable key or list of keys to compute.
    *par*, *gd*: Param and Grid objects; the empty-list defaults act as
                 sentinels meaning "read them from sim_path".
    *grp_overwrite*: overwrite flag for the h5 groups themselves.
    *comm*, *rank*, *size*: MPI communicator and process layout.
    *nghost*: number of ghost zones on each boundary.
    *chunksize*: target maximum chunk size in MB.
    *dtype*: output float type.
    *nmin*: minimum chunk extent passed to cpu_optimal.
    *Reynolds_shock*, *lmix*: options forwarded to calc_rhs_data.
    """
    if comm:
        # In MPI mode datasets are created collectively; do not overwrite.
        overwrite = False
    if isinstance(par, list):
        os.chdir(sim_path)
        par = read.param(quiet=True,conflicts_quiet=True)
    if isinstance(gd, list):
        os.chdir(sim_path)
        gd = read.grid(quiet=True)
    #get data dimensions
    nx, ny, nz = src["settings"]["nx"][0],\
                 src["settings"]["ny"][0],\
                 src["settings"]["nz"][0]
    mx, my, mz = src["settings"]["mx"][0],\
                 src["settings"]["my"][0],\
                 src["settings"]["mz"][0]
    #split data into manageable memory chunks
    # NOTE(review): "8*n/1024*1024" evaluates as (8*n/1024)*1024, i.e. bytes,
    # not MB as the comparison with chunksize suggests — confirm intent.
    dstchunksize = 8*nx*ny*nz/1024*1024
    if dstchunksize > chunksize:
        nchunks = cpu_optimal(nx,ny,nz,quiet=quiet,
                              mvar=src["settings/mvar"][0],
                              maux=src["settings/maux"][0],
                              MBmin=chunksize,nmin=nmin,size=size)[1]
    else:
        nchunks = [1,1,1]
    print("nchunks {}".format(nchunks))
    # for mpi split chunks across processes
    if size > 1:
        # Each rank handles exactly one (x, y, z) chunk triple.
        locindx = np.array_split(np.arange(nx)+nghost,nchunks[0])
        locindy = np.array_split(np.arange(ny)+nghost,nchunks[1])
        locindz = np.array_split(np.arange(nz)+nghost,nchunks[2])
        indx = [locindx[np.mod(rank+int(rank/nchunks[2])
                               +int(rank/nchunks[1]),nchunks[0])]]
        indy = [locindy[np.mod(rank+int(rank/nchunks[2]),nchunks[1])]]
        indz = [locindz[np.mod(rank,nchunks[2])]]
        allchunks = 1
    else:
        # Serial: iterate over every chunk combination.
        locindx = np.array_split(np.arange(nx)+nghost,nchunks[0])
        locindy = np.array_split(np.arange(ny)+nghost,nchunks[1])
        locindz = np.array_split(np.arange(nz)+nghost,nchunks[2])
        indx = np.array_split(np.arange(nx)+nghost,nchunks[0])
        indy = np.array_split(np.arange(ny)+nghost,nchunks[1])
        indz = np.array_split(np.arange(nz)+nghost,nchunks[2])
        allchunks = nchunks[0]*nchunks[1]*nchunks[2]
    # save time
    dataset_h5(dst, "time", status=status, data=src["time"][()],
               comm=comm, size=size, rank=rank,
               overwrite=overwrite, dtype=dtype)
    # ensure derived variables are in a list
    if isinstance(magic, list):
        magic = magic
    else:
        magic = [magic]
    # confirm exists group
    group_h5(dst, "data", status="a", overwrite=grp_overwrite,
             comm=comm, rank=rank, size=size)
    # initialise group
    group = group_h5(dst, "calc", status="a", overwrite=grp_overwrite,
                     comm=comm, rank=rank, size=size)
    for key in magic:
        # Pre-create the full-size output dataset for this variable.
        if is_vector(key):
            dataset_h5(group, key, status=status, shape=[3,mz,my,mx],
                       comm=comm, size=size, rank=rank,
                       overwrite=overwrite, dtype=dtype)
            print("writing "+key+" shape {}".format([3,mz,my,mx]))
        else:
            dataset_h5(group, key, status=status, shape=[mz,my,mx],
                       comm=comm, size=size, rank=rank,
                       overwrite=overwrite, dtype=dtype)
            print("writing "+key+" shape {}".format([mz,my,mx]))
        for ichunk in range(allchunks):
            # Each chunk is extended by nghost cells on both sides so the
            # derivatives are valid; the ghost margins are stripped again on
            # write-out unless the chunk touches a domain boundary.
            for iz in [indz[np.mod(ichunk,nchunks[2])]]:
                n1, n2 = iz[0]-nghost,\
                         iz[-1]+nghost+1
                n1out = n1+nghost
                n2out = n2-nghost
                varn1 = nghost
                varn2 = -nghost
                if iz[0] == locindz[0][0]:
                    n1out = 0
                    varn1 = 0
                if iz[-1] == locindz[-1][-1]:
                    n2out = n2
                    varn2 = n2
                for iy in [indy[np.mod(ichunk+
                                       int(ichunk/nchunks[2]),nchunks[1])]]:
                    m1, m2 = iy[0]-nghost,\
                             iy[-1]+nghost+1
                    m1out = m1+nghost
                    m2out = m2-nghost
                    varm1 = nghost
                    varm2 = -nghost
                    if iy[0] == locindy[0][0]:
                        m1out = 0
                        varm1 = 0
                    if iy[-1] == locindy[-1][-1]:
                        m2out = m2
                        varm2 = m2
                    for ix in [indx[np.mod(ichunk+int(ichunk/nchunks[2])
                                           +int(ichunk/nchunks[1]),
                                           nchunks[0])]]:
                        l1, l2 = ix[0]-nghost,\
                                 ix[-1]+nghost+1
                        l1out = l1+nghost
                        l2out = l2-nghost
                        varl1 = nghost
                        varl2 = -nghost
                        if ix[0] == locindx[0][0]:
                            l1out = 0
                            varl1 = 0
                        if ix[-1] == locindx[-1][-1]:
                            l2out = l2
                            varl2 = l2
                        if not quiet:
                            print("remeshing "+key+" chunk {}".format(
                                  [iz,iy,ix]))
                        var = calc_rhs_data(src, dst, key, par, gd,
                                            l1, l2, m1, m2, n1, n2,
                                            nghost=nghost,
                                            Reynolds_shock=Reynolds_shock,
                                            lmix=lmix)
                        if is_vector(key):
                            dst["calc"][key][:,n1out:n2out,
                                             m1out:m2out,
                                             l1out:l2out] = dtype(
                                                 var[:,varn1:varn2,
                                                     varm1:varm2,
                                                     varl1:varl2])
                        else:
                            dst["calc"][key][n1out:n2out,
                                             m1out:m2out,
                                             l1out:l2out] = dtype(
                                                 var[varn1:varn2,
                                                     varm1:varm2,
                                                     varl1:varl2])
def update(self, hard: bool = False, quiet: bool = True):
    """Refresh cached simulation state on this object.

    If not yet read in:
        - read param.nml into ``self.param`` (plain dict of values)
        - read trimmed grid into ``self.grid`` and the untrimmed grid
          into ``self.ghost_grid``, plus ``self.dim``

    Set hard=True to discard all cached state and force a re-read.
    Calls ``self.export()`` whenever anything was (re)loaded, and
    returns ``self``.
    """
    from os.path import exists
    from os.path import join
    from pencil.read import param, grid, dim

    REEXPORT = False
    if hard == True:
        # Drop all cached state so every section below re-reads from disk.
        self.param = False
        self.grid = False
        self.ghost_grid = False
        self.dim = False
        REEXPORT = True

    if self.param == False:
        try:
            if exists(join(self.datadir, 'param.nml')):
                print('~ Reading param.nml.. ')
                # NOTE: rebinding `param` shadows the imported reader
                # function; it is only called this once, so that is safe.
                param = param(quiet=quiet, datadir=self.datadir)
                self.param = {}
                # read params into Simulation object
                for key in dir(param):
                    if key.startswith('_') or key == 'read':
                        continue
                    if type(getattr(param, key)) in [bool, list, float, int, str]:
                        self.param[key] = getattr(param, key)
                    else:
                        try:
                            # allow for nested param objects: copy their
                            # scalar/list members into a sub-dict
                            self.param[key] = {}
                            for subkey in dir(getattr(param, key)):
                                if subkey.startswith(
                                        '_') or subkey == 'read':
                                    continue
                                if type(
                                        getattr(getattr(param, key),
                                                subkey)) in [
                                        bool, list, float, int, str
                                ]:
                                    self.param[key][subkey] = getattr(
                                        getattr(param, key), subkey)
                        except:
                            # not nested param objects
                            continue
                REEXPORT = True
            else:
                if not quiet:
                    print('? WARNING: for ' + self.path + '\n? Simulation has ' +
                          'not run yet! Meaning: No param.nml found!')
                REEXPORT = True
        except:
            # Deliberate broad except: any read failure resets param to
            # the "not read" sentinel rather than propagating.
            print('! ERROR: while reading param.nml for ' + self.path)
            self.param = False
            REEXPORT = True

    if self.param != False and (self.grid == False or self.ghost_grid == False):
        # read grid only if param is not False
        try:
            print('~ Reading grid.. ')
            self.grid = grid(datadir=self.datadir, trim=True, quiet=True)
            print('~ Reading ghost_grid.. ')
            self.ghost_grid = grid(datadir=self.datadir, trim=False, quiet=True)
            print('~ Reading dim.. ')
            self.dim = dim(datadir=self.datadir)
            if not quiet:
                print('# Updating grid and ghost_grid succesfull')
            REEXPORT = True
            # adding lx, dx etc to params (upper- and lower-case aliases)
            self.param['Lx'] = self.grid.Lx
            self.param['Ly'] = self.grid.Ly
            self.param['Lz'] = self.grid.Lz
            self.param['lx'] = self.grid.Lx
            self.param['ly'] = self.grid.Ly
            self.param['lz'] = self.grid.Lz
            self.param['dx'] = self.grid.dx
            self.param['dy'] = self.grid.dy
            self.param['dz'] = self.grid.dz
        except:
            # Best-effort: a missing/partial grid simply resets the cache.
            if not quiet:
                print(
                    '? WARNING: Updating grid and ghost_grid ' +
                    'was not successfull, since run has not yet started.')
            if self.started() or (not quiet):
                print('? WARNING: Couldnt load grid for ' + self.path)
            self.grid = False
            self.ghost_grid = False
            self.dim = False
            REEXPORT = True
    elif self.param == False:
        if not quiet:
            print('? WARNING: Updating grid and ghost_grid ' +
                  'was not successfull, since run has not yet started.')
        self.grid = False
        self.ghost_grid = False
        self.dim = False
        REEXPORT = True

    if REEXPORT == True:
        self.export()
    return self
def read(self, datadir="data", param=None, dim=None):
    """
    read(datadir='data', param=None, dim=None)

    Read Pencil Code index data from index.pro and attach each index as
    an attribute of this object (e.g. ``iuu`` becomes ``self.uu``).

    Parameters
    ----------
    datadir : string
        Directory where the data is stored.

    param : obj
        Parameter object. Read from *datadir* when None.

    dim : obj
        Dimension object. Read from *datadir* when None.

    Returns
    -------
    Class containing the index information.
    """
    import os
    import re
    import numpy as np
    from pencil import read

    if param is None:
        param = read.param(datadir=datadir, quiet=True)
    if dim is None:
        dim = read.dim(datadir=datadir)

    # Indices above totalvars belong to slots not present in the snapshot.
    if param.lwrite_aux:
        totalvars = dim.mvar + dim.maux
    else:
        totalvars = dim.mvar

    ntestfield, ntestflow, ntestlnrho, ntestscalar = 0, 0, 0, 0
    # BUGFIX: the file handle was opened without ever being closed;
    # a context manager guarantees release.
    with open(os.path.join(datadir, "index.pro")) as index_file:
        for line in index_file.readlines():
            clean = line.strip()
            name = clean.split("=")[0].strip().replace("[", "").replace("]", "")
            if clean.split("=")[1].strip().startswith("intarr(370)"):
                continue
            try:
                val = int(clean.split("=")[1].strip())
            except:
                # Entries like "indgen(n)+m": take the base offset m
                # (np.arange(n)[0] is 0, so this reduces to m).
                val = np.arange(int(
                    re.search(r"\(([0-9]+)\)", clean).group(1)))[0] + int(
                    clean.split("=")[1].strip().split("+")[1])

            if (val != 0 and val <= totalvars
                    and not name.startswith("i_") and name.startswith("i")):
                name = name.lstrip("i")
                if name == "lnTT" and param.ltemperature_nolog:
                    name = "tt"
                # Remember test-field base indices for expansion below.
                if name == "aatest":
                    iaatest = val
                if name == "uutest":
                    iuutest = val
                if name == "hhtest":
                    ihhtest = val
                if name == "cctest":
                    icctest = val
                setattr(self, name, val)
            elif name == "ntestfield":
                ntestfield = val
            elif name == "ntestflow":
                ntestflow = val
            elif name == "ntestlnrho":
                ntestlnrho = val
            elif name == "ntestscalar":
                ntestscalar = val

    # Expand collective test-field entries into numbered attributes;
    # assumes the corresponding base index (e.g. iaatest) was present
    # in index.pro whenever its counter is non-zero.
    if ntestfield > 0:
        self.__delattr__("aatest")
        for i in range(1, ntestfield + 1):
            setattr(self, "aatest" + str(i), iaatest - 1 + i)
    if ntestflow > 0:
        self.__delattr__("uutest")
        for i in range(1, ntestflow + 1):
            setattr(self, "uutest" + str(i), iuutest - 1 + i)
    if ntestlnrho > 0:
        self.__delattr__("hhtest")
        for i in range(1, ntestlnrho + 1):
            setattr(self, "hhtest" + str(i), ihhtest - 1 + i)
    if ntestscalar > 0:
        self.__delattr__("cctest")
        for i in range(1, ntestscalar + 1):
            setattr(self, "cctest" + str(i), icctest - 1 + i)
def kernel_smooth(
    sim_path,
    src,
    dst,
    magic=["meanuu"],
    par=[],
    comm=None,
    gd=[],
    grp_overwrite=False,
    overwrite=False,
    rank=0,
    size=1,
    nghost=3,
    kernel=1.,
    status="a",
    chunksize=1000.0,
    dtype=np.float64,
    quiet=True,
    nmin=32,
    typ='piecewise',
    mode=list(),
):
    """Apply Gaussian kernel smoothing to derived fields chunk-by-chunk.

    Parameters
    ----------
    sim_path : str
        Simulation directory (chdir target when *par*/*gd* must be read).
    src, dst : h5py.File-like
        Source snapshot and destination file; smoothed fields are written
        to ``dst['data'][key + str(nkernel)]``.
    magic : str or list of str
        Field key(s) to smooth via ``smoothed_data``.
    kernel : float
        Smoothing length in grid units; the Gaussian sigma in cells is
        ``kernel / dx``.
    typ, mode
        Boundary-handling options forwarded to ``smoothed_data``;
        overridden below when any chunk dimension is undivided.
    Remaining arguments mirror the other chunked exporters (MPI topology,
    ghost zones, memory budget, dtype).
    """
    if comm:
        overwrite = False
    # Empty-list defaults act as "not supplied" sentinels: read from disk.
    if isinstance(par, list):
        os.chdir(sim_path)
        par = read.param(quiet=True, conflicts_quiet=True)
    if isinstance(gd, list):
        os.chdir(sim_path)
        gd = read.grid(quiet=True)
    # get data dimensions
    nx, ny, nz = (
        src["settings"]["nx"][0],
        src["settings"]["ny"][0],
        src["settings"]["nz"][0],
    )
    mx, my, mz = (
        src["settings"]["mx"][0],
        src["settings"]["my"][0],
        src["settings"]["mz"][0],
    )
    # extend ghost zones to include up to 2.5 * kernel length
    dx = max(src['grid/dx'][()], src['grid/dy'][()], src['grid/dz'][()])
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int.
    nkernel = int(2.5 * kernel / dx)
    sigma = kernel / dx
    print('sigma {:.2f}, kernel {:.2f}, dx {:.2f}'.format(sigma, kernel, dx))
    # split data into manageable memory chunks
    # BUGFIX: was 8*nx*ny*nz/1024*1024 (bytes); threshold expects MB.
    dstchunksize = 8 * nx * ny * nz / 1024**2
    if dstchunksize > chunksize:
        nchunks = cpu_optimal(
            nx,
            ny,
            nz,
            quiet=quiet,
            mvar=src["settings/mvar"][0],
            maux=src["settings/maux"][0],
            MBmin=chunksize,
            nmin=nmin,
            size=size,
        )[1]
    else:
        nchunks = [1, 1, 1]
    print("nchunks {}".format(nchunks))
    # for mpi split chunks across processes
    if size > 1:
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = [
            locindx[np.mod(
                rank + int(rank / nchunks[2]) + int(rank / nchunks[1]),
                nchunks[0])]
        ]
        indy = [locindy[np.mod(rank + int(rank / nchunks[2]), nchunks[1])]]
        indz = [locindz[np.mod(rank, nchunks[2])]]
        allchunks = 1
    else:
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        indy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        indz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        allchunks = nchunks[0] * nchunks[1] * nchunks[2]
    # Undivided dimensions can wrap periodically; divided ones reflect.
    if 1 in nchunks:
        mode = ["reflect", "reflect", "reflect"]
        for ich in range(3):
            if nchunks[ich] == 1:
                mode[2 - ich] = "wrap"
            if mode[2 - ich] == "reflect":
                typ = "piecewise"
            else:
                typ = "all"
    print('mode:', mode, 'typ:', typ)
    # save time
    dataset_h5(
        dst,
        "time",
        status=status,
        data=src["time"][()],
        comm=comm,
        size=size,
        rank=rank,
        overwrite=overwrite,
        dtype=dtype,
    )
    # ensure derived variables are in a list
    if not isinstance(magic, list):
        magic = [magic]
    # initialise group
    group = group_h5(
        dst,
        "data",
        status="a",
        overwrite=grp_overwrite,
        comm=comm,
        rank=rank,
        size=size,
    )
    for key in magic:
        if is_vector(key):
            dataset_h5(
                group,
                key + str(nkernel),
                status=status,
                shape=[3, mz, my, mx],
                comm=comm,
                size=size,
                rank=rank,
                overwrite=overwrite,
                dtype=dtype,
            )
            print("writing " + key + " shape {}".format([3, mz, my, mx]))
        else:
            dataset_h5(
                group,
                key + str(nkernel),
                status=status,
                shape=[mz, my, mx],
                comm=comm,
                size=size,
                rank=rank,
                overwrite=overwrite,
                dtype=dtype,
            )
            print("writing " + key + " shape {}".format([mz, my, mx]))
    # NOTE(review): as in rhs_data, the chunk loop below reuses `key`
    # from the loop above, so only the last entry of `magic` is smoothed —
    # confirm intent before relying on multiple magic keys.
    for ichunk in range(allchunks):
        for iz in [indz[np.mod(ichunk, nchunks[2])]]:
            # Extend the halo by the kernel support when z is chunked.
            if nchunks[2] == 1:
                zextra = nghost
            else:
                zextra = nkernel + nghost
            n1, n2 = iz[0] - zextra, iz[-1] + zextra + 1
            lindz = np.arange(n1, n2)
            n1out = n1 + zextra
            n2out = n2 - zextra
            varn1 = zextra
            varn2 = -zextra
            if iz[0] == locindz[0][0]:
                n1out = 0
                varn1 = zextra - nghost
            if iz[-1] == locindz[-1][-1]:
                n2out = n2 - zextra + nghost
                varn2 = n2 - n1 - zextra + nghost
            # Wrap out-of-range halo indices periodically.
            if n1 < 0:
                lindz[np.where(lindz < nghost)[0]] += nz
            if n2 > mz - 1:
                lindz[np.where(lindz > mz - 1 - nghost)[0]] -= nz
            print('n1out {},n2out {},varn1 {},varn2 {},zextra {}'.format(
                n1out, n2out, varn1, varn2, zextra))
            for iy in [
                    indy[np.mod(ichunk + int(ichunk / nchunks[2]),
                                nchunks[1])]
            ]:
                if nchunks[1] == 1:
                    yextra = nghost
                else:
                    yextra = nkernel + nghost
                m1, m2 = iy[0] - yextra, iy[-1] + yextra + 1
                lindy = np.arange(m1, m2)
                m1out = m1 + yextra
                # NOTE(review): the y/x output bounds carry a "+ 1" the
                # z bounds do not (m2out = m2 + 1 - yextra vs
                # n2out = n2 - zextra) — confirm which convention is right.
                m2out = m2 + 1 - yextra
                varm1 = yextra
                varm2 = -yextra
                if iy[0] == locindy[0][0]:
                    m1out = 0
                    varm1 = yextra - nghost
                if iy[-1] == locindy[-1][-1]:
                    m2out = m2 - yextra + nghost
                    varm2 = m2 - m1 - yextra + nghost
                if m1 < 0:
                    lindy[np.where(lindy < 0)[0]] += ny
                if m2 > my - 1:
                    lindy[np.where(lindy > my - 1)[0]] -= ny
                print(
                    'm1out {},m2out {},varm1 {},varm2 {},yextra {}'.format(
                        m1out, m2out, varm1, varm2, yextra))
                # (A redundant duplicate `for iy in [...]` singleton loop
                # re-binding the identical iy was removed here.)
                for ix in [
                        indx[np.mod(
                            ichunk + int(ichunk / nchunks[2]) +
                            int(ichunk / nchunks[1]),
                            nchunks[0],
                        )]
                ]:
                    # BUGFIX: this tested nchunks[1] (copy-paste from the
                    # y block); the x direction is governed by nchunks[0].
                    if nchunks[0] == 1:
                        xextra = nghost
                    else:
                        xextra = nkernel + nghost
                    l1, l2 = ix[0] - xextra, ix[-1] + xextra + 1
                    lindx = np.arange(l1, l2)
                    l1out = l1 + xextra
                    l2out = l2 + 1 - xextra
                    varl1 = xextra
                    varl2 = -xextra
                    if ix[0] == locindx[0][0]:
                        l1out = 0
                        varl1 = xextra - nghost
                    if ix[-1] == locindx[-1][-1]:
                        l2out = l2 - xextra + nghost
                        varl2 = l2 - l1 - xextra + nghost
                    if l1 < 0:
                        lindx[np.where(lindx < 0)[0]] += nx
                    if l2 > mx - 1:
                        lindx[np.where(lindx > mx - 1)[0]] -= nx
                    print('l1out {},l2out {},varl1 {},varl2 {},xextra {}'.
                          format(l1out, l2out, varl1, varl2, xextra))
                    if not quiet:
                        print("remeshing " + key +
                              " chunk {}".format([iz, iy, ix]))
                    print('sending ichunk {} with index ranges {}'.format(
                        ichunk, [n1, n2, m1, m2, l1, l2]))
                    var = smoothed_data(src["data"], dst["data"], key, par,
                                        gd, lindx, lindy, lindz, nghost,
                                        sigma, typ, mode)
                    print(
                        'ichunk {}, var min {:.1e}, var max {:.1e}'.format(
                            ichunk, var.min(), var.max()))
                    print('ichunk: out indices {}'.format(
                        [n1out, n2out, m1out, m2out, l1out, l2out]))
                    # Write the interior of the smoothed chunk back.
                    if is_vector(key):
                        dst["data"][key + str(nkernel)][:, n1out:n2out,
                                                        m1out:m2out,
                                                        l1out:l2out] = dtype(
                            var[:, varn1:varn2, varm1:varm2, varl1:varl2])
                    else:
                        dst["data"][key + str(nkernel)][n1out:n2out,
                                                        m1out:m2out,
                                                        l1out:l2out] = dtype(
                            var[varn1:varn2, varm1:varm2, varl1:varl2])
def var2vtk(var_file='var.dat', datadir='data', proc=-1,
            variables=None, b_ext=False, magic=None,
            destination='work', quiet=True, trimall=True,
            ti=-1, tf=-1):
    """
    Convert data from PencilCode format to vtk.

    call signature::

      var2vtk(var_file='', datadir='data', proc=-1,
             variables='', b_ext=False,
             destination='work', quiet=True, trimall=True, ti=-1, tf=-1)

    Read *var_file* and convert its content into vtk format. Write the
    result in *destination*.

    Keyword arguments:

      *var_file*:
        The original var_file.

      *datadir*:
        Directory where the data is stored.

      *proc*:
        Processor which should be read. Set to -1 for all processors.

      *variables*:
        List of variables which should be written. If None all.

      *b_ext*:
        Add the external magnetic field.

      *magic*:
        Derived quantities to compute (filled in automatically from
        *variables* when omitted).

      *destination*:
        Destination file.

      *quiet*:
        Keep quiet when reading the var files.

      *trimall*:
        Trim the data cube to exclude ghost zones.

      *ti, tf*:
        Start and end index for animation. Leave negative for no animation.
        Overwrites variable var_file.
    """

    import numpy as np
    import sys
    from pencil import read
    from pencil import math

    # BUGFIX: magic=[] as a default was mutated by the .append calls
    # below, so entries accumulated across successive calls.
    if magic is None:
        magic = []

    # Determine if we want an animation.
    if ti < 0 or tf < 0:
        animation = False
    else:
        animation = True

    # If no variables specified collect all by default
    if not variables:
        variables = []
        indx = read.index()
        for key in indx.__dict__.keys():
            if 'keys' not in key:
                variables.append(key)
        if 'uu' in variables:
            magic.append('vort')
            variables.append('vort')
        if 'rho' in variables or 'lnrho' in variables:
            if 'ss' in variables:
                magic.append('tt')
                variables.append('tt')
                magic.append('pp')
                variables.append('pp')
        if 'aa' in variables:
            magic.append('bb')
            variables.append('bb')
            magic.append('jj')
            variables.append('jj')
            variables.append('ab')
            variables.append('b_mag')
            variables.append('j_mag')
    else:
        # Convert single variable string into length 1 list of arrays.
        if (len(variables) > 0):
            if (len(variables[0]) == 1):
                variables = [variables]
        if 'tt' in variables:
            magic.append('tt')
        if 'pp' in variables:
            magic.append('pp')
        if 'bb' in variables:
            magic.append('bb')
        if 'jj' in variables:
            magic.append('jj')
        if 'vort' in variables:
            magic.append('vort')
        if 'b_mag' in variables and not 'bb' in magic:
            magic.append('bb')
        if 'j_mag' in variables and not 'jj' in magic:
            magic.append('jj')
        if 'ab' in variables and not 'bb' in magic:
            magic.append('bb')

    for t_idx in range(ti, tf + 1):
        if animation:
            var_file = 'VAR' + str(t_idx)

        # Read the PencilCode variables and set the dimensions.
        # BUGFIX: trimall was hard-coded True here while the grid below
        # honors the parameter, mismatching dimensions for trimall=False.
        var = read.var(var_file=var_file, datadir=datadir, proc=proc,
                       magic=magic, trimall=trimall, quiet=quiet)

        grid = read.grid(datadir=datadir, proc=proc, trim=trimall, quiet=True)

        params = read.param(quiet=True)

        # Add external magnetic field.
        if (b_ext == True):
            B_ext = np.array(params.b_ext)
            var.bb[0, ...] += B_ext[0]
            var.bb[1, ...] += B_ext[1]
            var.bb[2, ...] += B_ext[2]

        dimx = len(grid.x)
        dimy = len(grid.y)
        dimz = len(grid.z)
        dim = dimx * dimy * dimz
        dx = (np.max(grid.x) - np.min(grid.x)) / (dimx - 1)
        dy = (np.max(grid.y) - np.min(grid.y)) / (dimy - 1)
        dz = (np.max(grid.z) - np.min(grid.z)) / (dimz - 1)

        # Write the vtk header.
        if animation:
            fd = open(destination + str(t_idx) + '.vtk', 'wb')
        else:
            fd = open(destination + '.vtk', 'wb')
        fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
        fd.write('VAR files\n'.encode('utf-8'))
        fd.write('BINARY\n'.encode('utf-8'))
        fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(dimx, dimy,
                                                         dimz).encode('utf-8'))
        fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
            grid.x[0], grid.y[0], grid.z[0]).encode('utf-8'))
        fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
            dx, dy, dz).encode('utf-8'))
        fd.write('POINT_DATA {0:9}\n'.format(dim).encode('utf-8'))

        # Write the data.
        for v in variables:
            print('Writing {0}.'.format(v))
            # Prepare the data to the correct format.
            if v == 'ab':
                data = math.dot(var.aa, var.bb)
            elif v == 'b_mag':
                data = np.sqrt(math.dot2(var.bb))
            elif v == 'j_mag':
                data = np.sqrt(math.dot2(var.jj))
            else:
                data = getattr(var, v)
            # Legacy VTK is big-endian: swap on little-endian hosts.
            if sys.byteorder == 'little':
                data = data.astype(np.float32).byteswap()
            else:
                data = data.astype(np.float32)
            # Check if we have vectors or scalars.
            if data.ndim == 4:
                data = np.moveaxis(data, 0, 3)
                fd.write('VECTORS {0} float\n'.format(v).encode('utf-8'))
            else:
                fd.write('SCALARS {0} float\n'.format(v).encode('utf-8'))
                fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            fd.write(data.tobytes())

        del (var)

        fd.close()
def slices2vtk(field='', extension='', datadir='data', destination='slices', proc=-1):
    """
    Convert slices from PencilCode format to vtk.

    call signature::

      slices2vtk(field='', extension='', datadir='data', destination='slices', proc=-1)

    Read slice files specified by *variables* and convert
    them into vtk format for the specified extensions.
    Write the result in *destination*.
    NB: You need to have called src/read_videofiles.x before using this script.

    Keyword arguments:

      *field*:
        All allowed fields which can be written as slice files, e.g. b2, uu1, lnrho, ...
        See the pencil code manual for more (chapter: "List of parameters for `video.in'").

      *extension*:
        List of slice positions.

      *datadir*:
        Directory where the data is stored.

      *destination*:
        Destination files.

      *proc*:
        Processor which should be read. Set to -1 for all processors.
    """

    import sys
    import numpy as np
    from pencil import read

    # Convert single variable string into length 1 list of arrays.
    if (len(field) > 0):
        if (len(field[0]) == 1):
            field = [field]
    if (len(extension) > 0):
        if (len(extension[0]) == 1):
            extension = [extension]

    # Read the grid dimensions.
    grid = read.grid(datadir=datadir, proc=proc, trim=True, quiet=True)

    # Read the dimensions.
    dim = read.dim(datadir=datadir, proc=proc)

    # Read the user given parameters for the slice positions.
    params = read.param(quiet=True)

    # Read the slice file for all specified variables and extensions.
    slices = read.slices(field=field, extension=extension,
                         datadir=datadir, proc=proc)

    # Determine the position of the slices.
    # NOTE(review): x0/y0/z0/z02 stay unbound if neither the explicit
    # index is set nor slice_position == 'm' — assumes param guarantees
    # one of the two; confirm against the simulation setup.
    if params.ix != -1:
        x0 = grid.x[params.ix]
    elif params.slice_position == 'm':
        x0 = grid.x[int(len(grid.x) / 2)]
    if params.iy != -1:
        y0 = grid.y[params.iy]
    elif params.slice_position == 'm':
        y0 = grid.y[int(len(grid.y) / 2)]
    if params.iz != -1:
        z0 = grid.z[params.iz]
    elif params.slice_position == 'm':
        z0 = grid.z[int(len(grid.z) / 2)]
    if params.iz2 != -1:
        # BUGFIX: this indexed grid.z with params.iz (copy-paste from the
        # z0 branch); the second z-slice position is params.iz2.
        z02 = grid.z[params.iz2]
    elif params.slice_position == 'm':
        z02 = grid.z[int(len(grid.z) / 2)]

    for t_idx, t in enumerate(slices.t):
        for ext in extension:
            # Open the destination file for writing.
            fd = open(destination + '_' + ext + '_' + str(t_idx) + '.vtk', 'wb')

            # Write the header.
            fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
            fd.write('slices {0}\n'.format(ext).encode('utf-8'))
            fd.write('BINARY\n'.encode('utf-8'))
            fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
            if ext == 'xy':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    dim.nx, dim.ny, 1).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.x[0], grid.y[0], z0).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.dx, grid.dy, 1.).encode('utf-8'))
                dim_p = dim.nx
                dim_q = dim.ny
            if ext == 'xy2':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    dim.nx, dim.ny, 1).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.x[0], grid.y[0], z02).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.dx, grid.dy, 1.).encode('utf-8'))
                dim_p = dim.nx
                dim_q = dim.ny
            if ext == 'xz':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    dim.nx, 1, dim.nz).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.x[0], y0, grid.z[0]).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.dx, 1., grid.dz).encode('utf-8'))
                dim_p = dim.nx
                dim_q = dim.nz
            if ext == 'yz':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    1, dim.ny, dim.nz).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    x0, grid.y[0], grid.z[0]).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    1., grid.dy, grid.dz).encode('utf-8'))
                dim_p = dim.ny
                dim_q = dim.nz
            fd.write('POINT_DATA {0:9}\n'.format(dim_p * dim_q).encode('utf-8'))

            # Write the data.
            for fi in field:
                data = getattr(getattr(slices, ext), fi)
                fd.write(
                    ('SCALARS ' + ext + '_' + fi + ' float\n').encode('utf-8'))
                fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
                # Legacy VTK is big-endian: swap on little-endian hosts.
                if sys.byteorder == 'little':
                    data = data.astype(np.float32).byteswap()
                else:
                    data = data.astype(np.float32)
                fd.write(data[t_idx].tobytes())

            fd.close()
def calc_shocktube(
    self,
    xarr,
    time,
    par=list(),
    lreference=False,
    DEBUG=False,
    lplot=False,
    itplot=0,
    magic=None,
):
    """Analytic Sod shock-tube solution sampled on *xarr* at *time*.

    *xarr*: Coordinate vector (the initial discontinuity is always at x=0)

    *time*: Time after membrane snapped.

    *par*: Param object (a bare list triggers reading via ``param()``).

    *lreference*: Use default parameters for Sod's reference problem.

    *DEBUG*: Flag to switch on output

    *lplot*: Plot first snapshot profiles

    *itplot*: Iteration index of snaphot to plot

    *magic*: Optional profiles to include in Sod object
             (default ["ee", "tt", "Ms"]).

    Results are attached to *self* as ``t``, ``x``, ``rho``, ``ux``,
    ``pp`` and the requested magic profiles.
    """
    # BUGFIX: the mutable default list was mutated via magic.remove()
    # below, shrinking the default across calls; copy the caller's list
    # for the same reason.
    if magic is None:
        magic = ["ee", "tt", "Ms"]
    else:
        magic = list(magic)
    # Apply, update and append parameters from simulation directory
    if isinstance(par, list):
        par = param()
    if lreference:
        print("lreference is True: running Sod's reference problem")
        par.uu_right = 0.0
        par.uu_left = 0.0
        par.rho_right = [
            0.125,
        ]
        par.rho_left = [
            1.0,
        ]
        par.gamma = 1.4
        par.ss_right = np.log(0.1) / par.gamma - np.log(0.125)  # pressure=0.1
        par.ss_left = np.log(3.0) / par.gamma - np.log(1.0)  # pressure=3.0
    # Thermodynamic shorthands derived from cp and gamma.
    cv1 = par.gamma / par.cp
    cp1 = 1.0 / par.cp
    cv = par.cp / par.gamma
    gamma_m1 = par.gamma - 1.0
    gamma1 = 1.0 / par.gamma
    cpmcv1 = 1.0 / (par.cp - cv)
    if par.gamma == 1.0:
        lnTT0 = np.log(par.cs0 ** 2 / par.cp)
    else:
        lnTT0 = np.log(par.cs0 ** 2 / (par.cp * gamma_m1))
    lnrho0 = np.log(par.rho0)
    lnTT_l = (
        lnTT0 + cv1 * par.ss_left
        + gamma_m1 * (np.log(par.rho_left[0]) - lnrho0)
    )
    lnTT_r = (
        lnTT0 + cv1 * par.ss_right
        + gamma_m1 * (np.log(par.rho_right[0]) - lnrho0)
    )
    par.__setattr__(
        "pp_left", (par.cp - cv) * np.exp(lnTT_l + np.log(par.rho_left[0]))
    )
    par.__setattr__(
        "pp_right", (par.cp - cv) * np.exp(lnTT_r + np.log(par.rho_right[0]))
    )
    ## Warn about imperfections:
    if not par.uu_left == 0.0 or not par.uu_right == 0.0:
        print(
            "Case initially not at rest not yet implemented"
            + " -- results not valid"
        )
    if DEBUG:
        for key in ["pp_left", "pp_right"]:
            print(key, par.__getattribute__(key))
    csl = np.sqrt(par.gamma * par.pp_left / par.rho_left[0])  # left sound speed
    # iteratively find p3/pl (fixed-point iteration, 20 passes)
    p3 = par.pp_left * (par.pp_right / par.pp_left) ** 0.2  # initial guess
    for i in range(1, 21):
        u3 = (
            csl * 2 / gamma_m1
            * (1 - (p3 / par.pp_left) ** (gamma_m1 / 2 / par.gamma))
        )
        p3 = par.pp_right + (u3 - par.uu_right) * np.sqrt(
            par.rho_right[0] / 2 * ((par.gamma + 1) * p3 + gamma_m1 * par.pp_right)
        )
        if DEBUG:
            print("p3/pl {}, u3 {}".format(p3 / par.pp_left, u3))
    rho3 = par.rho_left[0] * (p3 / par.pp_left) ** (1.0 / par.gamma)
    cs3 = np.sqrt(par.gamma * p3 / rho3)
    p4 = p3
    u4 = u3
    # velocity of shock front
    us = par.uu_right + (par.pp_right - p4) / (par.uu_right - u4) / par.rho_right[0]
    rho4 = -(par.pp_right - p4) / (par.uu_right - u4) / (u4 - us)
    cs4 = np.sqrt(par.gamma * p4 / rho4)
    if not isinstance(time, np.ndarray):
        time = np.array(time)
    if not isinstance(xarr, np.ndarray):
        xarr = np.array(xarr)
    print("xarr shape: {}".format(xarr.shape))
    # declare fields
    ux = np.empty([time.size, xarr.size])
    pp = ux.copy()
    rh = ux.copy()
    # BUGFIX: iterating `magic` while removing from it skipped elements;
    # iterate a snapshot instead.
    for mag in list(magic):
        if "tt" == mag:
            tt = ux.copy()
        elif "ee" == mag:
            ee = ux.copy()
        elif "Ms" == mag:
            Ms = ux.copy()
        else:
            print("Please implement calculation of {}".format(mag))
            magic.remove(mag)
    # iterate solution for each time
    for it in range(time.size):
        ## positions of separating faces
        x1 = (par.uu_left - csl) * time[it]
        if DEBUG:
            print("x1 {}, csl {}, par.uu_left {}".format(x1, csl, par.uu_left))
        x2 = (u3 - cs3) * time[it]
        x3 = u4 * time[it]
        x4 = us * time[it]
        ## calculate profiles
        left = np.where(xarr <= x1)[0]
        if len(left) > 0:
            # expansion region
            reg2 = np.where(xarr[left[-1] + 1 :] < x2)[0] + left[-1] + 1
        else:
            reg2 = np.where(xarr < x2)[0]
        if len(reg2) > 0:
            reg3 = np.where(xarr[reg2[-1] + 1 :] < x3)[0] + reg2[-1] + 1
        else:
            if len(left) > 0:
                reg3 = np.where(xarr[left[-1] + 1 :] < x3)[0] + left[-1] + 1
            else:
                reg3 = np.where(xarr < x3)[0]
        if len(reg3) > 0:
            reg4 = np.where(xarr[reg3[-1] + 1 :] < x4)[0] + reg3[-1] + 1
        else:
            if len(reg2) > 0:
                reg4 = np.where(xarr[reg2[-1] + 1 :] < x4)[0] + reg2[-1] + 1
            else:
                if len(left) > 0:
                    reg4 = np.where(xarr[left[-1] + 1 :] < x4)[0] + left[-1] + 1
                else:
                    reg4 = np.where(xarr < x4)[0]
        right = np.where(xarr > x4)
        if len(left) > 0:
            ux[it, left] = par.uu_left
            pp[it, left] = par.pp_left
            rh[it, left] = par.rho_left[0]
        if len(reg2) > 0:
            ux[it, reg2] = (
                2 / (par.gamma + 1)
                * (csl + xarr[reg2] / time[it] + gamma_m1 / 2 * par.uu_right)
            )
            pp[it, reg2] = par.pp_left * (
                1 - gamma_m1 / 2 * ux[it, reg2] / csl
            ) ** (2 * par.gamma / gamma_m1)
            rh[it, reg2] = par.rho_left[0] * (
                1 - gamma_m1 / 2 * ux[it, reg2] / csl
            ) ** (2 / gamma_m1)
        if len(reg3) > 0:
            ux[it, reg3] = u3
            pp[it, reg3] = p3
            rh[it, reg3] = rho3
        if len(reg4) > 0:
            ux[it, reg4] = u4
            pp[it, reg4] = p4
            rh[it, reg4] = rho4
        if len(right) > 0:
            ux[it, right] = par.uu_right
            pp[it, right] = par.pp_right
            rh[it, right] = par.rho_right[0]
        if "tt" in magic:
            tt[it] = pp[it] * cpmcv1 / rh[it]
        if "ee" in magic:
            if not "tt" in magic:
                ee[it] = pp[it] / (gamma_m1 * rh[it])
            else:
                ee[it] = cv * tt[it]
        if "Ms" in magic:
            if not "tt" in magic:
                Ms[it] = ux[it] * np.sqrt(gamma1 * rh[it] / pp[it])
            else:
                Ms[it] = ux[it] / np.sqrt(par.cp * gamma_m1 * tt[it])
    setattr(self, "t", time)
    setattr(self, "x", xarr)
    setattr(self, "rho", rh)
    setattr(self, "ux", ux)
    setattr(self, "pp", pp)
    if "ee" in magic:
        setattr(self, "ee", ee)
    if "tt" in magic:
        setattr(self, "tt", tt)
    if "Ms" in magic:
        setattr(self, "Ms", Ms)
    if lplot:
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
        ax1.semilogy(xarr, pp[itplot])
        ax1.set(ylabel=r"$p$")
        ax2.semilogy(xarr, rh[itplot])
        ax2.set(ylabel=r"$\rho$")
        ax3.plot(xarr, ux[itplot])
        ax3.set(ylabel=r"$u$")
        ax1.set_title(r"$t={:.3e}$".format(time[itplot]))
        plt.tight_layout()
        plt.show()
        if len(magic) > 0:
            fig, ax = plt.subplots(3, 1, sharex=True)
            for ip in range(len(magic)):
                if magic[ip] == "tt":
                    ylabel = r"$T$"
                elif magic[ip] == "ee":
                    ylabel = r"$e$"
                else:
                    ylabel = r"${}$".format(magic[ip])
                ax[ip].plot(xarr, self.__getattribute__(magic[ip])[itplot])
                ax[ip].set(ylabel=ylabel)
            ax[0].set_title(r"$t={:.3e}$".format(time[itplot]))
            plt.tight_layout()
            plt.show()
    if DEBUG:
        print("u3={}, u4 ={}".format(u3, u4))
        print("p3={}, p4 ={}".format(p3, p4))
        print("rho4 ={}".format(rho4))
        print("rho3 ={}".format(rho3))
        print("V1 ={}".format(par.uu_left - csl))
        print("V2 ={}".format(u4 - cs3))
        print("V3 ={}".format(u4))
        print("V4 ={}".format(us))
def derive_stats(sim_path, src, dst, stat_keys=['Rm', 'uu', 'Ms'],
                 par=[], comm=None, overwrite=False, rank=0, size=1,
                 nghost=3, status='a', chunksize=1000.0, quiet=True,
                 nmin=32, lmask=False, mask_key='hot'):
    """Compute mean and standard deviation of derived fields chunk-by-chunk
    and store them as scalars under ``dst['stats']``.

    Parameters
    ----------
    sim_path : str
        Simulation directory (chdir target when *par* must be read).
    src, dst : h5py.File-like
        Source snapshot and destination file; results go to the 'stats'
        group of *dst*.
    stat_keys : str or list of str
        Keys to process; 'uu'/'aa' are assembled from their x/y/z
        components, other keys are looked up in src['data'] or dst['data'].
    lmask, mask_key : bool, str
        When *lmask*, additionally accumulate statistics split by the
        boolean mask ``dst['masks'][mask_key]``.
    comm, rank, size
        MPI communicator and topology; partial sums are gathered to root
        and broadcast back before reduction.
    chunksize : float
        Memory budget per chunk in MB.
    """
    if comm:
        overwrite = False
    # Empty-list default acts as a "not supplied" sentinel: read from disk.
    if isinstance(par, list):
        os.chdir(sim_path)
        par = read.param(quiet=True, conflicts_quiet=True)
    #get data dimensions
    nx, ny, nz = src['settings']['nx'][0],\
                 src['settings']['ny'][0],\
                 src['settings']['nz'][0]
    mx, my, mz = src['settings']['mx'][0],\
                 src['settings']['my'][0],\
                 src['settings']['mz'][0]
    #split data into manageable memory chunks
    # BUGFIX: was 8*nx*ny*nz/1024*1024 (bytes); threshold expects MB.
    dstchunksize = 8 * nx * ny * nz / 1024**2
    if dstchunksize > chunksize:
        nchunks = cpu_optimal(nx, ny, nz, quiet=quiet,
                              mvar=src['settings/mvar'][0],
                              maux=src['settings/maux'][0],
                              MBmin=chunksize, nmin=nmin, size=size)[1]
    else:
        nchunks = [1, 1, 1]
    print('nchunks {}'.format(nchunks))
    # for mpi split chunks across processes
    if size > 1:
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = [
            locindx[np.mod(
                rank + int(rank / nchunks[2]) + int(rank / nchunks[1]),
                nchunks[0])]
        ]
        indy = [locindy[np.mod(rank + int(rank / nchunks[2]), nchunks[1])]]
        indz = [locindz[np.mod(rank, nchunks[2])]]
        allchunks = 1
    else:
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        indy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        indz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        allchunks = nchunks[0] * nchunks[1] * nchunks[2]
    # ensure derived variables are in a list
    if not isinstance(stat_keys, list):
        stat_keys = [stat_keys]
    # initialise group
    group = group_h5(dst, 'stats', status='a', overwrite=overwrite,
                     comm=comm, rank=rank, size=size)
    for key in stat_keys:
        # Per-chunk partial statistics (mask counts weight the averages).
        mean_stat = list()
        stdv_stat = list()
        mean_mask = list()
        stdv_mask = list()
        nmask_msk = list()
        mean_nmsk = list()
        stdv_nmsk = list()
        nmask_nmk = list()
        for ichunk in range(allchunks):
            for iz in [indz[np.mod(ichunk, nchunks[2])]]:
                n1, n2 = iz[0],\
                         iz[-1] + 1
                for iy in [
                        indy[np.mod(ichunk + int(ichunk / nchunks[2]),
                                    nchunks[1])]
                ]:
                    m1, m2 = iy[0],\
                             iy[-1] + 1
                    for ix in [
                            indx[np.mod(
                                ichunk + int(ichunk / nchunks[2]) +
                                int(ichunk / nchunks[1]), nchunks[0])]
                    ]:
                        l1, l2 = ix[0],\
                                 ix[-1] + 1
                        if key in src['data'].keys():
                            var = src['data'][key][n1:n2, m1:m2, l1:l2]
                        elif key == 'uu' or key == 'aa':
                            # Assemble vector magnitude from components.
                            tmp = np.array([
                                src['data'][key[0] + 'x'][n1:n2, m1:m2, l1:l2],
                                src['data'][key[0] + 'y'][n1:n2, m1:m2, l1:l2],
                                src['data'][key[0] + 'z'][n1:n2, m1:m2, l1:l2]
                            ])
                            var = np.sqrt(dot2(tmp))
                        else:
                            if key in dst['data'].keys():
                                if is_vector(key):
                                    var = np.sqrt(
                                        dot2(dst['data'][key][:, n1:n2,
                                                              m1:m2,
                                                              l1:l2]))
                                else:
                                    var = dst['data'][key][n1:n2, m1:m2,
                                                           l1:l2]
                            else:
                                print('stats: ' + key + ' does not exist in ',
                                      src, 'or', dst)
                                continue
                        if lmask:
                            mask = dst['masks'][mask_key][0, n1:n2, m1:m2,
                                                          l1:l2]
                            Nmask = mask[mask == False].size
                            if Nmask > 0:
                                mean_mask.append(var[mask == False].mean() *
                                                 Nmask)
                                stdv_mask.append(var[mask == False].std() *
                                                 Nmask)
                            else:
                                mean_mask.append(0)
                                stdv_mask.append(0)
                            nmask_msk.append(Nmask)
                            nmask = mask[mask == True].size
                            if nmask > 0:
                                mean_nmsk.append(var[mask == True].mean() *
                                                 nmask)
                                stdv_nmsk.append(var[mask == True].std() *
                                                 nmask)
                            else:
                                mean_nmsk.append(0)
                                stdv_nmsk.append(0)
                            nmask_nmk.append(nmask)
                        mean_stat.append(var.mean())
                        stdv_stat.append(var.std())
        if comm:
            # Gather partial lists on root then broadcast the combined
            # lists back so every rank computes the same reduction.
            if lmask:
                mean_mask = comm.gather(mean_mask, root=0)
                stdv_mask = comm.gather(stdv_mask, root=0)
                mean_mask = comm.bcast(mean_mask, root=0)
                stdv_mask = comm.bcast(stdv_mask, root=0)
                mean_nmsk = comm.gather(mean_nmsk, root=0)
                stdv_nmsk = comm.gather(stdv_nmsk, root=0)
                mean_nmsk = comm.bcast(mean_nmsk, root=0)
                stdv_nmsk = comm.bcast(stdv_nmsk, root=0)
                nmask_msk = comm.gather(nmask_msk, root=0)
                nmask_nmk = comm.gather(nmask_nmk, root=0)
                nmask_msk = comm.bcast(nmask_msk, root=0)
                nmask_nmk = comm.bcast(nmask_nmk, root=0)
            mean_stat = comm.gather(mean_stat, root=0)
            stdv_stat = comm.gather(stdv_stat, root=0)
            mean_stat = comm.bcast(mean_stat, root=0)
            stdv_stat = comm.bcast(stdv_stat, root=0)
        if lmask:
            # Count-weighted averages over the masked/unmasked cells.
            summk = np.sum(nmask_msk)
            if summk > 0:
                meanm = np.sum(mean_mask) / summk
                stdvm = np.sum(stdv_mask) / summk
            else:
                meanm = 0
                stdvm = 0
            sumnk = np.sum(nmask_nmk)
            if sumnk > 0:
                meann = np.sum(mean_nmsk) / sumnk
                stdvn = np.sum(stdv_nmsk) / sumnk
            else:
                meann = 0
                stdvn = 0
            print(mask_key + '-' + key + '-mean = {}, '.format(meanm) +
                  mask_key + '-' + key + '-std = {}'.format(stdvm))
            print('not-' + mask_key + '-' + key +
                  '-mean = {}, '.format(meann) + 'not-' + mask_key + '-' +
                  key + '-std = {}'.format(stdvn))
            dataset_h5(group, mask_key + '-' + key + '-mean',
                       status=status, data=meanm, comm=comm, size=size,
                       rank=rank, overwrite=True)
            dataset_h5(group, mask_key + '-' + key + '-std',
                       status=status, data=stdvm, comm=comm, size=size,
                       rank=rank, overwrite=True)
            dataset_h5(group, 'not-' + mask_key + '-' + key + '-mean',
                       status=status, data=meann, comm=comm, size=size,
                       rank=rank, overwrite=True)
            dataset_h5(group, 'not-' + mask_key + '-' + key + '-std',
                       status=status, data=stdvn, comm=comm, size=size,
                       rank=rank, overwrite=True)
        mstat = np.mean(mean_stat)
        dstat = np.mean(stdv_stat)
        print(key + '-mean = {}, '.format(mstat) + key +
              '-std = {}'.format(dstat))
        dataset_h5(group, key + '-mean', status=status, data=mstat,
                   comm=comm, size=size, rank=rank, overwrite=True)
        dataset_h5(group, key + '-std', status=status, data=dstat,
                   comm=comm, size=size, rank=rank, overwrite=True)
def read(
    self,
    var_file="",
    datadir="data",
    proc=-1,
    ivar=-1,
    quiet=True,
    trimall=False,
    magic=None,
    sim=None,
    precision="d",
    lpersist=False,
    dtype=np.float64,
):
    """
    read(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
         trimall=False, magic=None, sim=None, precision='f')

    Read VAR files from Pencil Code. If proc < 0, then load all data
    and assemble, otherwise load VAR file from specified processor.

    The file format written by output() (and used, e.g. in var.dat)
    consists of the following Fortran records:
    1. data(mx, my, mz, nvar)
    2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
    Here nvar denotes the number of slots, i.e. 1 for one scalar field, 3
    for one vector field, 8 for var.dat in the case of MHD with entropy.
    but, deltay(1) is only there if lshear is on! need to know parameters.

    Parameters
    ----------
    var_file : string
        Name of the VAR file.
        If not specified, use var.dat (which is the latest snapshot of the fields)

    datadir : string
        Directory where the data is stored.

    proc : int
        Processor to be read. If -1 read all and assemble to one array.

    ivar : int
        Index of the VAR file, if var_file is not specified.

    quiet : bool
        Flag for switching off output.

    trimall : bool
        Trim the data cube to exclude ghost zones.

    magic : bool
        Values to be computed from the data, e.g. B = curl(A).

    sim : pencil code simulation object
        Contains information about the local simulation.

    precision : string
        Float 'f', double 'd' or half 'half'.

    lpersist : bool
        Read the persistent variables if they exist

    Returns
    -------
    DataCube
        Instance of the pencil.read.var.DataCube class.
        All of the computed fields are imported as class members.

    Examples
    --------
    Read the latest var.dat file and print the shape of the uu array:
    >>> var = pc.read.var()
    >>> print(var.uu.shape)

    Read the VAR2 file, compute the magnetic field B = curl(A),
    the vorticity omega = curl(u) and remove the ghost zones:
    >>> var = pc.read.var(var_file='VAR2', magic=['bb', 'vort'], trimall=True)
    >>> print(var.bb.shape)
    """
    import os
    from scipy.io import FortranFile
    from pencil.math.derivatives import curl, curl2
    from pencil import read
    from pencil.sim import __Simulation__

    def persist(self, infile=None, precision="d", quiet=quiet):
        """An open Fortran file potentially containing persistent variables
        appended to the f array and grid data are read from the first proc data

        Record types provide the labels and id record for the peristent
        variables in the depricated fortran binary format
        """
        # Build a copy of the record-type table, switching double entries
        # to the requested read precision.
        record_types = {}
        for key in read.record_types.keys():
            if read.record_types[key][1] == "d":
                record_types[key] = (read.record_types[key][0], precision)
            else:
                record_types[key] = read.record_types[key]

        # NOTE(review): bare except silently treats any read failure as
        # "no persistent block present" — consider narrowing.
        try:
            tmp_id = infile.read_record("h")
        except:
            return -1
        block_id = 0
        # Scan id/value record pairs until the terminating id 2000 is hit;
        # 2000 iterations is an upper bound to avoid an infinite loop.
        for i in range(2000):
            i += 1
            tmp_id = infile.read_record("h")
            block_id = tmp_id[0]
            if block_id == 2000:
                break
            for key in record_types.keys():
                if record_types[key][0] == block_id:
                    tmp_val = infile.read_record(record_types[key][1])
                    self.__setattr__(key, tmp_val[0])
                    if not quiet:
                        print(key, record_types[key][0],
                              record_types[key][1], tmp_val)
        return self

    dim = None
    param = None
    index = None

    if isinstance(sim, __Simulation__):
        # Take geometry and parameters from the supplied simulation object.
        datadir = os.path.expanduser(sim.datadir)
        dim = sim.dim
        param = read.param(datadir=sim.datadir, quiet=True,
                           conflicts_quiet=True)
        index = read.index(datadir=sim.datadir)
    else:
        datadir = os.path.expanduser(datadir)
        if dim is None:
            # 'og*' files use the overset ogrid dimensions; 'VARd*' files
            # are downsampled snapshots with their own dim files.
            if var_file[0:2].lower() == "og":
                dim = read.ogdim(datadir, proc)
            else:
                if var_file[0:4] == "VARd":
                    dim = read.dim(datadir, proc, down=True)
                else:
                    dim = read.dim(datadir, proc)
        if param is None:
            param = read.param(datadir=datadir, quiet=quiet,
                               conflicts_quiet=True)
        if index is None:
            index = read.index(datadir=datadir)

    # Auxiliary slots are only present in the file if lwrite_aux is set.
    if param.lwrite_aux:
        total_vars = dim.mvar + dim.maux
    else:
        total_vars = dim.mvar

    if os.path.exists(os.path.join(datadir, "grid.h5")):
        #
        # Read HDF5 files.
        #
        import h5py

        run2D = param.lwrite_2d

        # Set up the global array.
        if not run2D:
            self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                              dtype=dtype)
        else:
            # 2D runs drop the degenerate dimension (y or z).
            if dim.ny == 1:
                self.f = np.zeros((total_vars, dim.mz, dim.mx), dtype=dtype)
            else:
                self.f = np.zeros((total_vars, dim.my, dim.mx), dtype=dtype)

        if not var_file:
            if ivar < 0:
                var_file = "var.h5"
            else:
                var_file = "VAR" + str(ivar) + ".h5"

        file_name = os.path.join(datadir, "allprocs", var_file)
        with h5py.File(file_name, "r") as tmp:
            # Each dataset under 'data' maps to a slot via the index file
            # (1-based in index.pro, hence the -1).
            for key in tmp["data"].keys():
                self.f[index.__getattribute__(key) - 1, :] = dtype(
                    tmp["data/" + key][:])
            t = (tmp["time"][()]).astype(precision)
            x = (tmp["grid/x"][()]).astype(precision)
            y = (tmp["grid/y"][()]).astype(precision)
            z = (tmp["grid/z"][()]).astype(precision)
            dx = (tmp["grid/dx"][()]).astype(precision)
            dy = (tmp["grid/dy"][()]).astype(precision)
            dz = (tmp["grid/dz"][()]).astype(precision)
            if param.lshear:
                deltay = (tmp["persist/shear_delta_y"][(0)]).astype(precision)
            if lpersist:
                for key in tmp["persist"].keys():
                    self.__setattr__(
                        key, (tmp["persist"][key][0]).astype(precision))
    else:
        #
        # Read scattered Fortran binary files.
        #
        run2D = param.lwrite_2d

        if dim.precision == "D":
            read_precision = "d"
        else:
            read_precision = "f"

        if not var_file:
            if ivar < 0:
                var_file = "var.dat"
            else:
                var_file = "VAR" + str(ivar)

        if proc < 0:
            # Read from every procN directory and assemble below.
            proc_dirs = self.__natural_sort(
                filter(lambda s: s.startswith("proc"), os.listdir(datadir)))
            if proc_dirs.count("proc_bounds.dat") > 0:
                proc_dirs.remove("proc_bounds.dat")
            if param.lcollective_io:
                # A collective IO strategy is being used
                proc_dirs = ["allprocs"]
        #        else:
        #            proc_dirs = proc_dirs[::dim.nprocx*dim.nprocy]
        else:
            proc_dirs = ["proc" + str(proc)]

        # Set up the global array.
        if not run2D:
            self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                              dtype=dtype)
        else:
            if dim.ny == 1:
                self.f = np.zeros((total_vars, dim.mz, dim.mx), dtype=dtype)
            else:
                self.f = np.zeros((total_vars, dim.my, dim.mx), dtype=dtype)

        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            if not param.lcollective_io:
                proc = int(directory[4:])
                if var_file[0:2].lower() == "og":
                    procdim = read.ogdim(datadir, proc)
                else:
                    if var_file[0:4] == "VARd":
                        procdim = read.dim(datadir, proc, down=True)
                    else:
                        procdim = read.dim(datadir, proc)
                if not quiet:
                    print("Reading data from processor" +
                          " {0} of {1} ...".format(proc, len(proc_dirs)))
            else:
                # A collective IO strategy is being used
                procdim = dim
            #        else:
            #            procdim.mx = dim.mx
            #            procdim.my = dim.my
            #            procdim.nx = dim.nx
            #            procdim.ny = dim.ny
            #            procdim.ipx = dim.ipx
            #            procdim.ipy = dim.ipy

            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the data.
            file_name = os.path.join(datadir, directory, var_file)
            infile = FortranFile(file_name)
            if not run2D:
                f_loc = dtype(infile.read_record(dtype=read_precision))
                f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
            else:
                if dim.ny == 1:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, mzloc, mxloc))
                else:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, myloc, mxloc))
            # Second record: t, x, y, z, dx, dy, dz (, deltay if lshear).
            raw_etc = infile.read_record(dtype=read_precision)
            if lpersist:
                persist(self, infile=infile, precision=read_precision,
                        quiet=quiet)
            infile.close()

            t = raw_etc[0]
            x_loc = raw_etc[1:mxloc + 1]
            y_loc = raw_etc[mxloc + 1:mxloc + myloc + 1]
            z_loc = raw_etc[mxloc + myloc + 1:mxloc + myloc + mzloc + 1]
            if param.lshear:
                shear_offset = 1
                deltay = raw_etc[-1]
            else:
                shear_offset = 0

            # dx, dy, dz sit at the tail, before deltay when present.
            dx = raw_etc[-3 - shear_offset]
            dy = raw_etc[-2 - shear_offset]
            dz = raw_etc[-1 - shear_offset]

            if len(proc_dirs) > 1:
                # Calculate where the local processor will go in
                # the global array.
                #
                # Don't overwrite ghost zones of processor to the
                # left (and accordingly in y and z direction -- makes
                # a difference on the diagonals)
                #
                # Recall that in NumPy, slicing is NON-INCLUSIVE on
                # the right end, ie, x[0:4] will slice all of a
                # 4-digit array, not produce an error like in idl.
                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0xloc = 0
                    i1xloc = procdim.mx
                else:
                    i0x = procdim.ipx * procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0xloc = procdim.nghostx
                    i1xloc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0yloc = 0
                    i1yloc = procdim.my
                else:
                    i0y = procdim.ipy * procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0yloc = procdim.nghosty
                    i1yloc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z + procdim.mz
                    i0zloc = 0
                    i1zloc = procdim.mz
                else:
                    i0z = procdim.ipz * procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0zloc = procdim.nghostz
                    i1zloc = procdim.mz

                x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                if not run2D:
                    self.f[:, i0z:i1z, i0y:i1y,
                           i0x:i1x] = f_loc[:, i0zloc:i1zloc, i0yloc:i1yloc,
                                            i0xloc:i1xloc]
                else:
                    if dim.ny == 1:
                        self.f[:, i0z:i1z,
                               i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                i0xloc:i1xloc]
                    else:
                        # NOTE(review): this branch indexes self.f without the
                        # leading variable axis, unlike the others — confirm
                        # intended for 2D (nz == 1) multi-proc runs.
                        self.f[i0z:i1z, i0y:i1y,
                               i0x:i1x] = f_loc[i0zloc:i1zloc, i0yloc:i1yloc,
                                                i0xloc:i1xloc]
            else:
                # Single processor: take the local arrays as-is.
                self.f = f_loc
                x = x_loc
                y = y_loc
                z = z_loc

    if magic is not None:
        if not np.all(param.lequidist):
            raise NotImplementedError(
                "Magic functions are only implemented for equidistant grids."
            )
        if "bb" in magic:
            # Compute the magnetic field before doing trimall.
            aa = self.f[index.ax - 1:index.az, ...]
            self.bb = dtype(
                curl(
                    aa,
                    dx,
                    dy,
                    dz,
                    x=x,
                    y=y,
                    run2D=run2D,
                    coordinate_system=param.coord_system,
                ))
            if trimall:
                self.bb = self.bb[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                  dim.l1:dim.l2 + 1]
        if "jj" in magic:
            # Compute the electric current field before doing trimall.
            aa = self.f[index.ax - 1:index.az, ...]
            self.jj = dtype(
                curl2(aa,
                      dx,
                      dy,
                      dz,
                      x=x,
                      y=y,
                      coordinate_system=param.coord_system))
            if trimall:
                self.jj = self.jj[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                  dim.l1:dim.l2 + 1]
        if "vort" in magic:
            # Compute the vorticity field before doing trimall.
            uu = self.f[index.ux - 1:index.uz, ...]
            self.vort = dtype(
                curl(
                    uu,
                    dx,
                    dy,
                    dz,
                    x=x,
                    y=y,
                    run2D=run2D,
                    coordinate_system=param.coord_system,
                ))
            if trimall:
                if run2D:
                    if dim.nz == 1:
                        self.vort = self.vort[:, dim.m1:dim.m2 + 1,
                                              dim.l1:dim.l2 + 1]
                    else:
                        self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                              dim.l1:dim.l2 + 1]
                else:
                    self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                          dim.m1:dim.m2 + 1,
                                          dim.l1:dim.l2 + 1, ]

    # Trim the ghost zones of the global f-array if asked.
    if trimall:
        self.x = x[dim.l1:dim.l2 + 1]
        self.y = y[dim.m1:dim.m2 + 1]
        self.z = z[dim.n1:dim.n2 + 1]
        if not run2D:
            self.f = self.f[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                            dim.l1:dim.l2 + 1]
        else:
            if dim.ny == 1:
                self.f = self.f[:, dim.n1:dim.n2 + 1, dim.l1:dim.l2 + 1]
            else:
                self.f = self.f[:, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1]
    else:
        self.x = x
        self.y = y
        self.z = z
        # Store inclusive-exclusive slice bounds for later manual trimming.
        self.l1 = dim.l1
        self.l2 = dim.l2 + 1
        self.m1 = dim.m1
        self.m2 = dim.m2 + 1
        self.n1 = dim.n1
        self.n2 = dim.n2 + 1

    # Assign an attribute to self for each variable defined in
    # 'data/index.pro' so that e.g. self.ux is the x-velocity
    aatest = []
    uutest = []
    for key in index.__dict__.keys():
        if "aatest" in key:
            aatest.append(key)
        if "uutest" in key:
            uutest.append(key)
        if (key != "global_gg" and key != "keys" and "aatest" not in key
                and "uutest" not in key):
            value = index.__dict__[key]
            setattr(self, key, self.f[value - 1, ...])
    # Special treatment for vector quantities.
    if hasattr(index, "uu"):
        self.uu = self.f[index.ux - 1:index.uz, ...]
    if hasattr(index, "aa"):
        self.aa = self.f[index.ax - 1:index.az, ...]
    if hasattr(index, "uu_sph"):
        self.uu_sph = self.f[index.uu_sphx - 1:index.uu_sphz, ...]
    if hasattr(index, "bb_sph"):
        self.bb_sph = self.f[index.bb_sphx - 1:index.bb_sphz, ...]
    # Special treatment for test method vector quantities.
    # Note index 1,2,3,...,0 last vector may be the zero field/flow
    if hasattr(index, "aatest1"):
        naatest = int(len(aatest) / 3)
        for j in range(0, naatest):
            key = "aatest" + str(np.mod(j + 1, naatest))
            value = index.__dict__["aatest1"] + 3 * j
            setattr(self, key, self.f[value - 1:value + 2, ...])
    if hasattr(index, "uutest1"):
        nuutest = int(len(uutest) / 3)
        for j in range(0, nuutest):
            key = "uutest" + str(np.mod(j + 1, nuutest))
            # NOTE(review): uses index 'uutest' as the base here, while the
            # aatest branch uses 'aatest1' — confirm this asymmetry is intended.
            value = index.__dict__["uutest"] + 3 * j
            setattr(self, key, self.f[value - 1:value + 2, ...])

    self.t = t
    self.dx = dx
    self.dy = dy
    self.dz = dz
    if param.lshear:
        self.deltay = deltay

    # Do the rest of magic after the trimall (i.e. no additional curl.)
    self.magic = magic
    if self.magic is not None:
        self.magic_attributes(param, dtype=dtype)
def write_h5_grid(
    file_name="grid",
    datadir="data",
    precision="d",
    nghost=3,
    settings=None,
    param=None,
    grid=None,
    unit=None,
    quiet=True,
    driver=None,
    comm=None,
    overwrite=False,
    rank=0,
):
    """
    Write the grid information as hdf5.
    We assume by default that a run simulation directory has already been
    constructed, but start has not been executed in h5 format so that
    binary sim files dim, grid and param files are already present in the
    sim directory, or provided from an old binary sim source directory as
    inputs.

    call signature:

    write_h5_grid(file_name='grid', datadir='data', precision='d', nghost=3,
                  settings=None, param=None, grid=None, unit=None, quiet=True,
                  driver=None, comm=None)

    Keyword arguments:

    *file_name*:
      Prefix of the file name to be written, 'grid'.

    *datadir*:
      Directory where 'grid.h5' is stored.

    *precision*:
      Single 'f' or double 'd' precision.

    *nghost*:
      Number of ghost zones.

    *settings*:
      Optional dictionary of persistent variable.

    *param*:
      Optional Param object.

    *grid*:
      Optional Pencil Grid object of grid parameters.

    *unit*:
      Optional dictionary of simulation units.

    *quiet*:
      Option to print output.
    """
    from os.path import join

    import numpy as np

    from pencil import read
    from pencil.io import open_h5, group_h5, dataset_h5
    from pencil import is_sim_dir

    # Test if simulation directory; deliberately best-effort — warn and
    # continue rather than raise, matching the surrounding module style.
    if not is_sim_dir():
        print("ERROR: Directory needs to be a simulation")
        sys.stdout.flush()
    #
    if settings is None:
        # Collect the geometry settings from the binary dim files.
        settings = {}
        skeys = [
            "l1",
            "l2",
            "m1",
            "m2",
            "n1",
            "n2",
            "nx",
            "ny",
            "nz",
            "mx",
            "my",
            "mz",
            "nprocx",
            "nprocy",
            "nprocz",
            "maux",
            "mglobal",
            "mvar",
            "precision",
        ]
        dim = read.dim()
        for key in skeys:
            settings[key] = dim.__getattribute__(key)
        settings["precision"] = precision.encode()
        settings["nghost"] = nghost
        settings["version"] = np.int32(0)
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    if grid is None:
        grid = read.grid(quiet=True)
    else:
        # Validate a user-provided grid object; warn on missing keys.
        gd_err = False
        for key in gkeys:
            if not key in grid.__dict__.keys():
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
                gd_err = True
        if gd_err:
            print("ERROR: grid incomplete")
            sys.stdout.flush()
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    if param is None:
        param = read.param(quiet=True)
        # Derive composite units from the base units.
        param.__setattr__("unit_mass",
                          param.unit_density * param.unit_length**3)
        param.__setattr__("unit_energy",
                          param.unit_mass * param.unit_velocity**2)
        param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
        param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
        param.unit_system = param.unit_system.encode()

    # Open file for writing data.
    filename = join(datadir, file_name + ".h5")
    with open_h5(filename,
                 "w",
                 driver=driver,
                 comm=comm,
                 overwrite=overwrite,
                 rank=rank) as ds:
        # Add settings.  (The original duplicated identical branches for
        # 'precision'; all settings are written the same way.)
        sets_grp = group_h5(ds, "settings", status="w")
        for key in settings.keys():
            dataset_h5(sets_grp, key, status="w", data=(settings[key], ))
        # Add the grid arrays and origin coordinates.
        grid_grp = group_h5(ds, "grid", status="w")
        for key in gkeys:
            dataset_h5(grid_grp,
                       key,
                       status="w",
                       data=(grid.__getattribute__(key)))
        dataset_h5(grid_grp,
                   "Ox",
                   status="w",
                   data=(param.__getattribute__("xyz0")[0], ))
        dataset_h5(grid_grp,
                   "Oy",
                   status="w",
                   data=(param.__getattribute__("xyz0")[1], ))
        dataset_h5(grid_grp,
                   "Oz",
                   status="w",
                   data=(param.__getattribute__("xyz0")[2], ))
        # Add physical units; 'system' is a scalar string, hence the tuple.
        unit_grp = group_h5(ds, "unit", status="w")
        for key in ukeys:
            if "system" in key:
                dataset_h5(
                    unit_grp,
                    key,
                    status="w",
                    data=(param.__getattribute__("unit_" + key), ),
                )
            else:
                dataset_h5(
                    unit_grp,
                    key,
                    status="w",
                    data=param.__getattribute__("unit_" + key),
                )
def read(self, datadir="data", proc=-1, quiet=False, precision="f", trim=False):
    """
    read(datadir='data', proc=-1, quiet=False, trim=False)

    Read the grid data from the pencil code simulation.
    If proc < 0, then load all data and assemble.
    Otherwise, load grid from specified processor.

    Parameters
    ----------
    datadir : string
        Directory where the data is stored.

    proc : int
        Processor to be read. If proc is -1, then read the 'global'
        grid. If proc is >=0, then read the grid.dat in the
        corresponding processor directory.

    quiet : bool
        Flag for switching of output.

    precision : string
        Float 'f', double 'd' or half 'half' for the returned arrays.

    trim : bool
        Cuts off the ghost points.

    Returns
    -------
    Class containing the grid information.
    """
    import numpy as np
    import os
    from scipy.io import FortranFile
    from pencil import read

    # Map the precision string onto a NumPy scalar type used for output.
    if precision == "f":
        dtype = np.float32
    elif precision == "d":
        dtype = np.float64
    elif precision == "half":
        dtype = np.float16
    else:
        print('read grid: {} precision not set, using "f"'.format(precision))
        dtype = np.float32

    if os.path.exists(os.path.join(datadir, "grid.h5")):
        # HDF5 format: the whole grid lives in one file.
        dim = read.dim(datadir, proc)
        import h5py

        with h5py.File(os.path.join(datadir, "grid.h5"), "r") as tmp:
            x = dtype(tmp["grid"]["x"][()])
            y = dtype(tmp["grid"]["y"][()])
            z = dtype(tmp["grid"]["z"][()])
            dx_1 = dtype(tmp["grid"]["dx_1"][()])
            dy_1 = dtype(tmp["grid"]["dy_1"][()])
            dz_1 = dtype(tmp["grid"]["dz_1"][()])
            dx_tilde = dtype(tmp["grid"]["dx_tilde"][()])
            dy_tilde = dtype(tmp["grid"]["dy_tilde"][()])
            dz_tilde = dtype(tmp["grid"]["dz_tilde"][()])
            dx = dtype(tmp["grid"]["dx"][()])
            dy = dtype(tmp["grid"]["dy"][()])
            dz = dtype(tmp["grid"]["dz"][()])
            Lx = dtype(tmp["grid"]["Lx"][()])
            Ly = dtype(tmp["grid"]["Ly"][()])
            Lz = dtype(tmp["grid"]["Lz"][()])
            # grid.h5 stores no time; use 0.
            t = dtype(0.0)
    else:
        # Fortran binary format: read per-processor grid.dat files and
        # stitch them into the global arrays.
        datadir = os.path.expanduser(datadir)
        dim = read.dim(datadir, proc)
        param = read.param(datadir=datadir, quiet=True, conflicts_quiet=True)
        if dim.precision == "D":
            read_precision = "d"
        else:
            read_precision = "f"

        if proc < 0:
            proc_dirs = list(
                filter(
                    lambda string: string.startswith("proc"),
                    os.listdir(datadir)
                )
            )
            if proc_dirs.count("proc_bounds.dat") > 0:
                proc_dirs.remove("proc_bounds.dat")
            if param.lcollective_io:
                # A collective IO strategy is being used
                proc_dirs = ["allprocs"]
        else:
            proc_dirs = ["proc" + str(proc)]

        # Define the global arrays.
        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)
        dx_1 = np.zeros(dim.mx, dtype=precision)
        dy_1 = np.zeros(dim.my, dtype=precision)
        dz_1 = np.zeros(dim.mz, dtype=precision)
        dx_tilde = np.zeros(dim.mx, dtype=precision)
        dy_tilde = np.zeros(dim.my, dtype=precision)
        dz_tilde = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            if not param.lcollective_io:
                proc = int(directory[4:])
                procdim = read.dim(datadir, proc)
                if not quiet:
                    print(
                        "reading grid data from processor"
                        + " {0} of {1} ...".format(proc, len(proc_dirs))
                    )
            else:
                procdim = dim
            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the grid data.  Record layout (Fortran unformatted):
            # (t,x,y,z), (dx,dy,dz), (Lx,Ly,Lz), (dx_1,dy_1,dz_1),
            # (dx_tilde,dy_tilde,dz_tilde).
            file_name = os.path.join(datadir, directory, "grid.dat")
            infile = FortranFile(file_name, "r")
            grid_raw = infile.read_record(dtype=read_precision)
            dx, dy, dz = tuple(infile.read_record(dtype=read_precision))
            Lx, Ly, Lz = tuple(infile.read_record(dtype=read_precision))
            dx_1_raw = infile.read_record(dtype=read_precision)
            dx_tilde_raw = infile.read_record(dtype=read_precision)
            infile.close()

            # Reshape the arrays.
            t = dtype(grid_raw[0])
            x_loc = grid_raw[1 : mxloc + 1]
            y_loc = grid_raw[mxloc + 1 : mxloc + myloc + 1]
            z_loc = grid_raw[mxloc + myloc + 1 : mxloc + myloc + mzloc + 1]
            dx_1_loc = dx_1_raw[0:mxloc]
            dy_1_loc = dx_1_raw[mxloc : mxloc + myloc]
            dz_1_loc = dx_1_raw[mxloc + myloc : mxloc + myloc + mzloc]
            dx_tilde_loc = dx_tilde_raw[0:mxloc]
            dy_tilde_loc = dx_tilde_raw[mxloc : mxloc + myloc]
            dz_tilde_loc = dx_tilde_raw[mxloc + myloc : mxloc + myloc + mzloc]

            if len(proc_dirs) > 1:
                # Compute the destination slice of this processor in the
                # global arrays; interior processors skip their left-hand
                # ghost zones so neighbours' data is not overwritten.
                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0x_loc = 0
                    i1x_loc = procdim.mx
                else:
                    i0x = procdim.ipx * procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0x_loc = procdim.nghostx
                    i1x_loc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0y_loc = 0
                    i1y_loc = procdim.my
                else:
                    i0y = procdim.ipy * procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0y_loc = procdim.nghosty
                    i1y_loc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z + procdim.mz
                    i0z_loc = 0
                    i1z_loc = procdim.mz
                else:
                    i0z = procdim.ipz * procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0z_loc = procdim.nghostz
                    i1z_loc = procdim.mz

                x[i0x:i1x] = x_loc[i0x_loc:i1x_loc]
                y[i0y:i1y] = y_loc[i0y_loc:i1y_loc]
                z[i0z:i1z] = z_loc[i0z_loc:i1z_loc]
                dx_1[i0x:i1x] = dx_1_loc[i0x_loc:i1x_loc]
                dy_1[i0y:i1y] = dy_1_loc[i0y_loc:i1y_loc]
                dz_1[i0z:i1z] = dz_1_loc[i0z_loc:i1z_loc]
                dx_tilde[i0x:i1x] = dx_tilde_loc[i0x_loc:i1x_loc]
                dy_tilde[i0y:i1y] = dy_tilde_loc[i0y_loc:i1y_loc]
                dz_tilde[i0z:i1z] = dz_tilde_loc[i0z_loc:i1z_loc]
            else:
                # Single processor: the local arrays are the global ones.
                # x = dtype(x_loc.astype)
                x = dtype(x_loc)
                y = dtype(y_loc)
                z = dtype(z_loc)
                dx_1 = dtype(dx_1_loc)
                dy_1 = dtype(dy_1_loc)
                dz_1 = dtype(dz_1_loc)
                dx_tilde = dtype(dx_tilde_loc)
                dy_tilde = dtype(dy_tilde_loc)
                dz_tilde = dtype(dz_tilde_loc)

    if trim:
        # Strip the ghost zones from all coordinate arrays.
        self.x = x[dim.l1 : dim.l2 + 1]
        self.y = y[dim.m1 : dim.m2 + 1]
        self.z = z[dim.n1 : dim.n2 + 1]
        self.dx_1 = dx_1[dim.l1 : dim.l2 + 1]
        self.dy_1 = dy_1[dim.m1 : dim.m2 + 1]
        self.dz_1 = dz_1[dim.n1 : dim.n2 + 1]
        self.dx_tilde = dx_tilde[dim.l1 : dim.l2 + 1]
        self.dy_tilde = dy_tilde[dim.m1 : dim.m2 + 1]
        self.dz_tilde = dz_tilde[dim.n1 : dim.n2 + 1]
    else:
        self.x = x
        self.y = y
        self.z = z
        self.dx_1 = dx_1
        self.dy_1 = dy_1
        self.dz_1 = dz_1
        self.dx_tilde = dx_tilde
        self.dy_tilde = dy_tilde
        self.dz_tilde = dz_tilde

    self.t = t
    self.dx = dx
    self.dy = dy
    self.dz = dz
    self.Lx = Lx
    self.Ly = Ly
    self.Lz = Lz
def animate_slices(field='uu1',
                   datadir='data/',
                   proc=-1,
                   extension='xz',
                   format='native',
                   tmin=0.,
                   tmax=1.e38,
                   wait=0.,
                   amin=0.,
                   amax=1.,
                   transform='',
                   oldfile=False):
    """
    read 2D slice files and assemble an animation.

    Options:

     field --- which variable to slice
     datadir --- path to data directory
     proc --- an integer giving the processor to read a slice from
     extension --- which plane of xy,xz,yz,Xz. for 2D this should be overwritten.
     format --- endian. one of little, big, or native (default); currently unused
     tmin --- start time
     tmax --- end time
     amin --- minimum value for image scaling
     amax --- maximum value for image scaling
     transform --- insert arbitrary numerical code to modify the slice
     wait --- pause in seconds between animation slices
    """

    datadir = os.path.expanduser(datadir)
    # Use os.path.join so the path is correct regardless of a trailing
    # separator in datadir (plain '+' concatenation was fragile).
    if proc < 0:
        filename = os.path.join(datadir, 'slice_' + field + '.' + extension)
    else:
        filename = os.path.join(datadir, 'proc' + str(proc),
                                'slice_' + field + '.' + extension)

    # global dim
    param = read.param(datadir)

    dim = read.dim(datadir, proc)
    if dim.precision == 'D':
        precision = 'd'
    else:
        precision = 'f'

    # Set up slice plane dimensions for the requested extension.
    if extension == 'xy' or extension == 'Xy':
        hsize = dim.nx
        vsize = dim.ny
    if extension == 'xz':
        hsize = dim.nx
        vsize = dim.nz
    if extension == 'yz':
        hsize = dim.ny
        vsize = dim.nz
    plane = np.zeros((vsize, hsize), dtype=precision)

    infile = FortranFile(filename)

    ax = plt.axes()
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    image = plt.imshow(plane, vmin=amin, vmax=amax)

    # for real-time image display
    manager = plt.get_current_fig_manager()
    manager.show()

    ifirst = True
    islice = 0
    while True:
        # Each record holds the flattened plane followed by t (and, for the
        # new format, the slice position); stop at end of file.
        try:
            raw_data = infile.read_record(dtype=precision)
        except (ValueError, TypeError):
            break

        if oldfile:
            t = raw_data[-1]
            plane = raw_data[:-1].reshape(vsize, hsize)
        else:
            slice_z2pos = raw_data[-1]
            t = raw_data[-2]
            plane = raw_data[:-2].reshape(vsize, hsize)

        if transform:
            # BUG FIX: exec('plane = plane' + transform) cannot rebind the
            # local variable 'plane' in Python 3, so the transform was
            # silently ignored; eval() returns the transformed array.
            # SECURITY: 'transform' is evaluated as code — only pass
            # trusted expressions.
            plane = eval('plane' + transform)

        if t > tmin and t < tmax:
            title = 't = %11.3e' % t
            ax.set_title(title)
            image.set_data(plane)
            manager.canvas.draw()

            if ifirst:
                print(
                    "----islice----------t---------min-------max-------delta")
            print("%10i %10.3e %10.3e %10.3e %10.3e" %
                  (islice, t, plane.min(), plane.max(),
                   plane.max() - plane.min()))

            ifirst = False
            islice += 1

            sleep(wait)

    infile.close()
def find_tracers(self, var_file='VAR0', datadir='data', trace_field='bb',
                 ti=-1, tf=-1):
    """
    Trace streamlines of the vectofield 'field' from z = z0 to z = z1
    and integrate quantities 'int_q' along the lines. Creates a 2d
    mapping as in 'streamlines.f90'.

    call signature:

    find_tracers(var_file='VAR0', datadir='data', trace_field='bb',
                 ti=-1, tf=-1)

    Keyword arguments:

    *var_file*:
      Varfile to be read.

    *datadir*:
      Directory where the data is stored.

    *trace_field*:
      Vector field used for the streamline tracing.

    *ti*:
      Initial VAR file index for tracer time sequences. Overrides 'var_file'.

    *tf*:
      Final VAR file index for tracer time sequences. Overrides 'var_file'.
    """
    import numpy as np
    import multiprocessing as mp
    from pencil import read
    from pencil import math

    # Write the tracing parameters.
    self.params.trace_field = trace_field
    self.params.datadir = datadir

    # Multi core setup: n_proc must be a scalar integer.
    if not(np.isscalar(self.params.n_proc)) or (self.params.n_proc%1 != 0):
        print("error: invalid processor number")
        return -1
    queue = mp.Queue()

    # Read the data: request the derived ('magic') fields needed for the
    # chosen trace field and integrated quantities.
    magic = []
    if trace_field == 'bb':
        magic.append('bb')
    if trace_field == 'jj':
        magic.append('jj')
    if trace_field == 'vort':
        magic.append('vort')
    if self.params.int_q == 'ee':
        magic.append('bb')
        magic.append('jj')
    dim = read.dim(datadir=datadir)
    self.params.var_file = var_file

    # Check if user wants a tracer time series.
    if (ti%1 == 0) and (tf%1 == 0) and (ti >= 0) and (tf >= ti):
        series = True
        nTimes = tf-ti+1
    else:
        # Single snapshot: with the defaults ti = tf = -1 the loop below
        # runs once with t_idx = -1 (valid via negative indexing).
        series = False
        nTimes = 1

    # Initialize the arrays (seed grid is trace_sub times finer than dim).
    self.x0 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.y0 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.x1 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.y1 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.z1 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.l = np.zeros([int(self.params.trace_sub*dim.nx),
                       int(self.params.trace_sub*dim.ny), nTimes])
    if self.params.int_q == 'curly_A':
        self.curly_A = np.zeros([int(self.params.trace_sub*dim.nx),
                                 int(self.params.trace_sub*dim.ny), nTimes])
    if self.params.int_q == 'ee':
        # NOTE(review): self.ee is re-assigned to a field array inside the
        # loop below, discarding this allocation — confirm intended.
        self.ee = np.zeros([int(self.params.trace_sub*dim.nx),
                            int(self.params.trace_sub*dim.ny), nTimes])
    self.mapping = np.zeros([int(self.params.trace_sub*dim.nx),
                             int(self.params.trace_sub*dim.ny),
                             nTimes, 3])
    self.t = np.zeros(nTimes)

    for t_idx in range(ti, tf+1):
        if series:
            var_file = 'VAR' + str(t_idx)

        # Read the data.
        var = read.var(var_file=var_file, datadir=datadir, magic=magic,
                       quiet=True, trimall=True)
        grid = read.grid(datadir=datadir, quiet=True, trim=True)
        param2 = read.param(datadir=datadir, quiet=True)
        self.t[t_idx] = var.t

        # Extract the requested vector trace_field.
        field = getattr(var, trace_field)
        if self.params.int_q == 'curly_A':
            self.aa = var.aa
        if self.params.int_q == 'ee':
            # Electric field E = eta*J - u x B.
            self.ee = var.jj*param2.eta - math.cross(var.uu, var.bb)

        # Get the simulation parameters.
        self.params.dx = var.dx
        self.params.dy = var.dy
        self.params.dz = var.dz
        self.params.Ox = var.x[0]
        self.params.Oy = var.y[0]
        self.params.Oz = var.z[0]
        self.params.Lx = grid.Lx
        self.params.Ly = grid.Ly
        self.params.Lz = grid.Lz
        self.params.nx = dim.nx
        self.params.ny = dim.ny
        self.params.nz = dim.nz

        # Initialize the tracers: seed points on a refined xy-grid at the
        # bottom of the domain.
        for ix in range(int(self.params.trace_sub*dim.nx)):
            for iy in range(int(self.params.trace_sub*dim.ny)):
                self.x0[ix, iy, t_idx] = grid.x[0] + grid.dx/self.params.trace_sub*ix
                self.x1[ix, iy, t_idx] = self.x0[ix, iy, t_idx].copy()
                self.y0[ix, iy, t_idx] = grid.y[0] + grid.dy/self.params.trace_sub*iy
                self.y1[ix, iy, t_idx] = self.y0[ix, iy, t_idx].copy()
                self.z1[ix, iy, t_idx] = grid.z[0]

        # Fan the tracing out over n_proc worker processes; drain the
        # queue BEFORE joining so large results cannot deadlock the join.
        proc = []
        sub_data = []
        for i_proc in range(self.params.n_proc):
            proc.append(mp.Process(target=self.__sub_tracers,
                                   args=(queue, field, t_idx, i_proc, self.params.n_proc)))
        for i_proc in range(self.params.n_proc):
            proc[i_proc].start()
        for i_proc in range(self.params.n_proc):
            sub_data.append(queue.get())
        for i_proc in range(self.params.n_proc):
            proc[i_proc].join()
        for i_proc in range(self.params.n_proc):
            # Extract the data from the single cores. Mind the order.
            # Results can arrive in any order; sub_proc identifies which
            # interleaved stripe of rows this worker computed.
            sub_proc = sub_data[i_proc][0]
            self.x1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][1]
            self.y1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][2]
            self.z1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][3]
            self.l[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][4]
            self.mapping[sub_proc::self.params.n_proc, :, t_idx, :] = sub_data[i_proc][5]
            if self.params.int_q == 'curly_A':
                self.curly_A[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][6]
            if self.params.int_q == 'ee':
                self.ee[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][7]
        for i_proc in range(self.params.n_proc):
            proc[i_proc].terminate()

    return 0
def derive_masks(sim_path,
                 src,
                 dst,
                 data_key='data/ss',
                 par=[],
                 comm=None,
                 overwrite=False,
                 rank=0,
                 size=1,
                 nghost=3,
                 status='a',
                 chunksize=1000.0,
                 quiet=True,
                 nmin=32,
                 ent_cuts=[
                     2.32e9,
                 ],
                 mask_keys=[
                     'hot',
                 ],
                 unit_key='unit_entropy'):
    """Compute boolean masks from a source dataset (entropy by default) via
    thermal_decomposition and write them chunk-by-chunk to dst['masks'].

    NOTE(review): par, ent_cuts and mask_keys are mutable default
    arguments — they are only read here, but consider None-defaults.
    """
    if comm:
        # With MPI the datasets are created collectively; do not overwrite.
        overwrite = False
    if isinstance(par, list):
        os.chdir(sim_path)
        par = read.param(quiet=True, conflicts_quiet=True)
    # get data dimensions
    nx, ny, nz = src['settings']['nx'][0],\
                 src['settings']['ny'][0],\
                 src['settings']['nz'][0]
    mx, my, mz = src['settings']['mx'][0],\
                 src['settings']['my'][0],\
                 src['settings']['mz'][0]
    # split data into manageable memory chunks
    # NOTE(review): likely intended 8*nx*ny*nz/(1024*1024) (bytes -> MB);
    # as written this multiplies by 1024 — confirm.
    dstchunksize = 8 * nx * ny * nz / 1024 * 1024
    if dstchunksize > chunksize:
        nchunks = cpu_optimal(nx,
                              ny,
                              nz,
                              quiet=quiet,
                              mvar=src['settings/mvar'][0],
                              maux=src['settings/maux'][0],
                              MBmin=chunksize,
                              nmin=nmin,
                              size=size)[1]
    else:
        nchunks = [1, 1, 1]
    print('nchunks {}'.format(nchunks))
    # for mpi split chunks across processes
    if size > 1:
        # Each rank handles exactly one (x, y, z) chunk triple.
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = [
            locindx[np.mod(
                rank + int(rank / nchunks[2]) + int(rank / nchunks[1]),
                nchunks[0])]
        ]
        indy = [locindy[np.mod(rank + int(rank / nchunks[2]), nchunks[1])]]
        indz = [locindz[np.mod(rank, nchunks[2])]]
        allchunks = 1
    else:
        # Serial: iterate over every chunk triple.
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        indy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        indz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        allchunks = nchunks[0] * nchunks[1] * nchunks[2]
    # ensure derived variables are in a list
    if isinstance(mask_keys, list):
        mask_keys = mask_keys
    else:
        mask_keys = [mask_keys]
    # initialise group
    group = group_h5(dst,
                     'masks',
                     status='a',
                     overwrite=overwrite,
                     comm=comm,
                     rank=rank,
                     size=size)
    for key in mask_keys:
        # One boolean cube per entropy cut.
        ne = len(ent_cuts)
        dataset_h5(group,
                   key,
                   status=status,
                   shape=[ne, mz, my, mx],
                   comm=comm,
                   size=size,
                   rank=rank,
                   overwrite=overwrite,
                   dtype=np.bool_)
        print('writing ' + key + ' shape {}'.format([ne, mz, my, mx]))
        for ichunk in range(allchunks):
            # The single-element list wrappers keep the serial and MPI
            # index bookkeeping identical.
            for iz in [indz[np.mod(ichunk, nchunks[2])]]:
                # Read bounds include ghosts; *out bounds and var* offsets
                # strip them again except at the physical domain edges.
                n1, n2 = iz[0]-nghost,\
                         iz[-1]+nghost+1
                n1out = n1 + nghost
                n2out = n2 - nghost
                varn1 = nghost
                varn2 = -nghost
                if iz[0] == locindz[0][0]:
                    n1out = 0
                    varn1 = 0
                if iz[-1] == locindz[-1][-1]:
                    n2out = n2
                    varn2 = n2
                for iy in [
                        indy[np.mod(ichunk + int(ichunk / nchunks[2]),
                                    nchunks[1])]
                ]:
                    m1, m2 = iy[0]-nghost,\
                             iy[-1]+nghost+1
                    m1out = m1 + nghost
                    m2out = m2 - nghost
                    varm1 = nghost
                    varm2 = -nghost
                    if iy[0] == locindy[0][0]:
                        m1out = 0
                        varm1 = 0
                    if iy[-1] == locindy[-1][-1]:
                        m2out = m2
                        varm2 = m2
                    for ix in [
                            indx[np.mod(
                                ichunk + int(ichunk / nchunks[2]) +
                                int(ichunk / nchunks[1]), nchunks[0])]
                    ]:
                        l1, l2 = ix[0]-nghost,\
                                 ix[-1]+nghost+1
                        l1out = l1 + nghost
                        l2out = l2 - nghost
                        varl1 = nghost
                        varl2 = -nghost
                        if ix[0] == locindx[0][0]:
                            l1out = 0
                            varl1 = 0
                        if ix[-1] == locindx[-1][-1]:
                            l2out = l2
                            varl2 = l2
                        # Fetch the source chunk from src, else from dst.
                        if data_key in src.keys():
                            ss = src[data_key][n1:n2, m1:m2, l1:l2]
                        else:
                            if data_key in dst.keys():
                                ss = dst[data_key][n1:n2, m1:m2, l1:l2]
                            else:
                                print('masks: ' + data_key +
                                      ' does not exist in ', src, 'or', dst)
                                return 1
                        masks = thermal_decomposition(ss,
                                                      par,
                                                      unit_key=unit_key,
                                                      ent_cut=ent_cuts)
                        # Write one mask slab per cut, ghost zones stripped.
                        cut = 0
                        for mask in masks:
                            dst['masks'][key][cut, n1out:n2out, m1out:m2out,
                                              l1out:l2out] = mask[
                                                  varn1:varn2, varm1:varm2,
                                                  varl1:varl2]
                            cut += 1
def sim2h5(
    newdir=".",
    olddir=".",
    varfile_names=None,
    todatadir="data/allprocs",
    fromdatadir="data",
    precision="d",
    nghost=3,
    lpersist=True,
    x=None,
    y=None,
    z=None,
    lshear=False,
    snap_by_proc=False,
    aver_by_proc=False,
    lremove_old_snapshots=False,
    lremove_old_slices=False,
    lread_all_videoslices=False,
    vlarge=100000000,
    lremove_old_averages=False,
    execute=False,
    quiet=True,
    l2D=True,
    lvars=True,
    lvids=True,
    laver=True,
    laver2D=False,
    lremove_deprecated_vids=False,
    lsplit_slices=False,
):
    """
    Copy a simulation object written in Fortran binary to hdf5.

    The default is to copy all snapshots from/to the current simulation
    directory.  Optionally the old files can be removed.

    call signature:

    sim2h5(newdir='.', olddir='.', varfile_names=None,
           todatadir='data/allprocs', fromdatadir='data',
           precision='d', nghost=3, lpersist=False,
           x=None, y=None, z=None, lshear=False,
           snap_by_proc=False, aver_by_proc=False,
           lremove_old_snapshots=False,
           lremove_old_slices=False, lread_all_videoslices=True,
           lremove_old_averages=False, execute=False, quiet=True,
           l2D=True, lvars=True, lvids=True, laver=True)

    Keyword arguments:

    *olddir*:
      String path to simulation source directory.
      Path may be relative or absolute.

    *newdir*:
      String path to simulation destination directory.
      Path may be relative or absolute.

    *varfile_names*:
      A list of names of the snapshot files to be written, e.g. VAR0
      If None all varfiles in olddir+'/data/proc0/' will be converted

    *todatadir*:
      Directory to which the data is stored.

    *fromdatadir*:
      Directory from which the data is collected.

    *precision*:
      Single 'f' or double 'd' precision for new data.

    *nghost*:
      Number of ghost zones.
      TODO: handle switching size of ghost zones.

    *lpersist*:
      option to include persistent variables from snapshots.

    *xyz*:
      xyz arrays of the domain with ghost zones.
      This will normally be obtained from Grid object, but facility to
      redefine an alternative grid value.

    *lshear*:
      Flag for the shear.

    *execute*:
      optional confirmation required if lremove_old.

    *lremove_old_snapshots*:
      If True the old snapshot data will be deleted once the new h5 data
      has been saved.

    *lremove_old_slices*:
      If True the old video slice data will be deleted once the new h5 data
      has been saved.

    *lremove_old_averages*:
      If True the old averages data will be deleted once the new h5 data
      has been saved.

    *aver_by_proc*
      Option to read old binary files by processor and write in
      parallel

    *laver2D*
      If True apply to each plane_list 'y', 'z' and load each variable
      sequentially

    *l_mpi*:
      Applying MPI parallel process

    *driver*:
      HDF5 file io driver either None or mpio

    *comm*:
      MPI library calls

    *rank*:
      Integer ID of processor

    *size*:
      Number of MPI processes
    """
    import glob
    import numpy as np
    import os
    from os.path import exists, join
    import subprocess as sub
    import sys

    from .. import read
    from .. import sim
    from . import write_h5_grid
    from pencil.util import is_sim_dir

    # Set up MPI if available; fall back to serial execution otherwise.
    try:
        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        driver = "mpio"
        l_mpi = True
        l_mpi = l_mpi and (size != 1)
    except ImportError:
        comm = None
        driver = None
        rank = 0
        size = 1
        l_mpi = False
    if not l_mpi:
        comm = None
        driver = None
    print("rank {} and size {}".format(rank, size))
    sys.stdout.flush()
    if rank == size - 1:
        print("l_mpi", l_mpi)
        sys.stdout.flush()

    # Test if simulation directories.
    if newdir == ".":
        newdir = os.getcwd()
    if olddir == ".":
        olddir = os.getcwd()
    os.chdir(olddir)
    if not is_sim_dir():
        if rank == 0:
            print("ERROR: Directory (" + olddir + ") needs to be a simulation")
            sys.stdout.flush()
        return -1
    if newdir != olddir:
        if not exists(newdir):
            # Create the destination simulation directory.
            cmd = "pc_newrun -s " + newdir
            if rank == size - 1:
                process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                output, error = process.communicate()
                print(cmd, output, error)
        if comm:
            comm.Barrier()
        os.chdir(newdir)
        if not is_sim_dir():
            if rank == 0:
                print("ERROR: Directory (" + newdir + ") needs to be a simulation")
                sys.stdout.flush()
            return -1
    #
    lremove_old = lremove_old_snapshots or lremove_old_slices or lremove_old_averages
    if lremove_old:
        if not execute:
            # Require explicit confirmation before deleting binaries.
            os.chdir(olddir)
            if rank == 0:
                print("WARNING: Are you sure you wish to remove the Fortran" +
                      " binary files from \n" + os.getcwd() + ".\n" +
                      "Set execute=True to proceed.")
                sys.stdout.flush()
            return -1

    os.chdir(olddir)
    if lvars:
        if varfile_names is None:
            os.chdir(fromdatadir + "/proc0")
            lVARd = False
            # Identify downsampled snapshots (VARd*) separately from VAR*.
            varfiled_names = natural_sort(glob.glob("VARd*"))
            if len(varfiled_names) > 0:
                varfile_names = natural_sort(glob.glob("VAR*"))
                for iv in range(len(varfile_names) - 1, -1, -1):
                    if "VARd" in varfile_names[iv]:
                        varfile_names.remove(varfile_names[iv])
                lVARd = True
            else:
                varfile_names = natural_sort(glob.glob("VAR*"))
            os.chdir(olddir)
        else:
            lVARd = False
            if not isinstance(varfile_names, list):
                varfile_names = [varfile_names]
            varfiled_names = []
            tmp_names = []
            for varfile_name in varfile_names:
                # Bug fix: previously tested '"VARd" in varfile_names'
                # (list membership of the literal), so downsampled
                # snapshots were never detected here.
                if "VARd" in varfile_name:
                    varfiled_names.append(varfile_name)
                    lVARd = True
                else:
                    tmp_names.append(varfile_name)
            varfile_names = tmp_names
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    grid = None
    if rank == size - 1:
        grid = read.grid(quiet=True)
    if l_mpi:
        grid = comm.bcast(grid, root=size - 1)
    if not quiet:
        print(rank, grid)
        sys.stdout.flush()
    for key in gkeys:
        if not key in grid.__dict__.keys():
            if rank == 0:
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
            return -1
    # Obtain the settings from the old simulation.
    settings = {}
    skeys = [
        "l1",
        "l2",
        "m1",
        "m2",
        "n1",
        "n2",
        "nx",
        "ny",
        "nz",
        "mx",
        "my",
        "mz",
        "nprocx",
        "nprocy",
        "nprocz",
        "maux",
        "mglobal",
        "mvar",
        "precision",
    ]
    if rank == 0:
        olddim = read.dim()
        for key in skeys:
            settings[key] = np.array(olddim.__getattribute__(key))
        olddim = None
        settings["nghost"] = np.array(nghost)
        settings["precision"] = precision.encode()
    if l_mpi:
        settings = comm.bcast(settings, root=0)
    if snap_by_proc:
        nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
        if np.mod(nprocs, size) != 0:
            print("WARNING: efficiency requires cpus to divide ncpus")
            sys.stdout.flush()
    if not quiet:
        print(rank, grid)
        sys.stdout.flush()
    # Obtain physical units from old simulation.
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    param = read.param(quiet=True)
    # Derive composite units from the base units.
    param.__setattr__("unit_mass", param.unit_density * param.unit_length**3)
    param.__setattr__("unit_energy", param.unit_mass * param.unit_velocity**2)
    param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
    param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
    param.unit_system = param.unit_system.encode()
    # Index list for variables in f-array.
    if not quiet:
        print(rank, param)
        sys.stdout.flush()
    indx = None
    if rank == 0:
        indx = read.index()
    if l_mpi:
        indx = comm.bcast(indx, root=0)

    # Check consistency between Fortran binary and h5 data.
    os.chdir(newdir)
    dim = None
    if is_sim_dir():
        if rank == size - 1:
            if exists(join(newdir, "data", "dim.dat")):
                try:
                    dim = read.dim()
                except ValueError:
                    pass
        if l_mpi:
            dim = comm.bcast(dim, root=size - 1)
        if dim:
            if not quiet:
                print(rank, dim)
                sys.stdout.flush()
            # Bug fix: the old code evaluated bare '==' expressions inside
            # try/except ValueError, so their results were discarded and a
            # dimension mismatch could never be reported.  Compare and
            # abort explicitly instead.
            if not (dim.mvar == settings["mvar"]
                    and dim.mx == settings["mx"]
                    and dim.my == settings["my"]
                    and dim.mz == settings["mz"]):
                if rank == size - 1:
                    print("ERROR: new simulation dimensions do not match.")
                    sys.stdout.flush()
                return -1
            dim = None
        os.chdir(olddir)
        if rank == size - 1:
            print("precision is ", precision)
            sys.stdout.flush()
    if laver2D:
        aver2h5(
            newdir,
            olddir,
            todatadir="data/averages",
            fromdatadir="data",
            l2D=False,
            precision=precision,
            quiet=quiet,
            laver2D=laver2D,
            lremove_old_averages=False,
            aver_by_proc=aver_by_proc,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
        l2D = False
    # Copy snapshots.
    if lvars and len(varfile_names) > 0:
        var2h5(
            newdir,
            olddir,
            varfile_names,
            todatadir,
            fromdatadir,
            snap_by_proc,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # Copy downsampled snapshots if present.
    if lvars and lVARd:
        var2h5(
            newdir,
            olddir,
            varfiled_names,
            todatadir,
            fromdatadir,
            False,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            trimall=True,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    if lvars:
        var2h5(
            newdir,
            olddir,
            [
                "var.dat",
            ],
            todatadir,
            fromdatadir,
            snap_by_proc,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # Copy old video slices to new h5 sim.
    if lvids:
        if lremove_deprecated_vids:
            for ext in [
                    "bb.", "uu.", "ux.", "uy.", "uz.", "bx.", "by.", "bz."
            ]:
                cmd = "rm -f " + join(olddir, fromdatadir, "proc*",
                                      "slice_" + ext + "*")
                if rank == 0:
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
                cmd = "rm -f " + join(fromdatadir, "slice_" + ext + "*")
                if rank == 0:
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
            if comm:
                comm.Barrier()
        cmd = "src/read_all_videofiles.x"
        if rank == size - 1 and lread_all_videoslices:
            process = sub.Popen(cmd.split(), stdout=sub.PIPE)
            output, error = process.communicate()
            print(cmd, output, error)
        if comm:
            comm.Barrier()
        slices2h5(
            newdir,
            olddir,
            grid,
            todatadir="data/slices",
            fromdatadir=fromdatadir,
            precision=precision,
            quiet=quiet,
            vlarge=vlarge,
            lsplit_slices=lsplit_slices,
            lremove_old_slices=lremove_old_slices,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # Copy old averages data to new h5 sim.
    if laver:
        aver2h5(
            newdir,
            olddir,
            todatadir="data/averages",
            fromdatadir=fromdatadir,
            l2D=l2D,
            precision=precision,
            quiet=quiet,
            aver_by_proc=False,
            lremove_old_averages=lremove_old_averages,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # Check some critical sim files are present for new sim without start.
    # Construct grid.h5 sim information if required for new h5 sim.
    os.chdir(newdir)
    if l_mpi:
        comm.Barrier()
    if rank == 0:
        write_h5_grid(
            file_name="grid",
            datadir="data",
            precision=precision,
            nghost=nghost,
            settings=settings,
            param=param,
            grid=grid,
            unit=None,
            quiet=quiet,
        )
        source_file = join(olddir, fromdatadir, "proc0/varN.list")
        target_file = join(newdir, todatadir, "varN.list")
        if exists(source_file):
            cmd = "cp " + source_file + " " + target_file
            process = sub.Popen(cmd.split(), stdout=sub.PIPE)
            output, error = process.communicate()
            print(cmd, output, error)
        # Auxiliary files copied verbatim if present and not yet copied.
        items = [
            "def_var.pro",
            "index.pro",
            "jobid.dat",
            "param.nml",
            "particle_index.pro",
            "pc_constants.pro",
            "pointmass_index.pro",
            "pt_positions.dat",
            "sn_series.dat",
            "svnid.dat",
            "time_series.dat",
            "tsnap.dat",
            "tspec.dat",
            "tvid.dat",
            "t2davg.dat",
            "var.general",
            "variables.pro",
            "varname.dat",
        ]
        for item in items:
            source_file = join(olddir, fromdatadir, item)
            target_file = join(newdir, fromdatadir, item)
            if exists(source_file):
                if not exists(target_file):
                    cmd = "cp " + source_file + " " + target_file
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
    print("Simulation Fortran to h5 completed on rank {}.".format(rank))
    sys.stdout.flush()
def find_fixed(
    self,
    datadir="data",
    var_file="VAR0",
    trace_field="bb",
    ti=-1,
    tf=-1,
    tracer_file_name=None,
):
    """
    Find the fixed points to a snapshot or existing tracer file.

    call signature::

      find_fixed(datadir='data', var_file='VAR0', trace_field='bb',
                 ti=-1, tf=-1, tracer_file_name=None):

    Keyword arguments:

    *datadir*:
      Data directory.

    *var_file*:
      Varfile to be read.

    *trace_field*:
      Vector field used for the streamline tracing.

    *ti*:
      Initial VAR file index for tracer time sequences. Overrides 'var_file'.

    *tf*:
      Final VAR file index for tracer time sequences. Overrides 'var_file'.

    *tracer_file_name*
      Name of the tracer file to be read.
      If 'None' compute the tracers.

    Returns 0 on success, -1 on an invalid processor count.
    """
    import numpy as np
    import multiprocessing as mp
    from pencil import read
    from pencil import math
    from pencil.diag.tracers import Tracers
    from pencil.calc.streamlines import Stream
    from pencil.math.interpolation import vec_int

    # Containers for the optionally integrated quantities along streamlines.
    if self.params.int_q == "curly_A":
        self.curly_A = []
    if self.params.int_q == "ee":
        self.ee = []

    # Multi core setup: n_proc must be a scalar integer.
    if not (np.isscalar(self.params.n_proc)) or (self.params.n_proc % 1 != 0):
        print("Error: invalid processor number")
        return -1
    queue = mp.Queue()

    # Make sure to read the var files with the correct magic.
    magic = []
    if trace_field == "bb":
        magic.append("bb")
    if trace_field == "jj":
        magic.append("jj")
    if trace_field == "vort":
        magic.append("vort")
    if self.params.int_q == "ee":
        # Electric field needs both bb and jj.
        magic.append("bb")
        magic.append("jj")
    dim = read.dim(datadir=datadir)

    # Check if user wants a tracer time series (both indices integral,
    # non-negative, and correctly ordered).
    if (ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti):
        series = True
        var_file = "VAR{0}".format(ti)
        n_times = tf - ti + 1
    else:
        series = False
        n_times = 1
    self.t = np.zeros(n_times)

    # Read the initial field.
    var = read.var(
        var_file=var_file, datadir=datadir, magic=magic, quiet=True, trimall=True
    )
    self.t[0] = var.t
    grid = read.grid(datadir=datadir, quiet=True, trim=True)
    field = getattr(var, trace_field)
    param2 = read.param(datadir=datadir, quiet=True)
    if self.params.int_q == "ee":
        # E = eta*J - u x B (resistive MHD Ohm's law).
        ee = var.jj * param2.eta - math.cross(var.uu, var.bb)
    self.params.datadir = datadir
    self.params.var_file = var_file
    self.params.trace_field = trace_field

    # Get the simulation parameters.
    self.params.dx = var.dx
    self.params.dy = var.dy
    self.params.dz = var.dz
    self.params.Ox = var.x[0]
    self.params.Oy = var.y[0]
    self.params.Oz = var.z[0]
    self.params.Lx = grid.Lx
    self.params.Ly = grid.Ly
    self.params.Lz = grid.Lz
    self.params.nx = dim.nx
    self.params.ny = dim.ny
    self.params.nz = dim.nz

    tracers = Tracers()
    tracers.params = self.params
    # Create the mapping for all times (or read an existing tracer file).
    if not tracer_file_name:
        tracers.find_tracers(
            var_file=var_file,
            datadir=datadir,
            trace_field=trace_field,
            ti=ti,
            tf=tf,
        )
    else:
        tracers.read(datadir=datadir, file_name=tracer_file_name)
    self.tracers = tracers

    # Set some default values.  The (tf-ti+1)*series + (1-series) idiom
    # yields tf-ti+1 slots for a series and a single slot otherwise.
    self.t = np.zeros((tf - ti + 1) * series + (1 - series))
    self.fixed_index = np.zeros((tf - ti + 1) * series + (1 - series))
    self.poincare = np.zeros(
        [
            int(self.params.trace_sub * dim.nx),
            int(self.params.trace_sub * dim.ny),
            n_times,
        ]
    )
    # Seed grid of candidate starting cells in the xy-plane.
    ix0 = range(0, int(self.params.nx * self.params.trace_sub) - 1)
    iy0 = range(0, int(self.params.ny * self.params.trace_sub) - 1)
    self.fixed_points = []
    self.fixed_sign = []
    self.fixed_tracers = []

    # Start the parallelized fixed point finding.
    for tidx in range(n_times):
        if tidx > 0:
            var = read.var(
                var_file="VAR{0}".format(tidx + ti),
                datadir=datadir,
                magic=magic,
                quiet=True,
                trimall=True,
            )
            field = getattr(var, trace_field)
            self.t[tidx] = var.t

        proc = []
        sub_data = []
        fixed = []
        fixed_sign = []
        fixed_tracers = []
        for i_proc in range(self.params.n_proc):
            proc.append(
                mp.Process(
                    target=self.__sub_fixed,
                    args=(queue, ix0, iy0, field, self.tracers, tidx, var, i_proc),
                )
            )
        for i_proc in range(self.params.n_proc):
            proc[i_proc].start()
        # Drain the queue BEFORE joining, to avoid deadlock on full pipes.
        for i_proc in range(self.params.n_proc):
            sub_data.append(queue.get())
        for i_proc in range(self.params.n_proc):
            proc[i_proc].join()
        for i_proc in range(self.params.n_proc):
            # Extract the data from the single cores. Mind the order.
            sub_proc = sub_data[i_proc][0]
            fixed.extend(sub_data[i_proc][1])
            fixed_tracers.extend(sub_data[i_proc][2])
            fixed_sign.extend(sub_data[i_proc][3])
            self.fixed_index[tidx] += sub_data[i_proc][4]
            # Each worker handled every n_proc-th row, starting at sub_proc.
            self.poincare[sub_proc :: self.params.n_proc, :, tidx] = sub_data[
                i_proc
            ][5]
        for i_proc in range(self.params.n_proc):
            proc[i_proc].terminate()

        # Discard fixed points which lie too close to each other.
        fixed, fixed_tracers, fixed_sign = self.__discard_close_fixed_points(
            np.array(fixed), np.array(fixed_sign), np.array(fixed_tracers), var
        )
        if self.fixed_points is None:
            self.fixed_points = []
            self.fixed_sign = []
            self.fixed_tracers = []
        self.fixed_points.append(np.array(fixed))
        self.fixed_sign.append(np.array(fixed_sign))
        self.fixed_tracers.append(fixed_tracers)

    # Compute the traced quantities along the fixed point streamlines.
    if (self.params.int_q == "curly_A") or (self.params.int_q == "ee"):
        for t_idx in range(0, n_times):
            if self.params.int_q == "curly_A":
                self.curly_A.append([])
            if self.params.int_q == "ee":
                self.ee.append([])
            for fixed in self.fixed_points[t_idx]:
                # Trace the stream line starting at the fixed point in
                # the z = Oz plane.
                xx = np.array([fixed[0], fixed[1], self.params.Oz])
                # time = np.linspace(0, self.params.Lz/np.max(abs(field[2])), 10)
                field_strength_z0 = vec_int(
                    xx,
                    field,
                    [var.dx, var.dy, var.dz],
                    [var.x[0], var.y[0], var.z[0]],
                    [len(var.x), len(var.y), len(var.z)],
                    interpolation=self.params.interpolation,
                )
                field_strength_z0 = np.sqrt(np.sum(field_strength_z0 ** 2))
                # Integration time scaled by the local field strength.
                time = np.linspace(0, 4 * self.params.Lz / field_strength_z0, 500)
                stream = Stream(field, self.params, xx=xx, time=time)
                # Do the field line integration (midpoint rule along the
                # polyline returned by the stream tracer).
                if self.params.int_q == "curly_A":
                    curly_A = 0
                    for l in range(stream.iterations - 1):
                        aaInt = vec_int(
                            (stream.tracers[l + 1] + stream.tracers[l]) / 2,
                            var.aa,
                            [var.dx, var.dy, var.dz],
                            [var.x[0], var.y[0], var.z[0]],
                            [len(var.x), len(var.y), len(var.z)],
                            interpolation=self.params.interpolation,
                        )
                        curly_A += np.dot(
                            aaInt, (stream.tracers[l + 1] - stream.tracers[l])
                        )
                    self.curly_A[-1].append(curly_A)
                if self.params.int_q == "ee":
                    ee_p = 0
                    for l in range(stream.iterations - 1):
                        eeInt = vec_int(
                            (stream.tracers[l + 1] + stream.tracers[l]) / 2,
                            ee,
                            [var.dx, var.dy, var.dz],
                            [var.x[0], var.y[0], var.z[0]],
                            [len(var.x), len(var.y), len(var.z)],
                            interpolation=self.params.interpolation,
                        )
                        ee_p += np.dot(
                            eeInt, (stream.tracers[l + 1] - stream.tracers[l])
                        )
                    self.ee[-1].append(ee_p)
            if self.params.int_q == "curly_A":
                self.curly_A[-1] = np.array(self.curly_A[-1])
            if self.params.int_q == "ee":
                self.ee[-1] = np.array(self.ee[-1])

    return 0
def read(self, datadir='data', proc=-1, quiet=False,
         precision='f', trim=False):
    """
    Read the grid data from the pencil code simulation.
    If proc < 0, then load all data and assemble.
    Otherwise, load grid from specified processor.

    call signature:

    grid(datadir='data', proc=-1, quiet=False, precision='f', trim=False)

    Keyword arguments:

    *datadir*:
      Directory where the data is stored.

    *proc*
      Processor to be read. If proc is -1, then read the 'global'
      grid. If proc is >=0, then read the grid.dat in the
      corresponding processor directory.

    *quiet*
      Flag for switching of output.

    *precision*
      Float (f), double (d) or half (half) precision of the returned arrays.

    *trim*
      Cuts off the ghost points.
    """
    import numpy as np
    import os
    from scipy.io import FortranFile
    from pencil import read

    # Output dtype requested by the caller; the data on disk may be in a
    # different (read_precision) format and is converted on assignment.
    if precision == 'f':
        dtype = np.float32
    elif precision == 'd':
        dtype = np.float64
    elif precision == 'half':
        dtype = np.float16
    else:
        print(
            'read grid: {} precision not set, using "f"'.format(precision))
        dtype = np.float32

    if os.path.exists(os.path.join(datadir, 'grid.h5')):
        # HDF5 case: everything is stored pre-assembled in grid.h5.
        dim = read.dim(datadir, proc)
        import h5py

        with h5py.File(os.path.join(datadir, 'grid.h5'), 'r') as tmp:
            x = dtype(tmp['grid']['x'][()])
            y = dtype(tmp['grid']['y'][()])
            z = dtype(tmp['grid']['z'][()])
            dx_1 = dtype(tmp['grid']['dx_1'][()])
            dy_1 = dtype(tmp['grid']['dy_1'][()])
            dz_1 = dtype(tmp['grid']['dz_1'][()])
            dx_tilde = dtype(tmp['grid']['dx_tilde'][()])
            dy_tilde = dtype(tmp['grid']['dy_tilde'][()])
            dz_tilde = dtype(tmp['grid']['dz_tilde'][()])
            dx = dtype(tmp['grid']['dx'][()])
            dy = dtype(tmp['grid']['dy'][()])
            dz = dtype(tmp['grid']['dz'][()])
            Lx = dtype(tmp['grid']['Lx'][()])
            Ly = dtype(tmp['grid']['Ly'][()])
            Lz = dtype(tmp['grid']['Lz'][()])
            # grid.h5 carries no time stamp; fixed at zero.
            t = dtype(0.0)
    else:
        # Fortran binary case: read per-processor grid.dat files and
        # assemble the global grid.
        datadir = os.path.expanduser(datadir)
        dim = read.dim(datadir, proc)
        param = read.param(datadir=datadir, quiet=True,
                           conflicts_quiet=True)
        # Precision of the data on disk (from dim.dat), independent of
        # the output precision requested by the caller.
        if dim.precision == 'D':
            read_precision = 'd'
        else:
            read_precision = 'f'

        if proc < 0:
            proc_dirs = list(
                filter(lambda string: string.startswith('proc'),
                       os.listdir(datadir)))
            if (proc_dirs.count("proc_bounds.dat") > 0):
                proc_dirs.remove("proc_bounds.dat")
            if param.lcollective_io:
                # A collective IO strategy is being used
                proc_dirs = ['allprocs']
        else:
            proc_dirs = ['proc' + str(proc)]

        # Define the global arrays.
        # NOTE(review): dtype=precision relies on numpy accepting the
        # 'f'/'d'/'half' type strings; the arrays are overwritten with
        # converted data below.
        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)
        dx_1 = np.zeros(dim.mx, dtype=precision)
        dy_1 = np.zeros(dim.my, dtype=precision)
        dz_1 = np.zeros(dim.mz, dtype=precision)
        dx_tilde = np.zeros(dim.mx, dtype=precision)
        dy_tilde = np.zeros(dim.my, dtype=precision)
        dz_tilde = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            if not param.lcollective_io:
                proc = int(directory[4:])
                procdim = read.dim(datadir, proc)
                if not quiet:
                    print("reading grid data from processor"
                          + " {0} of {1} ...".format(proc, len(proc_dirs)))
            else:
                procdim = dim
            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the grid data.
            file_name = os.path.join(datadir, directory, 'grid.dat')
            infile = FortranFile(file_name, 'r')
            # Record layout: [t, x, y, z], [dx, dy, dz], [Lx, Ly, Lz],
            # [dx_1, dy_1, dz_1], [dx_tilde, dy_tilde, dz_tilde].
            grid_raw = infile.read_record(dtype=read_precision)
            dx, dy, dz = tuple(infile.read_record(dtype=read_precision))
            Lx, Ly, Lz = tuple(infile.read_record(dtype=read_precision))
            dx_1_raw = infile.read_record(dtype=read_precision)
            dx_tilde_raw = infile.read_record(dtype=read_precision)
            infile.close()

            # Reshape the arrays.
            t = dtype(grid_raw[0])
            x_loc = grid_raw[1:mxloc + 1]
            y_loc = grid_raw[mxloc + 1:mxloc + myloc + 1]
            z_loc = grid_raw[mxloc + myloc + 1:mxloc + myloc + mzloc + 1]
            dx_1_loc = dx_1_raw[0:mxloc]
            dy_1_loc = dx_1_raw[mxloc:mxloc + myloc]
            dz_1_loc = dx_1_raw[mxloc + myloc:mxloc + myloc + mzloc]
            dx_tilde_loc = dx_tilde_raw[0:mxloc]
            dy_tilde_loc = dx_tilde_raw[mxloc:mxloc + myloc]
            dz_tilde_loc = dx_tilde_raw[mxloc + myloc:mxloc + myloc + mzloc]

            if len(proc_dirs) > 1:
                # Place the local slab in the global arrays.  Interior
                # processors skip their lower ghost layer so that ghost
                # cells are only kept at the physical domain boundaries.
                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0x_loc = 0
                    i1x_loc = procdim.mx
                else:
                    i0x = procdim.ipx * procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0x_loc = procdim.nghostx
                    i1x_loc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0y_loc = 0
                    i1y_loc = procdim.my
                else:
                    i0y = procdim.ipy * procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0y_loc = procdim.nghosty
                    i1y_loc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z + procdim.mz
                    i0z_loc = 0
                    i1z_loc = procdim.mz
                else:
                    i0z = procdim.ipz * procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0z_loc = procdim.nghostz
                    i1z_loc = procdim.mz

                x[i0x:i1x] = x_loc[i0x_loc:i1x_loc]
                y[i0y:i1y] = y_loc[i0y_loc:i1y_loc]
                z[i0z:i1z] = z_loc[i0z_loc:i1z_loc]
                dx_1[i0x:i1x] = dx_1_loc[i0x_loc:i1x_loc]
                dy_1[i0y:i1y] = dy_1_loc[i0y_loc:i1y_loc]
                dz_1[i0z:i1z] = dz_1_loc[i0z_loc:i1z_loc]
                dx_tilde[i0x:i1x] = dx_tilde_loc[i0x_loc:i1x_loc]
                dy_tilde[i0y:i1y] = dy_tilde_loc[i0y_loc:i1y_loc]
                dz_tilde[i0z:i1z] = dz_tilde_loc[i0z_loc:i1z_loc]
            else:
                # Single directory: take the local arrays as the global
                # ones, converted to the requested output dtype.
                x = dtype(x_loc)
                y = dtype(y_loc)
                z = dtype(z_loc)
                dx_1 = dtype(dx_1_loc)
                dy_1 = dtype(dy_1_loc)
                dz_1 = dtype(dz_1_loc)
                dx_tilde = dtype(dx_tilde_loc)
                dy_tilde = dtype(dy_tilde_loc)
                dz_tilde = dtype(dz_tilde_loc)

    if trim:
        # Strip the ghost zones, keeping only the physical domain.
        self.x = x[dim.l1:dim.l2 + 1]
        self.y = y[dim.m1:dim.m2 + 1]
        self.z = z[dim.n1:dim.n2 + 1]
        self.dx_1 = dx_1[dim.l1:dim.l2 + 1]
        self.dy_1 = dy_1[dim.m1:dim.m2 + 1]
        self.dz_1 = dz_1[dim.n1:dim.n2 + 1]
        self.dx_tilde = dx_tilde[dim.l1:dim.l2 + 1]
        self.dy_tilde = dy_tilde[dim.m1:dim.m2 + 1]
        self.dz_tilde = dz_tilde[dim.n1:dim.n2 + 1]
    else:
        self.x = x
        self.y = y
        self.z = z
        self.dx_1 = dx_1
        self.dy_1 = dy_1
        self.dz_1 = dz_1
        self.dx_tilde = dx_tilde
        self.dy_tilde = dy_tilde
        self.dz_tilde = dz_tilde

    self.t = t
    self.dx = dx
    self.dy = dy
    self.dz = dz
    self.Lx = Lx
    self.Ly = Ly
    self.Lz = Lz
def derive_stats(
    sim_path,
    src,
    dst,
    stat_keys=["Rm", "uu", "Ms"],
    par=[],
    comm=None,
    overwrite=False,
    rank=0,
    size=1,
    nghost=3,
    status="a",
    chunksize=1000.0,
    quiet=True,
    nmin=32,
    lmask=False,
    mask_key="hot",
):
    """
    Compute volume statistics (mean and standard deviation) of variables
    in an HDF5 snapshot and store them in the 'stats' group of dst.

    Keyword arguments:
        sim_path:  path to the simulation directory.
        src:       open HDF5 source file with 'settings' and 'data'.
        dst:       open HDF5 destination file receiving 'stats'.
        stat_keys: variable name(s) for which to derive statistics.
        par:       Param object; if a list (sentinel default) it is read
                   from sim_path.
        comm/rank/size: MPI communicator and process identity.
        overwrite: replace existing datasets (forced False under MPI).
        nghost:    number of ghost zones bounding the grid.
        chunksize: maximum chunk memory in MB.
        nmin:      minimum chunk extent passed to cpu_optimal.
        lmask:     also compute statistics inside/outside dst['masks'][mask_key].
        mask_key:  name of the mask dataset to apply when lmask.
    """
    if comm:
        # Collective writes cannot safely overwrite existing datasets.
        overwrite = False
    if isinstance(par, list):
        os.chdir(sim_path)
        par = read.param(quiet=True, conflicts_quiet=True)
    # get data dimensions
    nx, ny, nz = (
        src["settings"]["nx"][0],
        src["settings"]["ny"][0],
        src["settings"]["nz"][0],
    )
    mx, my, mz = (
        src["settings"]["mx"][0],
        src["settings"]["my"][0],
        src["settings"]["mz"][0],
    )
    # Split data into manageable memory chunks.
    # Bug fix: the previous expression '8*nx*ny*nz / 1024 * 1024' divided
    # and then multiplied by 1024, leaving the size in bytes, while
    # 'chunksize' is specified in MB.  Divide by 1024**2 for MB.
    dstchunksize = 8 * nx * ny * nz / (1024 * 1024)
    if dstchunksize > chunksize:
        nchunks = cpu_optimal(
            nx,
            ny,
            nz,
            quiet=quiet,
            mvar=src["settings/mvar"][0],
            maux=src["settings/maux"][0],
            MBmin=chunksize,
            nmin=nmin,
            size=size,
        )[1]
    else:
        nchunks = [1, 1, 1]
    print("nchunks {}".format(nchunks))
    # For MPI, split chunks across processes; each rank handles one chunk.
    if size > 1:
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = [
            locindx[np.mod(
                rank + int(rank / nchunks[2]) + int(rank / nchunks[1]),
                nchunks[0])]
        ]
        indy = [locindy[np.mod(rank + int(rank / nchunks[2]), nchunks[1])]]
        indz = [locindz[np.mod(rank, nchunks[2])]]
        allchunks = 1
    else:
        locindx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        locindy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        locindz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        indx = np.array_split(np.arange(nx) + nghost, nchunks[0])
        indy = np.array_split(np.arange(ny) + nghost, nchunks[1])
        indz = np.array_split(np.arange(nz) + nghost, nchunks[2])
        allchunks = nchunks[0] * nchunks[1] * nchunks[2]
    # Ensure derived variables are in a list.
    if not isinstance(stat_keys, list):
        stat_keys = [stat_keys]
    # Initialise the 'stats' group in the destination file.
    group = group_h5(dst, "stats", status="a", overwrite=overwrite,
                     comm=comm, rank=rank, size=size)
    for key in stat_keys:
        mean_stat = list()
        stdv_stat = list()
        mean_mask = list()
        stdv_mask = list()
        nmask_msk = list()
        mean_nmsk = list()
        stdv_nmsk = list()
        nmask_nmk = list()
        for ichunk in range(allchunks):
            for iz in [indz[np.mod(ichunk, nchunks[2])]]:
                n1, n2 = iz[0], iz[-1] + 1
                for iy in [
                        indy[np.mod(ichunk + int(ichunk / nchunks[2]),
                                    nchunks[1])]
                ]:
                    m1, m2 = iy[0], iy[-1] + 1
                    for ix in [
                            indx[np.mod(
                                ichunk + int(ichunk / nchunks[2])
                                + int(ichunk / nchunks[1]),
                                nchunks[0],
                            )]
                    ]:
                        l1, l2 = ix[0], ix[-1] + 1
                        if key in src["data"].keys():
                            var = src["data"][key][n1:n2, m1:m2, l1:l2]
                        elif key == "uu" or key == "aa":
                            # Vector fields stored componentwise: take the
                            # magnitude of (ux,uy,uz) or (ax,ay,az).
                            tmp = np.array([
                                src["data"][key[0] + "x"][n1:n2, m1:m2, l1:l2],
                                src["data"][key[0] + "y"][n1:n2, m1:m2, l1:l2],
                                src["data"][key[0] + "z"][n1:n2, m1:m2, l1:l2],
                            ])
                            var = np.sqrt(dot2(tmp))
                        else:
                            if key in dst["data"].keys():
                                if is_vector(key):
                                    var = np.sqrt(
                                        dot2(dst["data"][key][:, n1:n2,
                                                              m1:m2, l1:l2]))
                                else:
                                    var = dst["data"][key][n1:n2, m1:m2, l1:l2]
                            else:
                                print(
                                    "stats: " + key + " does not exist in ",
                                    src,
                                    "or",
                                    dst,
                                )
                                continue
                        if lmask:
                            # mask == False selects cells OUTSIDE the mask;
                            # keep elementwise numpy comparison (not 'is').
                            mask = dst["masks"][mask_key][0, n1:n2,
                                                          m1:m2, l1:l2]
                            Nmask = mask[mask == False].size
                            if Nmask > 0:
                                # Weight partial results by cell count so the
                                # final division gives the global mean.
                                mean_mask.append(var[mask == False].mean()
                                                 * Nmask)
                                stdv_mask.append(var[mask == False].std()
                                                 * Nmask)
                            else:
                                mean_mask.append(0)
                                stdv_mask.append(0)
                            nmask_msk.append(Nmask)
                            nmask = mask[mask == True].size
                            if nmask > 0:
                                mean_nmsk.append(var[mask == True].mean()
                                                 * nmask)
                                stdv_nmsk.append(var[mask == True].std()
                                                 * nmask)
                            else:
                                mean_nmsk.append(0)
                                stdv_nmsk.append(0)
                            nmask_nmk.append(nmask)
                        mean_stat.append(var.mean())
                        stdv_stat.append(var.std())
        if comm:
            # Gather the per-rank partial lists and redistribute so all
            # ranks hold the complete set before reducing.
            if lmask:
                mean_mask = comm.gather(mean_mask, root=0)
                stdv_mask = comm.gather(stdv_mask, root=0)
                mean_mask = comm.bcast(mean_mask, root=0)
                stdv_mask = comm.bcast(stdv_mask, root=0)
                mean_nmsk = comm.gather(mean_nmsk, root=0)
                stdv_nmsk = comm.gather(stdv_nmsk, root=0)
                mean_nmsk = comm.bcast(mean_nmsk, root=0)
                stdv_nmsk = comm.bcast(stdv_nmsk, root=0)
                nmask_msk = comm.gather(nmask_msk, root=0)
                nmask_nmk = comm.gather(nmask_nmk, root=0)
                nmask_msk = comm.bcast(nmask_msk, root=0)
                nmask_nmk = comm.bcast(nmask_nmk, root=0)
            mean_stat = comm.gather(mean_stat, root=0)
            stdv_stat = comm.gather(stdv_stat, root=0)
            mean_stat = comm.bcast(mean_stat, root=0)
            stdv_stat = comm.bcast(stdv_stat, root=0)
        if lmask:
            summk = np.sum(nmask_msk)
            if summk > 0:
                meanm = np.sum(mean_mask) / summk
                stdvm = np.sum(stdv_mask) / summk
            else:
                meanm = 0
                stdvm = 0
            sumnk = np.sum(nmask_nmk)
            if sumnk > 0:
                meann = np.sum(mean_nmsk) / sumnk
                stdvn = np.sum(stdv_nmsk) / sumnk
            else:
                meann = 0
                stdvn = 0
            print(mask_key + "-" + key + "-mean = {}, ".format(meanm) +
                  mask_key + "-" + key + "-std = {}".format(stdvm))
            print("not-" + mask_key + "-" + key +
                  "-mean = {}, ".format(meann) +
                  "not-" + mask_key + "-" + key + "-std = {}".format(stdvn))
            dataset_h5(
                group,
                mask_key + "-" + key + "-mean",
                status=status,
                data=meanm,
                comm=comm,
                size=size,
                rank=rank,
                overwrite=True,
            )
            dataset_h5(
                group,
                mask_key + "-" + key + "-std",
                status=status,
                data=stdvm,
                comm=comm,
                size=size,
                rank=rank,
                overwrite=True,
            )
            dataset_h5(
                group,
                "not-" + mask_key + "-" + key + "-mean",
                status=status,
                data=meann,
                comm=comm,
                size=size,
                rank=rank,
                overwrite=True,
            )
            dataset_h5(
                group,
                "not-" + mask_key + "-" + key + "-std",
                status=status,
                data=stdvn,
                comm=comm,
                size=size,
                rank=rank,
                overwrite=True,
            )
        # NOTE(review): these are unweighted means over chunk statistics;
        # exact only when all chunks have equal size — confirm intent.
        mstat = np.mean(mean_stat)
        dstat = np.mean(stdv_stat)
        print(key + "-mean = {}, ".format(mstat) +
              key + "-std = {}".format(dstat))
        dataset_h5(
            group,
            key + "-mean",
            status=status,
            data=mstat,
            comm=comm,
            size=size,
            rank=rank,
            overwrite=True,
        )
        dataset_h5(
            group,
            key + "-std",
            status=status,
            data=dstat,
            comm=comm,
            size=size,
            rank=rank,
            overwrite=True,
        )
def read(self, datadir='data', param=None, dim=None):
    """
    Read Pencil Code index data from index.pro.

    Sets one attribute on self per f-array variable found (with the
    leading 'i' stripped from the name), plus expanded per-component
    attributes for the testfield/testflow/testlnrho/testscalar modules.

    call signature:

    read(self, datadir='data', param=None, dim=None)

    Keyword arguments:

    *datadir*:
      Directory where the data is stored.

    *param*
      Parameter object.  Read from datadir when None.

    *dim*
      Dimension object.  Read from datadir when None.
    """
    import os
    import re
    import numpy as np
    from pencil import read

    if param is None:
        param = read.param(datadir=datadir, quiet=True)
    if dim is None:
        dim = read.dim(datadir=datadir)

    # Indices above totalvars belong to slots not present in the f-array.
    if param.lwrite_aux:
        totalvars = dim.mvar + dim.maux
    else:
        totalvars = dim.mvar

    index_file = open(os.path.join(datadir, 'index.pro'))
    ntestfield, ntestflow, ntestlnrho, ntestscalar = 0, 0, 0, 0
    for line in index_file.readlines():
        # Lines look like IDL assignments, e.g. 'iuu=[1,2,3]' or 'ilnrho=4'.
        clean = line.strip()
        name = clean.split('=')[0].strip().replace('[', '').replace(']', '')
        # Skip placeholder intarr(370) declarations.
        if clean.split('=')[1].strip().startswith('intarr(370)'):
            continue
        # Plain integer value, or an 'intarr(n)+offset'-style expression
        # which expands to an index array.
        # NOTE(review): the bare except also swallows malformed lines
        # without an '(n)' group or '+' term — confirm that is intended.
        try:
            val = int(clean.split('=')[1].strip())
        except:
            val = np.arange(int(re.search(r"\(([0-9]+)\)", clean).group(1))) + \
                  int(clean.split('=')[1].strip().split('+')[1])

        # Keep only in-range 'i*' entries (but not diagnostics 'i_*').
        # NOTE(review): when val is an ndarray (array-valued index) the
        # truthiness of 'val != 0' is ambiguous for size > 1 — verify
        # index.pro never produces such entries on this path.
        if val != 0 and val <= totalvars \
                and not name.startswith('i_') and name.startswith('i'):
            name = name.lstrip('i')
            if name == 'lnTT' and param.ltemperature_nolog:
                name = 'tt'
            # Remember base indices of test-module arrays for expansion below.
            if name == 'aatest':
                iaatest = val
            if name == 'uutest':
                iuutest = val
            if name == 'hhtest':
                ihhtest = val
            if name == 'cctest':
                icctest = val
            setattr(self, name, val)
        elif name == 'ntestfield':
            ntestfield = val
        elif name == 'ntestflow':
            ntestflow = val
        elif name == 'ntestlnrho':
            ntestlnrho = val
        elif name == 'ntestscalar':
            ntestscalar = val

    # Replace the collective test-module attributes by one attribute per
    # component, numbered from 1, e.g. aatest -> aatest1..aatestN.
    if ntestfield > 0:
        self.__delattr__('aatest')
        for i in range(1, ntestfield + 1):
            setattr(self, 'aatest' + str(i), iaatest - 1 + i)
    if ntestflow > 0:
        self.__delattr__('uutest')
        for i in range(1, ntestflow + 1):
            setattr(self, 'uutest' + str(i), iuutest - 1 + i)
    if ntestlnrho > 0:
        self.__delattr__('hhtest')
        for i in range(1, ntestlnrho + 1):
            setattr(self, 'hhtest' + str(i), ihhtest - 1 + i)
    if ntestscalar > 0:
        self.__delattr__('cctest')
        for i in range(1, ntestscalar + 1):
            setattr(self, 'cctest' + str(i), icctest - 1 + i)