def dispersion_and_drift(sim=False, OVERWRITE=False, GLOBAL=True, LOCAL=True,
                         use_IDL=False,
                         recalculate_gas_velo_at_particle_pos=False):
    """Calculate the dispersion (sigma) and drift (zeta) locally and
    globally, using the gas_velo_at_particle_pos script and dataset for
    all particles.

    With
      sigma = sqrt( 1/N_par * sum_i^N_par( (v_par(i) - <v_par>)^2 ) )
    and
      zeta = sqrt( 1/N_par * sum_i^N_par( (v_par(i) - u(xp_i))^2 ) )

    Args:
      OVERWRITE:  set to True to overwrite already calculated results
      GLOBAL:     calculate drift and dispersion globally, i.e. over the
                  whole simulation domain
      LOCAL:      calculate drift and dispersion locally, i.e. grid cell wise
      recalculate_gas_velo_at_particle_pos:
                  set to True if the dataset shall be recalculated
      use_IDL:    use the backup solution of the IDL script and sav files

    Returns True if successful.
    """
    from pencil import get_sim
    from pencil import io
    from pencil import read
    from pencil.diag.particle import gas_velo_at_particle_pos
    from scipy.io import readsav
    from os import listdir
    from os.path import exists, join
    import numpy as np

    if sim == False:
        sim = get_sim()
        if sim == False:
            print('! ERROR: Specify simulation object!')
            return False
    SIM = sim

    ## calculate the gas speed at particle position dataset
    gas_velo_at_particle_pos(OVERWRITE=recalculate_gas_velo_at_particle_pos,
                             use_IDL=use_IDL, sim=sim)

    print('\n##################### Starting the whole calculation process of '
          'DISPERSION and DRIFT for ' + SIM.name + ' #####################')

    ## defaults and setup
    GASVELO_DESTINATION = 'gas_velo_at_particle_pos'
    GASVELO_DIR = join(SIM.pc_datadir, GASVELO_DESTINATION)

    ## get the list of available files
    if use_IDL:
        file_filetype = '.sav'
    else:
        file_filetype = '.pkl'
    files = []
    if exists(GASVELO_DIR):
        files = [i for i in listdir(GASVELO_DIR)
                 if i.startswith(GASVELO_DESTINATION)
                 and i.endswith(file_filetype)]
    if files == []:
        print('!! ERROR: No calc_gas_speed_at_particle_position files found '
              'for ' + SIM.name + '! Use the IDL script to produce them first!')

    if use_IDL:
        USE_PKL_FILES = False
        scheme = ''
    else:
        USE_PKL_FILES = True
        scheme = '_tsc'
    files = [i.split('_')[-1].split(file_filetype)[0] for i in files]

    ## calculate dispersion and drift for all snapshots for which
    ## gas_velo_at_particle_pos files are found
    for file_no in files:
        print('## Starting the calculation of DISPERSION and DRIFT for '
              '### VAR' + str(file_no) + ' ###')

        # skip if the results already exist
        if (not OVERWRITE
                and io.exists('sigma_' + file_no, folder=SIM.pc_datadir)
                and io.exists('zeta_' + file_no, folder=SIM.pc_datadir)
                and io.exists('sigma_l_' + file_no,
                              folder=join(SIM.pc_datadir, 'sigma_l'))
                and io.exists('zeta_l_' + file_no,
                              folder=join(SIM.pc_datadir, 'zeta_l'))):
            print('## Skipping calculations')
            continue

        ## read the gas_velo_at_particle_pos dataset and the VAR file
        print('## reading gas_velo_at_particle_pos file and VAR')
        if USE_PKL_FILES:
            sav_file = io.load(join(GASVELO_DIR,
                                    GASVELO_DESTINATION + scheme + '_'
                                    + file_no + '.pkl'))[GASVELO_DESTINATION]
        else:
            sav_file = readsav(join(GASVELO_DIR,
                                    GASVELO_DESTINATION + scheme + '_'
                                    + file_no + '.sav'))[GASVELO_DESTINATION]
        var_file = read.var(varfile='VAR' + file_no, quiet=True,
                            trimall=True, datadir=SIM.datadir)

        ## get everything ready
        dim = SIM.dim
        pdim = read.pdim(sim=SIM)
        npar = pdim.npar
        npar1 = 1. / npar

        ## get the first quantities and set up the DATA_SET
        if use_IDL:
            time = sav_file['time'][0]
            DATA_SET = np.core.records.fromarrays(
                [range(0, npar),
                 sav_file['par_idx'][0][0].astype('int'),
                 sav_file['par_idx'][0][1].astype('int'),
                 sav_file['par_idx'][0][2].astype('int'),
                 sav_file['par_pos'][0][0],
                 sav_file['par_pos'][0][1],
                 sav_file['par_pos'][0][2],
                 sav_file['par_velo'][0][0],
                 sav_file['par_velo'][0][1],
                 sav_file['par_velo'][0][2],
                 sav_file['npar'][0],
                 sav_file['gas_velo'][0][0],
                 sav_file['gas_velo'][0][1],
                 sav_file['gas_velo'][0][2],
                 var_file.rho[sav_file['par_idx'][0][2].astype('int'),
                              sav_file['par_idx'][0][1].astype('int'),
                              sav_file['par_idx'][0][0].astype('int')],
                 var_file.rhop[sav_file['par_idx'][0][2].astype('int'),
                               sav_file['par_idx'][0][1].astype('int'),
                               sav_file['par_idx'][0][0].astype('int')]],
                names='parid, idx, idy, idz, posx, posy, posz, vx, vy, vz, '
                      'npar, gasv_x, gasv_y, gasv_z, rho, rhop',
                formats='int, int, int, int, float, float, float, '
                        'float, float, float, int, float, float, float, '
                        'float, float')
        else:
            time = sav_file['time']
            DATA_SET = np.core.records.fromarrays(
                [range(0, npar),
                 sav_file['par_idx'][0].astype('int'),
                 sav_file['par_idx'][1].astype('int'),
                 sav_file['par_idx'][2].astype('int'),
                 sav_file['par_pos'][0],
                 sav_file['par_pos'][1],
                 sav_file['par_pos'][2],
                 sav_file['par_velo'][0],
                 sav_file['par_velo'][1],
                 sav_file['par_velo'][2],
                 sav_file['npar'],
                 sav_file['gas_velo'][0],
                 sav_file['gas_velo'][1],
                 sav_file['gas_velo'][2],
                 var_file.rho[sav_file['par_idx'][2].astype('int'),
                              sav_file['par_idx'][1].astype('int'),
                              sav_file['par_idx'][0].astype('int')],
                 var_file.rhop[sav_file['par_idx'][2].astype('int'),
                               sav_file['par_idx'][1].astype('int'),
                               sav_file['par_idx'][0].astype('int')]],
                names='parid, idx, idy, idz, posx, posy, posz, vx, vy, vz, '
                      'npar, gasv_x, gasv_y, gasv_z, rho, rhop',
                formats='int, int, int, int, float, float, float, '
                        'float, float, float, int, float, float, float, '
                        'float, float')

        DATA_SET = np.sort(DATA_SET, order=['idx', 'idy', 'idz'])

        # calculate the GLOBAL DISPERSION in x, y and z direction and the
        # absolute magnitude
        if GLOBAL:
            print('## Calculating GLOBAL DISPERSION values in x,y,z '
                  'direction and abs value')
            mean_vx = np.mean(DATA_SET['vx'])
            mean_vy = np.mean(DATA_SET['vy'])
            mean_vz = np.mean(DATA_SET['vz'])
            SIGMA = {
                'SIGMA_o_x': np.sqrt(npar1 * np.sum((DATA_SET['vx'] - mean_vx)**2)),
                'SIGMA_o_y': np.sqrt(npar1 * np.sum((DATA_SET['vy'] - mean_vy)**2)),
                'SIGMA_o_z': np.sqrt(npar1 * np.sum((DATA_SET['vz'] - mean_vz)**2)),
                'SIGMA_o': np.sqrt(npar1 * np.sum((DATA_SET['vx'] - mean_vx)**2
                                                  + (DATA_SET['vy'] - mean_vy)**2
                                                  + (DATA_SET['vz'] - mean_vz)**2))}

            # calculate the GLOBAL DRIFT in x, y and z direction and the
            # absolute magnitude
            print('## Calculating GLOBAL DRIFT values in x,y,z direction '
                  'and abs value')
            ZETA = {
                'ZETA_o_x': np.sqrt(npar1 * np.sum((DATA_SET['vx'] - DATA_SET['gasv_x'])**2)),
                'ZETA_o_y': np.sqrt(npar1 * np.sum((DATA_SET['vy'] - DATA_SET['gasv_y'])**2)),
                'ZETA_o_z': np.sqrt(npar1 * np.sum((DATA_SET['vz'] - DATA_SET['gasv_z'])**2)),
                'ZETA_o': np.sqrt(npar1 * np.sum((DATA_SET['vx'] - DATA_SET['gasv_x'])**2
                                                 + (DATA_SET['vy'] - DATA_SET['gasv_y'])**2
                                                 + (DATA_SET['vz'] - DATA_SET['gasv_z'])**2))}

            print('## saving calculated GLOBAL DISPERSION and DRIFT')
            io.save(SIGMA, 'sigma_' + file_no, folder=SIM.pc_datadir)
            io.save(ZETA, 'zeta_' + file_no, folder=SIM.pc_datadir)

        # calculate the LOCAL DISPERSION and DRIFT
        if LOCAL:
            print('## Calculating LOCAL DISPERSION and DRIFT values')
            tmp_id = [DATA_SET[0]['idx'], DATA_SET[0]['idy'], DATA_SET[0]['idz']]
            sigma_l = np.zeros((dim.nx, dim.ny, dim.nz))
            zeta_l = np.zeros((dim.nx, dim.ny, dim.nz))
            particles_l = []
            for particle in DATA_SET:
                if [particle['idx'], particle['idy'], particle['idz']] == tmp_id:
                    particles_l.append(particle)
                else:
                    np_l = np.size(particles_l)
                    if np_l != 0:
                        np1_l = 1. / np_l
                        mean_vx_l = 0
                        mean_vy_l = 0
                        mean_vz_l = 0
                        sum_s_l = 0
                        sum_z_l = 0
                        for entry in particles_l:
                            mean_vx_l = mean_vx_l + np1_l * entry['vx']
                            mean_vy_l = mean_vy_l + np1_l * entry['vy']
                            mean_vz_l = mean_vz_l + np1_l * entry['vz']
                        for entry in particles_l:
                            sum_s_l = (sum_s_l
                                       + (entry['vx'] - mean_vx_l)**2
                                       + (entry['vy'] - mean_vy_l)**2
                                       + (entry['vz'] - mean_vz_l)**2)
                            sum_z_l = (sum_z_l
                                       + (entry['vx'] - entry['gasv_x'])**2
                                       + (entry['vy'] - entry['gasv_y'])**2
                                       + (entry['vz'] - entry['gasv_z'])**2)
                        sigma_l[tmp_id[0]][tmp_id[1]][tmp_id[2]] = np.sqrt(np1_l * sum_s_l)
                        zeta_l[tmp_id[0]][tmp_id[1]][tmp_id[2]] = np.sqrt(np1_l * sum_z_l)

                    # reset all local variables and lists to the new state
                    # (i.e. towards the newest particle)
                    tmp_id = [particle['idx'], particle['idy'], particle['idz']]
                    particles_l = []
                    particles_l.append(particle)

            # do the local calculation one last time for the last grid cell
            np_l = np.size(particles_l)
            np1_l = 1. / np_l
            mean_vx_l = 0
            mean_vy_l = 0
            mean_vz_l = 0
            sum_s_l = 0
            sum_z_l = 0
            for entry in particles_l:
                mean_vx_l = mean_vx_l + np1_l * entry['vx']
                mean_vy_l = mean_vy_l + np1_l * entry['vy']
                mean_vz_l = mean_vz_l + np1_l * entry['vz']
            for entry in particles_l:
                sum_s_l = (sum_s_l
                           + (entry['vx'] - mean_vx_l)**2
                           + (entry['vy'] - mean_vy_l)**2
                           + (entry['vz'] - mean_vz_l)**2)
                sum_z_l = (sum_z_l
                           + (entry['vx'] - entry['gasv_x'])**2
                           + (entry['vy'] - entry['gasv_y'])**2
                           + (entry['vz'] - entry['gasv_z'])**2)
            sigma_l[tmp_id[0]][tmp_id[1]][tmp_id[2]] = np.sqrt(np1_l * sum_s_l)
            zeta_l[tmp_id[0]][tmp_id[1]][tmp_id[2]] = np.sqrt(np1_l * sum_z_l)

            # save local sigma and zeta to SIM.pc_datadir
            print('## saving calculated LOCAL DISPERSION and DRIFT')
            io.save(sigma_l, 'sigma_l_' + file_no,
                    folder=join(SIM.pc_datadir, 'sigma_l'))
            io.save(zeta_l, 'zeta_l_' + file_no,
                    folder=join(SIM.pc_datadir, 'zeta_l'))

            ## Please keep these lines as a reminder on how to add columns
            ## to a record array!
            # add columns to DATA_SET for local zeta and sigma of the individual particle
            #DATA_SET = add_column_to_record_array(DATA_SET, 'zeta_l', zeta_l[DATA_SET['idx'], DATA_SET['idy'], DATA_SET['idz']], dtypes='float', usemask=False, asrecarray=True)
            #DATA_SET = add_column_to_record_array(DATA_SET, 'sigma_l', sigma_l[DATA_SET['idx'], DATA_SET['idy'], DATA_SET['idz']], dtypes='float', usemask=False, asrecarray=True)

    return True
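
# A vectorized sketch of the global definitions in the docstring above
# (illustrative only, not part of the pencil API): v and u are (N, 3)
# arrays holding the particle velocities and the interpolated gas
# velocities at the particle positions. sigma is the rms deviation from
# the mean particle velocity, zeta the rms particle-gas velocity
# difference, matching the SIGMA_o and ZETA_o entries computed above.
def _global_sigma_zeta_sketch(v, u):
    import numpy as np
    sigma = np.sqrt(np.mean(np.sum((v - v.mean(axis=0))**2, axis=1)))
    zeta = np.sqrt(np.mean(np.sum((v - u)**2, axis=1)))
    return sigma, zeta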
def find_fixed(
    self,
    datadir="data",
    var_file="VAR0",
    trace_field="bb",
    ti=-1,
    tf=-1,
    tracer_file_name=None,
):
    """
    Find the fixed points of a snapshot or existing tracer file.

    call signature::

      find_fixed(datadir='data', var_file='VAR0', trace_field='bb',
                 ti=-1, tf=-1, tracer_file_name=None)

    Keyword arguments:

    *datadir*:
      Data directory.

    *var_file*:
      Varfile to be read.

    *trace_field*:
      Vector field used for the streamline tracing.

    *ti*:
      Initial VAR file index for tracer time sequences. Overrides 'var_file'.

    *tf*:
      Final VAR file index for tracer time sequences. Overrides 'var_file'.

    *tracer_file_name*:
      Name of the tracer file to be read. If 'None' compute the tracers.
    """

    import numpy as np
    import multiprocessing as mp
    from pencil import read
    from pencil import math
    from pencil.diag.tracers import Tracers
    from pencil.calc.streamlines import Stream
    from pencil.math.interpolation import vec_int

    if self.params.int_q == "curly_A":
        self.curly_A = []
    if self.params.int_q == "ee":
        self.ee = []

    # Multi core setup.
    if not (np.isscalar(self.params.n_proc)) or (self.params.n_proc % 1 != 0):
        print("Error: invalid processor number")
        return -1
    queue = mp.Queue()

    # Make sure to read the var files with the correct magic.
    magic = []
    if trace_field == "bb":
        magic.append("bb")
    if trace_field == "jj":
        magic.append("jj")
    if trace_field == "vort":
        magic.append("vort")
    if self.params.int_q == "ee":
        magic.append("bb")
        magic.append("jj")
    dim = read.dim(datadir=datadir)

    # Check if the user wants a tracer time series.
    if (ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti):
        series = True
        var_file = "VAR{0}".format(ti)
        n_times = tf - ti + 1
    else:
        series = False
        n_times = 1
    self.t = np.zeros(n_times)

    # Read the initial field.
    var = read.var(
        var_file=var_file, datadir=datadir, magic=magic, quiet=True, trimall=True
    )
    self.t[0] = var.t
    grid = read.grid(datadir=datadir, quiet=True, trim=True)
    field = getattr(var, trace_field)
    param2 = read.param(datadir=datadir, quiet=True)
    if self.params.int_q == "ee":
        ee = var.jj * param2.eta - math.cross(var.uu, var.bb)
    self.params.datadir = datadir
    self.params.var_file = var_file
    self.params.trace_field = trace_field

    # Get the simulation parameters.
    self.params.dx = var.dx
    self.params.dy = var.dy
    self.params.dz = var.dz
    self.params.Ox = var.x[0]
    self.params.Oy = var.y[0]
    self.params.Oz = var.z[0]
    self.params.Lx = grid.Lx
    self.params.Ly = grid.Ly
    self.params.Lz = grid.Lz
    self.params.nx = dim.nx
    self.params.ny = dim.ny
    self.params.nz = dim.nz

    tracers = Tracers()
    tracers.params = self.params

    # Create the mapping for all times.
    if not tracer_file_name:
        tracers.find_tracers(
            var_file=var_file,
            datadir=datadir,
            trace_field=trace_field,
            ti=ti,
            tf=tf,
        )
    else:
        tracers.read(datadir=datadir, file_name=tracer_file_name)
    self.tracers = tracers

    # Set some default values.
    self.t = np.zeros((tf - ti + 1) * series + (1 - series))
    self.fixed_index = np.zeros((tf - ti + 1) * series + (1 - series))
    self.poincare = np.zeros(
        [
            int(self.params.trace_sub * dim.nx),
            int(self.params.trace_sub * dim.ny),
            n_times,
        ]
    )
    ix0 = range(0, int(self.params.nx * self.params.trace_sub) - 1)
    iy0 = range(0, int(self.params.ny * self.params.trace_sub) - 1)
    self.fixed_points = []
    self.fixed_sign = []
    self.fixed_tracers = []

    # Start the parallelized fixed point finding.
    for tidx in range(n_times):
        if tidx > 0:
            var = read.var(
                var_file="VAR{0}".format(tidx + ti),
                datadir=datadir,
                magic=magic,
                quiet=True,
                trimall=True,
            )
            field = getattr(var, trace_field)
            self.t[tidx] = var.t

        proc = []
        sub_data = []
        fixed = []
        fixed_sign = []
        fixed_tracers = []
        for i_proc in range(self.params.n_proc):
            proc.append(
                mp.Process(
                    target=self.__sub_fixed,
                    args=(queue, ix0, iy0, field, self.tracers, tidx, var, i_proc),
                )
            )
        for i_proc in range(self.params.n_proc):
            proc[i_proc].start()
        for i_proc in range(self.params.n_proc):
            sub_data.append(queue.get())
        for i_proc in range(self.params.n_proc):
            proc[i_proc].join()
        for i_proc in range(self.params.n_proc):
            # Extract the data from the single cores. Mind the order.
            sub_proc = sub_data[i_proc][0]
            fixed.extend(sub_data[i_proc][1])
            fixed_tracers.extend(sub_data[i_proc][2])
            fixed_sign.extend(sub_data[i_proc][3])
            self.fixed_index[tidx] += sub_data[i_proc][4]
            self.poincare[sub_proc :: self.params.n_proc, :, tidx] = sub_data[
                i_proc
            ][5]
        for i_proc in range(self.params.n_proc):
            proc[i_proc].terminate()

        # Discard fixed points which lie too close to each other.
        fixed, fixed_tracers, fixed_sign = self.__discard_close_fixed_points(
            np.array(fixed), np.array(fixed_sign), np.array(fixed_tracers), var
        )
        if self.fixed_points is None:
            self.fixed_points = []
            self.fixed_sign = []
            self.fixed_tracers = []
        self.fixed_points.append(np.array(fixed))
        self.fixed_sign.append(np.array(fixed_sign))
        self.fixed_tracers.append(fixed_tracers)

    # Compute the traced quantities along the fixed point streamlines.
    if (self.params.int_q == "curly_A") or (self.params.int_q == "ee"):
        for t_idx in range(0, n_times):
            if self.params.int_q == "curly_A":
                self.curly_A.append([])
            if self.params.int_q == "ee":
                self.ee.append([])
            for fixed in self.fixed_points[t_idx]:
                # Trace the stream line.
                xx = np.array([fixed[0], fixed[1], self.params.Oz])
                # time = np.linspace(0, self.params.Lz/np.max(abs(field[2])), 10)
                field_strength_z0 = vec_int(
                    xx,
                    field,
                    [var.dx, var.dy, var.dz],
                    [var.x[0], var.y[0], var.z[0]],
                    [len(var.x), len(var.y), len(var.z)],
                    interpolation=self.params.interpolation,
                )
                field_strength_z0 = np.sqrt(np.sum(field_strength_z0 ** 2))
                time = np.linspace(0, 4 * self.params.Lz / field_strength_z0, 500)
                stream = Stream(field, self.params, xx=xx, time=time)

                # Do the field line integration.
                if self.params.int_q == "curly_A":
                    curly_A = 0
                    for l in range(stream.iterations - 1):
                        aaInt = vec_int(
                            (stream.tracers[l + 1] + stream.tracers[l]) / 2,
                            var.aa,
                            [var.dx, var.dy, var.dz],
                            [var.x[0], var.y[0], var.z[0]],
                            [len(var.x), len(var.y), len(var.z)],
                            interpolation=self.params.interpolation,
                        )
                        curly_A += np.dot(
                            aaInt, (stream.tracers[l + 1] - stream.tracers[l])
                        )
                    self.curly_A[-1].append(curly_A)
                if self.params.int_q == "ee":
                    ee_p = 0
                    for l in range(stream.iterations - 1):
                        eeInt = vec_int(
                            (stream.tracers[l + 1] + stream.tracers[l]) / 2,
                            ee,
                            [var.dx, var.dy, var.dz],
                            [var.x[0], var.y[0], var.z[0]],
                            [len(var.x), len(var.y), len(var.z)],
                            interpolation=self.params.interpolation,
                        )
                        ee_p += np.dot(
                            eeInt, (stream.tracers[l + 1] - stream.tracers[l])
                        )
                    self.ee[-1].append(ee_p)
            if self.params.int_q == "curly_A":
                self.curly_A[-1] = np.array(self.curly_A[-1])
            if self.params.int_q == "ee":
                self.ee[-1] = np.array(self.ee[-1])

    return 0
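
# Standalone sketch of the queue-based multiprocessing pattern used in
# find_fixed above (illustrative names, not pencil API): every worker
# handles the strided slice data[i_proc::n_proc] and pushes
# (i_proc, result) into the queue. Because queue arrival order is
# arbitrary, results are reassembled by the returned i_proc, which is
# exactly the "Mind the order" bookkeeping in the loop above. Assumes a
# 1-D numpy array as input; the worker is module-level so it stays
# picklable under the 'spawn' start method as well.
def _sketch_worker(queue, chunk, i_proc):
    queue.put((i_proc, chunk ** 2))  # stand-in for the real per-core work

def _strided_workers_sketch(data, n_proc=4):
    import multiprocessing as mp
    import numpy as np
    queue = mp.Queue()
    procs = [mp.Process(target=_sketch_worker, args=(queue, data[i::n_proc], i))
             for i in range(n_proc)]
    for p in procs:
        p.start()
    out = np.empty_like(data)
    for _ in range(n_proc):
        i_proc, chunk = queue.get()  # arrival order is arbitrary: key on i_proc
        out[i_proc::n_proc] = chunk
    for p in procs:
        p.join()
    return out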
def gas_velo_at_particle_pos(
    varfiles="last4", sim=False, scheme="tsc", use_IDL=False, OVERWRITE=False
):
    """This script calculates the gas velocity at the particle positions and
    stores it, together with the particle positions, the containing grid
    cell indices, the particle velocities, and the particle index, in a
    gas_velo_at_particle_pos file.

    Args:
      varfiles:   specify varfiles for calculation, e.g. 'last', 'first',
                  'all', 'VAR###', 'last4', 'first3'
      scheme:     interpolation scheme; possible are:
                  - ngp: nearest grid point
                  - cic: cloud in cell
                  - tsc: triangular shaped cloud
      OVERWRITE:  set to True to overwrite already calculated results
    """

    from pencil import get_sim
    from pencil import io
    from pencil import read
    from pencil import diag
    from pencil.io import mkdir
    from os import listdir
    from os.path import exists, join, dirname
    import numpy as np

    GAS_VELO_TAG = "gas_velo_at_particle_pos"

    if sim == False:
        sim = get_sim()
        if sim == False:
            print("! ERROR: Specify simulation object!")
            return False
    SIM = sim

    if use_IDL:
        print(
            "? WARNING: IDL VERSION OF THIS SCRIPT BY JOHANSEN, "
            "not recommended for 2D data"
        )
        from ...backpack import pidly

        print("## starting IDL engine..")
        IDL = pidly.IDL(long_delay=0.05)  # start IDL engine

        ## skip if nothing is new
        if (
            (not OVERWRITE)
            and (exists(join(SIM.pc_datadir, "sigma.pkl")))
            and (exists(join(SIM.pc_datadir, "zeta.pkl")))
        ):
            print(
                "~ " + SIM.name + " is already calculated and up-to-date! -> skipping it!"
            )
        else:
            ## start calculations
            print(
                '~ Calculating gas_velo_at_particle_pos for "'
                + SIM.name + '" in "' + SIM.path + '"'
            )
            IDL(
                ".COMPILE "
                + str(join(dirname(diag.particle.__file__),
                           "gas_velo_at_particle_pos.pro"))
            )
            IDL.pro(
                "gas_velo_at_particle_pos",
                datadir=SIM.datadir,
                destination=GAS_VELO_TAG,
                doforthelastNvar=varfiles[4:],
            )
            files = [
                i.split("_")[-1].split(".sav")[0]
                for i in listdir(join(SIM.pc_datadir, GAS_VELO_TAG))
                if i.startswith(GAS_VELO_TAG)
                and i.endswith(".sav") or i.endswith(".pkl")
            ]
            if files == []:
                print(
                    "!! ERROR: No calc_gas_speed_at_particle_position files "
                    "found for " + SIM.name
                    + "! Use the IDL script to produce them first!"
                )
        IDL.close()
        return True

    else:
        print(
            '~ Calculating gas_velo_at_particle_pos for "'
            + SIM.name + '" in "' + SIM.path + '"'
        )
        save_destination = join(SIM.pc_datadir, GAS_VELO_TAG)
        mkdir(save_destination)
        varlist = SIM.get_varlist(pos=varfiles, particle=False)
        pvarlist = SIM.get_varlist(pos=varfiles, particle=True)

        for f, p in zip(varlist, pvarlist):
            save_filename = GAS_VELO_TAG + "_" + scheme + "_" + f[3:]
            # NOTE: the original called os.path.exists() with a 'folder'
            # keyword; pencil's io.exists() provides that signature.
            if not OVERWRITE and io.exists(save_filename, folder=save_destination):
                continue

            print("## Reading " + f + " ...")
            ff = read.var(datadir=SIM.datadir, varfile=f, quiet=True, trimall=False)
            pp = read.pvar(datadir=SIM.datadir, varfile=p)

            ## remove ghost zones from the grid, call the reduced grid the "real grid"
            realgridx = ff.x[ff.l1 : ff.l2]
            realgridy = ff.y[ff.m1 : ff.m2]
            realgridz = ff.z[ff.n1 : ff.n2]
            nx = ff.l2 - ff.l1
            ny = ff.m2 - ff.m1
            nz = ff.n2 - ff.n1

            ## prepare lists for all quantities
            l_ipars = pp.ipars  # particle index, KNOWN
            l_px = pp.xp
            l_py = pp.yp
            l_pz = pp.zp  # particle absolute position, KNOWN
            l_vx = pp.vpx
            l_vy = pp.vpy
            l_vz = pp.vpz  # particle velocity, KNOWN
            l_rix = []
            l_riy = []
            l_riz = []  # particle realgrid index (grid index = l/m/n + realgrid index)
            l_ix = []
            l_iy = []
            l_iz = []  # particle grid index (in the untrimmed grid)
            l_ux = []
            l_uy = []
            l_uz = []  # underlying gas velocity at the particle position

            ## get the index of the realgrid cell for each particle
            for i in range(len(l_ipars)):
                l_rix.append(np.abs(realgridx - l_px[i]).argmin())
                l_riy.append(np.abs(realgridy - l_py[i]).argmin())
                l_riz.append(np.abs(realgridz - l_pz[i]).argmin())

            ## convert into the untrimmed grid
            l_ix = np.array(l_rix) + ff.l1
            l_iy = np.array(l_riy) + ff.m1
            l_iz = np.array(l_riz) + ff.n1

            ## NGP
            if scheme == "ngp" or scheme == "NGP":
                print("## Calculating gas velocities via " + scheme)
                l_ux = ff.ux[l_iz, l_iy, l_ix]
                l_uy = ff.uy[l_iz, l_iy, l_ix]
                l_uz = ff.uz[l_iz, l_iy, l_ix]

            ## CIC
            if scheme == "cic" or scheme == "CIC":
                print("## Calculating gas velocities via " + scheme)
                for ix0, iy0, iz0, px, py, pz in zip(
                    l_ix, l_iy, l_iz, l_px, l_py, l_pz
                ):  # for each particle
                    if ff.x[ix0] > px:
                        ix0 = ix0 - 1  # ix0 must be left of the particle
                    if ff.y[iy0] > py:
                        iy0 = iy0 - 1  # iy0 must be below the particle
                    if ff.z[iz0] > pz:
                        iz0 = iz0 - 1  # iz0 must be under the particle
                    ix1 = ix0
                    iy1 = iy0
                    iz1 = iz0  # default if a dimension is degenerate, else:
                    if nx > 1:
                        ix1 = ix0 + 1  # adjust ix1 to the cell to the right
                        dx_1 = 1.0 / ff.dx
                    if ny > 1:
                        iy1 = iy0 + 1  # adjust iy1 to the cell above
                        dy_1 = 1.0 / ff.dy
                    if nz > 1:
                        iz1 = iz0 + 1  # adjust iz1 to the cell above
                        dz_1 = 1.0 / ff.dz
                    ux = 0.0
                    uy = 0.0
                    uz = 0.0
                    for ix in [ix0, ix1]:
                        for iy in [iy0, iy1]:
                            for iz in [iz0, iz1]:
                                weight = 1.0
                                if nx > 1:
                                    weight = weight * (1.0 - abs(px - ff.x[ix]) * dx_1)
                                if ny > 1:
                                    weight = weight * (1.0 - abs(py - ff.y[iy]) * dy_1)
                                if nz > 1:
                                    weight = weight * (1.0 - abs(pz - ff.z[iz]) * dz_1)
                                ux = ux + weight * ff.ux[iz, iy, ix]
                                uy = uy + weight * ff.uy[iz, iy, ix]
                                uz = uz + weight * ff.uz[iz, iy, ix]
                                if iz0 == iz1:
                                    break  # beware of degeneracy
                            if iy0 == iy1:
                                break  # beware of degeneracy
                        if ix0 == ix1:
                            break  # beware of degeneracy
                    l_ux.append(ux)
                    l_uy.append(uy)
                    l_uz.append(uz)

            ## TSC
            if scheme == "tsc" or scheme == "TSC":
                for ix0, iy0, iz0, px, py, pz in zip(
                    l_ix, l_iy, l_iz, l_px, l_py, l_pz
                ):  # for each particle
                    ixx0 = ix0
                    ixx1 = ix0  # beware of degeneracy
                    iyy0 = iy0
                    iyy1 = iy0
                    izz0 = iz0
                    izz1 = iz0
                    if nx > 1:
                        ixx0 = ix0 - 1
                        ixx1 = ix0 + 1
                        dx_1 = 1.0 / ff.dx
                        dx_2 = 1.0 / ff.dx ** 2
                    if ny > 1:
                        iyy0 = iy0 - 1
                        iyy1 = iy0 + 1
                        dy_1 = 1.0 / ff.dy
                        dy_2 = 1.0 / ff.dy ** 2
                    if nz > 1:
                        izz0 = iz0 - 1
                        izz1 = iz0 + 1
                        dz_1 = 1.0 / ff.dz
                        dz_2 = 1.0 / ff.dz ** 2
                    ux = 0.0
                    uy = 0.0
                    uz = 0.0
                    for ix in [ix0, ixx0, ixx1]:
                        weight_x = 0.0
                        if ix - ix0 == -1 or ix - ix0 == 1:
                            weight_x = (
                                1.125
                                - 1.5 * abs(px - ff.x[ix]) * dx_1
                                + 0.5 * abs(px - ff.x[ix]) ** 2 * dx_2
                            )
                        elif nx != 1:
                            weight_x = 0.75 - (px - ff.x[ix]) ** 2 * dx_2
                        for iy in [iy0, iyy0, iyy1]:
                            weight_y = 0.0
                            if iy - iy0 == -1 or iy - iy0 == 1:
                                weight_y = (
                                    1.125
                                    - 1.5 * abs(py - ff.y[iy]) * dy_1
                                    + 0.5 * abs(py - ff.y[iy]) ** 2 * dy_2
                                )
                            elif ny != 1:
                                weight_y = 0.75 - (py - ff.y[iy]) ** 2 * dy_2
                            for iz in [iz0, izz0, izz1]:
                                weight_z = 0.0
                                if iz - iz0 == -1 or iz - iz0 == 1:
                                    weight_z = (
                                        1.125
                                        - 1.5 * abs(pz - ff.z[iz]) * dz_1
                                        + 0.5 * abs(pz - ff.z[iz]) ** 2 * dz_2
                                    )
                                elif nz != 1:
                                    weight_z = 0.75 - (pz - ff.z[iz]) ** 2 * dz_2
                                weight = 1.0
                                if nx > 1:
                                    weight = weight * weight_x
                                if ny > 1:
                                    weight = weight * weight_y
                                if nz > 1:
                                    weight = weight * weight_z
                                ux = ux + weight * ff.ux[iz, iy, ix]
                                uy = uy + weight * ff.uy[iz, iy, ix]
                                uz = uz + weight * ff.uz[iz, iy, ix]
                                if izz0 == izz1:
                                    break  # beware of degeneracy
                            if iyy0 == iyy1:
                                break  # beware of degeneracy
                        if ixx0 == ixx1:
                            break  # beware of degeneracy
                    l_ux.append(ux)
                    l_uy.append(uy)
                    l_uz.append(uz)

            ## convert all information into a single record array
            data_set = np.core.records.fromarrays(
                [
                    l_ipars.astype("int"),
                    l_px, l_py, l_pz,
                    l_vx, l_vy, l_vz,
                    l_rix, l_riy, l_riz,
                    l_ix, l_iy, l_iz,
                    l_ux, l_uy, l_uz,
                ],
                names="ipar, ipx, ipy, ipz, vx, vy, vz, rix, riy, riz, ix, iy, iz, ux, uy, uz",
                formats="int, float, float, float, float, float, float, int, int, int, int, int, int, float, float, float",
            )
            gas_velo_at_particle_pos = np.sort(data_set, order=["ix", "iy", "iz"])

            Nix = int(gas_velo_at_particle_pos["rix"].max() + 1)
            Niy = int(gas_velo_at_particle_pos["riy"].max() + 1)
            Niz = int(gas_velo_at_particle_pos["riz"].max() + 1)

            ## count the number of particles per realgrid cell
            Npar_arr = np.array(
                [
                    gas_velo_at_particle_pos["rix"],
                    gas_velo_at_particle_pos["riy"],
                    gas_velo_at_particle_pos["riz"],
                ]
            )
            # rgrid_edges = (grid.x[1:]-(grid.x[1:]-grid.x[:-1])/2)[2:-2]
            Npar_hist, edges = np.histogramdd(Npar_arr.T, bins=(Nix, Niy, Niz))

            gas_velo_at_particle_pos = {
                "time": ff.t,
                "par_pos": np.array(
                    [
                        gas_velo_at_particle_pos["ipx"],
                        gas_velo_at_particle_pos["ipy"],
                        gas_velo_at_particle_pos["ipz"],
                    ]
                ),
                "par_velo": np.array(
                    [
                        gas_velo_at_particle_pos["vx"],
                        gas_velo_at_particle_pos["vy"],
                        gas_velo_at_particle_pos["vz"],
                    ]
                ),
                "par_idx": np.array(
                    [
                        gas_velo_at_particle_pos["rix"],
                        gas_velo_at_particle_pos["riy"],
                        gas_velo_at_particle_pos["riz"],
                    ]
                ),
                "npar": np.array(
                    Npar_hist[
                        gas_velo_at_particle_pos["rix"],
                        gas_velo_at_particle_pos["riy"],
                        gas_velo_at_particle_pos["riz"],
                    ]
                ),
                "gas_velo": np.array(
                    [
                        gas_velo_at_particle_pos["ux"],
                        gas_velo_at_particle_pos["uy"],
                        gas_velo_at_particle_pos["uz"],
                    ]
                ),
            }

            print("## Saving dataset into " + save_destination + "...")
            io.pkl_save(
                {"gas_velo_at_particle_pos": gas_velo_at_particle_pos, "t": ff.t},
                save_filename,
                folder=save_destination,
            )
        print("## Done!")
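
# One-dimensional sketch of the interpolation kernels implemented above
# (illustrative helpers, not pencil API). With d = |xp - x_i| / dx the
# particle-node distance in cell units, CIC uses a linear kernel over the
# two enclosing nodes and TSC the quadratic kernel over three nodes,
# matching the weight and weight_x/y/z expressions in the loops above.
def _cic_weight_1d(d):
    # linear (cloud-in-cell) kernel, support d < 1
    return max(0.0, 1.0 - d)

def _tsc_weight_1d(d):
    # quadratic (triangular-shaped-cloud) kernel, support d < 1.5
    if d < 0.5:
        return 0.75 - d ** 2
    if d < 1.5:
        return 1.125 - 1.5 * d + 0.5 * d ** 2
    return 0.0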
def var2h5(
    newdir,
    olddir,
    allfile_names,
    todatadir,
    fromdatadir,
    snap_by_proc,
    precision,
    lpersist,
    quiet,
    nghost,
    settings,
    param,
    grid,
    x,
    y,
    z,
    lshear,
    lremove_old_snapshots,
    indx,
    trimall=False,
    l_mpi=False,
    driver=None,
    comm=None,
    rank=0,
    size=1,
):
    """
    Copy a simulation snapshot set written in Fortran binary to hdf5.

    call signature:

    var2h5(newdir, olddir, allfile_names, todatadir, fromdatadir,
           snap_by_proc, precision, lpersist, quiet, nghost, settings,
           param, grid, x, y, z, lshear, lremove_old_snapshots, indx,
           trimall=False, l_mpi=False, driver=None, comm=None, rank=0,
           size=1)

    Keyword arguments:

    *newdir*:
      String path to the simulation destination directory.

    *olddir*:
      String path to the simulation source directory.

    *allfile_names*:
      A list of names of the snapshot files to be written, e.g. VAR0.

    *todatadir*:
      Directory to which the data is stored.

    *fromdatadir*:
      Directory from which the data is collected.

    *snap_by_proc*:
      Read and write snapshots by procdir of the Fortran binary tree.

    *precision*:
      Single 'f' or double 'd' precision for the new data.

    *lpersist*:
      Option to include persistent variables from snapshots.

    *quiet*:
      Option not to print output.

    *nghost*:
      Number of ghost zones.

    *settings*:
      Simulation properties.

    *param*:
      Simulation Param object.

    *grid*:
      Simulation Grid object.

    *x*, *y*, *z*:
      xyz arrays of the domain with ghost zones.

    *lshear*:
      Flag for the shear.

    *lremove_old_snapshots*:
      If True the old snapshots will be deleted once the new snapshot has
      been saved.

    *indx*:
      List of variable indices in the f-array.

    *trimall*:
      Strip ghost zones from snapshots.

    *l_mpi*:
      Applying MPI parallel process.

    *driver*:
      HDF5 file io driver, either None or mpio.

    *comm*:
      MPI library calls.

    *rank*:
      Integer ID of processor.

    *size*:
      Number of MPI processes.
    """

    import os
    from os.path import join
    import numpy as np
    from pencil import read
    from pencil.io import write_h5_snapshot
    import sys
    import time

    if not isinstance(allfile_names, list):
        allfile_names = [allfile_names]

    # Proceed to copy each snapshot in allfile_names.
    nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
    if l_mpi:
        if not snap_by_proc:
            file_names = np.array_split(allfile_names, size)
            if "VARd1" in allfile_names:
                varfile_names = file_names[size - rank - 1]
            else:
                varfile_names = file_names[rank]
        else:
            os.chdir(olddir)
            if size > nprocs:
                nnames = len(allfile_names)
                if size > nnames * nprocs:
                    file_names = np.array_split(allfile_names, nnames)
                    varfile_names = file_names[np.mod(rank, nnames)]
                    nprocsplit = int(size / nnames)
                    iprocs = np.array_split(np.arange(nprocs), nprocs)
                    procs = iprocs[np.mod(rank, nprocs)]
                else:
                    file_names = np.array_split(allfile_names, nnames)
                    varfile_names = file_names[np.mod(rank, nnames)]
                    if nnames > size:
                        procs = np.arange(nprocs)
                    else:
                        nproc_per_fname = int(size / nnames)
                        isize = int(np.mod(rank, nnames) / nproc_per_fname)
                        if np.mod(isize, nproc_per_fname + 1) == 0:
                            npf = nproc_per_fname + 1
                            iprocs = np.array(
                                np.array_split(np.arange(nprocs), npf)
                            ).T
                        else:
                            npf = nproc_per_fname
                            iprocs = np.array(
                                np.array_split(np.arange(nprocs), npf)
                            ).T
                        procs = iprocs[np.mod(int((rank * nnames) / size), npf)]
            else:
                if np.mod(nprocs, size) > 0:
                    procs = np.arange(nprocs + size - np.mod(nprocs, size))
                    procs[-size + np.mod(nprocs, size):] = np.arange(
                        size - np.mod(nprocs, size)
                    )
                else:
                    procs = np.arange(nprocs)
                iprocs = np.array_split(procs, size)
                procs = iprocs[rank]
                varfile_names = allfile_names
            print("rank {} procs:".format(rank), procs)
            sys.stdout.flush()
    else:
        varfile_names = allfile_names
        procs = np.arange(nprocs)

    if len(varfile_names) > 0:
        for file_name in varfile_names:
            # Load the Fortran binary snapshot.
            if not quiet:
                print("rank {}: ".format(rank) + "saving " + file_name)
                sys.stdout.flush()
            if snap_by_proc:
                if len(procs) > 0:
                    proctime = time.time()
                    for proc in procs:
                        os.chdir(olddir)
                        if np.mod(proc, size) == size - 1:
                            print(
                                "rank {}: ".format(rank) + "saving " + file_name
                                + " on proc{}\n".format(proc),
                                time.ctime(),
                            )
                            sys.stdout.flush()
                        procdim = read.dim(proc=proc)
                        var = read.var(
                            file_name,
                            datadir=fromdatadir,
                            quiet=quiet,
                            lpersist=lpersist,
                            trimall=trimall,
                            proc=proc,
                        )
                        try:
                            var.deltay
                            lshear = True
                        except:
                            lshear = False

                        if lpersist:
                            persist = {}
                            for key in read.record_types.keys():
                                try:
                                    persist[key] = var.__getattribute__(key)[()]
                                    if type(persist[key][0]) == str:
                                        persist[key][0] = var.__getattribute__(
                                            key
                                        )[0].encode()
                                except:
                                    pass
                        else:
                            persist = None
                        if np.mod(proc, size) == size - 1:
                            print(
                                "rank {}: ".format(rank) + "loaded " + file_name
                                + " on proc{} in {} seconds".format(
                                    proc, time.time() - proctime
                                )
                            )
                            sys.stdout.flush()

                        # Write the data to hdf5.
                        os.chdir(newdir)
                        write_h5_snapshot(
                            var.f,
                            file_name=file_name,
                            state="a",
                            datadir=todatadir,
                            precision=precision,
                            nghost=nghost,
                            persist=persist,
                            proc=proc,
                            procdim=procdim,
                            settings=settings,
                            param=param,
                            grid=grid,
                            lghosts=True,
                            indx=indx,
                            t=var.t,
                            x=x,
                            y=y,
                            z=z,
                            quiet=quiet,
                            rank=rank,
                            size=size,
                            lshear=lshear,
                            driver=driver,
                            comm=comm,
                        )
                        if np.mod(proc, size) == size - 1:
                            print(
                                "rank {}: ".format(rank) + "written " + file_name
                                + " on proc{} in {} seconds".format(
                                    proc, time.time() - proctime
                                )
                            )
                            sys.stdout.flush()
                        proctime = time.time()
            else:
                var = read.var(
                    file_name,
                    datadir=fromdatadir,
                    quiet=quiet,
                    lpersist=lpersist,
                    trimall=trimall,
                )
                try:
                    var.deltay
                    lshear = True
                except:
                    lshear = False

                if lpersist:
                    persist = {}
                    for key in read.record_types.keys():
                        try:
                            persist[key] = var.__getattribute__(key)[()]
                            if type(persist[key][0]) == str:
                                persist[key][0] = var.__getattribute__(key)[0].encode()
                        except:
                            pass
                else:
                    persist = None

                # Write the data to hdf5.
                os.chdir(newdir)
                write_h5_snapshot(
                    var.f,
                    file_name=file_name,
                    datadir=todatadir,
                    precision=precision,
                    nghost=nghost,
                    persist=persist,
                    settings=settings,
                    param=param,
                    grid=grid,
                    lghosts=True,
                    indx=indx,
                    t=var.t,
                    x=x,
                    y=y,
                    z=z,
                    lshear=lshear,
                    driver=None,
                    comm=None,
                )
            if lremove_old_snapshots:
                os.chdir(olddir)
                cmd = "rm -f " + join(olddir, fromdatadir, "proc*", file_name)
                os.system(cmd)
            del var
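
# Sketch of the file distribution used above when l_mpi is set without
# snap_by_proc: the snapshot names are split across the MPI ranks with
# np.array_split, so each rank converts a contiguous chunk. The size and
# rank values here are illustrative stand-ins for the real communicator.
def _rank_file_split_sketch():
    import numpy as np
    allfile_names = ["VAR0", "VAR1", "VAR2", "VAR3", "VAR4"]
    size, rank = 2, 1
    file_names = np.array_split(allfile_names, size)
    return list(file_names[rank])  # rank 1 converts ['VAR3', 'VAR4']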
def find_tracers(self, var_file='VAR0', datadir='data', trace_field='bb',
                 ti=-1, tf=-1):
    """
    Trace streamlines of the vector field 'trace_field' from z = z0 to
    z = z1 and integrate the quantities 'int_q' along the lines. Creates
    a 2d mapping as in 'streamlines.f90'.

    call signature:

    find_tracers(var_file='VAR0', datadir='data', trace_field='bb',
                 ti=-1, tf=-1)

    Keyword arguments:

    *var_file*:
      Varfile to be read.

    *datadir*:
      Directory where the data is stored.

    *trace_field*:
      Vector field used for the streamline tracing.

    *ti*:
      Initial VAR file index for tracer time sequences. Overrides 'var_file'.

    *tf*:
      Final VAR file index for tracer time sequences. Overrides 'var_file'.
    """

    import numpy as np
    import multiprocessing as mp
    from pencil import read
    from pencil import math

    # Write the tracing parameters.
    self.params.trace_field = trace_field
    self.params.datadir = datadir

    # Multi core setup.
    if not (np.isscalar(self.params.n_proc)) or (self.params.n_proc % 1 != 0):
        print('Error: invalid processor number')
        return -1
    queue = mp.Queue()

    # Read the data with the correct magic.
    magic = []
    if trace_field == 'bb':
        magic.append('bb')
    if trace_field == 'jj':
        magic.append('jj')
    if trace_field == 'vort':
        magic.append('vort')
    if self.params.int_q == 'ee':
        magic.append('bb')
        magic.append('jj')
    dim = read.dim(datadir=datadir)
    self.params.var_file = var_file

    # Check if the user wants a tracer time series.
    if (ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti):
        series = True
        nTimes = tf - ti + 1
    else:
        series = False
        nTimes = 1

    # Initialize the arrays.
    self.x0 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.y0 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.x1 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.y1 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.z1 = np.zeros([int(self.params.trace_sub*dim.nx),
                        int(self.params.trace_sub*dim.ny), nTimes])
    self.l = np.zeros([int(self.params.trace_sub*dim.nx),
                       int(self.params.trace_sub*dim.ny), nTimes])
    if self.params.int_q == 'curly_A':
        self.curly_A = np.zeros([int(self.params.trace_sub*dim.nx),
                                 int(self.params.trace_sub*dim.ny), nTimes])
    if self.params.int_q == 'ee':
        self.ee = np.zeros([int(self.params.trace_sub*dim.nx),
                            int(self.params.trace_sub*dim.ny), nTimes])
    self.mapping = np.zeros([int(self.params.trace_sub*dim.nx),
                             int(self.params.trace_sub*dim.ny), nTimes, 3])
    self.t = np.zeros(nTimes)

    for t_idx in range(ti, tf + 1):
        if series:
            var_file = 'VAR' + str(t_idx)

        # Read the data.
        var = read.var(var_file=var_file, datadir=datadir, magic=magic,
                       quiet=True, trimall=True)
        grid = read.grid(datadir=datadir, quiet=True, trim=True)
        param2 = read.param(datadir=datadir, quiet=True)
        self.t[t_idx] = var.t

        # Extract the requested vector trace_field.
        field = getattr(var, trace_field)
        if self.params.int_q == 'curly_A':
            self.aa = var.aa
        if self.params.int_q == 'ee':
            self.ee = var.jj*param2.eta - math.cross(var.uu, var.bb)

        # Get the simulation parameters.
        self.params.dx = var.dx
        self.params.dy = var.dy
        self.params.dz = var.dz
        self.params.Ox = var.x[0]
        self.params.Oy = var.y[0]
        self.params.Oz = var.z[0]
        self.params.Lx = grid.Lx
        self.params.Ly = grid.Ly
        self.params.Lz = grid.Lz
        self.params.nx = dim.nx
        self.params.ny = dim.ny
        self.params.nz = dim.nz

        # Initialize the tracers.
        for ix in range(int(self.params.trace_sub*dim.nx)):
            for iy in range(int(self.params.trace_sub*dim.ny)):
                self.x0[ix, iy, t_idx] = grid.x[0] + grid.dx/self.params.trace_sub*ix
                self.x1[ix, iy, t_idx] = self.x0[ix, iy, t_idx].copy()
                self.y0[ix, iy, t_idx] = grid.y[0] + grid.dy/self.params.trace_sub*iy
                self.y1[ix, iy, t_idx] = self.y0[ix, iy, t_idx].copy()
                self.z1[ix, iy, t_idx] = grid.z[0]

        proc = []
        sub_data = []
        for i_proc in range(self.params.n_proc):
            proc.append(mp.Process(target=self.__sub_tracers,
                                   args=(queue, field, t_idx, i_proc,
                                         self.params.n_proc)))
        for i_proc in range(self.params.n_proc):
            proc[i_proc].start()
        for i_proc in range(self.params.n_proc):
            sub_data.append(queue.get())
        for i_proc in range(self.params.n_proc):
            proc[i_proc].join()
        for i_proc in range(self.params.n_proc):
            # Extract the data from the single cores. Mind the order.
            sub_proc = sub_data[i_proc][0]
            self.x1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][1]
            self.y1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][2]
            self.z1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][3]
            self.l[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][4]
            self.mapping[sub_proc::self.params.n_proc, :, t_idx, :] = sub_data[i_proc][5]
            if self.params.int_q == 'curly_A':
                self.curly_A[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][6]
            if self.params.int_q == 'ee':
                self.ee[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][7]
        for i_proc in range(self.params.n_proc):
            proc[i_proc].terminate()

    return 0
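
# Vectorized sketch of the tracer seed grid initialized above (illustrative
# helper, not pencil API): trace_sub seeds per grid cell along x and y,
# all launched from the lower z boundary z0, replacing the double loop
# over ix and iy with meshgrid arithmetic.
def _seed_grid_sketch(x0, y0, z0, dx, dy, nx, ny, trace_sub):
    import numpy as np
    sx = x0 + dx / trace_sub * np.arange(int(trace_sub * nx))
    sy = y0 + dy / trace_sub * np.arange(int(trace_sub * ny))
    X, Y = np.meshgrid(sx, sy, indexing='ij')
    return X, Y, np.full_like(X, z0)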
def var2vtk(var_file='var.dat', datadir='data', proc=-1,
            variables=None, b_ext=False, magic=[],
            destination='work', quiet=True, trimall=True, ti=-1, tf=-1):
    """
    Convert data from Pencil Code format to vtk.

    call signature::

      var2vtk(var_file='', datadir='data', proc=-1,
              variables='', b_ext=False,
              destination='work', quiet=True, trimall=True, ti=-1, tf=-1)

    Read *var_file* and convert its content into vtk format. Write the
    result in *destination*.

    Keyword arguments:

    *var_file*:
      The original var_file.

    *datadir*:
      Directory where the data is stored.

    *proc*:
      Processor which should be read. Set to -1 for all processors.

    *variables*:
      List of variables which should be written. If None, write all.

    *b_ext*:
      Add the external magnetic field.

    *destination*:
      Destination file.

    *quiet*:
      Keep quiet when reading the var files.

    *trimall*:
      Trim the data cube to exclude ghost zones.

    *ti, tf*:
      Start and end index for animation. Leave negative for no animation.
      Overwrites variable var_file.
    """

    import numpy as np
    import sys
    from pencil import read
    from pencil import math

    # Determine whether we want an animation.
    if ti < 0 or tf < 0:
        animation = False
    else:
        animation = True

    # If no variables are specified, collect all by default.
    if not variables:
        variables = []
        indx = read.index()
        for key in indx.__dict__.keys():
            if 'keys' not in key:
                variables.append(key)
        if 'uu' in variables:
            magic.append('vort')
            variables.append('vort')
        if 'rho' in variables or 'lnrho' in variables:
            if 'ss' in variables:
                magic.append('tt')
                variables.append('tt')
                magic.append('pp')
                variables.append('pp')
        if 'aa' in variables:
            magic.append('bb')
            variables.append('bb')
            magic.append('jj')
            variables.append('jj')
            variables.append('ab')
            variables.append('b_mag')
            variables.append('j_mag')
    else:
        # Convert a single variable string into a list of length 1.
        if len(variables) > 0:
            if len(variables[0]) == 1:
                variables = [variables]
        if 'tt' in variables:
            magic.append('tt')
        if 'pp' in variables:
            magic.append('pp')
        if 'bb' in variables:
            magic.append('bb')
        if 'jj' in variables:
            magic.append('jj')
        if 'vort' in variables:
            magic.append('vort')
        if 'b_mag' in variables and not 'bb' in magic:
            magic.append('bb')
        if 'j_mag' in variables and not 'jj' in magic:
            magic.append('jj')
        if 'ab' in variables and not 'bb' in magic:
            magic.append('bb')

    for t_idx in range(ti, tf + 1):
        if animation:
            var_file = 'VAR' + str(t_idx)

        # Read the Pencil Code variables and set the dimensions.
        var = read.var(var_file=var_file, datadir=datadir, proc=proc,
                       magic=magic, trimall=True, quiet=quiet)

        grid = read.grid(datadir=datadir, proc=proc, trim=trimall, quiet=True)

        params = read.param(quiet=True)

        # Add the external magnetic field.
        if b_ext == True:
            B_ext = np.array(params.b_ext)
            var.bb[0, ...] += B_ext[0]
            var.bb[1, ...] += B_ext[1]
            var.bb[2, ...] += B_ext[2]

        dimx = len(grid.x)
        dimy = len(grid.y)
        dimz = len(grid.z)
        dim = dimx * dimy * dimz
        dx = (np.max(grid.x) - np.min(grid.x)) / (dimx - 1)
        dy = (np.max(grid.y) - np.min(grid.y)) / (dimy - 1)
        dz = (np.max(grid.z) - np.min(grid.z)) / (dimz - 1)

        # Write the vtk header.
        if animation:
            fd = open(destination + str(t_idx) + '.vtk', 'wb')
        else:
            fd = open(destination + '.vtk', 'wb')
        fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
        fd.write('VAR files\n'.encode('utf-8'))
        fd.write('BINARY\n'.encode('utf-8'))
        fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(dimx, dimy, dimz).encode('utf-8'))
        fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
            grid.x[0], grid.y[0], grid.z[0]).encode('utf-8'))
        fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
            dx, dy, dz).encode('utf-8'))
        fd.write('POINT_DATA {0:9}\n'.format(dim).encode('utf-8'))

        # Write the data.
        for v in variables:
            print('Writing {0}.'.format(v))
            # Prepare the data in the correct format.
            if v == 'ab':
                data = math.dot(var.aa, var.bb)
            elif v == 'b_mag':
                data = np.sqrt(math.dot2(var.bb))
            elif v == 'j_mag':
                data = np.sqrt(math.dot2(var.jj))
            else:
                data = getattr(var, v)
            # Legacy vtk binary data is big endian: swap bytes on little
            # endian machines.
            if sys.byteorder == 'little':
                data = data.astype(np.float32).byteswap()
            else:
                data = data.astype(np.float32)
            # Check if we have vectors or scalars.
            if data.ndim == 4:
                data = np.moveaxis(data, 0, 3)
                fd.write('VECTORS {0} float\n'.format(v).encode('utf-8'))
            else:
                fd.write('SCALARS {0} float\n'.format(v).encode('utf-8'))
                fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            fd.write(data.tobytes())

        del var

        fd.close()
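
# Usage sketch, wrapped in a function so nothing runs on import (the data
# directory layout is assumed to be a standard Pencil Code run): convert a
# single snapshot, then an animation series VAR0..VAR5, which writes
# 'work.vtk' and 'work0.vtk' ... 'work5.vtk' respectively.
def _var2vtk_usage_sketch():
    # single snapshot, all variables found in the index file
    var2vtk(var_file='var.dat', datadir='data', destination='work')
    # animation series: ti/tf override var_file
    var2vtk(datadir='data', destination='work', ti=0, tf=5)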